# Plots.jl recipe for the relative toroidal momentum error of a time series.
# Assumes Plots (which reexports the RecipesBase macros), LaTeXStrings (for L"..."),
# and Plots.PlotMeasures (for the `mm` unit) are loaded.
# Usage (the @userplot macro generates the lowercase function):
#   plottoroidalmomentumerror(t, pφ; nplot=10)
@userplot PlotToroidalMomentumError
@recipe function f(p::PlotToroidalMomentumError; nplot=1, latex=true)
    if length(p.args) == 2 && typeof(p.args[1]) <: TimeSeries && typeof(p.args[2]) <: DataSeries
        t  = p.args[1]
        pφ = p.args[2]
        @assert t.n == pφ.nt
    else
        error("Toroidal momentum error plot should be given a time series and a data series. Got: $(typeof(p.args))")
    end

    legend := :none
    size   := (800, 400)

    @series begin
        if latex
            xlabel := L"t"
            ylabel := L"[P_\varphi (t) - P_\varphi (0)] / P_\varphi (0)"
        else
            xlabel := "t"
            ylabel := "[P(t) - P(0)] / P(0)"
        end
        xlims := (t[0], Inf)
        yformatter := :scientific
        guidefont := font(18)
        tickfont := font(12)
        right_margin := 10mm
        t[0:nplot:end], pφ[0:nplot:end]
    end
end
|
[STATEMENT]
lemma at_begin_fst_awtn_Bk[intro]: "at_begin_fst_awtn (as, am) (q, aaa, []) ires
\<Longrightarrow> at_begin_fst_awtn (as, am) (q, aaa, [Bk]) ires"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. at_begin_fst_awtn (as, am) (q, aaa, []) ires \<Longrightarrow> at_begin_fst_awtn (as, am) (q, aaa, [Bk]) ires
[PROOF STEP]
apply(auto simp: at_begin_fst_awtn.simps)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
## Analysis
Install the required packages
```
# only for Anaconda users
# for our project we need to install some packages
# in a local IDE this must be done via the console or the Anaconda package manager

# Windows
# install https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads
# install https://visualstudio.microsoft.com/de/thank-you-downloading-visual-studio/?sku=Community&rel=15
# pip install pm4py
# pip install graphviz
# pip install pm4pybpmn
# pip install pygraphviz

# macOS
# pip install pm4py
# python -c "import pm4py"
# pip install graphviz
# pip install pm4pybpmn
# pip install pygraphviz

# Linux
# g++ -v
# apt-get install graphviz
# pip install pm4py
# pip install graphviz
# pip install pm4pybpmn
# pip install pygraphviz

# for details / error handling see https://pm4py.fit.fraunhofer.de/install-page#item-1-2
```
```
# only for Colab
if 'google.colab' in str(get_ipython()):
    !apt-get install python3-tk
    #!apt-get install -y graphviz-dev
    #!apt-get install python3-dev graphviz libgraphviz-dev pkg-config
    !pip install graphviz
    !pip install pm4pybpmn
    !pip install pygraphviz
```
Reading package lists... Done
Building dependency tree
Reading state information... Done
python3-tk is already the newest version (3.6.9-1~18.04).
0 upgraded, 0 newly installed, 0 to remove and 14 not upgraded.
Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (0.10.1)
Requirement already satisfied: pm4pybpmn in /usr/local/lib/python3.6/dist-packages (0.1.6)
Requirement already satisfied: intervaltree in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (2.1.0)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (0.17.0)
Requirement already satisfied: pydotplus in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (2.0.2)
Requirement already satisfied: networkx>=2.2 in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (2.5)
Requirement already satisfied: lime in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (0.2.0.1)
Requirement already satisfied: bpmn-python==0.0.18 in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (0.0.18)
Requirement already satisfied: pm4py in /usr/local/lib/python3.6/dist-packages (from pm4pybpmn) (2.1.0.2)
Requirement already satisfied: sortedcontainers in /usr/local/lib/python3.6/dist-packages (from intervaltree->pm4pybpmn) (2.3.0)
Requirement already satisfied: pyparsing>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from pydotplus->pm4pybpmn) (2.4.7)
Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from networkx>=2.2->pm4pybpmn) (4.4.2)
Requirement already satisfied: matplotlib in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (3.2.2)
Requirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (4.41.1)
Requirement already satisfied: scikit-learn>=0.18 in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (0.22.2.post1)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (1.18.5)
Requirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (1.4.1)
Requirement already satisfied: scikit-image>=0.12 in /usr/local/lib/python3.6/dist-packages (from lime->pm4pybpmn) (0.16.2)
Requirement already satisfied: pandas in /usr/local/lib/python3.6/dist-packages (from bpmn-python==0.0.18->pm4pybpmn) (1.1.4)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from bpmn-python==0.0.18->pm4pybpmn) (1.15.0)
Requirement already satisfied: lxml in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (4.2.6)
Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (0.10.1)
Requirement already satisfied: pyvis in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (0.1.8.2)
Requirement already satisfied: pulp<=2.1 in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (2.1)
Requirement already satisfied: jsonpickle in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (1.4.2)
Requirement already satisfied: sympy in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (1.1.1)
Requirement already satisfied: ciso8601; python_version < "3.7" in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (2.1.3)
Requirement already satisfied: pm4pycvxopt>=0.0.10; python_version < "3.9" in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (0.0.10)
Requirement already satisfied: stringdist in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (1.0.9)
Requirement already satisfied: deprecation in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (2.1.0)
Requirement already satisfied: pytz in /usr/local/lib/python3.6/dist-packages (from pm4py->pm4pybpmn) (2018.9)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->lime->pm4pybpmn) (2.8.1)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib->lime->pm4pybpmn) (1.3.1)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib->lime->pm4pybpmn) (0.10.0)
Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.12->lime->pm4pybpmn) (7.0.0)
Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.12->lime->pm4pybpmn) (1.1.1)
Requirement already satisfied: imageio>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image>=0.12->lime->pm4pybpmn) (2.4.1)
Requirement already satisfied: jinja2>=2.9.6 in /usr/local/lib/python3.6/dist-packages (from pyvis->pm4py->pm4pybpmn) (2.11.2)
Requirement already satisfied: ipython>=5.3.0 in /usr/local/lib/python3.6/dist-packages (from pyvis->pm4py->pm4pybpmn) (5.5.0)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from jsonpickle->pm4py->pm4pybpmn) (2.0.0)
Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.6/dist-packages (from sympy->pm4py->pm4pybpmn) (1.1.0)
Requirement already satisfied: cvxopt in /usr/local/lib/python3.6/dist-packages (from pm4pycvxopt>=0.0.10; python_version < "3.9"->pm4py->pm4pybpmn) (1.2.5)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from deprecation->pm4py->pm4pybpmn) (20.4)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2>=2.9.6->pyvis->pm4py->pm4pybpmn) (1.1.1)
Requirement already satisfied: pexpect; sys_platform != "win32" in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (4.8.0)
Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (2.6.1)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (0.8.1)
Requirement already satisfied: setuptools>=18.5 in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (50.3.2)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (1.0.18)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (0.7.5)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (4.3.3)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata; python_version < "3.8"->jsonpickle->pm4py->pm4pybpmn) (3.4.0)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != "win32"->ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (0.6.0)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (0.2.5)
Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython>=5.3.0->pyvis->pm4py->pm4pybpmn) (0.2.0)
Requirement already satisfied: pygraphviz in /usr/local/lib/python3.6/dist-packages (1.6)
## 1.1 BPMN import
```
# import libraries
import os
import graphviz
import pandas as pd
from lxml import etree  # lxml's etree submodule must be imported explicitly
import networkx
import matplotlib
import pydotplus
```
```
# get & load the data
!wget https://bitbucket.org/JMU-BWL6/iiv_data_bpmn/raw/14f0142e9ed9c7b79305a21c03e77d12f82ec7da/Test.bpmn
```
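The `os` module imported above gives a quick sanity check that the download succeeded before parsing. A minimal sketch; `Test.bpmn` is the file fetched by the `wget` call above:
```
# sanity check: confirm the BPMN file was actually downloaded
assert os.path.exists("Test.bpmn"), "Test.bpmn is missing - rerun the wget cell"
print(f"Test.bpmn: {os.path.getsize('Test.bpmn')} bytes")
```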
```
# show the XML in the notebook
tree = etree.parse("Test.bpmn")
pretty = etree.tostring(tree, encoding="unicode", pretty_print=True)
print(pretty)
```
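Beyond pretty-printing, the parsed tree can be queried directly with XPath-style lookups. A minimal sketch, assuming the file uses the standard BPMN 2.0 model namespace (`http://www.omg.org/spec/BPMN/20100524/MODEL`), that lists the tasks in the diagram:
```
# list the id and name of every task in the BPMN model
from lxml import etree

BPMN_NS = {"bpmn": "http://www.omg.org/spec/BPMN/20100524/MODEL"}

tree = etree.parse("Test.bpmn")
for task in tree.iterfind(".//bpmn:task", namespaces=BPMN_NS):
    print(task.get("id"), task.get("name"))
```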
## 1.2 BPMN chart
```
# import the pm4pybpmn library modules
from pm4pybpmn.objects.bpmn.importer import bpmn20 as bpmn_importer3
from pm4pybpmn.visualization.bpmn import factory as bpmn_vis_factory
```
```
# import the BPMN file as a graph
bpmn_graph = bpmn_importer3.import_bpmn("Test.bpmn")
```
```
# render the BPMN graph as a figure
bpmn_figure = bpmn_vis_factory.apply(bpmn_graph)
bpmn_vis_factory.view(bpmn_figure)
```
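To keep the rendered chart for a report, the visualization factory can usually also write it to disk. This is a sketch under the assumption that pm4pybpmn mirrors pm4py's visualization-factory API, where `save(figure, path)` exists alongside `apply` and `view`; the output filename is our choice:
```
# save the rendered BPMN chart to a PNG file
# (assumes the factory exposes save(), as pm4py's visualization factories do)
bpmn_vis_factory.save(bpmn_figure, "Test.png")
```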
|
Today's game at Moss Rose was expected to be a hotly contested one, with Macclesfield having a new manager and Mansfield facing a very beatable bottom-of-the-league team. The question was whether Shirtliff would play for a 0-0 draw or try to be more assertive.
The game opened with a totally over-the-top, ‘Rocky’-style introduction for ‘the legendary’ Paul Ince. The two teams emerged with Macclesfield playing the ball on the floor and Mansfield playing hoof-and-hope, giving the front men very little service. Stags had the first chance with a limp header over the bar from Barker in the first minute. The first real chance fell to Macclesfield as Hjelde mistimed a header, letting Bullock in behind him, who lashed a shot into the side netting from 6 yards. Then in the 5th minute came a good run into the box and across the byline from Heath on the left wing, who played the ball back to Bullock, whose goal-bound shot produced a good reflex save from Muggleton to his left, the ball just going over the bar for a Macc corner. In the 25th minute Tipton ran down the right wing, swinging in a cross to the penalty spot which was met by a sublime header from Rob Scott that thundered against the base of Muggleton's right-hand post. Quickest to react was the offside Bullock, who slotted home over Muggleton only to have it disallowed. At this point Mansfield looked like the team with only 5 points after a pathetic opening half hour. Oh dear, it does look like we’re holding on for 0-0! After a brief spell of Mansfield pressure towards the end of the first half the Stags had another half chance, with Baptiste heading well over under pressure. Mansfield's problem in the first half was the lack of midfield play. The marking was poor, and every time the defence won the ball the only option was to hit it forwards to the isolated strikers.
Now the 2nd half. In the 50th minute the ball was wide on the right with Heath, whose cross-cum-shot sailed over Muggleton's bald patch into the net; it certainly had a hint of 2002 World Cup flashbacks! The Stags woke up; why do we only do it when we go a goal behind? In the 52nd minute some good work on the right by Mansfield resulted in the ball falling to the right foot of Mullins, whose low side-foot shot had the goalkeeper scrambling, only to hit the post and come back out. After 60 minutes Boulding came on to replace the elusive Jelleyman, and the effect was immediate. Not long after, Johnny Mullins floated in a dream of a cross to any of three awaiting Stags attackers at the back post; Richie Barker headed, the goalkeeper parried straight onto Giles Coke's right knee (or nads) and in it went! Get in, 1-1, the Stags were back on level terms.
In the 69th minute Bappa picked the ball up on the halfway line and ran forwards unchallenged before unleashing a shot from distance which whistled ... 7 foot wide. Good pressure followed from the Stags, and fans were hopeful that we could grab a second goal from the game. A break in play allowed Danny Reet to be replaced by Chris Beardsley, much to the dismay of some travelling fans. The out-of-sorts Hamshaw then gave away a free kick in a promising position for Macc in the 81st minute. The ball was floated into the area, pinged about and fell to captain Morley, 12 yards out, who poked it past Muggleton into the right-hand corner. Now what? Stags could be heading towards defeat by the worst team in England. More Mansfield pressure after 83 minutes resulted in a corner kick to the Stags; the ball came in from a Hamshaw delivery, Bappa headed it sideways to Hjelde, who had his header headed off the line by Navarro; the loose ball was picked up by Hamshaw, who drifted a ball to the back post where Richie Barker met it, powering the header at Brain, who could only parry it into his top left-hand corner. 2-2, to the amazement of the Stags fans. Macclesfield had a rare opportunity as a ball was whipped in from the left, which landed at the feet of a surprised Baptiste, who was lucky the ball ricocheted into the arms of Muggleton.
Then came the penalty. A cross came in and was cleared to the feet of a Stags player whose shot struck the arm of a Macc defender, and the referee had no hesitation in raising his whistle to his lips and awarding the Stags a penalty at the death. Up stepped Richie Barker in front of the travelling Stags contingent. Lo and behold, the big captain coolly slotted it to the keeper's right, in off the post, with the goalie going to his left. The fans were in raptures; we had come back from 2-1 down with 10 minutes to go. How had we done that?
Muggleton - 5, good first-half reflex save but nothing more than an average performance after that. Too static at corners.
Hjelde - 5, Strong at the back but caught napping at times.
Baptiste - 8, Influential from start to finish.
Sheehan - 5, Got forward well and put in some good crosses in the second half. Corner delivery was poor.
D’Laryea - 5, Strong but not creative.
Coke - 6, Tried to be creative, right place at right time for goal.
Jelleyman - 5, Could have done more, didn’t look like a winger.
Reet - 5, Looked unfit and wasn’t effective.
Barker - 6, Involved in all three goals but not much else.
Boulding (for Jelleyman, 59) - 7, Came on and produced chances as well as winning a handful of corners, added something.
Beardsley (for Reet, 70) - 5, Hustled and bustled.
Subs not used: White, Buxton, Lloyd.
Brain, Regan, Morley, Scott, Brightwell (Teague 73), Bullock, Navarro, Hadfield, McIntyre, Tipton (Murphy 73), Heath (Weir-Daley 78). Subs Not Used: Lee, Miles.
C Oliver - 6, Gave some wacky decisions but played the advantage rule well. |
[GOAL]
ι : Type u_1
V : Type u
inst✝⁷ : Category.{v, u} V
inst✝⁶ : HasZeroMorphisms V
inst✝⁵ : HasZeroObject V
inst✝⁴ : HasEqualizers V
inst✝³ : HasImages V
inst✝² : HasImageMaps V
inst✝¹ : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
f : C ⟶ D
inst✝ : IsIso f
i : ι
⊢ IsIso ((homologyFunctor V c i).map f)
[PROOFSTEP]
change IsIso ((homologyFunctor V c i).mapIso (asIso f)).hom
[GOAL]
ι : Type u_1
V : Type u
inst✝⁷ : Category.{v, u} V
inst✝⁶ : HasZeroMorphisms V
inst✝⁵ : HasZeroObject V
inst✝⁴ : HasEqualizers V
inst✝³ : HasImages V
inst✝² : HasImageMaps V
inst✝¹ : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
f : C ⟶ D
inst✝ : IsIso f
i : ι
⊢ IsIso ((homologyFunctor V c i).mapIso (asIso f)).hom
[PROOFSTEP]
infer_instance
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
f : C ⟶ D
inst✝¹ : QuasiIso f
g : D ⟶ E
inst✝ : QuasiIso g
i : ι
⊢ IsIso ((homologyFunctor V c i).map (f ≫ g))
[PROOFSTEP]
rw [Functor.map_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
f : C ⟶ D
inst✝¹ : QuasiIso f
g : D ⟶ E
inst✝ : QuasiIso g
i : ι
⊢ IsIso ((homologyFunctor V c i).map f ≫ (homologyFunctor V c i).map g)
[PROOFSTEP]
infer_instance
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ IsIso ((homologyFunctor W c i).map e.hom)
[PROOFSTEP]
refine' ⟨⟨(homologyFunctor W c i).map e.inv, _⟩⟩
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor W c i).map e.hom ≫ (homologyFunctor W c i).map e.inv = 𝟙 ((homologyFunctor W c i).obj C) ∧
(homologyFunctor W c i).map e.inv ≫ (homologyFunctor W c i).map e.hom = 𝟙 ((homologyFunctor W c i).obj D)
[PROOFSTEP]
simp only [← Functor.map_comp, ← (homologyFunctor W c i).map_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor W c i).map (e.hom ≫ e.inv) = (homologyFunctor W c i).map (𝟙 C) ∧
(homologyFunctor W c i).map (e.inv ≫ e.hom) = (homologyFunctor W c i).map (𝟙 D)
[PROOFSTEP]
constructor
[GOAL]
case left
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor W c i).map (e.hom ≫ e.inv) = (homologyFunctor W c i).map (𝟙 C)
[PROOFSTEP]
apply homology_map_eq_of_homotopy
[GOAL]
case right
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor W c i).map (e.inv ≫ e.hom) = (homologyFunctor W c i).map (𝟙 D)
[PROOFSTEP]
apply homology_map_eq_of_homotopy
[GOAL]
case left.h
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ Homotopy (e.hom ≫ e.inv) (𝟙 C)
case right.h
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ Homotopy (e.inv ≫ e.hom) (𝟙 D)
[PROOFSTEP]
exacts [e.homotopyHomInvId, e.homotopyInvHomId]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (asIso ((homologyFunctor W c i).map e.hom)).inv = (homologyFunctor W c i).map e.inv
[PROOFSTEP]
symm
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor W c i).map e.inv = (asIso ((homologyFunctor W c i).map e.hom)).inv
[PROOFSTEP]
haveI := e.toQuasiIso.1 i
[GOAL]
ι : Type u_1
V : Type u
inst✝¹³ : Category.{v, u} V
inst✝¹² : HasZeroMorphisms V
inst✝¹¹ : HasZeroObject V
inst✝¹⁰ : HasEqualizers V
inst✝⁹ : HasImages V
inst✝⁸ : HasImageMaps V
inst✝⁷ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
W : Type u_2
inst✝⁶ : Category.{u_3, u_2} W
inst✝⁵ : Preadditive W
inst✝⁴ : HasCokernels W
inst✝³ : HasImages W
inst✝² : HasEqualizers W
inst✝¹ : HasZeroObject W
inst✝ : HasImageMaps W
C D : HomologicalComplex W c
e : HomotopyEquiv C D
i : ι
this : IsIso ((homologyFunctor W c i).map e.hom)
⊢ (homologyFunctor W c i).map e.inv = (asIso ((homologyFunctor W c i).map e.hom)).inv
[PROOFSTEP]
simp only [← Iso.hom_comp_eq_id, asIso_hom, ← Functor.map_comp, ← (homologyFunctor W c i).map_id,
homology_map_eq_of_homotopy e.homotopyHomInvId _]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{?u.22788, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
[PROOFSTEP]
rw [← f.2 1 0 rfl]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{?u.22788, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ HomologicalComplex.Hom.f f 1 ≫ d ((ChainComplex.single₀ W).obj Y) 1 0 = 0
[PROOFSTEP]
exact comp_zero
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ (toSingle₀CokernelAtZeroIso f).hom =
cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) (_ : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ coequalizer.π (d X 1 0) 0 ≫ (toSingle₀CokernelAtZeroIso f).hom =
coequalizer.π (d X 1 0) 0 ≫
cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) (_ : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0)
[PROOFSTEP]
dsimp only [toSingle₀CokernelAtZeroIso, ChainComplex.homologyZeroIso, homologyOfZeroRight, homology.mapIso,
ChainComplex.homologyFunctor0Single₀, cokernel.map]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ coequalizer.π (d X 1 0) 0 ≫
((Iso.mk
(homology.map (_ : dTo X 0 ≫ dFrom X 0 = 0) (_ : d X 1 0 ≫ 0 = 0)
(Arrow.isoMk (xPrevIso X ChainComplex.homologyZeroIso.proof_10)
(Iso.refl (Arrow.mk (dTo X 0)).right)).hom
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left) (Iso.refl (Arrow.mk (dFrom X 0)).right)).hom
(_ :
(Arrow.isoMk (xPrevIso X (_ : 0 + 1 = 0 + 1)) (Iso.refl (Arrow.mk (dTo X 0)).right)).hom.right =
(Arrow.isoMk (xPrevIso X (_ : 0 + 1 = 0 + 1)) (Iso.refl (Arrow.mk (dTo X 0)).right)).hom.right))
(homology.map (_ : d X 1 0 ≫ 0 = 0) (_ : dTo X 0 ≫ dFrom X 0 = 0)
(Arrow.isoMk (xPrevIso X ChainComplex.homologyZeroIso.proof_10)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left) (Iso.refl (Arrow.mk (dFrom X 0)).right)).inv
(_ :
(Arrow.isoMk (xPrevIso X ChainComplex.homologyZeroIso.proof_10)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv.right =
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(Iso.refl (Arrow.mk (dFrom X 0)).right)).inv.left)) ≪≫
cokernel.mapIso (imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0)) (image.ι (d X 1 0))
(imageSubobjectIso (d X 1 0)) (kernelSubobjectIso 0 ≪≫ kernelZeroIsoSource)
(_ :
imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0) ≫ (kernelSubobjectIso 0).hom ≫ kernel.ι 0 =
(imageSubobjectIso (d X 1 0)).hom ≫ image.ι (d X 1 0)) ≪≫
cokernelImageι (d X 1 0)).symm ≪≫
asIso ((homologyFunctor W (ComplexShape.down ℕ) 0).map f) ≪≫
(NatIso.ofComponents fun X =>
homology.congr
(_ : dTo ((ChainComplex.single₀ W).obj X) 0 ≫ dFrom ((ChainComplex.single₀ W).obj X) 0 = 0)
(_ : 0 ≫ 0 = 0) (_ : 0 = 0)
(_ : d ((ChainComplex.single₀ W).obj X) 0 (ComplexShape.next (ComplexShape.down ℕ) 0) = 0) ≪≫
homologyZeroZero).app
Y).hom =
coequalizer.π (d X 1 0) 0 ≫
cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) (_ : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0)
[PROOFSTEP]
dsimp [asIso]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ cokernel.π (d X 1 0) ≫
((cokernel.desc (d X 1 0) (cokernel.π (image.ι (d X 1 0))) (_ : d X 1 0 ≫ cokernel.π (image.ι (d X 1 0)) = 0) ≫
cokernel.map (image.ι (d X 1 0)) (imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0))
(imageSubobjectIso (d X 1 0)).inv (kernelZeroIsoSource.inv ≫ (kernelSubobjectIso 0).inv)
(_ :
image.ι (d X 1 0) ≫ (kernelSubobjectIso 0 ≪≫ kernelZeroIsoSource).inv =
(imageSubobjectIso (d X 1 0)).inv ≫ imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0))) ≫
homology.map (_ : d X 1 0 ≫ 0 = 0) (_ : dTo X 0 ≫ dFrom X 0 = 0)
(Arrow.isoMk (xPrevIso X ChainComplex.homologyZeroIso.proof_10) (Iso.refl (HomologicalComplex.X X 0))).inv
(Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0)) (Iso.refl (xNext X 0))).inv
(_ :
(Arrow.isoMk (xPrevIso X ChainComplex.homologyZeroIso.proof_10)
(Iso.refl (HomologicalComplex.X X 0))).inv.right =
(Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0)) (Iso.refl (xNext X 0))).inv.left)) ≫
homology.map (_ : dTo X 0 ≫ dFrom X 0 = 0)
(_ : dTo ((ChainComplex.single₀ W).obj Y) 0 ≫ dFrom ((ChainComplex.single₀ W).obj Y) 0 = 0) (sqTo f 0)
(sqFrom f 0) (_ : (sqTo f 0).right = (sqTo f 0).right) ≫
homology.map (_ : dTo ((ChainComplex.single₀ W).obj Y) 0 ≫ dFrom ((ChainComplex.single₀ W).obj Y) 0 = 0)
(_ : 0 ≫ 0 = 0) (CommaMorphism.mk (𝟙 (xPrev ((ChainComplex.single₀ W).obj Y) 0)) (𝟙 Y))
(CommaMorphism.mk (𝟙 Y) (𝟙 (xNext ((ChainComplex.single₀ W).obj Y) 0)))
(_ :
(CommaMorphism.mk (𝟙 (Arrow.mk 0).left) (𝟙 (Arrow.mk 0).right)).right =
(CommaMorphism.mk (𝟙 (Arrow.mk 0).left) (𝟙 (Arrow.mk 0).right)).right) ≫
homology.desc 0 0 (_ : 0 ≫ 0 = 0) (Subobject.arrow (kernelSubobject 0))
(_ : imageToKernel 0 0 (_ : 0 ≫ 0 = 0) ≫ Subobject.arrow (kernelSubobject 0) = 0) =
cokernel.π (d X 1 0) ≫
cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) (_ : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0)
[PROOFSTEP]
simp only [cokernel.π_desc, Category.assoc, homology.map_desc, cokernel.π_desc_assoc]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ kernelZeroIsoSource.inv ≫
(kernelSubobjectIso 0).inv ≫
cokernel.π (imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0)) ≫
homology.desc (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0)
(kernelSubobjectMap (Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0)) (Iso.refl (xNext X 0))).inv ≫
kernelSubobjectMap (sqFrom f 0) ≫
kernelSubobjectMap (CommaMorphism.mk (𝟙 Y) (𝟙 (xNext ((ChainComplex.single₀ W).obj Y) 0))) ≫
Subobject.arrow (kernelSubobject 0))
(_ :
imageToKernel (d X 1 0) 0 (_ : d X 1 0 ≫ 0 = 0) ≫
kernelSubobjectMap (Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0)) (Iso.refl (xNext X 0))).inv ≫
kernelSubobjectMap (sqFrom f 0) ≫
kernelSubobjectMap (CommaMorphism.mk (𝟙 Y) (𝟙 (xNext ((ChainComplex.single₀ W).obj Y) 0))) ≫
Subobject.arrow (kernelSubobject 0) =
0) =
HomologicalComplex.Hom.f f 0
[PROOFSTEP]
simp [homology.desc, Iso.refl_inv (X.X 0)]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ Epi (HomologicalComplex.Hom.f f 0)
[PROOFSTEP]
constructor
[GOAL]
case left_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ ∀ {Z : W} (g h : HomologicalComplex.X ((ChainComplex.single₀ W).obj Y) 0 ⟶ Z),
HomologicalComplex.Hom.f f 0 ≫ g = HomologicalComplex.Hom.f f 0 ≫ h → g = h
[PROOFSTEP]
intro Z g h Hgh
[GOAL]
case left_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
Z : W
g h : HomologicalComplex.X ((ChainComplex.single₀ W).obj Y) 0 ⟶ Z
Hgh : HomologicalComplex.Hom.f f 0 ≫ g = HomologicalComplex.Hom.f f 0 ≫ h
⊢ g = h
[PROOFSTEP]
rw [← cokernel.π_desc (X.d 1 0) (f.f 0) (by rw [← f.2 1 0 rfl]; exact comp_zero), ←
toSingle₀CokernelAtZeroIso_hom_eq] at Hgh
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
Z : W
g h : HomologicalComplex.X ((ChainComplex.single₀ W).obj Y) 0 ⟶ Z
Hgh : HomologicalComplex.Hom.f f 0 ≫ g = HomologicalComplex.Hom.f f 0 ≫ h
⊢ d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
[PROOFSTEP]
rw [← f.2 1 0 rfl]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
Z : W
g h : HomologicalComplex.X ((ChainComplex.single₀ W).obj Y) 0 ⟶ Z
Hgh : HomologicalComplex.Hom.f f 0 ≫ g = HomologicalComplex.Hom.f f 0 ≫ h
⊢ HomologicalComplex.Hom.f f 1 ≫ d ((ChainComplex.single₀ W).obj Y) 1 0 = 0
[PROOFSTEP]
exact comp_zero
[GOAL]
case left_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
Z : W
g h : HomologicalComplex.X ((ChainComplex.single₀ W).obj Y) 0 ⟶ Z
Hgh✝ : HomologicalComplex.Hom.f f 0 ≫ g = HomologicalComplex.Hom.f f 0 ≫ h
Hgh :
(cokernel.π (d X 1 0) ≫ (toSingle₀CokernelAtZeroIso f).hom) ≫ g =
(cokernel.π (d X 1 0) ≫ (toSingle₀CokernelAtZeroIso f).hom) ≫ h
⊢ g = h
[PROOFSTEP]
rw [(@cancel_epi _ _ _ _ _ _ (epi_comp _ _) _ _).1 Hgh]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ Exact (d X 1 0) (HomologicalComplex.Hom.f f 0)
[PROOFSTEP]
rw [Preadditive.exact_iff_homology_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ ∃ w, Nonempty (_root_.homology (d X 1 0) (HomologicalComplex.Hom.f f 0) w ≅ 0)
[PROOFSTEP]
have h : X.d 1 0 ≫ f.f 0 = 0 := by simp only [← f.2 1 0 rfl, ChainComplex.single₀_obj_X_d, comp_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
⊢ d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
[PROOFSTEP]
simp only [← f.2 1 0 rfl, ChainComplex.single₀_obj_X_d, comp_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
h : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
⊢ ∃ w, Nonempty (_root_.homology (d X 1 0) (HomologicalComplex.Hom.f f 0) w ≅ 0)
[PROOFSTEP]
refine' ⟨h, Nonempty.intro (homologyIsoKernelDesc _ _ _ ≪≫ _)⟩
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
h : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
⊢ kernel (cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) h) ≅ 0
[PROOFSTEP]
suffices IsIso (cokernel.desc _ _ h) by apply kernel.ofMono
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
h : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
this : IsIso (cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) h)
⊢ kernel (cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) h) ≅ 0
[PROOFSTEP]
apply kernel.ofMono
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
h : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
⊢ IsIso (cokernel.desc (d X 1 0) (HomologicalComplex.Hom.f f 0) h)
[PROOFSTEP]
rw [← toSingle₀CokernelAtZeroIso_hom_eq]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : ChainComplex W ℕ
Y : W
f : X ⟶ (ChainComplex.single₀ W).obj Y
hf✝ hf : QuasiIso f
h : d X 1 0 ≫ HomologicalComplex.Hom.f f 0 = 0
⊢ IsIso (toSingle₀CokernelAtZeroIso f).hom
[PROOFSTEP]
infer_instance
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{?u.68820, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
[PROOFSTEP]
rw [f.2 0 1 rfl]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{?u.68820, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ d ((CochainComplex.single₀ W).obj Y) 0 1 ≫ HomologicalComplex.Hom.f f 1 = 0
[PROOFSTEP]
exact zero_comp
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ (fromSingle₀KernelAtZeroIso f).inv =
kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) (_ : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0)
[PROOFSTEP]
ext
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ (fromSingle₀KernelAtZeroIso f).inv ≫ equalizer.ι (d X 0 1) 0 =
kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) (_ : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0) ≫
equalizer.ι (d X 0 1) 0
[PROOFSTEP]
dsimp only [fromSingle₀KernelAtZeroIso, CochainComplex.homologyZeroIso, homologyOfZeroLeft, homology.mapIso,
CochainComplex.homologyFunctor0Single₀, kernel.map]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ ((Iso.mk
(homology.map (_ : dTo X 0 ≫ dFrom X 0 = 0) (_ : 0 ≫ d X 0 1 = 0)
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).hom
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).hom
(_ : 𝟙 (HomologicalComplex.X X 0) = 𝟙 (HomologicalComplex.X X 0)))
(homology.map (_ : 0 ≫ d X 0 1 = 0) (_ : dTo X 0 ≫ dFrom X 0 = 0)
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).inv
(_ :
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv.right =
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).inv.left)) ≪≫
(cokernelIsoOfEq (_ : imageToKernel 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0) = 0) ≪≫ cokernelZeroIsoTarget) ≪≫
kernelSubobjectIso (d X 0 1)).symm ≪≫
(asIso ((homologyFunctor W (ComplexShape.up ℕ) 0).map f)).symm ≪≫
(NatIso.ofComponents fun X =>
homology.congr
(_ : dTo ((CochainComplex.single₀ W).obj X) 0 ≫ dFrom ((CochainComplex.single₀ W).obj X) 0 = 0)
(_ : 0 ≫ 0 = 0)
(_ : d ((CochainComplex.single₀ W).obj X) (ComplexShape.prev (ComplexShape.up ℕ) 0) 0 = 0)
(_ : 0 = 0) ≪≫
homologyZeroZero).app
Y).inv ≫
equalizer.ι (d X 0 1) 0 =
kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) (_ : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0) ≫
equalizer.ι (d X 0 1) 0
[PROOFSTEP]
simp only [Iso.trans_inv, Iso.app_inv, Iso.symm_inv, Category.assoc, equalizer_as_kernel, kernel.lift_ι]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ NatTrans.app
(NatIso.ofComponents fun X =>
homology.congr
(_ : dTo ((CochainComplex.single₀ W).obj X) 0 ≫ dFrom ((CochainComplex.single₀ W).obj X) 0 = 0)
(_ : 0 ≫ 0 = 0)
(_ : d ((CochainComplex.single₀ W).obj X) (ComplexShape.prev (ComplexShape.up ℕ) 0) 0 = 0)
(_ : 0 = 0) ≪≫
homologyZeroZero).inv
Y ≫
(asIso ((homologyFunctor W (ComplexShape.up ℕ) 0).map f)).hom ≫
(Iso.mk
(homology.map (_ : dTo X 0 ≫ dFrom X 0 = 0) (_ : 0 ≫ d X 0 1 = 0)
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).hom
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).hom
(_ : 𝟙 (HomologicalComplex.X X 0) = 𝟙 (HomologicalComplex.X X 0)))
(homology.map (_ : 0 ≫ d X 0 1 = 0) (_ : dTo X 0 ≫ dFrom X 0 = 0)
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).inv
(_ :
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (Arrow.mk (dTo X 0)).right)).inv.right =
(Arrow.isoMk (Iso.refl (Arrow.mk (dFrom X 0)).left)
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).inv.left)) ≪≫
(cokernelIsoOfEq (_ : imageToKernel 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0) = 0) ≪≫ cokernelZeroIsoTarget) ≪≫
kernelSubobjectIso (d X 0 1)).hom ≫
kernel.ι (d X 0 1) =
HomologicalComplex.Hom.f f 0
[PROOFSTEP]
dsimp [asIso]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ ((inv (Subobject.arrow (kernelSubobject 0)) ≫ homology.π 0 0 (_ : 0 ≫ 0 = 0)) ≫
homology.map (_ : 0 ≫ 0 = 0)
(_ : dTo ((CochainComplex.single₀ W).obj Y) 0 ≫ dFrom ((CochainComplex.single₀ W).obj Y) 0 = 0)
(CommaMorphism.mk (𝟙 (xPrev ((CochainComplex.single₀ W).obj Y) 0)) (𝟙 Y))
(CommaMorphism.mk (𝟙 Y) (𝟙 (xNext ((CochainComplex.single₀ W).obj Y) 0)))
(_ :
(CommaMorphism.mk (𝟙 (Arrow.mk 0).left) (𝟙 (Arrow.mk 0).right)).right =
(CommaMorphism.mk (𝟙 (Arrow.mk 0).left) (𝟙 (Arrow.mk 0).right)).right)) ≫
homology.map (_ : dTo ((CochainComplex.single₀ W).obj Y) 0 ≫ dFrom ((CochainComplex.single₀ W).obj Y) 0 = 0)
(_ : dTo X 0 ≫ dFrom X 0 = 0) (sqTo f 0) (sqFrom f 0) (_ : (sqTo f 0).right = (sqTo f 0).right) ≫
(homology.map (_ : dTo X 0 ≫ dFrom X 0 = 0) (_ : 0 ≫ d X 0 1 = 0)
(Arrow.isoMk (xPrevIsoSelf X CochainComplex.homologyZeroIso.proof_9)
(Iso.refl (HomologicalComplex.X X 0))).hom
(Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0))
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).hom
(_ : 𝟙 (HomologicalComplex.X X 0) = 𝟙 (HomologicalComplex.X X 0)) ≫
((cokernelIsoOfEq (_ : imageToKernel 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0) = 0)).hom ≫
cokernelZeroIsoTarget.hom) ≫
(kernelSubobjectIso (d X 0 1)).hom) ≫
kernel.ι (d X 0 1) =
HomologicalComplex.Hom.f f 0
[PROOFSTEP]
simp only [Category.assoc, homology.π_map, cokernelZeroIsoTarget_hom, cokernelIsoOfEq_hom_comp_desc,
kernelSubobject_arrow, homology.π_map_assoc, IsIso.inv_comp_eq]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ kernelSubobjectMap (CommaMorphism.mk (𝟙 Y) (𝟙 (xNext ((CochainComplex.single₀ W).obj Y) 0))) ≫
kernelSubobjectMap (sqFrom f 0) ≫
kernelSubobjectMap
(Arrow.isoMk (Iso.refl (HomologicalComplex.X X 0))
(xNextIso X CochainComplex.homologyZeroIso.proof_11)).hom ≫
homology.π 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0) ≫
cokernel.desc (imageToKernel 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0))
(𝟙 (Subobject.underlying.obj (kernelSubobject (d X 0 1))))
(_ :
imageToKernel 0 (d X 0 1) (_ : 0 ≫ d X 0 1 = 0) ≫
𝟙 (Subobject.underlying.obj (kernelSubobject (d X 0 1))) =
0) ≫
Subobject.arrow (kernelSubobject (d X 0 1)) =
Subobject.arrow (kernelSubobject 0) ≫ HomologicalComplex.Hom.f f 0
[PROOFSTEP]
simp [homology.π, kernelSubobjectMap_comp, Iso.refl_hom (X.X 0), Category.comp_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ Mono (HomologicalComplex.Hom.f f 0)
[PROOFSTEP]
constructor
[GOAL]
case right_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ ∀ {Z : W} (g h : Z ⟶ HomologicalComplex.X ((CochainComplex.single₀ W).obj Y) 0),
g ≫ HomologicalComplex.Hom.f f 0 = h ≫ HomologicalComplex.Hom.f f 0 → g = h
[PROOFSTEP]
intro Z g h Hgh
[GOAL]
case right_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
Z : W
g h : Z ⟶ HomologicalComplex.X ((CochainComplex.single₀ W).obj Y) 0
Hgh : g ≫ HomologicalComplex.Hom.f f 0 = h ≫ HomologicalComplex.Hom.f f 0
⊢ g = h
[PROOFSTEP]
rw [← kernel.lift_ι (X.d 0 1) (f.f 0) (by rw [f.2 0 1 rfl]; exact zero_comp), ← fromSingle₀KernelAtZeroIso_inv_eq] at
Hgh
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
Z : W
g h : Z ⟶ HomologicalComplex.X ((CochainComplex.single₀ W).obj Y) 0
Hgh : g ≫ HomologicalComplex.Hom.f f 0 = h ≫ HomologicalComplex.Hom.f f 0
⊢ HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
[PROOFSTEP]
rw [f.2 0 1 rfl]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
Z : W
g h : Z ⟶ HomologicalComplex.X ((CochainComplex.single₀ W).obj Y) 0
Hgh : g ≫ HomologicalComplex.Hom.f f 0 = h ≫ HomologicalComplex.Hom.f f 0
⊢ d ((CochainComplex.single₀ W).obj Y) 0 1 ≫ HomologicalComplex.Hom.f f 1 = 0
[PROOFSTEP]
exact zero_comp
[GOAL]
case right_cancellation
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
Z : W
g h : Z ⟶ HomologicalComplex.X ((CochainComplex.single₀ W).obj Y) 0
Hgh✝ : g ≫ HomologicalComplex.Hom.f f 0 = h ≫ HomologicalComplex.Hom.f f 0
Hgh :
g ≫ (fromSingle₀KernelAtZeroIso f).inv ≫ kernel.ι (d X 0 1) =
h ≫ (fromSingle₀KernelAtZeroIso f).inv ≫ kernel.ι (d X 0 1)
⊢ g = h
[PROOFSTEP]
rw [(@cancel_mono _ _ _ _ _ _ (mono_comp _ _) _ _).1 Hgh]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ Exact (HomologicalComplex.Hom.f f 0) (d X 0 1)
[PROOFSTEP]
rw [Preadditive.exact_iff_homology_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ ∃ w, Nonempty (_root_.homology (HomologicalComplex.Hom.f f 0) (d X 0 1) w ≅ 0)
[PROOFSTEP]
have h : f.f 0 ≫ X.d 0 1 = 0 := by simp only [HomologicalComplex.Hom.comm, CochainComplex.single₀_obj_X_d, zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
⊢ HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
[PROOFSTEP]
simp only [HomologicalComplex.Hom.comm, CochainComplex.single₀_obj_X_d, zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
h : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
⊢ ∃ w, Nonempty (_root_.homology (HomologicalComplex.Hom.f f 0) (d X 0 1) w ≅ 0)
[PROOFSTEP]
refine' ⟨h, Nonempty.intro (homologyIsoCokernelLift _ _ _ ≪≫ _)⟩
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
h : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
⊢ cokernel (kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) h) ≅ 0
[PROOFSTEP]
suffices IsIso (kernel.lift (X.d 0 1) (f.f 0) h) by apply cokernel.ofEpi
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
h : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
this : IsIso (kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) h)
⊢ cokernel (kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) h) ≅ 0
[PROOFSTEP]
apply cokernel.ofEpi
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
h : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
⊢ IsIso (kernel.lift (d X 0 1) (HomologicalComplex.Hom.f f 0) h)
[PROOFSTEP]
rw [← fromSingle₀KernelAtZeroIso_inv_eq f]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁸ : Category.{v, u} V
inst✝⁷ : HasZeroMorphisms V
inst✝⁶ : HasZeroObject V
inst✝⁵ : HasEqualizers V
inst✝⁴ : HasImages V
inst✝³ : HasImageMaps V
inst✝² : HasCokernels V
c : ComplexShape ι
C D E : HomologicalComplex V c
W : Type u_2
inst✝¹ : Category.{u_3, u_2} W
inst✝ : Abelian W
X : CochainComplex W ℕ
Y : W
f : (CochainComplex.single₀ W).obj Y ⟶ X
hf : QuasiIso f
h : HomologicalComplex.Hom.f f 0 ≫ d X 0 1 = 0
⊢ IsIso (fromSingle₀KernelAtZeroIso f).inv
[PROOFSTEP]
infer_instance
[GOAL]
ι : Type u_1
V : Type u
inst✝¹⁴ : Category.{v, u} V
inst✝¹³ : HasZeroMorphisms V
inst✝¹² : HasZeroObject V
inst✝¹¹ : HasEqualizers V
inst✝¹⁰ : HasImages V
inst✝⁹ : HasImageMaps V
inst✝⁸ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
A : Type u_2
inst✝⁷ : Category.{u_4, u_2} A
inst✝⁶ : Abelian A
B : Type u_3
inst✝⁵ : Category.{u_5, u_3} B
inst✝⁴ : Abelian B
F : A ⥤ B
inst✝³ : Additive F
inst✝² : PreservesFiniteLimits F
inst✝¹ : PreservesFiniteColimits F
inst✝ : Faithful F
C D : HomologicalComplex A c
f : C ⟶ D
hf : QuasiIso ((mapHomologicalComplex F c).map f)
i : ι
⊢ IsIso (F.map ((homologyFunctor A c i).map f))
[PROOFSTEP]
rw [← Functor.comp_map, ← NatIso.naturality_2 (F.homologyFunctorIso i) f, Functor.comp_map]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹⁴ : Category.{v, u} V
inst✝¹³ : HasZeroMorphisms V
inst✝¹² : HasZeroObject V
inst✝¹¹ : HasEqualizers V
inst✝¹⁰ : HasImages V
inst✝⁹ : HasImageMaps V
inst✝⁸ : HasCokernels V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
A : Type u_2
inst✝⁷ : Category.{u_4, u_2} A
inst✝⁶ : Abelian A
B : Type u_3
inst✝⁵ : Category.{u_5, u_3} B
inst✝⁴ : Abelian B
F : A ⥤ B
inst✝³ : Additive F
inst✝² : PreservesFiniteLimits F
inst✝¹ : PreservesFiniteColimits F
inst✝ : Faithful F
C D : HomologicalComplex A c
f : C ⟶ D
hf : QuasiIso ((mapHomologicalComplex F c).map f)
i : ι
⊢ IsIso
(NatTrans.app (homologyFunctorIso F i).hom C ≫
(homologyFunctor B c i).map ((mapHomologicalComplex F c).map f) ≫ NatTrans.app (homologyFunctorIso F i).inv D)
[PROOFSTEP]
infer_instance
|
/-
Copyright (c) 2022 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import analysis.inner_product_space.orientation
import measure_theory.measure.haar_lebesgue
/-!
# Volume forms and measures on inner product spaces
A volume form induces a Lebesgue measure on general finite-dimensional real vector spaces. In this
file, we discuss the specific situation of inner product spaces, where an orientation gives
rise to a canonical volume form. We show that the measure coming from this volume form gives
measure `1` to the parallelepiped spanned by any orthonormal basis, and that it coincides with
the canonical `volume` from the `measure_space` instance.
-/
open finite_dimensional measure_theory measure_theory.measure set
variables {ι F : Type*}
variables [fintype ι] [normed_add_comm_group F] [inner_product_space ℝ F] [finite_dimensional ℝ F]
[measurable_space F] [borel_space F]
section
variables {m n : ℕ} [_i : fact (finrank ℝ F = n)]
include _i
/-- The volume form coming from an orientation in an inner product space gives measure `1` to the
parallelepiped associated to any orthonormal basis. This is a rephrasing of
`abs_volume_form_apply_of_orthonormal` in terms of measures. -/
lemma orientation.measure_orthonormal_basis
(o : orientation ℝ F (fin n)) (b : orthonormal_basis ι ℝ F) :
o.volume_form.measure (parallelepiped b) = 1 :=
begin
have e : ι ≃ fin n,
{ refine fintype.equiv_fin_of_card_eq _,
rw [← _i.out, finrank_eq_card_basis b.to_basis] },
have A : ⇑b = (b.reindex e) ∘ e,
{ ext x,
simp only [orthonormal_basis.coe_reindex, function.comp_app, equiv.symm_apply_apply] },
rw [A, parallelepiped_comp_equiv, alternating_map.measure_parallelepiped,
o.abs_volume_form_apply_of_orthonormal, ennreal.of_real_one],
end
/-- In an oriented inner product space, the measure coming from the canonical volume form
associated to an orientation coincides with the volume. -/
lemma orientation.measure_eq_volume (o : orientation ℝ F (fin n)) :
o.volume_form.measure = volume :=
begin
have A : o.volume_form.measure ((std_orthonormal_basis ℝ F).to_basis.parallelepiped) = 1,
from orientation.measure_orthonormal_basis o (std_orthonormal_basis ℝ F),
rw [add_haar_measure_unique o.volume_form.measure
((std_orthonormal_basis ℝ F).to_basis.parallelepiped), A, one_smul],
simp only [volume, basis.add_haar],
end
end
/-- The volume measure in a finite-dimensional inner product space gives measure `1` to the
parallelepiped spanned by any orthonormal basis. -/
lemma orthonormal_basis.volume_parallelepiped (b : orthonormal_basis ι ℝ F) :
volume (parallelepiped b) = 1 :=
begin
haveI : fact (finrank ℝ F = finrank ℝ F) := ⟨rfl⟩,
let o := (std_orthonormal_basis ℝ F).to_basis.orientation,
rw ← o.measure_eq_volume,
exact o.measure_orthonormal_basis b,
end
|
lemma (in landau_symbol) mult_in_1: assumes "f \<in> L F (\<lambda>_. 1)" "g \<in> L F (\<lambda>_. 1)" shows "(\<lambda>x. f x * g x) \<in> L F (\<lambda>_. 1)" |
# Tutorial 4 - Pressure vs Temperature
Surfinpy has the functionality to generate phase diagrams as a function of pressure vs temperature, based upon the methodology used in Molinari et al. (J. Phys. Chem. C 116, 12, 7073-7082), according to
\begin{align}
\gamma_{adsorbed, T, P} & = \gamma_{bare} + C \left( E_{ads, T} - RT\ln \left( \frac{p}{p^o} \right) \right)
\end{align}
where $\gamma_{adsorbed, T, P}$ is the surface energy of the surface with adsorbed species at temperature (T) and pressure (P), $\gamma_{bare}$ is the surface energy of the bare surface, C is the coverage of adsorbed species, and $E_{ads}$ is the adsorption energy,
\begin{align}
E_{ads, T} & = \left( E_{slab, adsorbant} - ( E_{slab, bare} + n_{H_2O} E_{H_2O, T} ) \right) / n_{H_2O}
\end{align}
where $E_{slab, adsorbant}$ is the energy of the surface and the adsorbed species, $n_{H_2O}$ is the number of adsorbed species,
\begin{align}
E_{H_2O, (T)} & = E_{H_2O, (g)} - TS_{(T)}
\end{align}
where $S_{(T)}$ is the experimental entropy of gaseous water in the standard state.
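To make the temperature correction concrete, here is a minimal sketch (not part of surfinpy's own code) of how the entropy term could be applied by hand. It assumes the entropy is tabulated in J mol$^{-1}$ K$^{-1}$, as in the NIST-JANAF tables, and converts it to eV per molecule; the function and variable names are illustrative only.
```python
# Hedged sketch: temperature-correct a DFT water energy by hand.
# Assumes S is tabulated in J mol^-1 K^-1 (NIST-JANAF convention).
J_PER_MOL_TO_EV = 1.0 / 96485.33  # 1 eV ~ 96,485 J mol^-1

def water_energy_at_T(e_h2o_dft, temperature, entropy_j_mol_k):
    """E_{H2O,(T)} = E_{H2O,(g)} - T * S_(T), with S converted to eV/K."""
    s_ev_per_k = entropy_j_mol_k * J_PER_MOL_TO_EV
    return e_h2o_dft - temperature * s_ev_per_k

# e.g. at 300 K, using S ~ 188.8 J mol^-1 K^-1 for gaseous water
print(water_energy_at_T(-14.00, 300.0, 188.8))  # ~ -14.59 eV
```
This is the kind of correction that the `p_vs_t` module applies for every temperature in the thermochemical table.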
So let's look at this in action. The module used is called `p_vs_t`.
```python
import matplotlib.pyplot as plt
from surfinpy import utils as ut
from surfinpy import p_vs_t
from surfinpy import data
```
There are a number of user inputs that are required; the inputs are similar to `mu_vs_mu` but with some small differences. First we need the DFT energy of the adsorbant (eV), the energy of the surface (J m$^{-2}$) and the data for each surface. For clarity, in the `surfinpy.data.DataSet` objects for each surface it is always assumed that the adsorbing species is 'Y'.
```python
adsorbant = -14.00
```
```python
SE = 1.40
```
```python
stoich = data.DataSet(cation = 24, x = 48, y = 0, area = 60.22, energy = -575.00, label = 'Bare')
H2O = data.DataSet(cation = 24, x = 48, y = 2, area = 60.22, energy = -605.00, label = '1 Water')
H2O_2 = data.DataSet(cation = 24, x = 48, y = 8, area = 60.22, energy = -695.00, label = '2 Water')
```
```python
data = [H2O, H2O_2]
```
The coverage of the adsorbing species (in this case water) is also needed. surfinpy has a built-in function within the utils module to do this for you: it takes the number of adsorbing species and the surface area and calculates the coverage.
```python
cov = ut.calculate_coverage(data)
```
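Under the hood this is essentially a unit conversion. A rough sketch of what it could look like, assuming the area is given in Å$^2$ and that the slab exposes two equivalent surfaces (surfinpy's own conventions may differ in detail):
```python
# Hedged sketch of the conversion behind the coverage values.
# 1 Angstrom^2 = 1e-20 m^2, so dividing the species count by the
# area in Angstrom^2 and multiplying by 1e20 gives species per m^2;
# the factor of 2 accounts for the slab's two surfaces.
def coverage_per_m2(n_species, area_angstrom2):
    return n_species / (2 * area_angstrom2) * 1e20

print(coverage_per_m2(2, 60.22))  # the '1 Water' surface above
```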
We need the thermochemical data for the adsorbed species in order to make the DFT energy of our adsorbing species a temperature-dependent term. This data has been taken directly from the NIST-JANAF database and has been cut to include just the temperature and S values within the temperature range that interests us (in this case 0 - 1000 K).
```python
thermochem = ut.read_nist("H2O.txt")
```
Now we can generate our phase diagram.
```python
system = p_vs_t.calculate(stoich, data, SE, adsorbant, thermochem, coverage=cov)
ax = system.plot(colourmap='Spectral', figsize=(6, 4), ylabel='$log_{10} (P_{H_2O})$')
plt.savefig("../../../docs/source/Figures/Surfaces_7.png", dpi=600)
plt.show()
```
```python
```
|
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9.
Copyright (c) 2020, 2021, Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl
-}
open import LibraBFT.Prelude
open import LibraBFT.Base.PKCS
-- This module defines the types used to define a SystemModel.
module LibraBFT.Yasm.Base
(ℓ-EC : Level)
(EpochConfig : Set ℓ-EC)
(epochId : EpochConfig → ℕ)
(authorsN : EpochConfig → ℕ)
where
EpochId : Set
EpochId = ℕ
Member : EpochConfig → Set
Member = Fin ∘ authorsN
record EpochConfigFor (eid : EpochId) : Set ℓ-EC where
field
epochConfig : EpochConfig
forEpochId : epochId epochConfig ≡ eid
-- Our system is configured through a value of type
-- SystemParameters where we specify:
record SystemParameters : Set (ℓ+1 0ℓ ℓ⊔ ℓ-EC) where
constructor mkSysParms
field
PeerId : Set
_≟PeerId_ : ∀ (p₁ p₂ : PeerId) → Dec (p₁ ≡ p₂)
PeerState : Set
initPS : PeerState -- Represents an uninitialised PeerState, about which we know nothing whatsoever
Msg : Set
Part : Set -- Types of interest that can be represented in Msgs
-- The messages must be able to carry signatures
instance Part-sig : WithSig Part
-- A relation specifying what Parts are included in a Msg.
_⊂Msg_ : Part → Msg → Set
-- Finally, messages must carry an epoch id and might have an author
part-epoch : Part → EpochId
-- Initializes a potentially-empty state with an EpochConfig
init : PeerId → EpochConfig → PeerState → PeerState × List Msg
-- Handles a message on a previously initialized peer.
handle : PeerId → Msg → PeerState → PeerState × List Msg
-- TODO-3?: So far, handlers only produce messages to be sent.
-- It would be reasonable to generalize this to something like
--
-- data Action = Send Msg | Crash CrashMsg | Log LogMsg | ...
--
-- on the system level, and have the handlers return List Action,
  -- rather than just List Msg.  For example, if an assertion fires, this
-- could "kill the process" and make it not send any messages in the future.
-- We could also then prove that the handlers do not crash, certain
-- messages are logged under certain circumstances, etc.
--
-- Alternatively, we could keep this outside the system model by
-- defining an application-specific peerState type, for example:
--
-- > libraHandle : Msg → Status × Log × LState → Status × LState × List Action
-- > libraHandle _ (Crashed , l , s) = Crashed , s , [] -- i.e., crashed peers never send messages
-- >
-- > handle = filter isSend ∘ libraHandle
|
------------------------------------------------------------------------------
-- Distributive laws on a binary operation: Lemma 3
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module DistributiveLaws.Lemma3-ATP where
open import DistributiveLaws.Base
------------------------------------------------------------------------------
postulate lemma₃ : ∀ x y z → (x · y) · (z · z) ≡ (x · y) · z
{-# ATP prove lemma₃ #-}
|
Outside of his association with the Bruins , Ross also helped to improve the game . He created a style of hockey puck still used today , and advocated an improved style of goal nets , a change that lasted forty years . In 1947 Ross donated the Art Ross Trophy , awarded to the leading scorer of the NHL regular season . Ross was inducted into the Hockey Hall of Fame in 1949 .
|
Our local Albertson’s, in Oregon, removed self service checkouts during the summer. Self service checkouts have been seen as a semi-automated way of reducing labor costs, and for some customers, a faster checkout experience. However, Albertson’s grocery is now removing self service checkouts from additional (but not all) stores in Southern California.
|
/-
Copyright (c) 2014 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Floris van Doorn, Leonardo de Moura
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.logic
universes u
namespace Mathlib
namespace nat
notation:1024 "ℕ" => Mathlib.nat
inductive less_than_or_equal (a : ℕ) : ℕ → Prop
where
| refl : less_than_or_equal a a
| step : ∀ {b : ℕ}, less_than_or_equal a b → less_than_or_equal a (Nat.succ b)
protected instance has_le : HasLessEq ℕ :=
{ LessEq := less_than_or_equal }
protected def le (n : ℕ) (m : ℕ) :=
less_than_or_equal n m
protected def lt (n : ℕ) (m : ℕ) :=
less_than_or_equal (Nat.succ n) m
protected instance has_lt : HasLess ℕ :=
{ Less := nat.lt }
def pred : ℕ → ℕ :=
Nat.pred
protected def sub : ℕ → ℕ → ℕ :=
Nat.sub
protected def mul : ℕ → ℕ → ℕ :=
Nat.mul
protected instance has_sub : Sub ℕ :=
{ sub := Nat.sub }
protected instance has_mul : Mul ℕ :=
{ mul := Nat.mul }
-- defeq to the instance provided by comm_semiring
protected instance has_dvd : has_dvd ℕ :=
has_dvd.mk fun (a b : ℕ) => ∃ (c : ℕ), b = a * c
protected instance decidable_eq : DecidableEq ℕ :=
sorry
def repeat {α : Type u} (f : ℕ → α → α) : ℕ → α → α :=
sorry
protected instance inhabited : Inhabited ℕ :=
{ default := 0 }
@[simp] theorem nat_zero_eq_zero : 0 = 0 :=
rfl
/- properties of inequality -/
protected def le_refl (a : ℕ) : a ≤ a :=
less_than_or_equal.refl
theorem le_succ (n : ℕ) : n ≤ Nat.succ n :=
less_than_or_equal.step (nat.le_refl n)
theorem succ_le_succ {n : ℕ} {m : ℕ} : n ≤ m → Nat.succ n ≤ Nat.succ m :=
fun (h : n ≤ m) =>
less_than_or_equal._oldrec (nat.le_refl (Nat.succ n))
(fun (a : ℕ) (b : less_than_or_equal n a) => less_than_or_equal.step) h
theorem zero_le (n : ℕ) : 0 ≤ n := sorry
theorem zero_lt_succ (n : ℕ) : 0 < Nat.succ n :=
succ_le_succ (zero_le n)
def succ_pos (n : ℕ) : 0 < Nat.succ n :=
zero_lt_succ
theorem not_succ_le_zero (n : ℕ) : Nat.succ n ≤ 0 → False := sorry
theorem not_lt_zero (a : ℕ) : ¬a < 0 :=
not_succ_le_zero a
theorem pred_le_pred {n : ℕ} {m : ℕ} : n ≤ m → Nat.pred n ≤ Nat.pred m := sorry
theorem le_of_succ_le_succ {n : ℕ} {m : ℕ} : Nat.succ n ≤ Nat.succ m → n ≤ m :=
pred_le_pred
protected instance decidable_le (a : ℕ) (b : ℕ) : Decidable (a ≤ b) :=
sorry
protected instance decidable_lt (a : ℕ) (b : ℕ) : Decidable (a < b) :=
nat.decidable_le (Nat.succ a) b
protected theorem eq_or_lt_of_le {a : ℕ} {b : ℕ} (h : a ≤ b) : a = b ∨ a < b :=
less_than_or_equal.cases_on h (Or.inl rfl) fun (n : ℕ) (h : less_than_or_equal a n) => Or.inr (succ_le_succ h)
theorem lt_succ_of_le {a : ℕ} {b : ℕ} : a ≤ b → a < Nat.succ b :=
succ_le_succ
@[simp] theorem succ_sub_succ_eq_sub (a : ℕ) (b : ℕ) : Nat.succ a - Nat.succ b = a - b :=
nat.rec_on b ((fun (this : Nat.succ a - 1 = a - 0) => this) (Eq.refl (Nat.succ a - 1)))
fun (b : ℕ) => congr_arg Nat.pred
theorem not_succ_le_self (n : ℕ) : ¬Nat.succ n ≤ n :=
Nat.rec (not_succ_le_zero 0)
(fun (a : ℕ) (b : ¬Nat.succ a ≤ a) (c : Nat.succ (Nat.succ a) ≤ Nat.succ a) => b (le_of_succ_le_succ c)) n
protected theorem lt_irrefl (n : ℕ) : ¬n < n :=
not_succ_le_self n
protected theorem le_trans {n : ℕ} {m : ℕ} {k : ℕ} (h1 : n ≤ m) : m ≤ k → n ≤ k :=
less_than_or_equal._oldrec h1 fun (p : ℕ) (h2 : less_than_or_equal m p) => less_than_or_equal.step
theorem pred_le (n : ℕ) : Nat.pred n ≤ n :=
nat.cases_on n (idRhs (less_than_or_equal (Nat.pred 0) (Nat.pred 0)) less_than_or_equal.refl)
fun (n : ℕ) =>
idRhs (less_than_or_equal (Nat.pred (Nat.succ n)) (Nat.succ n)) (less_than_or_equal.step less_than_or_equal.refl)
theorem pred_lt {n : ℕ} : n ≠ 0 → Nat.pred n < n := sorry
theorem sub_le (a : ℕ) (b : ℕ) : a - b ≤ a :=
nat.rec_on b (nat.le_refl (a - 0)) fun (b₁ : ℕ) => nat.le_trans (pred_le (a - b₁))
theorem sub_lt {a : ℕ} {b : ℕ} : 0 < a → 0 < b → a - b < a := sorry
protected theorem lt_of_lt_of_le {n : ℕ} {m : ℕ} {k : ℕ} : n < m → m ≤ k → n < k :=
nat.le_trans
/- Basic nat.add lemmas -/
protected theorem zero_add (n : ℕ) : 0 + n = n := sorry
theorem succ_add (n : ℕ) (m : ℕ) : Nat.succ n + m = Nat.succ (n + m) := sorry
theorem add_succ (n : ℕ) (m : ℕ) : n + Nat.succ m = Nat.succ (n + m) :=
rfl
protected theorem add_zero (n : ℕ) : n + 0 = n :=
rfl
theorem add_one (n : ℕ) : n + 1 = Nat.succ n :=
rfl
theorem succ_eq_add_one (n : ℕ) : Nat.succ n = n + 1 :=
rfl
/- Basic lemmas for comparing numerals -/
protected theorem bit0_succ_eq (n : ℕ) : bit0 (Nat.succ n) = Nat.succ (Nat.succ (bit0 n)) :=
(fun (this : Nat.succ (Nat.succ n + n) = Nat.succ (Nat.succ (n + n))) => this) (congr_arg Nat.succ (succ_add n n))
protected theorem zero_lt_bit0 {n : ℕ} : n ≠ 0 → 0 < bit0 n := sorry
protected theorem zero_lt_bit1 (n : ℕ) : 0 < bit1 n :=
zero_lt_succ (bit0 n)
protected theorem bit0_ne_zero {n : ℕ} : n ≠ 0 → bit0 n ≠ 0 := sorry
protected theorem bit1_ne_zero (n : ℕ) : bit1 n ≠ 0 :=
(fun (this : Nat.succ (n + n) ≠ 0) => this) fun (h : Nat.succ (n + n) = 0) => nat.no_confusion h
|
import System
import Data.List.Views
data Palindrome : List a -> Type where
PNil : Palindrome []
POne : Palindrome [x]
PRec : Palindrome xs -> Palindrome (x :: xs ++ [x])
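-- Decide whether a list is a palindrome using the `vList` view,
-- which exposes a non-empty, non-singleton list as one element on
-- each end around a middle (`VCons`), so we can compare the two
-- ends with `decEq` and recurse on the middle.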
total
palindrome : DecEq a => (xs : List a) -> Maybe (Palindrome xs)
palindrome xs with (vList xs)
palindrome [] | VNil = Just PNil
palindrome [x] | VOne = Just POne
palindrome (x :: (ys ++ [y])) | (VCons z) with (decEq x y)
palindrome (y :: (ys ++ [y])) | (VCons urec) | (Yes Refl)
= case palindrome ys | urec of
Nothing => Nothing
Just x => Just (PRec x)
palindrome (x :: (ys ++ [y])) | (VCons z) | (No contra)
= Nothing
palinBool : DecEq a => List a -> Bool
palinBool xs = case palindrome xs of
Nothing => False
Just _ => True
main : IO ()
main = do getArgs >>= printLn
[_, num] <- getArgs
let list = replicate (cast num) 'a'
printLn (palinBool list)
|
! semantically bad, but should be parsable
interface bad9 ! this interface is invalid !
! despite the fact that it is unambiguous !
subroutine s9a(x)
real :: x
end subroutine s9a
subroutine s9b(x)
interface
function x(a)
real :: x,a
end function x
end interface
end subroutine s9b
subroutine s9c(x)
interface
function x(a)
real :: x
integer :: a
end function x
end interface
end subroutine s9c
end interface bad9
end
|
(* Copyright (c) 2012-2015, Robbert Krebbers. *)
(* This file is distributed under the terms of the BSD license. *)
Require Export types integer_operations.
Local Open Scope ctype_scope.
Local Unset Elimination Schemes.
Class Env (K : iType) : iType := {
env_type_env :> IntEnv K;
size_of : env K → type K → nat;
align_of : env K → type K → nat;
field_sizes : env K → list (type K) → list nat;
alloc_can_fail : bool
}.
Arguments size_of _ _ _ _ : simpl never.
Arguments align_of _ _ _ _ : simpl never.
Arguments field_sizes _ _ _ _ : simpl never.
Definition ptr_size_of `{Env K} (Γ : env K) (τp : ptr_type K) : nat :=
match τp with TType τ => size_of Γ τ | _ => 1 end.
Definition offset_of `{Env K} (Γ : env K) (τs : list (type K))
(i : nat) : nat := sum_list $ take i $ field_sizes Γ τs.
Definition bit_size_of `{Env K} (Γ : env K)
(τ : type K) : nat := size_of Γ τ * char_bits.
Definition bit_align_of `{Env K} (Γ : env K)
(τ : type K) : nat := align_of Γ τ * char_bits.
Definition ptr_bit_size_of `{Env K} (Γ : env K) (τp : ptr_type K) : nat :=
match τp with TType τ => bit_size_of Γ τ | _ => char_bits end.
Definition field_bit_sizes `{Env K} (Γ : env K)
(τs : list (type K)) : list nat :=
(λ sz, sz * char_bits) <$> field_sizes Γ τs.
Definition field_bit_padding `{Env K}
(Γ : env K) (τs : list (type K)) : list nat :=
zip_with (λ sz τ, sz - bit_size_of Γ τ) (field_bit_sizes Γ τs) τs.
Definition bit_offset_of `{Env K}
(Γ : env K) (τs : list (type K)) (i : nat) : nat :=
sum_list $ take i $ field_bit_sizes Γ τs.
Class EnvSpec (K : iType) `{Env K} := {
int_env_spec :> IntEnvSpec K;
size_of_ptr_ne_0 Γ τp : size_of Γ (τp.*) ≠ 0;
size_of_int Γ τi : size_of Γ (intT τi) = rank_size (rank τi);
size_of_void_ne_0 Γ : size_of Γ voidT ≠ 0;
size_of_array Γ τ n : size_of Γ (τ.[n]) = n * size_of Γ τ;
size_of_struct Γ t τs :
✓ Γ → Γ !! t = Some τs →
size_of Γ (structT t) = sum_list (field_sizes Γ τs);
size_of_fields Γ τs :
✓ Γ → Forall2 (λ τ sz, size_of Γ τ ≤ sz) τs (field_sizes Γ τs);
size_of_union Γ t τs :
✓ Γ → Γ !! t = Some τs →
Forall (λ τ, size_of Γ τ ≤ size_of Γ (unionT t)) τs;
align_of_array Γ τ n : (align_of Γ τ | align_of Γ (τ.[n]));
align_of_compound Γ c t τs i τ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
(align_of Γ τ | align_of Γ (compoundT{c} t));
align_of_divide Γ τ :
✓ Γ → ✓{Γ} τ → (align_of Γ τ | size_of Γ τ);
align_of_offset_of Γ τs i τ :
✓ Γ → ✓{Γ}* τs → τs !! i = Some τ → (align_of Γ τ | offset_of Γ τs i);
size_of_weaken Γ1 Γ2 τ :
✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → size_of Γ1 τ = size_of Γ2 τ;
align_of_weaken Γ1 Γ2 τ :
✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → align_of Γ1 τ = align_of Γ2 τ;
fields_sizes_weaken Γ1 Γ2 τs :
✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 → field_sizes Γ1 τs = field_sizes Γ2 τs
}.
Section env_spec.
Context `{EnvSpec K}.
Implicit Types τ σ : type K.
Implicit Types τs σs : list (type K).
Implicit Types Γ : env K.
Lemma size_of_char Γ si : size_of Γ (intT (IntType si char_rank)) = 1.
Proof. rewrite size_of_int. by apply rank_size_char. Qed.
Lemma field_sizes_length Γ τs : ✓ Γ → length (field_sizes Γ τs) = length τs.
Proof. symmetry. by eapply Forall2_length, size_of_fields. Qed.
Lemma field_sizes_nil Γ : ✓ Γ → field_sizes Γ [] = [].
Proof. intros. apply nil_length_inv. by rewrite field_sizes_length. Qed.
Lemma size_of_union_lookup Γ t τs i τ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
size_of Γ τ ≤ size_of Γ (unionT t).
Proof.
intros. assert (Forall (λ τ, size_of Γ τ ≤ size_of Γ (unionT t)) τs) as Hτs
by eauto using size_of_union; rewrite Forall_lookup in Hτs. eauto.
Qed.
Lemma size_of_struct_lookup Γ t τs i τ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
size_of Γ τ ≤ size_of Γ (structT t).
Proof.
intros HΓ Ht Hτs. erewrite size_of_struct by eauto. clear Ht. revert i Hτs.
induction (size_of_fields Γ τs HΓ) as [|σ sz σs szs]; intros [|?] ?;
simplify_equality'; auto with lia.
transitivity (sum_list szs); eauto with lia.
Qed.
Lemma size_of_union_singleton Γ t τ :
✓ Γ → Γ !! t = Some [τ] → size_of Γ τ ≤ size_of Γ (unionT t).
Proof. intros. by apply (size_of_union_lookup Γ t [τ] 0). Qed.
Lemma sizes_of_weaken P Γ1 Γ2 τs :
✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
Forall (λ τ', P (size_of Γ1 τ')) τs → Forall (λ τ', P (size_of Γ2 τ')) τs.
Proof.
induction 4; decompose_Forall_hyps; constructor; simpl;
erewrite <-1?size_of_weaken by eauto; eauto.
Qed.
Lemma bit_size_of_weaken Γ1 Γ2 τ :
✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → bit_size_of Γ1 τ = bit_size_of Γ2 τ.
Proof. intros. unfold bit_size_of. f_equal. by apply size_of_weaken. Qed.
Lemma bit_size_of_int Γ τi : bit_size_of Γ (intT τi) = int_width τi.
Proof. unfold bit_size_of. by rewrite size_of_int. Qed.
Lemma bit_size_of_char Γ si :
bit_size_of Γ (intT (IntType si char_rank)) = char_bits.
Proof. rewrite bit_size_of_int. by apply int_width_char. Qed.
Lemma bit_size_of_int_same_kind Γ τi1 τi2 :
rank τi1 = rank τi2 → bit_size_of Γ (intT τi1) = bit_size_of Γ (intT τi2).
Proof.
destruct τi1, τi2; intros; simplify_equality'. by rewrite !bit_size_of_int.
Qed.
Lemma bit_size_of_array Γ τ n : bit_size_of Γ (τ.[n]) = n * bit_size_of Γ τ.
Proof. unfold bit_size_of. by rewrite !size_of_array, Nat.mul_assoc. Qed.
Lemma bit_size_of_struct Γ t τs :
✓ Γ → Γ !! t = Some τs →
bit_size_of Γ (structT t) = sum_list (field_bit_sizes Γ τs).
Proof.
unfold bit_size_of, field_bit_sizes. intros.
erewrite size_of_struct by eauto.
induction (field_sizes Γ τs); csimpl; auto with lia.
Qed.
Lemma bit_size_of_fields Γ τs :
✓ Γ → Forall2 (λ τ sz, bit_size_of Γ τ ≤ sz) τs (field_bit_sizes Γ τs).
Proof.
intros HΓ. unfold bit_size_of, field_bit_sizes.
induction (size_of_fields Γ τs HΓ);
simpl; constructor; auto using Nat.mul_le_mono_nonneg_r with lia.
Qed.
Lemma bit_size_of_union Γ t τs :
✓ Γ → Γ !! t = Some τs →
Forall (λ τ, bit_size_of Γ τ ≤ bit_size_of Γ (unionT t)) τs.
Proof.
intros ? Hτs. apply size_of_union in Hτs; auto. unfold bit_size_of.
induction Hτs; constructor; auto using Nat.mul_le_mono_nonneg_r with lia.
Qed.
Lemma bit_size_of_union_lookup Γ t τs i τ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
bit_size_of Γ τ ≤ bit_size_of Γ (unionT t).
Proof.
intros. unfold bit_size_of. apply Nat.mul_le_mono_nonneg_r;
eauto using size_of_union_lookup with lia.
Qed.
Lemma bit_size_of_union_singleton Γ t τ :
✓ Γ → Γ !! t = Some [τ] → bit_size_of Γ τ ≤ bit_size_of Γ (unionT t).
Proof. intros. by apply (bit_size_of_union_lookup Γ t [τ] 0). Qed.
Lemma ptr_bit_size_of_alt Γ τp :
ptr_bit_size_of Γ τp = ptr_size_of Γ τp * char_bits.
Proof. destruct τp; simpl; unfold bit_size_of; lia. Qed.
Lemma field_bit_sizes_weaken Γ1 Γ2 τs :
✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 → field_bit_sizes Γ1 τs = field_bit_sizes Γ2 τs.
Proof. unfold field_bit_sizes. auto using fields_sizes_weaken with f_equal. Qed.
Lemma field_bit_sizes_length Γ τs :
✓ Γ → length (field_bit_sizes Γ τs) = length τs.
Proof. symmetry. by eapply Forall2_length, bit_size_of_fields. Qed.
Lemma field_bit_sizes_nil Γ : ✓ Γ → field_bit_sizes Γ [] = [].
Proof. intros. apply nil_length_inv. by rewrite field_bit_sizes_length. Qed.
Lemma field_bit_padding_weaken Γ1 Γ2 τs :
✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
field_bit_padding Γ1 τs = field_bit_padding Γ2 τs.
Proof.
intros HΓ1 Hτs ?. unfold field_bit_padding.
erewrite <-(field_bit_sizes_weaken Γ1 Γ2) by eauto.
induction (bit_size_of_fields _ τs HΓ1); decompose_Forall_hyps;
auto using bit_size_of_weaken with f_equal.
Qed.
Lemma field_bit_padding_length Γ τs :
✓ Γ → length (field_bit_padding Γ τs) = length τs.
Proof.
intros. unfold field_bit_padding.
rewrite zip_with_length, field_bit_sizes_length by done; lia.
Qed.
Lemma bit_offset_of_weaken Γ1 Γ2 τs i :
✓ Γ1 → ✓{Γ1}* τs → Γ1 ⊆ Γ2 →
bit_offset_of Γ1 τs i = bit_offset_of Γ2 τs i.
Proof.
unfold bit_offset_of. eauto using field_bit_sizes_weaken with f_equal.
Qed.
Lemma bit_offset_of_alt Γ τs i :
bit_offset_of Γ τs i = offset_of Γ τs i * char_bits.
Proof.
unfold bit_offset_of, offset_of, field_bit_sizes.
revert i. induction (field_sizes Γ τs) as [|?? IH];
intros [|i]; simpl; auto with lia.
by rewrite IH, Nat.mul_add_distr_r.
Qed.
Lemma bit_offset_of_lt Γ τs i j σ :
✓ Γ → τs !! i = Some σ → i < j →
bit_offset_of Γ τs i + bit_size_of Γ σ ≤ bit_offset_of Γ τs j.
Proof.
intros HΓ. revert i j σ. unfold bit_offset_of.
induction (bit_size_of_fields _ τs HΓ) as [|τ sz τs szs ?? IH];
intros [|i] [|j] σ ??; simplify_equality'; try lia.
specialize (IH i j σ). intuition lia.
Qed.
Lemma bit_offset_of_size Γ t τs i σ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some σ →
bit_offset_of Γ τs i + bit_size_of Γ σ ≤ bit_size_of Γ (structT t).
Proof.
intros HΓ Ht. erewrite bit_size_of_struct by eauto; clear Ht.
revert i σ. unfold bit_offset_of. induction (bit_size_of_fields _ τs HΓ)
as [|τ sz τs szs ?? IH]; intros [|i] σ ?; simplify_equality'; [lia|].
specialize (IH i σ). intuition lia.
Qed.
Lemma align_of_char Γ si : ✓ Γ → align_of Γ (intT (IntType si char_rank)) = 1.
Proof.
intros. apply Nat.divide_1_r; rewrite <-(size_of_char Γ si).
apply align_of_divide; repeat constructor; auto.
Qed.
Lemma bit_align_of_array Γ τ n : (bit_align_of Γ τ | bit_align_of Γ (τ.[n])).
Proof. apply Nat.mul_divide_mono_r, align_of_array. Qed.
Lemma bit_align_of_compound Γ c t τs i τ :
✓ Γ → Γ !! t = Some τs → τs !! i = Some τ →
(bit_align_of Γ τ | bit_align_of Γ (compoundT{c} t)).
Proof. eauto using Nat.mul_divide_mono_r, align_of_compound. Qed.
Lemma bit_align_of_divide Γ τ :
✓ Γ → ✓{Γ} τ → (bit_align_of Γ τ | bit_size_of Γ τ).
Proof. eauto using Nat.mul_divide_mono_r, align_of_divide. Qed.
Lemma bit_align_of_offset_of Γ τs i τ :
✓ Γ → ✓{Γ}* τs → τs !! i = Some τ →
(bit_align_of Γ τ | bit_offset_of Γ τs i).
Proof.
rewrite bit_offset_of_alt.
eauto using Nat.mul_divide_mono_r, align_of_offset_of.
Qed.
Lemma bit_align_of_weaken Γ1 Γ2 τ :
✓ Γ1 → ✓{Γ1} τ → Γ1 ⊆ Γ2 → bit_align_of Γ1 τ = bit_align_of Γ2 τ.
Proof. unfold bit_align_of; auto using align_of_weaken, f_equal. Qed.
Lemma size_of_base_ne_0 Γ τb : size_of Γ (baseT τb) ≠ 0.
Proof.
destruct τb; auto using size_of_void_ne_0, size_of_ptr_ne_0.
rewrite size_of_int. apply rank_size_ne_0.
Qed.
Lemma bit_size_of_base_ne_0 Γ τb : bit_size_of Γ (baseT τb) ≠ 0.
Proof. apply Nat.neq_mul_0. auto using char_bits_ne_0, size_of_base_ne_0. Qed.
#[global] Instance: ∀ Γ τb, PropHolds (size_of Γ (baseT τb) ≠ 0).
Proof. apply size_of_base_ne_0. Qed.
#[global] Instance: ∀ Γ τb, PropHolds (bit_size_of Γ (baseT τb) ≠ 0).
Proof. apply bit_size_of_base_ne_0. Qed.
Lemma size_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → size_of Γ τ ≠ 0.
Proof.
intros HΓ. revert τ. refine (type_env_ind _ HΓ _ _ _ _).
* auto using size_of_base_ne_0.
* intros. rewrite size_of_array. by apply Nat.neq_mul_0.
* intros [] t τs Ht Hτs IH Hlen.
+ erewrite size_of_struct by eauto. clear Ht.
destruct (size_of_fields Γ τs HΓ); decompose_Forall_hyps; auto with lia.
+ apply size_of_union in Ht; auto.
destruct Ht; decompose_Forall_hyps; auto with lia.
Qed.
Lemma align_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → align_of Γ τ ≠ 0.
Proof. eauto using Nat_divide_ne_0, size_of_ne_0, align_of_divide. Qed.
Lemma size_of_pos Γ τ : ✓ Γ → ✓{Γ} τ → 0 < size_of Γ τ.
Proof. intros. by apply Nat.neq_0_lt_0, size_of_ne_0. Qed.
Lemma bit_size_of_ne_0 Γ τ : ✓ Γ → ✓{Γ} τ → bit_size_of Γ τ ≠ 0.
Proof. intros. apply Nat.neq_mul_0. auto using char_bits_ne_0,size_of_ne_0. Qed.
Lemma bit_size_of_pos Γ τ : ✓ Γ → ✓{Γ} τ → 0 < bit_size_of Γ τ.
Proof. intros. by apply Nat.neq_0_lt_0, bit_size_of_ne_0. Qed.
End env_spec.
|
{-# OPTIONS --enable-prop #-}
True : Prop
True = {P : Prop} → P → P
|
Kestral's fitted kitchens are a feature enhancing many homes across the country.
We use the best-quality materials, installed by fully trained, skilled tradesmen who are fully certified for all the electrical, gas and plumbing work required.
Attention to detail and an immaculate finish are the hallmarks of our work. |
module CDLagrangianRefFEsTests
using Gridap
using Gridap.TensorValues
using Gridap.ReferenceFEs
using Test
using Gridap.ReferenceFEs: _CDLagrangianRefFE
T = Float64
reffe = _CDLagrangianRefFE(T,SEGMENT,(2,),(DISC,))
test_lagrangian_reference_fe(reffe)
T = Float64
reffe = _CDLagrangianRefFE(T,QUAD,1,(CONT,DISC))
test_lagrangian_reference_fe(reffe)
conf = CDConformity( (CONT,DISC) )
@test conf == Conformity(reffe)
@test get_face_own_dofs(QUAD4,conf) == get_face_own_dofs(reffe)
@test get_face_own_dofs_permutations(QUAD4,conf) == get_face_own_dofs_permutations(reffe)
T = VectorValue{2,Float64}
reffe = _CDLagrangianRefFE(T,QUAD,(2,0),(CONT,DISC))
test_lagrangian_reference_fe(reffe)
@test get_face_own_dofs(reffe,L2Conformity()) == [[], [], [], [], [], [], [], [], [1, 2, 3, 4, 5, 6]]
@test get_face_own_nodes(reffe,L2Conformity()) == [[], [], [], [], [], [], [], [], [1, 2, 3]]
T = VectorValue{3,Float64}
reffe = _CDLagrangianRefFE(T,HEX,(2,2,2),(CONT,CONT,DISC))
test_lagrangian_reference_fe(reffe)
T = VectorValue{3,Float64}
reffe = _CDLagrangianRefFE(T,HEX,2,(CONT,CONT,DISC))
test_lagrangian_reference_fe(reffe)
T = VectorValue{2,Float64}
reffe = LagrangianRefFE(T,QUAD,(2,0))
@test Conformity(reffe) == CDConformity((CONT,DISC))
test_lagrangian_reference_fe(reffe)
T = VectorValue{3,Float64}
reffe = LagrangianRefFE(T,HEX,(0,2,0))
@test Conformity(reffe) == CDConformity((DISC,CONT,DISC))
test_lagrangian_reference_fe(reffe)
reffe = ReferenceFE(HEX,lagrangian,T,(0,2,0))
@test Conformity(reffe) == CDConformity((DISC,CONT,DISC))
test_lagrangian_reference_fe(reffe)
reffe = LagrangianRefFE(T,QUAD,(2,2))
end # module
|
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
! This file was ported from Lean 3 source module group_theory.specific_groups.cyclic
! leanprover-community/mathlib commit 0f6670b8af2dff699de1c0b4b49039b31bc13c46
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.BigOperators.Order
import Mathbin.Data.Nat.Totient
import Mathbin.GroupTheory.OrderOfElement
import Mathbin.GroupTheory.Subgroup.Simple
import Mathbin.Tactic.Group
import Mathbin.GroupTheory.Exponent
/-!
# Cyclic groups
A group `G` is called cyclic if there exists an element `g : G` such that every element of `G` is of
the form `g ^ n` for some `n : ℕ`. This file only deals with the predicate on a group to be cyclic.
For the concrete cyclic group of order `n`, see `data.zmod.basic`.
## Main definitions
* `is_cyclic` is a predicate on a group stating that the group is cyclic.
## Main statements
* `is_cyclic_of_prime_card` proves that a finite group of prime order is cyclic.
* `is_simple_group_of_prime_card`, `is_simple_group.is_cyclic`,
and `is_simple_group.prime_card` classify finite simple abelian groups.
* `is_cyclic.exponent_eq_card`: For a finite cyclic group `G`, the exponent is equal to
the group's cardinality.
* `is_cyclic.exponent_eq_zero_of_infinite`: Infinite cyclic groups have exponent zero.
* `is_cyclic.iff_exponent_eq_card`: A finite commutative group is cyclic iff its exponent
is equal to its cardinality.
## Tags
cyclic group
-/
universe u
variable {α : Type u} {a : α}
section Cyclic
open BigOperators
attribute [local instance] setFintype
open Subgroup
/- ./././Mathport/Syntax/Translate/Command.lean:388:30: infer kinds are unsupported in Lean 4: #[`exists_generator] [] -/
/-- A group is called *cyclic* if it is generated by a single element. -/
class IsAddCyclic (α : Type u) [AddGroup α] : Prop where
exists_generator : ∃ g : α, ∀ x, x ∈ AddSubgroup.zmultiples g
#align is_add_cyclic IsAddCyclic
/- ./././Mathport/Syntax/Translate/Command.lean:388:30: infer kinds are unsupported in Lean 4: #[`exists_generator] [] -/
/-- A group is called *cyclic* if it is generated by a single element. -/
@[to_additive IsAddCyclic]
class IsCyclic (α : Type u) [Group α] : Prop where
exists_generator : ∃ g : α, ∀ x, x ∈ zpowers g
#align is_cyclic IsCyclic
#align is_add_cyclic IsAddCyclic
@[to_additive is_add_cyclic_of_subsingleton]
instance (priority := 100) isCyclic_of_subsingleton [Group α] [Subsingleton α] : IsCyclic α :=
⟨⟨1, fun x => by
rw [Subsingleton.elim x 1]
exact mem_zpowers 1⟩⟩
#align is_cyclic_of_subsingleton isCyclic_of_subsingleton
#align is_add_cyclic_of_subsingleton is_add_cyclic_of_subsingleton
/-- A cyclic group is always commutative. This is not an `instance` because often we have a better
proof of `comm_group`. -/
@[to_additive
"A cyclic group is always commutative. This is not an `instance` because often we have\n a better proof of `add_comm_group`."]
def IsCyclic.commGroup [hg : Group α] [IsCyclic α] : CommGroup α :=
{ hg with
mul_comm := fun x y =>
let ⟨g, hg⟩ := IsCyclic.exists_generator α
let ⟨n, hn⟩ := hg x
let ⟨m, hm⟩ := hg y
hm ▸ hn ▸ zpow_mul_comm _ _ _ }
#align is_cyclic.comm_group IsCyclic.commGroup
#align is_add_cyclic.add_comm_group IsAddCyclic.addCommGroup
variable [Group α]
@[to_additive MonoidAddHom.map_add_cyclic]
theorem MonoidHom.map_cyclic {G : Type _} [Group G] [h : IsCyclic G] (σ : G →* G) :
∃ m : ℤ, ∀ g : G, σ g = g ^ m :=
by
obtain ⟨h, hG⟩ := IsCyclic.exists_generator G
obtain ⟨m, hm⟩ := hG (σ h)
refine' ⟨m, fun g => _⟩
obtain ⟨n, rfl⟩ := hG g
rw [MonoidHom.map_zpow, ← hm, ← zpow_mul, ← zpow_mul']
#align monoid_hom.map_cyclic MonoidHom.map_cyclic
#align monoid_add_hom.map_add_cyclic MonoidAddHom.map_add_cyclic
@[to_additive is_add_cyclic_of_orderOf_eq_card]
theorem isCyclic_of_orderOf_eq_card [Fintype α] (x : α) (hx : orderOf x = Fintype.card α) :
IsCyclic α := by
classical
use x
simp_rw [← SetLike.mem_coe, ← Set.eq_univ_iff_forall]
rw [← Fintype.card_congr (Equiv.Set.univ α), orderOf_eq_card_zpowers] at hx
exact Set.eq_of_subset_of_card_le (Set.subset_univ _) (ge_of_eq hx)
#align is_cyclic_of_order_of_eq_card isCyclic_of_orderOf_eq_card
#align is_add_cyclic_of_order_of_eq_card is_add_cyclic_of_orderOf_eq_card
/-- A finite group of prime order is cyclic. -/
@[to_additive is_add_cyclic_of_prime_card "A finite group of prime order is cyclic."]
theorem isCyclic_of_prime_card {α : Type u} [Group α] [Fintype α] {p : ℕ} [hp : Fact p.Prime]
(h : Fintype.card α = p) : IsCyclic α :=
⟨by
obtain ⟨g, hg⟩ : ∃ g : α, g ≠ 1 := Fintype.exists_ne_of_one_lt_card (h.symm ▸ hp.1.one_lt) 1
classical
-- for fintype (subgroup.zpowers g)
have : Fintype.card (Subgroup.zpowers g) ∣ p :=
by
rw [← h]
apply card_subgroup_dvd_card
rw [Nat.dvd_prime hp.1] at this
cases this
· rw [Fintype.card_eq_one_iff] at this
cases' this with t ht
suffices g = 1 by contradiction
have hgt :=
ht
⟨g, by
change g ∈ Subgroup.zpowers g
exact Subgroup.mem_zpowers g⟩
rw [← ht 1] at hgt
change (⟨_, _⟩ : Subgroup.zpowers g) = ⟨_, _⟩ at hgt
simpa using hgt
· use g
intro x
rw [← h] at this
rw [Subgroup.eq_top_of_card_eq _ this]
exact Subgroup.mem_top _⟩
#align is_cyclic_of_prime_card isCyclic_of_prime_card
#align is_add_cyclic_of_prime_card is_add_cyclic_of_prime_card
@[to_additive add_orderOf_eq_card_of_forall_mem_zmultiples]
theorem orderOf_eq_card_of_forall_mem_zpowers [Fintype α] {g : α} (hx : ∀ x, x ∈ zpowers g) :
orderOf g = Fintype.card α := by
classical
rw [orderOf_eq_card_zpowers]
apply Fintype.card_of_finset'
simpa using hx
#align order_of_eq_card_of_forall_mem_zpowers orderOf_eq_card_of_forall_mem_zpowers
#align add_order_of_eq_card_of_forall_mem_zmultiples add_orderOf_eq_card_of_forall_mem_zmultiples
@[to_additive Infinite.add_orderOf_eq_zero_of_forall_mem_zmultiples]
theorem Infinite.orderOf_eq_zero_of_forall_mem_zpowers [Infinite α] {g : α}
(h : ∀ x, x ∈ zpowers g) : orderOf g = 0 := by
classical
rw [orderOf_eq_zero_iff']
refine' fun n hn hgn => _
have ho := orderOf_pos' ((isOfFinOrder_iff_pow_eq_one g).mpr ⟨n, hn, hgn⟩)
obtain ⟨x, hx⟩ :=
Infinite.exists_not_mem_finset (Finset.image (pow g) <| Finset.range <| orderOf g)
apply hx
rw [← mem_powers_iff_mem_range_order_of' g x ho, Submonoid.mem_powers_iff]
obtain ⟨k, hk⟩ := h x
obtain ⟨k, rfl | rfl⟩ := k.eq_coe_or_neg
· exact ⟨k, by exact_mod_cast hk⟩
let t : ℤ := -k % orderOf g
rw [zpow_eq_mod_orderOf] at hk
have : 0 ≤ t := Int.emod_nonneg (-k) (by exact_mod_cast ho.ne')
refine' ⟨t.to_nat, _⟩
rwa [← zpow_ofNat, Int.toNat_of_nonneg this]
#align infinite.order_of_eq_zero_of_forall_mem_zpowers Infinite.orderOf_eq_zero_of_forall_mem_zpowers
#align infinite.add_order_of_eq_zero_of_forall_mem_zmultiples Infinite.add_orderOf_eq_zero_of_forall_mem_zmultiples
@[to_additive Bot.is_add_cyclic]
instance Bot.isCyclic {α : Type u} [Group α] : IsCyclic (⊥ : Subgroup α) :=
⟨⟨1, fun x => ⟨0, Subtype.eq <| (zpow_zero (1 : α)).trans <| Eq.symm (Subgroup.mem_bot.1 x.2)⟩⟩⟩
#align bot.is_cyclic Bot.isCyclic
#align bot.is_add_cyclic Bot.is_add_cyclic
@[to_additive AddSubgroup.is_add_cyclic]
instance Subgroup.isCyclic {α : Type u} [Group α] [IsCyclic α] (H : Subgroup α) : IsCyclic H :=
haveI := Classical.propDecidable
let ⟨g, hg⟩ := IsCyclic.exists_generator α
if hx : ∃ x : α, x ∈ H ∧ x ≠ (1 : α) then
let ⟨x, hx₁, hx₂⟩ := hx
let ⟨k, hk⟩ := hg x
have hex : ∃ n : ℕ, 0 < n ∧ g ^ n ∈ H :=
⟨k.natAbs,
Nat.pos_of_ne_zero fun h => hx₂ <| by rw [← hk, Int.eq_zero_of_natAbs_eq_zero h, zpow_zero],
match k, hk with
| (k : ℕ), hk => by rw [Int.natAbs_ofNat, ← zpow_ofNat, hk] <;> exact hx₁
| -[k+1], hk => by rw [Int.natAbs_of_negSucc, ← Subgroup.inv_mem_iff H] <;> simp_all⟩
⟨⟨⟨g ^ Nat.find hex, (Nat.find_spec hex).2⟩, fun ⟨x, hx⟩ =>
let ⟨k, hk⟩ := hg x
have hk₁ : g ^ ((Nat.find hex : ℤ) * (k / Nat.find hex)) ∈ zpowers (g ^ Nat.find hex) :=
⟨k / Nat.find hex, by rw [← zpow_ofNat, zpow_mul]⟩
have hk₂ : g ^ ((Nat.find hex : ℤ) * (k / Nat.find hex)) ∈ H :=
by
rw [zpow_mul]
apply H.zpow_mem
exact_mod_cast (Nat.find_spec hex).2
have hk₃ : g ^ (k % Nat.find hex) ∈ H :=
(Subgroup.mul_mem_cancel_right H hk₂).1 <| by
rw [← zpow_add, Int.emod_add_ediv, hk] <;> exact hx
have hk₄ : k % Nat.find hex = (k % Nat.find hex).natAbs := by
rw [Int.natAbs_of_nonneg
(Int.emod_nonneg _ (Int.coe_nat_ne_zero_iff_pos.2 (Nat.find_spec hex).1))]
have hk₅ : g ^ (k % Nat.find hex).natAbs ∈ H := by rwa [← zpow_ofNat, ← hk₄]
have hk₆ : (k % (Nat.find hex : ℤ)).natAbs = 0 :=
by_contradiction fun h =>
Nat.find_min hex
(Int.ofNat_lt.1 <| by
rw [← hk₄] <;> exact Int.emod_lt_of_pos _ (Int.coe_nat_pos.2 (Nat.find_spec hex).1))
⟨Nat.pos_of_ne_zero h, hk₅⟩
⟨k / (Nat.find hex : ℤ),
Subtype.ext_iff_val.2
(by
suffices g ^ ((Nat.find hex : ℤ) * (k / Nat.find hex)) = x by simpa [zpow_mul]
rw [Int.mul_ediv_cancel'
(Int.dvd_of_emod_eq_zero (Int.eq_zero_of_natAbs_eq_zero hk₆)),
hk])⟩⟩⟩
else
by
have : H = (⊥ : Subgroup α) :=
Subgroup.ext fun x =>
⟨fun h => by simp at * <;> tauto, fun h => by rw [Subgroup.mem_bot.1 h] <;> exact H.one_mem⟩
clear _let_match <;> subst this <;> infer_instance
#align subgroup.is_cyclic Subgroup.isCyclic
#align add_subgroup.is_add_cyclic AddSubgroup.is_add_cyclic
open Finset Nat
section Classical
open Classical
/- ./././Mathport/Syntax/Translate/Tactic/Lean3.lean:132:4: warning: unsupported: rw with cfg: { occs := occurrences.pos[occurrences.pos] «expr[ ,]»([2, 3]) } -/
@[to_additive IsAddCyclic.card_pow_eq_one_le]
theorem IsCyclic.card_pow_eq_one_le [DecidableEq α] [Fintype α] [IsCyclic α] {n : ℕ} (hn0 : 0 < n) :
(univ.filterₓ fun a : α => a ^ n = 1).card ≤ n :=
let ⟨g, hg⟩ := IsCyclic.exists_generator α
calc
(univ.filterₓ fun a : α => a ^ n = 1).card ≤
(zpowers (g ^ (Fintype.card α / Nat.gcd n (Fintype.card α))) : Set α).toFinset.card :=
card_le_of_subset fun x hx =>
let ⟨m, hm⟩ := show x ∈ Submonoid.powers g from mem_powers_iff_mem_zpowers.2 <| hg x
Set.mem_toFinset.2
⟨(m / (Fintype.card α / Nat.gcd n (Fintype.card α)) : ℕ),
by
have hgmn : g ^ (m * Nat.gcd n (Fintype.card α)) = 1 := by
rw [pow_mul, hm, ← pow_gcd_card_eq_one_iff] <;> exact (mem_filter.1 hx).2
rw [zpow_ofNat, ← pow_mul, Nat.mul_div_cancel_left', hm]
refine' dvd_of_mul_dvd_mul_right (gcd_pos_of_pos_left (Fintype.card α) hn0) _
conv_lhs =>
rw [Nat.div_mul_cancel (Nat.gcd_dvd_right _ _), ←
orderOf_eq_card_of_forall_mem_zpowers hg]
exact orderOf_dvd_of_pow_eq_one hgmn⟩
_ ≤ n := by
let ⟨m, hm⟩ := Nat.gcd_dvd_right n (Fintype.card α)
have hm0 : 0 < m :=
Nat.pos_of_ne_zero fun hm0 =>
by
rw [hm0, MulZeroClass.mul_zero, Fintype.card_eq_zero_iff] at hm
exact hm.elim' 1
simp only [Set.toFinset_card, SetLike.coe_sort_coe]
rw [← orderOf_eq_card_zpowers, orderOf_pow g, orderOf_eq_card_of_forall_mem_zpowers hg]
rw [hm]
rw [Nat.mul_div_cancel_left _ (gcd_pos_of_pos_left _ hn0), gcd_mul_left_left, hm,
Nat.mul_div_cancel _ hm0]
exact le_of_dvd hn0 (Nat.gcd_dvd_left _ _)
#align is_cyclic.card_pow_eq_one_le IsCyclic.card_pow_eq_one_le
#align is_add_cyclic.card_pow_eq_one_le IsAddCyclic.card_pow_eq_one_le
end Classical
@[to_additive]
theorem IsCyclic.exists_monoid_generator [Finite α] [IsCyclic α] :
∃ x : α, ∀ y : α, y ∈ Submonoid.powers x :=
by
simp_rw [mem_powers_iff_mem_zpowers]
exact IsCyclic.exists_generator α
#align is_cyclic.exists_monoid_generator IsCyclic.exists_monoid_generator
#align is_add_cyclic.exists_add_monoid_generator IsAddCyclic.exists_add_monoid_generator
section
variable [DecidableEq α] [Fintype α]
@[to_additive]
theorem IsCyclic.image_range_orderOf (ha : ∀ x : α, x ∈ zpowers a) :
Finset.image (fun i => a ^ i) (range (orderOf a)) = univ :=
by
simp_rw [← SetLike.mem_coe] at ha
simp only [image_range_orderOf, set.eq_univ_iff_forall.mpr ha, Set.toFinset_univ]
#align is_cyclic.image_range_order_of IsCyclic.image_range_orderOf
#align is_add_cyclic.image_range_order_of IsAddCyclic.image_range_orderOf
@[to_additive]
theorem IsCyclic.image_range_card (ha : ∀ x : α, x ∈ zpowers a) :
Finset.image (fun i => a ^ i) (range (Fintype.card α)) = univ := by
rw [← orderOf_eq_card_of_forall_mem_zpowers ha, IsCyclic.image_range_orderOf ha]
#align is_cyclic.image_range_card IsCyclic.image_range_card
#align is_add_cyclic.image_range_card IsAddCyclic.image_range_card
end
section Totient
variable [DecidableEq α] [Fintype α]
(hn : ∀ n : ℕ, 0 < n → (univ.filterₓ fun a : α => a ^ n = 1).card ≤ n)
include hn
private theorem card_pow_eq_one_eq_order_of_aux (a : α) :
(Finset.univ.filterₓ fun b : α => b ^ orderOf a = 1).card = orderOf a :=
le_antisymm (hn _ (orderOf_pos a))
(calc
orderOf a = @Fintype.card (zpowers a) (id _) := orderOf_eq_card_zpowers
_ ≤
@Fintype.card (↑(univ.filterₓ fun b : α => b ^ orderOf a = 1) : Set α)
(Fintype.ofFinset _ fun _ => Iff.rfl) :=
(@Fintype.card_le_of_injective (zpowers a)
(↑(univ.filterₓ fun b : α => b ^ orderOf a = 1) : Set α) (id _) (id _)
(fun b =>
⟨b.1,
mem_filter.2
⟨mem_univ _, by
let ⟨i, hi⟩ := b.2
rw [← hi, ← zpow_ofNat, ← zpow_mul, mul_comm, zpow_mul, zpow_ofNat,
pow_orderOf_eq_one, one_zpow]⟩⟩)
fun _ _ h => Subtype.eq (Subtype.mk.inj h))
_ = (univ.filterₓ fun b : α => b ^ orderOf a = 1).card := Fintype.card_ofFinset _ _
)
#align card_pow_eq_one_eq_order_of_aux card_pow_eq_one_eq_order_of_aux
open Nat
-- use φ for nat.totient
private theorem card_order_of_eq_totient_aux₁ :
∀ {d : ℕ},
d ∣ Fintype.card α →
0 < (univ.filterₓ fun a : α => orderOf a = d).card →
(univ.filterₓ fun a : α => orderOf a = d).card = φ d :=
by
intro d hd hpos
induction' d using Nat.strongRec' with d IH
rcases Decidable.eq_or_ne d 0 with (rfl | hd0)
· cases Fintype.card_ne_zero (eq_zero_of_zero_dvd hd)
rcases card_pos.1 hpos with ⟨a, ha'⟩
have ha : orderOf a = d := (mem_filter.1 ha').2
have h1 :
(∑ m in d.proper_divisors, (univ.filter fun a : α => orderOf a = m).card) =
∑ m in d.proper_divisors, φ m :=
by
refine' Finset.sum_congr rfl fun m hm => _
simp only [mem_filter, mem_range, mem_proper_divisors] at hm
refine' IH m hm.2 (hm.1.trans hd) (Finset.card_pos.2 ⟨a ^ (d / m), _⟩)
simp only [mem_filter, mem_univ, orderOf_pow a, ha, true_and_iff,
Nat.gcd_eq_right (div_dvd_of_dvd hm.1), Nat.div_div_self hm.1 hd0]
have h2 :
(∑ m in d.divisors, (univ.filter fun a : α => orderOf a = m).card) = ∑ m in d.divisors, φ m :=
by
rw [← filter_dvd_eq_divisors hd0, sum_card_orderOf_eq_card_pow_eq_one hd0,
filter_dvd_eq_divisors hd0, sum_totient, ← ha, card_pow_eq_one_eq_order_of_aux hn a]
simpa [← cons_self_proper_divisors hd0, ← h1] using h2
#align card_order_of_eq_totient_aux₁ card_order_of_eq_totient_aux₁
theorem card_orderOf_eq_totient_aux₂ {d : ℕ} (hd : d ∣ Fintype.card α) :
(univ.filterₓ fun a : α => orderOf a = d).card = φ d :=
by
let c := Fintype.card α
have hc0 : 0 < c := Fintype.card_pos_iff.2 ⟨1⟩
apply card_order_of_eq_totient_aux₁ hn hd
by_contra h0
simp only [not_lt, _root_.le_zero_iff, card_eq_zero] at h0
apply lt_irrefl c
calc
c = ∑ m in c.divisors, (univ.filter fun a : α => orderOf a = m).card :=
by
simp only [← filter_dvd_eq_divisors hc0.ne', sum_card_orderOf_eq_card_pow_eq_one hc0.ne']
apply congr_arg card
simp
_ = ∑ m in c.divisors.erase d, (univ.filter fun a : α => orderOf a = m).card :=
by
rw [eq_comm]
refine' sum_subset (erase_subset _ _) fun m hm₁ hm₂ => _
have : m = d := by
contrapose! hm₂
exact mem_erase_of_ne_of_mem hm₂ hm₁
simp [this, h0]
_ ≤ ∑ m in c.divisors.erase d, φ m :=
by
refine' sum_le_sum fun m hm => _
have hmc : m ∣ c := by
simp only [mem_erase, mem_divisors] at hm
tauto
rcases(Filter (fun a : α => orderOf a = m) univ).card.eq_zero_or_pos with (h1 | h1)
· simp [h1]
· simp [card_order_of_eq_totient_aux₁ hn hmc h1]
_ < ∑ m in c.divisors, φ m :=
(sum_erase_lt_of_pos (mem_divisors.2 ⟨hd, hc0.ne'⟩) (totient_pos (pos_of_dvd_of_pos hd hc0)))
_ = c := sum_totient _
#align card_order_of_eq_totient_aux₂ card_orderOf_eq_totient_aux₂
theorem isCyclic_of_card_pow_eq_one_le : IsCyclic α :=
have : (univ.filterₓ fun a : α => orderOf a = Fintype.card α).Nonempty :=
card_pos.1 <| by
rw [card_orderOf_eq_totient_aux₂ hn dvd_rfl] <;>
exact totient_pos (Fintype.card_pos_iff.2 ⟨1⟩)
let ⟨x, hx⟩ := this
isCyclic_of_orderOf_eq_card x (Finset.mem_filter.1 hx).2
#align is_cyclic_of_card_pow_eq_one_le isCyclic_of_card_pow_eq_one_le
theorem isAddCyclic_of_card_pow_eq_one_le {α} [AddGroup α] [DecidableEq α] [Fintype α]
(hn : ∀ n : ℕ, 0 < n → (univ.filterₓ fun a : α => n • a = 0).card ≤ n) : IsAddCyclic α :=
by
obtain ⟨g, hg⟩ := @isCyclic_of_card_pow_eq_one_le (Multiplicative α) _ _ _ hn
exact ⟨⟨g, hg⟩⟩
#align is_add_cyclic_of_card_pow_eq_one_le isAddCyclic_of_card_pow_eq_one_le
attribute [to_additive isCyclic_of_card_pow_eq_one_le] isAddCyclic_of_card_pow_eq_one_le
end Totient
theorem IsCyclic.card_orderOf_eq_totient [IsCyclic α] [Fintype α] {d : ℕ}
(hd : d ∣ Fintype.card α) : (univ.filterₓ fun a : α => orderOf a = d).card = totient d := by
classical apply card_orderOf_eq_totient_aux₂ (fun n => IsCyclic.card_pow_eq_one_le) hd
#align is_cyclic.card_order_of_eq_totient IsCyclic.card_orderOf_eq_totient
theorem IsAddCyclic.card_order_of_eq_totient {α} [AddGroup α] [IsAddCyclic α] [Fintype α] {d : ℕ}
(hd : d ∣ Fintype.card α) : (univ.filterₓ fun a : α => addOrderOf a = d).card = totient d :=
by
obtain ⟨g, hg⟩ := id ‹IsAddCyclic α›
exact @IsCyclic.card_orderOf_eq_totient (Multiplicative α) _ ⟨⟨g, hg⟩⟩ _ _ hd
#align is_add_cyclic.card_order_of_eq_totient IsAddCyclic.card_order_of_eq_totient
attribute [to_additive IsCyclic.card_orderOf_eq_totient] IsAddCyclic.card_order_of_eq_totient
/-- A finite group of prime order is simple. -/
@[to_additive "A finite group of prime order is simple."]
theorem isSimpleGroup_of_prime_card {α : Type u} [Group α] [Fintype α] {p : ℕ} [hp : Fact p.Prime]
(h : Fintype.card α = p) : IsSimpleGroup α :=
⟨by
have h' := Nat.Prime.one_lt (Fact.out p.prime)
rw [← h] at h'
haveI := Fintype.one_lt_card_iff_nontrivial.1 h'
apply exists_pair_ne α, fun H Hn => by
classical
have hcard := card_subgroup_dvd_card H
rw [h, dvd_prime (Fact.out p.prime)] at hcard
refine' hcard.imp (fun h1 => _) fun hp => _
· haveI := Fintype.card_le_one_iff_subsingleton.1 (le_of_eq h1)
apply eq_bot_of_subsingleton
· exact eq_top_of_card_eq _ (hp.trans h.symm)⟩
#align is_simple_group_of_prime_card isSimpleGroup_of_prime_card
#align is_simple_add_group_of_prime_card is_simple_add_group_of_prime_card
end Cyclic
section QuotientCenter
open Subgroup
variable {G : Type _} {H : Type _} [Group G] [Group H]
/-- A group is commutative if the quotient by the center is cyclic.
Also see `comm_group_of_cycle_center_quotient` for the `comm_group` instance. -/
@[to_additive commutative_of_add_cyclic_center_quotient
"A group is commutative if the quotient by\n the center is cyclic. Also see `add_comm_group_of_cycle_center_quotient`\n for the `add_comm_group` instance."]
theorem commutative_of_cyclic_center_quotient [IsCyclic H] (f : G →* H) (hf : f.ker ≤ center G)
(a b : G) : a * b = b * a :=
let ⟨⟨x, y, (hxy : f y = x)⟩, (hx : ∀ a : f, a ∈ zpowers _)⟩ := IsCyclic.exists_generator f.range
let ⟨m, hm⟩ := hx ⟨f a, a, rfl⟩
let ⟨n, hn⟩ := hx ⟨f b, b, rfl⟩
have hm : x ^ m = f a := by simpa [Subtype.ext_iff] using hm
have hn : x ^ n = f b := by simpa [Subtype.ext_iff] using hn
have ha : y ^ (-m) * a ∈ center G :=
hf (by rw [f.mem_ker, f.map_mul, f.map_zpow, hxy, zpow_neg, hm, inv_mul_self])
have hb : y ^ (-n) * b ∈ center G :=
hf (by rw [f.mem_ker, f.map_mul, f.map_zpow, hxy, zpow_neg, hn, inv_mul_self])
calc
a * b = y ^ m * (y ^ (-m) * a * y ^ n) * (y ^ (-n) * b) := by simp [mul_assoc]
_ = y ^ m * (y ^ n * (y ^ (-m) * a)) * (y ^ (-n) * b) := by rw [mem_center_iff.1 ha]
_ = y ^ m * y ^ n * y ^ (-m) * (a * (y ^ (-n) * b)) := by simp [mul_assoc]
_ = y ^ m * y ^ n * y ^ (-m) * (y ^ (-n) * b * a) := by rw [mem_center_iff.1 hb]
_ = b * a := by group
#align commutative_of_cyclic_center_quotient commutative_of_cyclic_center_quotient
#align commutative_of_add_cyclic_center_quotient commutative_of_add_cyclic_center_quotient
/-- A group is commutative if the quotient by the center is cyclic. -/
@[to_additive commutativeOfAddCycleCenterQuotient
"A group is commutative if the quotient by\n the center is cyclic."]
def commGroupOfCycleCenterQuotient [IsCyclic H] (f : G →* H) (hf : f.ker ≤ center G) :
CommGroup G :=
{ show Group G by infer_instance with mul_comm := commutative_of_cyclic_center_quotient f hf }
#align comm_group_of_cycle_center_quotient commGroupOfCycleCenterQuotient
#align commutative_of_add_cycle_center_quotient commutativeOfAddCycleCenterQuotient
end QuotientCenter
namespace IsSimpleGroup
section CommGroup
variable [CommGroup α] [IsSimpleGroup α]
@[to_additive IsSimpleAddGroup.is_add_cyclic]
instance (priority := 100) : IsCyclic α :=
by
cases' subsingleton_or_nontrivial α with hi hi <;> haveI := hi
· apply isCyclic_of_subsingleton
· obtain ⟨g, hg⟩ := exists_ne (1 : α)
refine' ⟨⟨g, fun x => _⟩⟩
cases' IsSimpleOrder.eq_bot_or_eq_top (Subgroup.zpowers g) with hb ht
· exfalso
apply hg
rw [← Subgroup.mem_bot, ← hb]
apply Subgroup.mem_zpowers
· rw [ht]
apply Subgroup.mem_top
@[to_additive]
theorem prime_card [Fintype α] : (Fintype.card α).Prime :=
by
have h0 : 0 < Fintype.card α := Fintype.card_pos_iff.2 (by infer_instance)
obtain ⟨g, hg⟩ := IsCyclic.exists_generator α
rw [Nat.prime_def_lt'']
refine' ⟨Fintype.one_lt_card_iff_nontrivial.2 inferInstance, fun n hn => _⟩
refine' (IsSimpleOrder.eq_bot_or_eq_top (Subgroup.zpowers (g ^ n))).symm.imp _ _
· intro h
have hgo := orderOf_pow g
rw [orderOf_eq_card_of_forall_mem_zpowers hg, Nat.gcd_eq_right_iff_dvd.1 hn,
orderOf_eq_card_of_forall_mem_zpowers, eq_comm,
Nat.div_eq_iff_eq_mul_left (Nat.pos_of_dvd_of_pos hn h0) hn] at hgo
· exact (mul_left_cancel₀ (ne_of_gt h0) ((mul_one (Fintype.card α)).trans hgo)).symm
· intro x
rw [h]
exact Subgroup.mem_top _
· intro h
apply le_antisymm (Nat.le_of_dvd h0 hn)
rw [← orderOf_eq_card_of_forall_mem_zpowers hg]
apply orderOf_le_of_pow_eq_one (Nat.pos_of_dvd_of_pos hn h0)
rw [← Subgroup.mem_bot, ← h]
exact Subgroup.mem_zpowers _
#align is_simple_group.prime_card IsSimpleGroup.prime_card
#align is_simple_add_group.prime_card IsSimpleAddGroup.prime_card
end CommGroup
end IsSimpleGroup
@[to_additive AddCommGroup.is_simple_iff_is_add_cyclic_and_prime_card]
theorem CommGroup.is_simple_iff_isCyclic_and_prime_card [Fintype α] [CommGroup α] :
IsSimpleGroup α ↔ IsCyclic α ∧ (Fintype.card α).Prime :=
by
constructor
· intro h
exact ⟨IsSimpleGroup.isCyclic, IsSimpleGroup.prime_card⟩
· rintro ⟨hc, hp⟩
haveI : Fact (Fintype.card α).Prime := ⟨hp⟩
exact isSimpleGroup_of_prime_card rfl
#align comm_group.is_simple_iff_is_cyclic_and_prime_card CommGroup.is_simple_iff_isCyclic_and_prime_card
#align add_comm_group.is_simple_iff_is_add_cyclic_and_prime_card AddCommGroup.is_simple_iff_is_add_cyclic_and_prime_card
section Exponent
open Monoid
@[to_additive]
theorem IsCyclic.exponent_eq_card [Group α] [IsCyclic α] [Fintype α] :
exponent α = Fintype.card α :=
by
obtain ⟨g, hg⟩ := IsCyclic.exists_generator α
apply Nat.dvd_antisymm
· rw [← lcm_order_eq_exponent, Finset.lcm_dvd_iff]
exact fun b _ => orderOf_dvd_card_univ
rw [← orderOf_eq_card_of_forall_mem_zpowers hg]
exact order_dvd_exponent _
#align is_cyclic.exponent_eq_card IsCyclic.exponent_eq_card
#align is_add_cyclic.exponent_eq_card IsAddCyclic.exponent_eq_card
@[to_additive]
theorem IsCyclic.of_exponent_eq_card [CommGroup α] [Fintype α] (h : exponent α = Fintype.card α) :
IsCyclic α :=
let ⟨g, _, hg⟩ := Finset.mem_image.mp (Finset.max'_mem _ _)
isCyclic_of_orderOf_eq_card g <| hg.trans <| exponent_eq_max'_orderOf.symm.trans h
#align is_cyclic.of_exponent_eq_card IsCyclic.of_exponent_eq_card
#align is_add_cyclic.of_exponent_eq_card IsAddCyclic.of_exponent_eq_card
@[to_additive]
theorem IsCyclic.iff_exponent_eq_card [CommGroup α] [Fintype α] :
IsCyclic α ↔ exponent α = Fintype.card α :=
⟨fun h => IsCyclic.exponent_eq_card, IsCyclic.of_exponent_eq_card⟩
#align is_cyclic.iff_exponent_eq_card IsCyclic.iff_exponent_eq_card
#align is_add_cyclic.iff_exponent_eq_card IsAddCyclic.iff_exponent_eq_card
@[to_additive]
theorem IsCyclic.exponent_eq_zero_of_infinite [Group α] [IsCyclic α] [Infinite α] :
exponent α = 0 :=
let ⟨g, hg⟩ := IsCyclic.exists_generator α
exponent_eq_zero_of_order_zero <| Infinite.orderOf_eq_zero_of_forall_mem_zpowers hg
#align is_cyclic.exponent_eq_zero_of_infinite IsCyclic.exponent_eq_zero_of_infinite
#align is_add_cyclic.exponent_eq_zero_of_infinite IsAddCyclic.exponent_eq_zero_of_infinite
end Exponent
|
# -*- coding: utf-8 -*-
import datetime
import json
import os
import time
import numpy as np
import torch
from dwi_ml.data.dataset.data_list import (DataListForTorch,
                                           LazyDataListForTorch)
# NOTE: MultiSubjectDataset / LazyMultiSubjectDataset are instantiated below
# but were never imported; the module path here is an assumption and may
# differ in your version of dwi_ml.
from dwi_ml.data.dataset.multi_subject_containers import (
    LazyMultiSubjectDataset, MultiSubjectDataset)
from dwi_ml.experiment.timer import Timer
from dwi_ml.experiment.monitoring import ValueHistoryMonitor
class DWIMLAbstractLocal:
""" Meant for projects working on learning local information in the
voxel. Information will be X = a voxel. """
def __init__(self):
raise NotImplementedError
def build_model(self):
raise NotImplementedError
def train(self, **kwargs):
raise NotImplementedError
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
class DWIMLAbstractSequences:
""" Meant for projects working on learning tractography. Information will
be X = sequences."""
def __init__(self,
train_database_path,
valid_database_path,
name: str = None,
# Concerning the choice of inputs:
nb_degree_angles: int = 128,
add_streamline_noise: bool = False,
streamlines_cut_ratio: float = None, step_size: float = None,
neighborhood_dist_mm: float = None,
nb_neighborhood_axes: int = 6,
add_previous_dir: bool = False,
lazy: bool = False,
# Concerning the memory usage:
batch_size: int = 20000, volumes_per_batch: int = None,
cycles_per_volume_batch: int = 1,
n_epoch: int = 100, seed: int = 1234, patience: int = 20,
use_gpu: bool = True, num_workers: int = 0,
worker_interpolation: bool = False,
cache_manager: bool = False, taskman_managed: bool = False):
"""
Mandatory parameters:
---------------------
train_database_path : str
Path to training database (hdf5 file)
valid_database_path : str
Path to validation database (hdf5 file)
Optional parameters:
--------------------
====> General
name : str
Optional name of the experiment. If given, it is prepended to the
auto-generated name. [None]
====> Concerning the choice of inputs:
nb_degree_angles: int
            Precision for angles: number of directions on the sphere. If the
            previous direction is added to the input, we need to know how many
            directions there are. But the output is managed with output_model,
            not with this option. [128]
add_streamline_noise : bool
If set, add random gaussian noise to streamline coordinates
on-the-fly. Noise variance is 0.1 * step-size, or 0.1mm if no step
size is used. [False]
streamlines_cut_ratio : float
Percentage of streamlines to randomly cut in each batch. If None, do
not split streamlines. [None]
NOTE: Preprocessed .hdf5 file should contain resampled
streamlines; otherwise, cutting streamlines will be biased
towards long segments (less points)
step_size : float
Constant step size that every streamline should have between points
(in mm). If None, train on streamlines as they are (ex, compressed).
[None]
neighborhood_dist_mm : float
If given, add neighboring information to the input signal at the
given distance in each axis (in mm). [None]
        nb_neighborhood_axes : int
Nb of axes at which to get neighborhood distance. Default = 6 (up,
down, left, right, front, back).
add_previous_dir : bool
If set, add the previous streamline direction to the input signal.
[False]
lazy : bool
If True, use a lazy dataset. [False]
====> Concerning the memory usage:
batch_size : int
            Number of time steps to use in a batch (the lengths of sequences
            vary a lot, so we define the number of time steps in order to use
            a more consistent amount of memory) [20,000]
volumes_per_batch : int
Limit the number of sampled volumes inside a single batch.
If None, use true random sampling. [None]
cycles_per_volume_batch : int
Number of batches where the same volumes will be reused before
sampling new volumes. [1]
n_epoch : int
Maximum number of epochs [100]
seed : int
Seed for random numbers [1234]
patience : int
Use early stopping. Defines the number of epochs after which
the model should stop training if the loss hasn't improved. [20]
use_gpu : bool
Use the GPU; if False, use CPU. [True]
num_workers : int
Number of processes that should process the data between training
updates. [0]
worker_interpolation : bool
If True and num_workers > 0, interpolation will be done on CPU by
the workers. Otherwise, interpolation is done on the main thread
using the chosen device. [False]
cache_manager : bool
If True, use a cache manager to keep volumes and streamlines in
memory. [False]
taskman_managed : bool
If True, taskman manages the experiment. Do not output progress
bars and instead output special messages for taskman. [False]
"""
# Init mandatory properties
self.train_database_path = train_database_path
self.valid_database_path = valid_database_path
# Init optional properties
self.name = name
# Init "globals" from user's project
self.nb_degree_angles = nb_degree_angles
# Init args concerning choice of inputs
self.add_streamline_noise = add_streamline_noise
self.streamlines_cut_ratio = streamlines_cut_ratio
self.step_size = step_size
self.neighborhood_dist_mm = neighborhood_dist_mm
        self.nb_neighborhood_axes = nb_neighborhood_axes  # toDo. To decide!! I may have to change int to str='method'
        # We would have a "6axes" method and a "mimicGrid" method for my CNN,
        # where I would take 27 axes, not all of the same length! Possibly
        # twice as many axes, to get the equivalent of 2 voxels around my
        # point in all directions. This could be [str|int]
self.add_previous_dir = add_previous_dir
self.lazy = lazy
# Init args concerning memory usage
self.batch_size = int(batch_size)
self.volumes_per_batch = volumes_per_batch
self.n_epoch = int(n_epoch)
self.seed = seed
self.patience = patience
self.use_gpu = use_gpu
self.num_workers = num_workers
self.worker_interpolation = worker_interpolation
self.cycles_per_volume_batch = cycles_per_volume_batch
self.cache_manager = cache_manager
self.taskman_managed = taskman_managed
self.taskman_report = {
'loss_train': None,
'loss_valid': None,
'epoch': None,
'best_epoch': None,
'best_score': None,
'update': None,
'update_loss': None
}
# Time limited run
self.hangup_time = None
htime = os.environ.get('HANGUP_TIME', None)
if htime is not None:
self.hangup_time = int(htime)
print('Will hang up at ' + htime)
# Set device
self.device = None
if self.use_gpu and torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
# Set random numbers
self.rng = np.random.RandomState(self.seed)
torch.manual_seed(self.seed) # Set torch seed
if self.use_gpu:
            torch.cuda.manual_seed(self.seed)  # toDo. Why does this say error?
# If using worker_interpolation, data is processed on CPU
self.dataset_device = torch.device(
'cpu') if self.worker_interpolation else self.device
# Init datasets
        # NOTE. WE HOPE THAT MULTISUBJECT CAN REALLY BE COMMON TO ALL OF US.
        # So, I've put the dataset creation here in the abstract. Otherwise,
        # we can bring it back to each user's script.
other_kw_args = {}
if self.lazy:
dataset_cls = LazyMultiSubjectDataset
if self.cache_manager:
other_kw_args['cache_size'] = self.volumes_per_batch
else:
dataset_cls = MultiSubjectDataset
self.train_dataset = dataset_cls(
self.train_database_path, self.rng,
add_streamline_noise=self.add_streamline_noise,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=self.streamlines_cut_ratio,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
self.valid_dataset = dataset_cls(
self.valid_database_path, self.rng,
add_streamline_noise=False,
step_size=self.step_size,
neighborhood_dist_mm=self.neighborhood_dist_mm,
streamlines_cut_ratio=None,
add_previous_dir=self.add_previous_dir,
do_interpolation=self.worker_interpolation,
device=self.dataset_device,
taskman_managed=self.taskman_managed,
**other_kw_args)
# Other variables
self.sh_order = None # Will be set once the dataset is loaded
self.input_size = None # Will be set once the dataset is loaded
self.current_epoch = 0
self.experiment_dir = (self.name if self.name
else datetime.datetime.now().strftime(
"%Y_%m_%d_%H%M%S")) + '_' + type(self).__name__
self.optimizer = None # Will be defined later with ADAM
self.model = None # Will be defined by the main user
# Setup monitors
self.train_loss_monitor = ValueHistoryMonitor("Training loss")
self.valid_loss_monitor = ValueHistoryMonitor("Validation loss")
        self.grad_norm_monitor = ValueHistoryMonitor("Grad Norm")  # ToDo Does everyone use grad norm??
def train(self, **kwargs):
raise NotImplementedError
# ToDo: "train" depends on each user, but can we define
# sub-functions here that could encapsulate some sub-tasks that
# everybody uses? One day we could compare our codes.
def save(self):
raise NotImplementedError
def load_model(self, filepath, **kwargs):
raise NotImplementedError
def load_dataset(self):
"""
This method loads the data (streamlines and data volume).
"""
with Timer("Loading training dataset", newline=True, color='blue'):
self.train_dataset.load()
input_size = self._compute_input_size()
self.input_size = input_size
self.sh_order = self.train_dataset.sh_order
with Timer("Loading validation dataset", newline=True, color='blue'):
self.valid_dataset.load()
def _compute_input_size(self):
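        # Worked example (assumed values): with feature size F, a neighborhood
        # enabled with nb_neighborhood_axes=6, and add_previous_dir=True, this
        # returns F + 6 * F + nb_degree_angles.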
# Basic input size
expected_input_size = self.train_dataset.multisubject_manager.feature_size
# + neighbors
if self.neighborhood_dist_mm:
expected_input_size += \
self.nb_neighborhood_axes * \
self.train_dataset.multisubject_manager.feature_size
# + previous direction
if self.add_previous_dir:
expected_input_size += self.nb_degree_angles
return expected_input_size
def _should_quit(self, iter_timer):
# If:
# hang up signal received
        #   time remaining is less than two mean iterations + 30 seconds
# exit training.
return (self.hangup_time is not None and
time.time() + iter_timer.mean * 2.0 + 30 > self.hangup_time)
def _update_taskman_report(self, updates):
self.taskman_report.update(updates)
self.taskman_report['time'] = time.time()
print('!taskman' + json.dumps(self.taskman_report), flush=True)
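# Usage sketch (not part of dwi_ml; the subclass, file names, and training
# loop below are hypothetical). A concrete experiment subclasses
# DWIMLAbstractSequences, implements train/save/load_model, and calls
# load_dataset() before training:
#
# class MyExperiment(DWIMLAbstractSequences):
#     def train(self, **kwargs):
#         ...  # project-specific training loop over self.train_dataset
#
# experiment = MyExperiment('train.hdf5', 'valid.hdf5', name='demo')
# experiment.load_dataset()
# experiment.train()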
|
chapter \<open> Probability Logic \label{chapter:probability} \<close>
theory Probability_Logic
imports
"Propositional_Logic_Class.Classical_Connectives"
HOL.Real
"HOL-Library.Countable"
begin
no_notation FuncSet.funcset (infixr "\<rightarrow>" 60)
section \<open> Definition of Probability Logic \label{sec:definition-of-probability-logic} \<close>
text \<open> Probability logic is defined in terms of an operator over
classical logic obeying certain postulates. Scholars often credit
George Boole for first conceiving this kind of formulation @{cite booleChapterXVITheory1853}.
Theodore Hailperin in particular has written extensively on this subject
@{cite hailperinProbabilityLogic1984
and hailperinBooleLogicProbability1986
and hailperinSententialProbabilityLogic1996}. \<close>
text \<open> The presentation below roughly follows Kolmogorov's axiomatization
@{cite kolmogoroffChapterElementareWahrscheinlichkeitsrechnung1933}.
A key difference is that we only require \<^emph>\<open>finite additivity\<close>, rather
than \<^emph>\<open>countable additivity\<close>. Finite additivity is also defined in
terms of implication \<^term>\<open>(\<rightarrow>)\<close>. \<close>
class probability_logic = classical_logic +
fixes \<P> :: "'a \<Rightarrow> real"
assumes probability_non_negative: "\<P> \<phi> \<ge> 0"
assumes probability_unity: "\<turnstile> \<phi> \<Longrightarrow> \<P> \<phi> = 1"
assumes probability_implicational_additivity:
"\<turnstile> \<phi> \<rightarrow> \<psi> \<rightarrow> \<bottom> \<Longrightarrow> \<P> ((\<phi> \<rightarrow> \<bottom>) \<rightarrow> \<psi>) = \<P> \<phi> + \<P> \<psi>"
text \<open> A similar axiomatization may be credited to
Rescher @{cite \<open>pg. 185\<close> rescherManyvaluedLogic1969}.
However, our formulation has fewer axioms.
While Rescher assumes \<^term>\<open>\<turnstile> \<phi> \<leftrightarrow> \<psi> \<Longrightarrow> \<P> \<phi> = \<P> \<psi>\<close>, we
show this is a lemma in \S\ref{sec:prob-logic-alt-def}. \<close>
section \<open> Why Finite Additivity? \label{section:why-finite-additivity} \<close>
text \<open> In this section we touch on why we have chosen to
employ finite additivity in our axiomatization of
@{class probability_logic} and deviate from conventional
probability theory. \<close>
text \<open> Conventional probability obeys an axiom known as \<^emph>\<open>countable additivity\<close>.
  Traditionally it states that if \<open>S\<close> is a countable set of sets which are
pairwise disjoint, then the limit \<open>\<Sum> s \<in> S. \<P> s\<close> exists and
\<open>\<P> (\<Union> S) = (\<Sum> s \<in> S. \<P> s)\<close>. This is more powerful than our
finite additivity axiom
@{lemma \<open>\<turnstile> \<phi> \<rightarrow> \<psi> \<rightarrow> \<bottom> \<Longrightarrow> \<P> ((\<phi> \<rightarrow> \<bottom>) \<rightarrow> \<psi>) = \<P> \<phi> + \<P> \<psi>\<close>
by (metis probability_implicational_additivity) }. \<close>
text \<open> However, we argue that demanding countable additivity is not practical. \<close>
text \<open> Historically, the statisticians Bruno de Finetti and Leonard Savage
gave the most well known critiques. In @{cite definettiSuiPassaggiLimite1930}
de Finetti shows various properties which are true for countably additive
probability measures may not hold for finitely additive measures.
Savage @{cite savageDifficultiesTheoryPersonal1967}, on the other hand,
  develops probability based on choices among prizes in lotteries. \<close>
text \<open> We instead argue that if we demand countable additivity, then certain
properties of real world software would no longer be formally verifiable
as we demonstrate here. In particular, it prohibits conventional recursive
data structures for defining propositions. Our argument is derivative of
one given by Giangiacomo Gerla @{cite \<open>Section 3\<close> gerlaInferencesProbabilityLogic1994}. \<close>
text \<open> By taking equivalence classes modulo \<^term>\<open>\<lambda> \<phi> \<psi> . \<turnstile> \<phi> \<leftrightarrow> \<psi>\<close>,
any classical logic instance gives rise to a Boolean algebra known as
a \<^emph>\<open>Lindenbaum Algebra\<close>. In the case of @{typ "'a classical_propositional_formula"}
  this Boolean algebra is both countable and \<^emph>\<open>atomless\<close>.
A theorem of Horn and Tarski @{cite \<open>Theorem 3.2\<close> hornMeasuresBooleanAlgebras1948}
asserts there can be no countably additive \<^term>\<open>Pr\<close> for a countable
atomless Boolean algebra. \<close>
text \<open> The above argument is not intended as a blanket refutation of conventional
probability theory. It is simply an impossibility result with respect
to software implementations of probability logic. Plenty of classic results
in probability rely on countable additivity. A nice example, formalized in
  Isabelle/HOL, is Buffon's needle @{cite eberlBuffonNeedleProblem2017}. \<close>
section \<open> Basic Properties of Probability Logic \<close>
lemma (in probability_logic) probability_additivity:
assumes "\<turnstile> \<sim> (\<phi> \<sqinter> \<psi>)"
shows "\<P> (\<phi> \<squnion> \<psi>) = \<P> \<phi> + \<P> \<psi>"
using
assms
unfolding
conjunction_def
disjunction_def
negation_def
by (simp add: probability_implicational_additivity)
lemma (in probability_logic) probability_alternate_additivity:
assumes "\<turnstile> \<phi> \<rightarrow> \<psi> \<rightarrow> \<bottom>"
shows "\<P> (\<phi> \<squnion> \<psi>) = \<P> \<phi> + \<P> \<psi>"
using assms
by (metis
probability_additivity
double_negation_converse
modus_ponens
conjunction_def
negation_def)
lemma (in probability_logic) complementation:
"\<P> (\<sim> \<phi>) = 1 - \<P> \<phi>"
by (metis
probability_alternate_additivity
probability_unity
bivalence
negation_elimination
add.commute
add_diff_cancel_left')
lemma (in probability_logic) unity_upper_bound:
"\<P> \<phi> \<le> 1"
by (metis
(no_types)
diff_ge_0_iff_ge
probability_non_negative
complementation)
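text \<open> Together, non-negativity and the upper bound above place every
       probability in the unit interval. We record this as a small
       illustrative corollary. \<close>

lemma (in probability_logic) unit_interval:
  "0 \<le> \<P> \<phi> \<and> \<P> \<phi> \<le> 1"
  using probability_non_negative unity_upper_bound by auto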
section \<open> Alternate Definition of Probability Logic \label{sec:prob-logic-alt-def} \<close>
text \<open> There is an alternate axiomatization of probability logic,
  due to Brian Gaines @{cite \<open>pg. 159, postulates P7, P8, and P9\<close> gainesFuzzyProbabilityUncertainty1978}
and independently formulated by Brian Weatherson @{cite weathersonClassicalIntuitionisticProbability2003}.
As Weatherson notes, this axiomatization is suited to formulating
\<^emph>\<open>intuitionistic\<close> probability logic. In the case where the underlying
logic is classical the Gaines/Weatherson axiomatization is equivalent
to the traditional Kolmogorov axiomatization from
\S\ref{sec:definition-of-probability-logic}. \<close>
class gaines_weatherson_probability = classical_logic +
fixes \<P> :: "'a \<Rightarrow> real"
assumes gaines_weatherson_thesis:
"\<P> \<top> = 1"
assumes gaines_weatherson_antithesis:
"\<P> \<bottom> = 0"
assumes gaines_weatherson_monotonicity:
"\<turnstile> \<phi> \<rightarrow> \<psi> \<Longrightarrow> \<P> \<phi> \<le> \<P> \<psi>"
assumes gaines_weatherson_sum_rule:
"\<P> \<phi> + \<P> \<psi> = \<P> (\<phi> \<sqinter> \<psi>) + \<P> (\<phi> \<squnion> \<psi>)"
sublocale gaines_weatherson_probability \<subseteq> probability_logic
proof
fix \<phi>
have "\<turnstile> \<bottom> \<rightarrow> \<phi>"
by (simp add: ex_falso_quodlibet)
thus "0 \<le> \<P> \<phi>"
using
gaines_weatherson_antithesis
gaines_weatherson_monotonicity
by fastforce
next
fix \<phi>
assume "\<turnstile> \<phi>"
thus "\<P> \<phi> = 1"
by (metis
gaines_weatherson_thesis
gaines_weatherson_monotonicity
eq_iff
axiom_k
ex_falso_quodlibet
modus_ponens
verum_def)
next
fix \<phi> \<psi>
assume "\<turnstile> \<phi> \<rightarrow> \<psi> \<rightarrow> \<bottom>"
hence "\<turnstile> \<sim> (\<phi> \<sqinter> \<psi>)"
by (simp add: conjunction_def negation_def)
thus "\<P> ((\<phi> \<rightarrow> \<bottom>) \<rightarrow> \<psi>) = \<P> \<phi> + \<P> \<psi>"
by (metis
add.commute
add.right_neutral
eq_iff
disjunction_def
ex_falso_quodlibet
negation_def
gaines_weatherson_antithesis
gaines_weatherson_monotonicity
gaines_weatherson_sum_rule)
qed
lemma (in probability_logic) monotonicity:
"\<turnstile> \<phi> \<rightarrow> \<psi> \<Longrightarrow> \<P> \<phi> \<le> \<P> \<psi>"
proof -
assume "\<turnstile> \<phi> \<rightarrow> \<psi>"
hence "\<turnstile> \<sim> (\<phi> \<sqinter> \<sim> \<psi>)"
unfolding negation_def conjunction_def
by (metis
conjunction_def
exclusion_contrapositive_equivalence
negation_def
weak_biconditional_weaken)
hence "\<P> (\<phi> \<squnion> \<sim> \<psi>) = \<P> \<phi> + \<P> (\<sim> \<psi>)"
by (simp add: probability_additivity)
hence "\<P> \<phi> + \<P> (\<sim> \<psi>) \<le> 1"
by (metis unity_upper_bound)
hence "\<P> \<phi> + 1 - \<P> \<psi> \<le> 1"
by (simp add: complementation)
thus ?thesis by linarith
qed
lemma (in probability_logic) biconditional_equivalence:
"\<turnstile> \<phi> \<leftrightarrow> \<psi> \<Longrightarrow> \<P> \<phi> = \<P> \<psi>"
by (meson
eq_iff
modus_ponens
biconditional_left_elimination
biconditional_right_elimination
monotonicity)
lemma (in probability_logic) sum_rule:
"\<P> (\<phi> \<squnion> \<psi>) + \<P> (\<phi> \<sqinter> \<psi>) = \<P> \<phi> + \<P> \<psi>"
proof -
have "\<turnstile> (\<phi> \<squnion> \<psi>) \<leftrightarrow> (\<phi> \<squnion> \<psi> \<setminus> (\<phi> \<sqinter> \<psi>))"
proof -
have "\<forall> \<MM>. \<MM> \<Turnstile>\<^sub>p\<^sub>r\<^sub>o\<^sub>p (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<squnion> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<leftrightarrow> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<squnion> \<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>))"
unfolding
classical_logic_class.subtraction_def
classical_logic_class.negation_def
classical_logic_class.biconditional_def
classical_logic_class.conjunction_def
classical_logic_class.disjunction_def
by simp
hence "\<turnstile> \<^bold>\<lparr> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<squnion> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<leftrightarrow> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<squnion> \<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<^bold>\<rparr>"
using propositional_semantics by blast
thus ?thesis by simp
qed
moreover have "\<turnstile> \<phi> \<rightarrow> (\<psi> \<setminus> (\<phi> \<sqinter> \<psi>)) \<rightarrow> \<bottom>"
proof -
have "\<forall> \<MM>. \<MM> \<Turnstile>\<^sub>p\<^sub>r\<^sub>o\<^sub>p \<^bold>\<langle>\<phi>\<^bold>\<rangle> \<rightarrow> (\<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<rightarrow> \<bottom>"
unfolding
classical_logic_class.subtraction_def
classical_logic_class.negation_def
classical_logic_class.biconditional_def
classical_logic_class.conjunction_def
classical_logic_class.disjunction_def
by simp
hence "\<turnstile> \<^bold>\<lparr> \<^bold>\<langle>\<phi>\<^bold>\<rangle> \<rightarrow> (\<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<rightarrow> \<bottom> \<^bold>\<rparr>"
using propositional_semantics by blast
thus ?thesis by simp
qed
hence "\<P> (\<phi> \<squnion> \<psi>) = \<P> \<phi> + \<P> (\<psi> \<setminus> (\<phi> \<sqinter> \<psi>))"
using
probability_alternate_additivity
biconditional_equivalence
calculation
by auto
moreover have "\<turnstile> \<psi> \<leftrightarrow> (\<psi> \<setminus> (\<phi> \<sqinter> \<psi>) \<squnion> (\<phi> \<sqinter> \<psi>))"
proof -
have "\<forall> \<MM>. \<MM> \<Turnstile>\<^sub>p\<^sub>r\<^sub>o\<^sub>p \<^bold>\<langle>\<psi>\<^bold>\<rangle> \<leftrightarrow> (\<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<squnion> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>))"
unfolding
classical_logic_class.subtraction_def
classical_logic_class.negation_def
classical_logic_class.biconditional_def
classical_logic_class.conjunction_def
classical_logic_class.disjunction_def
by auto
hence "\<turnstile> \<^bold>\<lparr> \<^bold>\<langle>\<psi>\<^bold>\<rangle> \<leftrightarrow> (\<^bold>\<langle>\<psi>\<^bold>\<rangle> \<setminus> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<squnion> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<^bold>\<rparr>"
      using propositional_semantics by blast
thus ?thesis by simp
qed
moreover have "\<turnstile> (\<psi> \<setminus> (\<phi> \<sqinter> \<psi>)) \<rightarrow> (\<phi> \<sqinter> \<psi>) \<rightarrow> \<bottom>"
unfolding
subtraction_def
negation_def
conjunction_def
using
conjunction_def
conjunction_right_elimination
by auto
hence "\<P> \<psi> = \<P> (\<psi> \<setminus> (\<phi> \<sqinter> \<psi>)) + \<P> (\<phi> \<sqinter> \<psi>)"
using
probability_alternate_additivity
biconditional_equivalence
calculation
by auto
ultimately show ?thesis
by simp
qed
sublocale probability_logic \<subseteq> gaines_weatherson_probability
proof
show "\<P> \<top> = 1"
by (simp add: probability_unity)
next
show "\<P> \<bottom> = 0"
by (metis
add_cancel_left_right
probability_additivity
ex_falso_quodlibet
probability_unity
bivalence
conjunction_right_elimination
negation_def)
next
fix \<phi> \<psi>
assume "\<turnstile> \<phi> \<rightarrow> \<psi>"
thus "\<P> \<phi> \<le> \<P> \<psi>"
using monotonicity
by auto
next
fix \<phi> \<psi>
show "\<P> \<phi> + \<P> \<psi> = \<P> (\<phi> \<sqinter> \<psi>) + \<P> (\<phi> \<squnion> \<psi>)"
by (metis sum_rule add.commute)
qed
sublocale probability_logic \<subseteq> consistent_classical_logic
proof
show "\<not> \<turnstile> \<bottom>" using probability_unity gaines_weatherson_antithesis by auto
qed
lemma (in probability_logic) subtraction_identity:
"\<P> (\<phi> \<setminus> \<psi>) = \<P> \<phi> - \<P> (\<phi> \<sqinter> \<psi>)"
proof -
have "\<turnstile> \<phi> \<leftrightarrow> ((\<phi> \<setminus> \<psi>) \<squnion> (\<phi> \<sqinter> \<psi>))"
proof -
have "\<forall> \<MM>. \<MM> \<Turnstile>\<^sub>p\<^sub>r\<^sub>o\<^sub>p \<^bold>\<langle>\<phi>\<^bold>\<rangle> \<leftrightarrow> ((\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<setminus> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<squnion> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>))"
unfolding
classical_logic_class.subtraction_def
classical_logic_class.negation_def
classical_logic_class.biconditional_def
classical_logic_class.conjunction_def
classical_logic_class.disjunction_def
by (simp, blast)
hence "\<turnstile> \<^bold>\<lparr> \<^bold>\<langle>\<phi>\<^bold>\<rangle> \<leftrightarrow> ((\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<setminus> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<squnion> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<^bold>\<rparr>"
using propositional_semantics by blast
thus ?thesis by simp
qed
hence "\<P> \<phi> = \<P> ((\<phi> \<setminus> \<psi>) \<squnion> (\<phi> \<sqinter> \<psi>))"
using biconditional_equivalence
by simp
moreover have "\<turnstile> \<sim>((\<phi> \<setminus> \<psi>) \<sqinter> (\<phi> \<sqinter> \<psi>))"
proof -
have "\<forall> \<MM>. \<MM> \<Turnstile>\<^sub>p\<^sub>r\<^sub>o\<^sub>p \<sim>((\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<setminus> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<sqinter> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>))"
unfolding
classical_logic_class.subtraction_def
classical_logic_class.negation_def
classical_logic_class.conjunction_def
classical_logic_class.disjunction_def
by simp
hence "\<turnstile> \<^bold>\<lparr> \<sim>((\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<setminus> \<^bold>\<langle>\<psi>\<^bold>\<rangle>) \<sqinter> (\<^bold>\<langle>\<phi>\<^bold>\<rangle> \<sqinter> \<^bold>\<langle>\<psi>\<^bold>\<rangle>)) \<^bold>\<rparr>"
using propositional_semantics by blast
thus ?thesis by simp
qed
ultimately show ?thesis
using probability_additivity
by auto
qed
section \<open> Basic Probability Logic Inequality Results \label{sec:basic-probability-inequality-results}\<close>
lemma (in probability_logic) disjunction_sum_inequality:
"\<P> (\<phi> \<squnion> \<psi>) \<le> \<P> \<phi> + \<P> \<psi>"
proof -
have "\<P> (\<phi> \<squnion> \<psi>) + \<P> (\<phi> \<sqinter> \<psi>) = \<P> \<phi> + \<P> \<psi>"
"0 \<le> \<P> (\<phi> \<sqinter> \<psi>)"
by (simp add: sum_rule, simp add: probability_non_negative)
thus ?thesis by linarith
qed
lemma (in probability_logic)
arbitrary_disjunction_list_summation_inequality:
"\<P> (\<Squnion> \<Phi>) \<le> (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>)"
proof (induct \<Phi>)
case Nil
then show ?case by (simp add: gaines_weatherson_antithesis)
next
case (Cons \<phi> \<Phi>)
have "\<P> (\<Squnion> (\<phi> # \<Phi>)) \<le> \<P> \<phi> + \<P> (\<Squnion> \<Phi>)"
using disjunction_sum_inequality
by simp
with Cons have "\<P> (\<Squnion> (\<phi> # \<Phi>)) \<le> \<P> \<phi> + (\<Sum>\<phi>\<leftarrow>\<Phi>. \<P> \<phi>)" by linarith
then show ?case by simp
qed
lemma (in probability_logic) implication_list_summation_inequality:
assumes "\<turnstile> \<phi> \<rightarrow> \<Squnion> \<Psi>"
shows "\<P> \<phi> \<le> (\<Sum>\<psi>\<leftarrow>\<Psi>. \<P> \<psi>)"
using
assms
arbitrary_disjunction_list_summation_inequality
monotonicity
order_trans
by blast
lemma (in probability_logic) arbitrary_disjunction_set_summation_inequality:
"\<P> (\<Squnion> \<Phi>) \<le> (\<Sum>\<phi> \<in> set \<Phi>. \<P> \<phi>)"
by (metis
arbitrary_disjunction_list_summation_inequality
arbitrary_disjunction_remdups
biconditional_equivalence
sum.set_conv_list)
lemma (in probability_logic) implication_set_summation_inequality:
assumes "\<turnstile> \<phi> \<rightarrow> \<Squnion> \<Psi>"
shows "\<P> \<phi> \<le> (\<Sum>\<psi> \<in> set \<Psi>. \<P> \<psi>)"
using
assms
arbitrary_disjunction_set_summation_inequality
monotonicity
order_trans
by blast
section \<open> Dirac Measures \label{sec:dirac-measures}\<close>
text \<open> Before presenting \<^emph>\<open>Dirac measures\<close> in probability logic, we first
give the set of all functions satisfying probability logic.\<close>
definition (in classical_logic) probabilities :: "('a \<Rightarrow> real) set"
where "probabilities =
{\<P>. class.probability_logic (\<lambda> \<phi>. \<turnstile> \<phi>) (\<rightarrow>) \<bottom> \<P> }"
text \<open> Traditionally, a Dirac measure is a function \<^term>\<open>\<delta>\<^sub>x\<close> where
\<^term>\<open>\<delta>\<^sub>x(S) = (1::real)\<close> if \<^term>\<open>x \<in> S\<close> and \<^term>\<open>\<delta>\<^sub>x(S) = (0::real)\<close>
otherwise. This means that Dirac measures correspond to special
ultrafilters on their underlying \<^term>\<open>\<sigma>\<close>-algebra which are
closed under countable unions. \<close>
text \<open> Probability logic, as discussed in \S\ref{section:why-finite-additivity},
may not have countable joins in its underlying logic. In the setting
of probability logic, Dirac measures are simple probability functions
that are either 0 or 1. \<close>
definition (in classical_logic) dirac_measures :: "('a \<Rightarrow> real) set"
where "dirac_measures =
{ \<P>. class.probability_logic (\<lambda> \<phi>. \<turnstile> \<phi>) (\<rightarrow>) \<bottom> \<P>
\<and> (\<forall>x. \<P> x = 0 \<or> \<P> x = 1) }"
lemma (in classical_logic) dirac_measures_subset:
"dirac_measures \<subseteq> probabilities"
unfolding
probabilities_def
dirac_measures_def
by fastforce
text \<open> Maximally consistent sets correspond to Dirac measures. One direction
of this correspondence is established below. \<close>
lemma (in classical_logic) MCS_dirac_measure:
assumes "MCS \<Omega>"
shows "(\<lambda> \<chi>. if \<chi>\<in>\<Omega> then (1 :: real) else 0) \<in> dirac_measures"
(is "?\<P> \<in> dirac_measures")
proof -
have "class.probability_logic (\<lambda> \<phi>. \<turnstile> \<phi>) (\<rightarrow>) \<bottom> ?\<P>"
proof (standard, simp,
meson
assms
formula_maximally_consistent_set_def_reflection
maximally_consistent_set_def
set_deduction_weaken)
fix \<phi> \<psi>
assume "\<turnstile> \<phi> \<rightarrow> \<psi> \<rightarrow> \<bottom>"
hence "\<phi> \<sqinter> \<psi> \<notin> \<Omega>"
by (metis
assms
formula_consistent_def
formula_maximally_consistent_set_def_def
maximally_consistent_set_def
conjunction_def
set_deduction_modus_ponens
set_deduction_reflection
set_deduction_weaken)
hence "\<phi> \<notin> \<Omega> \<or> \<psi> \<notin> \<Omega>"
using
assms
formula_maximally_consistent_set_def_reflection
maximally_consistent_set_def
conjunction_set_deduction_equivalence
by meson
have "\<phi> \<squnion> \<psi> \<in> \<Omega> = (\<phi> \<in> \<Omega> \<or> \<psi> \<in> \<Omega>)"
by (metis
\<open>\<phi> \<sqinter> \<psi> \<notin> \<Omega>\<close>
assms
formula_maximally_consistent_set_def_implication
maximally_consistent_set_def
conjunction_def
disjunction_def)
have "?\<P> (\<phi> \<squnion> \<psi>) = ?\<P> \<phi> + ?\<P> \<psi>"
proof (cases "\<phi> \<squnion> \<psi> \<in> \<Omega>")
case True
hence \<diamondsuit>: "1 = ?\<P> (\<phi> \<squnion> \<psi>)" by simp
show ?thesis
proof (cases "\<phi> \<in> \<Omega>")
case True
hence "\<psi> \<notin> \<Omega>"
using \<open>\<phi> \<notin> \<Omega> \<or> \<psi> \<notin> \<Omega>\<close>
by blast
have "?\<P> (\<phi> \<squnion> \<psi>) = (1::real)" using \<diamondsuit> by simp
also have "... = 1 + (0::real)" by linarith
also have "... = ?\<P> \<phi> + ?\<P> \<psi>"
using \<open>\<psi> \<notin> \<Omega>\<close> \<open>\<phi> \<in> \<Omega>\<close> by simp
finally show ?thesis .
next
case False
hence "\<psi> \<in> \<Omega>"
using \<open>\<phi> \<squnion> \<psi> \<in> \<Omega>\<close> \<open>(\<phi> \<squnion> \<psi> \<in> \<Omega>) = (\<phi> \<in> \<Omega> \<or> \<psi> \<in> \<Omega>)\<close>
by blast
have "?\<P> (\<phi> \<squnion> \<psi>) = (1::real)" using \<diamondsuit> by simp
also have "... = (0::real) + 1" by linarith
also have "... = ?\<P> \<phi> + ?\<P> \<psi>"
using \<open>\<psi> \<in> \<Omega>\<close> \<open>\<phi> \<notin> \<Omega>\<close> by simp
finally show ?thesis .
qed
next
case False
moreover from this have "\<phi> \<notin> \<Omega>" "\<psi> \<notin> \<Omega>"
using \<open>(\<phi> \<squnion> \<psi> \<in> \<Omega>) = (\<phi> \<in> \<Omega> \<or> \<psi> \<in> \<Omega>)\<close> by blast+
ultimately show ?thesis by simp
qed
thus "?\<P> ((\<phi> \<rightarrow> \<bottom>) \<rightarrow> \<psi>) = ?\<P> \<phi> + ?\<P> \<psi>"
unfolding disjunction_def .
qed
thus ?thesis
unfolding dirac_measures_def
by simp
qed
notation FuncSet.funcset (infixr "\<rightarrow>" 60)
end
|
```python
import loader
from sympy import *
init_printing()
from root.solver import *
```
#### Find the general solution of $y^{(4)} - 4y''' + 4y'' = 0$
```python
yc, p = nth_order_const_coeff(1, -4, 4, 0, 0)
p.display()
```
$\displaystyle \text{Characteristic equation: }$
$\displaystyle r^{4} - 4 r^{3} + 4 r^{2} = 0$
$\displaystyle \text{Roots: }\left\{ \begin{array}{ll}r_{1,2} = 0\\r_{3,4} = 2\\\end{array} \right.$
$\displaystyle \text{General Solution: }$
$\displaystyle y = C_{1} + C_{2} t + C_{3} e^{2 t} + C_{4} t e^{2 t}$
#### Find the general solution of $y^{(6)} + y = 0$
```python
yc, p = nth_order_const_coeff(1, 0, 0, 0, 0, 0, 1)
p.display()
```
$\displaystyle \text{Characteristic equation: }$
$\displaystyle r^{6} + 1 = 0$
$\displaystyle \text{Roots: }\left\{ \begin{array}{ll}r_{1} = i\\r_{2} = - i\\r_{3} = - \frac{\sqrt{3}}{2} - \frac{i}{2}\\r_{4} = - \frac{\sqrt{3}}{2} + \frac{i}{2}\\r_{5} = \frac{\sqrt{3}}{2} - \frac{i}{2}\\r_{6} = \frac{\sqrt{3}}{2} + \frac{i}{2}\\\end{array} \right.$
$\displaystyle \text{General Solution: }$
$\displaystyle y = C_{1} \sin{\left(t \right)} + C_{2} \cos{\left(t \right)} + C_{3} e^{- \frac{\sqrt{3} t}{2}} \sin{\left(\frac{t}{2} \right)} + C_{4} e^{- \frac{\sqrt{3} t}{2}} \cos{\left(\frac{t}{2} \right)} + C_{5} e^{\frac{\sqrt{3} t}{2}} \sin{\left(\frac{t}{2} \right)} + C_{6} e^{\frac{\sqrt{3} t}{2}} \cos{\left(\frac{t}{2} \right)}$
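#### For comparison: $y'' - y = 0$

A quick extra check (a sketch assuming the same `nth_order_const_coeff` API as above; the output cell is not reproduced):

```python
# Characteristic equation r^2 - 1 = 0 has roots r = 1 and r = -1,
# so p.display() should report the general solution y = C1*e^t + C2*e^(-t).
yc, p = nth_order_const_coeff(1, 0, -1)
p.display()
```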
|
import Lean.Meta
import Lean.Elab
import Mathlib.Algebra.Group.Defs
import UnitConjecture.Tactics.ReduceGoal
import Std
import Lean
import Experiments.ProdSeq
open Lean Meta Elab Nat Term Std ProdSeq
def Lean.Expr.simplify(e: Expr) : MetaM Expr := do
let r ← simp e (← Simp.Context.mkDefault)
return r.1.expr
def hOp? (fname: Name)(e : Expr) : MetaM (Option (Expr × Expr)) := do
let type ← inferType e
if e.isAppOfArity fname 6 then
let x := e.appFn!.appArg!
let y := e.appArg!
if (← isDefEq (← inferType x) type) &&
(← isDefEq (← inferType y) type) then
return some (x, y)
else
return none
else
return none
def invOp? (fname: Name)(e : Expr) : MetaM (Option (Expr)) := do
let type ← inferType e
if e.isAppOfArity fname 4 then
let y := e.appArg!
if (← isDefEq (← inferType y) type) then
return some y
else
return none
else
return none
inductive AddTree (α : Type _)[Repr α] where
| leaf : α → AddTree α
| negLeaf : α → AddTree α
| node : AddTree α → AddTree α → AddTree α
| subNode: AddTree α → AddTree α → AddTree α
def AddTree.fold {α : Type _}[AddCommGroup α][Repr α] (t : AddTree α ) : α :=
match t with
| AddTree.leaf a => a
| AddTree.node l r => (fold l) + (fold r)
| AddTree.negLeaf a => -a
| AddTree.subNode l r => (fold l) - (fold r)
def AddTree.foldMul {α : Type _}[CommGroup α][Repr α] (t : AddTree α ) : α :=
match t with
| AddTree.leaf a => a
| AddTree.node l r => (foldMul l) * (foldMul r)
| AddTree.negLeaf a => a⁻¹
| AddTree.subNode l r => (foldMul l) / (foldMul r)
abbrev IndexAddTree := AddTree Nat
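/-- Re-index a tree over `α`: each leaf value is replaced by its index in an
accumulating array of distinct leaf values (an existing index is reused when a
value has been seen before), and the final array is returned with the tree. -/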
@[inline] def AddTree.indexTree {α : Type _}[Repr α][DecidableEq α](t: AddTree α)
(accum : Array α := #[]) :
IndexAddTree × (Array α) :=
match t with
| AddTree.leaf a =>
match accum.getIdx? a with
| some i => (AddTree.leaf i, accum)
| none => (AddTree.leaf (accum.size), accum.push a)
| AddTree.node l r =>
let (lIdx, lAccum) := indexTree l accum
let (rIdx, rAccum) := indexTree r lAccum
(AddTree.node lIdx rIdx, rAccum)
| AddTree.negLeaf a =>
match accum.getIdx? a with
| some i => (AddTree.negLeaf i, accum)
| none => (AddTree.negLeaf (accum.size), accum.push a)
| AddTree.subNode l r =>
let (lIdx, lAccum) := indexTree l accum
let (rIdx, rAccum) := indexTree r lAccum
(AddTree.subNode lIdx rIdx, rAccum)
def AddTree.indexTreeM (t: AddTree Expr)(accumExpr : Expr) :
TermElabM Expr := do
let accumL ← unpack accumExpr
let accum := accumL.toArray
match t with
| AddTree.leaf a =>
match ← accum.findIdxM? fun e => isDefEq e a with
| some i =>
mkPair (← mkAppM ``AddTree.leaf #[ToExpr.toExpr i]) accumExpr
| none =>
let newAccum ← pack (accum.push a).toList
mkPair (← mkAppM ``AddTree.leaf #[ToExpr.toExpr accum.size]) newAccum
| AddTree.node l r =>
let lexpr ← indexTreeM l accumExpr
let (lIdx, lAccum) := (← split? lexpr).get!
let rexpr ← indexTreeM r lAccum
let (rIdx, rAccum) := (← split? rexpr).get!
mkPair (← mkAppM ``AddTree.node #[lIdx, rIdx]) (rAccum)
| AddTree.negLeaf a =>
match ← accum.findIdxM? fun e => isDefEq e a with
| some i =>
mkPair (← mkAppM ``AddTree.negLeaf #[ToExpr.toExpr i]) accumExpr
| none =>
let newAccum ← pack (accum.push a).toList
      mkPair (← mkAppM ``AddTree.negLeaf #[ToExpr.toExpr accum.size]) newAccum
| AddTree.subNode l r =>
let lexpr ← indexTreeM l accumExpr
let (lIdx, lAccum) := (← split? lexpr).get!
let rexpr ← indexTreeM r lAccum
let (rIdx, rAccum) := (← split? rexpr).get!
mkPair (← mkAppM ``AddTree.subNode #[lIdx, rIdx]) (rAccum)
lemma Array.size_pos_if_index {α : Type _} [DecidableEq α] {arr : Array α} {a : α} {i : ℕ} : arr.getIdx? a = some i → arr.size > 0 := by
rw [getIdx?, findIdx?, findIdx?.loop]
exact if h:0 < arr.size then
λ _ => h
else by simp [h]
lemma Array.push_size_pos {α : Type _} (arr : Array α) (a : α) : (arr.push a).size > 0 := by
match arr with
| ⟨l⟩ =>
simp only [push, size]
induction l with
| nil => simp only [List.concat, List.length]
| cons _ _ _ => simp only [List.concat, List.length, Nat.add_one]; apply Nat.succ_pos
@[irreducible] theorem pos_size {α : Type _}[Repr α][DecidableEq α] (t: AddTree α) : (arr : Array α) → (t.indexTree arr).2.size > 0 := by
induction t with
| leaf a =>
simp only [AddTree.indexTree]
intro arr
match h:arr.getIdx? a with
| some i => simp only [h]; exact Array.size_pos_if_index h
| none => simp only [h]; apply Array.push_size_pos
| negLeaf a =>
simp only [AddTree.indexTree]
intro arr
match h:arr.getIdx? a with
| some i => simp only [h]; exact Array.size_pos_if_index h
| none => simp only [h]; apply Array.push_size_pos
| node _ _ _ ihr => simp only [AddTree.indexTree]; intro arr; apply ihr
| subNode _ _ _ ihr => simp only [AddTree.indexTree]; intro arr; apply ihr
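/-- Parse a group expression into an `AddTree`-shaped `Expr`: `+` and `*`
become `node`s, `-` and `/` become `subNode`s, `Neg.neg` and `Inv.inv` become
`negLeaf`s, and any other subterm is a `leaf`. `addTreeM` below is the same
parser, returning an `AddTree Expr` value instead of an `Expr`. -/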
partial def treeM (e : Expr) : MetaM Expr := do
match ← hOp? ``HAdd.hAdd e with
| some (a, b) => do
let l ← treeM a
let r ← treeM b
mkAppM ``AddTree.node #[l, r]
| none =>
match ← hOp? ``HMul.hMul e with
| some (a, b) => do
let l ← treeM a
let r ← treeM b
mkAppM ``AddTree.node #[l, r]
| none =>
match ← hOp? ``HSub.hSub e with
| some (a, b) => do
let l ← treeM a
let r ← treeM b
mkAppM ``AddTree.subNode #[l, r]
| none =>
match ← hOp? ``HDiv.hDiv e with
| some (a, b) => do
let l ← treeM a
let r ← treeM b
mkAppM ``AddTree.subNode #[l, r]
| none =>
match ← invOp? ``Neg.neg e with
| some a => mkAppM ``AddTree.negLeaf #[a]
| none =>
match ← invOp? ``Inv.inv e with
| some a => mkAppM ``AddTree.negLeaf #[a]
| none =>
mkAppM ``AddTree.leaf #[e]
partial def addTreeM (e : Expr) : MetaM <| AddTree Expr := do
match ← hOp? ``HAdd.hAdd e with
| some (a, b) => do
let l ← addTreeM a
let r ← addTreeM b
return AddTree.node l r
| none =>
match ← hOp? ``HMul.hMul e with
| some (a, b) => do
let l ← addTreeM a
let r ← addTreeM b
return AddTree.node l r
| none =>
match ← hOp? ``HSub.hSub e with
| some (a, b) => do
let l ← addTreeM a
let r ← addTreeM b
return AddTree.subNode l r
| none =>
match ← hOp? ``HDiv.hDiv e with
| some (a, b) => do
let l ← addTreeM a
let r ← addTreeM b
return AddTree.subNode l r
| none =>
match ← invOp? ``Neg.neg e with
| some a => return AddTree.negLeaf a
| none =>
match ← invOp? ``Inv.inv e with
| some a => return AddTree.negLeaf a
| none =>
return AddTree.leaf e
@[simp] def IndexAddTree.foldMap {α : Type _}[AddCommGroup α][Repr α]
(t : IndexAddTree)(basisImages: Array α)(h: basisImages.size > 0) : α :=
match t with
| AddTree.leaf i => basisImages.get (Fin.ofNat' i h)
| AddTree.node l r =>
let lImage := foldMap l basisImages h
let rImage := foldMap r basisImages h
lImage + rImage
| AddTree.negLeaf i => -basisImages.get (Fin.ofNat' i h)
| AddTree.subNode l r =>
let lImage := foldMap l basisImages h
let rImage := foldMap r basisImages h
lImage - rImage
@[simp] def IndexAddTree.foldMap! {α : Type _}[AddCommGroup α][Repr α][Inhabited α]
(t : IndexAddTree)(basisImages: Array α) : α :=
match t with
| AddTree.leaf i => basisImages.get! i
| AddTree.node l r =>
let lImage := foldMap! l basisImages
let rImage := foldMap! r basisImages
lImage + rImage
| AddTree.negLeaf i => -basisImages.get! i
| AddTree.subNode l r =>
let lImage := foldMap! l basisImages
let rImage := foldMap! r basisImages
lImage - rImage
def IndexAddTree.foldMapMAux
(t : IndexAddTree)(basisImages: Array Expr) : TermElabM Expr := do
match t with
| AddTree.leaf i => return basisImages.get! i
| AddTree.node l r =>
let lImage ← foldMapMAux l basisImages
let rImage ← foldMapMAux r basisImages
mkAppM ``HAdd.hAdd #[lImage, rImage]
| AddTree.negLeaf i => mkAppM ``Neg.neg #[basisImages.get! i]
| AddTree.subNode l r =>
let lImage ← foldMapMAux l basisImages
let rImage ← foldMapMAux r basisImages
mkAppM ``HSub.hSub #[lImage, rImage]
partial def exprNatLeaf : Expr → TermElabM (Option Nat) := fun expr =>
do
let mvar ← mkFreshExprMVar (some (mkConst ``Nat))
let sExp' ← mkAppM ``AddTree.leaf #[mvar]
let expr ← reduce expr
Term.synthesizeSyntheticMVarsNoPostponing
if ← isDefEq sExp' expr then
Term.synthesizeSyntheticMVarsNoPostponing
let index ← exprNat (← whnf mvar)
return some index
else
return none
partial def exprNatNegLeaf : Expr → TermElabM (Option Nat) := fun expr =>
do
let mvar ← mkFreshExprMVar (some (mkConst ``Nat))
let sExp' ← mkAppM ``AddTree.negLeaf #[mvar]
let expr ← reduce expr
Term.synthesizeSyntheticMVarsNoPostponing
if ← isDefEq sExp' expr then
Term.synthesizeSyntheticMVarsNoPostponing
let index ← exprNat (← whnf mvar)
return some index
else
return none
elab "leafIndex!" t: term : term => do
let e ← elabTerm t none
let e ← reduce e
let n? ← exprNatLeaf e
return (ToExpr.toExpr <| n?.get!)
#eval leafIndex! (AddTree.leaf 7)
partial def exprNode : Expr → TermElabM (Option (Expr × Expr)) := fun expr =>
do
let mvar ← mkFreshExprMVar (some (mkConst ``IndexAddTree))
let mvar' ← mkFreshExprMVar (some (mkConst ``IndexAddTree))
let sExp' ← mkAppM ``AddTree.node #[mvar, mvar']
let expr ← reduce expr
Term.synthesizeSyntheticMVarsNoPostponing
if ← isDefEq sExp' expr then
Term.synthesizeSyntheticMVarsNoPostponing
return some (mvar, mvar')
else
return none
partial def exprSubNode : Expr → TermElabM (Option (Expr × Expr)) := fun expr =>
do
let mvar ← mkFreshExprMVar (some (mkConst ``IndexAddTree))
let mvar' ← mkFreshExprMVar (some (mkConst ``IndexAddTree))
let sExp' ← mkAppM ``AddTree.subNode #[mvar, mvar']
let expr ← reduce expr
Term.synthesizeSyntheticMVarsNoPostponing
if ← isDefEq sExp' expr then
Term.synthesizeSyntheticMVarsNoPostponing
return some (mvar, mvar')
else
return none
partial def exprIndexTree : Expr → TermElabM IndexAddTree := fun expr =>
do
match ← exprNode expr with
| some (l, r) => do
let l ← exprIndexTree l
let r ← exprIndexTree r
return AddTree.node l r
| none =>
match ← exprSubNode expr with
| some (l, r) => do
let l ← exprIndexTree l
let r ← exprIndexTree r
return AddTree.subNode l r
| none =>
match ← exprNatLeaf expr with
| some i => return AddTree.leaf i
| none =>
match ← exprNatNegLeaf expr with
| some i => return AddTree.negLeaf i
| none =>
throwError s!"expression {expr} is not a leaf or node"
def IndexAddTree.toString (t: IndexAddTree) : String :=
match t with
| AddTree.leaf i => s!"leaf {i}"
| AddTree.node l r =>
"node (" ++ toString l ++ ") (" ++ toString r ++ ")"
| AddTree.negLeaf i => s!"negLeaf {i}"
| AddTree.subNode l r =>
"subNode (" ++ toString l ++ ") (" ++ toString r ++")"
instance : ToString IndexAddTree := ⟨IndexAddTree.toString⟩
elab "checktree" t:term : term => do
let e ← elabTerm t none
let e ← reduce e
let tree ← exprIndexTree e
logInfo s!"got tree {tree}"
return e
#eval checktree (AddTree.node (AddTree.leaf 7) (AddTree.leaf 8))
def IndexAddTree.foldMapM
(tExp: Expr)(basisImages: Array Expr) : TermElabM Expr := do
let t ← exprIndexTree tExp
foldMapMAux t basisImages
def IndexAddTree.foldMapAux {α : Type _}[AddCommGroup α][Repr α]
(t : IndexAddTree)(basisImages: Array α)(h: basisImages.size > 0) : α :=
match t with
| AddTree.leaf i => basisImages.get (Fin.ofNat' i h)
| AddTree.node l r =>
let lImage := foldMap l basisImages h
let rImage := foldMap r basisImages h
lImage + rImage
| AddTree.negLeaf i => -basisImages.get (Fin.ofNat' i h)
| AddTree.subNode l r =>
let lImage := foldMap l basisImages h
let rImage := foldMap r basisImages h
lImage - rImage
@[simp] def foldPair{α : Type _}[AddCommGroup α][Repr α]
(tb : IndexAddTree × Array α)(h: tb.snd.size > 0) : α :=
let (t, basisImages) := tb
t.foldMap basisImages h
@[simp] def IndexAddTree.foldMapMul {α : Type _}[CommGroup α][Repr α]
(t : IndexAddTree)(basisImages: Array α)(h: basisImages.size > 0) : α :=
match t with
| AddTree.leaf i => basisImages.get (Fin.ofNat' i h)
| AddTree.node l r =>
let lImage := foldMapMul l basisImages h
let rImage := foldMapMul r basisImages h
lImage * rImage
| AddTree.negLeaf i => (basisImages.get (Fin.ofNat' i h))⁻¹
| AddTree.subNode l r =>
let lImage := foldMapMul l basisImages h
let rImage := foldMapMul r basisImages h
lImage / rImage
@[simp, reducible] def AddTree.indexTree₀ {α : Type _}[Repr α][DecidableEq α](t: AddTree α) :
IndexAddTree × {a : Array α // a.size > 0} :=
let indT := (t.indexTree #[]).fst
let arr := (t.indexTree #[]).snd
let p : arr.size > 0 := by apply pos_size
(indT, ⟨arr, p⟩)
elab "indexTree#" t:term : term => do
let e ← elabTerm t none
let t ← treeM e
let res ← mkAppM ``AddTree.indexTree₀ #[t]
reduce res
def listToExpr (lst: List Expr) : MetaM (Expr × Expr) := do
let α ← inferType lst.head!
let nilList ← mkAppOptM ``List.nil #[some α]
return (← lst.foldrM (fun l i => do
mkAppOptM ``List.cons #[some α, some l, some i]) nilList,
α)
def AddTree.indexTreeM' (t : AddTree Expr) : TermElabM Expr :=
do
let res ← AddTree.indexTreeM t (mkConst ``Unit.unit)
let res ← reduce res
let (tree, lstIn) := (← split? res).get!
let lst ← unpack lstIn
let lstExprPair ← listToExpr lst
let (lstExpr, _) := lstExprPair
mkAppM ``Prod.mk #[tree, lstExpr]
def AddTree.indexTreeM'' (t : AddTree Expr) :
TermElabM <| Expr × (List Expr) :=
do
let res ← AddTree.indexTreeM t (mkConst ``Unit.unit)
let res ← reduce res
let (tree, lstIn) := (← split? res).get!
let lst ← unpack lstIn
return (tree, lst)
elab "roundtrip#" t:term : term => do
let e ← elabTerm t none
let t ← addTreeM e
let (indTree, lst) ← AddTree.indexTreeM'' t
let arr := lst.toArray
IndexAddTree.foldMapM indTree arr
elab "indTree#" t:term : term => do
let e ← elabTerm t none
let t ← addTreeM e
AddTree.indexTreeM' t
@[simp] noncomputable def egInd {α : Type _}[AddCommGroup α][Repr α][DecidableEq α] (x y: α) :=
indexTree# (x + y + x - y)
#print egInd
@[simp] def egInd' {α : Type _}[AddCommGroup α][Repr α][DecidableEq α][Inhabited α] (x y: α) :=
indTree# (x + y + x - y)
#check @egInd'
#print egInd'
@[simp] def egInd'' {α : Type _}[ag : AddCommGroup α][r : Repr α][de : DecidableEq α][w : Inhabited α] (x y: α) : IndexAddTree × (List α) := @egInd' α ag r de w x y
#print egInd''
@[simp] noncomputable def egIndMap {α : Type _}[AddCommGroup α][Repr α][DecidableEq α]
(x y: α) : α :=
(egInd x y).fst.foldMap (egInd x y).snd.val (egInd x y).snd.property
#print egIndMap
theorem egIndMapInv{α : Type _}[AddCommGroup α][Repr α][DecidableEq α]
(x y: α) : egIndMap x y = x + y + x - y := by
simp
admit
@[simp] def egIndMap'' {α : Type _}[AddCommGroup α][Repr α][DecidableEq α][Inhabited α]
(x y: α) : α :=
let (tree, l) := egInd'' x y
let arr := l.toArray
tree.foldMap! arr
theorem egIndMapInv''{α : Type _}[AddCommGroup α][Repr α][DecidableEq α][Inhabited α]
(x y: α) : egIndMap'' x y = x + y + x - y := by
simp
admit
theorem egRoundtrip{α : Type _}[AddCommGroup α][Repr α][DecidableEq α][Inhabited α]
(x y : α) : x + y + x - y = roundtrip# (x + y + x - y) := by rfl
def ℤnType : Nat → MetaM Expr
| 0 => return mkConst ``Unit
| n + 1 => do mkAppM ``Prod #[mkConst ``Int, ← ℤnType n]
#eval (ℤnType 2)
|
[STATEMENT]
lemma add_poly_p_add_mset_comb:
\<open>add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow>
add_poly_p\<^sup>*\<^sup>*
(add_mset (xs, n) A, Aa, {#})
({#}, {#}, add_mset (xs, n) r)\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p\<^sup>*\<^sup>* (add_mset (xs, n) A, Aa, {#}) ({#}, {#}, add_mset (xs, n) r)
[PROOF STEP]
apply (rule converse_rtranclp_into_rtranclp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p (add_mset (xs, n) A, Aa, {#}) ?b
2. add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p\<^sup>*\<^sup>* ?b ({#}, {#}, add_mset (xs, n) r)
[PROOF STEP]
apply (rule add_poly_p.add_new_coeff_l)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p\<^sup>*\<^sup>* (A, Aa, {#(xs, n)#}) ({#}, {#}, add_mset (xs, n) r)
[PROOF STEP]
using add_poly_p_add_to_result[of A Aa \<open>{#}\<close> \<open>{#}\<close> \<open>{#}\<close> r \<open>{#(xs, n)#}\<close>]
[PROOF STATE]
proof (prove)
using this:
add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p\<^sup>*\<^sup>* (A, Aa, {#(xs, n)#} + {#}) ({#}, {#}, {#(xs, n)#} + r)
goal (1 subgoal):
1. add_poly_p\<^sup>*\<^sup>* (A, Aa, {#}) ({#}, {#}, r) \<Longrightarrow> add_poly_p\<^sup>*\<^sup>* (A, Aa, {#(xs, n)#}) ({#}, {#}, add_mset (xs, n) r)
[PROOF STEP]
by auto |
lemma measure_Un3: assumes "A \<in> fmeasurable M" "B \<in> fmeasurable M" shows "measure M (A \<union> B) = measure M A + measure M B - measure M (A \<inter> B)" |
import numpy as np
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord
from astroquery.mast import Tesscut
def get_data(ra, dec, units="deg", size=64):
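    """Download TESS cutouts around sky position (ra, dec) via astroquery's
    Tesscut service and return the manifest table of downloaded files."""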
    c = SkyCoord(ra, dec, unit=units)
data_table = Tesscut.download_cutouts(c, size=size)
return data_table
def plot_lightcurves(cpm):
fig, axs = plt.subplots(2, 1, figsize=(18, 12))
data = cpm.target_fluxes
model = cpm.lsq_prediction
res = data - cpm.cpm_prediction
axs[0].plot(cpm.time, data, ".", color="k", label="Data", markersize=6)
axs[0].plot(
cpm.time, model, ".", color="C3", label="Model", markersize=4, alpha=0.6
)
if cpm.poly_params.shape != (0,):
axs[0].plot(
cpm.time,
cpm.cpm_prediction,
".",
color="C1",
label="CPM",
markersize=4,
alpha=0.4,
)
axs[0].plot(
cpm.time,
cpm.poly_prediction,
"-",
color="C2",
label="Poly",
markersize=4,
alpha=0.7,
)
axs[1].plot(cpm.time, res, ".-", label="Residual (Data - CPM)", markersize=3)
for i in range(2):
axs[i].legend(fontsize=15)
plt.show()
def summary_plot(cpm, n=20, subtract_polynomials=False, save=False):
"""Shows a summary plot of a CPM fit to a pixel.
The top row consists of three images: (left) image showing median values for each pixel,
    (middle) the same image with the target pixel (the pixel whose lightcurve we are trying to predict),
    the exclusion region (the area from which predictor pixels CANNOT be chosen), and the predictor pixels marked,
    (right) the same image with the top-``n`` contributing pixels of the CPM highlighted. The contribution is
defined as the absolute value of the coefficients of each predictor pixel. For example, the top 5 contributing pixels
are the pixels which have the highest absolute coefficient values calculated when ``lsq`` is run.
"""
top_n_loc, top_n_mask = cpm.get_contributing_pixels(n)
plt.figure(figsize=(18, 14))
ax1 = plt.subplot2grid((4, 3), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((4, 3), (0, 1), rowspan=2)
ax3 = plt.subplot2grid((4, 3), (0, 2), rowspan=2)
ax4 = plt.subplot2grid((4, 3), (2, 0), colspan=3)
ax5 = plt.subplot2grid((4, 3), (3, 0), colspan=3)
first_image = cpm.pixel_medians
# first_image = cpm.im_fluxes[0,:,:]
ax1.imshow(
first_image,
origin="lower",
vmin=np.nanpercentile(first_image, 10),
vmax=np.nanpercentile(first_image, 90),
)
ax2.imshow(
first_image,
origin="lower",
vmin=np.nanpercentile(first_image, 10),
vmax=np.nanpercentile(first_image, 90),
)
ax2.imshow(cpm.excluded_pixels_mask, origin="lower", cmap="Set1", alpha=0.5)
ax2.imshow(cpm.target_pixel_mask, origin="lower", cmap="binary", alpha=1.0)
ax2.imshow(cpm.predictor_pixels_mask, origin="lower", cmap="binary_r", alpha=0.9)
ax3.imshow(
first_image,
origin="lower",
vmin=np.nanpercentile(first_image, 10),
vmax=np.nanpercentile(first_image, 90),
)
ax3.imshow(cpm.excluded_pixels_mask, origin="lower", cmap="Set1", alpha=0.5)
ax3.imshow(cpm.target_pixel_mask, origin="lower", cmap="binary", alpha=1.0)
ax3.imshow(cpm.predictor_pixels_mask, origin="lower", cmap="binary_r", alpha=0.9)
ax3.imshow(top_n_mask, origin="lower", cmap="Set1")
data = cpm.rescaled_target_fluxes
model = cpm.lsq_prediction
if (subtract_polynomials == True) or (
(subtract_polynomials == False) & (cpm.cpm_prediction is None)
):
res = data - model
plot_label = "Residual (Data - Model)"
elif (subtract_polynomials == False) & (cpm.cpm_prediction is not None):
res = data - (cpm.cpm_prediction + cpm.const_prediction)
plot_label = "Residual (Data - CPM)"
ax4.plot(cpm.time, data, ".", color="k", label="Data", markersize=4)
ax4.plot(cpm.time, model, ".", color="C3", label="Model", markersize=4, alpha=0.4)
if cpm.valid is not None:
ax4.plot(
cpm.time[~cpm.valid], data[~cpm.valid], "x", color="gray", label="Clipped"
)
if cpm.cpm_prediction is not None:
ax4.plot(
cpm.time,
cpm.cpm_prediction,
".",
color="C1",
label="CPM",
markersize=3,
alpha=0.4,
)
ax4.plot(
cpm.time,
cpm.poly_prediction,
".",
color="C2",
label="Poly",
markersize=3,
alpha=0.4,
)
ax5.plot(cpm.time, res, ".-", label=plot_label, markersize=7)
for dump in cpm.dump_times:
ax5.axvline(dump, color="red", alpha=0.5)
plt.suptitle(
"N={} Predictor Pixels, Method: {}, L2Reg={}".format(
cpm.num_predictor_pixels,
cpm.method_predictor_pixels,
"{:.0e}".format(cpm.cpm_regularization),
),
y=0.89,
fontsize=15,
)
ax1.set_title("Cmap set to 10%, 90% values of image")
ax2.set_title("Target (White), Excluded (Red Shade), Predictors (Black)")
ax3.set_title("Top N={} contributing pixels to prediction (Red)".format(n))
ax4.set_title(
"Lightcurves of Target Pixel {}".format((cpm.target_row, cpm.target_col))
)
# ax4.set_ylabel("Flux [e-/s]")
ax4.set_ylabel("Relative Flux")
ax4.legend(fontsize=12)
ax5.set_xlabel("Time (BJD-2457000) [Day]", fontsize=12)
# ax5.set_ylabel("Residual Flux [e-/s]")
ax5.set_ylabel("Fractional Relative Fux")
ax5.legend(fontsize=12)
if save == True:
plt.savefig(
"cpm_target_{}_reg_{}_filename_{}.png".format(
(cpm.target_row, cpm.target_col),
"{:.0e}".format(cpm.cpm_regularization),
cpm.file_name,
),
dpi=200,
)
def stitch_sectors(t1, t2, lc1, lc2, points=50):
    offset = np.ones((points, 1))
m = np.block([
[t1[-points:].reshape(-1, 1), offset, np.zeros((points, 1))],
[t2[:points].reshape(-1, 1), np.zeros((points, 1)), offset]
])
y = np.concatenate((lc1[-points:], lc2[:points]))
a = np.dot(m.T, m)
b = np.dot(m.T, y)
params = np.linalg.solve(a, b)
time = np.concatenate((t1, t2))
diff = np.abs(params[-2] - params[-1]) + params[0]*(time[int(points/2) + 1] - time[int(points/2)])
return (diff, time, np.concatenate((lc1, lc2+diff)))
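# Usage sketch for stitch_sectors (synthetic values, not real TESS data):
# t1, t2 = np.linspace(0.0, 27.0, 500), np.linspace(28.0, 55.0, 500)
# lc1, lc2 = 1e-3 * t1, 1e-3 * t2 - 0.05   # second sector offset by -0.05
# diff, time, lc = stitch_sectors(t1, t2, lc1, lc2, points=50)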
# Maybe this function should be a method for the Source class.
# def get_outliers(lc, window=50, sigma=5, sigma_upper=None, sigma_lower=None):
# if sigma_upper is None:
# sigma_upper = sigma
# if sigma_lower is None:
# sigma_lower = sigma
# median_lc = median_filter(lc, size=window)
# median_subtracted_lc = lc - median_lc
# outliers = np.full(lc.shape, False)
# while True:
# std = np.std(median_subtracted_lc)
# clipped_upper = median_subtracted_lc > sigma_upper*std
# clipped_lower = median_subtracted_lc < -sigma_lower*std
# out = clipped_upper + clipped_lower
# if np.sum(out) == np.sum(outliers):
# break
# outliers += out
# return outliers
# from IPython.display import HTML
# import matplotlib.animation as animation
# fig, axes = plt.subplots(1, 1, figsize=(12, 12))
# ims = []
# for i in range(0, lc_matrix.shape[0], 3):
# im1 = axes.imshow(lc_matrix[i], animated=True,
# vmin=np.percentile(lc_matrix, 0), vmax=np.percentile(lc_matrix, 100))
# ims.append([im1]);
# fig.colorbar(im1, ax=axes, fraction=0.046)
# # fig.colorbar(im2, ax=axes[1], fraction=0.046)
# # fig.colorbar(im3, ax=axes[2], fraction=0.046)
# ani = animation.ArtistAnimation(fig, ims, interval=50, blit=True,
# repeat_delay=1000);
# HTML(ani.to_jshtml()) |
foo() # Error: U0
traceback()
|
/-
Copyright (c) 2020 Yury Kudryashov. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yury Kudryashov
-/
import algebra.group_power.basic
import logic.function.iterate
import group_theory.perm.basic
import group_theory.group_action.opposite
/-!
# Iterates of monoid and ring homomorphisms
An iterate of a monoid/ring homomorphism is again a monoid/ring homomorphism, but it has the wrong type, so Lean
can't apply lemmas like `monoid_hom.map_one` to `f^[n] 1`. Though it is possible to define
a monoid structure on the endomorphisms, quite often we do not want to convert from
`M →* M` to `monoid.End M` and from `f^[n]` to `f^n` just to apply a simple lemma.
So, we restate standard `*_hom.map_*` lemmas under names `*_hom.iterate_map_*`.
We also prove formulas for iterates of add/mul left/right.
## Tags
homomorphism, iterate
-/
open function
variables {M : Type*} {N : Type*} {G : Type*} {H : Type*}
/-- An auxiliary lemma that can be used to prove `⇑(f ^ n) = (⇑f^[n])`. -/
lemma hom_coe_pow {F : Type*} [monoid F] (c : F → M → M) (h1 : c 1 = id)
(hmul : ∀ f g, c (f * g) = c f ∘ c g) (f : F) : ∀ n, c (f ^ n) = (c f^[n])
| 0 := by { rw [pow_zero, h1], refl }
| (n + 1) := by rw [pow_succ, iterate_succ', hmul, hom_coe_pow]
namespace monoid_hom
section
variables [mul_one_class M] [mul_one_class N]
@[simp, to_additive]
theorem iterate_map_one (f : M →* M) (n : ℕ) : f^[n] 1 = 1 :=
iterate_fixed f.map_one n
@[simp, to_additive]
theorem iterate_map_mul (f : M →* M) (n : ℕ) (x y) :
f^[n] (x * y) = (f^[n] x) * (f^[n] y) :=
semiconj₂.iterate f.map_mul n x y
end
variables [monoid M] [monoid N] [group G] [group H]
@[simp, to_additive]
theorem iterate_map_inv (f : G →* G) (n : ℕ) (x) :
f^[n] (x⁻¹) = (f^[n] x)⁻¹ :=
commute.iterate_left f.map_inv n x
@[simp, to_additive]
theorem iterate_map_div (f : G →* G) (n : ℕ) (x y) :
f^[n] (x / y) = (f^[n] x) / (f^[n] y) :=
semiconj₂.iterate f.map_div n x y
theorem iterate_map_pow (f : M →* M) (n : ℕ) (a) (m : ℕ) : f^[n] (a^m) = (f^[n] a)^m :=
commute.iterate_left (λ x, f.map_pow x m) n a
theorem iterate_map_zpow (f : G →* G) (n : ℕ) (a) (m : ℤ) : f^[n] (a^m) = (f^[n] a)^m :=
commute.iterate_left (λ x, f.map_zpow x m) n a
end monoid_hom
lemma monoid.End.coe_pow {M} [monoid M] (f : monoid.End M) (n : ℕ) : ⇑(f^n) = (f^[n]) :=
hom_coe_pow _ rfl (λ f g, rfl) _ _
-- we define these manually so that we can pick a better argument order
namespace add_monoid_hom
variables [add_monoid M] [add_group G]
theorem iterate_map_smul (f : M →+ M) (n m : ℕ) (x : M) :
f^[n] (m • x) = m • (f^[n] x) :=
f.to_multiplicative.iterate_map_pow n x m
attribute [to_additive, to_additive_reorder 5] monoid_hom.iterate_map_pow
theorem iterate_map_zsmul (f : G →+ G) (n : ℕ) (m : ℤ) (x : G) :
f^[n] (m • x) = m • (f^[n] x) :=
f.to_multiplicative.iterate_map_zpow n x m
attribute [to_additive, to_additive_reorder 5] monoid_hom.iterate_map_zpow
end add_monoid_hom
lemma add_monoid.End.coe_pow {A} [add_monoid A] (f : add_monoid.End A) (n : ℕ) : ⇑(f^n) = (f^[n]) :=
hom_coe_pow _ rfl (λ f g, rfl) _ _
namespace ring_hom
section semiring
variables {R : Type*} [semiring R] (f : R →+* R) (n : ℕ) (x y : R)
lemma coe_pow (n : ℕ) : ⇑(f^n) = (f^[n]) :=
hom_coe_pow _ rfl (λ f g, rfl) f n
theorem iterate_map_one : f^[n] 1 = 1 := f.to_monoid_hom.iterate_map_one n
theorem iterate_map_zero : f^[n] 0 = 0 := f.to_add_monoid_hom.iterate_map_zero n
theorem iterate_map_add : f^[n] (x + y) = (f^[n] x) + (f^[n] y) :=
f.to_add_monoid_hom.iterate_map_add n x y
theorem iterate_map_mul : f^[n] (x * y) = (f^[n] x) * (f^[n] y) :=
f.to_monoid_hom.iterate_map_mul n x y
theorem iterate_map_pow (a) (n m : ℕ) : f^[n] (a^m) = (f^[n] a)^m :=
f.to_monoid_hom.iterate_map_pow n a m
theorem iterate_map_smul (n m : ℕ) (x : R) :
f^[n] (m • x) = m • (f^[n] x) :=
f.to_add_monoid_hom.iterate_map_smul n m x
end semiring
variables {R : Type*} [ring R] (f : R →+* R) (n : ℕ) (x y : R)
theorem iterate_map_sub : f^[n] (x - y) = (f^[n] x) - (f^[n] y) :=
f.to_add_monoid_hom.iterate_map_sub n x y
theorem iterate_map_neg : f^[n] (-x) = -(f^[n] x) :=
f.to_add_monoid_hom.iterate_map_neg n x
theorem iterate_map_zsmul (n : ℕ) (m : ℤ) (x : R) :
f^[n] (m • x) = m • (f^[n] x) :=
f.to_add_monoid_hom.iterate_map_zsmul n m x
end ring_hom
lemma equiv.perm.coe_pow {α : Type*} (f : equiv.perm α) (n : ℕ) : ⇑(f ^ n) = (f^[n]) :=
hom_coe_pow _ rfl (λ _ _, rfl) _ _
--what should be the namespace for this section?
section monoid
variables [monoid G] (a : G) (n : ℕ)
@[simp, to_additive] lemma smul_iterate [mul_action G H] :
((•) a : H → H)^[n] = (•) (a^n) :=
funext (λ b, nat.rec_on n (by rw [iterate_zero, id.def, pow_zero, one_smul])
(λ n ih, by rw [iterate_succ', comp_app, ih, pow_succ, mul_smul]))
@[simp, to_additive] lemma mul_left_iterate : ((*) a)^[n] = (*) (a^n) :=
smul_iterate a n
@[simp, to_additive] lemma mul_right_iterate : (* a)^[n] = (* a ^ n) :=
smul_iterate (mul_opposite.op a) n
@[to_additive]
lemma mul_right_iterate_apply_one : (* a)^[n] 1 = a ^ n :=
by simp [mul_right_iterate]
end monoid
section semigroup
variables [semigroup G] {a b c : G}
@[to_additive]
lemma semiconj_by.function_semiconj_mul_left (h : semiconj_by a b c) :
function.semiconj ((*)a) ((*)b) ((*)c) :=
λ j, by rw [← mul_assoc, h.eq, mul_assoc]
@[to_additive]
lemma commute.function_commute_mul_left (h : commute a b) :
function.commute ((*)a) ((*)b) :=
semiconj_by.function_semiconj_mul_left h
@[to_additive]
lemma semiconj_by.function_semiconj_mul_right_swap (h : semiconj_by a b c) :
function.semiconj (*a) (*c) (*b) :=
λ j, by simp_rw [mul_assoc, ← h.eq]
@[to_additive]
lemma commute.function_commute_mul_right (h : commute a b) :
function.commute (*a) (*b) :=
semiconj_by.function_semiconj_mul_right_swap h
end semigroup
|
Two weeks prior to the NHL trade deadline, Columbus announced that, unlike earlier in the season, they would listen to trade proposals involving Rick Nash, though they were not actively shopping him. Howson stated that the team was open to all options for improving the team, including trading Nash. Speculation was that in return for Nash the Blue Jackets would ask for a "combination of young, proven players, high-end prospects and draft picks." Leading up to the trade deadline, the Blue Jackets dealt Antoine <unk> to the Phoenix Coyotes for two draft picks and goaltender Curtis McElhinney. Despite being injured at the time, the acquisition of McElhinney was believed to give Columbus the flexibility to trade Curtis Sanford. The following day, on February 23, Columbus traded Jeff Carter to the Kings. In the deal, Columbus acquired defenseman Jack Johnson and a first-round draft pick; the team was given the choice of taking the pick in either 2012 or 2013. At the deadline, Columbus was unable to come to terms on a deal involving Nash, but they did make one more move; they sent center Samuel Pahlsson to the Vancouver Canucks in exchange for two fourth-round draft picks and minor league defenseman Taylor Ellington. Following the trade deadline, Howson announced that the team had attempted to trade Nash at the player's request. Nash stated that he had requested the trade after being informed that the franchise was going into another rebuilding phase. He further noted that he felt that he "could be a huge part of that towards bringing assets in," and in his view "it was the best thing for the team, the organization, and personally for [his] career." After the personnel changes, the Blue Jackets closed out the month with a three-game losing streak.
|
The $\sigma$-algebra generated by a $\sigma$-algebra $M$ is $M$ itself. |
lemma zero_less_norm_iff [simp]: "norm x > 0 \<longleftrightarrow> x \<noteq> 0" |
#pragma once
#define OEMRESOURCE
#include <atomic>
#include <concepts>
#include <ppl.h>
#include <gsl/gsl>
// Included before windows.h, because pfc.h includes winsock2.h
#include "../pfc/pfc.h"
#include <windows.h>
#include <SHLWAPI.H>
#include "../foobar2000/SDK/foobar2000.h"
#include "../mmh/stdafx.h"
#include "../ui_helpers/stdafx.h"
#include "config_var.h"
#include "config_object.h"
#include "console.h"
#include "fcl.h"
#include "info_box.h"
#include "initquit.h"
#include "library.h"
#include "low_level_hook.h"
#include "main_thread_callback.h"
#include "sort.h"
#include "stream.h"
|
module Colist where
open import Size using (Size ; Size<_)
data Colist (i : Size) {a} (A : Set a) : Set a
record ∞Colist (i : Size) {a} (A : Set a) : Set a where
coinductive
constructor delay_
field force : ∀ {j : Size< i} → Colist j A
data Colist (i : Size) {a} (A : Set a) where
[] : Colist i A
_∷_ : (x : A) (xs : ∞Colist i A) → Colist i A
|
PROGRAM PRINTRANDOM
INTEGER N
INTEGER SEED
REAL RANDX
SEED = 128
RANDX = 0
N = 0
10 CALL RANDOM(SEED, RANDX)
PRINT *, RANDX
N = N + 1
IF (N.LT.5) GOTO 10
END
* --------------------------------------------
* Pseudo random number generator
* --------------------------------------------
SUBROUTINE RANDOM(SEED, RANDX)
INTEGER SEED
REAL RANDX
SEED = 2045*SEED + 1
SEED = SEED - (SEED/1048576)*1048576
RANDX = REAL(SEED + 1)/1048577.0
RETURN
END
* --------------------------------------------
|
pacman::p_load(foreign)
#https://www.rdocumentation.org/packages/datasets/versions/3.6.2/topics/mtcars
#description at the link above
# #checking correlations
# pacman::p_load(corrplot)
# cars.cor <- sapply(cars,function(x) as.numeric(x))
# cor_matrix=cor(cars.cor)
# corrplot(cor_matrix, method = "number")
# wczytaj_mtcars <- function(){
# cars = mtcars
# # summary(cars)
#
#
# cars$vs = factor(cars$vs ,labels = c("V", "S"))
# cars$am = factor(cars$am , labels = c("automatic", "manual") )
# cars$gear = as.factor(cars$gear)
#
# cat("Wczytano mtcars i poprawiono dane")
# return(cars)
# }
rmse <- function(actual, predicted){
sqrt(mean((actual - predicted)^2))
}
wczytaj_mtcars <- function(){
cars = mtcars
cars = as.data.frame(sapply(cars, as.numeric)) # convert to numeric and a data frame
set.seed(123)
indexy <- sample(nrow(cars), size = trunc(0.25* nrow(cars)) )
train <- cars[indexy, ]
test <- cars[-indexy, ]
# selecting the "important" columns
tmp <- lm(qsec~1 , data = cars)
forward <- step(tmp, direction = "forward", scope = list(upper=.~.+mpg+cyl+hp+drat+wt+vs+am+gear+carb), trace=0)
summary(forward)$r.squared#0.8523863
tmp <- lm(qsec~mpg+cyl+hp+drat+wt+vs+am+gear+carb , data = cars)
backward <- step(tmp, direction = "backward", scope = list(upper=.~.+mpg+cyl+hp+drat+wt+vs+am+gear+carb), trace=0)
summary(backward)$r.squared#0.8453189
tmp <- lm(qsec~. , data = cars)
both <- step(tmp, direction = "both", scope = list(upper=.~.+mpg+cyl+hp+drat+wt+vs+am+gear+carb), trace=0)
summary(both)$r.squared #0.8642928
# cat("forward r.squared = ", summary(forward)$r.squared)
# cat("backward r.squared = ", summary(backward)$r.squared)
# cat("both r.squared = ", summary(both)$r.squared)
#
nazwyKolumn <- variable.names(both) # "both" gave the best fit
nazwyKolumn[1] <- "qsec" # fix the first name (replace the intercept with the response)
return (list("columns" = nazwyKolumn, "train" = train, "test"=test))
}
svm_mtcars <- function(kolumny, dane, kernel = "radial"){
pacman::p_load(e1071) # svm() comes from the e1071 package
model = svm(qsec~., data=dane[,kolumny], kernel = kernel, type="nu-regression")
return (model)
} |
== Reaction ==
|
lemma frontier_interior_subset: "frontier(interior S) \<subseteq> frontier S" |
[STATEMENT]
lemma above_not_increase:
"system \<in> -Highest i Int {s. j\<notin>above i s} co {s. j\<notin>above i s}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. system \<in> - Highest i \<inter> {s. j \<notin> PriorityAux.above i s} co {s. j \<notin> PriorityAux.above i s}
[PROOF STEP]
apply (insert reach_lemma [of concl: j])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k r q. derive k r q \<Longrightarrow> reach j q \<subseteq> reach j r \<union> {k}) \<Longrightarrow> system \<in> - Highest i \<inter> {s. j \<notin> PriorityAux.above i s} co {s. j \<notin> PriorityAux.above i s}
[PROOF STEP]
apply (simp add: system_def Component_def mk_total_program_def totalize_JN,
safety)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>s x. \<lbrakk>\<And>k r. r\<inverse> `` {k} = {} \<Longrightarrow> (r - {(x, y). x = k \<or> y = k} \<inter> r \<union> ({(x, y). x = k \<or> y = k} \<inter> r)\<inverse>)\<^sup>+ `` {j} \<subseteq> insert k (r\<^sup>+ `` {j}); s\<inverse> `` {x} = {}; s\<inverse> `` {i} \<noteq> {}; (i, j) \<notin> (s\<inverse>)\<^sup>+; (i, j) \<in> ((s - {(xa, y). xa = x \<or> y = x} \<inter> s \<union> ({(xa, y). xa = x \<or> y = x} \<inter> s)\<inverse>)\<inverse>)\<^sup>+\<rbrakk> \<Longrightarrow> False
[PROOF STEP]
apply (simp add: trancl_converse, blast)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
Require Export Iron.Language.SystemF2Data.Exp.Base.
Require Export Iron.Language.SystemF2Data.Exp.Relation.WfX.
(********************************************************************)
(* Weak normal forms cannot be reduced further by
call-by-value evaluation. *)
Inductive wnfX : exp -> Prop :=
| Wnf_XVar
: forall i
, wnfX (XVar i)
| Wnf_XLAM
: forall x1
, wnfX (XLAM x1)
| Wnf_XLam
: forall t1 x2
, wnfX (XLam t1 x2)
| Wnf_XCon
: forall dc ts xs
, Forall wnfX xs
-> wnfX (XCon dc ts xs)
| Wnf_XLit
: forall l
, wnfX (XLit l).
Hint Constructors wnfX.
(********************************************************************)
(* Values are closed expressions that cannot be reduced further. *)
Inductive value : exp -> Prop :=
| Value
: forall xx
, wnfX xx -> closedX xx
-> value xx.
Hint Constructors value.
Lemma value_wnfX
: forall xx, value xx -> wnfX xx.
Proof. intros. inverts H. auto. Qed.
Hint Resolve value_wnfX.
Lemma value_closedX
: forall xx, value xx -> closedX xx.
Proof. intros. inverts H. auto. Qed.
Hint Resolve value_closedX.
Lemma value_wnfXs_XCon
: forall ts xs dc
, value (XCon dc ts xs) -> Forall wnfX xs.
Proof.
intros. inverts H. inverts H0. auto.
Qed.
Hint Resolve value_wnfXs_XCon.
Lemma value_closedXs_XCon
: forall ts xs dc
, value (XCon dc ts xs) -> Forall closedX xs.
Proof.
intros. inverts H. inverts H1. auto.
Qed.
Hint Resolve value_closedXs_XCon.
|
lemma coeff_degree_mult: "coeff (p * q) (degree (p * q)) = coeff q (degree q) * coeff p (degree p)" for p q :: "'a::{comm_semiring_0,semiring_no_zero_divisors} poly" |
setwd('/Users/Cam/Desktop/ds_practice/faa-laser-incident-reports')
getwd()
ls()
options(prompt='R> ')
options(continue = '\t')
laser_dat = read.csv('adjusted_laser_data.csv')
head(laser_dat)
?table
laser_dat$day_of_week = factor(laser_dat$day_of_week,c('Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'))
day_breakdown = table(laser_dat$day_of_week)
day_breakdown
barplot(day_breakdown,ylim=c(0,3000))
results = chisq.test(day_breakdown)
summary(results)
results
results$observed
results$expected
holiday_breakdown = table(laser_dat$holidays)
holiday_breakdown
barplot(holiday_breakdown) |
import numpy as np
import sirf.STIR as pet
from typing import Callable
def scaleCT(imageArray:np.array) -> np.array:
"""Scales a numpy array with HU/10000 to 511 keV attenuation correction coefficients.
Made to use on CT images converted directly with nm_mrac2mu.
Parameters
----------
imageArray : np.array
np.array in units HU/10000
Returns
-------
np.array
imageArray containing linear attenuation coefficients at 511 keV.
References:
Burger, C., Goerres, G., Schoenes, S. et al. PET attenuation
coefficients from CT images: experimental evaluation of the
transformation of CT into PET 511-keV attenuation coefficients. Eur
J Nucl Med 29, 922–927 (2002).
https://doi.org/10.1007/s00259-002-0796-3
"""
imageArrayScaled = imageArray*1e4
muH20PET = 0.096 * 1e4
muBonePET = 0.172 * 1e4
muH20CT = 0.184 * 1e4
muBoneCT = 0.428 * 1e4
resImArr = np.where(
imageArrayScaled > 0,
muH20PET
+ imageArrayScaled
* (muH20CT / 1000)
* ((muBonePET - muH20PET) / (muBoneCT - muH20CT)),
muH20PET * (imageArrayScaled + 1000) / (1000),
)
return resImArr/1e4
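
# Quick sanity check of the piecewise map above (a sketch, not part of the
# original module): water is 0 HU, i.e. 0.0 in HU/10000 units, and should map
# to the reference 511 keV attenuation coefficient of water, 0.096:
#
#   >>> scaleCT(np.array([0.0]))
#   array([0.096])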
def scaleImageData(inputImageData : pet.ImageData, scaler : Callable[[np.array], np.array]) -> pet.ImageData:
"""Scales imput sirf.STIR.ImageData using the a defined scaler function.
Parameters
----------
inputImageData : pet.ImageData
input pet.ImageData
scaler : callable[[np.array], np.array]
Any scaler function that takes np.array as input and output. Input and
output must be of the same size.
Returns
-------
pet.ImageData
Scaled pet.ImageData
"""
inputArray = inputImageData.as_array()
outputArray = scaler(inputArray)
outputImageData = inputImageData.copy()
outputImageData.fill(outputArray)
return outputImageData
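
# A minimal usage sketch (hypothetical file name; assumes a working SIRF
# installation with STIR support).
if __name__ == "__main__":
    ct_image = pet.ImageData("mumap_hu_scaled.hv")  # hypothetical file name
    mu_image = scaleImageData(ct_image, scaleCT)
    print(mu_image.as_array().max())
|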
[STATEMENT]
lemma freshInputTransition:
fixes P :: pi
and a :: name
and u :: name
and P' :: pi
and c :: name
assumes "P \<longmapsto> a<u> \<prec> P'"
and "c \<sharp> P"
and "c \<noteq> u"
shows "c \<sharp> P'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<sharp> P'
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
P \<longmapsto> a<u> \<prec> P'
c \<sharp> P
c \<noteq> u
goal (1 subgoal):
1. c \<sharp> P'
[PROOF STEP]
by(nominal_induct x2=="a<u> \<prec> P'" arbitrary: P' rule: TransitionsEarly.strong_induct)
(fastforce simp add: residual.inject name_fresh_abs fresh_fact1 fresh_fact2)+ |
# Extraction of a checklist
txt <- pdftools::pdf_text('./Week 2/3 - Wednesday/data extraction practical/cache/jimenez-uzcategui_2014')
txt <- txt[3:15]
txt <- lapply(txt, gsub, pattern = '\\(|\\)', replacement = '')
# extracting strings of interest
species_names <- lapply(stringi::stri_extract_all_regex(txt, '(?<=\r\n[0-9]{1,3}\\. ).+(?=\r\n)|(?<=^[0-9]{1,3}\\. ).+(?=\r\n)'),
gsub, pattern = ' [A-Z].*$|ined.', replacement = '')
status <- stringi::stri_extract_all_regex(txt, '(?<=\r\n {1,15}Origin: ).*(?=, [A-Za-z]*\\.\r\n)')
distribution <- stringi::stri_extract_all_regex(txt, '(?<= {1,15}Galapagos Distribution: ).*?(?=\\. *)')
# cleaning strings
species_names[[13]] <- NULL
distribution[[5]] <- append(distribution[[5]], 'Española, Floreana, Isabela, Marchena, Pinta, San Cristóbal, Santa Cruz, Santa Fé, Santiago', 1)
distribution[[11]] <- append(distribution[[11]], 'Fernandina, Floreana, Isabela, Marchena, Pinzón, San Cristóbal, Santa Cruz, Santiago', 2)
distribution[[12]] <- c(distribution[[12]], 'Española, Fernandina, Floreana, Isabela, Pinta, San Cristóbal, Santa Cruz, Santa Fé, Santiago')
ddata <- data.frame(species = unlist(species_names),
status = unlist(status),
distribution = unlist(distribution)
)
ddata[ddata$species %in% c('Aegialomys galapagoensis','Megaoryzomys curioi','Megaoryzomys sp. 1','Nesoryzomys darwini','Nesoryzomys indefessus','Nesoryzomys sp. 1','Nesoryzomys sp. 2','Nesoryzomys sp. 3'), "status"] <- 'NativeExtinct'
|
class OfNatSound (α : Type u) [Add α] [(n : Nat) → OfNat α n] : Prop where
ofNat_add (n m : Nat) : (OfNat.ofNat n : α) + OfNat.ofNat m = OfNat.ofNat (n+m)
export OfNatSound (ofNat_add)
theorem ex1 {α : Type u} [Add α] [(n : Nat) → OfNat α n] [OfNatSound α] : (10000000 : α) + 10000000 = 20000000 :=
ofNat_add ..
class Zero (α : Type u) where
zero : α
class One (α : Type u) where
one : α
instance [Zero α] : OfNat α (nat_lit 0) where
ofNat := Zero.zero
instance [One α] : OfNat α (nat_lit 1) where
ofNat := One.one
-- Some example structure
class S (α : Type u) extends Add α, Mul α, Zero α, One α where
add_assoc (a b c : α) : a + b + c = a + (b + c)
add_zero (a : α) : a + 0 = a
zero_add (a : α) : 0 + a = a
mul_zero (a : α) : a * 0 = 0
mul_one (a : α) : a * 1 = a
left_distrib (a b c : α) : a * (b + c) = a * b + a * c
-- A very simple default `ofNat` for `S`
protected def S.ofNat (α : Type u) [S α] : Nat → α
| 0 => 0
| n+1 => S.ofNat α n + 1
instance [S α] : OfNat α n where
ofNat := S.ofNat α n
instance [S α] : OfNatSound α where
ofNat_add n m := by
induction m with
| zero => simp [S.ofNat]; erw [S.add_zero]; done
| succ m ih => simp [OfNat.ofNat, S.ofNat] at *; erw [← ih]; rw [S.add_assoc]
theorem S.ofNat_mul [S α] (n m : Nat) : (OfNat.ofNat n : α) * OfNat.ofNat m = OfNat.ofNat (n * m) := by
induction m with
| zero => rw [S.mul_zero, Nat.mul_zero]
| succ m ih =>
show OfNat.ofNat (α := α) n * OfNat.ofNat (m + 1) = OfNat.ofNat (n * m.succ)
rw [Nat.mul_succ, ← ofNat_add, ← ofNat_add, ← ih, left_distrib]
simp [OfNat.ofNat, S.ofNat]
erw [S.zero_add, S.mul_one]
theorem ex2 [S α] : (100000000000000000 : α) * 20000000000000000 = 2000000000000000000000000000000000 :=
S.ofNat_mul ..
#print ex2
|
[STATEMENT]
lemma (in prob_space) exponential_distributed_expectation:
"0 < l \<Longrightarrow> distributed M lborel X (exponential_density l) \<Longrightarrow> expectation X = 1 / l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>0 < l; distributed M lborel X (\<lambda>x. ennreal (exponential_density l x))\<rbrakk> \<Longrightarrow> expectation X = 1 / l
[PROOF STEP]
using erlang_ith_moment[of l X 0 1]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>0 < l; distributed M lborel X (\<lambda>x. ennreal (exponential_density l x))\<rbrakk> \<Longrightarrow> expectation (\<lambda>x. X x ^ 1) = fact (0 + 1) / (fact 0 * l ^ 1)
goal (1 subgoal):
1. \<lbrakk>0 < l; distributed M lborel X (\<lambda>x. ennreal (exponential_density l x))\<rbrakk> \<Longrightarrow> expectation X = 1 / l
[PROOF STEP]
by simp |
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import cartopy
import cartopy.io.shapereader as shpreader
import cartopy.crs as ccrs
import pandas as pd
"""
Data Prep
"""
# === Read in demographics CSV ===
# This csv was from the SQL restapi_demographics_table
names = ['id', 'session_id', 'platform', 'gender', 'age', 'ethnicity',
'country', 'timestamp', 'qiraah']
demo_df = pd.read_csv('demographics.csv', delimiter=';', header=None, names=names)
# === Cleanup ===
# Make all genders same
demo_df['gender'] = demo_df['gender'].str.lower()
# Convert Arabic to English.
demo_df.loc[demo_df['gender'] == 'ذكر', 'gender'] = 'male'
# === Country Counts ===
demo_country_df = demo_df['ethnicity'].value_counts().to_frame()
# Add normalized column for colors
country_counts = demo_country_df.values
norm_values = country_counts / np.linalg.norm(country_counts)
demo_country_df['norm_value'] = norm_values
"""
Plot Prep
"""
# === Country DB ===
shpfilename = shpreader.natural_earth(resolution='110m',
category='cultural',
name='admin_0_countries')
reader = shpreader.Reader(shpfilename)
countries = reader.records()
# === Plotting ===
# Colors
cmap = cm.YlGn
# Prepare plot
fig, ax = plt.subplots(subplot_kw={'projection': ccrs.PlateCarree()})
ax.add_feature(cartopy.feature.BORDERS, linestyle='-', alpha=.5)
ax.set_extent([-150, 60, -25, 60])
# Iterate over all countries in df
# Country code attr.: 'WB_A2'
for country in countries:
country_code = country.attributes['WB_A2']
try:
country_count = demo_country_df.ethnicity[country_code]
country_color = cmap(demo_country_df.norm_value[country_code])
ax.add_geometries(country.geometry, ccrs.PlateCarree(),
facecolor=country_color,
label=country_code)
except KeyError:
ax.add_geometries(country.geometry, ccrs.PlateCarree(),
facecolor=cmap(0),
label=country_code)
# Colorbar
sm = cm.ScalarMappable(cmap=cmap)
sm._A = country_counts  # give the mappable an array so plt.colorbar() accepts it
cb = plt.colorbar(sm, ax=ax, orientation='horizontal', pad=0.05)
cb.ax.set_xlabel('Number of Users')
plt.title("Tarteel's Demographic Breakdown")
plt.show()
fig.savefig('tarteel_dmg.png')
|
@userplot GroupedBar
grouped_xy(x::AbstractVector, y::AbstractMatrix) = x, y
grouped_xy(y::AbstractMatrix) = 1:size(y,1), y
@recipe function f(g::GroupedBar)
x, y = grouped_xy(g.args...)
nr, nc = size(y)
isstack = pop!(d, :bar_position, :dodge) == :stack
# extract xnums and set default bar width.
# might need to set xticks as well
xnums = if eltype(x) <: Number
bar_width --> (0.8 * mean(diff(x)))
x
else
bar_width --> 0.8
ux = unique(x)
xnums = (1:length(ux)) - 0.5
xticks --> (xnums, ux)
xnums
end
@assert length(xnums) == nr
# compute the x centers. for dodge, make a matrix for each column
x = if isstack
x
else
bws = d[:bar_width] / nc
bar_width := bws
xmat = zeros(nr,nc)
for r=1:nr
bw = _cycle(bws, r)
farleft = xnums[r] - 0.5 * (bw * nc)
for c=1:nc
xmat[r,c] = farleft + 0.5bw + (c-1)*bw
end
end
xmat
end
# compute fillrange
fillrange := if isstack
# shift y/fillrange up
y = copy(y)
fr = zeros(nr, nc)
for c=2:nc
for r=1:nr
fr[r,c] = y[r,c-1]
y[r,c] += fr[r,c]
end
end
fr
else
get(d, :fillrange, nothing)
end
seriestype := :bar
x, y
end
|
[STATEMENT]
lemma inf_Inf: assumes nonempty: "A \<noteq> {}"
shows "inf x (Inf A) = Inf ((inf x) ` A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inf x (Inf A) = Inf (inf x ` A)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
A \<noteq> {}
goal (1 subgoal):
1. inf x (Inf A) = Inf (inf x ` A)
[PROOF STEP]
by (auto simp add: INF_inf_const1 nonempty) |
Require StlcFix.SpecSyntax.
Require StlcIso.SpecSyntax.
Require Import BacktransFI.UValHelpers.
Require Import StlcFix.SpecTyping.
Require Import StlcFix.StlcOmega.
Require Import StlcFix.LemmasTyping.
Require Import StlcFix.SpecEvaluation.
Require Import StlcFix.LemmasEvaluation.
Require Import StlcIso.SpecEvaluation.
Require Import StlcIso.LemmasEvaluation.
Require Import LogRelFI.PseudoType.
Require Import LogRelFI.LemmasPseudoType.
Require Import LogRelFI.LR.
Require Import LogRelFI.LemmasLR.
Require Import LogRelFI.LemmasIntro.
Require Import LogRelFI.LemmasInversion.
Require Import Lia.
Require Import Db.Lemmas.
Require Import UValFI.UVal.
Require StlcIso.Fix.
Require Lia.
Definition uvalApp_pctx₁ n ts₂ τ τ' :=
F.papp₁
(F.papp₂
(F.abs (UValFI n (I.tarr τ τ'))
(F.abs (UValFI n τ)
(F.app (caseArrUp n (F.var 1) τ τ') (F.var 0))))
F.phole)
ts₂.
Definition uvalApp_pctxA₁ n ts₂ τ τ' :=
F.a_papp₁ (UValFI n τ) (UValFI n τ')
(F.a_papp₂ (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n τ) (UValFI n τ')
(F.a_app (UValFI n τ) (UValFI n τ')
(caseArrUpA n (F.a_var 1) τ τ')
(F.a_var 0))))
F.a_phole)
ts₂.
Definition uvalApp_pctx₂ n ts₁ τ τ' :=
F.papp₂
(F.app
(F.abs (UValFI n (I.tarr τ τ'))
(F.abs (UValFI n τ)
(F.app
(caseArrUp n (F.var 1) τ τ')
(F.var 0))))
ts₁)
F.phole.
Definition uvalApp_pctxA₂ n ts₁ τ τ' :=
F.a_papp₂ (UValFI n τ) (UValFI n τ')
(F.a_app (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n τ) (UValFI n τ')
(F.a_app (UValFI n τ) (UValFI n τ')
(caseArrUpA n (F.a_var 1) τ τ')
(F.a_var 0))))
ts₁)
F.a_phole.
Definition uvalApp n ts₁ ts₂ τ τ' :=
F.app (F.app (F.abs (UValFI n (I.tarr τ τ')) (F.abs (UValFI n τ) (F.app (caseArrUp n (F.var 1) τ τ') (F.var 0)))) ts₁) ts₂.
Definition uvalAppA n ts₁ ts₂ τ τ' :=
F.a_app (UValFI n τ) (UValFI n τ')
(F.a_app (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n (I.tarr τ τ')) (F.tarr (UValFI n τ) (UValFI n τ'))
(F.a_abs (UValFI n τ) (UValFI n τ')
(F.a_app (UValFI n τ) (UValFI n τ')
(caseArrUpA n (F.a_var 1) τ τ')
(F.a_var 0))))
ts₁)
ts₂.
(* Arguments uvalApp_pctx₁ n ts₂ : simpl never. *)
(* Arguments uvalApp_pctx₂ n ts₁ : simpl never. *)
(* Arguments uvalApp n ts₁ ts₂ : simpl never. *)
Lemma uvalApp_T {n ts₁ ts₂ Γ τ τ'} :
⟪ Γ ⊢ ts₁ : UValFI n (I.tarr τ τ') ⟫ →
⟪ Γ ⊢ ts₂ : UValFI n τ ⟫ →
⟪ Γ ⊢ uvalApp n ts₁ ts₂ τ τ' : UValFI n τ' ⟫.
Proof.
unfold uvalApp.
eauto with typing uval_typing.
Qed.
Lemma uvalApp_pctx₁_T {n ts₂ Γ τ τ'} :
⟪ Γ ⊢ ts₂ : UValFI n τ ⟫ →
⟪ ⊢ uvalApp_pctx₁ n ts₂ τ τ' : Γ , UValFI n (I.tarr τ τ') → Γ , UValFI n τ' ⟫.
Proof.
unfold uvalApp_pctx₁.
eauto with typing uval_typing.
Qed.
Lemma uvalApp_pctx₂_T {n ts₁ Γ τ τ'} :
⟪ Γ ⊢ ts₁ : UValFI n (I.tarr τ τ') ⟫ →
⟪ ⊢ uvalApp_pctx₂ n ts₁ τ τ' : Γ , UValFI n τ → Γ , UValFI n τ' ⟫.
Proof.
unfold uvalApp_pctx₂.
eauto with typing uval_typing.
Qed.
Lemma uvalAppA_T {n ts₁ ts₂ Γ τ τ'} :
⟪ Γ a⊢ ts₁ : UValFI n (I.tarr τ τ') ⟫ →
⟪ Γ a⊢ ts₂ : UValFI n τ ⟫ →
⟪ Γ a⊢ uvalAppA n ts₁ ts₂ τ τ' : UValFI n τ' ⟫.
Proof.
unfold uvalAppA.
eauto with typing uval_typing.
Qed.
Lemma uvalApp_pctxA₁_T {n ts₂ Γ τ τ'} :
⟪ Γ a⊢ ts₂ : UValFI n τ ⟫ →
⟪ a⊢ uvalApp_pctxA₁ n ts₂ τ τ' : Γ , UValFI n (I.tarr τ τ') → Γ , UValFI n τ' ⟫.
Proof.
unfold uvalApp_pctxA₁.
eauto with typing uval_typing.
Qed.
Lemma uvalApp_pctxA₂_T {n ts₁ Γ τ τ'} :
⟪ Γ a⊢ ts₁ : UValFI n (I.tarr τ τ') ⟫ →
⟪ a⊢ uvalApp_pctxA₂ n ts₁ τ τ' : Γ , UValFI n τ → Γ , UValFI n τ' ⟫.
Proof.
unfold uvalApp_pctxA₂.
eauto with typing uval_typing.
Qed.
Lemma eraseAnnot_uvalAppA {n ts₁ ts₂ τ τ'} :
eraseAnnot (uvalAppA n ts₁ ts₂ τ τ') = uvalApp n (eraseAnnot ts₁) (eraseAnnot ts₂) τ τ'.
Proof.
unfold uvalAppA, uvalApp.
cbn.
now rewrite ?eraseAnnot_caseArrUpA.
Qed.
Lemma eraseAnnot_pctx_uvalApp_pctxA₁ {n ts₂ τ τ'} :
eraseAnnot_pctx (uvalApp_pctxA₁ n ts₂ τ τ') = uvalApp_pctx₁ n (eraseAnnot ts₂) τ τ'.
Proof.
unfold uvalApp_pctxA₁, uvalApp_pctx₁.
cbn.
now rewrite ?eraseAnnot_caseArrUpA.
Qed.
Lemma eraseAnnot_pctx_uvalApp_pctxA₂ {n ts₁ τ τ'} :
eraseAnnot_pctx (uvalApp_pctxA₂ n ts₁ τ τ') = uvalApp_pctx₂ n (eraseAnnot ts₁) τ τ'.
Proof.
unfold uvalApp_pctxA₂, uvalApp_pctx₂.
cbn.
now rewrite ?eraseAnnot_caseArrUpA.
Qed.
#[export]
Hint Resolve uvalApp_T : uval_typing.
#[export]
Hint Resolve uvalApp_pctx₁_T : uval_typing.
#[export]
Hint Resolve uvalApp_pctx₂_T : uval_typing.
#[export]
Hint Resolve uvalAppA_T : uval_typing.
#[export]
Hint Resolve uvalApp_pctxA₁_T : uval_typing.
#[export]
Hint Resolve uvalApp_pctxA₂_T : uval_typing.
Local Ltac crush :=
repeat (repeat F.crushStlcSyntaxMatchH;
repeat I.crushStlcSyntaxMatchH;
repeat F.crushStlcEval;
repeat I.crushStlcEval;
(* repeat crushUtlcEvaluationMatchH; *)
(* repeat crushUtlcEvaluationMatchH2; *)
(* repeat crushUtlcEvaluationMatchH2; *)
(* repeat crushUtlcScopingMatchH; *)
repeat crushDbSyntaxMatchH;
repeat crushDbLemmasMatchH;
repeat crushDbLemmasRewriteH;
try assumption;
crushOfType;
trivial;
eauto using caseUnit_pctx_ectx, caseSum_pctx_ectx, caseArr_pctx_ectx, upgrade_value, downgrade_value with typing
).
Lemma uvalApp_sub {n ts₁ ts₂ τ τ' γ} :
(uvalApp n ts₁ ts₂ τ τ') [γ] = uvalApp n (ts₁[γ]) (ts₂[γ]) τ τ'.
Proof.
unfold uvalApp; cbn.
crush; rewrite caseArrUp_sub;
crush.
Qed.
Lemma termrel_uvalApp {d w n p ts₁ tu₁ ts₂ tu₂ τ τ'} :
ValidTy τ → ValidTy τ' →
dir_world_prec n w d p →
termrel d w (pEmulDV n p (I.tarr τ τ')) ts₁ tu₁ →
(∀ w' : World, w' ≤ w → termrel d w' (pEmulDV n p τ) ts₂ tu₂) →
termrel d w (pEmulDV n p τ') (uvalApp n ts₁ ts₂ τ τ') (I.app tu₁ tu₂).
Proof.
intros vτ vτ' dwp tr₁ tr₂.
unfold uvalApp, caseArrUp, caseArrUp_pctx.
(* evaluate ts₁ and tu₁ *)
eapply (termrel_ectx' tr₁); F.inferContext; I.inferContext.
unfold pctx_cat, I.ECtx; crush.
(* continuation boilerplate *)
intros w' futw vs₁ vu₁ vr₁.
destruct (valrel_implies_OfType vr₁) as [[vvs₁ ?] [? ?]].
simpl in H, H1.
cbn.
rewrite F.pctx_cat_app; cbn.
(* beta-reduce the outer let *)
eapply termrel_antired_eval_left.
eapply (F.eval_from_eval₀ (F.eval_beta vvs₁)); F.inferContext; crush.
cbn; crush.
(* bureaucracy *)
fold (caseArr n (F.app (upgrade n 1 (I.tarr τ τ')) (F.var 1)) τ τ').
rewrite caseArr_sub; cbn; crush; rewrite upgrade_sub.
(* evaluate ts₂ and tu₂ *)
specialize (tr₂ w' futw).
eapply (termrel_ectx' tr₂); F.inferContext; I.inferContext;
unfold pctx_cat, I.ECtx; crush.
(* continuation boilerplate *)
intros w'' futw' vs₂ vu₂ vr₂.
destruct (valrel_implies_Value vr₂) as [vvs₂ ?].
cbn.
(* beta-reduce the remaining let *)
eapply termrel_antired_eval_left.
eapply (F.eval_from_eval₀ (F.eval_beta vvs₂)); F.inferContext; crush.
cbn; crush.
(* bureaucracy *)
fold (caseArr n (F.app (upgrade n 1 (I.tarr τ τ')) (F.var 1)) τ τ').
rewrite ?caseArr_sub; cbn; crush; rewrite ?upgrade_sub.
rewrite <- ?ap_liftSub; rewrite -> ?liftSub_wkm; rewrite (apply_wkm_beta1_cancel vs₁ vs₂).
(* execute the upgrade *)
assert (w'' ≤ w) by lia.
simpl in H, H1.
assert (valrel d w'' (pEmulDV n p (I.tarr τ τ')) vs₁ vu₁) by eauto using valrel_mono.
assert (trupg : termrel d w'' (pEmulDV (n + 1) p (I.tarr τ τ')) (F.app (upgrade n 1 (I.tarr τ τ')) vs₁) vu₁)
by (eapply upgrade_works'; crushValidTy; now eapply (dwp_mono dwp)).
unfold caseArr.
eapply (termrel_ectx' trupg); F.inferContext; I.inferContext; cbn; crush.
(* continuation bureaucracy *)
intros w''' futw'' vs₁' vu₁' vr₁'.
replace (n + 1) with (S n) in vr₁' by lia.
destruct (valrel_implies_OfType vr₁') as [[_ _] [_ ?]].
simpl in H5.
destruct (valrel_implies_Value vr₁').
(* case analysis *)
eapply invert_valrel_pEmulDV_for_caseUValArr in vr₁'.
destruct vr₁' as [(vs₁'' & ? & es & vr₁')|(? & div)]; subst.
- (* Correct case *)
(* caseArr succeeds *)
eapply termrel_antired_star_left.
fold (caseArr n (F.inl vs₁'') τ τ').
eapply (F.evalstar_ctx' es); F.inferContext; crush.
cbn.
crush.
(* application works *)
eapply valrel_in_termrel in vr₁'.
eapply (termrel_app vr₁').
(* application argument is also fine *)
eauto using valrel_in_termrel, valrel_mono.
- (* unk case *)
eapply dwp_invert_imprecise in dwp; subst.
eapply termrel_div_lt.
fold (caseArr n vs₁' τ τ').
eapply (F.divergence_closed_under_evalcontext' div); F.inferContext; cbn; crush.
- split; trivial.
- simpl; trivial.
Qed.
Lemma uvalApp_pctx₁_app {n ts₁ ts₂ τ τ'} :
F.pctx_app ts₁ (uvalApp_pctx₁ n ts₂ τ τ') = uvalApp n ts₁ ts₂ τ τ'.
Proof.
crush.
Qed.
Lemma uvalApp_pctx₂_app {n ts₁ ts₂ τ τ'} :
F.pctx_app ts₂ (uvalApp_pctx₂ n ts₁ τ τ') = uvalApp n ts₁ ts₂ τ τ'.
Proof.
crush.
Qed.
Arguments uvalApp_pctx₁ n ts₂ : simpl never.
Arguments uvalApp_pctx₂ n ts₁ : simpl never.
Arguments uvalApp n ts₁ ts₂ : simpl never.
Arguments uvalApp_pctxA₁ n ts₂ : simpl never.
Arguments uvalApp_pctxA₂ n ts₁ : simpl never.
Arguments uvalAppA n ts₁ ts₂ : simpl never.
|
[STATEMENT]
lemma main_is_initial[simp]: "initialproc fg (main fg)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. initialproc fg (main fg)
[PROOF STEP]
by (unfold initialproc_def) simp |
theory Ex009
imports Main
begin
lemma " \<not>C \<longrightarrow> A \<or> ((A \<or> C) \<longrightarrow> B)"
proof -
{
assume "\<not>C"
{
assume "\<not>(A \<or> ((A \<or> C) \<longrightarrow> B))"
{
assume "A \<or> C"
{
assume A
hence "A \<or> ((A \<or> C) \<longrightarrow> B)" by (rule disjI1)
with \<open>\<not>(A \<or> ((A \<or> C) \<longrightarrow> B))\<close> have False by contradiction
hence B by (rule FalseE)
}
{
assume C
with \<open>\<not>C\<close> have False by contradiction
hence B by (rule FalseE)
}
with \<open>A \<or> C\<close> and \<open>A \<Longrightarrow> B\<close> have B by (rule disjE)
}
hence "A \<or> C \<longrightarrow> B" by (rule impI)
hence "A \<or> (A \<or> C \<longrightarrow> B)" by (rule disjI2)
with \<open>\<not>(A \<or> ((A \<or> C) \<longrightarrow> B))\<close> have False by contradiction
}
hence "\<not>\<not>(A \<or> ((A \<or> C) \<longrightarrow> B))" by (rule notI)
hence "A \<or> ((A \<or> C) \<longrightarrow> B)" by (rule notnotD)
}
thus ?thesis by (rule impI)
qed
|
As you are probably aware, one of the big events of the year is our annual recital. Participation in the recital isn’t mandatory. Your child may get the benefit of learning a dance routine even if he or she doesn’t wish to perform. We encourage everyone to participate as part of our curriculum, but no one is obligated in any way to do so. Participation in our recital requires learning a routine, paying a recital fee and purchasing a costume. Recital fees pay for the auditorium and production costs, and enable you to invite as many people as you wish without having to sell tickets or ask guests to pay an admission fee. Recital fees are due October 31st, and will be posted in September. Recital fees are nonrefundable.
Costumes for the recital are ordered in January and must be paid for in full before they will be ordered. Recital costumes will be selected during class time in December. Costume fees are due January 12, 2017. Costume fees are nonrefundable.
It is important that your child attends class on a regular basis. Children get sick and conflicts sometimes arise, but if absenteeism is kept to a minimum, it will greatly enhance your child’s progress. Unnecessary absences can turn a positive experience into a stressful one.
You will be contacted by email if the weather causes us to cancel your child’s class. You may also call the studio to check on cancellations.
There is no eating or drinking in our lobby. Children who are in our lobby need to refrain from running, talking loudly and playing in the restrooms. |
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
! This file was ported from Lean 3 source module data.fin.tuple.nat_antidiagonal
! leanprover-community/mathlib commit 98e83c3d541c77cdb7da20d79611a780ff8e7d90
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.BigOperators.Fin
import Mathlib.Data.Finset.NatAntidiagonal
import Mathlib.Data.Fin.VecNotation
import Mathlib.Logic.Equiv.Fin
/-!
# Collections of tuples of naturals with the same sum
This file generalizes `List.Nat.Antidiagonal n`, `Multiset.Nat.Antidiagonal n`, and
`Finset.Nat.Antidiagonal n` from the pair of elements `x : ℕ × ℕ` such that `n = x.1 + x.2`, to
the sequence of elements `x : Fin k → ℕ` such that `n = ∑ i, x i`.
## Main definitions
* `List.Nat.antidiagonalTuple`
* `Multiset.Nat.antidiagonalTuple`
* `Finset.Nat.antidiagonalTuple`
## Main results
* `antidiagonalTuple 2 n` is analogous to `antidiagonal n`:
* `List.Nat.antidiagonalTuple_two`
* `Multiset.Nat.antidiagonalTuple_two`
* `Finset.Nat.antidiagonalTuple_two`
## Implementation notes
While we could implement this by filtering `(Fintype.PiFinset $ λ _, range (n + 1))` or similar,
this implementation would be much slower.
In the future, we could consider generalizing `Finset.Nat.antidiagonalTuple` further to
support finitely-supported functions, as is done with `cut` in
`archive/100-theorems-list/45_partition.lean`.
-/
open BigOperators
/-! ### Lists -/
namespace List.Nat
/-- `List.antidiagonalTuple k n` is a list of all `k`-tuples which sum to `n`.
This list contains no duplicates (`List.Nat.nodup_antidiagonalTuple`), and is sorted
lexicographically (`List.Nat.antidiagonalTuple_pairwise_pi_lex`), starting with `![0, ..., n]`
and ending with `![n, ..., 0]`.
```
#eval antidiagonalTuple 3 2
-- [![0, 0, 2], ![0, 1, 1], ![0, 2, 0], ![1, 0, 1], ![1, 1, 0], ![2, 0, 0]]
```
-/
def antidiagonalTuple : ∀ k, ℕ → List (Fin k → ℕ)
| 0, 0 => [![]]
| 0, _ + 1 => []
| k + 1, n =>
(List.Nat.antidiagonal n).bind fun ni =>
(antidiagonalTuple k ni.2).map fun x => Fin.cons ni.1 x
#align list.nat.antidiagonal_tuple List.Nat.antidiagonalTuple
@[simp]
theorem antidiagonalTuple_zero_zero : antidiagonalTuple 0 0 = [![]] :=
rfl
#align list.nat.antidiagonal_tuple_zero_zero List.Nat.antidiagonalTuple_zero_zero
@[simp]
theorem antidiagonalTuple_zero_succ (n : ℕ) : antidiagonalTuple 0 n.succ = [] :=
rfl
#align list.nat.antidiagonal_tuple_zero_succ List.Nat.antidiagonalTuple_zero_succ
theorem mem_antidiagonalTuple {n : ℕ} {k : ℕ} {x : Fin k → ℕ} :
x ∈ antidiagonalTuple k n ↔ (∑ i, x i) = n := by
induction x using Fin.consInduction generalizing n with
| h0 =>
cases n
· simp
· simp [eq_comm]
| h x₀ x ih =>
simp_rw [Fin.sum_cons]
rw [antidiagonalTuple] -- porting note: simp_rw doesn't use the equation lemma properly
simp_rw [List.mem_bind, List.mem_map,
List.Nat.mem_antidiagonal, Fin.cons_eq_cons, exists_eq_right_right, ih,
@eq_comm _ _ (Prod.snd _), and_comm (a := Prod.snd _ = _),
←Prod.mk.inj_iff (a₁ := Prod.fst _), Prod.mk.eta, exists_eq_right]
#align list.nat.mem_antidiagonal_tuple List.Nat.mem_antidiagonalTuple
/-- The antidiagonal of `n` does not contain duplicate entries. -/
theorem nodup_antidiagonalTuple (k n : ℕ) : List.Nodup (antidiagonalTuple k n) := by
induction' k with k ih generalizing n
· cases n
· simp
· simp [eq_comm]
simp_rw [antidiagonalTuple, List.nodup_bind]
constructor
· intro i _
exact (ih i.snd).map (Fin.cons_right_injective (α := fun _ => ℕ) i.fst)
induction' n with n n_ih
· exact List.pairwise_singleton _ _
· rw [List.Nat.antidiagonal_succ]
refine' List.Pairwise.cons (fun a ha x hx₁ hx₂ => _) (n_ih.map _ fun a b h x hx₁ hx₂ => _)
· rw [List.mem_map] at hx₁ hx₂ ha
obtain ⟨⟨a, -, rfl⟩, ⟨x₁, -, rfl⟩, ⟨x₂, -, h⟩⟩ := ha, hx₁, hx₂
rw [Fin.cons_eq_cons] at h
injection h.1
· rw [List.mem_map] at hx₁ hx₂
obtain ⟨⟨x₁, hx₁, rfl⟩, ⟨x₂, hx₂, h₁₂⟩⟩ := hx₁, hx₂
dsimp at h₁₂
rw [Fin.cons_eq_cons, Nat.succ_inj'] at h₁₂
obtain ⟨h₁₂, rfl⟩ := h₁₂
rw [h₁₂] at h
exact h (List.mem_map_of_mem _ hx₁) (List.mem_map_of_mem _ hx₂)
#align list.nat.nodup_antidiagonal_tuple List.Nat.nodup_antidiagonalTuple
theorem antidiagonalTuple_zero_right : ∀ k, antidiagonalTuple k 0 = [0]
| 0 => (congr_arg fun x => [x]) <| Subsingleton.elim _ _
| k + 1 =>
by
rw [antidiagonalTuple, antidiagonal_zero, List.bind_singleton, antidiagonalTuple_zero_right k,
List.map_singleton]
exact congr_arg (fun x => [x]) Matrix.cons_zero_zero
#align list.nat.antidiagonal_tuple_zero_right List.Nat.antidiagonalTuple_zero_right
@[simp]
theorem antidiagonalTuple_one (n : ℕ) : antidiagonalTuple 1 n = [![n]] := by
simp_rw [antidiagonalTuple, antidiagonal, List.range_succ, List.map_append, List.map_singleton,
tsub_self, List.bind_append, List.bind_singleton, antidiagonalTuple_zero_zero,
List.map_singleton, List.map_bind]
conv_rhs => rw [← List.nil_append [![n]]]
congr 1
simp_rw [List.bind_eq_nil, List.mem_range, List.map_eq_nil]
intro x hx
obtain ⟨m, rfl⟩ := Nat.exists_eq_add_of_lt hx
rw [add_assoc, add_tsub_cancel_left, antidiagonalTuple_zero_succ]
#align list.nat.antidiagonal_tuple_one List.Nat.antidiagonalTuple_one
theorem antidiagonalTuple_two (n : ℕ) :
antidiagonalTuple 2 n = (antidiagonal n).map fun i => ![i.1, i.2] := by
rw [antidiagonalTuple]
simp_rw [antidiagonalTuple_one, List.map_singleton]
rw [List.map_eq_bind]
rfl
#align list.nat.antidiagonal_tuple_two List.Nat.antidiagonalTuple_two
theorem antidiagonalTuple_pairwise_pi_lex :
∀ k n, (antidiagonalTuple k n).Pairwise (Pi.Lex (· < ·) @fun _ => (· < ·))
| 0, 0 => List.pairwise_singleton _ _
| 0, _ + 1 => List.Pairwise.nil
| k + 1, n =>
by
simp_rw [antidiagonalTuple, List.pairwise_bind, List.pairwise_map, List.mem_map,
forall_exists_index, and_imp, forall_apply_eq_imp_iff₂]
simp only [mem_antidiagonal, Prod.forall, and_imp, forall_apply_eq_imp_iff₂]
simp only [Fin.pi_lex_lt_cons_cons, eq_self_iff_true, true_and_iff, lt_self_iff_false,
false_or_iff]
refine' ⟨fun _ _ _ => antidiagonalTuple_pairwise_pi_lex k _, _⟩
induction' n with n n_ih
· rw [antidiagonal_zero]
exact List.pairwise_singleton _ _
· rw [antidiagonal_succ, List.pairwise_cons, List.pairwise_map]
refine' ⟨fun p hp x hx y hy => _, _⟩
· rw [List.mem_map, Prod.exists] at hp
obtain ⟨a, b, _, rfl : (Nat.succ a, b) = p⟩ := hp
exact Or.inl (Nat.zero_lt_succ _)
dsimp
simp_rw [Nat.succ_inj', Nat.succ_lt_succ_iff]
exact n_ih
#align list.nat.antidiagonal_tuple_pairwise_pi_lex List.Nat.antidiagonalTuple_pairwise_pi_lex
end List.Nat
/-! ### Multisets -/
namespace Multiset.Nat
/-- `Multiset.Nat.antidiagonalTuple k n` is a multiset of `k`-tuples summing to `n` -/
def antidiagonalTuple (k n : ℕ) : Multiset (Fin k → ℕ) :=
List.Nat.antidiagonalTuple k n
#align multiset.nat.antidiagonal_tuple Multiset.Nat.antidiagonalTuple
@[simp]
theorem antidiagonalTuple_zero_zero : antidiagonalTuple 0 0 = {![]} :=
rfl
#align multiset.nat.antidiagonal_tuple_zero_zero Multiset.Nat.antidiagonalTuple_zero_zero
@[simp]
theorem antidiagonalTuple_zero_succ (n : ℕ) : antidiagonalTuple 0 n.succ = 0 :=
rfl
#align multiset.nat.antidiagonal_tuple_zero_succ Multiset.Nat.antidiagonalTuple_zero_succ
theorem mem_antidiagonalTuple {n : ℕ} {k : ℕ} {x : Fin k → ℕ} :
x ∈ antidiagonalTuple k n ↔ (∑ i, x i) = n :=
List.Nat.mem_antidiagonalTuple
#align multiset.nat.mem_antidiagonal_tuple Multiset.Nat.mem_antidiagonalTuple
theorem nodup_antidiagonalTuple (k n : ℕ) : (antidiagonalTuple k n).Nodup :=
List.Nat.nodup_antidiagonalTuple _ _
#align multiset.nat.nodup_antidiagonal_tuple Multiset.Nat.nodup_antidiagonalTuple
theorem antidiagonalTuple_zero_right (k : ℕ) : antidiagonalTuple k 0 = {0} :=
congr_arg _ (List.Nat.antidiagonalTuple_zero_right k)
#align multiset.nat.antidiagonal_tuple_zero_right Multiset.Nat.antidiagonalTuple_zero_right
@[simp]
theorem antidiagonalTuple_one (n : ℕ) : antidiagonalTuple 1 n = {![n]} :=
congr_arg _ (List.Nat.antidiagonalTuple_one n)
#align multiset.nat.antidiagonal_tuple_one Multiset.Nat.antidiagonalTuple_one
theorem antidiagonalTuple_two (n : ℕ) :
antidiagonalTuple 2 n = (antidiagonal n).map fun i => ![i.1, i.2] :=
congr_arg _ (List.Nat.antidiagonalTuple_two n)
#align multiset.nat.antidiagonal_tuple_two Multiset.Nat.antidiagonalTuple_two
end Multiset.Nat
/-! ### Finsets -/
namespace Finset.Nat
/-- `Finset.Nat.antidiagonalTuple k n` is a finset of `k`-tuples summing to `n` -/
def antidiagonalTuple (k n : ℕ) : Finset (Fin k → ℕ) :=
⟨Multiset.Nat.antidiagonalTuple k n, Multiset.Nat.nodup_antidiagonalTuple k n⟩
#align finset.nat.antidiagonal_tuple Finset.Nat.antidiagonalTuple
@[simp]
theorem antidiagonalTuple_zero_zero : antidiagonalTuple 0 0 = {![]} :=
rfl
#align finset.nat.antidiagonal_tuple_zero_zero Finset.Nat.antidiagonalTuple_zero_zero
@[simp]
theorem antidiagonalTuple_zero_succ (n : ℕ) : antidiagonalTuple 0 n.succ = ∅ :=
rfl
#align finset.nat.antidiagonal_tuple_zero_succ Finset.Nat.antidiagonalTuple_zero_succ
theorem mem_antidiagonalTuple {n : ℕ} {k : ℕ} {x : Fin k → ℕ} :
x ∈ antidiagonalTuple k n ↔ (∑ i, x i) = n :=
List.Nat.mem_antidiagonalTuple
#align finset.nat.mem_antidiagonal_tuple Finset.Nat.mem_antidiagonalTuple
theorem antidiagonalTuple_zero_right (k : ℕ) : antidiagonalTuple k 0 = {0} :=
Finset.eq_of_veq (Multiset.Nat.antidiagonalTuple_zero_right k)
#align finset.nat.antidiagonal_tuple_zero_right Finset.Nat.antidiagonalTuple_zero_right
@[simp]
theorem antidiagonalTuple_one (n : ℕ) : antidiagonalTuple 1 n = {![n]} :=
Finset.eq_of_veq (Multiset.Nat.antidiagonalTuple_one n)
#align finset.nat.antidiagonal_tuple_one Finset.Nat.antidiagonalTuple_one
theorem antidiagonalTuple_two (n : ℕ) :
antidiagonalTuple 2 n = (antidiagonal n).map (piFinTwoEquiv fun _ => ℕ).symm.toEmbedding :=
Finset.eq_of_veq (Multiset.Nat.antidiagonalTuple_two n)
#align finset.nat.antidiagonal_tuple_two Finset.Nat.antidiagonalTuple_two
section EquivProd
/-- The disjoint union of antidiagonal tuples `Σ n, antidiagonalTuple k n` is equivalent to the
`k`-tuple `Fin k → ℕ`. This is such an equivalence, obtained by mapping `(n, x)` to `x`.
This is the tuple version of `Finset.Nat.sigmaAntidiagonalEquivProd`. -/
@[simps]
def sigmaAntidiagonalTupleEquivTuple (k : ℕ) : (Σ n, antidiagonalTuple k n) ≃ (Fin k → ℕ)
where
toFun x := x.2
invFun x := ⟨∑ i, x i, x, mem_antidiagonalTuple.mpr rfl⟩
left_inv := fun ⟨_, _, h⟩ => Sigma.subtype_ext (mem_antidiagonalTuple.mp h) rfl
right_inv _ := rfl
#align finset.nat.sigma_antidiagonal_tuple_equiv_tuple Finset.Nat.sigmaAntidiagonalTupleEquivTuple
end EquivProd
end Finset.Nat
|
For every occasion, from the light to the creamy with chunky ingredients, we have got the perfect soup for you.
Soups marked the beginning of the Hügli story and remain part of our core convenience range.
Hügli offers a selection of over 200 dessert options ranging from mousses to puddings that are quick and simple to prepare.
Hügli offers its customers an innovative Flavour System comprising base notes, profile flavours and top notes.
The next edition of PLMA’s annual “World of Private Label” International Trade Show is only a few weeks away. |
[STATEMENT]
theorem JF1set_simps:
"JF1set a = F1set1 (dtor1 a) \<union>
((\<Union> b \<in> F1set2 (dtor1 a). JF1set b) \<union>
(\<Union> b \<in> F1set3 (dtor1 a). JF2set b))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. JF1set a = F1set1 (dtor1 a) \<union> (\<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a)))
[PROOF STEP]
apply (rule equalityI)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. JF1set a \<subseteq> F1set1 (dtor1 a) \<union> (\<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a)))
2. F1set1 (dtor1 a) \<union> (\<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a))) \<subseteq> JF1set a
[PROOF STEP]
apply (rule conjunct1[OF JFset_le])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. F1set1 (dtor1 a) \<union> (\<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a))) \<subseteq> JF1set a
[PROOF STEP]
apply (rule Un_least)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. F1set1 (dtor1 a) \<subseteq> JF1set a
2. \<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a)) \<subseteq> JF1set a
[PROOF STEP]
apply (rule F1set1_incl_JF1set)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (JF1set ` F1set2 (dtor1 a)) \<union> \<Union> (JF2set ` F1set3 (dtor1 a)) \<subseteq> JF1set a
[PROOF STEP]
apply (rule Un_least)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<Union> (JF1set ` F1set2 (dtor1 a)) \<subseteq> JF1set a
2. \<Union> (JF2set ` F1set3 (dtor1 a)) \<subseteq> JF1set a
[PROOF STEP]
apply (rule UN_least)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>b. b \<in> F1set2 (dtor1 a) \<Longrightarrow> JF1set b \<subseteq> JF1set a
2. \<Union> (JF2set ` F1set3 (dtor1 a)) \<subseteq> JF1set a
[PROOF STEP]
apply (erule F1set2_JF1set_incl_JF1set)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (JF2set ` F1set3 (dtor1 a)) \<subseteq> JF1set a
[PROOF STEP]
apply (rule UN_least)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>b. b \<in> F1set3 (dtor1 a) \<Longrightarrow> JF2set b \<subseteq> JF1set a
[PROOF STEP]
apply (erule F1set3_JF2set_incl_JF1set)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
\section{Introduction}
\label{sec:intro}
A common goal in digital simulations of life is to induce the emergence of
certain structures or processes. These might include self-replicating or
autopoietic organisms, evolutionary processes, and intelligent behavior. Often
identifying the growth of these structures and processes is reduced to
identifying a growth in complexity. Hence, designers of artificial life
simulations are interested in the question of measuring complexity.
As artificial life simulations scale, quantitatively assessing the simulation's
success in meeting the designer's criteria is essential. Identifying and
selecting amongst the design choices of a given simulation in a qualitative
manner is not scalable and likely to admit experimental bias. As an analogy,
imagine constructing a machine-learning model to learn a given task without an
objective function. Curiosity-driven exploration and reinforcement learning can
result in the model learning to successfully complete the task. However, from
the empiricist's perspective, assessing the model's ability to complete
the task is critical. At the very least, some criterion is needed for
evaluating the model, if not training it. Similarly, in artificial life, while
the measure of complexity is typically not used as an objective to guide the
active simulation, it is at least essential to evaluate the post hoc success of
the simulation.
Complexity is notoriously ill-defined, subjective, and difficult to measure
quantitatively~\citep{gell2002complexity, mitchell2009complexity,
wiesner2019measuring}. One of the challenges in measuring complexity is
identifying the ``Goldilocks zone'' between total order and total disorder
where complexity may reside (\emph{c.f.}
figure~\ref{fig:complexity_and_entropy}). Information theoretic measures such
as Shannon entropy~\citep{shannon1948} and Kolmogorov complexity (sometimes
called Algorithmic Information Content)~\citep{kolmogorov1965, solomonoff1964}
are actually measures of randomness. As figure~\ref{fig:complexity_and_entropy}
illustrates, these measures increase monotonically with the disorder of a
system.
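For reference, for a discrete random variable $X$ taking values $x$ with
probability $p(x)$, the Shannon entropy is
\begin{equation}
  H(X) = -\sum_{x} p(x) \log p(x),
\end{equation}
which vanishes for a deterministic (fully ordered) source and is maximized by
the uniform (fully disordered) one; it therefore tracks randomness rather than
complexity.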
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{figures/complexity_and_entropy}
\caption{An illustration of the one-hump of complexity (solid line) between
order and disorder. Measures such as Shannon entropy or algorithmic
information content (AIC) (both illustrated by the dashed line) grow
monotonically with disorder.}
\label{fig:complexity_and_entropy}
\end{figure}
Because of this difficulty, many measures of complexity have been
proposed~\citep{lloyd2001measures}, all with differing trade-offs. Often they
fail the ``one-hump'' criterion~\citep{adami2002complexity}, increasing with
order or disorder. This is typical of the aforementioned information theoretic
criteria (Shannon entropy and Kolmogorov complexity) and constructions which
use them~\citep{lloyd1988complexity}. Other measures which satisfy the one-hump
test are not easily made operational requiring either a large degree of
subjective input from the designer or are exceedingly difficult to
compute~\citep{crutchfield1989inferring, gell1996information,
grassberger1986toward}.
We propose yet another measure of complexity, Hierarchical Information Content
(HIC). The central tenet of HIC is that complexity growth is found at the
compositions of ordered and disordered systems. We elaborate and defend the
motivating principles of HIC in section~\ref{sec:finding_complexity}. We do not
intend for HIC to serve as an ersatz for complexity in a general sense, and in
fact we make no further attempt to rigorously define complexity. Rather,
we propose HIC as a measure which quantitatively captures some of the essential
aspects of complex systems. We promptly disclose two limitations of HIC. First,
HIC is not intended to work well for every case, but to work well for many
cases that are of interest, particularly in simulations of artificial life.
Second, while HIC can be used with few assumptions, in many instances it may
gain utility from the subjective input by the designer of the system.
|
%%
%% Beginning of file 'sample.tex'
%%
%% Modified 2015 December
%%
%% This is a sample manuscript marked up using the
%% AASTeX v6.x LaTeX 2e macros.
%% AASTeX is now based on Alexey Vikhlinin's emulateapj.cls
%% (Copyright 2000-2015). See the classfile for details.
%%
%% AASTeX requires revtex4-1.cls (http://publish.aps.org/revtex4/) and
%% other external packages (latexsym, graphicx, amssymb, longtable, and epsf).
%% All of these external packages should already be present in the modern TeX
%% distributions. If not they can also be obtained at www.ctan.org.
%% The first piece of markup in an AASTeX v6.x document is the \documentclass
%% command. LaTeX will ignore any data that comes before this command. The
%% documentclass can take an optional argument to modify the output style.
%% The command below calls the preprint style which will produce a tightly
%% typeset, one-column, single-spaced document. It is the default and thus
%% does not need to be explicitly stated.
%%
%% using aastex version 6
\documentclass{aastex6}
%% The other main article choice is a tightly typeset, two-column article
%% that more closely resembles the final typeset pdf article.
%%
%% \documentclass[twocolumn]{aastex6}
%%
%% There are other optional arguments one can invoke to allow other
%% actions.
%%
% These are the available options:
% manuscript : onecolumn, doublespace, 12pt fonts
% preprint : onecolumn, single space, 10pt fonts
% preprint2 : twocolumn, single space, 10pt fonts
% twocolumn : a two column article. Probably not needed, but here just in case.
% onecolumn : a one column article; default option.
% twocolappendix: make 2 column appendix
% onecolappendix: make 1 column appendix is the default.
% astrosymb : Loads Astrosymb font and define \astrocommands.
% tighten : Makes baselineskip slightly smaller
% times : uses times font instead of the default
% linenumbers : turn on lineno package.
% trackchanges : required to see the revision mark up and print output
% numberedappendix: Labels appendix sections A, B, ... This is the default.
% appendixfloats: Needed. Resets figure and table counters to zero
%% these can be used in any combination, e.g.
%%
%% \documentclass[twocolumn,twocolappendix,linenumbers,trackchanges]{aastex6}
%% If you want to create your own macros, you can do so
%% using \newcommand. Your macros should appear before
%% the \begin{document} command.
%%
\newcommand{\vdag}{(v)^\dagger}
\newcommand\aastex{AAS\TeX}
\newcommand\latex{La\TeX}
%% AASTeX 6.0 supports the ability to suppress the names and affiliations
%% of some authors and displaying them under a "collaboration" banner to
%% minimize the amount of author information that to be printed. This
%% should be reserved for articles with an extreme number of authors.
%%
%% Mark up commands to limit the number of authors on the front page.
\AuthorCallLimit=1
%% Will only show Schwarz & Muench since Schwarz and Muench
%% are in the same \author call.
\fullcollaborationName{The Friends of AASTeX Collaboration}
%% will print the collaboration text after the shortened author list.
%% These commands have to COME BEFORE the \author calls.
%%
%% Note that all of these authors will be shown in the published article.
%% This feature is meant to be used prior to acceptance to make the
%% front end of a long author article more manageable.
%% Use \allauthors at the manuscript end to show the full author list.
%% The following command can be used to set the latex table counters. It
%% is needed in this document because it uses a mix of latex tabular and
%% AASTeX deluxetables. In general it should not be needed.
%\setcounter{table}{1}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% The following commented section outlines numerous optional output that
%% can be displayed in the front matter or as running meta-data.
%%
%% You can insert a short comment on the title page using the command below.
%% \slugcomment{Not to appear in Nonlearned J., 45.}
%%
%% If you wish, you may supply running head information, although
%% this information may be modified by the editorial offices.
%%\shorttitle{\aastex sample article}
%%\shortauthors{Schwarz et al.}
%%
%% You can add a light gray and diagonal water-mark to the first page
%% with this command:
%% \watermark{text}
%% where "text", e.g. DRAFT, is the text to appear. If the text is
%% long you can control the water-mark size with:
%% \setwatermarkfontsize{dimension}
%% where dimension is any recognized LaTeX dimension, e.g. pt, in, etc.
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% This is the end of the preamble. Indicate the beginning of the
%% paper itself with \begin{document}.
\begin{document}
%% LaTeX will automatically break titles if they run longer than
%% one line. However, you may use \\ to force a line break if
%% you desire.
\title{An Example Article using \aastex v6.0}
%% Use \author, \affil, plus the \and command to format author and affiliation
%% information. If done correctly the peer review system will be able to
%% automatically put the author and affiliation information from the manuscript
%% and save the corresponding author the trouble of entering it by hand.
%%
%% The \affil should be used to document primary affiliations and the
%% \altaffil should be used for secondary affiliations, titles, or email.
%% Authors with the same affiliation can be grouped in a single
%% \author and \affil call.
\author{Greg J. Schwarz\altaffilmark{1,2} and August Muench\altaffilmark{1}}
\affil{American Astronomical Society \\
2000 Florida Ave., NW, Suite 300 \\
Washington, DC 20009-1231, USA}
\author{Butler Burton\altaffilmark{3}}
\affil{National Radio Astronomy Observatory}
\author{Amy Hendrickson}
\affil{TeXnology Inc}
\author{Julie Steffen\altaffilmark{4}}
\affil{American Astronomical Society \\
2000 Florida Ave., NW, Suite 300 \\
Washington, DC 20009-1231, USA}
%% Use the \and command so offset the last author.
\and
\author{Jeff Lewandowski\altaffilmark{5}}
\affil{IOP Publishing, Washington, DC 20005}
%% Notice that each of these authors has alternate affiliations, which
%% are identified by the \altaffilmark after each name. Specify alternate
%% affiliation information with \altaffiltext, with one command per each
%% affiliation.
\altaffiltext{1}{AAS Journals Data Scientist}
\altaffiltext{2}{[email protected]}
\altaffiltext{3}{AAS Journals Associate Editor-in-Chief}
\altaffiltext{4}{AAS Director of Publishing}
\altaffiltext{5}{IOP Senior Publisher for the AAS Journals}
%% Mark off the abstract in the ``abstract'' environment.
\begin{abstract}
This example manuscript is intended to serve as a tutorial and template for
authors to use when writing their own AAS Journal articles. The manuscript
includes a history of \aastex\ and documents the new features in the latest
version, 6.0, including many figure and table examples. Information on
features not explicitly mentioned in the article can be viewed in the
manuscript comments or more extensive online documentation. Authors are
welcome to replace the text, tables, figures, and bibliography with their own
and submit the resulting manuscript to the AAS Journals peer review system.
The first lesson in the tutorial is to remind authors that the AAS
Journals, the Astrophysical Journal (ApJ), the Astrophysical Journal
Letters (ApJL), and Astronomical Journal (AJ), all have a 250 word limit
for the abstract. If you exceed this length the Editorial office will ask
you to shorten it.
\end{abstract}
%% Keywords should appear after the \end{abstract} command.
%% See the online documentation for the full list of available subject
%% keywords and the rules for their use.
\keywords{editorials, notices ---
miscellaneous --- catalogs --- surveys}
%% From the front matter, we move on to the body of the paper.
%% Sections are demarcated by \section and \subsection, respectively.
%% Observe the use of the LaTeX \label
%% command after the \subsection to give a symbolic KEY to the
%% subsection for cross-referencing in a \ref command.
%% You can use LaTeX's \ref and \label commands to keep track of
%% cross-references to sections, equations, tables, and figures.
%% That way, if you change the order of any elements, LaTeX will
%% automatically renumber them.
%% We recommend that authors also use the natbib \citep
%% and \citet commands to identify citations. The citations are
%% tied to the reference list via symbolic KEYs. The KEY corresponds
%% to the KEY in the \bibitem in the reference list below.
\section{Introduction} \label{sec:intro}
\latex\ \footnote{\url{http://www.latex-project.org/}} is a document markup
language that is particularly well suited for the publication of
mathematical and scientific articles \citep{lamport94}. \latex\ was written
in 1985 by Leslie Lamport who based it on the \TeX\ typesetting language
which itself was created by Donald E. Knuth in 1978. In 1988 a suite of
\latex\ macros was developed to investigate electronic submission and
publication of AAS Journal articles \citep{1989BAAS...21..780H}. Shortly
afterwards, Chris Biemesdefer merged these macros and more into a \latex\
2.08 style file called \aastex. These early \aastex\ versions introduced
many common commands and practices that authors take for granted today such
as long table support in the form of deluxetable. Substantial revisions
were made by Lee Brotzman and Pierre Landau when the package was updated to
v4.0. AASTeX v5.0, written in 1995 by Arthur Ogawa, upgraded to \latex\ 2e
which uses the document class in lieu of a style file. Other improvements
to version 5 included hypertext support, landscape deluxetables and
improved figure support to facilitate electronic submission. The last
major release, \aastex\ v5.2 in 2005, introduced additional graphics
support plus new mark up to identify astronomical objects, datasets and
facilities.
In 1996 Maxim Markevitch modified the AAS preprint style file, aaspp4.sty,
to closely emulate the very tight, two column style of a typeset
Astrophysical Journal article. The result was emulateapj.sty. A year
later Alexey Vikhlinin took over development and maintenance. In 2001 he
converted emulateapj into a class file in \latex\ 2e and in 2003 Vikhlinin
completely rewrote emulateapj based on the APS Journal's RevTEX class.
During this time emulateapj gained growing acceptance in the astronomical
community as it filled an author need to obtain an approximate number of
manuscript pages prior to submission for cost and length estimates. The
tighter typeset also had the added advantage of saving paper when printing
out hard copies.
Even though author publication charges are no longer based on print pages
\footnote{see Section \ref{sec:pubcharge} in the Appendix for more details
about how current article costs are calculated.} the emulateapj class file
has proven to be extremely popular with AAS Journal authors. An informal
analysis of submitted \latex\ manuscripts in early 2015 reveals that
$\sim$65\% either call emulateapj or have the emulateapj classfile in the
comments indicating it was used at some stage of the manuscript
construction. Clearly authors want to have access to a tightly typeset
version of the article when corresponding with co-authors and for preprint
submissions.
When planning the next \aastex\ release the popularity of emulateapj played
an important role in the decision to drop the old base code and adopt and
modify emulateapj for \aastex\ v6.0 instead. The change brings \aastex\
inline with what the majority of authors are already using while still
delivering new and improved features. \aastex\ v6.0 was written by Amy
Hendrickson and released in 2016. Some of the new features in v6.0
include:
\begin{enumerate}
\item improved citations for third party data repositories and software,
\item easier construction of matrix figures consisting of multiple
encapsulated postscript (EPS) or portable document format (PDF) files,
\item figure set mark up for large collections of similar figures,
\item color mark up to easily enable/disable revised text highlighting,
\item improved url support and hyperlinking, and
\item numerous table options such as the ability to hide columns, column
decimal alignment, automatic column math mode and numbering, plus splitting of
wide tables.
\end{enumerate}
The rest of this article provides information and examples on how to create
your own AAS Journal manuscript. Special emphasis is placed on how to use
the full potential of \aastex\ v6.0. The next section describes the
different manuscript styles available and how they differ from past
releases. Section \ref{sec:floats} describes how tables and figures are
placed in a \latex\ document. Specific examples of tables, Section
\ref{subsec:tables}, and figures, Section \ref{subsec:figures}, are also
provided. Section \ref{sec:displaymath} discusses how to display math and
incorporate equations in a manuscript while Section \ref{sec:highlight}
discusses how to use the new revision mark up. The last section,
\ref{sec:cite}, shows how to recognize software and external data as first
class references in the manuscript bibliography. An appendix is included
to show how to construct one and provide some information on how article
charges are calculated. Additional information is available both embedded
in the comments of this \latex\ file and in the online documentation at
\url{http://journals.aas.org/authors/aastex.html}.
\section{Manuscript styles} \label{sec:style}
The default style in \aastex\ v6.0 is a tight single column style, e.g.
10 point font, single spaced. It can also be called directly with \\
\noindent {\tt\string\documentclass[onecolumn]\{aastex6\}}. \\
\noindent but as the default, the argument is not necessary. The single
column style is very useful for articles with wide equations. It is also the
easiest style to work with since figures and tables, see Section
\ref{sec:floats}, will span the entire page, reducing the need to
address float sizing.
To invoke a two column style similar to what is produced in
the published PDF copy use \\
\noindent {\tt\string\documentclass[twocolumn]\{aastex6\}}. \\
\noindent Note that in the two column style figures and tables will only
span one column unless specifically ordered across both with the ``*'' flag,
e.g. \\
\noindent{\tt\string\begin\{figure*\}} ... {\tt\string\end\{figure*\}}, \\
\noindent{\tt\string\begin\{table*\}} ... {\tt\string\end\{table*\}}, and \\
\noindent{\tt\string\begin\{deluxetable*\}} ... {\tt\string\end\{deluxetable*\}}. \\
\noindent This option is ignored in the onecolumn style.
Some other style options are outlined in the commented sections of this
article. Any combination of style options can be used.
Two style options that are needed to fully use the new revision tracking
feature, see Section \ref{sec:highlight}, are {\tt\string linenumbers} which
uses the lineno style file to number each article line in the left margin and
{\tt\string trackchanges} which controls the revision and commenting highlight
output.
Previous versions of \aastex\ accepted other styles most of which have been
modified. The old {\tt\string manuscript} option now produces a single
column, double spaced format with 12 point font. {\tt\string preprint} and
{\tt\string preprint2} now are aliased with {\tt\string onecolumn} and
{\tt\string twocolumn}, respectively.
\section{Floats} \label{sec:floats}
Floats are non-text items that generally can not be split over a page.
They also have captions and can be numbered for reference. Primarily these
are figures and tables but authors can define their own. \latex\ tries to
place a float where indicated in the manuscript but will move it later if
there is not enough room at that location, hence the term ``float''.
Authors are encouraged to embed their tables and figures within the text as
they are mentioned. Please do not place the figures and text at the end of
the article as was the old practice. Editors and the vast majority of
referees find it much easier to read a manuscript with embedded figures and
tables.
Depending on the number of floats and the particular amount of text and
equations present in a manuscript the ultimate location of any specific
float can be hard to predict prior to compilation. It is recommended that
authors not spend significant time trying to get float placement perfect
for peer review. The AAS Journal's publisher has sophisticated typesetting
software that will produce the optimal layout during production.
\begin{deluxetable}{c|cc}
\tablecaption{ApJ costs from 1991 to 2013\tablenotemark{a} \label{tab:table}}
\tablehead{
\colhead{Year} & \colhead{Subscription} & \colhead{Publication} \\
\colhead{} & \colhead{cost} & \colhead{charges\tablenotemark{b}}\\
\colhead{} & \colhead{(\$)} & \colhead{(\$/page)}
}
\colnumbers
\startdata
1991 & 600 & 100 \\
1992 & 650 & 105 \\
1993 & 550 & 103 \\
1994 & 450 & 110 \\
1995 & 410 & 112 \\
1996 & 400 & 114 \\
1997 & 525 & 115 \\
1998 & 590 & 116 \\
1999 & 575 & 115 \\
2000 & 450 & 103 \\
2001 & 490 & 90 \\
2002 & 500 & 88 \\
2003 & 450 & 90 \\
2004 & 460 & 88 \\
2005 & 440 & 79 \\
2006 & 350 & 77 \\
2007 & 325 & 70 \\
2008 & 320 & 65 \\
2009 & 190 & 68 \\
2010 & 280 & 70 \\
2011 & 275 & 68 \\
2012 & 150 & 56 \\
2013 & 140 & 55 \\
\enddata
\tablenotetext{a}{Adjusted for inflation}
\tablenotetext{b}{Accounts for the change from page charges to digital quanta in April, 2011}
\tablecomments{Note that {\tt \string \colnumbers} does not work with the
vertical line alignment token. If you want vertical lines in the headers you
can not use this command at this time.}
\end{deluxetable}
For authors that do want to take the time to optimize the locations of
their floats there are some techniques that can be used. The simplest
solution is to place a float earlier in the text to get the position
right but this option will break down if the manuscript is altered, see
Table \ref{tab:table}. A better method is to force \latex\ to place a
float in a general area with the use of the optional {\tt\string [placement
specifier]} parameter for figures and tables. This parameter goes after
{\tt\string \begin\{figure\}}, {\tt\string \begin\{table\}}, and
{\tt\string \begin\{deluxetable\}}. The main arguments the specifier takes
are ``h'', ``t'', ``b'', and ``!''. These tell \latex\ to place the float
\underline{h}ere (or as close to this location as possible), at
the \underline{t}op of the page, and at the \underline{b}ottom of the page.
The last argument, ``!'', tells \latex\ to override its internal method of
calculating the float position. A sequence of rules can be created by
using multiple arguments. For example, {\tt\string \begin\{figure\}[htb!]}
tells \latex\ to try the current location first, then the top of the page
and finally the bottom of the page without regard to what it thinks the
proper position should be. Many of the tables and figures in this article
use a placement specifier to set their positions.
Note that the \latex\ {\tt\string tabular} environment is not a float.
Only when a {\tt\string tabular} is surrounded by
{\tt\string\begin\{table\}} ... {\tt\string\end\{table\}} is it a true
float and the rules and suggestions above apply. In addition, with the
addition of the longtable package to span a page break, deluxetable is also
no longer a float by default. An author must make a deluxetable a float by
adding the command {\tt\string \floattable} right before the {\tt\string \begin\{deluxetable\}} call. This command will allow \latex\ to optimally place
the deluxetable but note that long tables will no longer split over a page.
It is up to the reader to strike the right balance.
\floattable
\begin{deluxetable}{ccCrlc}
\tablecaption{Column math mode in an observation log \label{tab:mathmode}}
\tablecolumns{6}
\tablenum{2}
\tablewidth{0pt}
\tablehead{
\colhead{UT start time\tablenotemark{a}} &
\colhead{MJD start time\tablenotemark{a}} &
\colhead{Seeing} & \colhead{Filter} & \colhead{Inst.} \\
\colhead{(YYYY-mm-dd)} & \colhead{(d)} &
\colhead{(arcsec)} & \colhead{} & \colhead{}
}
\startdata
2012-03-26 & 56012.997 & \sim 0.\arcsec5 & H$\alpha$ & NOT \\
2012-03-27 & 56013.944 & 1.\arcsec5 & grism & SMARTS \\
2012-03-28 & 56014.984 & \nodata & F814M & HST \\
2012-03-30 & 56016.978 & 1.\arcsec5\pm0.25 & B\&C & Bok \\
\enddata
\tablenotetext{a}{At exposure start.}
\tablecomments{The ``C'' command column identifier in the third column turns on
math mode for that specific column. One could do the same for the next
column so that dollar signs would not be needed for H$\alpha$
but then all the other text would also be in math mode and thus typeset
in Latin Modern math and you will need to put it back to Roman by hand.
Note that if you do change this column to math mode the dollar signs already
present will not cause a problem. Table \ref{tab:mathmode} is published
in its entirety in the machine readable format. A portion is
shown here for guidance regarding its form and content.}
\end{deluxetable}
%\vspace{5mm}
\subsection{Tables} \label{subsec:tables}
Tables can be constructed with \latex's standard table environment or the
\aastex's deluxetable environment. The deluxetable construct handles long
tables better but has a larger overhead due to the greater amount of
defined mark up used to set up and manipulate the table structure. The choice
of which to use is up to the author. Examples of both environments are
used in this manuscript. Table \ref{tab:table} is a simple deluxetable
example that gives the approximate changes in the subscription costs and
author publication charges from 1991 to 2013.
Tables longer than 250 data lines and complex tables should only have a
short example table with the full data set available in the machine
readable format. The machine readable table will be available via the
``DATA'' link in the HTML version of the article. Authors are required to
indicate to the reader where the data can be obtained in the table
comments. Suggested text is given in the comments of Table
\ref{tab:mathmode}. Authors are encouraged to create their own machine
readable tables using the online tool at
\url{http://authortools.aas.org/MRT/upload.html}.
\aastex\ v6.0 introduces five new table features that are designed to make
table construction easier and the resulting display better for AAS Journal
authors. The items are:
\begin{enumerate}
\item Declaring math mode in specific columns,
\item Column decimal alignment,
\item Automatic column header numbering,
\item Hiding columns, and
\item Splitting wide tables into two or three parts.
\end{enumerate}
Each of these new features is illustrated in the following table examples.
All five features work with the regular \latex\ tabular environment and in
\aastex's deluxetable environment. The examples in this manuscript also
show where the two processes differ.
\subsubsection{Column math mode}
Both the \latex\ tabular and \aastex\ deluxetable require an argument to
define the alignment and number of columns. The most common values are
``c'', ``l'' and ``r'' for \underline{c}enter, \underline{l}eft, and
\underline{r}ight justification. If these values are capitalized, e.g.
``C'', ``L'', or ``R'', then that specific column will automatically be in math
mode meaning that \$s are not required. Note that having embedded dollar
signs in the table does not affect the output. The third and fourth columns
of Table \ref{tab:mathmode} show how this math mode works.
\subsubsection{Decimal alignment}
Aligning a column by the decimal point can be difficult with only center,
left, and right justification options. It is possible to use phantom calls
in the data, e.g. {\tt\string\phn}, to align columns by hand but this can
be tedious in long or complex tables. To address this \aastex\ introduces
the {\tt\string\decimals} command and a new column justification option,
``D'', to align data in that column on the decimal. In deluxetable the
{\tt\string\decimals} command is invoked before the {\tt\string\startdata}
call but can be anywhere in \latex's tabular environment.
Two other important things to note when using decimal alignment are that each
decimal column \textit{must end with a space before the ampersand}, e.g.
``\&\&'' is not allowed. Empty decimal columns are indicated with a decimal,
e.g. ``.''. Do not use deluxetable's {\tt\string\nodata} command.
The ``D'' alignment token works by splitting the column into two parts on the
decimal. While this is invisible to the user one must be aware of how it
works so that the headers are accounted for correctly. All decimal column
headers need to span two columns to get the alignment correct. This can be
done with a multicolumn call, e.g. {\tt\string\multicolumn2c\{\}} or
{\tt\string\multicolumn\{2\}\{c\}\{\}}, or use the new
{\tt\string\twocolhead\{\}} command in deluxetable. Since \latex\ is
splitting these columns into two it is important to get the table width
right so that they appear joined on the page. You may have to run the
\latex\ compiler twice to get it right. Table \ref{tab:decimal}
illustrates how decimal alignment works in the tabular environment with a
$\pm$ symbol embedded between the last two columns.
%% Note that the \setcounter and \renewcommand are needed here because
%% this example is using a mix of deluxetable and tabular. Here the
%% deluxetable counters are set with \tablenum but the situation is a bit
%% more complex for tabular. Use the first command to set the Table number
%% to ONE LESS than it should be. The next command will auto increment it
%% to the desired number.
\setcounter{table}{2}
\begin{table}[h!]
\renewcommand{\thetable}{\arabic{table}}
\centering
\caption{Decimal alignment made easy} \label{tab:decimal}
\begin{tabular}{cD@{$\pm$}D}
\tablewidth{0pt}
\hline
\hline
Column & \multicolumn2c{Value} & \multicolumn2c{Uncertainty}\\
\hline
\decimals
A & 1234 & 100.0 \\
B & 123.4 & 10.1 \\
C & 12.34 & 1.01 \\
D & 1.234 & 0.101 \\
E & .1234 & 0.01001 \\
F & 1.0 & . \\
\hline
\multicolumn{5}{c}{NOTE. - Two decimal aligned columns}
\end{tabular}
\end{table}
\subsubsection{Automatic column header numbering} \label{subsubsec:autonumber}
The command {\tt\string\colnumbers} can be included to automatically number
each column as the last row in the header. Per the AAS Journal table format
standards, each column index number will be surrounded by parentheses. In
a \latex\ tabular environment the {\tt\string\colnumbers} should be invoked
at the location where the author wants the numbers to appear, e.g. after
the last line of specified table header rows. In deluxetable this command
has to come before {\tt\string\startdata}. {\tt\string\colnumbers} will
not increment for columns hidden by the ``h'' command, see Section
\ref{subsubsec:hide}. Table \ref{tab:table} uses this command to
automatically generate column index numbers.
Note that when using decimal alignment in a table the command
{\tt\string\decimalcolnumbers} must be used instead of
{\tt\string\colnumbers} and {\tt\string\decimals}. Table \ref{tab:messier}
illustrates this specific functionality.
\subsubsection{Hiding columns} \label{subsubsec:hide}
Entire columns can be \underline{h}idden from display simply by changing
the specified column identifier to ``h''. In the \latex\ tabular environment
this column identifier conceals the entire column including the header
columns. In \aastex's deluxetables the header row is specifically
declared with the {\tt\string\tablehead} call and each header column is
marked with {\tt\string\colhead} call. In order to make a specific header
disappear with the ``h'' column identifier in deluxetable use
{\tt\string\nocolhead} instead to suppress that particular column header.
Authors can use this option in many different ways. Since column data can
be easily suppressed, authors can include extra information and hide it
based on the comments of co-authors or referees. For wide tables that will
have a machine readable version, authors could put all the information in
the \latex\ table but use this option to hide as many columns as needed until
it fits on a page. This concealed column table would serve as the
example table for the full machine readable version. Regardless of how
columns are obscured, authors are responsible for removing any unneeded
column data or alerting the editorial office about how to treat these
columns during production for the final typeset article.
Table \ref{tab:messier} provides some basic information about the first ten
Messier Objects and illustrates how many of these new features can be used
together. It has automatic column numbering, decimal alignment of the
distances, and one concealed column. The Common name column
is the third in the \latex\ deluxetable but does not appear when the article
is compiled. This hidden column can be shown simply by changing the ``h'' in
the column identifier preamble to another valid value. This table also
uses {\tt\string\tablenum} to renumber the table because a \latex\ tabular
table was inserted before it.
\floattable
\begin{deluxetable}{cchlDlc}
\tablenum{4}
\tablecaption{Fun facts about the first 10 messier objects\label{tab:messier}}
\tablewidth{0pt}
\tablehead{
\colhead{Messier} & \colhead{NGC/IC} & \nocolhead{Common} & \colhead{Object} &
\multicolumn2c{Distance} & \colhead{} & \colhead{V} \\
\colhead{Number} & \colhead{Number} & \nocolhead{Name} & \colhead{Type} &
\multicolumn2c{(kpc)} & \colhead{Constellation} & \colhead{(mag)}
}
\decimalcolnumbers
\startdata
M1 & NGC 1952 & Crab Nebula & Supernova remnant & 2 & Taurus & 8.4 \\
M2 & NGC 7089 & Messier 2 & Cluster, globular & 11.5 & Aquarius & 6.3 \\
M3 & NGC 5272 & Messier 3 & Cluster, globular & 10.4 & Canes Venatici & 6.2 \\
M4 & NGC 6121 & Messier 4 & Cluster, globular & 2.2 & Scorpius & 5.9 \\
M5 & NGC 5904 & Messier 5 & Cluster, globular & 24.5 & Serpens & 5.9 \\
M6 & NGC 6405 & Butterfly Cluster & Cluster, open & 0.31 & Scorpius & 4.2 \\
M7 & NGC 6475 & Ptolemy Cluster & Cluster, open & 0.3 & Scorpius & 3.3 \\
M8 & NGC 6523 & Lagoon Nebula & Nebula with cluster & 1.25 & Sagittarius & 6.0 \\
M9 & NGC 6333 & Messier 9 & Cluster, globular & 7.91 & Ophiuchus & 8.4 \\
M10 & NGC 6254 & Messier 10 & Cluster, globular & 4.42 & Ophiuchus & 6.4 \\
\enddata
\tablecomments{This table ``hides'' the third column in the \latex\ when compiled.
The Distance is also centered on the decimals. Note that all of the values
in a decimal aligned column have to have a space before the next ampersand.}
\end{deluxetable}
\subsubsection{Splitting a table into multiple horizontal components}
Since the AAS Journals are now all electronic with no print version there is
no reason why tables can not be as wide as authors need them to be.
However, there are some artificial limitations based on the width of a
print page. The old way around this limitation was to use landscape mode
with the {\tt\string\rotate} command and the smallest available table font
sizes, e.g. {\tt\string\tablewidth}, to get the table to fit.
Unfortunately, this was not always enough. Now, along with the hide column
option outlined in Section \ref{subsubsec:hide}, there is a new way to break
a table into two or three components so that it flows down a page by
invoking a new table type, splittabular or splitdeluxetable. Within these
tables a new ``B'' column separator is introduced. Much like the vertical
bar option, ``$\vert$'', that produces vertical table lines, e.g. Table
\ref{tab:table}, the new ``B'' separator indicates where to \underline{B}reak
a table. Up to two ``B''s may be included.
Table 5 % using \ref{tab:deluxesplit} here breaks compilation, hence the hard-coded number
shows how to split a wide deluxetable in half with
the {\tt\string\splitdeluxetable} command. The {\tt\string\colnumbers}
option is on to show how the automatic column numbering carries through the
second table component, see Section \ref{subsubsec:autonumber}.
The last example, Table \ref{tab:tablesplit}, shows how to split the same
table but with a regular \latex\ tabular call and into three parts. Decimal
alignment is included in the third column and the ``Component'' column is
hidden to illustrate the new features working together.
\begin{splitdeluxetable*}{lccccccBccccccc}
\tabletypesize{\scriptsize}
\tablewidth{0pt}
\tablenum{5}
\tablecaption{Measurements of Emission Lines: 1 break \label{tab:deluxesplit}}
\tablehead{
\colhead{Model} & \colhead{Component}& \colhead{Shift} & \colhead{FWHM} &
\multicolumn{10}{c}{Flux} \\
\colhead{} & \colhead{} & \colhead{($\rm
km~s^{-1}$)}& \colhead{($\rm km~s^{-1}$)} & \multicolumn{10}{c}{($\rm
10^{-17}~erg~s^{-1}~cm^{-2}$)} \\
\cline{5-14}
\colhead{} & \colhead{} &
\colhead{} & \colhead{} & \colhead{Ly$\alpha$} & \colhead{N\,{\footnotesize
V}} & \colhead{Si\,{\footnotesize IV}} & \colhead{C\,{\footnotesize IV}} &
\colhead{Mg\,{\footnotesize II}} & \colhead{H$\gamma$} & \colhead{H$\beta$}
& \colhead{H$\alpha$} & \colhead{He\,{\footnotesize I}} &
\colhead{Pa$\gamma$}
}
\colnumbers
\startdata
{ }& BELs& -97.13 & 9117$\pm 38$& 1033$\pm 33$&$< 35$&$< 166$& 637$\pm 31$& 1951$\pm 26$& 991$\pm 30$& 3502$\pm 42$& 20285$\pm 80$& 2025$\pm 116$& 1289$\pm 107$\\
{Model 1}& IELs& -4049.123 & 1974$\pm 22$& 2495$\pm 30$&$< 42$&$< 109$& 995$\pm 186$& 83$\pm 30$& 75$\pm 23$& 130$\pm 25$& 357$\pm 94$& 194$\pm 64$& 36$\pm 23$\\
{ }& NELs& \nodata & 641$\pm 4$& 449$\pm 23$&$< 6$&$< 9$& -- & 275$\pm 18$& 150$\pm 11$& 313$\pm 12$& 958$\pm 43$& 318$\pm 34$& 151$\pm 17$\\
\hline
{ }& BELs& -85 & 8991$\pm 41$& 988$\pm 29$&$< 24$&$< 173$& 623$\pm 28$& 1945$\pm 29$& 989$\pm 27$& 3498$\pm 37$& 20288$\pm 73$& 2047$\pm 143$& 1376$\pm 167$\\
{Model 2}& IELs& -51000 & 2025$\pm 26$& 2494$\pm 32$&$< 37$&$< 124$& 1005$\pm 190$& 72$\pm 28$& 72$\pm 21$& 113$\pm 18$& 271$\pm 85$& 205$\pm 72$& 34$\pm 21$\\
{ }& NELs& 52 & 637$\pm 10$& 477$\pm 17$&$< 4$&$< 8$& -- & 278$\pm 17$& 153$\pm 10$& 317$\pm 15$& 969$\pm 40$& 325$\pm 37$&
147$\pm 22$\\
\enddata
\tablecomments{This is an example of how to split a deluxetable. You can
split any table with this command into two or three parts. The location of
the split is given by the author based on the placement of the ``B''
indicators in the column identifier preamble. For more information please
look at the new \aastex\ instructions.}
\end{splitdeluxetable*}
%\clearpage
\setcounter{table}{5}
\begin{table}[h!]
\renewcommand{\thetable}{\arabic{table}}
\caption{Measurements of Emission Lines: 2 breaks\label{tab:tablesplit}}
\begin{splittabular}{lhDcBccccccBcccc}
%\multicolumn{5}{c}{Table 6} \\
%\multicolumn{5}{c}{Measurements of Emission Lines} \\
\hline
\hline
Model & Component & \multicolumn2c{Shift} & FWHM &
\multicolumn{10}{c}{Flux} \\
& & \multicolumn2c{($\rm km~s^{-1}$)} & {($\rm km~s^{-1}$)} &
\multicolumn{10}{c}{($\rm 10^{-17}~erg~s^{-1}~cm^{-2}$)} \\
\cline{5-15}
& & & & & {Ly$\alpha$} & {N\,{\footnotesize V}} &
{Si\,{\footnotesize IV}} & {C\,{\footnotesize IV}} &
{Mg\,{\footnotesize II}} & {H$\gamma$} & {H$\beta$}
& {H$\alpha$} & {He\,{\footnotesize I}} & {Pa$\gamma$} \\
%\hline
\decimalcolnumbers
& BELs& -97.13 & 9117$\pm 38$& 1033$\pm 33$&$< 35$&$< 166$& 637$\pm 31$& 1951$\pm 26$& 991$\pm 30$& 3502$\pm 42$& 20285$\pm 80$& 2025$\pm 116$& 1289$\pm 107$\\
Model 1 & IELs& -4049.123 & 1974$\pm 22$& 2495$\pm 30$&$< 42$&$< 109$& 995$\pm 186$& 83$\pm 30$& 75$\pm 23$& 130$\pm 25$& 357$\pm 94$& 194$\pm 64$& 36$\pm 23$\\
& NELs& . & 641$\pm 4$& 449$\pm 23$&$< 6$&$< 9$& -- & 275$\pm 18$& 150$\pm 11$& 313$\pm 12$& 958$\pm 43$& 318$\pm 34$& 151$\pm 17$\\
\hline
& BELs& -85 & 8991$\pm 41$& 988$\pm 29$&$< 24$&$< 173$& 623$\pm 28$& 1945$\pm 29$& 989$\pm 27$& 3498$\pm 37$& 20288$\pm 73$& 2047$\pm 143$& 1376$\pm 167$\\
Model 2 & IELs& -51000 & 2025$\pm 26$& 2494$\pm 32$&$< 37$&$< 124$& 1005$\pm 190$& 72$\pm 28$& 72$\pm 21$& 113$\pm 18$& 271$\pm 85$& 205$\pm 72$& 34$\pm 21$\\
& NELs& 52 & 637$\pm 10$& 477$\pm 17$&$< 4$&$< 8$& -- & 278$\pm 17$& 153$\pm 10$& 317$\pm 15$& 969$\pm 40$& 325$\pm 37$& 147$\pm 22$\\
\hline
\end{splittabular}
\end{table}
\subsection{Figures\label{subsec:figures}}
%% The "ht!" tells LaTeX to put the figure "here" first, at the "top" next
%% and to override the normal way of calculating a float position
\begin{figure}[ht!]
\figurenum{1}
\plotone{cost.eps}
\caption{The subscription and author publication costs from 1991 to 2013.
The data comes from Table \ref{tab:table}.\label{fig:general}}
\end{figure}
Authors can include a wide number of different graphics with their articles
in encapsulated postscript (EPS) or portable document format (PDF). These
range from general figures all authors are familiar with to new enhanced
graphics that can only be fully experienced in HTML. The latter include
animations, figure sets and interactive figures. This portion of the
article provides examples for setting up all these graphics with the
latest version of \aastex.
\subsection{General figures\label{subsec:general}}
\aastex\ has a {\tt\string\plotone} command to display a figure
consisting of one EPS/PDF file. Figure \ref{fig:general} is an example
which uses the data from Table \ref{tab:table}. For a general figure
consisting of two EPS/PDF files the {\tt\string\plottwo} command can be
used to position the two image files side by side. Figure \ref{fig:f2}
shows the Swift/XRT X-ray light curves of two recurrent novae. The data
from Figures \ref{fig:f2} through \ref{fig:fig4} are taken from Table 2 of
\citet{2011ApJS..197...31S}.
\begin{figure}
\figurenum{2}
\plottwo{RS_Oph.eps}{U_Sco.eps}
\caption{Swift/XRT X-ray light curves of RS Oph and U Sco which represent
the two canonical recurrent types, a long period system with a red giant
secondary and a short period system with a dwarf/sub-dwarf secondary,
respectively.\label{fig:f2}}
\end{figure}
Both {\tt\string\plotone} and {\tt\string\plottwo} take a
{\tt\string\caption} and an optional {\tt\string\figurenum} command to
specify the figure number. Each is based on the {\tt\string graphicx}
package command, {\tt\string\includegraphics}. Authors are welcome to use
{\tt\string\includegraphics} along with its optional arguments that control
the height, width, scale, and position angle of a file within the figure.
More information on the full usage of {\tt\string\includegraphics} can be
found at \break
\url{https://en.wikibooks.org/wiki/LaTeX/Importing\_Graphics\#Including\_graphics}.
\subsection{Grid figures}
Including more than two EPS/PDF files in a single figure call can be tricky
to format easily. To make the process easier for authors \aastex\ v6.0 offers
{\tt\string\gridline} which allows any number of individual EPS/PDF file
calls within a single figure. Each file cited in a {\tt\string\gridline}
will be displayed in a row. By adding more {\tt\string\gridline} calls an
author can easily construct a matrix of X by Y individual files as a
single general figure.
For each {\tt\string\gridline} command an EPS/PDF file is called by one of
four different commands. These are {\tt\string\fig},
{\tt\string\rightfig}, {\tt\string\leftfig}, and {\tt\string\boxedfig}.
The first file call specifies no image position justification while the
next two will right and left justify the image, respectively. The
{\tt\string\boxedfig} is similar to {\tt\string\fig} except that a box is
drawn around the figure file when displayed. Each of these commands takes
three arguments. The first is the file name. The second is the width that
file should be displayed at. While any natural \latex\ unit is allowed, it
is recommended that authors use fractional units of
{\tt\string\textwidth}. The last argument is text for a subcaption.
Figure \ref{fig:pyramid} shows an inverted pyramid figure
constructed from six individual EPS files using the
{\tt\string\gridline} option.
\begin{figure}
\figurenum{3}
\gridline{\fig{V2491_Cyg.eps}{0.3\textwidth}{(a)}
\fig{CSS081007.eps}{0.3\textwidth}{(b)}
\fig{LMC_2009.eps}{0.3\textwidth}{(c)}
}
\gridline{\fig{RS_Oph.eps}{0.3\textwidth}{(d)}
\fig{U_Sco.eps}{0.3\textwidth}{(e)}
}
\gridline{\fig{KT_Eri.eps}{0.3\textwidth}{(f)}}
\caption{Inverted pyramid figure of six individual files. The novae are
(a) V2491 Cyg, (b) HV Cet, (c) LMC 2009, (d) RS Oph, (e) U Sco, and
(f) KT Eri.\label{fig:pyramid}}
\end{figure}
\subsection{Figure sets}
A large collection of similar style figures should be grouped together as a
figure set. The derived PDF article will only show an example figure
while the enhanced content is available in the figure set in the electronic
edition. The advantage of a figure set is that it gives the reader the ability to
easily sort through the figure collection to find individual component
figures. All of the figure set components, along with their html framework,
are also available for download in a .tar.gz package.
Special \latex\ mark up is required to create a figure set. Prior to
\aastex\ v6.0 the underlying mark up commands had to be inserted by hand
but are now included. Note that when an article with a figure set is compiled
in \latex\ none of the component figures are shown and a floating Figure
Set caption will appear in the resulting PDF.
\figsetstart
\figsetnum{4}
\figsettitle{Swift X-ray light curves}
\figsetgrpstart
\figsetgrpnum{1.1}
\figsetgrptitle{KT Eri}
\figsetplot{KT_Eri.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.2}
\figsetgrptitle{RS Oph}
\figsetplot{RS_Oph.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.3}
\figsetgrptitle{U Sco}
\figsetplot{U_Sco.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.4}
\figsetgrptitle{V2491 Cyg}
\figsetplot{V2491_Cyg.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.5}
\figsetgrptitle{Nova LMC 2009}
\figsetplot{LMC_2009.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.6}
\figsetgrptitle{HV Cet}
\figsetplot{CSS081007.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.7}
\figsetgrptitle{V2672 Oph}
\figsetplot{V2672_Oph.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetgrpstart
\figsetgrpnum{1.8}
\figsetgrptitle{V407 Cyg}
\figsetplot{V407_Cyg.eps}
\figsetgrpnote{The Swift/XRT X-ray light curve for the first year after
outburst.}
\figsetgrpend
\figsetend
\begin{figure}
\figurenum{4}
\plotone{KT_Eri.eps}
\caption{The Swift/XRT X-ray light curve for the first year after
outburst of the suspected recurrent nova KT Eri. At a maximum count rate of
328 ct/s, KT Eri was the brightest nova in X-rays observed to date. All
the component figures are available in the Figure Set. \label{fig:fig4}}
\end{figure}
Authors are encouraged to use an online tool at
\url{http://authortools.aas.org/FIGSETS/make-figset.html} to generate their
own specific figure set mark up to incorporate into their \latex\ articles.
\subsection{Animations}
Authors may include animations in their articles. A single still frame from
the animation should be included as a regular figure to serve as an example.
The associated figure caption should indicate to the reader exactly what the
animation shows and that the animation is available online.
\begin{figure}
\figurenum{5}
\plotone{video3.eps}
\caption{Example image from the animation which is available in the electronic
edition.}
\end{figure}
\subsection{Interactive figures}
Interactive figures give the reader the ability to manipulate the
information contained in an image which can add clarity or help further the
author's narrative. These figures consist of two parts, the figure file in
a specific format and a javascript and html framework that provides the
interactive control. An example of an interactive figure is a 3D model.
The underlying figure is an X3D file while x3dom.js is the javascript driver
that displays it. An author-created interface is added via an html wrapper.
The first 3D model published by the AAS Journals using this technique was
\citet{2014ApJ...793..127V}. Authors should consult the online tutorials
for more information on how to construct their own interactive figures.
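As a hedged illustration only (the file names and sizes below are
placeholders, not taken from any published example), a minimal html wrapper
for an X3D model displayed with x3dom could look like:
\begin{verbatim}
<html>
  <head>
    <script src="x3dom.js"></script>
    <link rel="stylesheet" href="x3dom.css">
  </head>
  <body>
    <x3d width="600px" height="400px">
      <scene>
        <inline url="model.x3d"></inline>
      </scene>
    </x3d>
  </body>
</html>
\end{verbatim}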
As with animations authors should include a non-interactive regular figure
to use as an example. The example figure should also indicate to the reader
that the enhanced figure is interactive and can be accessed online.
\section{Displaying mathematics} \label{sec:displaymath}
The most common mathematical symbols and formulas are in the amsmath
package. \aastex\ requires this package so there is no need to
specifically call for it in the document preamble. Most modern \latex\
distributions already contain this package. If you do not have this
package or the other required packages, revtex4-1, latexsym, graphicx,
amssymb, longtable, and epsf, they can be obtained from
\url{http://www.ctan.org}.
Mathematics can be displayed either within the text, e.g. $E = mc^2$, or
separately in an equation. In order to be properly rendered, all inline
math text has to be declared by surrounding the math by dollar signs (\$).
A complex equation example with inline math as part of the explanation
follows.
\begin{equation}
%\begin{displaymath}
\bar v(p_2,\sigma_2)P_{-\tau}\hat a_1\hat a_2\cdots
\hat a_nu(p_1,\sigma_1) ,
%\end{displaymath}
\end{equation}
where $p$ and $\sigma$ label the initial $e^{\pm}$ four-momenta
and helicities $(\sigma = \pm 1)$, $\hat a_i=a^\mu_i\gamma_\nu$
and $P_\tau=\frac{1}{2}(1+\tau\gamma_5)$ is a chirality projection
operator $(\tau = \pm1)$. This produces a single line formula. \latex\ will
auto-number this and any subsequent equations. If no number is desired then
the {\tt\string equation} call should be replaced with {\tt\string displaymath}.
\latex\ can also handle a multi-line equation. Use {\tt\string eqnarray}
for more than one line and end each line with a
\textbackslash\textbackslash. Each line will be numbered unless the
\textbackslash\textbackslash\ is preceded by a {\tt\string\nonumber}
command. Alignment points can be added with ampersands (\&). There should be
two ampersands per line. In the examples they are centered on the equal
symbol.
\begin{eqnarray}
\gamma^\mu & = &
\left(
\begin{array}{cc}
0 & \sigma^\mu_+ \\
\sigma^\mu_- & 0
\end{array} \right) ,
\gamma^5= \left(
\begin{array}{cc}
-1 & 0\\
0 & 1
\end{array} \right) , \\
\sigma^\mu_{\pm} & = & ({\bf 1} ,\pm \sigma) ,
\end{eqnarray}
\begin{eqnarray}
\hat a & = & \left(
\begin{array}{cc}
0 & (\hat a)_+\\
(\hat a)_- & 0
\end{array}\right), \nonumber \\
(\hat a)_\pm & = & a_\mu\sigma^\mu_\pm
\end{eqnarray}
%% Putting eqnarrays or equations inside the mathletters environment groups
%% the enclosed equations by letter. For instance, the eqnarray below, instead
%% of being numbered, say, (4) and (5), would be numbered (4a) and (4b).
%% LaTeX the paper and look at the output to see the results.
\section{Revision tracking and color highlighting} \label{sec:highlight}
Authors sometimes use color to highlight changes to their manuscript in
response to editor and referee comments. In \aastex\ new commands
have been introduced to make this easier and formalize the process.
The first method is through a new set of editing mark up commands that
specifically identify what has been changed. These commands are
{\tt\string\added\{<text>\}}, {\tt\string\deleted\{<text>\}}, and
{\tt\string\replaced\{<old text>\}\{<replaced text>\}}. To activate these
commands the {\tt\string trackchanges} option must be used in the
{\tt\string\documentclass} call. When compiled this will produce the
marked text in red. The {\tt\string\explain\{<text>\}} command can be used to add
text to provide information to the reader describing the change. Its
output is purple italic font. To see how {\tt\string\added\{<important
added info>\}}, {\tt\string\deleted\{<this can be deleted text>\}},
{\tt\string\replaced\{<old data>\}\{<replaced data>\}}, and \break
{\tt\string\explain\{<text explaining the change>\}} commands will produce
\added{important added information}\deleted{, deleted text, and }
\replaced{old data}{and replaced data,} toggle between versions compiled with
and without the {\tt\string trackchanges} option.\explain{text explaining
the change}
A summary list of all these tracking commands can be produced at the end of
the article by adding the {\tt\string\listofchanges} just before the
{\tt\string\end\{document\}} call. The page number for each change will be
provided. If the {\tt\string linenumbers} option is also included in the
documentclass call then not only will all the lines in the article be
numbered for handy reference but the summary list will also include the
line number for each change.
The second method does not have the ability to highlight the specific
nature of the changes but does allow the author to document changes over
multiple revisions. The commands are {\tt\string\edit1\{<text>\}},
{\tt\string\edit2\{<text>\}} and {\tt\string\edit3\{<text>\}} and they
produce {\tt\string<text>} that is highlighted in bold red, italic blue and
underlined purple, respectively. Authors should use the first command to
\edit1{indicate which text has been changed in the first revision.} The
second command is to highlight \edit2{new or modified text from a second
revision}. If a third revision is needed then the last command should be
used \edit3{to show this changed text}. Since over 90\% of all manuscripts
are accepted after the 3rd revision these commands make it easy to identify
what text has been added and when. Once the article is accepted all the
highlight color can be turned off simply by adding the
{\tt\string\turnoffediting} command in the preamble.
Similar to marking editing changes with the {\tt\string\edit} options there
is also the {\tt\string\authorcomments1\{<text>\}},\break
{\tt\string\authorcomments2\{<text>\}} and
{\tt\string\authorcomments3\{<text>\}} commands. These produce the same
bold red, italic blue and underlined purple text but when the
{\tt\string\turnoffediting} command is present the {\tt\string<text>}
material does not appear in the manuscript. Authors can use these commands
to mark up text that they are not sure should appear in the final
manuscript or as a way to communicate comments between co-authors when
writing the article.
\section{Software and third party data repository citations} \label{sec:cite}
The AAS Journals would like to encourage authors to change software and
third party data repository references from the current standard of a
footnote to a first class citation in the bibliography. As a bibliographic
citation these important references will be more easily captured and credit
will be given to the appropriate people.
The first step to making this happen is to have the data or software in
a long term repository that has made these items available via a persistent
identifier like a Digital Object Identifier (DOI). A list of repositories
that satisfy this criterion plus each one's pros and cons are given at \break
\url{https://github.com/AASJournals/Tutorials/tree/master/Repositories}.
In the bibliography, data or code references follow this format: \\
\noindent author year, title, version, publisher, prefix:identifier\\
\citet{2015ApJ...805...23C} provides an example of how the citation in the
article references the external code at
\url{http://dx.doi.org/10.5281/zenodo.15991}. Unfortunately, bibtex does
not have specific entry types for these references so the
``@misc'' type should be used. The Repository tutorial explains how to code
the ``@misc'' type correctly. The most recent apj.bst file, available with
\aastex\ v6.0, will output the bibtex ``@misc'' type properly.
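As a hedged sketch only (the key and every field value below are placeholder
text for illustration, not a real record), an ``@misc'' entry coded to this
format might look like:
\begin{verbatim}
@misc{doe_2015_99999,
  author    = {Doe, J.},
  title     = {mycode: A Hypothetical Analysis Package},
  year      = 2015,
  version   = {v1.0},
  publisher = {Zenodo},
  doi       = {10.5281/zenodo.99999}
}
\end{verbatim}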
%% If you wish to include an acknowledgments section in your paper,
%% separate it off from the body of the text using the \acknowledgments
%% command.
\acknowledgments
We thank all the people that have made \aastex\ what it is today. This
includes, but is not limited to, Bob Hanisch, Chris Biemesderfer, Lee Brotzman,
Pierre Landau, Arthur Ogawa, Maxim Markevitch, Alexey Vikhlinin and Amy
Hendrickson.
%% To help institutions obtain information on the effectiveness of their
%% telescopes the AAS Journals has created a group of keywords for telescope
%% facilities.
%% Following the acknowledgments section, use the following syntax and the
%% \facility{} macro to list the keywords of facilities used in the research
%% for the paper. Each keyword is checked against the master list during
%% copy editing. Individual instruments can be provided in parentheses,
%% after the keyword, but they are not verified.
\vspace{5mm}
\facilities{HST(STIS), Swift(XRT and UVOT), AAVSO, CTIO:1.3m,
CTIO:1.5m,CXO}
\software{IRAF, cloudy, IDL}
%% Appendix material should be preceded with a single \appendix command.
%% There should be a \section command for each appendix. Mark appendix
%% subsections with the same markup you use in the main body of the paper.
%% Each Appendix (indicated with \section) will be lettered A, B, C, etc.
%% The equation counter will reset when it encounters the \appendix
%% command and will number appendix equations (A1), (A2), etc.
\appendix
\section{Appendix information}
Appendices can be broken into separate sections just like in the main text.
The only difference is that each appendix section is indexed by a letter
(A, B, C, etc.) instead of a number. Likewise numbered equations have
the section letter appended. Here is an equation as an example.
\begin{equation}
I = \frac{1}{1 + d_{1}^{P (1 + d_{2} )}}
\end{equation}
Appendix tables and figures should not be numbered like equations. Instead
they should continue the sequence from the main article body.
\section{Author publication charges} \label{sec:pubcharge}
Finally some information about the AAS Journal's publication charges.
In April 2011 the traditional way of calculating author charges based on
the number of printed pages was changed. The reason for the change
was the growing number of article items that could not
be represented in print. Now author charges are determined by a number of
digital ``quanta''. A single quantum is 350 words, one figure, one table,
or one enhanced digital item. For the latter this includes machine readable
tables, figure sets, animations, and interactive figures. The current cost
is \$27 per word quantum and \$30 for all other quantum types.
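As a hedged, purely illustrative calculation with these rates: a manuscript
with 7,000 words (20 word quanta), five figures, and three tables (8
additional quanta) would incur 20 $\times$ \$27 + 8 $\times$ \$30 = \$780.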
%% The reference list follows the main body and any appendices.
%% Use LaTeX's thebibliography environment to mark up your reference list.
%% Note \begin{thebibliography} is followed by an empty set of
%% curly braces. If you forget this, LaTeX will generate the error
%% "Perhaps a missing \item?".
%%
%% thebibliography produces citations in the text using \bibitem-\cite
%% cross-referencing. Each reference is preceded by a
%% \bibitem command that defines in curly braces the KEY that corresponds
%% to the KEY in the \cite commands (see the first section above).
%% Make sure that you provide a unique KEY for every \bibitem or else the
%% paper will not LaTeX. The square brackets should contain
%% the citation text that LaTeX will insert in
%% place of the \cite commands.
%% We have used macros to produce journal name abbreviations.
%% \aastex provides a number of these for the more frequently-cited journals.
%% See the Author Guide for a list of them.
%% Note that the style of the \bibitem labels (in []) is slightly
%% different from previous examples. The natbib system solves a host
%% of citation expression problems, but it is necessary to clearly
%% delimit the year from the author name used in the citation.
%% See the natbib documentation for more details and options.
\begin{thebibliography}{}
\bibitem[Corrales(2015)]{2015ApJ...805...23C} Corrales, L.\ 2015, \apj, 805, 23
\bibitem[Hanisch \& Biemesderfer(1989)]{1989BAAS...21..780H} Hanisch, R.~J., \& Biemesderfer, C.~D.\ 1989, \baas, 21, 780
\bibitem[Lamport(1994)]{lamport94} Lamport, L. 1994, LaTeX: A Document Preparation System, 2nd Edition (Boston, Addison-Wesley Professional)
\bibitem[Schwarz et al.(2011)]{2011ApJS..197...31S} Schwarz, G.~J., Ness, J.-U., Osborne, J.~P., et al.\ 2011, \apjs, 197, 31
\bibitem[Vogt et al.(2014)]{2014ApJ...793..127V} Vogt, F.~P.~A., Dopita, M.~A., Kewley, L.~J., et al.\ 2014, \apj, 793, 127
\end{thebibliography}
%% This command is needed to show the entire author+affilation list when
%% the collaboration and author truncation commands are used. It has to
%% go at the end of the manuscript.
\allauthors
%% Include this line if you are using the \added, \replaced, \deleted
%% commands to see a summary list of all changes at the end of the article.
\listofchanges
\end{document}
%% End of file `sample.tex'.
|
theory Check
imports Submission
begin
theorem erase_correct: "\<lbrakk> (c,s) \<Rightarrow> s'; (erase l c,t) \<Rightarrow> t'; 0 \<turnstile> c; s = t (< l) \<rbrakk>
\<Longrightarrow> s' = t' (< l)"
by (rule Submission.erase_correct)
theorem well_initialized_commands: "(D A c B) \<Longrightarrow> (s = s' on A) \<Longrightarrow> ((c,s) \<Rightarrow> t) \<Longrightarrow> ((c,s') \<Rightarrow> t') \<Longrightarrow> t=t' on B"
by (rule Submission.well_initialized_commands)
end |
! Copyright (c) 2017, Lehrstuhl fuer Angewandte Mechanik, Technische
! Universitaet Muenchen.
!
! Distributed under BSD-3-Clause License. See LICENSE-File for more information
!
subroutine get_index_of_csr_data(i, j, indptr, indices, k, n, m)
! i, j are the indices of the global array
! indptr and indices are the arrays from the csr matrix
! k is the python-index of the data array in the csr matrix
! n is the dimension of the indptr-array
! m is the dimension of the indices array
! Be careful: the indexing follows the numpy (zero-based) convention,
! so the entries of indptr and indices start at zero!
implicit none
integer, intent(in) :: n, m
integer, intent(in) :: i, j, indptr(n), indices(m)
integer, intent(out) :: k
k = indptr(i+1)
do while( j /= indices(k+1) )
k = k + 1
if (k >= indptr(i+2)) then ! past the end of row i: entry (i, j) not stored
k = 0
exit
end if
end do
end subroutine
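! Hedged worked example (comments only; the matrix values are illustrative):
! for the 2x2 matrix [[10, 0], [0, 20]] stored in CSR form one has
! indptr = (0, 1, 2), indices = (0, 1) and data = (10, 20). A lookup of
! the entry (i=1, j=1) starts at k = indptr(i+1) = indptr(2) = 1, finds
! indices(k+1) = indices(2) = 1 = j immediately, and returns k = 1, the
! python index of the value 20 in the data array.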
subroutine fill_csr_matrix(indptr, indices, vals, Mat, k_indices, N, M, o)
implicit none
! Mat: local element (stiffness) matrix
! k_indices: array of the indices mapping the local dofs to the global dofs
! N: length of the indptr array of the CSR-Matrix (number of rows + 1)
! M: Number of nonzero entries in the CSR-Matrix
! o: Number of rows or columns of the 'small' K-matrix; also the length of k_indices
integer, intent(in) :: N, M, o
real(8), intent(inout) :: vals(M)
real(8), intent(in) :: Mat(o, o)
integer, intent(in) :: indptr(N), indices(M), k_indices(o)
integer :: i, j, k
external :: get_index_of_csr_data
! loop over the indices of K
do i=0,o-1
do j=0,o-1
call get_index_of_csr_data(k_indices(i+1), k_indices(j+1), indptr, indices, k, N, M)
vals(k+1) = vals(k+1) + Mat(i+1, j+1)
end do
end do
end subroutine
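! Hedged usage sketch, not part of the original library: assemble a 2x2
! element matrix into a 3x3 CSR pattern with nonzeros at (0,0), (0,1),
! (1,0), (1,1) and (2,2). Expected output: 1 -1 -1 1 0.
program demo_fill_csr
    implicit none
    integer :: indptr(4), indices(5), k_indices(2)
    real(8) :: vals(5), Mat(2,2)
    indptr = (/ 0, 2, 4, 5 /)      ! zero-based row pointers (length nrows+1)
    indices = (/ 0, 1, 0, 1, 2 /)  ! zero-based column indices
    vals = 0.0d0
    Mat = reshape((/ 1.0d0, -1.0d0, -1.0d0, 1.0d0 /), (/ 2, 2 /))
    k_indices = (/ 0, 1 /)         ! local dofs map to global dofs 0 and 1
    call fill_csr_matrix(indptr, indices, vals, Mat, k_indices, 4, 5, 2)
    print *, vals
end program demo_fill_csr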
|
% KSVD running file
% In this file a synthetic test of the K-SVD algorithm is performed. First,
% a random dictionary with normalized columns is generated, and then
% a set of data signals, each a linear combination of 3 dictionary
% elements, is created with a noise level of 20 dB SNR. This set is given
% as input to the K-SVD algorithm.
% A different mode runs the K-SVD algorithm until a fixed error is reached
% in the sparse coding stage, instead of until a fixed number of
% coefficients is found (we used this mode for the denoising experiments).
% To switch between those two modes just change param.errorFlag
% (0 - for a fixed number of coefficients, 1 - until a certain error is
% reached).
param.L = 3; % number of elements in each linear combination.
param.K = 50; % number of dictionary elements
param.numIteration = 50; % number of iteration to execute the K-SVD algorithm.
param.errorFlag = 0; % use a fixed number of coefficients; do not decompose signals until a certain error is reached.
%param.errorGoal = sigma;
param.preserveDCAtom = 0;
%%%%%%% creating the data to train on %%%%%%%%
N = 1500; % number of signals to generate
n = 20; % dimension of each data
SNRdB = 20; % level of noise to be added
[param.TrueDictionary, D, x] = gererateSyntheticDictionaryAndData(N, param.L, n, param.K, SNRdB);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%% initial dictionary: Dictionary elements %%%%%%%%
param.InitializationMethod = 'DataElements';
param.displayProgress = 1;
disp('Starting to train the dictionary');
[Dictionary,output] = KSVD(D,param);
disp(['The KSVD algorithm retrieved ',num2str(output.ratio(end)),' atoms from the original dictionary']);
[Dictionary,output] = MOD(D,param);
disp(['The MOD algorithm retrieved ',num2str(output.ratio(end)),' atoms from the original dictionary']);
|
/**
* $Id$
*
* Copyright (C)
* 2013 - $Date$
* Martin Wolf <[email protected]>
*
* \file boost/numpy/dstream/detail/loop_service.hpp
* \version $Revision$
* \date $Date$
* \author Martin Wolf <[email protected]>
*
* \brief This file defines the loop_service_arity<N>::loop_service template
* that provides functionalities for iterating over the loop dimensions
* of a set of input arrays.
*
* This file is distributed under the Boost Software License,
* Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt).
*/
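// Example (hypothetical) usage for arity 2, assuming two
// input_array_service objects s0 and s1 wrapping the input arrays:
//
//     loop_service_arity<2>::loop_service<InArrDef0, InArrDef1> ls(s0, s1);
//     std::vector<intptr_t> loop_shape = ls.get_loop_shape();
//     intptr_t loop_size = ls.get_loop_size();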
#if !defined(BOOST_PP_IS_ITERATING)
#ifndef BOOST_NUMPY_DSTREAM_DETAIL_LOOP_SERVICE_HPP_INCLUDED
#define BOOST_NUMPY_DSTREAM_DETAIL_LOOP_SERVICE_HPP_INCLUDED
#include <algorithm>
#include <set>
#include <sstream>
#include <vector>
#include <boost/preprocessor/cat.hpp>
#include <boost/preprocessor/control/if.hpp>
#include <boost/preprocessor/facilities/intercept.hpp>
#include <boost/preprocessor/iterate.hpp>
#include <boost/preprocessor/repetition/enum_params.hpp>
#include <boost/preprocessor/repetition/enum_binary_params.hpp>
#include <boost/preprocessor/repetition/repeat.hpp>
#include <boost/numpy/detail/logging.hpp>
#include <boost/numpy/detail/max.hpp>
#include <boost/numpy/dstream/detail/input_array_service.hpp>
namespace boost {
namespace numpy {
namespace dstream {
namespace detail {
struct max_loop_shape_selector
{
max_loop_shape_selector(std::vector<intptr_t> const & arr_shape, int const arr_loop_nd)
: arr_shape_(arr_shape)
, arr_loop_nd_(arr_loop_nd)
{}
std::vector<intptr_t>
get_arr_loop_shape() const
{
std::vector<intptr_t> arr_loop_shape(arr_loop_nd_);
std::copy(arr_shape_.begin(), arr_shape_.begin() + arr_loop_nd_, arr_loop_shape.begin());
return arr_loop_shape;
}
std::vector<intptr_t> const & arr_shape_;
int const arr_loop_nd_;
};
inline
bool operator>(max_loop_shape_selector const & lhs, max_loop_shape_selector const & rhs)
{
return (lhs.arr_loop_nd_ > rhs.arr_loop_nd_);
}
template <int Arity>
struct loop_service_arity;
#define BOOST_PP_ITERATION_PARAMS_1 \
(3, (1, BOOST_NUMPY_LIMIT_INPUT_AND_OUTPUT_ARITY, <boost/numpy/dstream/detail/loop_service.hpp>))
#include BOOST_PP_ITERATE()
}// namespace detail
}// namespace dstream
}// namespace numpy
}// namespace boost
#endif // ! BOOST_NUMPY_DSTREAM_DETAIL_LOOP_SERVICE_HPP_INCLUDED
#else
#define N BOOST_PP_ITERATION()
template <>
struct loop_service_arity<N>
{
BOOST_STATIC_CONSTANT(int, arity = N);
template <BOOST_PP_ENUM_PARAMS_Z(1, N, class InArrDef)>
class loop_service
{
public:
//----------------------------------------------------------------------
// Define a boost::mpl::bool_ type specifying if any input array is
// an object array. This information could be needed to set iterator
// flags correctly.
// Note: By default, boost::mpl::or_ has only a maximal arity of 5, so
// we have to construct a nested sequence of boost::mpl::or_<.,.>,
// each with exactly two arguments.
#define BOOST_NUMPY_DEF_pre_or(z, n, data) \
typename boost::mpl::or_<
#define BOOST_NUMPY_DEF_arr_dtype_is_bp_object(n) \
boost::is_same<typename BOOST_PP_CAT(InArrDef,n)::value_type, python::object>
#define BOOST_NUMPY_DEF_post_or(z, n, data) \
BOOST_PP_COMMA() BOOST_NUMPY_DEF_arr_dtype_is_bp_object(BOOST_PP_ADD(n,1)) >::type
typedef BOOST_PP_REPEAT(BOOST_PP_SUB(N,1), BOOST_NUMPY_DEF_pre_or, ~)
BOOST_NUMPY_DEF_arr_dtype_is_bp_object(0)
BOOST_PP_REPEAT(BOOST_PP_SUB(N,1), BOOST_NUMPY_DEF_post_or, ~)
object_arrays_are_involved;
#undef BOOST_NUMPY_DEF_post_or
#undef BOOST_NUMPY_DEF_arr_dtype_is_bp_object
#undef BOOST_NUMPY_DEF_pre_or
//----------------------------------------------------------------------
#define BOOST_NUMPY_DEF(z, n, data) \
BOOST_PP_COMMA_IF(n) BOOST_PP_CAT(_in_arr_service_,n) ( BOOST_PP_CAT(in_arr_service_,n) )
loop_service( BOOST_PP_ENUM_BINARY_PARAMS_Z(1, N, input_array_service< InArrDef, > & in_arr_service_) )
: _is_virtual_loop(false)
, BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
{
// Calculate the loop shape. It's just the biggest loop shape of all
// individual input array loop shapes.
#define BOOST_NUMPY_DEF(z, n, data) \
BOOST_PP_COMMA_IF(n) max_loop_shape_selector( \
BOOST_PP_CAT(_in_arr_service_,n).get_arr_shape() \
, BOOST_PP_CAT(_in_arr_service_,n).get_arr_loop_nd() )
_loop_shape = boost::numpy::detail::max(
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
).get_arr_loop_shape();
#undef BOOST_NUMPY_DEF
// Make sure that the loop shape has at least 1 dimension with one
// iteration.
if(_loop_shape.size() == 0)
{
BOOST_NUMPY_LOG("Do a virtual loop")
_is_virtual_loop = true;
_loop_shape.push_back(1);
#define BOOST_NUMPY_DEF(z, n, data) \
BOOST_PP_CAT(in_arr_service_,n).prepend_loop_dimension();
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
}
// Set the broadcasting rules for all input arrays.
#define BOOST_NUMPY_DEF(z, n, data) \
BOOST_PP_CAT(_in_arr_service_,n) .set_arr_bcr(get_loop_nd());
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
// Check if the lengths of all the core dimensions of all the
// input arrays are compatible to each other.
// 1. Get a unique set of all used dimension ids.
std::set<int> ids;
#define BOOST_NUMPY_DEF(z, n, data) \
std::vector<int> const & BOOST_PP_CAT(in_arr_core_shape_ids_,n) = BOOST_PP_CAT(in_arr_service_,n).get_arr_core_shape_ids();\
ids.insert( BOOST_PP_CAT(in_arr_core_shape_ids_,n).begin(), BOOST_PP_CAT(in_arr_core_shape_ids_,n).end());
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
// 2. Loop through the dimension ids and for each id, collect the
// list of dimension lengths of all input arrays.
std::set<int>::const_iterator it;
std::set<int>::const_iterator const ids_end = ids.end();
for(it=ids.begin(); it!=ids_end; ++it)
{
int const id = *it;
if(id <= 0)
{
#define BOOST_NUMPY_DEF(z, n, data) \
intptr_t const BOOST_PP_CAT(len_,n) = BOOST_PP_CAT(_in_arr_service_,n).get_core_dim_len(id);
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
// 3. Get the maximum of all dimension lengths.
intptr_t const max_len = numpy::detail::max(BOOST_PP_ENUM_PARAMS_Z(1, N, len_));
// 4. Compare if all dimension length are equal to this
// maximal value or are of size 0 (i.e. not defined for
// an array), or 1 (i.e. broadcast-able).
#define BOOST_NUMPY_DEF(z, n, data) \
BOOST_PP_IF(n, &&, ) (BOOST_PP_CAT(len_,n) == max_len || BOOST_PP_CAT(len_,n) == 0 || BOOST_PP_CAT(len_,n) == 1)
if( ! ( BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~) ) )
#undef BOOST_NUMPY_DEF
{
std::stringstream msg;
msg << "One of the variable sized array dimensions has "
<< "the wrong length! It must be of length 1 or "
<< max_len << "!";
PyErr_SetString(PyExc_ValueError, msg.str().c_str());
python::throw_error_already_set();
}
}
}
// TODO: Check if the loop dimension lengths of all the input arrays
//       are compatible with each other. This is just to prevent a
//       cryptic iterator error message to the user.
}
inline
int
get_loop_nd() const
{
return _loop_shape.size();
}
inline
std::vector<intptr_t>
get_loop_shape() const
{
return _loop_shape;
}
intptr_t *
get_loop_shape_data()
{
return &(_loop_shape.front());
}
intptr_t const * const
get_loop_shape_data() const
{
return &(_loop_shape.front());
}
intptr_t
get_loop_size() const
{
size_t const loop_nd = _loop_shape.size();
intptr_t loop_size = (loop_nd == 0 ? 0 : 1);
for(size_t i=0; i<loop_nd; ++i)
{
loop_size *= _loop_shape[i];
}
return loop_size;
}
/**
* \brief Returns the maximum length of the core dimension that has the
* given id. All input arrays are searched and the maximum number
* is returned.
* If the given dimension id is not found for all the input array,
* 0 will be returned.
*/
intptr_t
get_core_dim_len(int const id) const
{
return boost::numpy::detail::max(
BOOST_PP_ENUM_BINARY_PARAMS_Z(1, N, _in_arr_service_, .get_core_dim_len(id) BOOST_PP_INTERCEPT)
);
}
inline
bool
is_virtual_loop() const
{
return _is_virtual_loop;
}
protected:
std::vector<intptr_t> _loop_shape;
bool _is_virtual_loop;
#define BOOST_NUMPY_DEF(z, n, data) \
input_array_service< BOOST_PP_CAT(InArrDef,n) > & BOOST_PP_CAT(_in_arr_service_,n) ;
BOOST_PP_REPEAT(N, BOOST_NUMPY_DEF, ~)
#undef BOOST_NUMPY_DEF
};
};
#undef N
#endif // BOOST_PP_IS_ITERATING
|
<!-- dom:TITLE: Week 42 Solving differential equations and Convolutional (CNN) -->
# Week 42 Solving differential equations and Convolutional (CNN)
<!-- dom:AUTHOR: Morten Hjorth-Jensen at Department of Physics, University of Oslo & Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University -->
<!-- Author: -->
**Morten Hjorth-Jensen**, Department of Physics, University of Oslo and Department of Physics and Astronomy and National Superconducting Cyclotron Laboratory, Michigan State University
Date: **Oct 22, 2021**
Copyright 1999-2021, Morten Hjorth-Jensen. Released under CC Attribution-NonCommercial 4.0 license
## Plan for week 42
* Thursday: Solving differential equations with Neural Networks and start Convolutional Neural Networks and examples.
* [Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK3155/h21/forelesningsvideoer/LectureOctober21.mp4?vrtx=view-as-webpage)
* Friday: Convolutional Neural Networks.
* [Video of Lecture](https://www.uio.no/studier/emner/matnat/fys/FYS-STK4155/h21/forelesningsvideoer/LectureOctober22.mp4?vrtx=view-as-webpage)
* Reading recommendations:
a. See lecture notes for week 42 at <https://compphysics.github.io/MachineLearning/doc/web/course.html>.
b. For neural networks we recommend Goodfellow et al chapters 6 and 7. For CNNs, see Goodfellow et al chapter 9. See also chapter 11 and 12 on practicalities and applications
c. Reading suggestions for implementation of CNNs: [Aurelien Geron's chapter 13](https://github.com/CompPhysics/MachineLearning/blob/master/doc/Textbooks/TensorflowML.pdf).
**Excellent lectures on CNNs.**
* [Video on Convolutional Neural Networks from MIT](https://www.youtube.com/watch?v=iaSUYvmCekI&ab_channel=AlexanderAmini)
* [Video on CNNs from Stanford](https://www.youtube.com/watch?v=bNb2fEVKeEo&list=PLC1qU-LWwrF64f4QKQT-Vg5Wr4qEE1Zxk&index=6&ab_channel=StanfordUniversitySchoolofEngineering)
**And Lecture material on CNNs.**
* [Lectures from IN5400 spring 2019](https://www.uio.no/studier/emner/matnat/ifi/IN5400/v19/material/week5/in5400_2019_week5_convolutional_nerual_networks.pdf)
* [Lectures from IN5400 spring 2021](https://www.uio.no/studier/emner/matnat/ifi/IN5400/v21/lecture-slides/in5400_2021_w5_lecture_convolutions.pdf)
* [See also Michael Nielsen's Lectures](http://neuralnetworksanddeeplearning.com/chap6.html)
## Using Automatic differentiation
In our discussions of ordinary differential equations
we will also study the usage of [Autograd](https://www.youtube.com/watch?v=fRf4l5qaX1M&ab_channel=AlexSmola) in computing gradients for deep learning. For the documentation of Autograd and examples see the lectures slides from [week 40](https://compphysics.github.io/MachineLearning/doc/pub/week40/html/week40.html) and the [Autograd documentation](https://github.com/HIPS/autograd).
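As a minimal illustration (not taken from the lecture material), the two Autograd functions used throughout the programs below, `grad` and `elementwise_grad`, differentiate plain Python functions directly:

```python
import autograd.numpy as np
from autograd import grad, elementwise_grad

def f(x):
    return np.sin(x**2)

# grad returns a function computing df/dx for a scalar input
df = grad(f)
print(df(1.0))   # 2*1.0*np.cos(1.0**2) = 1.0806...

# elementwise_grad does the same for an array input, elementwise
df_vec = elementwise_grad(f)
x = np.linspace(0.0, 1.0, 5)
print(np.max(np.abs(df_vec(x) - 2*x*np.cos(x**2))))  # ~ machine precision
```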
## Solving ODEs with Deep Learning
The Universal Approximation Theorem states that a neural network with a
single hidden layer, together with one input and one output layer, can
approximate any continuous function to any given precision.
**Book on solving differential equations with ML methods.**
[An Introduction to Neural Network Methods for Differential Equations](https://www.springer.com/gp/book/9789401798150), by Yadav and Kumar.
## Ordinary Differential Equations
An ordinary differential equation (ODE) is an equation involving functions having one variable.
In general, an ordinary differential equation looks like
<!-- Equation labels as ordinary links -->
<div id="ode"></div>
$$
\begin{equation} \label{ode} \tag{1}
f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right) = 0
\end{equation}
$$
where $g(x)$ is the function to find, and $g^{(n)}(x)$ is the $n$-th derivative of $g(x)$.
The $f\left(x, g(x), g'(x), g''(x), \, \dots \, , g^{(n)}(x)\right)$ is just a way to write that there is an expression involving $x$ and $g(x), \ g'(x), \ g''(x), \, \dots \, , \text{ and } g^{(n)}(x)$ on the left side of the equality sign in ([1](#ode)).
The highest order of derivative, that is the value of $n$, determines the order of the equation.
The equation is referred to as an $n$-th order ODE.
Along with ([1](#ode)), some additional conditions on the function $g(x)$ are typically given
for the solution to be unique.
## The trial solution
Let the trial solution $g_t(x)$ be
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
g_t(x) = h_1(x) + h_2(x,N(x,P))
\label{_auto1} \tag{2}
\end{equation}
$$
where $h_1(x)$ is a function that makes $g_t(x)$ satisfy a given set
of conditions, $N(x,P)$ a neural network with weights and biases
described by $P$ and $h_2(x, N(x,P))$ some expression involving the
neural network. The role of the function $h_2(x, N(x,P))$, is to
ensure that the output from $N(x,P)$ is zero when $g_t(x)$ is
evaluated at the values of $x$ where the given conditions must be
satisfied. The function $h_1(x)$ should alone make $g_t(x)$ satisfy
the conditions.
But what about the network $N(x,P)$?
As described previously, an optimization method could be used to minimize the parameters of a neural network, that being its weights and biases, through backward propagation.
## Minimization process
For the minimization to be defined, we need to have a cost function at hand to minimize.
It is given that $f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right)$ should be equal to zero in ([1](#ode)).
We can choose to consider the mean squared error as the cost function for an input $x$.
Since we are looking at one input, the cost function is just $f$ squared.
The cost function $C\left(x, P\right)$ can therefore be expressed as
$$
C\left(x, P\right) = \big(f\left(x, \, g(x), \, g'(x), \, g''(x), \, \dots \, , \, g^{(n)}(x)\right)\big)^2
$$
If $N$ inputs are given as a vector $\boldsymbol{x}$ with elements $x_i$ for $i = 1,\dots,N$,
the cost function becomes
<!-- Equation labels as ordinary links -->
<div id="cost"></div>
$$
\begin{equation} \label{cost} \tag{3}
C\left(\boldsymbol{x}, P\right) = \frac{1}{N} \sum_{i=1}^N \big(f\left(x_i, \, g(x_i), \, g'(x_i), \, g''(x_i), \, \dots \, , \, g^{(n)}(x_i)\right)\big)^2
\end{equation}
$$
The neural net should then find the parameters $P$ that minimizes the cost function in
([3](#cost)) for a set of $N$ training samples $x_i$.
## Minimizing the cost function using gradient descent and automatic differentiation
To perform the minimization using gradient descent, the gradient of $C\left(\boldsymbol{x}, P\right)$ is needed.
It might happen that finding an analytical expression of the gradient of $C(\boldsymbol{x}, P)$ from ([3](#cost)) gets too messy, depending on which cost function one desires to use.
Luckily, there exist libraries that do the job for us through automatic differentiation.
Automatic differentiation is a method of finding derivatives to machine precision.
## Example: Exponential decay
An exponential decay of a quantity $g(x)$ is described by the equation
<!-- Equation labels as ordinary links -->
<div id="solve_expdec"></div>
$$
\begin{equation} \label{solve_expdec} \tag{4}
g'(x) = -\gamma g(x)
\end{equation}
$$
with $g(0) = g_0$ for some chosen initial value $g_0$.
The analytical solution of ([4](#solve_expdec)) is
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
g(x) = g_0 \exp\left(-\gamma x\right)
\label{_auto2} \tag{5}
\end{equation}
$$
Having an analytical solution at hand, it is possible to use it to compare how well a neural network finds a solution of ([4](#solve_expdec)).
## The function to solve for
The program will use a neural network to solve
<!-- Equation labels as ordinary links -->
<div id="solveode"></div>
$$
\begin{equation} \label{solveode} \tag{6}
g'(x) = -\gamma g(x)
\end{equation}
$$
where $g(0) = g_0$ with $\gamma$ and $g_0$ being some chosen values.
In this example, $\gamma = 2$ and $g_0 = 10$.
## The trial solution
To begin with, a trial solution $g_t(t)$ must be chosen. A general trial solution for ordinary differential equations could be
$$
g_t(x, P) = h_1(x) + h_2(x, N(x, P))
$$
with $h_1(x)$ ensuring that $g_t(x)$ satisfies some conditions and $h_2(x,N(x, P))$ an expression involving $x$ and the output from the neural network $N(x,P)$, with $P$ being the collection of the weights and biases for each layer. For now, it is assumed that the network consists of one input layer, one hidden layer, and one output layer.
## Setup of Network
In this network, there are no weights and bias at the input layer, so $P = \{ P_{\text{hidden}}, P_{\text{output}} \}$.
If there are $N_{\text{hidden} }$ neurons in the hidden layer, then $P_{\text{hidden}}$ is a $N_{\text{hidden} } \times (1 + N_{\text{input}})$ matrix, given that there are $N_{\text{input}}$ neurons in the input layer.
The first column in $P_{\text{hidden} }$ represents the bias for each neuron in the hidden layer and the second column represents the weights for each neuron in the hidden layer from the input layer.
If there are $N_{\text{output} }$ neurons in the output layer, then $P_{\text{output}} $ is a $N_{\text{output} } \times (1 + N_{\text{hidden} })$ matrix.
Its first column represents the bias of each neuron and the remaining columns represent the weights to each neuron.
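As a small illustration (not part of the original notes), the parameter shapes described above can be set up directly; the layer sizes here are assumed values:

```python
import autograd.numpy.random as npr

N_input, N_hidden, N_output = 1, 10, 1   # assumed layer sizes

# First column holds the biases, the remaining columns the weights,
# as described above.
P_hidden = npr.randn(N_hidden, 1 + N_input)    # shape (10, 2)
P_output = npr.randn(N_output, 1 + N_hidden)   # shape (1, 11)
P = [P_hidden, P_output]
```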
It is given that $g(0) = g_0$. The trial solution must fulfill this condition to be a proper solution of ([6](#solveode)). A possible way to ensure that $g_t(0, P) = g_0$, is to let $F(N(x,P)) = x \cdot N(x,P)$ and $A(x) = g_0$. This gives the following trial solution:
<!-- Equation labels as ordinary links -->
<div id="trial"></div>
$$
\begin{equation} \label{trial} \tag{7}
g_t(x, P) = g_0 + x \cdot N(x, P)
\end{equation}
$$
## Reformulating the problem
We wish that our neural network manages to minimize a given cost function.
A reformulation of our equation, ([6](#solveode)), must therefore be done,
such that it describes the problem a neural network can solve for.
The neural network must find the set of weights and biases $P$ such that the trial solution in ([7](#trial)) satisfies ([6](#solveode)).
The trial solution
$$
g_t(x, P) = g_0 + x \cdot N(x, P)
$$
has been chosen such that it already solves the condition $g(0) = g_0$. What remains, is to find $P$ such that
<!-- Equation labels as ordinary links -->
<div id="nnmin"></div>
$$
\begin{equation} \label{nnmin} \tag{8}
g_t'(x, P) = - \gamma g_t(x, P)
\end{equation}
$$
is fulfilled as *well as possible*.
## More technicalities
The left hand side and right hand side of ([8](#nnmin)) must be computed separately, and then the neural network must choose weights and biases, contained in $P$, such that the two sides are as equal as possible.
This means that the absolute or squared difference between the sides must be as close to zero as possible, ideally equal to zero.
In this case, the squared difference proves to be an appropriate measure of how erroneous the trial solution is with respect to $P$ of the neural network.
This gives the following cost function our neural network must solve for:
$$
\min_{P}\Big\{ \big(g_t'(x, P) - \big( -\gamma g_t(x, P) \big) \big)^2 \Big\}
$$
(the notation $\min_{P}\{ f(x, P) \}$ means that we desire to find $P$ that yields the minimum of $f(x, P)$)
or, in terms of weights and biases for the hidden and output layer in our network:
$$
\min_{P_{\text{hidden} }, \ P_{\text{output} }}\Big\{ \big(g_t'(x, \{ P_{\text{hidden} }, P_{\text{output} }\}) - \big( -\gamma g_t(x, \{ P_{\text{hidden} }, P_{\text{output} }\}) \big) \big)^2 \Big\}
$$
for an input value $x$.
## More details
If the neural network evaluates $g_t(x, P)$ at more values for $x$, say $N$ values $x_i$ for $i = 1, \dots, N$, then the *total* error to minimize becomes
<!-- Equation labels as ordinary links -->
<div id="min"></div>
$$
\begin{equation} \label{min} \tag{9}
\min_{P}\Big\{\frac{1}{N} \sum_{i=1}^N \big(g_t'(x_i, P) - \big( -\gamma g_t(x_i, P) \big) \big)^2 \Big\}
\end{equation}
$$
Letting $\boldsymbol{x}$ be a vector with elements $x_i$ and $C(\boldsymbol{x}, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - \big( -\gamma g_t(x_i, P) \big) \big)^2$ denote the cost function, the minimization problem that our network must solve, becomes
$$
\min_{P} C(\boldsymbol{x}, P)
$$
In terms of $P_{\text{hidden} }$ and $P_{\text{output} }$, this could also be expressed as
$$
\min_{P_{\text{hidden} }, \ P_{\text{output} }} C(\boldsymbol{x}, \{P_{\text{hidden} }, P_{\text{output} }\})
$$
## A possible implementation of a neural network
For simplicity, it is assumed that the input is an array $\boldsymbol{x} = (x_1, \dots, x_N)$ with $N$ elements. It is at these points the neural network should find $P$ such that it fulfills ([9](#min)).
First, the neural network must feed forward the inputs.
This means that $\boldsymbol{x}$ must be passed through an input layer, a hidden layer and an output layer. The input layer in this case does not need to process the data any further.
The input layer will consist of $N_{\text{input} }$ neurons, passing its element to each neuron in the hidden layer. The number of neurons in the hidden layer will be $N_{\text{hidden} }$.
## Technicalities
For the $i$-th neuron in the hidden layer with weight $w_i^{\text{hidden} }$ and bias $b_i^{\text{hidden} }$, the weighting from the $j$-th neuron at the input layer is:
$$
\begin{aligned}
z_{i,j}^{\text{hidden}} &= b_i^{\text{hidden}} + w_i^{\text{hidden}}x_j \\
&=
\begin{pmatrix}
b_i^{\text{hidden}} & w_i^{\text{hidden}}
\end{pmatrix}
\begin{pmatrix}
1 \\
x_j
\end{pmatrix}
\end{aligned}
$$
## Final technicalities I
The result after weighting the inputs at the $i$-th hidden neuron can be written as a vector:
$$
\begin{aligned}
\boldsymbol{z}_{i}^{\text{hidden}} &= \Big( b_i^{\text{hidden}} + w_i^{\text{hidden}}x_1 , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_2, \ \dots \, , \ b_i^{\text{hidden}} + w_i^{\text{hidden}} x_N\Big) \\
&=
\begin{pmatrix}
b_i^{\text{hidden}} & w_i^{\text{hidden}}
\end{pmatrix}
\begin{pmatrix}
1 & 1 & \dots & 1 \\
x_1 & x_2 & \dots & x_N
\end{pmatrix} \\
&= \boldsymbol{p}_{i, \text{hidden}}^T X
\end{aligned}
$$
## Final technicalities II
The vector $\boldsymbol{p}_{i, \text{hidden}}^T$ constitutes each row in $P_{\text{hidden} }$, which contains the weights for the neural network to minimize according to ([9](#min)).
After having found $\boldsymbol{z}_{i}^{\text{hidden}} $ for every $i$-th neuron within the hidden layer, the vector will be sent to an activation function $a_i(\boldsymbol{z})$.
In this example, the sigmoid function has been chosen to be the activation function for each hidden neuron:
$$
f(z) = \frac{1}{1 + \exp{(-z)}}
$$
It is possible to use other activation functions for the hidden layer also.
The output $\boldsymbol{x}_i^{\text{hidden}}$ from each $i$-th hidden neuron is:
$$
\boldsymbol{x}_i^{\text{hidden} } = f\big( \boldsymbol{z}_{i}^{\text{hidden}} \big)
$$
The outputs $\boldsymbol{x}_i^{\text{hidden} } $ are then sent to the output layer.
The output layer consists of one neuron in this case, and combines the
output from each of the neurons in the hidden layers. The output layer
combines the results from the hidden layer using some weights $w_i^{\text{output}}$
and biases $b_i^{\text{output}}$. In this case,
it is assumed that the number of neurons in the output layer is one.
## Final technicalities III
The procedure of weighting the output neuron $j$ in the hidden layer to the $i$-th neuron in the output layer is similar to that for the hidden layer described previously.
$$
\begin{aligned}
z_{1,j}^{\text{output}} & =
\begin{pmatrix}
b_1^{\text{output}} & \boldsymbol{w}_1^{\text{output}}
\end{pmatrix}
\begin{pmatrix}
1 \\
\boldsymbol{x}_j^{\text{hidden}}
\end{pmatrix}
\end{aligned}
$$
## Final technicalities IV
Expressing $z_{1,j}^{\text{output}}$ as a vector gives the following way of weighting the inputs from the hidden layer:
$$
\boldsymbol{z}_{1}^{\text{output}} =
\begin{pmatrix}
b_1^{\text{output}} & \boldsymbol{w}_1^{\text{output}}
\end{pmatrix}
\begin{pmatrix}
1 & 1 & \dots & 1 \\
\boldsymbol{x}_1^{\text{hidden}} & \boldsymbol{x}_2^{\text{hidden}} & \dots & \boldsymbol{x}_N^{\text{hidden}}
\end{pmatrix}
$$
In this case we seek a continuous range of values since we are approximating a function. This means that after computing $\boldsymbol{z}_{1}^{\text{output}}$ the neural network has finished its feed forward step, and $\boldsymbol{z}_{1}^{\text{output}}$ is the final output of the network.
## Back propagation
The next step is to decide how the parameters should be changed such that they minimize the cost function.
The chosen cost function for this problem is
$$
C(\boldsymbol{x}, P) = \frac{1}{N} \sum_i \big(g_t'(x_i, P) - \big( -\gamma g_t(x_i, P) \big) \big)^2
$$
In order to minimize the cost function, an optimization method must be chosen.
Here, gradient descent with a constant step size has been chosen.
## Gradient descent
The idea of the gradient descent algorithm is to update the parameters in
the direction where the cost function decreases, toward a minimum.
In general, the update of some parameters $\boldsymbol{\omega}$ given a cost
function defined by some weights $\boldsymbol{\omega}$, $C(\boldsymbol{x},
\boldsymbol{\omega})$, goes as follows:
$$
\boldsymbol{\omega}_{\text{new} } = \boldsymbol{\omega} - \lambda \nabla_{\boldsymbol{\omega}} C(\boldsymbol{x}, \boldsymbol{\omega})
$$
for a number of iterations or until $ \big|\big| \boldsymbol{\omega}_{\text{new} } - \boldsymbol{\omega} \big|\big|$ becomes smaller than some given tolerance.
The value of $\lambda$ decides how large steps the algorithm must take
in the direction of $ \nabla_{\boldsymbol{\omega}} C(\boldsymbol{x}, \boldsymbol{\omega})$.
The notation $\nabla_{\boldsymbol{\omega}}$ expresses the gradient with respect
to the elements in $\boldsymbol{\omega}$.
In our case, we have to minimize the cost function $C(\boldsymbol{x}, P)$ with
respect to the two sets of weights and biases, that is for the hidden
layer $P_{\text{hidden} }$ and for the output layer $P_{\text{output}
}$ .
This means that $P_{\text{hidden} }$ and $P_{\text{output} }$ are updated by
$$
\begin{aligned}
P_{\text{hidden},\text{new}} &= P_{\text{hidden}} - \lambda \nabla_{P_{\text{hidden}}} C(\boldsymbol{x}, P) \\
P_{\text{output},\text{new}} &= P_{\text{output}} - \lambda \nabla_{P_{\text{output}}} C(\boldsymbol{x}, P)
\end{aligned}
$$
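Before turning to the full program below, here is a minimal toy sketch (not part of the lecture code) of this update rule, assuming a one-parameter cost function with minimum at $w = 3$ and using Autograd for the gradient:

```python
from autograd import grad

cost = lambda w: (w - 3.0)**2   # toy cost with minimum at w = 3
cost_grad = grad(cost)

w, lmb = 0.0, 0.1
for _ in range(100):
    w = w - lmb * cost_grad(w)  # the gradient descent update above
print(w)  # converges toward 3.0
```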
## The code for solving the ODE
```python
%matplotlib inline
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# Assuming one input, hidden, and output layer
def neural_network(params, x):
# Find the weights (and biases) for the hidden and output layer.
# Assume that params is a list of parameters for each layer.
# The biases are the first element for each array in params,
# and the weights are the remaining elements in each array in params.
w_hidden = params[0]
w_output = params[1]
# Assumes input x being a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
## Hidden layer:
# Add a row of ones to include bias
x_input = np.concatenate((np.ones((1,num_values)), x_input ), axis = 0)
z_hidden = np.matmul(w_hidden, x_input)
x_hidden = sigmoid(z_hidden)
## Output layer:
# Include bias:
x_hidden = np.concatenate((np.ones((1,num_values)), x_hidden ), axis = 0)
z_output = np.matmul(w_output, x_hidden)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial(x,params, g0 = 10):
return g0 + x*neural_network(params,x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
# The cost function:
def cost_function(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# Solve the exponential decay ODE using neural network with one input, hidden, and output layer
def solve_ode_neural_network(x, num_neurons_hidden, num_iter, lmb):
## Set up initial weights and biases
# For the hidden layer
p0 = npr.randn(num_neurons_hidden, 2 )
# For the output layer
p1 = npr.randn(1, num_neurons_hidden + 1 ) # +1 since bias is included
P = [p0, p1]
print('Initial cost: %g'%cost_function(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad now consists of two arrays;
# one for the gradient w.r.t P_hidden and
# one for the gradient w.r.t P_output
cost_grad = cost_function_grad(P, x)
P[0] = P[0] - lmb * cost_grad[0]
P[1] = P[1] - lmb * cost_grad[1]
print('Final cost: %g'%cost_function(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
# Solve the given problem
if __name__ == '__main__':
# Set seed such that the weights and biases are initialized
# with the same values for every run.
npr.seed(15)
## Decide the values of arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = 10
num_iter = 10000
lmb = 0.001
# Use the network
P = solve_ode_neural_network(x, num_hidden_neurons, num_iter, lmb)
# Print the deviation from the trial solution and true solution
res = g_trial(x,P)
res_analytical = g_analytic(x)
print('Max absolute difference: %g'%np.max(np.abs(res - res_analytical)))
# Plot the results
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
```
## The network with one input layer, specified number of hidden layers, and one output layer
It is also possible to extend the construction of our network into a more general one, allowing the network to contain more than one hidden layer.
The number of neurons within each hidden layer is given as a list of integers in the program below.
```python
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# The neural network with one input layer and one output layer,
# but with number of hidden layers specified by the user.
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consists of
# parameters to all the hidden
# layers AND the output layer.
# Assumes input x being a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referring to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P; find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
# The trial solution using the deep neural network:
def g_trial_deep(x,params, g0 = 10):
return g0 + x*deep_neural_network(params, x)
# The right side of the ODE:
def g(x, g_trial, gamma = 2):
return -gamma*g_trial
# The same cost function as before, but calls deep_neural_network instead.
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the neural network
d_net_out = elementwise_grad(deep_neural_network,1)(P,x)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
# The right side of the ODE
func = g(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# Solve the exponential decay ODE using neural network with one input and one output layer,
# but with specified number of hidden layers from the user.
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# The number of elements in the list num_hidden_neurons thus represents
# the number of hidden layers.
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad now consists of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
def g_analytic(x, gamma = 2, g0 = 10):
return g0*np.exp(-gamma*x)
# Solve the given problem
if __name__ == '__main__':
npr.seed(15)
## Decide the values of arguments to the function to solve
N = 10
x = np.linspace(0, 1, N)
## Set up the initial parameters
num_hidden_neurons = np.array([10,10])
num_iter = 10000
lmb = 0.001
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
res = g_trial_deep(x,P)
res_analytical = g_analytic(x)
plt.figure(figsize=(10,10))
plt.title('Performance of a deep neural network solving an ODE compared to the analytical solution')
plt.plot(x, res_analytical)
plt.plot(x, res[0,:])
plt.legend(['analytical','dnn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
```
## Example: Population growth
A logistic model of population growth assumes that a population converges toward an equilibrium.
The population growth can be modeled by
<!-- Equation labels as ordinary links -->
<div id="log"></div>
$$
\begin{equation} \label{log} \tag{10}
g'(t) = \alpha g(t)(A - g(t))
\end{equation}
$$
where $g(t)$ is the population density at time $t$, $\alpha > 0$ the growth rate and $A > 0$ is the maximum population number in the environment.
Also, at $t = 0$ the population has the size $g(0) = g_0$, where $g_0$ is some chosen constant.
In this example, a network similar to the one for the exponential decay using Autograd has been used to solve the equation. However, as the implementation might suffer from e.g. numerical instability
and high execution time (this might be more apparent in the examples solving PDEs),
using a library like TensorFlow is recommended.
Here, we stay with a simpler approach and implement, for comparison, the simple forward Euler method.
## Setting up the problem
Here, we will model a population $g(t)$ in an environment having carrying capacity $A$.
The population follows the model
<!-- Equation labels as ordinary links -->
<div id="solveode_population"></div>
$$
\begin{equation} \label{solveode_population} \tag{11}
g'(t) = \alpha g(t)(A - g(t))
\end{equation}
$$
where $g(0) = g_0$.
In this example, we let $\alpha = 2$, $A = 1$, and $g_0 = 1.2$.
## The trial solution
We will get a slightly different trial solution, as the boundary conditions are different
compared to the case for exponential decay.
A possible trial solution satisfying the condition $g(0) = g_0$ could be
$$
h_1(t) = g_0 + t \cdot N(t,P)
$$
with $N(t,P)$ being the output from the neural network with weights and biases for each layer collected in the set $P$.
The analytical solution is
$$
g(t) = \frac{Ag_0}{g_0 + (A - g_0)\exp(-\alpha A t)}
$$
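As a quick symbolic sanity check (not in the original notes, and assuming `sympy` is available), one can verify that this expression indeed satisfies ([11](#solveode_population)):

```python
import sympy as sp

t, alpha, A, g0 = sp.symbols('t alpha A g0', positive=True)
g = A*g0/(g0 + (A - g0)*sp.exp(-alpha*A*t))
# residual of g'(t) - alpha*g(t)*(A - g(t)); simplifies to 0
print(sp.simplify(sp.diff(g, t) - alpha*g*(A - g)))
```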
## The program using Autograd
The network will be similar to the one used for the exponential decay example, but with some small modifications for our problem.
```python
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
# Function to get the parameters.
# Done such that one can easily change the parameters to one's liking.
def get_parameters():
alpha = 2
A = 1
g0 = 1.2
return alpha, A, g0
def deep_neural_network(P, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(P) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referring to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P; find the correct weights and bias for this layer
w_hidden = P[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = P[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the trial function
d_g_t = elementwise_grad(g_trial_deep,0)(x,P)
# The right side of the ODE
func = f(x, g_t)
err_sqr = (d_g_t - func)**2
cost_sum = np.sum(err_sqr)
return cost_sum / np.size(err_sqr)
# The right side of the ODE:
def f(x, g_trial):
alpha,A, g0 = get_parameters()
return alpha*g_trial*(A - g_trial)
# The trial solution using the deep neural network:
def g_trial_deep(x, params):
alpha,A, g0 = get_parameters()
return g0 + x*deep_neural_network(params,x)
# The analytical solution:
def g_analytic(t):
alpha,A, g0 = get_parameters()
return A*g0/(g0 + (A - g0)*np.exp(-alpha*A*t))
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad now consists of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
if __name__ == '__main__':
npr.seed(4155)
## Decide the values of arguments to the function to solve
Nt = 10
T = 1
t = np.linspace(0,T, Nt)
## Set up the initial parameters
num_hidden_neurons = [100, 50, 25]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(t,P)
g_analytical = g_analytic(t)
# Find the maximum absolute difference between the solutions:
diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%diff_ag)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(t, g_analytical)
plt.plot(t, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('t')
plt.ylabel('g(t)')
plt.show()
```
## Using forward Euler to solve the ODE
A straightforward way of solving an ODE numerically, is to use Euler's method.
Euler's method uses Taylor series to approximate the value of a function $f$ at a step $\Delta x$ away from $x$:
$$
f(x + \Delta x) \approx f(x) + \Delta x f'(x)
$$
In our case, using Euler's method to approximate the value of $g$ at a step $\Delta t$ from $t$ yields
$$
\begin{aligned}
g(t + \Delta t) &\approx g(t) + \Delta t g'(t) \\
&= g(t) + \Delta t \big(\alpha g(t)(A - g(t))\big)
\end{aligned}
$$
along with the condition that $g(0) = g_0$.
Let $t_i = i \cdot \Delta t$ for $i = 0, \dots, N_t-1$, where $\Delta t = \frac{T}{N_t-1}$, $T$ is the final time our solver must solve for, and $N_t$ is the number of values for $t \in [0, T]$.
For $i \geq 1$, we have that
$$
\begin{aligned}
t_i &= i\Delta t \\
&= (i - 1)\Delta t + \Delta t \\
&= t_{i-1} + \Delta t
\end{aligned}
$$
Now, if $g_i = g(t_i)$ then
<!-- Equation labels as ordinary links -->
<div id="odenum"></div>
$$
\begin{equation}
\begin{aligned}
g_i &= g(t_i) \\
&= g(t_{i-1} + \Delta t) \\
&\approx g(t_{i-1}) + \Delta t \big(\alpha g(t_{i-1})(A - g(t_{i-1}))\big) \\
&= g_{i-1} + \Delta t \big(\alpha g_{i-1}(A - g_{i-1})\big)
\end{aligned}
\end{equation} \label{odenum} \tag{12}
$$
for $i \geq 1$, with $g_0 = g(t_0) = g(0)$.
Equation ([12](#odenum)) could be implemented in the following way,
extending the program that uses the network using Autograd:
```python
# Assume that all function definitions from the example program using Autograd
# are located here.
if __name__ == '__main__':
npr.seed(4155)
## Decide the values of arguments to the function to solve
Nt = 10
T = 1
t = np.linspace(0,T, Nt)
## Set up the initial parameters
num_hidden_neurons = [100,50,25]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(t, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(t,P)
g_analytical = g_analytic(t)
# Find the maximum absolute difference between the solutions:
diff_ag = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%diff_ag)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(t, g_analytical)
plt.plot(t, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('t')
plt.ylabel('g(t)')
## Find an approximation to the function using forward Euler
alpha, A, g0 = get_parameters()
dt = T/(Nt - 1)
# Perform forward Euler to solve the ODE
g_euler = np.zeros(Nt)
g_euler[0] = g0
for i in range(1,Nt):
g_euler[i] = g_euler[i-1] + dt*(alpha*g_euler[i-1]*(A - g_euler[i-1]))
# Print the errors done by each method
diff1 = np.max(np.abs(g_euler - g_analytical))
diff2 = np.max(np.abs(g_dnn_ag[0,:] - g_analytical))
print('Max absolute difference between Euler method and analytical: %g'%diff1)
print('Max absolute difference between deep neural network and analytical: %g'%diff2)
# Plot results
plt.figure(figsize=(10,10))
plt.plot(t,g_euler)
plt.plot(t,g_analytical)
plt.plot(t,g_dnn_ag[0,:])
plt.legend(['euler','analytical','dnn'])
plt.xlabel('Time t')
plt.ylabel('g(t)')
plt.show()
```
## Example: Solving the one dimensional Poisson equation
The Poisson equation for $g(x)$ in one dimension is
<!-- Equation labels as ordinary links -->
<div id="poisson"></div>
$$
\begin{equation} \label{poisson} \tag{13}
-g''(x) = f(x)
\end{equation}
$$
where $f(x)$ is a given function for $x \in (0,1)$.
The conditions that $g(x)$ is chosen to fulfill, are
$$
\begin{align*}
g(0) &= 0 \\
g(1) &= 0
\end{align*}
$$
This equation can be solved numerically using programs based on e.g. Autograd or TensorFlow.
The results from the networks can then be compared to the analytical solution.
In addition, it could be interesting to see how a typical method for numerically solving second order ODEs compares to the neural networks.
## The specific equation to solve for
Here, the function $g(x)$ to solve for follows the equation
$$
-g''(x) = f(x),\qquad x \in (0,1)
$$
where $f(x)$ is a given function, along with the chosen conditions
<!-- Equation labels as ordinary links -->
<div id="cond"></div>
$$
\begin{aligned}
g(0) = g(1) = 0
\end{aligned}\label{cond} \tag{14}
$$
In this example, we consider the case when $f(x) = (3x + x^2)\exp(x)$.
For this case, a possible trial solution satisfying the conditions could be
$$
g_t(x) = x \cdot (1-x) \cdot N(P,x)
$$
The analytical solution for this problem is
$$
g(x) = x(1 - x)\exp(x)
$$
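A quick symbolic check (not in the original notes, assuming `sympy` is available) confirms that this solution satisfies $-g''(x) = f(x)$ and the boundary conditions:

```python
import sympy as sp

x = sp.symbols('x')
g = x*(1 - x)*sp.exp(x)
print(sp.simplify(-sp.diff(g, x, 2) - (3*x + x**2)*sp.exp(x)))  # 0
print(g.subs(x, 0), g.subs(x, 1))                               # 0 0
```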
## Solving the equation using Autograd
```python
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referring to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P; find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad now consists of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
## Set up the cost function specified for this Poisson equation:
# The right side of the ODE
def f(x):
return (3*x + x**2)*np.exp(x)
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the second derivative w.r.t x of the trial function
d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
right_side = f(x)
err_sqr = (-d2_g_t - right_side)**2
cost_sum = np.sum(err_sqr)
return cost_sum/np.size(err_sqr)
# The trial solution:
def g_trial_deep(x,P):
return x*(1-x)*deep_neural_network(P,x)
# The analytic solution;
def g_analytic(x):
return x*(1-x)*np.exp(x)
if __name__ == '__main__':
npr.seed(4155)
## Decide the values of arguments to the function to solve
Nx = 10
x = np.linspace(0,1, Nx)
## Set up the initial parameters
num_hidden_neurons = [200,100]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(x,P)
g_analytical = g_analytic(x)
# Find the maximum absolute difference between the solutions:
max_diff = np.max(np.abs(g_dnn_ag - g_analytical))
print("The max absolute difference between the solutions is: %g"%max_diff)
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, g_analytical)
plt.plot(x, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
plt.show()
```
## Comparing with a numerical scheme
It is possible to solve the Poisson equation using Taylor series to approximate the second derivative.
Using Taylor series, the second derivative can be expressed as
$$
g''(x) = \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2} + E_{\Delta x}(x)
$$
where $\Delta x$ is a small step size and $E_{\Delta x}(x)$ is the error term.
Neglecting the error term gives an approximation to the second derivative:
<!-- Equation labels as ordinary links -->
<div id="approx"></div>
$$
\begin{equation} \label{approx} \tag{15}
g''(x) \approx \frac{g(x + \Delta x) - 2g(x) + g(x-\Delta x)}{\Delta x^2}
\end{equation}
$$
If $x_i = i \Delta x = x_{i-1} + \Delta x$ and $g_i = g(x_i)$ for $i = 1,\dots N_x - 2$ with $N_x$ being the number of values for $x$, ([15](#approx)) becomes
$$
\begin{aligned}
g''(x_i) &\approx \frac{g(x_i + \Delta x) - 2g(x_i) + g(x_i -\Delta x)}{\Delta x^2} \\
&= \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2}
\end{aligned}
$$
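As a quick numerical sanity check (not part of the original notes), the stencil in ([15](#approx)) can be tested on a function with a known second derivative:

```python
import numpy as np

dx = 1e-3
x0 = 0.3
g = np.exp   # test function; g''(x) = exp(x)
approx = (g(x0 + dx) - 2*g(x0) + g(x0 - dx))/dx**2
print(abs(approx - np.exp(x0)))  # error is O(dx**2), here ~1e-7
```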
Since we know from our problem that
$$
\begin{aligned}
-g''(x) &= f(x) \\
&= (3x + x^2)\exp(x)
\end{aligned}
$$
along with the conditions $g(0) = g(1) = 0$,
the following scheme can be used to find an approximate solution for $g(x)$ numerically:
<!-- Equation labels as ordinary links -->
<div id="odesys"></div>
$$
\begin{equation}
\begin{aligned}
-\Big( \frac{g_{i+1} - 2g_i + g_{i-1}}{\Delta x^2} \Big) &= f(x_i) \\
-g_{i+1} + 2g_i - g_{i-1} &= \Delta x^2 f(x_i)
\end{aligned}
\end{equation} \label{odesys} \tag{16}
$$
for $i = 1, \dots, N_x - 2$ where $g_0 = g_{N_x - 1} = 0$ and $f(x_i) = (3x_i + x_i^2)\exp(x_i)$, which is given for our specific problem.
The equation can be rewritten into a matrix equation:
$$
\begin{aligned}
\begin{pmatrix}
2 & -1 & 0 & \dots & 0 \\
-1 & 2 & -1 & \dots & 0 \\
\vdots & & \ddots & & \vdots \\
0 & \dots & -1 & 2 & -1 \\
0 & \dots & 0 & -1 & 2\\
\end{pmatrix}
\begin{pmatrix}
g_1 \\
g_2 \\
\vdots \\
g_{N_x - 3} \\
g_{N_x - 2}
\end{pmatrix}
&=
\Delta x^2
\begin{pmatrix}
f(x_1) \\
f(x_2) \\
\vdots \\
f(x_{N_x - 3}) \\
f(x_{N_x - 2})
\end{pmatrix} \\
\boldsymbol{A}\boldsymbol{g} &= \boldsymbol{f},
\end{aligned}
$$
which makes it possible to solve for the vector $\boldsymbol{g}$.
## Setting up the code
We can then compare the result from this numerical scheme with the output from our network using Autograd:
```python
import autograd.numpy as np
from autograd import grad, elementwise_grad
import autograd.numpy.random as npr
from matplotlib import pyplot as plt
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assumes input x being a one-dimensional array
num_values = np.size(x)
x = x.reshape(-1, num_values)
# Assume that the input layer does nothing to the input x
x_input = x
# Due to multiple hidden layers, define a variable referring to the
# output of the previous layer:
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P; find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_values)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_values)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output
def solve_ode_deep_neural_network(x, num_neurons, num_iter, lmb):
# num_hidden_neurons is now a list of number of neurons within each hidden layer
# Find the number of hidden layers:
N_hidden = np.size(num_neurons)
## Set up initial weights and biases
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 )
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: %g'%cost_function_deep(P, x))
## Start finding the optimal weights using gradient descent
# Find the Python function that represents the gradient of the cost function
# w.r.t the 0-th input argument -- that is the weights and biases in the hidden and output layer
cost_function_deep_grad = grad(cost_function_deep,0)
# Let the update be done num_iter times
for i in range(num_iter):
# Evaluate the gradient at the current weights and biases in P.
# The cost_grad now consists of N_hidden + 1 arrays; the gradient w.r.t the weights and biases
# in the hidden layers and output layers evaluated at x.
cost_deep_grad = cost_function_deep_grad(P, x)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_deep_grad[l]
print('Final cost: %g'%cost_function_deep(P, x))
return P
## Set up the cost function specified for this Poisson equation:
# The right side of the ODE
def f(x):
return (3*x + x**2)*np.exp(x)
def cost_function_deep(P, x):
# Evaluate the trial function with the current parameters P
g_t = g_trial_deep(x,P)
# Find the derivative w.r.t x of the trial function
d2_g_t = elementwise_grad(elementwise_grad(g_trial_deep,0))(x,P)
right_side = f(x)
err_sqr = (-d2_g_t - right_side)**2
cost_sum = np.sum(err_sqr)
return cost_sum/np.size(err_sqr)
# The trial solution:
def g_trial_deep(x,P):
return x*(1-x)*deep_neural_network(P,x)
# The analytic solution;
def g_analytic(x):
return x*(1-x)*np.exp(x)
if __name__ == '__main__':
npr.seed(4155)
## Decide the values of arguments to the function to solve
Nx = 10
x = np.linspace(0,1, Nx)
## Set up the initial parameters
num_hidden_neurons = [200,100]
num_iter = 1000
lmb = 1e-3
P = solve_ode_deep_neural_network(x, num_hidden_neurons, num_iter, lmb)
g_dnn_ag = g_trial_deep(x,P)
g_analytical = g_analytic(x)
# Plot the solution from the network against the analytical solution
plt.figure(figsize=(10,10))
plt.title('Performance of neural network solving an ODE compared to the analytical solution')
plt.plot(x, g_analytical)
plt.plot(x, g_dnn_ag[0,:])
plt.legend(['analytical','nn'])
plt.xlabel('x')
plt.ylabel('g(x)')
## Perform the computation using the numerical scheme
dx = 1/(Nx - 1)
# Set up the matrix A
A = np.zeros((Nx-2,Nx-2))
A[0,0] = 2
A[0,1] = -1
for i in range(1,Nx-3):
A[i,i-1] = -1
A[i,i] = 2
A[i,i+1] = -1
A[Nx - 3, Nx - 4] = -1
A[Nx - 3, Nx - 3] = 2
# Set up the vector f
f_vec = dx**2 * f(x[1:-1])
# Solve the equation
g_res = np.linalg.solve(A,f_vec)
g_vec = np.zeros(Nx)
g_vec[1:-1] = g_res
# Print the differences between each method
max_diff1 = np.max(np.abs(g_dnn_ag - g_analytical))
max_diff2 = np.max(np.abs(g_vec - g_analytical))
print("The max absolute difference between the analytical solution and DNN Autograd: %g"%max_diff1)
print("The max absolute difference between the analytical solution and numerical scheme: %g"%max_diff2)
# Plot the results
plt.figure(figsize=(10,10))
plt.plot(x,g_vec)
plt.plot(x,g_analytical)
plt.plot(x,g_dnn_ag[0,:])
plt.legend(['numerical scheme','analytical','dnn'])
plt.show()
```
## Partial Differential Equations
A partial differential equation (PDE) is an equation whose solution
is a function of multiple variables. The equation may involve all kinds
of combinations of derivatives with respect to these variables.
In general, a partial differential equation for a function $g(x_1,\dots,x_N)$ with $N$ variables may be expressed as
<!-- Equation labels as ordinary links -->
<div id="PDE"></div>
$$
\begin{equation} \label{PDE} \tag{17}
f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) = 0
\end{equation}
$$
where $f$ is an expression involving all kinds of possible mixed derivatives of $g(x_1,\dots,x_N)$ up to an order $n$. In order for the solution to be unique, some additional conditions must also be given.
## Type of problem
The problem our network must solve is similar to the ODE case:
we must have a trial solution $g_t$ at hand.
For instance, the trial solution could be expressed as
$$
\begin{align*}
g_t(x_1,\dots,x_N) = h_1(x_1,\dots,x_N) + h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))
\end{align*}
$$
where $h_1(x_1,\dots,x_N)$ is a function that ensures $g_t(x_1,\dots,x_N)$ satisfies some given conditions.
The neural network $N(x_1,\dots,x_N,P)$ has weights and biases described by $P$ and $h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))$ is an expression using the output from the neural network in some way.
The role of the function $h_2(x_1,\dots,x_N,N(x_1,\dots,x_N,P))$, is to ensure that the output of $N(x_1,\dots,x_N,P)$ is zero when $g_t(x_1,\dots,x_N)$ is evaluated at the values of $x_1,\dots,x_N$ where the given conditions must be satisfied. The function $h_1(x_1,\dots,x_N)$ should alone make $g_t(x_1,\dots,x_N)$ satisfy the conditions.
## Network requirements
The network then tries to minimize the cost function, following the
same ideas as described for the ODE case, but now with more than one
variable to consider. The concept still remains the same: find a set
of parameters $P$ such that the expression $f$ in ([17](#PDE)) is as
close to zero as possible.
As for the ODE case, the cost function is the mean squared error that
the network must try to minimize. The cost function for the network to
minimize is
$$
C\left(x_1, \dots, x_N, P\right) = \left( f\left(x_1, \, \dots \, , x_N, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1}, \dots , \frac{\partial g(x_1,\dots,x_N) }{\partial x_N}, \frac{\partial g(x_1,\dots,x_N) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(x_1,\dots,x_N) }{\partial x_N^n} \right) \right)^2
$$
## More details
If we let $\boldsymbol{x} = \big( x_1, \dots, x_N \big)$ be an array containing the values for $x_1, \dots, x_N$ respectively, the cost function can be reformulated into the following:
$$
C\left(\boldsymbol{x}, P\right) = f\left( \left( \boldsymbol{x}, \frac{\partial g(\boldsymbol{x}) }{\partial x_1}, \dots , \frac{\partial g(\boldsymbol{x}) }{\partial x_N}, \frac{\partial g(\boldsymbol{x}) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\boldsymbol{x}) }{\partial x_N^n} \right) \right)^2
$$
If we also have $M$ different sets of values for $x_1, \dots, x_N$, that is $\boldsymbol{x}_i = \big(x_1^{(i)}, \dots, x_N^{(i)}\big)$ for $i = 1,\dots,M$ being the rows in matrix $X$, the cost function can be generalized into
$$
C\left(X, P \right) = \sum_{i=1}^M f\left( \left( \boldsymbol{x}_i, \frac{\partial g(\boldsymbol{x}_i) }{\partial x_1}, \dots , \frac{\partial g(\boldsymbol{x}_i) }{\partial x_N}, \frac{\partial g(\boldsymbol{x}_i) }{\partial x_1\partial x_2}, \, \dots \, , \frac{\partial^n g(\boldsymbol{x}_i) }{\partial x_N^n} \right) \right)^2.
$$
## Example: The diffusion equation
In one spatial dimension, the equation reads
$$
\frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2}
$$
where a possible choice of conditions are
$$
\begin{align*}
g(0,t) &= 0 ,\qquad t \geq 0 \\
g(1,t) &= 0, \qquad t \geq 0 \\
g(x,0) &= u(x),\qquad x\in [0,1]
\end{align*}
$$
with $u(x)$ being some given function.
## Defining the problem
For this case, we want to find $g(x,t)$ such that
<!-- Equation labels as ordinary links -->
<div id="diffonedim"></div>
$$
\begin{equation} \label{diffonedim} \tag{18}
\frac{\partial g(x,t)}{\partial t} = \frac{\partial^2 g(x,t)}{\partial x^2}
\end{equation}
$$
and
$$
\begin{align*}
g(0,t) &= 0 ,\qquad t \geq 0 \\
g(1,t) &= 0, \qquad t \geq 0 \\
g(x,0) &= u(x),\qquad x\in [0,1]
\end{align*}
$$
with $u(x) = \sin(\pi x)$.
First, let us set up the deep neural network.
The deep neural network will follow the same structure as discussed in the examples solving the ODEs.
First, we will look into how Autograd could be used in a network tailored to solve for bivariate functions.
## Setting up the network using Autograd
The only change needed here is to extend our network such that
functions of multiple variables are correctly handled. In this case
we have two variables in our function to solve for, that is time $t$
and position $x$. The variables will be represented by a
one-dimensional array in the program. The program will evaluate the
network at each possible pair $(x,t)$, given an array for the desired
$x$-values and $t$-values to approximate the solution at.
```python
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
```
## Setting up the network using Autograd; The trial solution
The cost function must then iterate through the given arrays
containing values for $x$ and $t$, define a point $(x,t)$ at which the deep
neural network and the trial solution are evaluated, and then find
the Jacobian of the trial solution.
A possible trial solution for this PDE is
$$
g_t(x,t) = h_1(x,t) + x(1-x)tN(x,t,P)
$$
with $h_1(x,t)$ being a function ensuring that $g_t(x,t)$ satisfies our given conditions, and $N(x,t,P)$ being the output from the deep neural network using weights and biases for each layer from $P$.
To fulfill the conditions, $h_1(x,t)$ could be:
$$
h_1(x,t) = (1-t)\Big(u(x) - \big((1-x)u(0) + x u(1)\big)\Big) = (1-t)u(x) = (1-t)\sin(\pi x)
$$
since $u(0) = u(1) = 0$ and $u(x) = \sin(\pi x)$.
## Why the Jacobian?
The Jacobian is used because the program must find the derivative of
the trial solution with respect to $x$ and $t$.
This gives the necessity of computing the Jacobian matrix, as we want
to evaluate the gradient with respect to $x$ and $t$ (note that the
Jacobian of a scalar-valued multivariate function is simply its
gradient).
In Autograd, the differentiation is by default done with respect to
the first input argument of your Python function. Since the point is
an array representing $x$ and $t$, the Jacobian is calculated using
the values of $x$ and $t$.
To find the second derivatives with respect to $x$ and $t$, the
Jacobian can be computed a second time. The result is the Hessian
matrix, which is the matrix containing all the possible second order
mixed derivatives of $g(x,t)$.
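As a small standalone sketch (the function $g$ and the evaluation point are chosen purely for illustration), the following shows how `jacobian` and `hessian` from Autograd behave for a scalar function of a point $(x,t)$:
```python
# A minimal sketch; the function g and the evaluation point are made up
# purely to illustrate the shapes returned by jacobian and hessian.
import autograd.numpy as np
from autograd import jacobian, hessian

def g(point):
    x, t = point
    return np.sin(np.pi*x)*np.exp(-np.pi**2*t)

point = np.array([0.3, 0.5])
print(jacobian(g)(point))   # the gradient [dg/dx, dg/dt]
print(hessian(g)(point))    # 2x2 matrix; entry [0][0] is d^2 g/dx^2
```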
```python
# Set up the trial function:
def u(x):
return np.sin(np.pi*x)
def g_trial(point,P):
x,t = point
return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
# The right side of the PDE:
def f(point):
return 0.
# The cost function:
def cost_function(P, x, t):
cost_sum = 0
g_t_jacobian_func = jacobian(g_trial)
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t = g_trial(point,P)
g_t_jacobian = g_t_jacobian_func(point,P)
g_t_hessian = g_t_hessian_func(point,P)
g_t_dt = g_t_jacobian[1]
g_t_d2x = g_t_hessian[0][0]
func = f(point)
err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
cost_sum += err_sqr
return cost_sum
```
## Setting up the network using Autograd; The full program
Having set up the network, along with the trial solution and cost function, we can now see how the deep neural network performs by comparing the results to the analytical solution.
The analytical solution of our problem is
$$
g(x,t) = \exp(-\pi^2 t)\sin(\pi x)
$$
A possible way to implement a neural network solving the PDE, is given below.
Be aware, though, that it is fairly slow for the parameters used.
A better result is possible, but requires more iterations, and thus longer time to complete.
Indeed, the program below is not optimal in its implementation, but rather serves as an example of how to implement and use a neural network to solve a PDE.
Using TensorFlow results in a much better execution time. Try it!
```python
import autograd.numpy as np
from autograd import jacobian,hessian,grad
import autograd.numpy.random as npr
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
## Set up the network
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
## Define the trial solution and cost function
def u(x):
return np.sin(np.pi*x)
def g_trial(point,P):
x,t = point
return (1-t)*u(x) + x*(1-x)*t*deep_neural_network(P,point)
# The right side of the PDE:
def f(point):
return 0.
# The cost function:
def cost_function(P, x, t):
cost_sum = 0
g_t_jacobian_func = jacobian(g_trial)
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t = g_trial(point,P)
g_t_jacobian = g_t_jacobian_func(point,P)
g_t_hessian = g_t_hessian_func(point,P)
g_t_dt = g_t_jacobian[1]
g_t_d2x = g_t_hessian[0][0]
func = f(point)
err_sqr = ( (g_t_dt - g_t_d2x) - func)**2
cost_sum += err_sqr
return cost_sum /( np.size(x)*np.size(t) )
## For comparison, define the analytical solution
def g_analytic(point):
x,t = point
return np.exp(-np.pi**2*t)*np.sin(np.pi*x)
## Set up a function for training the network to solve for the equation
def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
## Set up initial weights and biases
N_hidden = np.size(num_neurons)
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since we have two input coordinates (x and t), +1 to include bias
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: ',cost_function(P, x, t))
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
cost_grad = cost_function_grad(P, x , t)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_grad[l]
print('Final cost: ',cost_function(P, x, t))
return P
if __name__ == '__main__':
### Use the neural network:
npr.seed(15)
## Decide the values of arguments to the function to solve
Nx = 10; Nt = 10
x = np.linspace(0, 1, Nx)
t = np.linspace(0,1,Nt)
## Set up the parameters for the network
num_hidden_neurons = [100, 25]
num_iter = 250
lmb = 0.01
P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
## Store the results
g_dnn_ag = np.zeros((Nx, Nt))
G_analytical = np.zeros((Nx, Nt))
for i,x_ in enumerate(x):
for j, t_ in enumerate(t):
point = np.array([x_, t_])
g_dnn_ag[i,j] = g_trial(point,P)
G_analytical[i,j] = g_analytic(point)
# Find the max difference between the analytical and the computed solution
diff_ag = np.abs(g_dnn_ag - G_analytical)
print('Max absolute difference between the analytical solution and the network: %g'%np.max(diff_ag))
## Plot the solutions in two dimensions, that is, in position and time
T,X = np.meshgrid(t,x)
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))
s = ax.plot_surface(T,X,g_dnn_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Analytical solution')
s = ax.plot_surface(T,X,G_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Difference')
s = ax.plot_surface(T,X,diff_ag,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
## Take some slices of the 3D plots just to see the solutions at particular times
indx1 = 0
indx2 = int(Nt/2)
indx3 = Nt-1
t1 = t[indx1]
t2 = t[indx2]
t3 = t[indx3]
# Slice the results from the DNN
res1 = g_dnn_ag[:,indx1]
res2 = g_dnn_ag[:,indx2]
res3 = g_dnn_ag[:,indx3]
# Slice the analytical results
res_analytical1 = G_analytical[:,indx1]
res_analytical2 = G_analytical[:,indx2]
res_analytical3 = G_analytical[:,indx3]
# Plot the slices
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t1)
plt.plot(x, res1)
plt.plot(x,res_analytical1)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t2)
plt.plot(x, res2)
plt.plot(x,res_analytical2)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t3)
plt.plot(x, res3)
plt.plot(x,res_analytical3)
plt.legend(['dnn','analytical'])
plt.show()
```
## Example: Solving the wave equation with Neural Networks
The wave equation is
$$
\frac{\partial^2 g(x,t)}{\partial t^2} = c^2\frac{\partial^2 g(x,t)}{\partial x^2}
$$
with $c$ being the specified wave speed.
Here, the chosen conditions are
$$
\begin{align*}
g(0,t) &= 0 \\
g(1,t) &= 0 \\
g(x,0) &= u(x) \\
\frac{\partial g(x,t)}{\partial t} \Big |_{t = 0} &= v(x)
\end{align*}
$$
where $\frac{\partial g(x,t)}{\partial t} \Big |_{t = 0}$ means the derivative of $g(x,t)$ with respect to $t$ evaluated at $t = 0$, and $u(x)$ and $v(x)$ are given functions.
## The problem to solve for
The wave equation to solve is
<!-- Equation labels as ordinary links -->
<div id="wave"></div>
$$
\begin{equation} \label{wave} \tag{19}
\frac{\partial^2 g(x,t)}{\partial t^2} = c^2 \frac{\partial^2 g(x,t)}{\partial x^2}
\end{equation}
$$
where $c$ is the given wave speed.
The chosen conditions for this equation are
<!-- Equation labels as ordinary links -->
<div id="condwave"></div>
$$
\begin{aligned}
g(0,t) &= 0, &t \geq 0 \\
g(1,t) &= 0, &t \geq 0 \\
g(x,0) &= u(x), &x\in[0,1] \\
\frac{\partial g(x,t)}{\partial t}\Big |_{t = 0} &= v(x), &x \in [0,1]
\end{aligned} \label{condwave} \tag{20}
$$
In this example, let $c = 1$ and $u(x) = \sin(\pi x)$ and $v(x) = -\pi\sin(\pi x)$.
## The trial solution
Setting up the network is done in a similar manner as for the example of solving the diffusion equation.
The only things we have to change are the trial solution, such that it satisfies the conditions from ([20](#condwave)), and the cost function.
The trial solution becomes slightly different since we have other conditions than in the example of solving the diffusion equation. Here, a possible trial solution $g_t(x,t)$ is
$$
g_t(x,t) = h_1(x,t) + x(1-x)t^2N(x,t,P)
$$
where
$$
h_1(x,t) = (1-t^2)u(x) + tv(x)
$$
Note that this trial solution satisfies the conditions only if $u(0) = v(0) = u(1) = v(1) = 0$, which is the case in this example.
## The analytical solution
The analytical solution for our specific problem, is
$$
g(x,t) = \sin(\pi x)\cos(\pi t) - \sin(\pi x)\sin(\pi t)
$$
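A quick symbolic check (a sketch using `sympy`, which is otherwise not needed for the program below) confirms that this function satisfies both the wave equation and the given conditions:
```python
# A sketch verifying the analytical solution symbolically with sympy.
import sympy as sp

x, t = sp.symbols('x t')
g = sp.sin(sp.pi*x)*sp.cos(sp.pi*t) - sp.sin(sp.pi*x)*sp.sin(sp.pi*t)

print(sp.simplify(sp.diff(g, t, 2) - sp.diff(g, x, 2)))  # 0: wave equation with c = 1
print(g.subs(t, 0))                                      # sin(pi*x) = u(x)
print(sp.diff(g, t).subs(t, 0))                          # -pi*sin(pi*x) = v(x)
```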
## Solving the wave equation - the full program using Autograd
```python
import autograd.numpy as np
from autograd import hessian,grad
import autograd.numpy.random as npr
from matplotlib import cm
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
## Set up the trial function:
def u(x):
return np.sin(np.pi*x)
def v(x):
return -np.pi*np.sin(np.pi*x)
def h1(point):
x,t = point
return (1 - t**2)*u(x) + t*v(x)
def g_trial(point,P):
x,t = point
return h1(point) + x*(1-x)*t**2*deep_neural_network(P,point)
## Define the cost function
def cost_function(P, x, t):
cost_sum = 0
g_t_hessian_func = hessian(g_trial)
for x_ in x:
for t_ in t:
point = np.array([x_,t_])
g_t_hessian = g_t_hessian_func(point,P)
g_t_d2x = g_t_hessian[0][0]
g_t_d2t = g_t_hessian[1][1]
err_sqr = ( (g_t_d2t - g_t_d2x) )**2
cost_sum += err_sqr
return cost_sum / (np.size(t) * np.size(x))
## The neural network
def sigmoid(z):
return 1/(1 + np.exp(-z))
def deep_neural_network(deep_params, x):
# x is now a point and a 1D numpy array; make it a column vector
num_coordinates = np.size(x,0)
x = x.reshape(num_coordinates,-1)
num_points = np.size(x,1)
# N_hidden is the number of hidden layers
N_hidden = np.size(deep_params) - 1 # -1 since params consist of parameters to all the hidden layers AND the output layer
# Assume that the input layer does nothing to the input x
x_input = x
x_prev = x_input
## Hidden layers:
for l in range(N_hidden):
# From the list of parameters P, find the correct weights and bias for this layer
w_hidden = deep_params[l]
# Add a row of ones to include bias
x_prev = np.concatenate((np.ones((1,num_points)), x_prev ), axis = 0)
z_hidden = np.matmul(w_hidden, x_prev)
x_hidden = sigmoid(z_hidden)
# Update x_prev such that next layer can use the output from this layer
x_prev = x_hidden
## Output layer:
# Get the weights and bias for this layer
w_output = deep_params[-1]
# Include bias:
x_prev = np.concatenate((np.ones((1,num_points)), x_prev), axis = 0)
z_output = np.matmul(w_output, x_prev)
x_output = z_output
return x_output[0][0]
## The analytical solution
def g_analytic(point):
x,t = point
return np.sin(np.pi*x)*np.cos(np.pi*t) - np.sin(np.pi*x)*np.sin(np.pi*t)
def solve_pde_deep_neural_network(x,t, num_neurons, num_iter, lmb):
## Set up initial weights and biases
N_hidden = np.size(num_neurons)
# Initialize the list of parameters:
P = [None]*(N_hidden + 1) # + 1 to include the output layer
P[0] = npr.randn(num_neurons[0], 2 + 1 ) # 2 since we have two input coordinates (x and t), +1 to include bias
for l in range(1,N_hidden):
P[l] = npr.randn(num_neurons[l], num_neurons[l-1] + 1) # +1 to include bias
# For the output layer
P[-1] = npr.randn(1, num_neurons[-1] + 1 ) # +1 since bias is included
print('Initial cost: ',cost_function(P, x, t))
cost_function_grad = grad(cost_function,0)
# Let the update be done num_iter times
for i in range(num_iter):
cost_grad = cost_function_grad(P, x , t)
for l in range(N_hidden+1):
P[l] = P[l] - lmb * cost_grad[l]
print('Final cost: ',cost_function(P, x, t))
return P
if __name__ == '__main__':
### Use the neural network:
npr.seed(15)
## Decide the values of arguments to the function to solve
Nx = 10; Nt = 10
x = np.linspace(0, 1, Nx)
t = np.linspace(0,1,Nt)
## Set up the parameters for the network
num_hidden_neurons = [50,20]
num_iter = 1000
lmb = 0.01
P = solve_pde_deep_neural_network(x,t, num_hidden_neurons, num_iter, lmb)
## Store the results
res = np.zeros((Nx, Nt))
res_analytical = np.zeros((Nx, Nt))
for i,x_ in enumerate(x):
for j, t_ in enumerate(t):
point = np.array([x_, t_])
res[i,j] = g_trial(point,P)
res_analytical[i,j] = g_analytic(point)
diff = np.abs(res - res_analytical)
print("Max difference between analytical and solution from nn: %g"%np.max(diff))
## Plot the solutions in two dimensions, that is, in position and time
T,X = np.meshgrid(t,x)
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Solution from the deep neural network w/ %d layer'%len(num_hidden_neurons))
s = ax.plot_surface(T,X,res,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Analytical solution')
s = ax.plot_surface(T,X,res_analytical,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
fig = plt.figure(figsize=(10,10))
ax = fig.gca(projection='3d')
ax.set_title('Difference')
s = ax.plot_surface(T,X,diff,linewidth=0,antialiased=False,cmap=cm.viridis)
ax.set_xlabel('Time $t$')
ax.set_ylabel('Position $x$');
## Take some slices of the 3D plots just to see the solutions at particular times
indx1 = 0
indx2 = int(Nt/2)
indx3 = Nt-1
t1 = t[indx1]
t2 = t[indx2]
t3 = t[indx3]
# Slice the results from the DNN
res1 = res[:,indx1]
res2 = res[:,indx2]
res3 = res[:,indx3]
# Slice the analytical results
res_analytical1 = res_analytical[:,indx1]
res_analytical2 = res_analytical[:,indx2]
res_analytical3 = res_analytical[:,indx3]
# Plot the slices
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t1)
plt.plot(x, res1)
plt.plot(x,res_analytical1)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t2)
plt.plot(x, res2)
plt.plot(x,res_analytical2)
plt.legend(['dnn','analytical'])
plt.figure(figsize=(10,10))
plt.title("Computed solutions at time = %g"%t3)
plt.plot(x, res3)
plt.plot(x,res_analytical3)
plt.legend(['dnn','analytical'])
plt.show()
```
## Resources on differential equations and deep learning
1. [Artificial neural networks for solving ordinary and partial differential equations by I.E. Lagaris et al](https://pdfs.semanticscholar.org/d061/df393e0e8fbfd0ea24976458b7d42419040d.pdf)
2. [Neural networks for solving differential equations by A. Honchar](https://becominghuman.ai/neural-networks-for-solving-differential-equations-fa230ac5e04c)
3. [Solving differential equations using neural networks by M.M Chiaramonte and M. Kiener](http://cs229.stanford.edu/proj2013/ChiaramonteKiener-SolvingDifferentialEquationsUsingNeuralNetworks.pdf)
4. [Introduction to Partial Differential Equations by A. Tveito, R. Winther](https://www.springer.com/us/book/9783540225515)
## Convolutional Neural Networks (recognizing images)
Convolutional neural networks (CNNs) were developed during the last
decade of the previous century, with a focus on character recognition
tasks. Nowadays, CNNs are a central element in the spectacular success
of deep learning methods. Their success in, for example, image
classification has made them a central tool for most machine
learning practitioners.
CNNs are very similar to ordinary Neural Networks.
They are made up of neurons that have learnable weights and
biases. Each neuron receives some inputs, performs a dot product and
optionally follows it with a non-linearity. The whole network still
expresses a single differentiable score function: from the raw image
pixels on one end to class scores at the other. And they still have a
loss function (for example Softmax) on the last (fully-connected) layer
and all the tips/tricks we developed for learning regular Neural
Networks still apply (back propagation, gradient descent etc etc).
## What is the Difference
**CNN architectures make the explicit assumption that
the inputs are images, which allows us to encode certain properties
into the architecture. These then make the forward function more
efficient to implement and vastly reduce the amount of parameters in
the network.**
Here we provide only a superficial overview. For the more interested reader, we highly recommend the course
[IN5400 – Machine Learning for Image Analysis](https://www.uio.no/studier/emner/matnat/ifi/IN5400/index-eng.html)
and the slides of [CS231](http://cs231n.github.io/convolutional-networks/).
Another good read is the article here <https://arxiv.org/pdf/1603.07285.pdf>.
## Neural Networks vs CNNs
Neural networks are defined as **affine transformations**, that is,
a vector is received as input and is multiplied with a matrix of so-called weights (our unknown parameters) to produce an
output (to which a bias vector is usually added before passing the result
through a nonlinear activation function). This is applicable to any type of input, be it an
image, a sound clip or an unordered collection of features: whatever their
dimensionality, their representation can always be flattened into a vector
before the transformation.
## Why CNNS for images, sound files, medical images from CT scans etc?
However, when we consider images, sound clips and many other similar kinds of data, these data have an intrinsic
structure. More formally, they share these important properties:
* They are stored as multi-dimensional arrays (think of the pixels of a figure).
* They feature one or more axes for which ordering matters (e.g., width and height axes for an image, time axis for a sound clip).
* One axis, called the channel axis, is used to access different views of the data (e.g., the red, green and blue channels of a color image, or the left and right channels of a stereo audio track).
These properties are not exploited when an affine transformation is applied; in
fact, all the axes are treated in the same way and the topological information
is not taken into account. Still, taking advantage of the implicit structure of
the data may prove very handy in solving some tasks, like computer vision and
speech recognition, and in these cases it would be best to preserve it. This is
where discrete convolutions come into play.
A discrete convolution is a linear transformation that preserves this notion of
ordering. It is sparse (only a few input units contribute to a given output
unit) and reuses parameters (the same weights are applied to multiple locations
in the input).
## Regular NNs don’t scale well to full images
As an example, consider
an image of size $32\times 32\times 3$ (32 wide, 32 high, 3 color channels), so a
single fully-connected neuron in a first hidden layer of a regular
Neural Network would have $32\times 32\times 3 = 3072$ weights. This amount still
seems manageable, but clearly this fully-connected structure does not
scale to larger images. For example, an image of more respectable
size, say $200\times 200\times 3$, would lead to neurons that have
$200\times 200\times 3 = 120,000$ weights.
We could have
several such neurons, and the parameters would add up quickly! Clearly,
this full connectivity is wasteful and the huge number of parameters
would quickly lead to possible overfitting.
*Figure: A regular 3-layer Neural Network.*
## 3D volumes of neurons
Convolutional Neural Networks take advantage of the fact that the
input consists of images and they constrain the architecture in a more
sensible way.
In particular, unlike a regular Neural Network, the
layers of a CNN have neurons arranged in 3 dimensions: width,
height, depth. (Note that the word depth here refers to the third
dimension of an activation volume, not to the depth of a full Neural
Network, which can refer to the total number of layers in a network.)
To understand this better, consider the above example of an image:
the input volume of
activations has dimensions $32\times 32\times 3$ (width, height,
depth respectively).
The neurons in a layer will
only be connected to a small region of the layer before it, instead of
all of the neurons in a fully-connected manner. Moreover, the final
output layer could for this specific image have dimensions $1\times 1 \times 10$,
because by the
end of the CNN architecture we will reduce the full image into a
single vector of class scores, arranged along the depth
dimension.
*Figure: A CNN arranges its neurons in three dimensions (width, height, depth), as visualized in one of the layers. Every layer of a CNN transforms the 3D input volume to a 3D output volume of neuron activations. In this example, the red input layer holds the image, so its width and height would be the dimensions of the image, and the depth would be 3 (Red, Green, Blue channels).*
## Layers used to build CNNs
A simple CNN is a sequence of layers, and every layer of a CNN
transforms one volume of activations to another through a
differentiable function. We use three main types of layers to build
CNN architectures: Convolutional Layer, Pooling Layer, and
Fully-Connected Layer (exactly as seen in regular Neural Networks). We
will stack these layers to form a full CNN architecture.
A simple CNN for image classification could have the architecture:
* **INPUT** ($32\times 32 \times 3$) will hold the raw pixel values of the image, in this case an image of width 32, height 32, and with three color channels R,G,B.
* **CONV** (convolutional) layer will compute the output of neurons that are connected to local regions in the input, each computing a dot product between their weights and a small region they are connected to in the input volume. This may result in a volume such as $[32\times 32\times 12]$ if we decided to use 12 filters.
* **RELU** layer will apply an elementwise activation function, such as the $max(0,x)$ thresholding at zero. This leaves the size of the volume unchanged ($[32\times 32\times 12]$).
* **POOL** (pooling) layer will perform a downsampling operation along the spatial dimensions (width, height), resulting in volume such as $[16\times 16\times 12]$.
* **FC** (i.e. fully-connected) layer will compute the class scores, resulting in a volume of size $[1\times 1\times 10]$, where each of the 10 numbers corresponds to a class score, such as among the 10 categories of the MNIST images we considered above. As with ordinary Neural Networks and as the name implies, each neuron in this layer will be connected to all the numbers in the previous volume. A minimal Keras sketch of this stack is given below.
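As promised in the list, here is a minimal Keras sketch of this exact stack (the $3\times 3$ kernel size is an arbitrary choice on our part; only the volume sizes follow the list above):
```python
# A sketch of the INPUT-CONV-RELU-POOL-FC stack above in Keras.
# The 3x3 kernel size is an assumption; the volume sizes match the list.
from tensorflow.keras import layers, models

model = models.Sequential([
    layers.Conv2D(12, (3, 3), padding='same', activation='relu',
                  input_shape=(32, 32, 3)),   # CONV + RELU: 32x32x12
    layers.MaxPooling2D(pool_size=(2, 2)),    # POOL: 16x16x12
    layers.Flatten(),
    layers.Dense(10, activation='softmax')    # FC: 10 class scores
])
model.summary()
```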
## Transforming images
CNNs transform the original image layer by layer from the original
pixel values to the final class scores.
Observe that some layers contain
parameters and others don’t. In particular, the CONV/FC layers perform
transformations that are a function of not only the activations in the
input volume, but also of the parameters (the weights and biases of
the neurons). On the other hand, the RELU/POOL layers will implement a
fixed function. The parameters in the CONV/FC layers will be trained
with gradient descent so that the class scores that the CNN computes
are consistent with the labels in the training set for each image.
## CNNs in brief
In summary:
* A CNN architecture is in the simplest case a list of Layers that transform the image volume into an output volume (e.g. holding the class scores)
* There are a few distinct types of Layers (e.g. CONV/FC/RELU/POOL are by far the most popular)
* Each Layer accepts an input 3D volume and transforms it to an output 3D volume through a differentiable function
* Each Layer may or may not have parameters (e.g. CONV/FC do, RELU/POOL don’t)
* Each Layer may or may not have additional hyperparameters (e.g. CONV/FC/POOL do, RELU doesn’t)
For more material on convolutional networks, we strongly recommend
the course
[IN5400 – Machine Learning for Image Analysis](https://www.uio.no/studier/emner/matnat/ifi/IN5400/index-eng.html)
and the slides of [CS231](http://cs231n.github.io/convolutional-networks/) which is taught at Stanford University (consistently ranked as one of the top computer science programs in the world). [Michael Nielsen's book is a must read, in particular chapter 6 which deals with CNNs](http://neuralnetworksanddeeplearning.com/chap6.html).
The textbook by Goodfellow et al, see chapter 9 contains an in depth discussion as well.
## Key Idea
A dense neural network is represented by an affine operation (like matrix-matrix multiplication) where all parameters are included.
The key idea in CNNs for, say, imaging is that in images neighboring pixels tend to be related! So we connect
each neuron in the first hidden layer only to neighboring inputs, instead of connecting it to all of them.
We say we perform a filtering (convolution is the mathematical operation).
## Mathematics of CNNs
The mathematics of CNNs is based on the mathematical operation of
**convolution**. In mathematics (in particular in functional analysis),
convolution is represented by a mathematical operation (integration,
summation, etc.) on two functions in order to produce a third function
that expresses how the shape of one gets modified by the other.
Convolution has a plethora of applications in a variety of disciplines, spanning from statistics to signal processing, computer vision, solutions of differential equations, linear algebra, engineering, and yes, machine learning.
Mathematically, convolution is defined as follows (one-dimensional example):
Let us define a continuous function $y(t)$ given by
$$
y(t) = \int x(a) w(t-a) da,
$$
where $x(a)$ represents a so-called input and $w(t-a)$ is normally called the weight function or kernel.
The above integral is written in a more compact form as
$$
y(t) = \left(x * w\right)(t).
$$
The discretized version reads
$$
y(t) = \sum_{a=-\infty}^{a=\infty}x(a)w(t-a).
$$
Computing the inverse of the above convolution operations is known as deconvolution.
How can we use this? And what does it mean? Let us study some familiar examples first.
## Convolution Examples: Polynomial multiplication
We have already met such an example in project 1 when we tried to set
up the design matrix for a two-dimensional function. This was an
example of polynomial multiplication. Let us recast such a problem in terms of the convolution operation.
Let us look at the following polynomials to second and third order, respectively:
$$
p(t) = \alpha_0+\alpha_1 t+\alpha_2 t^2,
$$
and
$$
s(t) = \beta_0+\beta_1 t+\beta_2 t^2+\beta_3 t^3.
$$
The polynomial multiplication gives us a new polynomial of degree $5$
$$
z(t) = \delta_0+\delta_1 t+\delta_2 t^2+\delta_3 t^3+\delta_4 t^4+\delta_5 t^5.
$$
## Efficient Polynomial Multiplication
Polynomial products can be computed efficiently if we rewrite the brute-force multiplications using convolution.
We note first that the new coefficients are given as
$$
\begin{split}
\delta_0=&\alpha_0\beta_0\\
\delta_1=&\alpha_0\beta_1+\alpha_1\beta_0\\
\delta_2=&\alpha_0\beta_2+\alpha_1\beta_1+\alpha_2\beta_0\\
\delta_3=&\alpha_1\beta_2+\alpha_2\beta_1+\alpha_0\beta_3\\
\delta_4=&\alpha_2\beta_2+\alpha_1\beta_3\\
\delta_5=&\alpha_2\beta_3.\\
\end{split}
$$
We note that $\alpha_i=0$ except for $i\in \left\{0,1,2\right\}$ and $\beta_i=0$ except for $i\in\left\{0,1,2,3\right\}$.
We can then rewrite the coefficients $\delta_j$ using a discrete convolution as
$$
\delta_j = \sum_{i=-\infty}^{i=\infty}\alpha_i\beta_{j-i}=(\alpha * \beta)_j,
$$
or as a double sum with the restriction $l=i+j$,
$$
\delta_l = \sum_{i+j=l}\alpha_i\beta_{j}.
$$
Do you see a potential drawback with these equations?
## A more efficient way of coding the above Convolution
Since we only have a finite number of $\alpha$ and $\beta$ values
which are non-zero, we can rewrite the above convolution expressions
as a matrix-vector multiplication
$$
\boldsymbol{\delta}=\begin{bmatrix}\alpha_0 & 0 & 0 & 0 \\
\alpha_1 & \alpha_0 & 0 & 0 \\
\alpha_2 & \alpha_1 & \alpha_0 & 0 \\
0 & \alpha_2 & \alpha_1 & \alpha_0 \\
0 & 0 & \alpha_2 & \alpha_1 \\
0 & 0 & 0 & \alpha_2
\end{bmatrix}\begin{bmatrix} \beta_0 \\ \beta_1 \\ \beta_2 \\ \beta_3\end{bmatrix}.
$$
The process is commutative and we can easily see that we can rewrite the multiplication in terms of a matrix holding $\beta$ and a vector holding $\alpha$.
In this case we have
$$
\boldsymbol{\delta}=\begin{bmatrix}\beta_0 & 0 & 0 \\
\beta_1 & \beta_0 & 0 \\
\beta_2 & \beta_1 & \beta_0 \\
\beta_3 & \beta_2 & \beta_1 \\
0 & \beta_3 & \beta_2 \\
0 & 0 & \beta_3
\end{bmatrix}\begin{bmatrix} \alpha_0 \\ \alpha_1 \\ \alpha_2\end{bmatrix}.
$$
Note that the use of these matrices is for mathematical purposes only and not for implementation purposes.
When implementing the above equation we do not encode (and allocate memory for) the matrices explicitly.
We rather code the convolutions in the minimal memory footprint that they require.
Does the number of floating point operations change here when we use the commutative property?
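As a concrete check (a minimal sketch; the coefficient values are chosen arbitrarily), NumPy's `np.convolve` computes exactly these coefficients $\delta_j$ without ever forming the matrices above:
```python
# A minimal sketch: polynomial multiplication as a discrete convolution.
# The coefficient values below are arbitrary illustration values.
import numpy as np

alpha = np.array([1.0, 2.0, 3.0])        # p(t) = 1 + 2t + 3t^2
beta  = np.array([4.0, 5.0, 6.0, 7.0])   # s(t) = 4 + 5t + 6t^2 + 7t^3

# delta_j = sum_i alpha_i * beta_{j-i}, exactly the convolution above
delta = np.convolve(alpha, beta)
print(delta)   # the six coefficients delta_0, ..., delta_5 of z(t)
```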
## Convolution Examples: Principle of Superposition and Periodic Forces (Fourier Transforms)
For problems with so-called harmonic oscillations, given by for example the following differential equation
$$
m\frac{d^2x}{dt^2}+\eta\frac{dx}{dt}+x(t)=F(t),
$$
where $F(t)$ is an applied external force acting on the system (often called a driving force), one can use the theory of Fourier transformations to find the solutions of this type of equations.
If one has several driving forces, $F(t)=\sum_n F_n(t)$, one can find
the particular solution to each $F_n$, $x_{pn}(t)$, and the particular
solution for the entire driving force is then given by a series like
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
x_p(t)=\sum_nx_{pn}(t).
\label{_auto3} \tag{21}
\end{equation}
$$
## Principle of Superposition
This is known as the principle of superposition. It only applies when
the homogeneous equation is linear. If there were an anharmonic term
such as $x^3$ in the homogeneous equation, then when one summed various
solutions, $x=\sum_n x_n$, the cubic term would generate cross
terms. Superposition is especially useful when $F(t)$ can be written
as a sum of sinusoidal terms, because the solution for each
sinusoidal (sine or cosine) term is analytic.
Driving forces are often periodic, even when they are not
sinusoidal. Periodicity implies that for some time $\tau$
$$
\begin{eqnarray}
F(t+\tau)=F(t).
\end{eqnarray}
$$
One example of a non-sinusoidal periodic force is a square wave. Many
components in electric circuits are non-linear, e.g. diodes, which
makes many wave forms non-sinusoidal even when the circuits are being
driven by purely sinusoidal sources.
## Simple Code Example
The code here shows a typical example of such a square wave generated using the functionality included in the **scipy** Python package. We have used a period of $\tau=0.2$.
```python
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# time samples
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t)
plt.plot(t, SqrSignal)
plt.ylim(-0.5, 2.5)
plt.show()
```
For the sinusoidal example the
period is $\tau=2\pi/\omega$. However, higher harmonics can also
satisfy the periodicity requirement. In general, any force that
satisfies the periodicity requirement can be expressed as a sum over
harmonics,
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
F(t)=\frac{f_0}{2}+\sum_{n>0} f_n\cos(2n\pi t/\tau)+g_n\sin(2n\pi t/\tau).
\label{_auto4} \tag{22}
\end{equation}
$$
## Wrapping up Fourier transforms
We can write down the answer for
$x_{pn}(t)$, by substituting $f_n/m$ or $g_n/m$ for $F_0/m$. By
writing each factor $2n\pi t/\tau$ as $n\omega t$, with $\omega\equiv
2\pi/\tau$,
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef1"></div>
$$
\begin{equation}
\label{eq:fourierdef1} \tag{23}
F(t)=\frac{f_0}{2}+\sum_{n>0}f_n\cos(n\omega t)+g_n\sin(n\omega t).
\end{equation}
$$
The solutions for $x(t)$ then come from replacing $\omega$ with
$n\omega$ for each term in the particular solution,
$$
\begin{eqnarray}
x_p(t)&=&\frac{f_0}{2k}+\sum_{n>0} \alpha_n\cos(n\omega t-\delta_n)+\beta_n\sin(n\omega t-\delta_n),\\
\nonumber
\alpha_n&=&\frac{f_n/m}{\sqrt{((n\omega)^2-\omega_0^2)+4\beta^2n^2\omega^2}},\\
\nonumber
\beta_n&=&\frac{g_n/m}{\sqrt{((n\omega)^2-\omega_0^2)+4\beta^2n^2\omega^2}},\\
\nonumber
\delta_n&=&\tan^{-1}\left(\frac{2\beta n\omega}{\omega_0^2-n^2\omega^2}\right).
\end{eqnarray}
$$
## Finding the Coefficients
Because the forces have been applied for a long time, any non-zero
damping eliminates the homogeneous parts of the solution, so one need
only consider the particular solution for each $n$.
The problem is considered solved if one can find expressions for the
coefficients $f_n$ and $g_n$, even though the solutions are expressed
as an infinite sum. The coefficients can be extracted from the
function $F(t)$ by
<!-- Equation labels as ordinary links -->
<div id="eq:fourierdef2"></div>
$$
\begin{eqnarray}
\label{eq:fourierdef2} \tag{24}
f_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\cos(2n\pi t/\tau),\\
\nonumber
g_n&=&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~F(t)\sin(2n\pi t/\tau).
\end{eqnarray}
$$
To check the consistency of these expressions and to verify
Eq. ([24](#eq:fourierdef2)), one can insert the expansion of $F(t)$ in
Eq. ([23](#eq:fourierdef1)) into the expression for the coefficients in
Eq. ([24](#eq:fourierdef2)) and see whether
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~\left\{
\frac{f_0}{2}+\sum_{m>0}f_m\cos(m\omega t)+g_m\sin(m\omega t)
\right\}\cos(n\omega t).
\end{eqnarray}
$$
Immediately, one can throw away all the terms with $g_m$ because they
convolute an even and an odd function. The term with $f_0/2$
disappears because $\cos(n\omega t)$ is equally positive and negative
over the interval and will integrate to zero. For all the terms
$f_m\cos(m\omega t)$ appearing in the sum, one can use angle addition
formulas to see that $\cos(m\omega t)\cos(n\omega
t)=(1/2)(\cos[(m+n)\omega t]+\cos[(m-n)\omega t])$. This will integrate
to zero unless $m=n$. In that case the $m=n$ term gives
<!-- Equation labels as ordinary links -->
<div id="_auto5"></div>
$$
\begin{equation}
\int_{-\tau/2}^{\tau/2}dt~\cos^2(m\omega t)=\frac{\tau}{2},
\label{_auto5} \tag{25}
\end{equation}
$$
and
$$
\begin{eqnarray}
f_n&=?&\frac{2}{\tau}\int_{-\tau/2}^{\tau/2} dt~f_n/2\\
\nonumber
&=&f_n~\checkmark.
\end{eqnarray}
$$
The same method can be used to check for the consistency of $g_n$.
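The consistency of these expressions can also be checked numerically. The following sketch (using the same square-wave parameters as the code in the next section) computes $f_n$ from Eq. ([24](#eq:fourierdef2)) by direct integration and compares with the closed-form coefficients $f_n = \frac{2F_{\mathrm{max}}}{n\pi}\sin(n\pi\,\mathrm{Width}/T)$ used in the plotting code below:
```python
# A small numerical sanity check (a sketch; the signal parameters match
# the example below): compute f_n for a square wave by direct integration
# and compare with the closed-form coefficients used in the plotting code.
import numpy as np
from scipy import integrate, signal

T = 0.2       # period
Fmax = 2.0    # height of the square pulse
Width = 0.1   # width of the pulse

def F(t):
    # square pulse of height Fmax centred at t = 0, as in the example below
    return 0.5*Fmax*(1.0 + signal.square(2*np.pi*t/T + np.pi*Width/T))

for n in range(1, 4):
    fn, _ = integrate.quad(lambda t: (2/T)*F(t)*np.cos(2*n*np.pi*t/T),
                           -T/2, T/2, points=[-Width/2, Width/2])
    closed_form = 2*Fmax/(n*np.pi)*np.sin(n*np.pi*Width/T)
    print(n, fn, closed_form)
```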
## Final words on Fourier Transforms
The code here applies the Fourier series to a
square wave signal and
visualizes the various approximations given by the Fourier series compared
with a square wave with period $T=0.2$ (dimensionless time), width $0.1$ and max value of the force $F=2$. We
see that when we increase the number of components in the Fourier
series, the Fourier series approximation gets closer and closer to the
square wave signal.
```python
import numpy as np
import math
from scipy import signal
import matplotlib.pyplot as plt
# number of points
n = 500
# start and final times
t0 = 0.0
tn = 1.0
# Period
T =0.2
# Max value of square signal
Fmax= 2.0
# Width of signal
Width = 0.1
t = np.linspace(t0, tn, n, endpoint=False)
SqrSignal = np.zeros(n)
FourierSeriesSignal = np.zeros(n)
SqrSignal = 1.0+signal.square(2*np.pi*5*t+np.pi*Width/T)
a0 = Fmax*Width/T
FourierSeriesSignal = a0
Factor = 2.0*Fmax/np.pi
for i in range(1,500):
FourierSeriesSignal += Factor/(i)*np.sin(np.pi*i*Width/T)*np.cos(i*t*2*np.pi/T)
plt.plot(t, SqrSignal)
plt.plot(t, FourierSeriesSignal)
plt.ylim(-0.5, 2.5)
plt.show()
```
## Two-dimensional Objects
We often use convolutions over more than one dimension at a time. If
we have a two-dimensional image $I$ as input, we can have a **filter**
defined by a two-dimensional **kernel** $K$. This leads to an output $S$
$$
S(i,j)=(I * K)(i,j) = \sum_m\sum_n I(m,n)K(i-m,j-n).
$$
Convolution is a commutative process, which means we can rewrite this equation as
$$
S(i,j)=(I * K)(i,j) = \sum_m\sum_n I(i-m,j-n)K(m,n).
$$
Normally the latter is more straightforward to implement in a machine learning library since there is less variation in the range of values of $m$ and $n$.
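As a minimal sketch (with a made-up image and kernel), the two-dimensional convolution above can be computed directly with `scipy.signal.convolve2d`:
```python
# A sketch of a 2D convolution; image and kernel values are made up.
import numpy as np
from scipy import signal

I = np.arange(16, dtype=float).reshape(4, 4)   # a tiny "image"
K = np.array([[1.0, 0.0],
              [0.0, -1.0]])                    # a hand-crafted 2x2 kernel

S = signal.convolve2d(I, K, mode='valid')      # S(i,j) as defined above
print(S)
```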
## Cross-Correlation
Many deep learning libraries implement cross-correlation instead of convolution
$$
S(i,j)=(I * K)(i,j) = \sum_m\sum_n I(i+m,j+n)K(m,n).
$$
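For comparison (a sketch using the same made-up image and kernel as above), `scipy.signal.correlate2d` implements this cross-correlation; it equals convolution with the kernel flipped along both axes:
```python
# A sketch of 2D cross-correlation; same made-up image and kernel as above.
import numpy as np
from scipy import signal

I = np.arange(16, dtype=float).reshape(4, 4)
K = np.array([[1.0, 0.0],
              [0.0, -1.0]])

S_corr = signal.correlate2d(I, K, mode='valid')
print(S_corr)                  # equals convolve2d(I, K[::-1, ::-1])
```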
## More on Dimensionalities
In fields like signal processing (and imaging as well), one designs
so-called filters. These filters are defined by the convolutions and
are often hand-crafted. One may specify filters for smoothing, edge
detection, frequency reshaping, and similar operations. However with
neural networks the idea is to automatically learn the filters and use
many of them in conjunction with non-linear operations (activation
functions).
As an example consider a neural network operating on sound sequence
data. Assume that we have an input vector $\boldsymbol{x}$ of length $d=10^6$. We
then construct a neural network with one hidden layer with
$10^4$ nodes. This means that we will have a weight matrix with
$10^4\times 10^6=10^{10}$ weights to be determined, together with $10^4$ biases.
Assume furthermore that we have an output layer which is meant to indicate whether the sound sequence represents a human voice (true) or something else (false).
It means that we have only one output node. But since this output node connects to $10^4$ nodes in the hidden layer, there are in total $10^4$ weights to be determined for the output layer, plus one bias. In total we have
$$
\mathrm{NumberParameters}=10^{10}+10^4+10^4+1 \approx 10^{10},
$$
that is ten billion parameters to determine.
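This count is a one-liner to reproduce (the numbers simply follow the text above):
```python
# Parameter count for the dense example above.
d, h = 10**6, 10**4        # input length and number of hidden nodes
n_params = d*h + h + h + 1 # hidden weights + hidden biases + output weights + output bias
print(n_params)            # 10000020001, i.e. about 1e10
```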
## Further Dimensionality Remarks
In today’s architecture one can train such neural networks, however
this is a huge number of parameters for the task at hand. In general,
it is a very wasteful and inefficient use of dense matrices as
parameters. Just as importantly, such trained network parameters are
very specific for the type of input data on which they were trained
and the network is not likely to generalize easily to variations in
the input.
The main principles that justify convolutions are locality of
information and repetition of patterns within the signal. Sound samples
of the input in adjacent spots are much more likely to affect each
other than those that are very far away. Similarly, sounds are
repeated multiple times in the signal. While slightly simplistic,
reasoning about such a sound example demonstrates this. The same
principles then apply to images and other similar data.
## CNNs in more detail, Lecture from IN5400
* [Lectures from IN5400 spring 2019](https://www.uio.no/studier/emner/matnat/ifi/IN5400/v19/material/week5/in5400_2019_week5_convolutional_nerual_networks.pdf)
## CNNs in more detail, building convolutional neural networks in Tensorflow and Keras
As discussed above, CNNs are neural networks built from the assumption that the inputs
to the network are 2D images. This is important because the number of features or pixels in images
grows very fast with the image size, and an enormous number of weights and biases are needed in order to build an accurate network.
As before, we still have our input, a hidden layer and an output. What's novel about convolutional networks
are the **convolutional** and **pooling** layers stacked in pairs between the input and the hidden layer.
In addition, the data is no longer represented as a 2D feature matrix, instead each input is a number of 2D
matrices, typically 1 for each color dimension (Red, Green, Blue).
## Setting it up
It means that to represent the entire
dataset of images, we require a 4D matrix or **tensor**. This tensor has the dimensions:
$$
(n_{inputs},\, n_{pixels, width},\, n_{pixels, height},\, depth) .
$$
## The MNIST dataset again
The MNIST dataset consists of grayscale images with a pixel size of
$28\times 28$, meaning we require $28 \times 28 = 784$ weights to each
neuron in the first hidden layer.
If we were to analyze images of size $128\times 128$ we would require
$128 \times 128 = 16384$ weights to each neuron. Even worse if we were
dealing with color images, as most images are, we have an image matrix
of size $128\times 128$ for each color dimension (Red, Green, Blue),
meaning 3 times the number of weights $= 49152$ are required for every
single neuron in the first hidden layer.
## Strong correlations
Images typically have strong local correlations, meaning that a small
part of the image varies little from its neighboring regions. If for
example we have an image of a blue car, we can roughly assume that a
small blue part of the image is surrounded by other blue regions.
Therefore, instead of connecting every single pixel to a neuron in the
first hidden layer, as we have previously done with deep neural
networks, we can instead connect each neuron to a small part of the
image (in all 3 RGB depth dimensions). The size of each small area is
fixed, and known as a [receptive field](https://en.wikipedia.org/wiki/Receptive_field).
## Layers of a CNN
The layers of a convolutional neural network arrange neurons in 3D: width, height and depth.
The input image is typically a square matrix of depth 3.
A **convolution** is performed on the image which outputs
a 3D volume of neurons. The weights to the input are arranged in a number of 2D matrices, known as **filters**.
Each filter slides along the input image, taking the dot product
between each small part of the image and the filter, in all depth
dimensions. This is then passed through a non-linear function,
typically the **Rectified Linear Unit (ReLU)** function, which serves as the
activation of the neurons in the first convolutional layer. This is
further passed through a **pooling layer**, which reduces the size of the
convolutional layer, e.g. by taking the maximum or average across some
small regions, and this serves as input to the next convolutional
layer.
## Systematic reduction
By systematically reducing the size of the input volume, through
convolution and pooling, the network should create representations of
small parts of the input, and then from them assemble representations
of larger areas. The final pooling layer is flattened to serve as
input to a hidden layer, such that each neuron in the final pooling
layer is connected to every single neuron in the hidden layer. This
then serves as input to the output layer, e.g. a softmax output for
classification.
## Prerequisites: Collect and pre-process data
```python
# import necessary packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# ensure the same random numbers appear every time
np.random.seed(0)
# display images in notebook
%matplotlib inline
plt.rcParams['figure.figsize'] = (12,12)
# download MNIST dataset
digits = datasets.load_digits()
# define inputs and labels
inputs = digits.images
labels = digits.target
# RGB images have a depth of 3
# our images are grayscale so they should have a depth of 1
inputs = inputs[:,:,:,np.newaxis]
print("inputs = (n_inputs, pixel_width, pixel_height, depth) = " + str(inputs.shape))
print("labels = (n_inputs) = " + str(labels.shape))
# choose some random images to display
n_inputs = len(inputs)
indices = np.arange(n_inputs)
random_indices = np.random.choice(indices, size=5)
for i, image in enumerate(digits.images[random_indices]):
plt.subplot(1, 5, i+1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title("Label: %d" % digits.target[random_indices[i]])
plt.show()
```
## Importing Keras and Tensorflow
```python
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Sequential #This allows appending layers to existing models
from tensorflow.keras.layers import Dense #This allows defining the characteristics of a particular layer
from tensorflow.keras import optimizers #This allows using whichever optimiser we want (sgd,adam,RMSprop)
from tensorflow.keras import regularizers #This allows using whichever regularizer we want (l1,l2,l1_l2)
from tensorflow.keras.utils import to_categorical #This allows using categorical cross entropy as the cost function
#from tensorflow.keras import Conv2D
#from tensorflow.keras import MaxPooling2D
#from tensorflow.keras import Flatten
from sklearn.model_selection import train_test_split
# representation of labels
labels = to_categorical(labels)
# split into train and test data
# one-liner from scikit-learn library
train_size = 0.8
test_size = 1 - train_size
X_train, X_test, Y_train, Y_test = train_test_split(inputs, labels, train_size=train_size,
test_size=test_size)
```
## Running with Keras
```python
def create_convolutional_neural_network_keras(input_shape, receptive_field,
n_filters, n_neurons_connected, n_categories,
eta, lmbd):
model = Sequential()
model.add(layers.Conv2D(n_filters, (receptive_field, receptive_field), input_shape=input_shape, padding='same',
activation='relu', kernel_regularizer=regularizers.l2(lmbd)))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(n_neurons_connected, activation='relu', kernel_regularizer=regularizers.l2(lmbd)))
model.add(layers.Dense(n_categories, activation='softmax', kernel_regularizer=regularizers.l2(lmbd)))
sgd = optimizers.SGD(lr=eta)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
return model
epochs = 100
batch_size = 100
input_shape = X_train.shape[1:4]
receptive_field = 3
n_filters = 10
n_neurons_connected = 50
n_categories = 10
eta_vals = np.logspace(-5, 1, 7)
lmbd_vals = np.logspace(-5, 1, 7)
```
## Final part
```python
CNN_keras = np.zeros((len(eta_vals), len(lmbd_vals)), dtype=object)
for i, eta in enumerate(eta_vals):
for j, lmbd in enumerate(lmbd_vals):
CNN = create_convolutional_neural_network_keras(input_shape, receptive_field,
n_filters, n_neurons_connected, n_categories,
eta, lmbd)
CNN.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=0)
scores = CNN.evaluate(X_test, Y_test)
CNN_keras[i][j] = CNN
print("Learning rate = ", eta)
print("Lambda = ", lmbd)
print("Test accuracy: %.3f" % scores[1])
print()
```
## Final visualization
```python
# visual representation of grid search
# uses seaborn heatmap, could probably do this in matplotlib
import seaborn as sns
sns.set()
train_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
test_accuracy = np.zeros((len(eta_vals), len(lmbd_vals)))
for i in range(len(eta_vals)):
for j in range(len(lmbd_vals)):
CNN = CNN_keras[i][j]
train_accuracy[i][j] = CNN.evaluate(X_train, Y_train)[1]
test_accuracy[i][j] = CNN.evaluate(X_test, Y_test)[1]
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(train_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Training Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
fig, ax = plt.subplots(figsize = (10, 10))
sns.heatmap(test_accuracy, annot=True, ax=ax, cmap="viridis")
ax.set_title("Test Accuracy")
ax.set_ylabel(r"$\eta$")
ax.set_xlabel(r"$\lambda$")
plt.show()
```
## The CIFAR10 data set
The CIFAR10 dataset contains 60,000 color images in 10 classes, with
6,000 images in each class. The dataset is divided into 50,000
training images and 10,000 testing images. The classes are mutually
exclusive and there is no overlap between them.
```python
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# We import the data set
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1 by dividing by 255.
train_images, test_images = train_images / 255.0, test_images / 255.0
```
## Verifying the data set
To verify that the dataset looks correct, let's plot the first 25 images from the training set and display the class name below each image.
```python
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
# The CIFAR labels happen to be arrays,
# which is why you need the extra index
plt.xlabel(class_names[train_labels[i][0]])
plt.show()
```
## Set up the model
The 6 lines of code below define the convolutional base using a common pattern: a stack of Conv2D and MaxPooling2D layers.
As input, a CNN takes tensors of shape (image_height, image_width, color_channels), ignoring the batch size. If you are new to these dimensions, color_channels refers to (R,G,B). In this example, you will configure the CNN to process inputs of shape (32, 32, 3), which is the format of CIFAR images. You can do this by passing the argument input_shape to the first layer.
```python
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
# Let's display the architecture of our model so far.
model.summary()
```
You can see that the output of every Conv2D and MaxPooling2D layer is a 3D tensor of shape (height, width, channels). The width and height dimensions tend to shrink as you go deeper in the network. The number of output channels for each Conv2D layer is controlled by the first argument (e.g., 32 or 64). Typically, as the width and height shrink, you can afford (computationally) to add more output channels in each Conv2D layer.
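Why the width and height shrink follows directly from the arithmetic of 'valid' padding (the Keras default used above). The helper below is an illustrative sketch, not part of the Keras API, tracing the spatial size through the stack just defined:
```python
def conv_output_size(input_size, kernel_size, stride=1):
    # 'valid' padding: the kernel must fit entirely inside the input
    return (input_size - kernel_size) // stride + 1

size = 32                                 # CIFAR10 images are 32x32
size = conv_output_size(size, 3)          # Conv2D(32, (3, 3))   -> 30
size = size // 2                          # MaxPooling2D((2, 2)) -> 15
size = conv_output_size(size, 3)          # Conv2D(64, (3, 3))   -> 13
size = size // 2                          # MaxPooling2D((2, 2)) -> 6
size = conv_output_size(size, 3)          # Conv2D(64, (3, 3))   -> 4
print(size)  # 4, matching the (4, 4, 64) output shape discussed next
```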
## Add Dense layers on top
To complete our model, you will feed the last output tensor from the
convolutional base (of shape (4, 4, 64)) into one or more Dense layers
to perform classification. Dense layers take vectors as input (which
are 1D), while the current output is a 3D tensor. First, you will
flatten (or unroll) the 3D output to 1D, then add one or more Dense
layers on top. CIFAR has 10 output classes, so you use a final Dense
layer with 10 outputs. Note that this last layer returns raw logits; the
softmax is applied inside the loss function during compilation
(from_logits=True below).
```python
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
# Here's the complete architecture of our model.
model.summary()
```
As you can see, our (4, 4, 64) outputs were flattened into vectors of shape (1024) before going through two Dense layers.
## Compile and train the model
```python
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
validation_data=(test_images, test_labels))
```
## Finally, evaluate the model
```python
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label = 'val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)
```
|
import System.Concurrency
-- Simple producing thread.
producer : Channel Nat -> IO ()
producer c = ignore $ for [1..100] $ \n => channelPut c n
-- Test that using the same channel with multiple producers is okay.
main : IO ()
main =
do c <- makeChannel
tids <- for [1..10] $ \_ => fork (producer c)
vals <- for [1..1000] $ \_ => channelGet c
ignore $ traverse (\t => threadWait t) tids
let s = sum vals
if s == 50500
then putStrLn "Success!"
else putStrLn "How did we get here?"
|
[GOAL]
m n k l : ℕ
⊢ 1 < 0 ↔ 0 ≠ 0 ∧ 0 ≠ 1
[PROOFSTEP]
decide
[GOAL]
m n k l : ℕ
⊢ 1 < 1 ↔ 1 ≠ 0 ∧ 1 ≠ 1
[PROOFSTEP]
decide
[GOAL]
m n✝ k l n : ℕ
⊢ 1 < n + 2 ↔ n + 2 ≠ 0 ∧ n + 2 ≠ 1
[PROOFSTEP]
simp
[GOAL]
m n k l : ℕ
⊢ 0 = m * n ↔ m = 0 ∨ n = 0
[PROOFSTEP]
rw [eq_comm, Nat.mul_eq_zero]
[GOAL]
m n k l : ℕ
⊢ m + n = max m n ↔ m = 0 ∨ n = 0
[PROOFSTEP]
rw [← min_eq_zero_iff]
[GOAL]
m n k l : ℕ
⊢ m + n = max m n ↔ min m n = 0
[PROOFSTEP]
cases' le_total m n with H H
[GOAL]
case inl
m n k l : ℕ
H : m ≤ n
⊢ m + n = max m n ↔ min m n = 0
[PROOFSTEP]
simp [H]
[GOAL]
case inr
m n k l : ℕ
H : n ≤ m
⊢ m + n = max m n ↔ min m n = 0
[PROOFSTEP]
simp [H]
[GOAL]
m n k l : ℕ
⊢ m + n = min m n ↔ m = 0 ∧ n = 0
[PROOFSTEP]
rw [← max_eq_zero_iff]
[GOAL]
m n k l : ℕ
⊢ m + n = min m n ↔ max m n = 0
[PROOFSTEP]
cases' le_total m n with H H
[GOAL]
case inl
m n k l : ℕ
H : m ≤ n
⊢ m + n = min m n ↔ max m n = 0
[PROOFSTEP]
simp [H]
[GOAL]
case inr
m n k l : ℕ
H : n ≤ m
⊢ m + n = min m n ↔ max m n = 0
[PROOFSTEP]
simp [H]
[GOAL]
m n k l : ℕ
H : m * n = 1
⊢ n * ?m.12220 H = 1
[PROOFSTEP]
rwa [mul_comm]
[GOAL]
m n k l : ℕ
⊢ 2 ≤ 0 ↔ 0 ≠ 0 ∧ 0 ≠ 1
[PROOFSTEP]
simp
[GOAL]
m n k l : ℕ
⊢ 2 ≤ 1 ↔ 1 ≠ 0 ∧ 1 ≠ 1
[PROOFSTEP]
simp
[GOAL]
m n✝ k l n : ℕ
⊢ 2 ≤ n + 2 ↔ n + 2 ≠ 0 ∧ n + 2 ≠ 1
[PROOFSTEP]
simp
[GOAL]
m✝ n✝ k l m n : ℕ
⊢ 0 < m + n → 0 < m ∨ 0 < n
[PROOFSTEP]
intro h
[GOAL]
m✝ n✝ k l m n : ℕ
h : 0 < m + n
⊢ 0 < m ∨ 0 < n
[PROOFSTEP]
cases' m with m
[GOAL]
case zero
m n✝ k l n : ℕ
h : 0 < zero + n
⊢ 0 < zero ∨ 0 < n
[PROOFSTEP]
simp [zero_add] at h
[GOAL]
case zero
m n✝ k l n : ℕ
h : 0 < n
⊢ 0 < zero ∨ 0 < n
[PROOFSTEP]
exact Or.inr h
[GOAL]
case succ
m✝ n✝ k l n m : ℕ
h : 0 < succ m + n
⊢ 0 < succ m ∨ 0 < n
[PROOFSTEP]
exact Or.inl (succ_pos _)
[GOAL]
m✝ n✝ k l m n : ℕ
⊢ 0 < m ∨ 0 < n → 0 < m + n
[PROOFSTEP]
intro h
[GOAL]
m✝ n✝ k l m n : ℕ
h : 0 < m ∨ 0 < n
⊢ 0 < m + n
[PROOFSTEP]
cases' h with mpos npos
[GOAL]
case inl
m✝ n✝ k l m n : ℕ
mpos : 0 < m
⊢ 0 < m + n
[PROOFSTEP]
apply add_pos_left mpos
[GOAL]
case inr
m✝ n✝ k l m n : ℕ
npos : 0 < n
⊢ 0 < m + n
[PROOFSTEP]
apply add_pos_right _ npos
[GOAL]
m n k l : ℕ
⊢ m + n = 1 ↔ m = 0 ∧ n = 1 ∨ m = 1 ∧ n = 0
[PROOFSTEP]
cases n
[GOAL]
case zero
m k l : ℕ
⊢ m + zero = 1 ↔ m = 0 ∧ zero = 1 ∨ m = 1 ∧ zero = 0
[PROOFSTEP]
simp [succ_eq_add_one, ← add_assoc, succ_inj']
[GOAL]
case succ
m k l n✝ : ℕ
⊢ m + succ n✝ = 1 ↔ m = 0 ∧ succ n✝ = 1 ∨ m = 1 ∧ succ n✝ = 0
[PROOFSTEP]
simp [succ_eq_add_one, ← add_assoc, succ_inj']
[GOAL]
m n k l : ℕ
⊢ m + n = 2 ↔ m = 0 ∧ n = 2 ∨ m = 1 ∧ n = 1 ∨ m = 2 ∧ n = 0
[PROOFSTEP]
cases n
[GOAL]
case zero
m k l : ℕ
⊢ m + zero = 2 ↔ m = 0 ∧ zero = 2 ∨ m = 1 ∧ zero = 1 ∨ m = 2 ∧ zero = 0
[PROOFSTEP]
simp [(succ_ne_zero 1).symm, (show 2 = Nat.succ 1 from rfl), succ_eq_add_one, ← add_assoc, succ_inj', add_eq_one_iff]
[GOAL]
case succ
m k l n✝ : ℕ
⊢ m + succ n✝ = 2 ↔ m = 0 ∧ succ n✝ = 2 ∨ m = 1 ∧ succ n✝ = 1 ∨ m = 2 ∧ succ n✝ = 0
[PROOFSTEP]
simp [(succ_ne_zero 1).symm, (show 2 = Nat.succ 1 from rfl), succ_eq_add_one, ← add_assoc, succ_inj', add_eq_one_iff]
[GOAL]
m n k l : ℕ
⊢ m + n = 3 ↔ m = 0 ∧ n = 3 ∨ m = 1 ∧ n = 2 ∨ m = 2 ∧ n = 1 ∨ m = 3 ∧ n = 0
[PROOFSTEP]
cases n
[GOAL]
case zero
m k l : ℕ
⊢ m + zero = 3 ↔ m = 0 ∧ zero = 3 ∨ m = 1 ∧ zero = 2 ∨ m = 2 ∧ zero = 1 ∨ m = 3 ∧ zero = 0
[PROOFSTEP]
simp [(succ_ne_zero 1).symm, succ_eq_add_one, (show 3 = Nat.succ 2 from rfl), ← add_assoc, succ_inj', add_eq_two_iff]
[GOAL]
case succ
m k l n✝ : ℕ
⊢ m + succ n✝ = 3 ↔ m = 0 ∧ succ n✝ = 3 ∨ m = 1 ∧ succ n✝ = 2 ∨ m = 2 ∧ succ n✝ = 1 ∨ m = 3 ∧ succ n✝ = 0
[PROOFSTEP]
simp [(succ_ne_zero 1).symm, succ_eq_add_one, (show 3 = Nat.succ 2 from rfl), ← add_assoc, succ_inj', add_eq_two_iff]
[GOAL]
m n k l : ℕ
⊢ n ≤ m ∧ m ≤ n + 1 ↔ m = n ∨ m = n + 1
[PROOFSTEP]
rw [le_add_one_iff, and_or_left, ← le_antisymm_iff, eq_comm, and_iff_right_of_imp]
[GOAL]
m n k l : ℕ
⊢ m = n + 1 → n ≤ m
[PROOFSTEP]
rintro rfl
[GOAL]
n k l : ℕ
⊢ n ≤ n + 1
[PROOFSTEP]
exact n.le_succ
[GOAL]
m n k l : ℕ
hab : m < n
hcd : k < l
⊢ m + k + 1 < n + l
[PROOFSTEP]
rw [add_assoc]
[GOAL]
m n k l : ℕ
hab : m < n
hcd : k < l
⊢ m + (k + 1) < n + l
[PROOFSTEP]
exact add_lt_add_of_lt_of_le hab (Nat.succ_le_iff.2 hcd)
[GOAL]
m n k l : ℕ
⊢ m ≤ succ n → pred m ≤ n
[PROOFSTEP]
cases m
[GOAL]
case zero
n k l : ℕ
⊢ zero ≤ succ n → pred zero ≤ n
[PROOFSTEP]
exact fun _ => zero_le n
[GOAL]
case succ
n k l n✝ : ℕ
⊢ succ n✝ ≤ succ n → pred (succ n✝) ≤ n
[PROOFSTEP]
exact le_of_succ_le_succ
[GOAL]
m n k l : ℕ
⊢ OrderedSub ℕ
[PROOFSTEP]
constructor
[GOAL]
case tsub_le_iff_right
m n k l : ℕ
⊢ ∀ (a b c : ℕ), a - b ≤ c ↔ a ≤ c + b
[PROOFSTEP]
intro m n k
[GOAL]
case tsub_le_iff_right
m✝ n✝ k✝ l m n k : ℕ
⊢ m - n ≤ k ↔ m ≤ k + n
[PROOFSTEP]
induction' n with n ih generalizing k
[GOAL]
case tsub_le_iff_right.zero
m✝ n k✝¹ l m k✝ k : ℕ
⊢ m - zero ≤ k ↔ m ≤ k + zero
[PROOFSTEP]
simp
[GOAL]
case tsub_le_iff_right.succ
m✝ n✝ k✝¹ l m k✝ n : ℕ
ih : ∀ (k : ℕ), m - n ≤ k ↔ m ≤ k + n
k : ℕ
⊢ m - succ n ≤ k ↔ m ≤ k + succ n
[PROOFSTEP]
simp only [sub_succ, pred_le_iff, ih, succ_add, add_succ]
[GOAL]
m n k l : ℕ
h : k + l = m + n - 1
⊢ m ≤ k ∨ n ≤ l
[PROOFSTEP]
cases' le_or_lt m k with h' h' <;> [left; right]
[GOAL]
m n k l : ℕ
h : k + l = m + n - 1
⊢ m ≤ k ∨ n ≤ l
[PROOFSTEP]
cases' le_or_lt m k with h' h'
[GOAL]
case inl
m n k l : ℕ
h : k + l = m + n - 1
h' : m ≤ k
⊢ m ≤ k ∨ n ≤ l
[PROOFSTEP]
left
[GOAL]
case inr
m n k l : ℕ
h : k + l = m + n - 1
h' : k < m
⊢ m ≤ k ∨ n ≤ l
[PROOFSTEP]
right
[GOAL]
case inl.h
m n k l : ℕ
h : k + l = m + n - 1
h' : m ≤ k
⊢ m ≤ k
[PROOFSTEP]
exact h'
[GOAL]
case inr.h
m n k l : ℕ
h : k + l = m + n - 1
h' : k < m
⊢ n ≤ l
[PROOFSTEP]
replace h' := add_lt_add_right h' l
[GOAL]
case inr.h
m n k l : ℕ
h : k + l = m + n - 1
h' : k + l < m + l
⊢ n ≤ l
[PROOFSTEP]
rw [h] at h'
[GOAL]
case inr.h
m n k l : ℕ
h : k + l = m + n - 1
h' : m + n - 1 < m + l
⊢ n ≤ l
[PROOFSTEP]
cases' n.eq_zero_or_pos with hn hn
[GOAL]
case inr.h.inl
m n k l : ℕ
h : k + l = m + n - 1
h' : m + n - 1 < m + l
hn : n = 0
⊢ n ≤ l
[PROOFSTEP]
rw [hn]
[GOAL]
case inr.h.inl
m n k l : ℕ
h : k + l = m + n - 1
h' : m + n - 1 < m + l
hn : n = 0
⊢ 0 ≤ l
[PROOFSTEP]
exact zero_le l
[GOAL]
case inr.h.inr
m n k l : ℕ
h : k + l = m + n - 1
h' : m + n - 1 < m + l
hn : n > 0
⊢ n ≤ l
[PROOFSTEP]
rw [n.add_sub_assoc (Nat.succ_le_of_lt hn), add_lt_add_iff_left] at h'
[GOAL]
case inr.h.inr
m n k l : ℕ
h : k + l = m + n - 1
h' : n - succ 0 < l
hn : n > 0
⊢ n ≤ l
[PROOFSTEP]
exact Nat.le_of_pred_lt h'
[GOAL]
m n✝ k l n : ℕ
⊢ n + 1 ≤ (n + 1) * (n + 1)
[PROOFSTEP]
simp
[GOAL]
m n k l : ℕ
h : 0 < n
⊢ m ≤ n * m
[PROOFSTEP]
conv =>
lhs
rw [← one_mul m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ n * m
[PROOFSTEP]
lhs
rw [← one_mul m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ n * m
[PROOFSTEP]
lhs
rw [← one_mul m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ n * m
[PROOFSTEP]
lhs
[GOAL]
m n k l : ℕ
h : 0 < n
| m
[PROOFSTEP]
rw [← one_mul m]
[GOAL]
m n k l : ℕ
h : 0 < n
⊢ 1 * m ≤ n * m
[PROOFSTEP]
exact mul_le_mul_of_nonneg_right h.nat_succ_le (zero_le _)
[GOAL]
m n k l : ℕ
h : 0 < n
⊢ m ≤ m * n
[PROOFSTEP]
conv =>
lhs
rw [← mul_one m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ m * n
[PROOFSTEP]
lhs
rw [← mul_one m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ m * n
[PROOFSTEP]
lhs
rw [← mul_one m]
[GOAL]
m n k l : ℕ
h : 0 < n
| m ≤ m * n
[PROOFSTEP]
lhs
[GOAL]
m n k l : ℕ
h : 0 < n
| m
[PROOFSTEP]
rw [← mul_one m]
[GOAL]
m n k l : ℕ
h : 0 < n
⊢ m * 1 ≤ m * n
[PROOFSTEP]
exact mul_le_mul_of_nonneg_left h.nat_succ_le (zero_le _)
[GOAL]
m n✝ k l n i : ℕ
hi : i ≠ 0
⊢ n ≤ i + (n - 1)
[PROOFSTEP]
refine le_trans ?_ add_tsub_le_assoc
[GOAL]
m n✝ k l n i : ℕ
hi : i ≠ 0
⊢ n ≤ i + n - 1
[PROOFSTEP]
simp [add_comm, Nat.add_sub_assoc, one_le_iff_ne_zero.2 hi]
[GOAL]
m n k l : ℕ
hm : m ≠ 0
hn : n ≠ 0
⊢ m + n - 1 ≤ m * n
[PROOFSTEP]
cases m
[GOAL]
case zero
n k l : ℕ
hn : n ≠ 0
hm : zero ≠ 0
⊢ zero + n - 1 ≤ zero * n
[PROOFSTEP]
cases hm rfl
[GOAL]
case succ
n k l : ℕ
hn : n ≠ 0
n✝ : ℕ
hm : succ n✝ ≠ 0
⊢ succ n✝ + n - 1 ≤ succ n✝ * n
[PROOFSTEP]
rw [succ_add, succ_sub_one, succ_mul]
[GOAL]
case succ
n k l : ℕ
hn : n ≠ 0
n✝ : ℕ
hm : succ n✝ ≠ 0
⊢ n✝ + n ≤ n✝ * n + n
[PROOFSTEP]
exact add_le_add_right (le_mul_of_one_le_right' $ succ_le_iff.2 $ pos_iff_ne_zero.2 hn) _
[GOAL]
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
⊢ P (a + 1) (b + 1)
[PROOFSTEP]
apply hd _ _ ((add_lt_add_iff_right _).1 h)
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
⊢ P (a + 1) b
[PROOFSTEP]
have this : a + 1 = b ∨ a + 1 < b := by rwa [← le_iff_eq_or_lt, ← Nat.lt_succ_iff]
[GOAL]
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
⊢ a + 1 = b ∨ a + 1 < b
[PROOFSTEP]
rwa [← le_iff_eq_or_lt, ← Nat.lt_succ_iff]
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
this : a + 1 = b ∨ a + 1 < b
⊢ P (a + 1) b
[PROOFSTEP]
have wf : (a + 1) + b < (a + 1) + (b + 1) := by simp
[GOAL]
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
this : a + 1 = b ∨ a + 1 < b
⊢ a + 1 + b < a + 1 + (b + 1)
[PROOFSTEP]
simp
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
this : a + 1 = b ∨ a + 1 < b
wf : a + 1 + b < a + 1 + (b + 1)
⊢ P (a + 1) b
[PROOFSTEP]
rcases this with (rfl | h)
[GOAL]
case a.inl
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a : ℕ
h : a + 1 < a + 1 + 1
wf : a + 1 + (a + 1) < a + 1 + (a + 1 + 1)
⊢ P (a + 1) (a + 1)
[PROOFSTEP]
exact ha _
[GOAL]
case a.inr
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h✝ : a + 1 < b + 1
wf : a + 1 + b < a + 1 + (b + 1)
h : a + 1 < b
⊢ P (a + 1) b
[PROOFSTEP]
apply diag_induction P ha hb hd (a + 1) b h
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
⊢ P a (b + 1)
[PROOFSTEP]
have _ : a + (b + 1) < (a + 1) + (b + 1) := by simp
[GOAL]
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
⊢ a + (b + 1) < a + 1 + (b + 1)
[PROOFSTEP]
simp
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
x✝ : a + (b + 1) < a + 1 + (b + 1)
⊢ P a (b + 1)
[PROOFSTEP]
apply diag_induction P ha hb hd a (b + 1)
[GOAL]
case a
m n k l : ℕ
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
a b : ℕ
h : a + 1 < b + 1
x✝ : a + (b + 1) < a + 1 + (b + 1)
⊢ a < b + 1
[PROOFSTEP]
apply lt_of_le_of_lt (Nat.le_succ _) h
[GOAL]
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
_x✝ : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1
a✝² :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y _x✝ →
P y.1 y.2.1
x✝⁴ : ℕ
x✝³ : (x : ℕ) ×' x✝⁴ < x
a✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁴, snd := x✝³ } →
P y.1 y.2.1
x✝² : ℕ
x✝¹ : x✝⁴ < x✝²
a✝ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁴, snd := { fst := x✝², snd := x✝¹ } } →
P y.1 y.2.1
a b : ℕ
h✝ : a + 1 < b + 1
x✝ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := succ a, snd := { fst := succ b, snd := h✝ } } →
P y.1 y.2.1
this : a + 1 = b ∨ a + 1 < b
wf : a + 1 + b < a + 1 + (b + 1)
h : a + 1 < b
⊢ (invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
{ fst := a + 1, snd := { fst := b, snd := h } } { fst := succ a, snd := { fst := succ b, snd := h✝ } }
[PROOFSTEP]
{assumption
}
[GOAL]
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
_x✝ : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1
a✝² :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y _x✝ →
P y.1 y.2.1
x✝⁴ : ℕ
x✝³ : (x : ℕ) ×' x✝⁴ < x
a✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁴, snd := x✝³ } →
P y.1 y.2.1
x✝² : ℕ
x✝¹ : x✝⁴ < x✝²
a✝ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁴, snd := { fst := x✝², snd := x✝¹ } } →
P y.1 y.2.1
a b : ℕ
h✝ : a + 1 < b + 1
x✝ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := succ a, snd := { fst := succ b, snd := h✝ } } →
P y.1 y.2.1
this : a + 1 = b ∨ a + 1 < b
wf : a + 1 + b < a + 1 + (b + 1)
h : a + 1 < b
⊢ (invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
{ fst := a + 1, snd := { fst := b, snd := h } } { fst := succ a, snd := { fst := succ b, snd := h✝ } }
[PROOFSTEP]
assumption
[GOAL]
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
_x✝ : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1
a✝³ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y _x✝ →
P y.1 y.2.1
x✝⁵ : ℕ
x✝⁴ : (x : ℕ) ×' x✝⁵ < x
a✝² :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁵, snd := x✝⁴ } →
P y.1 y.2.1
x✝³ : ℕ
x✝² : x✝⁵ < x✝³
a✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁵, snd := { fst := x✝³, snd := x✝² } } →
P y.1 y.2.1
a b : ℕ
h : a + 1 < b + 1
x✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := succ a, snd := { fst := succ b, snd := h } } →
P y.1 y.2.1
x✝ : a + (b + 1) < a + 1 + (b + 1)
a✝ : a < b + 1
⊢ (invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
{ fst := a, snd := { fst := b + 1, snd := a✝ } } { fst := succ a, snd := { fst := succ b, snd := h } }
[PROOFSTEP]
{assumption
}
[GOAL]
P : ℕ → ℕ → Prop
ha : ∀ (a : ℕ), P (a + 1) (a + 1)
hb : ∀ (b : ℕ), P 0 (b + 1)
hd : ∀ (a b : ℕ), a < b → P (a + 1) b → P a (b + 1) → P (a + 1) (b + 1)
_x✝ : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1
a✝³ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y _x✝ →
P y.1 y.2.1
x✝⁵ : ℕ
x✝⁴ : (x : ℕ) ×' x✝⁵ < x
a✝² :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁵, snd := x✝⁴ } →
P y.1 y.2.1
x✝³ : ℕ
x✝² : x✝⁵ < x✝³
a✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := x✝⁵, snd := { fst := x✝³, snd := x✝² } } →
P y.1 y.2.1
a b : ℕ
h : a + 1 < b + 1
x✝¹ :
∀ (y : (x : ℕ) ×' (x_1 : ℕ) ×' x < x_1),
(invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
y { fst := succ a, snd := { fst := succ b, snd := h } } →
P y.1 y.2.1
x✝ : a + (b + 1) < a + 1 + (b + 1)
a✝ : a < b + 1
⊢ (invImage (fun a => PSigma.casesOn a fun a snd => PSigma.casesOn snd fun b snd => a + b) instWellFoundedRelation).1
{ fst := a, snd := { fst := b + 1, snd := a✝ } } { fst := succ a, snd := { fst := succ b, snd := h } }
[PROOFSTEP]
assumption
[GOAL]
m n k l : ℕ
h : m ≤ k * n
k0 : k = 0
⊢ m / k ≤ n
[PROOFSTEP]
rw [k0, Nat.div_zero]
[GOAL]
m n k l : ℕ
h : m ≤ k * n
k0 : k = 0
⊢ 0 ≤ n
[PROOFSTEP]
apply zero_le
[GOAL]
m✝ n✝ k l m n : ℕ
n0 : n = 0
⊢ m / n ≤ m
[PROOFSTEP]
rw [n0, Nat.div_zero]
[GOAL]
m✝ n✝ k l m n : ℕ
n0 : n = 0
⊢ 0 ≤ m
[PROOFSTEP]
apply zero_le
[GOAL]
m n k l : ℕ
hn : 2 ≤ n
h : m ≤ m / n
⊢ n * m ≤ m
[PROOFSTEP]
rw [mul_comm]
[GOAL]
m n k l : ℕ
hn : 2 ≤ n
h : m ≤ m / n
⊢ m * n ≤ m
[PROOFSTEP]
exact (Nat.le_div_iff_mul_le' (lt_of_lt_of_le (by decide) hn)).1 h
[GOAL]
m n k l : ℕ
hn : 2 ≤ n
h : m ≤ m / n
⊢ 0 < 2
[PROOFSTEP]
decide
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hm0 : m = 0
⊢ m / k * n / m ≤ n / k
[PROOFSTEP]
simp [hm0]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hm0 : ¬m = 0
⊢ m / k * n ≤ n * m / k
[PROOFSTEP]
rw [mul_comm]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hm0 : ¬m = 0
⊢ n * (m / k) ≤ n * m / k
[PROOFSTEP]
exact mul_div_le_mul_div_assoc _ _ _
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hm0 : ¬m = 0
⊢ n * m / k / m = n / k
[PROOFSTEP]
{rw [Nat.div_div_eq_div_mul, mul_comm n, mul_comm k, Nat.mul_div_mul_left _ _ (Nat.pos_of_ne_zero hm0)]
}
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hm0 : ¬m = 0
⊢ n * m / k / m = n / k
[PROOFSTEP]
rw [Nat.div_div_eq_div_mul, mul_comm n, mul_comm k, Nat.mul_div_mul_left _ _ (Nat.pos_of_ne_zero hm0)]
[GOAL]
m n k l : ℕ
hmk : k ∣ m
hnl : l ∣ n
⊢ m * n / (k * l) = m / k * (n / l)
[PROOFSTEP]
rcases k.eq_zero_or_pos with (rfl | hk0)
[GOAL]
case inl
m n l : ℕ
hnl : l ∣ n
hmk : 0 ∣ m
⊢ m * n / (0 * l) = m / 0 * (n / l)
[PROOFSTEP]
simp
[GOAL]
case inr
m n k l : ℕ
hmk : k ∣ m
hnl : l ∣ n
hk0 : k > 0
⊢ m * n / (k * l) = m / k * (n / l)
[PROOFSTEP]
rcases l.eq_zero_or_pos with (rfl | hl0)
[GOAL]
case inr.inl
m n k : ℕ
hmk : k ∣ m
hk0 : k > 0
hnl : 0 ∣ n
⊢ m * n / (k * 0) = m / k * (n / 0)
[PROOFSTEP]
simp
[GOAL]
case inr.inr
m n k l : ℕ
hmk : k ∣ m
hnl : l ∣ n
hk0 : k > 0
hl0 : l > 0
⊢ m * n / (k * l) = m / k * (n / l)
[PROOFSTEP]
obtain ⟨_, rfl⟩ := hmk
[GOAL]
case inr.inr.intro
n k l : ℕ
hnl : l ∣ n
hk0 : k > 0
hl0 : l > 0
w✝ : ℕ
⊢ k * w✝ * n / (k * l) = k * w✝ / k * (n / l)
[PROOFSTEP]
obtain ⟨_, rfl⟩ := hnl
[GOAL]
case inr.inr.intro.intro
k l : ℕ
hk0 : k > 0
hl0 : l > 0
w✝¹ w✝ : ℕ
⊢ k * w✝¹ * (l * w✝) / (k * l) = k * w✝¹ / k * (l * w✝ / l)
[PROOFSTEP]
rw [mul_mul_mul_comm, Nat.mul_div_cancel_left _ hk0, Nat.mul_div_cancel_left _ hl0,
Nat.mul_div_cancel_left _ (mul_pos hk0 hl0)]
[GOAL]
m n k l a b : ℕ
h : a / 2 < a - b
⊢ b ≤ a / 2
[PROOFSTEP]
rw [Nat.le_div_iff_mul_le two_pos]
[GOAL]
m n k l a b : ℕ
h : a / 2 < a - b
⊢ b * 2 ≤ a
[PROOFSTEP]
rw [Nat.div_lt_iff_lt_mul two_pos, Nat.mul_sub_right_distrib, lt_tsub_iff_right, mul_two a] at h
[GOAL]
m n k l a b : ℕ
h✝ : a < a * 2 - b * 2
h : a + b * 2 < a + a
⊢ b * 2 ≤ a
[PROOFSTEP]
exact le_of_lt (Nat.lt_of_add_lt_add_left h)
[GOAL]
m n k l a b : ℕ
h : a - b ≤ a / 2
⊢ a / 2 ≤ b
[PROOFSTEP]
rw [Nat.le_div_iff_mul_le two_pos, Nat.mul_sub_right_distrib, tsub_le_iff_right, mul_two, add_le_add_iff_left] at h
[GOAL]
m n k l a b : ℕ
h✝ : a * 2 - b * 2 ≤ a
h : a ≤ b * 2
⊢ a / 2 ≤ b
[PROOFSTEP]
rw [← Nat.mul_div_left b two_pos]
[GOAL]
m n k l a b : ℕ
h✝ : a * 2 - b * 2 ≤ a
h : a ≤ b * 2
⊢ a / 2 ≤ b * 2 / 2
[PROOFSTEP]
exact Nat.div_le_div_right h
[GOAL]
m n k l : ℕ
hn : n % 2 = 1
⊢ 2 * (n / 2) = n - 1
[PROOFSTEP]
conv =>
rhs
rw [← Nat.mod_add_div n 2, hn, @add_tsub_cancel_left]
[GOAL]
m n k l : ℕ
hn : n % 2 = 1
| 2 * (n / 2) = n - 1
[PROOFSTEP]
rhs
rw [← Nat.mod_add_div n 2, hn, @add_tsub_cancel_left]
[GOAL]
m n k l : ℕ
hn : n % 2 = 1
| 2 * (n / 2) = n - 1
[PROOFSTEP]
rhs
rw [← Nat.mod_add_div n 2, hn, @add_tsub_cancel_left]
[GOAL]
m n k l : ℕ
hn : n % 2 = 1
| 2 * (n / 2) = n - 1
[PROOFSTEP]
rhs
[GOAL]
m n k l : ℕ
hn : n % 2 = 1
| n - 1
[PROOFSTEP]
rw [← Nat.mod_add_div n 2, hn, @add_tsub_cancel_left]
[GOAL]
m n k l : ℕ
h : n ∣ m
hm : m ≠ 0
⊢ m / (m / n) = n
[PROOFSTEP]
rcases h with ⟨_, rfl⟩
[GOAL]
case intro
n k l w✝ : ℕ
hm : n * w✝ ≠ 0
⊢ n * w✝ / (n * w✝ / n) = n
[PROOFSTEP]
rw [mul_ne_zero_iff] at hm
[GOAL]
case intro
n k l w✝ : ℕ
hm : n ≠ 0 ∧ w✝ ≠ 0
⊢ n * w✝ / (n * w✝ / n) = n
[PROOFSTEP]
rw [mul_div_right _ (Nat.pos_of_ne_zero hm.1), mul_div_left _ (Nat.pos_of_ne_zero hm.2)]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
⊢ m % (n * k) / n = m / n % k
[PROOFSTEP]
rcases Nat.eq_zero_or_pos n with (rfl | hn)
[GOAL]
case inl
m✝ n k✝ l m k : ℕ
⊢ m % (0 * k) / 0 = m / 0 % k
case inr m✝ n✝ k✝ l m n k : ℕ hn : n > 0 ⊢ m % (n * k) / n = m / n % k
[PROOFSTEP]
simp [mod_zero]
[GOAL]
case inr
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
⊢ m % (n * k) / n = m / n % k
[PROOFSTEP]
rcases Nat.eq_zero_or_pos k with (rfl | hk)
[GOAL]
case inr.inl
m✝ n✝ k l m n : ℕ
hn : n > 0
⊢ m % (n * 0) / n = m / n % 0
case inr.inr m✝ n✝ k✝ l m n k : ℕ hn : n > 0 hk : k > 0 ⊢ m % (n * k) / n = m / n % k
[PROOFSTEP]
simp [mod_zero]
[GOAL]
case inr.inr
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
hk : k > 0
⊢ m % (n * k) / n = m / n % k
[PROOFSTEP]
conv_rhs => rw [← mod_add_div m (n * k)]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
hk : k > 0
| m / n % k
[PROOFSTEP]
rw [← mod_add_div m (n * k)]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
hk : k > 0
| m / n % k
[PROOFSTEP]
rw [← mod_add_div m (n * k)]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
hk : k > 0
| m / n % k
[PROOFSTEP]
rw [← mod_add_div m (n * k)]
[GOAL]
case inr.inr
m✝ n✝ k✝ l m n k : ℕ
hn : n > 0
hk : k > 0
⊢ m % (n * k) / n = (m % (n * k) + n * k * (m / (n * k))) / n % k
[PROOFSTEP]
rw [mul_assoc, add_mul_div_left _ _ hn, add_mul_mod_self_left,
mod_eq_of_lt (Nat.div_lt_of_lt_mul (mod_lt _ (mul_pos hn hk)))]
[GOAL]
m✝ n✝ k✝ l m n k : ℕ
⊢ m % (k * n) / n = m / n % k
[PROOFSTEP]
rw [mul_comm k, mod_mul_right_div_self]
[GOAL]
m n k l : ℕ
h1 : 0 < n
h2 : n < m
⊢ ¬m ∣ n
[PROOFSTEP]
rintro ⟨k, rfl⟩
[GOAL]
case intro
m k✝ l k : ℕ
h1 : 0 < m * k
h2 : m * k < m
⊢ False
[PROOFSTEP]
rcases Nat.eq_zero_or_pos k with (rfl | hk)
[GOAL]
case intro.inl
m k l : ℕ
h1 : 0 < m * 0
h2 : m * 0 < m
⊢ False
[PROOFSTEP]
exact lt_irrefl 0 h1
[GOAL]
case intro.inr
m k✝ l k : ℕ
h1 : 0 < m * k
h2 : m * k < m
hk : k > 0
⊢ False
[PROOFSTEP]
exact not_lt.2 (le_mul_of_pos_right hk) h2
[GOAL]
m n k l : ℕ
h : m % k = n % k
⊢ (m - n) % k = 0
[PROOFSTEP]
rw [← Nat.mod_add_div m k, ← Nat.mod_add_div n k, ← h, tsub_add_eq_tsub_tsub, @add_tsub_cancel_left, ← mul_tsub k,
Nat.mul_mod_right]
[GOAL]
m n k l : ℕ
⊢ (m + n) % k = if k ≤ m % k + n % k then m % k + n % k - k else m % k + n % k
[PROOFSTEP]
cases k
[GOAL]
case zero
m n l : ℕ
⊢ (m + n) % zero = if zero ≤ m % zero + n % zero then m % zero + n % zero - zero else m % zero + n % zero
case succ
m n l n✝ : ℕ
⊢ (m + n) % succ n✝ =
if succ n✝ ≤ m % succ n✝ + n % succ n✝ then m % succ n✝ + n % succ n✝ - succ n✝ else m % succ n✝ + n % succ n✝
[PROOFSTEP]
simp [mod_zero]
[GOAL]
case succ
m n l n✝ : ℕ
⊢ (m + n) % succ n✝ =
if succ n✝ ≤ m % succ n✝ + n % succ n✝ then m % succ n✝ + n % succ n✝ - succ n✝ else m % succ n✝ + n % succ n✝
[PROOFSTEP]
rw [Nat.add_mod]
[GOAL]
case succ
m n l n✝ : ℕ
⊢ (m % succ n✝ + n % succ n✝) % succ n✝ =
if succ n✝ ≤ m % succ n✝ + n % succ n✝ then m % succ n✝ + n % succ n✝ - succ n✝ else m % succ n✝ + n % succ n✝
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
m n l n✝ : ℕ
h : succ n✝ ≤ m % succ n✝ + n % succ n✝
⊢ (m % succ n✝ + n % succ n✝) % succ n✝ = m % succ n✝ + n % succ n✝ - succ n✝
[PROOFSTEP]
rw [Nat.mod_eq_sub_mod h, Nat.mod_eq_of_lt]
[GOAL]
case pos
m n l n✝ : ℕ
h : succ n✝ ≤ m % succ n✝ + n % succ n✝
⊢ m % succ n✝ + n % succ n✝ - succ n✝ < succ n✝
[PROOFSTEP]
exact (tsub_lt_iff_right h).mpr (Nat.add_lt_add (m.mod_lt (zero_lt_succ _)) (n.mod_lt (zero_lt_succ _)))
[GOAL]
case neg
m n l n✝ : ℕ
h : ¬succ n✝ ≤ m % succ n✝ + n % succ n✝
⊢ (m % succ n✝ + n % succ n✝) % succ n✝ = m % succ n✝ + n % succ n✝
[PROOFSTEP]
exact Nat.mod_eq_of_lt (lt_of_not_ge h)
[GOAL]
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
exi1 : ∃ x, m = n * x
exi2 : ∃ y, k = l * y
hn : n = 0
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
simp [hn]
[GOAL]
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
exi1 : ∃ x, m = n * x
exi2 : ∃ y, k = l * y
hn : ¬n = 0
this : 0 < n
hl : l = 0
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
simp [hl]
[GOAL]
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
exi1 : ∃ x, m = n * x
exi2 : ∃ y, k = l * y
hn : ¬n = 0
this : 0 < n
hl : ¬l = 0
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
have : 0 < l := Nat.pos_of_ne_zero hl
[GOAL]
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
exi1 : ∃ x, m = n * x
exi2 : ∃ y, k = l * y
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
cases' exi1 with x hx
[GOAL]
case intro
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
exi2 : ∃ y, k = l * y
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
cases' exi2 with y hy
[GOAL]
case intro.intro
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ m / n * (k / l) = m * k / (n * l)
[PROOFSTEP]
rw [hx, hy, Nat.mul_div_cancel_left, Nat.mul_div_cancel_left]
[GOAL]
case intro.intro
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ x * y = n * x * (l * y) / (n * l)
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
apply Eq.symm
[GOAL]
case intro.intro.h
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ n * x * (l * y) / (n * l) = x * y
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
apply Nat.div_eq_of_eq_mul_left
[GOAL]
case intro.intro.h.H1
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n * l
case intro.intro.h.H2
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ n * x * (l * y) = x * y * (n * l)
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
apply mul_pos
[GOAL]
case intro.intro.h.H1.ha
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
case intro.intro.h.H1.hb
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
case intro.intro.h.H2
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ n * x * (l * y) = x * y * (n * l)
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
repeat'
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.h.H1.ha
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.h.H1.hb
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
[PROOFSTEP]
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.h.H2
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ n * x * (l * y) = x * y * (n * l)
[PROOFSTEP]
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < l
[PROOFSTEP]
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.H
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ 0 < n
[PROOFSTEP]
assumption
-- Porting note: this line was `cc` in Lean3
[GOAL]
case intro.intro.h.H2
m n k l : ℕ
hmn : n ∣ m
hkl : l ∣ k
hn : ¬n = 0
this✝ : 0 < n
hl : ¬l = 0
this : 0 < l
x : ℕ
hx : m = n * x
y : ℕ
hy : k = l * y
⊢ n * x * (l * y) = x * y * (n * l)
[PROOFSTEP]
simp only [mul_comm, mul_left_comm, mul_assoc]
[GOAL]
m n k l : ℕ
⊢ m / n = m ↔ m = 0 ∨ n = 1
[PROOFSTEP]
constructor
[GOAL]
case mp
m n k l : ℕ
⊢ m / n = m → m = 0 ∨ n = 1
[PROOFSTEP]
intro
[GOAL]
case mp
m n k l : ℕ
a✝ : m / n = m
⊢ m = 0 ∨ n = 1
[PROOFSTEP]
match n with
| 0 => simp_all
| 1 =>
right
rfl
| n + 2 =>
left
have : m / (n + 2) ≤ m / 2 := div_le_div_left (by simp) (by decide)
refine eq_zero_of_le_half ?_
simp_all
[GOAL]
m n k l : ℕ
a✝ : m / 0 = m
⊢ m = 0 ∨ 0 = 1
[PROOFSTEP]
simp_all
[GOAL]
m n k l : ℕ
a✝ : m / 1 = m
⊢ m = 0 ∨ 1 = 1
[PROOFSTEP]
right
[GOAL]
case h
m n k l : ℕ
a✝ : m / 1 = m
⊢ 1 = 1
[PROOFSTEP]
rfl
[GOAL]
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
⊢ m = 0 ∨ n + 2 = 1
[PROOFSTEP]
left
[GOAL]
case h
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
⊢ m = 0
[PROOFSTEP]
have : m / (n + 2) ≤ m / 2 := div_le_div_left (by simp) (by decide)
[GOAL]
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
⊢ 2 ≤ n + 2
[PROOFSTEP]
simp
[GOAL]
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
⊢ 0 < 2
[PROOFSTEP]
decide
[GOAL]
case h
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
this : m / (n + 2) ≤ m / 2
⊢ m = 0
[PROOFSTEP]
refine eq_zero_of_le_half ?_
[GOAL]
case h
m n✝ k l n : ℕ
a✝ : m / (n + 2) = m
this : m / (n + 2) ≤ m / 2
⊢ m ≤ m / 2
[PROOFSTEP]
simp_all
[GOAL]
case mpr
m n k l : ℕ
⊢ m = 0 ∨ n = 1 → m / n = m
[PROOFSTEP]
rintro (rfl | rfl)
[GOAL]
case mpr.inl
n k l : ℕ
⊢ 0 / n = 0
[PROOFSTEP]
simp
[GOAL]
case mpr.inr
m k l : ℕ
⊢ m / 1 = m
[PROOFSTEP]
simp
[GOAL]
m n k l : ℕ
⊢ m / n = (m - m % n) / n
[PROOFSTEP]
by_cases n0 : n = 0
[GOAL]
case pos
m n k l : ℕ
n0 : n = 0
⊢ m / n = (m - m % n) / n
[PROOFSTEP]
rw [n0, Nat.div_zero, Nat.div_zero]
[GOAL]
case neg
m n k l : ℕ
n0 : ¬n = 0
⊢ m / n = (m - m % n) / n
[PROOFSTEP]
have : m - m % n = n * (m / n) := by rw [tsub_eq_iff_eq_add_of_le (Nat.mod_le _ _), add_comm, mod_add_div]
[GOAL]
m n k l : ℕ
n0 : ¬n = 0
⊢ m - m % n = n * (m / n)
[PROOFSTEP]
rw [tsub_eq_iff_eq_add_of_le (Nat.mod_le _ _), add_comm, mod_add_div]
[GOAL]
case neg
m n k l : ℕ
n0 : ¬n = 0
this : m - m % n = n * (m / n)
⊢ m / n = (m - m % n) / n
[PROOFSTEP]
rw [this, mul_div_right _ (Nat.pos_of_ne_zero n0)]
[GOAL]
m n k l : ℕ
h1 : n * k < m
h2 : m < n * (k + 1)
⊢ ¬n ∣ m
[PROOFSTEP]
rintro ⟨d, rfl⟩
[GOAL]
case intro
n k l d : ℕ
h1 : n * k < n * d
h2 : n * d < n * (k + 1)
⊢ False
[PROOFSTEP]
exact Monotone.ne_of_lt_of_lt_nat (Covariant.monotone_of_const n) k h1 h2 d rfl
[GOAL]
m n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
h : ∃ n, p n
⊢ 0 < Nat.find h ↔ ¬p 0
[PROOFSTEP]
rw [pos_iff_ne_zero, Ne, Nat.find_eq_zero]
[GOAL]
m n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
⊢ Nat.find hₘ + n = Nat.find hₙ
[PROOFSTEP]
refine ((le_find_iff _ _).2 fun m hm hpm => hm.not_le ?_).antisymm ?_
[GOAL]
case refine_1
m✝ n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
m : ℕ
hm : m < Nat.find hₘ + n
hpm : p m
⊢ Nat.find hₘ + n ≤ m
[PROOFSTEP]
have hnm : n ≤ m := hn.trans (find_le hpm)
[GOAL]
case refine_1
m✝ n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
m : ℕ
hm : m < Nat.find hₘ + n
hpm : p m
hnm : n ≤ m
⊢ Nat.find hₘ + n ≤ m
[PROOFSTEP]
refine add_le_of_le_tsub_right_of_le hnm (find_le ?_)
[GOAL]
case refine_1
m✝ n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
m : ℕ
hm : m < Nat.find hₘ + n
hpm : p m
hnm : n ≤ m
⊢ p (m - n + n)
[PROOFSTEP]
rwa [tsub_add_cancel_of_le hnm]
[GOAL]
case refine_2
m n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
⊢ Nat.find hₙ ≤ Nat.find hₘ + n
[PROOFSTEP]
rw [← tsub_le_iff_right]
[GOAL]
case refine_2
m n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
⊢ Nat.find hₙ - n ≤ Nat.find hₘ
[PROOFSTEP]
refine (le_find_iff _ _).2 fun m hm hpm => hm.not_le ?_
[GOAL]
case refine_2
m✝ n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
m : ℕ
hm : m < Nat.find hₙ - n
hpm : p (m + n)
⊢ Nat.find hₙ - n ≤ m
[PROOFSTEP]
rw [tsub_le_iff_right]
[GOAL]
case refine_2
m✝ n k l : ℕ
p q : ℕ → Prop
inst✝¹ : DecidablePred p
inst✝ : DecidablePred q
hₘ : ∃ m, p (m + n)
hₙ : ∃ n, p n
hn : n ≤ Nat.find hₙ
m : ℕ
hm : m < Nat.find hₙ - n
hpm : p (m + n)
⊢ Nat.find hₙ ≤ m + n
[PROOFSTEP]
exact find_le hpm
[GOAL]
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
⊢ Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
[PROOFSTEP]
induction' k with k ihk generalizing m
[GOAL]
case zero
m✝ n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
m : ℕ
⊢ Nat.findGreatest P zero = m ↔ m ≤ zero ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ zero → ¬P n
[PROOFSTEP]
rw [eq_comm, Iff.comm]
[GOAL]
case zero
m✝ n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
m : ℕ
⊢ (m ≤ zero ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ zero → ¬P n) ↔ m = Nat.findGreatest P zero
[PROOFSTEP]
simp only [zero_eq, nonpos_iff_eq_zero, ne_eq, findGreatest_zero, and_iff_left_iff_imp]
[GOAL]
case zero
m✝ n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
m : ℕ
⊢ m = 0 → (¬m = 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n = 0 → ¬P n
[PROOFSTEP]
rintro rfl
[GOAL]
case zero
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
⊢ (¬0 = 0 → P 0) ∧ ∀ ⦃n : ℕ⦄, 0 < n → n = 0 → ¬P n
[PROOFSTEP]
exact ⟨fun h => (h rfl).elim, fun n hlt heq => (hlt.ne heq.symm).elim⟩
[GOAL]
case succ
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
⊢ Nat.findGreatest P (succ k) = m ↔ m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
by_cases hk : P (k + 1)
[GOAL]
case pos
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
⊢ Nat.findGreatest P (succ k) = m ↔ m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
rw [findGreatest_eq hk]
[GOAL]
case pos
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
⊢ k + 1 = m ↔ m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
constructor
[GOAL]
case pos.mp
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
⊢ k + 1 = m → m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
rintro rfl
[GOAL]
case pos.mp
m n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
hk : P (k + 1)
⊢ k + 1 ≤ succ k ∧ (k + 1 ≠ 0 → P (k + 1)) ∧ ∀ ⦃n : ℕ⦄, k + 1 < n → n ≤ succ k → ¬P n
[PROOFSTEP]
exact ⟨le_rfl, fun _ => hk, fun n hlt hle => (hlt.not_le hle).elim⟩
[GOAL]
case pos.mpr
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
⊢ (m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n) → k + 1 = m
[PROOFSTEP]
rintro ⟨hle, h0, hm⟩
[GOAL]
case pos.mpr.intro.intro
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
hle : m ≤ succ k
h0 : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
⊢ k + 1 = m
[PROOFSTEP]
rcases Decidable.eq_or_lt_of_le hle with (rfl | hlt)
[GOAL]
case pos.mpr.intro.intro.inl
m n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
hk : P (k + 1)
hle : succ k ≤ succ k
h0 : succ k ≠ 0 → P (succ k)
hm : ∀ ⦃n : ℕ⦄, succ k < n → n ≤ succ k → ¬P n
⊢ k + 1 = succ k
case pos.mpr.intro.intro.inr
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : P (k + 1)
hle : m ≤ succ k
h0 : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
hlt : m < succ k
⊢ k + 1 = m
[PROOFSTEP]
exacts [rfl, (hm hlt le_rfl hk).elim]
[GOAL]
case neg
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
⊢ Nat.findGreatest P (succ k) = m ↔ m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
rw [findGreatest_of_not hk, ihk]
[GOAL]
case neg
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
⊢ (m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n) ↔
m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
constructor
[GOAL]
case neg.mp
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
⊢ (m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n) →
m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
rintro ⟨hle, hP, hm⟩
[GOAL]
case neg.mp.intro.intro
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle : m ≤ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
⊢ m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
[PROOFSTEP]
refine ⟨hle.trans k.le_succ, hP, fun n hlt hle => ?_⟩
[GOAL]
case neg.mp.intro.intro
m✝ n✝ k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle✝ : m ≤ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
n : ℕ
hlt : m < n
hle : n ≤ succ k
⊢ ¬P n
[PROOFSTEP]
rcases Decidable.eq_or_lt_of_le hle with (rfl | hlt')
[GOAL]
case neg.mp.intro.intro.inl
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle✝ : m ≤ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
hlt : m < succ k
hle : succ k ≤ succ k
⊢ ¬P (succ k)
case neg.mp.intro.intro.inr
m✝ n✝ k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle✝ : m ≤ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
n : ℕ
hlt : m < n
hle : n ≤ succ k
hlt' : n < succ k
⊢ ¬P n
[PROOFSTEP]
exacts [hk, hm hlt <| lt_succ_iff.1 hlt']
[GOAL]
case neg.mpr
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
⊢ (m ≤ succ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n) →
m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
[PROOFSTEP]
rintro ⟨hle, hP, hm⟩
[GOAL]
case neg.mpr.intro.intro
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle : m ≤ succ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
⊢ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
[PROOFSTEP]
refine ⟨lt_succ_iff.1 (hle.lt_of_ne ?_), hP, fun n hlt hle => hm hlt (hle.trans k.le_succ)⟩
[GOAL]
case neg.mpr.intro.intro
m✝ n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
m : ℕ
hk : ¬P (k + 1)
hle : m ≤ succ k
hP : m ≠ 0 → P m
hm : ∀ ⦃n : ℕ⦄, m < n → n ≤ succ k → ¬P n
⊢ m ≠ succ k
[PROOFSTEP]
rintro rfl
[GOAL]
case neg.mpr.intro.intro
m n k✝ l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
k : ℕ
ihk : ∀ {m : ℕ}, Nat.findGreatest P k = m ↔ m ≤ k ∧ (m ≠ 0 → P m) ∧ ∀ ⦃n : ℕ⦄, m < n → n ≤ k → ¬P n
hk : ¬P (k + 1)
hle : succ k ≤ succ k
hP : succ k ≠ 0 → P (succ k)
hm : ∀ ⦃n : ℕ⦄, succ k < n → n ≤ succ k → ¬P n
⊢ False
[PROOFSTEP]
exact hk (hP k.succ_ne_zero)
[GOAL]
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
⊢ Nat.findGreatest P k = 0 ↔ ∀ ⦃n : ℕ⦄, 0 < n → n ≤ k → ¬P n
[PROOFSTEP]
simp [findGreatest_eq_iff]
[GOAL]
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
hmb : m ≤ n
hm : P m
⊢ P (Nat.findGreatest P n)
[PROOFSTEP]
by_cases h : Nat.findGreatest P n = 0
[GOAL]
case pos
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
hmb : m ≤ n
hm : P m
h : Nat.findGreatest P n = 0
⊢ P (Nat.findGreatest P n)
[PROOFSTEP]
cases m
[GOAL]
case pos.zero
n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
h : Nat.findGreatest P n = 0
hmb : zero ≤ n
hm : P zero
⊢ P (Nat.findGreatest P n)
[PROOFSTEP]
rwa [h]
[GOAL]
case pos.succ
n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
h : Nat.findGreatest P n = 0
n✝ : ℕ
hmb : succ n✝ ≤ n
hm : P (succ n✝)
⊢ P (Nat.findGreatest P n)
[PROOFSTEP]
exact ((findGreatest_eq_zero_iff.1 h) (zero_lt_succ _) hmb hm).elim
[GOAL]
case neg
m n k l : ℕ
P Q : ℕ → Prop
inst✝ : DecidablePred P
hmb : m ≤ n
hm : P m
h : ¬Nat.findGreatest P n = 0
⊢ P (Nat.findGreatest P n)
[PROOFSTEP]
exact (findGreatest_eq_iff.1 rfl).2.1 h
[GOAL]
m n k l : ℕ
P✝ Q : ℕ → Prop
inst✝¹ : DecidablePred P✝
P : ℕ → Prop
inst✝ : DecidablePred P
⊢ Monotone (Nat.findGreatest P)
[PROOFSTEP]
refine monotone_nat_of_le_succ fun n => ?_
[GOAL]
m n✝ k l : ℕ
P✝ Q : ℕ → Prop
inst✝¹ : DecidablePred P✝
P : ℕ → Prop
inst✝ : DecidablePred P
n : ℕ
⊢ Nat.findGreatest P n ≤ Nat.findGreatest P (n + 1)
[PROOFSTEP]
rw [findGreatest_succ]
[GOAL]
m n✝ k l : ℕ
P✝ Q : ℕ → Prop
inst✝¹ : DecidablePred P✝
P : ℕ → Prop
inst✝ : DecidablePred P
n : ℕ
⊢ Nat.findGreatest P n ≤ if P (n + 1) then n + 1 else Nat.findGreatest P n
[PROOFSTEP]
split_ifs
[GOAL]
case pos
m n✝ k l : ℕ
P✝ Q : ℕ → Prop
inst✝¹ : DecidablePred P✝
P : ℕ → Prop
inst✝ : DecidablePred P
n : ℕ
h✝ : P (n + 1)
⊢ Nat.findGreatest P n ≤ n + 1
[PROOFSTEP]
exact (findGreatest_le n).trans (le_succ _)
[GOAL]
case neg
m n✝ k l : ℕ
P✝ Q : ℕ → Prop
inst✝¹ : DecidablePred P✝
P : ℕ → Prop
inst✝ : DecidablePred P
n : ℕ
h✝ : ¬P (n + 1)
⊢ Nat.findGreatest P n ≤ Nat.findGreatest P n
[PROOFSTEP]
rfl
[GOAL]
m n k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
⊢ Nat.findGreatest P ≤ Nat.findGreatest Q
[PROOFSTEP]
intro n
[GOAL]
m n✝ k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
n : ℕ
⊢ Nat.findGreatest P n ≤ Nat.findGreatest Q n
[PROOFSTEP]
induction' n with n hn
[GOAL]
case zero
m n k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
⊢ Nat.findGreatest P zero ≤ Nat.findGreatest Q zero
[PROOFSTEP]
rfl
[GOAL]
case succ
m n✝ k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
n : ℕ
hn : Nat.findGreatest P n ≤ Nat.findGreatest Q n
⊢ Nat.findGreatest P (succ n) ≤ Nat.findGreatest Q (succ n)
[PROOFSTEP]
by_cases h : P (n + 1)
[GOAL]
case pos
m n✝ k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
n : ℕ
hn : Nat.findGreatest P n ≤ Nat.findGreatest Q n
h : P (n + 1)
⊢ Nat.findGreatest P (succ n) ≤ Nat.findGreatest Q (succ n)
[PROOFSTEP]
rw [findGreatest_eq h, findGreatest_eq (hPQ _ h)]
[GOAL]
case neg
m n✝ k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
n : ℕ
hn : Nat.findGreatest P n ≤ Nat.findGreatest Q n
h : ¬P (n + 1)
⊢ Nat.findGreatest P (succ n) ≤ Nat.findGreatest Q (succ n)
[PROOFSTEP]
rw [findGreatest_of_not h]
[GOAL]
case neg
m n✝ k l : ℕ
P Q : ℕ → Prop
inst✝¹ : DecidablePred P
inst✝ : DecidablePred Q
hPQ : P ≤ Q
n : ℕ
hn : Nat.findGreatest P n ≤ Nat.findGreatest Q n
h : ¬P (n + 1)
⊢ Nat.findGreatest P n ≤ Nat.findGreatest Q (succ n)
[PROOFSTEP]
exact hn.trans (Nat.findGreatest_mono_right _ <| le_succ _)
[GOAL]
m n k l : ℕ
h : bit0 m ≤ bit1 n
⊢ m ≤ n
[PROOFSTEP]
rwa [← Nat.lt_succ_iff, n.bit1_eq_succ_bit0, ← n.bit0_succ_eq, bit0_lt_bit0, Nat.lt_succ_iff] at h
[GOAL]
m n k l : ℕ
h : bit1 m ≤ bit0 n
⊢ m < n
[PROOFSTEP]
rwa [m.bit1_eq_succ_bit0, succ_le_iff, bit0_lt_bit0] at h
[GOAL]
m n k l lo hi : ℕ
P : ℕ → Prop
H : DecidablePred P
al : ∀ (x : ℕ), x < hi - lo → P (lo + x)
x : ℕ
hl : lo ≤ x
hh : x < hi
⊢ P x
[PROOFSTEP]
have := al (x - lo) ((tsub_lt_tsub_iff_right hl).mpr hh)
[GOAL]
m n k l lo hi : ℕ
P : ℕ → Prop
H : DecidablePred P
al : ∀ (x : ℕ), x < hi - lo → P (lo + x)
x : ℕ
hl : lo ≤ x
hh : x < hi
this : P (lo + (x - lo))
⊢ P x
[PROOFSTEP]
rwa [add_tsub_cancel_of_le hl] at this
|
function [int, keepindex, repindex] = mesh_laplacian_interp(lap, index);
% MESH_LAPLACIAN_INTERP: Computes the zero Laplacian interpolation matrix
%
% Useage: [int, keepindex, repindex] = mesh_laplacian_interp(lap, index)
%
% This function calculates an interpolation matrix that provides
% the coefficients for the calculation of potential values at all
% unknown vertices of a mesh, given known potential values at
% a subset of the mesh vertices (at 'index'). The interpolation
% solution is constrained by a minimal norm of the Laplacian
% of the mesh. See the reference below for details.
%
% 'lap' is the laplacian matrix for the full mesh (see mesh_laplacian)
% 'int' is the matrix which interpolates from the points in 'index'
% to the full mesh. 'index' is a row vector of indices into a
% subset of the vertices used to calculate 'lap'. This subset
% is where the electric potential is known and usually corresponds
% to the given electrode vertices, eg:
%
% index = dsearchn(scalpvert,elecvert)';
%
% If 'index' contains repeated indices, only the unique indices
% are useful. The 'keepindex' array can be used to select these.
% The 'repindex' array is the repeated indices.
%
% Interpolations can be done using matrix 'int', eg:
%
% [int, keepindex, repindex] = mesh_laplacian_interp(lap,index);
% if isempty(repindex),
% Vint = int * Vknown;
% else
% Vint = int * Vknown(keepindex);
% end
%
% This implements interpolation method B (p. 336) of
% Oostendorp T, Oosterom A & Huiskamp G (1989),
% Interpolation on a triangulated 3D surface.
% Journal of Computational Physics, 80: 331-343.
%
% Licence: GNU GPL, no implied or express warranties
% History: (c) 04/2002 Robert Oostenveld
% - agreed to release 'lapint' under GNU GPL
% 04/2002, [email protected]
% - introduced check for index replications and
% adjusted calculations/output accordingly
% - converted lap to sparse matrix and solution
% of interpolation matrix with \ operator
% - accepts sparse lap input and returns sparse int
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if size(lap,1)~=size(lap,2), error('MESH_LAPLACIAN_INTERP: lap matrix is not square'); end
if issparse(lap), lap = full(lap); end
tic
if size(index,1)~=1, index = index'; end
% Remove any replicate indices from 'index'
[KnownIndex, i, i] = union(index,index);
if ~isequal(length(KnownIndex),length(index)),
fprintf('\a\n\nMESH_LAPLACIAN_INTERP: Warning:\n\nTrimming duplicate values from index, use keepindex!\n\n');
end
keepindex = sort(i);
repindex = setdiff(1:length(index),sort(i));
KnownIndex = index(keepindex); % unsort KnownIndex
clear index
k = length(KnownIndex);
n = length(lap);
fprintf('MESH_LAPLACIAN_INTERP: Calc Interpolation matrix for %5d to %5d vertices...',k,n);
% find 'unknown' indices of lap matrix
UnknownIndex = setdiff(1:n, KnownIndex);
% reshuffle rows & columns of lap matrix
lapi = [KnownIndex, UnknownIndex];
lap = lap(lapi, :); % rows
lap = lap(:, lapi); % columns
% Segregate known/unknown portions of lap
k = length(KnownIndex);
n = length(lap);
L11 = lap(1:k ,1:k );
L12 = lap(1:k ,(k+1):n);
L21 = lap((k+1):n,1:k );
L22 = lap((k+1):n,(k+1):n);
clear lap lapi; % tidy up some memory
% Convert to sparse for quicker computation
A = sparse([L12; L22]);
B = sparse([L11; L21]);
clear L11 L12 L21 L22; % tidy up some memory
%int = -pinv(A) * B; % cannot use pinv with sparse matrix
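% The coefficients for the unknown vertices solve the overdetermined system
% A*int = -B in the least-squares sense (yielding the minimal-norm-Laplacian
% solution described above), which backslash handles directly for sparse A.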
int = -A \ B;
% Convert result back to full matrix
int = full(int);
% append the interpolating piece to the identity matrix
% these take care of the known potentials
int = [eye(k); int];
% reshuffle the columns of the interpolating matrix
[tmp, order] = sort(KnownIndex);
int = int(:,order);
% reshuffle the rows of the interpolating matrix
[tmp, order] = sort([KnownIndex, UnknownIndex]);
int = int(order, :);
int = sparse(int);
t = toc;
fprintf('done (%6.2f sec).\n',t);
return
|
#!/usr/bin/env python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import numpy as np
import torch as th
import torchvision
from tqdm import tqdm
from tensorboardX import SummaryWriter
import utils
from networks.network import Net
def main(args):
writer = SummaryWriter(os.path.join("logs", args.dir))
    trainloader, testloader = get_loaders(args.batch_size, fashion=args.fashion)
net = Net(n_layers=args.layers)
states = []
for x, labels in trainloader:
states.append([th.rand(args.batch_size, 500, device=device) for _ in range(args.layers)]
+ [th.rand(args.batch_size, 10, device=device)])
for epoch in range(args.epochs):
running_loss = running_energy = running_true_positive = 0.
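        # NOTE: running_energy is never accumulated in this loop, so the
        # energy reported below is always zero.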
for i, (x, labels) in enumerate(tqdm(trainloader)):
x, labels = x.to(device).view(x.shape[0], -1), labels.to(device)
t = th.zeros(x.shape[0], 10, device=device)
t.scatter_(1, labels.unsqueeze(1), 1)
units = [x] + states[i]
units_free, units_clamped = net.fixed_points(units, t)
states[i] = units_free[1:]
net.update(units_free, units_clamped)
running_true_positive += (units_free[-1].argmax(1) == labels).sum().item()
running_loss += (t - units_free[-1]).pow(2).sum().item()
energy_train = running_energy / (len(trainloader) * args.batch_size)
accuracy_train = running_true_positive / (len(trainloader) * args.batch_size)
loss_train = running_loss / (len(trainloader) * args.batch_size)
print(f"Energy: {energy_train}, Accuracy: {accuracy_train}, Loss: {loss_train}")
writer.add_scalar(f"loss", loss_train, epoch)
writer.add_scalar(f"energy", energy_train, epoch)
writer.add_scalar(f"accuracy", accuracy_train, epoch)
def get_loaders(batch_size, fashion=False):
mnist = torchvision.datasets.MNIST
if fashion:
mnist = torchvision.datasets.FashionMNIST
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),])
trainloader = th.utils.data.DataLoader(
mnist(root="./data", train=True, download=True, transform=transform),
batch_size=batch_size,
shuffle=True,
num_workers=2)
testloader = th.utils.data.DataLoader(
mnist(root="./data", train=False, download=True, transform=transform),
batch_size=batch_size,
shuffle=False,
num_workers=2)
return trainloader, testloader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--layers", type=int, default=1)
parser.add_argument("--epochs", type=int, default=25)
parser.add_argument("--batch-size", type=int, default=20)
parser.add_argument("--fashion", action="store_true", default=False,
help="use fashion mnist")
parser.add_argument("--dir", default=utils.timestamp(),
help="name of output log directory")
parser.add_argument("--no-cuda", action="store_true", default=False)
args = parser.parse_args()
device = th.device("cpu" if (not th.cuda.is_available() or args.no_cuda) else "cuda")
device = th.device("cpu") # gpu version not working
main(args)
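# Example invocation (hypothetical file name; adjust to the actual script name):
#   python train.py --layers 2 --epochs 10 --dir myrun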
|
! { dg-do run }
!
! Check simplification of MERGE.
!
INTEGER, PARAMETER :: array(3) = [1, 2, 3]
LOGICAL, PARAMETER :: mask(3) = [ .TRUE., .FALSE., .TRUE. ]
INTEGER, PARAMETER :: scalar_1 = MERGE (1, 0, .TRUE.)
INTEGER, PARAMETER :: scalar_2 = MERGE (0, 1, .FALSE.)
INTEGER, PARAMETER :: array_1(3) = MERGE (array, 0, .TRUE.)
INTEGER, PARAMETER :: array_2(3) = MERGE (array, 0, .FALSE.)
INTEGER, PARAMETER :: array_3(3) = MERGE (0, array, .TRUE.)
INTEGER, PARAMETER :: array_4(3) = MERGE (0, array, .FALSE.)
INTEGER, PARAMETER :: array_5(3) = MERGE (1, 0, mask)
INTEGER, PARAMETER :: array_6(3) = MERGE (array, -array, mask)
INTEGER, PARAMETER :: array_7(3) = MERGE ([1,2,3], -array, mask)
IF (scalar_1 /= 1 .OR. scalar_2 /= 1) CALL abort
IF (.NOT. ALL (array_1 == array)) CALL abort
IF (.NOT. ALL (array_2 == [0, 0, 0])) CALL abort
IF (.NOT. ALL (array_3 == [0, 0, 0])) CALL abort
IF (.NOT. ALL (array_4 == array)) CALL abort
IF (.NOT. ALL (array_5 == [1, 0, 1])) CALL abort
IF (.NOT. ALL (array_6 == [1, -2, 3])) CALL abort
END
|
The Spanish and Neapolitan governments had classified forty-three Italian cardinals into five categories: "good" (eleven cardinals), "indifferent" (eight), "doubtful" (three), "bad" (fifteen) and "very bad" (six):
|
-- Copyright (c) Microsoft Corporation. All rights reserved.
-- Licensed under the MIT license.
universes u v
-- Basic
lemma prod_inj: ∀ {α:Type u} {β:Type v} (p q:α × β),
p = q ↔ p.fst = q.fst ∧ p.snd = q.snd
:= begin
intros,
split,
{ intros H, rw H, split; refl },
{ intros H, cases p, cases q, simp at H,
cases H with H1 H2, rw [H1, H2] }
end
lemma ite_or:
∀ {α:Type} {c} {x y z:α} (HDEC:decidable c)
(HITE:x = @ite c HDEC α y z),
x = y ∨ x = z
:= begin
intros,
cases HDEC,
{ rw if_neg at HITE, right, assumption, assumption },
{ rw if_pos at HITE, left, assumption, assumption }
end
-- Arithmetic operations
lemma nat.pow_mul_pow: ∀ (m n:ℕ),
m * m^n = m^(n+1)
:= begin
intros, apply nat.mul_comm
end
lemma nat.lt_pow2: ∀ (n:ℕ),
n < 2^n
:= begin
intros,
induction n,
{ simp, constructor },
{
have n_ih := nat.succ_lt_succ n_ih,
apply lt_of_lt_of_le,
{ apply n_ih },
{ unfold has_pow.pow, rw nat.pow, rw nat.mul_comm, rw nat.succ_mul,
simp, rw ← nat.add_one,
apply nat.add_le_add_left,
apply nat.pos_pow_of_pos, constructor, constructor
}
}
end
lemma nat.mod_pow2: ∀ (x:ℕ),
x % 2^x = x
:= begin
intros, apply nat.mod_eq_of_lt, apply nat.lt_pow2
end
lemma neq_symm: ∀ {α:Type u} (x y:α),
x ≠ y → y ≠ x
:= begin
intros, intros H, rw H at a, apply a, refl
end
lemma nat.not_gt_not_eq_implies_lt: ∀ (x y:ℕ),
¬(x > y) → (x ≠ y) → x < y
:= begin
intros x y H1 H2,
have H3 := nat.lt_or_ge x y,
apply or.resolve_right,
apply H3,
intros H0, apply H1, apply nat.lt_of_le_and_ne, apply H0, apply neq_symm, assumption
end
-- list
lemma list.foldl_unroll: ∀ {α:Type u} {β:Type v} {f:α → β → α} a (l:list β) x,
list.foldl f a (l++[x]) = f (list.foldl f a l) x
:= begin
intros,
revert a,
induction l,
{ simp },
{ intros, simp, rw l_ih }
end
lemma list.notmem_and {α:Type u} [decidable_eq α] : ∀ (l:list α) h a,
(a ∉ h::l) ↔ (a ≠ h ∧ a ∉ l)
:= begin
intros,
have HD:decidable (a ∈ l),
{ apply (list.decidable_mem a l)},
simp, rw decidable.not_or_iff_and_not
end
lemma list.cases_back: ∀ {α:Type u} (l:list α),
l = [] ∨ ∃ a l', l = l' ++ [a]
:= begin
intros,
induction l,
{ left, refl },
{
cases l_ih,
{
right,
rw l_ih,
apply exists.intro l_hd,
apply exists.intro [], refl
},
{
cases l_ih,
cases l_ih_h,
right,
rw l_ih_h_h,
apply exists.intro l_ih_w,
apply exists.intro (l_hd :: l_ih_h_w),
refl
}
}
end
lemma list.map_nil: ∀ {α:Type u} {β:Type v} (l:list α) (f:α → β)
(H:list.map f l = []), l = []
:= begin
intros,
induction l,
simp, simp at H, cases H
end
lemma list.map_eq: ∀ {α:Type u} {β:Type v} (l:list α) (f g:α → β)
(H:∀ x, f x = g x),
l.map f = l.map g
:= begin
intros, induction l; simp [*]
end
lemma list.foldr_inv: ∀ {α:Type u} {β:Type v} (la:list α) (b:β) (f:α → β → β)
(P:β → Prop)
(HINV:∀ b', P b' → ∀ a', P (f a' b'))
(H0:P b),
P (list.foldr f b la)
:= begin
intros,
induction la,
{ simp, assumption },
{ simp, apply HINV, assumption }
end
lemma list.foldr_eq: ∀ {α:Type u} {β:Type v} (la1 la2:list α)
(b1 b2:β) (f1 f2:α → β → β)
(HF: ∀ a b, f1 a b = f2 a b)
(HLA:la1 = la2)
(HB: b1 = b2),
list.foldr f1 b1 la1 = list.foldr f2 b2 la2
:= begin
intros,
revert la1,
induction la2,
{ intros, subst HLA, simp, assumption },
{ intros,
subst HLA,
simp,
rw la2_ih,
apply HF, refl
}
end
lemma list.append_eq: ∀ {α:Type u} {l l21 l22:list α},
l ++ l21 = l ++ l22 → l21 = l22
:= begin
intros,
induction l, simp at a, assumption,
apply l_ih, simp at a, assumption
end
lemma list.append_nil_eq: ∀ {α:Type u} {l l2:list α},
l = l2 ++ l → l2 = []
:= begin
intros, induction l with h t, simp at a, rw a,
cases l2, refl,
have a' : (h :: t).length = (l2_hd :: l2_tl ++ h :: t).length, rw ← a,
simp at a',
have a' := nat.add_left_cancel a',
rw ← nat.add_assoc at a',
rw nat.add_comm 1 at a',
rw nat.add_assoc at a',
rw ← nat.add_zero (list.length t) at a',
have a' := nat.add_left_cancel a',
rw nat.one_add at a', cases a'
end
lemma list.append_eq2: ∀ {α:Type u} {l l1 l2:list α},
l1 ++ l = l2 ++ l → l1 = l2
:= begin
intros,
revert l2,
induction l1, intros, simp at a, rw list.append_nil_eq a,
intros, cases l2,
rw list.nil_append at a, have a' := list.append_nil_eq (eq.symm a), assumption,
injection a, rw h_1, rw l1_ih, apply h_2
end
inductive list.unique {α:Type u}: list α → Prop
| nil: list.unique []
| cons: ∀ a l (H:list.unique l) (H2:a ∉ l), list.unique (a::l)
lemma list.not_mem_append: ∀ {α:Type u} {a} {l1 l2:list α}
(H1: a ∉ l1) (H2: a ∉ l2), a ∉ (l1 ++ l2)
:= begin
intros, intros H0, rw list.mem_append at H0,
cases H0, apply H1, assumption, apply H2, assumption
end
-- if two lists are different, appended lists are always different
lemma list.append_ne {α:Type u}:
∀ {s1 s2:list α} {a1 a2:α} (H:a1 ≠ a2),
s1 ++ [a1] ≠ s2 ++ [a2]
:= begin
intros, revert s2,
induction s1,
{
intros, cases s2, simp, assumption,
simp, intros H, cases H, cases s2_tl,
simp at H_right, assumption, cases H_right
},
{
intros, intros H, cases s2, simp at H,
cases H, cases s1_tl, simp at H_right, assumption,
simp at H_right, assumption,
apply s1_ih, simp at H, cases H, assumption
}
end
-- strings
def string.reverse (s:string):string := s.to_list.reverse.as_string
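-- sanity check: string.reverse "abc" = "cba"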
lemma string.append_to_list: ∀ {s1 s2:string},
(s1 ++ s2).to_list = s1.to_list ++ s2.to_list
:= begin
intros,
cases s1, cases s2,
unfold string.to_list,
refl
end
lemma string.eq_list: ∀ {s1 s2:string},
s1 = s2 ↔ s1.to_list = s2.to_list
:= begin
intros,
split,
intros,
cases s1, cases s2,
injection a,
intros, cases s1, cases s2, unfold string.to_list at a, rw a
end
lemma string.eq_suffix: ∀ {s1 s2 suffix:string}
(H:s1 ++ suffix = s2 ++ suffix), s1 = s2
:= begin
intros,
rw string.eq_list at H,
rw string.append_to_list at H,
rw string.append_to_list at H,
have H := list.append_eq2 H,
rw string.eq_list,
assumption
end
lemma string.eq_prefix: ∀ {s1 s2 pref:string}
(H:pref++s1 = pref++s2), s1 = s2
:= begin
intros,
rw string.eq_list at H,
rw string.append_to_list at H,
rw string.append_to_list at H,
have H := list.append_eq H,
rw string.eq_list,
assumption
end
lemma slist_prefix_notin: ∀ {l: list string} {a} (pref: string)
(H:a ∉ l), pref ++ a ∉ l.map (λs, pref ++ s)
:= begin
intros,
intros H',
apply H, clear H,
induction l with h t, simp at H', cases H',
simp at *,
cases H', left, apply string.eq_prefix, assumption,
right, apply l_ih, assumption
end
-- Just a special case - if first characters of two prefixes are
-- different, ∉ holds.
lemma slist_prefix_notin2: ∀ {l: list string} {a}
(pfx1 pfx2: string) {pfx1_t pfx2_t: list char} (pfx1_h pfx2_h : char)
(H:a ∉ l) (HNE:pfx1_h ≠ pfx2_h)
(H1:pfx1 = (pfx1_h::pfx1_t).as_string)
(H1:pfx2 = (pfx2_h::pfx2_t).as_string),
pfx1 ++ a ∉ l.map (λs, pfx2 ++ s)
:= begin
intros,
induction l with h t,
{ simp },
rw list.map,
intros H0, apply l_ih,
{ intros H', apply H, simp, right, assumption },
simp at H0,
cases H0,
{
rw string.eq_list at H0,
rw string.append_to_list at H0,
rw string.append_to_list at H0,
rw [H1, H1_1] at H0,
unfold list.as_string at H0,
unfold string.to_list at H0,
simp at H0, cases H0, exfalso, apply HNE, assumption
}, apply H0
end
lemma slist_prefix_unique: ∀ (l: list string) (pref: string)
(H:list.unique l), list.unique (l.map (λs, pref ++ s))
:= begin
intros,
induction l with h t,
{ apply H },
{
cases H, have H := l_ih H_H,
apply list.unique.cons, assumption,
simp, apply slist_prefix_notin, assumption
}
end |
/-
Copyright (c) 2020 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
-/
import data.polynomial.degree.definitions
/-!
# Cancel the leading terms of two polynomials
## Definition
* `cancel_leads p q`: the polynomial formed by multiplying `p` and `q` by monomials so that they
have the same leading term, and then subtracting.
## Main Results
The degree of `cancel_leads` is less than that of the larger of the two polynomials being cancelled.
Thus it is useful for induction or minimal-degree arguments.
-/
namespace polynomial
noncomputable theory
open_locale polynomial
variables {R : Type*}
section ring
variables [ring R] (p q : R[X])
/-- `cancel_leads p q` is formed by multiplying `p` and `q` by monomials so that they
have the same leading term, and then subtracting. -/
def cancel_leads : R[X] :=
C p.leading_coeff * X ^ (p.nat_degree - q.nat_degree) * q -
C q.leading_coeff * X ^ (q.nat_degree - p.nat_degree) * p
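-- For instance, over ℤ with p = X ^ 2 + 1 and q = X + 1 this gives
-- cancel_leads p q = X * (X + 1) - (X ^ 2 + 1) = X - 1,
-- whose degree drops strictly below the larger of the two input degrees.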
variables {p q}
@[simp] lemma neg_cancel_leads : - p.cancel_leads q = q.cancel_leads p := neg_sub _ _
end ring
section comm_ring
variables [comm_ring R] {p q : R[X]}
lemma dvd_cancel_leads_of_dvd_of_dvd {r : R[X]} (pq : p ∣ q) (pr : p ∣ r) :
p ∣ q.cancel_leads r :=
dvd_sub (pr.trans (dvd.intro_left _ rfl)) (pq.trans (dvd.intro_left _ rfl))
end comm_ring
lemma nat_degree_cancel_leads_lt_of_nat_degree_le_nat_degree [comm_ring R] [is_domain R]
{p q : R[X]} (h : p.nat_degree ≤ q.nat_degree) (hq : 0 < q.nat_degree) :
(p.cancel_leads q).nat_degree < q.nat_degree :=
begin
by_cases hp : p = 0,
{ convert hq,
simp [hp, cancel_leads], },
rw [cancel_leads, sub_eq_add_neg, tsub_eq_zero_iff_le.mpr h, pow_zero, mul_one],
by_cases h0 :
C p.leading_coeff * q + -(C q.leading_coeff * X ^ (q.nat_degree - p.nat_degree) * p) = 0,
{ convert hq,
simp only [h0, nat_degree_zero], },
have hq0 : ¬ q = 0,
{ contrapose! hq,
simp [hq] },
apply lt_of_le_of_ne,
{ rw [← with_bot.coe_le_coe, ← degree_eq_nat_degree h0, ← degree_eq_nat_degree hq0],
apply le_trans (degree_add_le _ _),
rw ← leading_coeff_eq_zero at hp hq0,
simp only [max_le_iff, degree_C hp, degree_C hq0, le_refl q.degree, true_and, nat.cast_with_bot,
nsmul_one, degree_neg, degree_mul, zero_add, degree_X, degree_pow],
rw leading_coeff_eq_zero at hp hq0,
rw [degree_eq_nat_degree hp, degree_eq_nat_degree hq0, ← with_bot.coe_add, with_bot.coe_le_coe,
tsub_add_cancel_of_le h], },
{ contrapose! h0,
rw [← leading_coeff_eq_zero, leading_coeff, h0, mul_assoc, mul_comm _ p,
← tsub_add_cancel_of_le h, add_comm _ p.nat_degree],
simp only [coeff_mul_X_pow, coeff_neg, coeff_C_mul, add_tsub_cancel_left, coeff_add],
rw [add_comm p.nat_degree, tsub_add_cancel_of_le h, ← leading_coeff, ← leading_coeff,
mul_comm _ q.leading_coeff, ← sub_eq_add_neg, ← mul_sub, sub_self, mul_zero] }
end
end polynomial
|
State Before: α : Type u_1
β : Type ?u.19875
t : α
ts ys l : List α
⊢ l ∈ (permutationsAux2 t ts [] ys id).snd ↔ ∃ l₁ l₂, l₂ ≠ [] ∧ ys = l₁ ++ l₂ ∧ l = l₁ ++ t :: l₂ ++ ts State After: α : Type u_1
β : Type ?u.19875
t : α
ts ys l : List α
⊢ l ∈ (permutationsAux2 t ts [] ys fun x => [] ++ x).snd ↔ ∃ l₁ l₂, l₂ ≠ [] ∧ ys = l₁ ++ l₂ ∧ l = l₁ ++ t :: l₂ ++ ts Tactic: rw [show @id (List α) = ([] ++ .) by funext _; rfl] State Before: α : Type u_1
β : Type ?u.19875
t : α
ts ys l : List α
⊢ l ∈ (permutationsAux2 t ts [] ys fun x => [] ++ x).snd ↔ ∃ l₁ l₂, l₂ ≠ [] ∧ ys = l₁ ++ l₂ ∧ l = l₁ ++ t :: l₂ ++ ts State After: no goals Tactic: apply mem_permutationsAux2 State Before: α : Type u_1
β : Type ?u.19875
t : α
ts ys l : List α
⊢ id = fun x => [] ++ x State After: case h
α : Type u_1
β : Type ?u.19875
t : α
ts ys l x✝ : List α
⊢ id x✝ = [] ++ x✝ Tactic: funext _ State Before: case h
α : Type u_1
β : Type ?u.19875
t : α
ts ys l x✝ : List α
⊢ id x✝ = [] ++ x✝ State After: no goals Tactic: rfl |
From iris.proofmode Require Import coq_tactics reduction.
From iris.proofmode Require Export tactics.
From iris.program_logic Require Import atomic.
From iris.heap_lang Require Export tactics derived_laws.
From iris.heap_lang Require Import notation.
From iris.prelude Require Import options.
Import uPred.
Lemma tac_wp_expr_eval `{!heapG Σ} Δ s E Φ e e' :
(∀ (e'':=e'), e = e'') →
envs_entails Δ (WP e' @ s; E {{ Φ }}) → envs_entails Δ (WP e @ s; E {{ Φ }}).
Proof. by intros ->. Qed.
Lemma tac_twp_expr_eval `{!heapG Σ} Δ s E Φ e e' :
(∀ (e'':=e'), e = e'') →
envs_entails Δ (WP e' @ s; E [{ Φ }]) → envs_entails Δ (WP e @ s; E [{ Φ }]).
Proof. by intros ->. Qed.
Tactic Notation "wp_expr_eval" tactic3(t) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
notypeclasses refine (tac_wp_expr_eval _ _ _ _ e _ _ _);
[let x := fresh in intros x; t; unfold x; notypeclasses refine eq_refl|]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
notypeclasses refine (tac_twp_expr_eval _ _ _ _ e _ _ _);
[let x := fresh in intros x; t; unfold x; notypeclasses refine eq_refl|]
| _ => fail "wp_expr_eval: not a 'wp'"
end.
Ltac wp_expr_simpl := wp_expr_eval simpl.
Lemma tac_wp_pure `{!heapG Σ} Δ Δ' s E K e1 e2 φ n Φ :
PureExec φ n e1 e2 →
φ →
MaybeIntoLaterNEnvs n Δ Δ' →
envs_entails Δ' (WP (fill K e2) @ s; E {{ Φ }}) →
envs_entails Δ (WP (fill K e1) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ??? HΔ'. rewrite into_laterN_env_sound /=.
(* We want [pure_exec_fill] to be available to TC search locally. *)
pose proof @pure_exec_fill.
rewrite HΔ' -lifting.wp_pure_step_later //.
Qed.
Lemma tac_twp_pure `{!heapG Σ} Δ s E K e1 e2 φ n Φ :
PureExec φ n e1 e2 →
φ →
envs_entails Δ (WP (fill K e2) @ s; E [{ Φ }]) →
envs_entails Δ (WP (fill K e1) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> ?? ->.
(* We want [pure_exec_fill] to be available to TC search locally. *)
pose proof @pure_exec_fill.
rewrite -total_lifting.twp_pure_step //.
Qed.
Lemma tac_wp_value_nofupd `{!heapG Σ} Δ s E Φ v :
envs_entails Δ (Φ v) → envs_entails Δ (WP (Val v) @ s; E {{ Φ }}).
Proof. rewrite envs_entails_eq=> ->. by apply wp_value. Qed.
Lemma tac_twp_value_nofupd `{!heapG Σ} Δ s E Φ v :
envs_entails Δ (Φ v) → envs_entails Δ (WP (Val v) @ s; E [{ Φ }]).
Proof. rewrite envs_entails_eq=> ->. by apply twp_value. Qed.
Lemma tac_wp_value `{!heapG Σ} Δ s E (Φ : val → iPropI Σ) v :
envs_entails Δ (|NC={E}=> Φ v) → envs_entails Δ (WP (Val v) @ s; E {{ Φ }}).
Proof. rewrite envs_entails_eq=> ->. by rewrite wp_value_fupd. Qed.
Lemma tac_twp_value `{!heapG Σ} Δ s E (Φ : val → iPropI Σ) v :
envs_entails Δ (|NC={E}=> Φ v) → envs_entails Δ (WP (Val v) @ s; E [{ Φ }]).
Proof. rewrite envs_entails_eq=> ->. by rewrite twp_value_fupd. Qed.
(** Simplify the goal if it is [WP] of a value.
If the postcondition already allows a fupd, do not add a second one.
But otherwise, *do* add a fupd. This ensures that all the lemmas applied
here are bidirectional, so we never will make a goal unprovable. *)
Ltac wp_value_head :=
lazymatch goal with
| |- envs_entails _ (wp ?s ?E (Val _) (λ _, fupd ?E _ _)) =>
eapply tac_wp_value_nofupd
| |- envs_entails _ (wp ?s ?E (Val _) (λ _, wp _ ?E _ _)) =>
eapply tac_wp_value_nofupd
| |- envs_entails _ (wp ?s ?E (Val _) _) =>
eapply tac_wp_value
| |- envs_entails _ (twp ?s ?E (Val _) (λ _, fupd ?E _ _)) =>
eapply tac_twp_value_nofupd
| |- envs_entails _ (twp ?s ?E (Val _) (λ _, twp _ ?E _ _)) =>
eapply tac_twp_value_nofupd
| |- envs_entails _ (twp ?s ?E (Val _) _) =>
eapply tac_twp_value
end.
Ltac wp_finish :=
  wp_expr_simpl; (* simplify occurrences of subst/fill *)
try wp_value_head; (* in case we have reached a value, get rid of the WP *)
pm_prettify. (* prettify ▷s caused by [MaybeIntoLaterNEnvs] and
λs caused by wp_value *)
Ltac solve_vals_compare_safe :=
(* The first branch is for when we have [vals_compare_safe] in the context.
The other two branches are for when either one of the branches reduces to
[True] or we have it in the context. *)
fast_done || (left; fast_done) || (right; fast_done).
(** The argument [efoc] can be used to specify the construct that should be
reduced. For example, you can write [wp_pure (If _ _ _)], which will search
for an [If _ _ _] in the expression, and reduce it.
The use of [open_constr] in this tactic is essential. It will convert all holes
(i.e. [_]s) into evars, which later get unified when an occurrence is found
(see [unify e' efoc] in the code below). *)
Tactic Notation "wp_pure" open_constr(efoc) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
let e := eval simpl in e in
reshape_expr e ltac:(fun K e' =>
unify e' efoc;
eapply (tac_wp_pure _ _ _ _ K e');
[iSolveTC (* PureExec *)
|try solve_vals_compare_safe (* The pure condition for PureExec --
handles trivial goals, including [vals_compare_safe] *)
|iSolveTC (* IntoLaters *)
|wp_finish (* new goal *)
])
|| fail "wp_pure: cannot find" efoc "in" e "or" efoc "is not a redex"
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
let e := eval simpl in e in
reshape_expr e ltac:(fun K e' =>
unify e' efoc;
eapply (tac_twp_pure _ _ _ K e');
[iSolveTC (* PureExec *)
|try solve_vals_compare_safe (* The pure condition for PureExec *)
|wp_finish (* new goal *)
])
|| fail "wp_pure: cannot find" efoc "in" e "or" efoc "is not a redex"
| _ => fail "wp_pure: not a 'wp'"
end.
(* TODO: do this in one go, without [repeat]. *)
Ltac wp_pures :=
iStartProof;
first [ (* The `;[]` makes sure that no side-condition magically spawns. *)
progress repeat (wp_pure _; [])
| wp_finish (* In case wp_pure never ran, make sure we do the usual cleanup. *)
].
(** Unlike [wp_pures], the tactics [wp_rec] and [wp_lam] should also reduce
lambdas/recs that are hidden behind a definition, i.e. they should use
[AsRecV_recv] as a proper instance instead of a [Hint Extern].
We achieve this by putting [AsRecV_recv] in the current environment so that it
can be used as an instance by the typeclass resolution system. We then perform
the reduction, and finally we clear this new hypothesis. *)
Tactic Notation "wp_rec" :=
let H := fresh in
assert (H := AsRecV_recv);
wp_pure (App _ _);
clear H.
Tactic Notation "wp_if" := wp_pure (If _ _ _).
Tactic Notation "wp_if_true" := wp_pure (If (LitV (LitBool true)) _ _).
Tactic Notation "wp_if_false" := wp_pure (If (LitV (LitBool false)) _ _).
Tactic Notation "wp_unop" := wp_pure (UnOp _ _).
Tactic Notation "wp_binop" := wp_pure (BinOp _ _ _).
Tactic Notation "wp_op" := wp_unop || wp_binop.
Tactic Notation "wp_lam" := wp_rec.
Tactic Notation "wp_let" := wp_pure (Rec BAnon (BNamed _) _); wp_lam.
Tactic Notation "wp_seq" := wp_pure (Rec BAnon BAnon _); wp_lam.
Tactic Notation "wp_proj" := wp_pure (Fst _) || wp_pure (Snd _).
Tactic Notation "wp_case" := wp_pure (Case _ _ _).
Tactic Notation "wp_match" := wp_case; wp_pure (Rec _ _ _); wp_lam.
Tactic Notation "wp_inj" := wp_pure (InjL _) || wp_pure (InjR _).
Tactic Notation "wp_pair" := wp_pure (Pair _ _).
Tactic Notation "wp_closure" := wp_pure (Rec _ _ _).
Lemma tac_wp_bind `{!heapG Σ} K Δ s E Φ e f :
f = (λ e, fill K e) → (* as an eta expanded hypothesis so that we can `simpl` it *)
envs_entails Δ (WP e @ s; E {{ v, WP f (Val v) @ s; E {{ Φ }} }})%I →
envs_entails Δ (WP fill K e @ s; E {{ Φ }}).
Proof. rewrite envs_entails_eq=> -> ->. by apply: wp_bind. Qed.
Lemma tac_twp_bind `{!heapG Σ} K Δ s E Φ e f :
f = (λ e, fill K e) → (* as an eta expanded hypothesis so that we can `simpl` it *)
envs_entails Δ (WP e @ s; E [{ v, WP f (Val v) @ s; E [{ Φ }] }])%I →
envs_entails Δ (WP fill K e @ s; E [{ Φ }]).
Proof. rewrite envs_entails_eq=> -> ->. by apply: twp_bind. Qed.
Ltac wp_bind_core K :=
lazymatch eval hnf in K with
| [] => idtac
| _ => eapply (tac_wp_bind K); [simpl; reflexivity|reduction.pm_prettify]
end.
Ltac twp_bind_core K :=
lazymatch eval hnf in K with
| [] => idtac
| _ => eapply (tac_twp_bind K); [simpl; reflexivity|reduction.pm_prettify]
end.
Tactic Notation "wp_bind" open_constr(efoc) :=
iStartProof;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first [ reshape_expr e ltac:(fun K e' => unify e' efoc; wp_bind_core K)
| fail 1 "wp_bind: cannot find" efoc "in" e ]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first [ reshape_expr e ltac:(fun K e' => unify e' efoc; twp_bind_core K)
| fail 1 "wp_bind: cannot find" efoc "in" e ]
| _ => fail "wp_bind: not a 'wp'"
end.
(** Heap tactics *)
Section heap.
Context `{!heapG Σ}.
Implicit Types P Q : iProp Σ.
Implicit Types Φ : val → iProp Σ.
Implicit Types Δ : envs (uPredI (iResUR Σ)).
Implicit Types v : val.
Implicit Types z : Z.
Lemma tac_wp_allocN Δ Δ' s E j K v n Φ :
(0 < n)%Z →
MaybeIntoLaterNEnvs 1 Δ Δ' →
(∀ l,
match envs_app false (Esnoc Enil j (array l (DfracOwn 1) (replicate (Z.to_nat n) v))) Δ' with
| Some Δ'' =>
envs_entails Δ'' (WP fill K (Val $ LitV $ LitLoc l) @ s; E {{ Φ }})
| None => False
end) →
envs_entails Δ (WP fill K (AllocN (Val $ LitV $ LitInt n) (Val v)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ? ? HΔ.
rewrite -wp_bind. eapply wand_apply; first exact: wp_allocN.
rewrite left_id into_laterN_env_sound; apply later_mono, forall_intro=> l.
specialize (HΔ l).
destruct (envs_app _ _ _) as [Δ''|] eqn:HΔ'; [ | contradiction ].
rewrite envs_app_sound //; simpl.
apply wand_intro_l. by rewrite (sep_elim_l (l ↦∗ _)%I) right_id wand_elim_r.
Qed.
Lemma tac_twp_allocN Δ s E j K v n Φ :
(0 < n)%Z →
(∀ l,
match envs_app false (Esnoc Enil j (array l (DfracOwn 1) (replicate (Z.to_nat n) v))) Δ with
| Some Δ' =>
envs_entails Δ' (WP fill K (Val $ LitV $ LitLoc l) @ s; E [{ Φ }])
| None => False
end) →
envs_entails Δ (WP fill K (AllocN (Val $ LitV $ LitInt n) (Val v)) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> ? HΔ.
rewrite -twp_bind. eapply wand_apply; first exact: twp_allocN.
rewrite left_id. apply forall_intro=> l.
specialize (HΔ l).
destruct (envs_app _ _ _) as [Δ'|] eqn:HΔ'; [ | contradiction ].
rewrite envs_app_sound //; simpl.
apply wand_intro_l. by rewrite (sep_elim_l (l ↦∗ _)%I) right_id wand_elim_r.
Qed.
Lemma tac_wp_alloc Δ Δ' s E j K v Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
(∀ l,
match envs_app false (Esnoc Enil j (l ↦ v)) Δ' with
| Some Δ'' =>
envs_entails Δ'' (WP fill K (Val $ LitV l) @ s; E {{ Φ }})
| None => False
end) →
envs_entails Δ (WP fill K (Alloc (Val v)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ? HΔ.
rewrite -wp_bind. eapply wand_apply; first exact: wp_alloc.
rewrite left_id into_laterN_env_sound; apply later_mono, forall_intro=> l.
specialize (HΔ l).
destruct (envs_app _ _ _) as [Δ''|] eqn:HΔ'; [ | contradiction ].
rewrite envs_app_sound //; simpl.
apply wand_intro_l. by rewrite (sep_elim_l (l ↦ v)%I) right_id wand_elim_r.
Qed.
Lemma tac_twp_alloc Δ s E j K v Φ :
(∀ l,
match envs_app false (Esnoc Enil j (l ↦ v)) Δ with
| Some Δ' =>
envs_entails Δ' (WP fill K (Val $ LitV $ LitLoc l) @ s; E [{ Φ }])
| None => False
end) →
envs_entails Δ (WP fill K (Alloc (Val v)) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> HΔ.
rewrite -twp_bind. eapply wand_apply; first exact: twp_alloc.
rewrite left_id. apply forall_intro=> l.
specialize (HΔ l).
destruct (envs_app _ _ _) as [Δ''|] eqn:HΔ'; [ | contradiction ].
rewrite envs_app_sound //; simpl.
apply wand_intro_l. by rewrite (sep_elim_l (l ↦ v)%I) right_id wand_elim_r.
Qed.
Lemma tac_wp_free Δ Δ' s E i K l v Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ v)%I →
(let Δ'' := envs_delete false i false Δ' in
envs_entails Δ'' (WP fill K (Val $ LitV LitUnit) @ s; E {{ Φ }})) →
envs_entails Δ (WP fill K (Free (LitV l)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ? Hlk Hfin.
rewrite -wp_bind. eapply wand_apply; first exact: wp_free.
rewrite into_laterN_env_sound -later_sep envs_lookup_split //; simpl.
rewrite -Hfin wand_elim_r (envs_lookup_sound' _ _ _ _ _ Hlk).
apply later_mono, sep_mono_r, wand_intro_r. rewrite right_id //.
Qed.
Lemma tac_twp_free Δ s E i K l v Φ :
envs_lookup i Δ = Some (false, l ↦ v)%I →
(let Δ' := envs_delete false i false Δ in
envs_entails Δ' (WP fill K (Val $ LitV LitUnit) @ s; E [{ Φ }])) →
envs_entails Δ (WP fill K (Free (LitV l)) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> Hlk Hfin.
rewrite -twp_bind. eapply wand_apply; first exact: twp_free.
rewrite envs_lookup_split //; simpl.
rewrite -Hfin wand_elim_r (envs_lookup_sound' _ _ _ _ _ Hlk).
apply sep_mono_r, wand_intro_r. rewrite right_id //.
Qed.
Lemma tac_wp_load Δ Δ' s E i K b l q v Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (b, l ↦{q} v)%I →
envs_entails Δ' (WP fill K (Val v) @ s; E {{ Φ }}) →
envs_entails Δ (WP fill K (Load (LitV l)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ?? Hi.
rewrite -wp_bind. eapply wand_apply; first exact: wp_load.
rewrite into_laterN_env_sound -later_sep envs_lookup_split //; simpl.
apply later_mono.
destruct b; simpl.
* iIntros "[#$ He]". iIntros "_". iApply Hi. iApply "He". iFrame "#".
* by apply sep_mono_r, wand_mono.
Qed.
Lemma tac_twp_load Δ s E i K b l q v Φ :
envs_lookup i Δ = Some (b, l ↦{q} v)%I →
envs_entails Δ (WP fill K (Val v) @ s; E [{ Φ }]) →
envs_entails Δ (WP fill K (Load (LitV l)) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> ? Hi.
rewrite -twp_bind. eapply wand_apply; first exact: twp_load.
rewrite envs_lookup_split //; simpl.
destruct b; simpl.
- iIntros "[#$ He]". iIntros "_". iApply Hi. iApply "He". iFrame "#".
- iIntros "[$ He]". iIntros "Hl". iApply Hi. iApply "He". iFrame "Hl".
Qed.
Lemma tac_wp_store Δ Δ' s E i K l v v' Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ v)%I →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v')) Δ' with
| Some Δ'' => envs_entails Δ'' (WP fill K (Val $ LitV LitUnit) @ s; E {{ Φ }})
| None => False
end →
envs_entails Δ (WP fill K (Store (LitV l) (Val v')) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ???.
destruct (envs_simple_replace _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
rewrite -wp_bind. eapply wand_apply; first by eapply wp_store.
rewrite into_laterN_env_sound -later_sep envs_simple_replace_sound //; simpl.
rewrite right_id. by apply later_mono, sep_mono_r, wand_mono.
Qed.
Lemma tac_twp_store Δ s E i K l v v' Φ :
envs_lookup i Δ = Some (false, l ↦ v)%I →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v')) Δ with
| Some Δ' => envs_entails Δ' (WP fill K (Val $ LitV LitUnit) @ s; E [{ Φ }])
| None => False
end →
envs_entails Δ (WP fill K (Store (LitV l) v') @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq. intros.
destruct (envs_simple_replace _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
rewrite -twp_bind. eapply wand_apply; first by eapply twp_store.
rewrite envs_simple_replace_sound //; simpl.
rewrite right_id. by apply sep_mono_r, wand_mono.
Qed.
Lemma tac_wp_cmpxchg Δ Δ' s E i K l v v1 v2 Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ v)%I →
vals_compare_safe v v1 →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v2)) Δ' with
| Some Δ'' =>
v = v1 →
envs_entails Δ'' (WP fill K (Val $ PairV v (LitV $ LitBool true)) @ s; E {{ Φ }})
| None => False
end →
(v ≠ v1 →
envs_entails Δ' (WP fill K (Val $ PairV v (LitV $ LitBool false)) @ s; E {{ Φ }})) →
envs_entails Δ (WP fill K (CmpXchg (LitV l) (Val v1) (Val v2)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ??? Hsuc Hfail.
destruct (envs_simple_replace _ _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
destruct (decide (v = v1)) as [Heq|Hne].
- rewrite -wp_bind. eapply wand_apply.
{ eapply wp_cmpxchg_suc; eauto. }
rewrite into_laterN_env_sound -later_sep /= {1}envs_simple_replace_sound //; simpl.
apply later_mono, sep_mono_r. rewrite right_id. apply wand_mono; auto.
- rewrite -wp_bind. eapply wand_apply.
{ eapply wp_cmpxchg_fail; eauto. }
rewrite into_laterN_env_sound -later_sep /= {1}envs_lookup_split //; simpl.
apply later_mono, sep_mono_r. apply wand_mono; auto.
Qed.
Lemma tac_twp_cmpxchg Δ s E i K l v v1 v2 Φ :
envs_lookup i Δ = Some (false, l ↦ v)%I →
vals_compare_safe v v1 →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v2)) Δ with
| Some Δ' =>
v = v1 →
envs_entails Δ' (WP fill K (Val $ PairV v (LitV $ LitBool true)) @ s; E [{ Φ }])
| None => False
end →
(v ≠ v1 →
envs_entails Δ (WP fill K (Val $ PairV v (LitV $ LitBool false)) @ s; E [{ Φ }])) →
envs_entails Δ (WP fill K (CmpXchg (LitV l) v1 v2) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> ?? Hsuc Hfail.
destruct (envs_simple_replace _ _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
destruct (decide (v = v1)) as [Heq|Hne].
- rewrite -twp_bind. eapply wand_apply.
{ eapply twp_cmpxchg_suc; eauto. }
rewrite /= {1}envs_simple_replace_sound //; simpl.
apply sep_mono_r. rewrite right_id. apply wand_mono; auto.
- rewrite -twp_bind. eapply wand_apply.
{ eapply twp_cmpxchg_fail; eauto. }
rewrite /= {1}envs_lookup_split //; simpl.
apply sep_mono_r. apply wand_mono; auto.
Qed.
Lemma tac_wp_cmpxchg_fail Δ Δ' s E i K l q v v1 v2 Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦{q} v)%I →
v ≠ v1 → vals_compare_safe v v1 →
envs_entails Δ' (WP fill K (Val $ PairV v (LitV $ LitBool false)) @ s; E {{ Φ }}) →
envs_entails Δ (WP fill K (CmpXchg (LitV l) v1 v2) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ?????.
rewrite -wp_bind. eapply wand_apply; first exact: wp_cmpxchg_fail.
rewrite into_laterN_env_sound -later_sep envs_lookup_split //; simpl.
by apply later_mono, sep_mono_r, wand_mono.
Qed.
Lemma tac_twp_cmpxchg_fail Δ s E i K l q v v1 v2 Φ :
envs_lookup i Δ = Some (false, l ↦{q} v)%I →
v ≠ v1 → vals_compare_safe v v1 →
envs_entails Δ (WP fill K (Val $ PairV v (LitV $ LitBool false)) @ s; E [{ Φ }]) →
envs_entails Δ (WP fill K (CmpXchg (LitV l) v1 v2) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq. intros. rewrite -twp_bind.
eapply wand_apply; first exact: twp_cmpxchg_fail.
rewrite envs_lookup_split //=. by do 2 f_equiv.
Qed.
Lemma tac_wp_cmpxchg_suc Δ Δ' s E i K l v v1 v2 Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ v)%I →
v = v1 → vals_compare_safe v v1 →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v2)) Δ' with
| Some Δ'' =>
envs_entails Δ'' (WP fill K (Val $ PairV v (LitV $ LitBool true)) @ s; E {{ Φ }})
| None => False
end →
envs_entails Δ (WP fill K (CmpXchg (LitV l) v1 v2) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ?????; subst.
destruct (envs_simple_replace _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
rewrite -wp_bind. eapply wand_apply.
{ eapply wp_cmpxchg_suc; eauto. }
rewrite into_laterN_env_sound -later_sep envs_simple_replace_sound //; simpl.
rewrite right_id. by apply later_mono, sep_mono_r, wand_mono.
Qed.
Lemma tac_twp_cmpxchg_suc Δ s E i K l v v1 v2 Φ :
envs_lookup i Δ = Some (false, l ↦ v)%I →
v = v1 → vals_compare_safe v v1 →
match envs_simple_replace i false (Esnoc Enil i (l ↦ v2)) Δ with
| Some Δ' =>
envs_entails Δ' (WP fill K (Val $ PairV v (LitV $ LitBool true)) @ s; E [{ Φ }])
| None => False
end →
envs_entails Δ (WP fill K (CmpXchg (LitV l) v1 v2) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=>????; subst.
destruct (envs_simple_replace _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
rewrite -twp_bind. eapply wand_apply.
{ eapply twp_cmpxchg_suc; eauto. }
rewrite envs_simple_replace_sound //; simpl.
rewrite right_id. by apply sep_mono_r, wand_mono.
Qed.
Lemma tac_wp_faa Δ Δ' s E i K l z1 z2 Φ :
MaybeIntoLaterNEnvs 1 Δ Δ' →
envs_lookup i Δ' = Some (false, l ↦ LitV z1)%I →
match envs_simple_replace i false (Esnoc Enil i (l ↦ LitV (LitInt (z1 + z2)))) Δ' with
| Some Δ'' => envs_entails Δ'' (WP fill K (Val $ LitV z1) @ s; E {{ Φ }})
| None => False
end →
envs_entails Δ (WP fill K (FAA (LitV l) (LitV z2)) @ s; E {{ Φ }}).
Proof.
rewrite envs_entails_eq=> ???.
destruct (envs_simple_replace _ _ _) as [Δ''|] eqn:HΔ''; [ | contradiction ].
rewrite -wp_bind. eapply wand_apply; first exact: (wp_faa _ _ _ z1 z2).
rewrite into_laterN_env_sound -later_sep envs_simple_replace_sound //; simpl.
rewrite right_id. by apply later_mono, sep_mono_r, wand_mono.
Qed.
Lemma tac_twp_faa Δ s E i K l z1 z2 Φ :
envs_lookup i Δ = Some (false, l ↦ LitV z1)%I →
match envs_simple_replace i false (Esnoc Enil i (l ↦ LitV (LitInt (z1 + z2)))) Δ with
| Some Δ' => envs_entails Δ' (WP fill K (Val $ LitV z1) @ s; E [{ Φ }])
| None => False
end →
envs_entails Δ (WP fill K (FAA (LitV l) (LitV z2)) @ s; E [{ Φ }]).
Proof.
rewrite envs_entails_eq=> ??.
destruct (envs_simple_replace _ _ _) as [Δ'|] eqn:HΔ'; [ | contradiction ].
rewrite -twp_bind. eapply wand_apply; first exact: (twp_faa _ _ _ z1 z2).
rewrite envs_simple_replace_sound //; simpl.
rewrite right_id. by apply sep_mono_r, wand_mono.
Qed.
End heap.
(** The tactic [wp_apply_core lem tac_suc tac_fail] evaluates [lem] to a
hypothesis [H] that can be applied, and then runs [wp_bind_core K; tac_suc H]
for every possible evaluation context [K].
- The tactic [tac_suc] should do [iApplyHyp H] to actually apply the hypothesis,
but can perform other operations in addition (see [wp_apply] and [awp_apply]
below).
- The tactic [tac_fail cont] is called when [tac_suc H] fails for all evaluation
contexts [K], and can perform further operations before invoking [cont] to
try again.
TC resolution of [lem] premises happens *after* [tac_suc H] got executed. *)
Ltac wp_apply_core lem tac_suc tac_fail := first
[iPoseProofCore lem as false (fun H =>
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
reshape_expr e ltac:(fun K e' =>
wp_bind_core K; tac_suc H)
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
reshape_expr e ltac:(fun K e' =>
twp_bind_core K; tac_suc H)
| _ => fail 1 "wp_apply: not a 'wp'"
end)
|tac_fail ltac:(fun _ => wp_apply_core lem tac_suc tac_fail)
|let P := type of lem in
fail "wp_apply: cannot apply" lem ":" P ].
Tactic Notation "wp_apply" open_constr(lem) :=
wp_apply_core lem ltac:(fun H => iApplyHyp H; try iNext; try wp_expr_simpl)
ltac:(fun cont => fail).
Tactic Notation "wp_smart_apply" open_constr(lem) :=
wp_apply_core lem ltac:(fun H => iApplyHyp H; try iNext; try wp_expr_simpl)
ltac:(fun cont => wp_pure _; []; cont ()).
(** Tactic tailored for atomic triples: the first, simple one just runs
[iAuIntro] on the goal, as atomic triples always have an atomic update as their
premise. The second one additionally does some framing: it gets rid of [Hs] from
the context, which is intended to be the non-laterable assertions that iAuIntro
would choke on. You get them all back in the continuation of the atomic
operation. *)
Tactic Notation "awp_apply" open_constr(lem) :=
wp_apply_core lem ltac:(fun H => iApplyHyp H) ltac:(fun cont => fail);
last iAuIntro.
Tactic Notation "awp_apply" open_constr(lem) "without" constr(Hs) :=
wp_apply_core lem
ltac:(fun H =>
iApply wp_frame_wand_l; iSplitL Hs; [iAccu|iApplyHyp H])
ltac:(fun cont => fail);
last iAuIntro.
Tactic Notation "wp_alloc" ident(l) "as" constr(H) :=
let Htmp := iFresh in
let finish _ :=
first [intros l | fail 1 "wp_alloc:" l "not fresh"];
pm_reduce;
lazymatch goal with
| |- False => fail 1 "wp_alloc:" H "not fresh"
| _ => iDestructHyp Htmp as H; wp_finish
end in
wp_pures;
(** The code first tries to use allocation lemma for a single reference,
      i.e., [tac_wp_alloc] (respectively, [tac_twp_alloc]).
If that fails, it tries to use the lemma [tac_wp_allocN]
(respectively, [tac_twp_allocN]) for allocating an array.
Notice that we could have used the array allocation lemma also for single
references. However, that would produce the resource l ↦∗ [v] instead of
l ↦ v for single references. These are logically equivalent assertions
but are not equal. *)
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
let process_single _ :=
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_alloc _ _ _ _ Htmp K))
|fail 1 "wp_alloc: cannot find 'Alloc' in" e];
[iSolveTC
|finish ()]
in
let process_array _ :=
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_allocN _ _ _ _ Htmp K))
|fail 1 "wp_alloc: cannot find 'Alloc' in" e];
[idtac|iSolveTC
|finish ()]
in (process_single ()) || (process_array ())
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
let process_single _ :=
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_alloc _ _ _ Htmp K))
|fail 1 "wp_alloc: cannot find 'Alloc' in" e];
finish ()
in
let process_array _ :=
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_allocN _ _ _ Htmp K))
|fail 1 "wp_alloc: cannot find 'Alloc' in" e];
[idtac
|finish ()]
in (process_single ()) || (process_array ())
| _ => fail "wp_alloc: not a 'wp'"
end.
Tactic Notation "wp_alloc" ident(l) :=
wp_alloc l as "?".
Tactic Notation "wp_free" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_free: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_free _ _ _ _ _ K))
|fail 1 "wp_free: cannot find 'Free' in" e];
[iSolveTC
|solve_mapsto ()
|pm_reduce; wp_finish]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_free _ _ _ _ K))
|fail 1 "wp_free: cannot find 'Free' in" e];
[solve_mapsto ()
|pm_reduce; wp_finish]
| _ => fail "wp_free: not a 'wp'"
end.
Tactic Notation "wp_load" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_load: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_load _ _ _ _ _ K))
|fail 1 "wp_load: cannot find 'Load' in" e];
[iSolveTC
|solve_mapsto ()
|wp_finish]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_load _ _ _ _ K))
|fail 1 "wp_load: cannot find 'Load' in" e];
[solve_mapsto ()
|wp_finish]
| _ => fail "wp_load: not a 'wp'"
end.
Tactic Notation "wp_store" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_store: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_store _ _ _ _ _ K))
|fail 1 "wp_store: cannot find 'Store' in" e];
[iSolveTC
|solve_mapsto ()
|pm_reduce; first [wp_seq|wp_finish]]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_store _ _ _ _ K))
|fail 1 "wp_store: cannot find 'Store' in" e];
[solve_mapsto ()
|pm_reduce; first [wp_seq|wp_finish]]
| _ => fail "wp_store: not a 'wp'"
end.
Tactic Notation "wp_cmpxchg" "as" simple_intropattern(H1) "|" simple_intropattern(H2) :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_cmpxchg: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_cmpxchg _ _ _ _ _ K))
|fail 1 "wp_cmpxchg: cannot find 'CmpXchg' in" e];
[iSolveTC
|solve_mapsto ()
|try solve_vals_compare_safe
|pm_reduce; intros H1; wp_finish
|intros H2; wp_finish]
  | |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_cmpxchg _ _ _ _ K))
|fail 1 "wp_cmpxchg: cannot find 'CmpXchg' in" e];
[solve_mapsto ()
|try solve_vals_compare_safe
|pm_reduce; intros H1; wp_finish
|intros H2; wp_finish]
| _ => fail "wp_cmpxchg: not a 'wp'"
end.
Tactic Notation "wp_cmpxchg_fail" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_cmpxchg_fail: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_cmpxchg_fail _ _ _ _ _ K))
|fail 1 "wp_cmpxchg_fail: cannot find 'CmpXchg' in" e];
[iSolveTC
|solve_mapsto ()
|try (simpl; congruence) (* value inequality *)
|try solve_vals_compare_safe
|wp_finish]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_cmpxchg_fail _ _ _ _ K))
|fail 1 "wp_cmpxchg_fail: cannot find 'CmpXchg' in" e];
[solve_mapsto ()
|try (simpl; congruence) (* value inequality *)
|try solve_vals_compare_safe
|wp_finish]
| _ => fail "wp_cmpxchg_fail: not a 'wp'"
end.
Tactic Notation "wp_cmpxchg_suc" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_cmpxchg_suc: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_cmpxchg_suc _ _ _ _ _ K))
|fail 1 "wp_cmpxchg_suc: cannot find 'CmpXchg' in" e];
[iSolveTC
|solve_mapsto ()
|try (simpl; congruence) (* value equality *)
|try solve_vals_compare_safe
|pm_reduce; wp_finish]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_cmpxchg_suc _ _ _ _ K))
|fail 1 "wp_cmpxchg_suc: cannot find 'CmpXchg' in" e];
[solve_mapsto ()
|try (simpl; congruence) (* value equality *)
|try solve_vals_compare_safe
|pm_reduce; wp_finish]
| _ => fail "wp_cmpxchg_suc: not a 'wp'"
end.
Tactic Notation "wp_faa" :=
let solve_mapsto _ :=
let l := match goal with |- _ = Some (_, (?l ↦{_} _)%I) => l end in
iAssumptionCore || fail "wp_faa: cannot find" l "↦ ?" in
wp_pures;
lazymatch goal with
| |- envs_entails _ (wp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_wp_faa _ _ _ _ _ K))
|fail 1 "wp_faa: cannot find 'FAA' in" e];
[iSolveTC
|solve_mapsto ()
|pm_reduce; wp_finish]
| |- envs_entails _ (twp ?s ?E ?e ?Q) =>
first
[reshape_expr e ltac:(fun K e' => eapply (tac_twp_faa _ _ _ _ K))
|fail 1 "wp_faa: cannot find 'FAA' in" e];
[solve_mapsto ()
|pm_reduce; wp_finish]
| _ => fail "wp_faa: not a 'wp'"
end.
|
####### SIMULATION OF MARGINAL ###########################################
### +++ INITIALIZATION +++
### import packages
using Random, LinearAlgebra, RDatasets, MixedModels, Distributions, Optim, CSV, Dates, CategoricalArrays, GLM
### define functions
vcov2 = function(x, xbar, σ, τ, n, γ)
m = length(xbar)
a = γ / τ^2 * m # 1-1 entry
d = (sum(x.^2) - n * γ * sum(xbar.^2) ) / σ^2 # 2-2 entry
b = γ / τ^2 / n * sum(x) # 2-1 entry
[d -b; -b a] / (a * d - b^2) # return inverse (X^t V^-1 X)^-1
end
beta = function(vcov1, x, xbar, y, ybar, σ, τ, n, γ)
c1 = γ / τ^2 * sum(ybar) # = 1^t V^-1 y
c2 = (x' * y - n * γ * sum(ybar .* xbar))/σ^2 # = x^t V^-1 y
vcov1 * [c1 c2]' # (X^tV^-1X)^-1 X^tV^-1y
end
### SELECT PARAMETERS
m = 10
n = 1
τ0 = 8^0.5
σ0 = 4^0.5
### fix parameters
p = 2
nsim = 10000
Δ = 0
Random.seed!(123)
### +++ PER SPECIFICATION SETTING +++
REdistr = "normal"
### variable parameters
N = m * n
β0 = [-4.9, 0.03]
x = 10 * randn(N)
xbar = vec(sum(reshape(x, n, :), dims=1) / n)
X = hcat(fill(1, N), x)
### conditional evaluation
if REdistr == "normal"
v = τ0 * randn(m)
elseif REdistr == "tdist"
v = rand(TDist(2), m)
elseif REdistr == "chi2"
v = rand(Chisq(τ0^2 / 2), m) .- τ0^2 / 2 - m
end
u = β0[1] .+ β0[2] * x + repeat(v, inner = n)
μ0 = β0[1] .+ β0[2] .* xbar + v
hatmatrix = inv(X' * X) * X'
detX = det(X' * X)
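# Profile (REML-type) objective in ξ = exp(α[1]), the total variance of a
# homoscedastic working model y = Xβ + ε with Var(ε) = ξ I_N; it is minimized
# over α by BFGS below, and the estimate of τ² is recovered as the excess over σ0².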
loglik = function(α, y)
ξ = exp(α[1])
beta = hatmatrix * y
residual = y - X * beta
(p * log(detX / ξ) + N * log(ξ) + residual' * residual / ξ)
end
τ1 = zeros(10000, 1)
for i in 1:10000
y = u + σ0 * randn(N)
τ1[i] = sqrt(maximum([exp(optimize(α -> loglik(α,y), [1.0], BFGS()).minimizer[1]) - σ0^2 0]))
end
τ1 = mean(τ1) # should be (close to) τ0, but theory for conditional convergence is missing
σ1 = σ0 #mean(σ1)
### +++ KNOWN DELTA +++
### MARGINAL COVARIANCE MATRIX
### helpful parameters
γ0 = τ1^2 / (τ1^2 + σ1^2 / n)
vcov0 = vcov2(x, xbar, σ1, τ1, n, γ0) # = (X^tV^-1X)^-1
d0 = (1 - γ0) .* hcat(fill(1, m), xbar) # d' = l' - b'X
dd0 = d0' * d0
### assemble covariance matrix parts
g1 = γ0 * σ1^2 / n
### assemble inverse of covariance matrix with Woodbury
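# Woodbury identity used here: for Σ0 = g·I + D·C·Dᵗ (g = Σ0diag, D = d0, C = vcov0),
#   Σ0⁻¹ = (I - D·(g·C⁻¹ + DᵗD)⁻¹·Dᵗ) / g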
Σ0diag = g1
Σ0i = (I - d0 * inv(Σ0diag * inv(vcov0) + dd0) * d0' ) / Σ0diag
### CONDITIONAL COVARIANCE MATRIX
l1 = γ0^2 / n * σ1^2
### assemble inverse of covariance matrix with Woodbury
cΣ0diag = l1
cΣ0i = (I - d0 * inv(cΣ0diag * σ1^2 *
inv(vcov0 * (2 * γ0 * I + n^2 * dd0 * vcov0)) + dd0) * d0' ) / cΣ0diag
### NONCENTRALITY PARAMETER
AZv = (γ0 - 1) .* v + n / τ1^2 .* d0 * vcov0 * d0' * v
λ0 = AZv' * cΣ0i * AZv
### QUANTILES
qc0 = cquantile(NoncentralChisq(m, λ0), 0.05) # quantile of alpha = 0.05
qm = cquantile(Chisq(m), 0.05) # quantile of alpha = 0.05
### simulation
Coverage = zeros(5, length(Δ))
λ = zeros(nsim, 2)
Volume = ones(nsim, 5)
Τ = zeros(nsim, 1)
Statistic = zeros(nsim, 4)
for i in 1:nsim
# i = 1
### +++ EMPIRICAL +++
y = u + σ0 * randn(N)
τ = sqrt(maximum([exp(optimize(α -> loglik(α,y), [1.0], BFGS()).minimizer[1]) - σ0^2 0.01]))
Τ[i,1] = τ
ybar = vec(sum(reshape(y, n, :), dims=1) / n)
    ### error variance is treated as known (σ1 = σ0)
σ = σ1
### MARGINAL
### helpful parameters
γ = τ^2 / (τ^2 + σ^2 / n)
vcov1 = vcov2(x, xbar, σ, τ, n, γ) # = (X^tV^-1X)^-1
d = (1-γ) .* hcat(fill(1, m), xbar) # d' = l' - b'X
dd = d' * d
a = n * τ^2 + σ^2
### assemble diagonal matrix parts
g1 = γ * σ^2 / n
g3 = 2 * σ^4 / τ^2 * γ / N
### assemble inverse of covariance matrix with Woodbury
Σdiag = g1 + 2 * g3
Σ = Σdiag * I + d * vcov1 * d'
Σi = (I - d * inv(Σdiag * inv(vcov1) + dd) * d' ) / Σdiag
### CONDITIONAL
### assemble diagonal matrix parts
l1 = γ^2 / n * σ^2
l3_1 = 2 * σ^2 * τ^2 / N/ a^3
l3_2 = 8 / N / a^4 / n^2 * τ^2 * σ^2 * (
+ (a^2 - σ^2) / n * (
+ σ^6 / a^2 * n
- (σ^4 / a^2) * (τ + a / 2 * n))
+ σ^10 / a^2 * (n^3 + 1 / n)
+ a * σ^4 * (n - 1 + 1 / n / 2) +
- σ^6 * (2 * n - 1 + 1 / n) +
+ σ^8 / a * (1 + (n-1) * (n^2 + 1) / n) / 2)
l3_3 = 0
l3_4 = - 8 / N/ n * τ^2 * σ^4 / a^3 * (
- a
+ σ^2 * (n + 1))
l3 = 2 * (l3_1 + l3_2 + l3_3 + l3_4)
l4 = 2 * σ^4 / N / a^2 * ( 1 / n + 1)
l5 = 2 * n / N * σ^4 * (σ^2 - 2 * n * τ^2) / a^2
### assemble inverse of covariance matrix with Woodbury
cΣdiag = maximum([l1 + l3 + l4 - l5, 1/100000])
cΣi = (I - d * inv(cΣdiag * σ^2 *
inv(vcov1 * (2 * γ * I + n^2 * dd * vcov1)) + dd) * d' ) / cΣdiag
### COMPUTE MEAN
β1 = beta(vcov0, x, xbar, y, ybar, σ1, τ1, n, γ0)
μ1 = (1 - γ0) * (β1[1] .+ β1[2] .* xbar) + γ0 * ybar
β2 = beta(vcov1, x, xbar, y, ybar, σ, τ, n, γ) # = fixef(model)
μ2 = (1 - γ) * (β2[1] .+ β2[2] .* xbar) + γ * ybar
### NONCENTRALITY PARAMETER
ay = d0 * β1 - (1 - γ0) * ybar
λ[i, 1] = maximum([(ay' * cΣ0i * ay)[1] - tr(cΣ0i) * γ0^2 / n^3 / τ1^4 * σ1^2, 0])
ay = d * β2 - (1 - γ) * ybar
λ[i, 2] = maximum([(ay' * cΣi * ay)[1] - tr(cΣi) * σ^2 / a^2 / n, 0])
### QUANTILES
qc1 = cquantile(NoncentralChisq(m, λ[i, 1]), 0.05) # quantile of alpha = 0.05
qc2 = cquantile(NoncentralChisq(m, λ[i, 2]), 0.05) # quantile of alpha = 0.05
### COMPUTE STATISTICS
t1 = (μ1 - μ0) .+ Δ'
t2 = (μ2 - μ0) .+ Δ'
Statistic[i,:] = [sum(t1' .* (t1' * Σ0i), dims = 2)[1], #marginal known
sum(t2' .* (t2' * Σi), dims = 2)[1], #marginal REML
sum(t1' .* (t1' * cΣ0i), dims = 2)[1], #conditional known
sum(t2' .* (t2' * cΣi), dims = 2)[1]] #conditional REML
Coverage[1,1] += (Statistic[i,1] .< qm) / nsim
Coverage[2,1] += (Statistic[i,2] .< qm) / nsim
Coverage[3,1] += (Statistic[i,3] .< qc0) / nsim
Coverage[4,1] += (Statistic[i,3] .< qc1) / nsim
Coverage[5,1] += (Statistic[i,4] .< qc2) / nsim
### COMPUTE RELATIVE VOLUME
# (via approximations as the determinant of large matrices is numerically unstable)
Volume[i, 3] = (Σ0diag / cΣ0diag * qm / qc0)^(-m / 2)
Volume[i, 4] = (Σ0diag / cΣ0diag * qm / qc1)^(-m / 2)
Volume[i, 5] = (Σdiag / cΣdiag * qm / qc2)^(-m / 2)
end
|
(*<*)
(*
Title: FR.thy (FlexRay: Verification)
Author: Maria Spichkova <maria.spichkova at rmit.edu.au>, 2013
*)
(*>*)
header {* FlexRay: Verification *}
theory FR_proof
imports FR
begin
subsection {* Properties of the function Send *}
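text {* The predicate Send relates the return, send and get streams of a
  controller to its activation stream: a frame can be sent at time t only if
  the activation stream is nonempty at t (Send_L1), and conversely a nonempty
  activation together with a nonempty return message forces a send (Send_L2). *}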
lemma Send_L1:
assumes "Send return send get activation"
and "send t \<noteq> []"
shows "(activation t) \<noteq> []"
using assms by (simp add: Send_def, auto)
lemma Send_L2:
assumes "Send return send get activation"
and "(activation t) \<noteq> []"
and "return t \<noteq> []"
shows "(send t) \<noteq> []"
using assms by (simp add: Send_def)
subsection {* Properties of the component Scheduler *}
lemma Scheduler_L1:
assumes h1:"Scheduler C activation"
and h2:"(activation t) \<noteq> []"
shows "(t mod (cycleLength C)) mem (schedule C)"
using assms
proof -
{ assume a1:"\<not> t mod cycleLength C mem schedule C"
from h1 have
"if t mod cycleLength C mem schedule C
then activation t = [t mod cycleLength C]
else activation t = []"
by (simp add: Scheduler_def Let_def)
from a1 and this have "activation t = []" by simp
from this and h2 have sg3:"False" by simp
} from this have sg4:"(t mod (cycleLength C)) mem (schedule C)" by blast
from this show ?thesis by simp
qed
lemma Scheduler_L2:
assumes "Scheduler C activation"
and "\<not> (t mod cycleLength C) mem (schedule C)"
shows "activation t = []"
using assms by (simp add: Scheduler_def Let_def)
lemma Scheduler_L3:
assumes "Scheduler C activation"
and "(t mod cycleLength C) mem (schedule C)"
shows "activation t \<noteq> []"
using assms by (simp add: Scheduler_def Let_def)
lemma Scheduler_L4:
assumes "Scheduler C activation"
and "(t mod cycleLength C) mem (schedule C)"
shows "activation t = [t mod cycleLength C]"
using assms by (simp add: Scheduler_def Let_def)
lemma correct_DisjointSchedules1:
assumes h1:"DisjointSchedules n nC"
and h2:"IdenticCycleLength n nC"
and h3:"(t mod cycleLength (nC i)) mem schedule (nC i)"
and h4:"i < n"
and h5:"j < n"
and h6:"i \<noteq> j"
shows "\<not> (t mod cycleLength (nC j) mem schedule (nC j))"
proof -
from h1 and h4 and h5 and h6 have sg1:"disjoint (schedule (nC i)) (schedule (nC j))"
by (simp add: DisjointSchedules_def)
from h2 and h4 and h5 have sg2:"cycleLength (nC i) = cycleLength (nC j)"
by (metis IdenticCycleLength_def)
from sg1 and h3 have sg3:"\<not> (t mod (cycleLength (nC i))) mem (schedule (nC j))"
by (simp add: mem_notdisjoint2)
from sg2 and sg3 show ?thesis by simp
qed
subsection {* Disjoint Frames *}
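text {* If all controllers have pairwise disjoint schedules and identical cycle
  lengths, then at any time at most one of the streams nSend carries a frame;
  this is captured by the predicate inf_disj (lemma disjointFrame_L2). *}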
lemma disjointFrame_L1:
assumes h1:"DisjointSchedules n nC"
and h2:"IdenticCycleLength n nC"
and h3:"\<forall> i < n. FlexRayController (nReturn i) rcv
(nC i) (nStore i) (nSend i) (nGet i)"
and h4:"nSend i t \<noteq> []"
and h5:"i < n"
and h6:"j < n"
and h7:"i \<noteq> j"
shows "nSend j t = []"
proof -
from h3 and h5 have sg1:
"FlexRayController (nReturn i) rcv (nC i) (nStore i) (nSend i) (nGet i)"
by auto
from h3 and h6 have sg2:
"FlexRayController (nReturn j) rcv (nC j) (nStore j) (nSend j) (nGet j)"
by auto
from sg1 obtain activation1 where
a1:"Scheduler (nC i) activation1" and
a2:"BusInterface activation1 (nReturn i) rcv (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayController_def, auto)
from sg2 obtain activation2 where
a3:"Scheduler (nC j) activation2" and
a4:"BusInterface activation2 (nReturn j) rcv (nStore j) (nSend j) (nGet j)"
by (simp add: FlexRayController_def, auto)
from h1 and h5 and h6 and h7 have sg3:"disjoint (schedule (nC i)) (schedule (nC j))"
by (simp add: DisjointSchedules_def)
from a2 have sg4a:"Send (nReturn i) (nSend i) (nGet i) activation1"
by (simp add: BusInterface_def)
from sg4a and h4 have sg5:"activation1 t \<noteq> []" by (simp add: Send_L1)
from a1 and sg5 have sg6:"(t mod (cycleLength (nC i))) mem (schedule (nC i))"
by (simp add: Scheduler_L1)
from h2 and h5 and h6 have sg7:"cycleLength (nC i) = cycleLength (nC j)"
by (metis IdenticCycleLength_def)
from sg3 and sg6 have sg8:"\<not> (t mod (cycleLength (nC i))) mem (schedule (nC j))"
by (simp add: mem_notdisjoint2)
from sg8 and sg7 have sg9:"\<not> (t mod (cycleLength (nC j))) mem (schedule (nC j))"
by simp
from sg9 and a3 have sg10:"activation2 t = []" by (simp add: Scheduler_L2)
from a4 have sg11:"Send (nReturn j) (nSend j) (nGet j) activation2"
by (simp add: BusInterface_def)
from sg11 and sg10 show ?thesis by (simp add: Send_def)
qed
lemma disjointFrame_L2:
assumes "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "\<forall> i < n. FlexRayController (nReturn i) rcv
(nC i) (nStore i) (nSend i) (nGet i)"
shows "inf_disj n nSend"
using assms
apply (simp add: inf_disj_def, clarify)
by (rule disjointFrame_L1, auto)
lemma disjointFrame_L3:
assumes h1:"DisjointSchedules n nC"
and h2:"IdenticCycleLength n nC"
and h3:"\<forall> i < n. FlexRayController (nReturn i) rcv
(nC i) (nStore i) (nSend i) (nGet i)"
and h4:"t mod cycleLength (nC i) mem schedule (nC i)"
and h5:"i < n"
and h6:"j < n"
and h7:"i \<noteq> j"
shows "nSend j t = []"
proof -
from h2 and h5 and h6 have sg1:"cycleLength (nC i) = cycleLength (nC j)"
by (metis IdenticCycleLength_def)
from h1 and h5 and h6 and h7 have sg2:"disjoint (schedule (nC i)) (schedule (nC j))"
by (simp add: DisjointSchedules_def)
from sg2 and h4 have sg3:"\<not> (t mod (cycleLength (nC i))) mem (schedule (nC j))"
by (simp add: mem_notdisjoint2)
from sg1 and sg3 have sg4:"\<not> (t mod (cycleLength (nC j))) mem (schedule (nC j))"
by simp
from h3 and h6 have sg5:
"FlexRayController (nReturn j) rcv (nC j) (nStore j) (nSend j) (nGet j)"
by auto
from sg5 obtain activation2 where
a1:"Scheduler (nC j) activation2" and
a2:"BusInterface activation2 (nReturn j) rcv (nStore j) (nSend j) (nGet j)"
by (simp add: FlexRayController_def, auto)
from sg4 and a1 have sg6:"activation2 t = []" by (simp add: Scheduler_L2)
from a2 have sg7:"Send (nReturn j) (nSend j) (nGet j) activation2"
by (simp add: BusInterface_def)
from sg7 and sg6 show ?thesis by (simp add: Send_def)
qed
subsection {* Properties of the sheaf of channels nSend *}
lemma fr_Send1:
assumes frc:"FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h1:"\<not> (t mod cycleLength (nC i) mem schedule (nC i))"
shows "(nSend i) t = []"
proof -
from frc obtain activation where
a1:"Scheduler (nC i) activation" and
a2:"BusInterface activation (nReturn i) recv (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayController_def, auto)
from a1 and h1 have sg1:"activation t = []" by (simp add: Scheduler_L2)
from a2 have sg2:"Send (nReturn i) (nSend i) (nGet i) activation"
by (simp add: BusInterface_def)
from sg2 and sg1 show ?thesis by (simp add: Send_def)
qed
lemma fr_Send2:
assumes h1:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h2:"DisjointSchedules n nC"
and h3:"IdenticCycleLength n nC"
and h4:"t mod cycleLength (nC k) mem schedule (nC k)"
and h5:"k < n"
shows "nSend k t = nReturn k t"
proof -
from h1 and h5 have sg1:
"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
by auto
from sg1 obtain activation where
a1:"Scheduler (nC k) activation" and
a2:"BusInterface activation (nReturn k) recv (nStore k) (nSend k) (nGet k)"
by (simp add: FlexRayController_def, auto)
from a1 and h4 have sg3:"activation t \<noteq> []" by (simp add: Scheduler_L3)
from a2 have sg4:"Send (nReturn k) (nSend k) (nGet k) activation"
by (simp add: BusInterface_def)
from sg4 and sg3 show ?thesis by (simp add: Send_def)
qed
lemma fr_Send3:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
and "nReturn k t \<noteq> []"
shows "nSend k t \<noteq> []"
using assms by (simp add: fr_Send2)
lemma fr_Send4:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
and "nReturn k t \<noteq> []"
shows "\<exists>k. k < n \<longrightarrow> nSend k t \<noteq> []"
using assms
by (metis fr_Send3)
lemma fr_Send5:
assumes h1:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h2:"DisjointSchedules n nC"
and h3:"IdenticCycleLength n nC"
and h4:"t mod cycleLength (nC k) mem schedule (nC k)"
and h5:"k < n"
and h6:"nReturn k t \<noteq> []"
and h7:"\<forall>k<n. nSend k t = []"
shows "False"
proof -
from h1 and h2 and h3 and h4 and h5 and h6 have sg1:"nSend k t \<noteq> []"
by (simp add: fr_Send2)
from h7 and h5 have sg2:"nSend k t = []" by blast
from sg1 and sg2 show ?thesis by simp
qed
lemma fr_Send6:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
and "nReturn k t \<noteq> []"
shows "\<exists>k<n. nSend k t \<noteq> []"
using assms
by (metis fr_Send3)
lemma fr_Send7:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
and "j < n"
and "nReturn k t = []"
shows "nSend j t = []"
using assms
by (metis (full_types) disjointFrame_L3 fr_Send2)
lemma fr_Send8:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
and "nReturn k t = []"
shows "\<not> (\<exists>k<n. nSend k t \<noteq> [])"
using assms by (auto, simp add: fr_Send7)
lemma fr_nC_Send:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "k < n"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
shows "\<forall>j. j < n \<and> j \<noteq> k \<longrightarrow> (nSend j) t = []"
using assms by (clarify, simp add: disjointFrame_L3)
lemma length_nSend:
assumes h1:"BusInterface activation (nReturn i) recv (nStore i) (nSend i) (nGet i)"
and h2:"\<forall>t. length (nReturn i t) \<le> Suc 0"
shows "length (nSend i t) \<le> Suc 0"
proof -
from h1 have sg1:"Send (nReturn i) (nSend i) (nGet i) activation"
by (simp add: BusInterface_def)
from sg1 have sg2:
"if activation t = [] then nGet i t = [] \<and> nSend i t = []
else nGet i t = activation t \<and> nSend i t = nReturn i t"
by (simp add: Send_def)
show ?thesis
proof (cases "activation t = []")
assume a1:"activation t = []"
from sg2 and a1 show ?thesis by simp
next
assume a2:"activation t \<noteq> []"
from h2 have sg3:"length (nReturn i t) \<le> Suc 0" by auto
from sg2 and a2 and sg3 show ?thesis by simp
qed
qed
lemma msg_nSend:
assumes "BusInterface activation (nReturn i) recv (nStore i) (nSend i) (nGet i)"
and "msg (Suc 0) (nReturn i)"
shows "msg (Suc 0) (nSend i)"
using assms by (simp add: msg_def, clarify, simp add: length_nSend)
lemma Broadcast_nSend_empty1:
assumes h1:"Broadcast n nSend recv"
and h2:"\<forall>k<n. nSend k t = []"
shows "recv t = []"
using assms
by (metis Broadcast_def)
subsection {* Properties of the sheaf of channels nGet *}
lemma fr_nGet1a:
assumes h1:"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
and h2:"t mod cycleLength (nC k) mem schedule (nC k)"
shows "nGet k t = [t mod cycleLength (nC k)]"
proof -
from h1 obtain activation1 where
a1:"Scheduler (nC k) activation1" and
a2:"BusInterface activation1 (nReturn k) recv (nStore k) (nSend k) (nGet k)"
by (simp add: FlexRayController_def, auto)
from a2 have sg1:"Send (nReturn k) (nSend k) (nGet k) activation1"
by (simp add: BusInterface_def)
from sg1 have sg2:
"if activation1 t = [] then nGet k t = [] \<and> nSend k t = []
else nGet k t = activation1 t \<and> nSend k t = nReturn k t"
by (simp add: Send_def)
from a1 and h2 have sg3:"activation1 t = [t mod cycleLength (nC k)]"
by (simp add: Scheduler_L4)
from sg2 and sg3 show ?thesis by simp
qed
lemma fr_nGet1:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
shows "nGet k t = [t mod cycleLength (nC k)]"
using assms
by (metis fr_nGet1a)
lemma fr_nGet2:
assumes h1:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h2:"\<not> (t mod cycleLength (nC k) mem schedule (nC k))"
and h3:"k < n"
shows "nGet k t = []"
proof -
from h1 and h3 have sg1:
"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
by auto
from sg1 and h2 show ?thesis by (rule fr_nGet2a)
qed
lemma length_nGet1:
assumes "FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
shows "length (nGet k t) \<le> Suc 0"
proof (cases "t mod cycleLength (nC k) mem schedule (nC k)")
assume "t mod cycleLength (nC k) mem schedule (nC k)"
from assms and this have "nGet k t = [t mod cycleLength (nC k)]"
by (rule fr_nGet1a)
then show ?thesis by auto
next
assume "\<not> (t mod cycleLength (nC k) mem schedule (nC k))"
from assms and this have "nGet k t = []" by (rule fr_nGet2a)
then show ?thesis by auto
qed
lemma msg_nGet1:
assumes "FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
shows "msg (Suc 0) (nGet k)"
using assms
by (simp add: msg_def, auto, rule length_nGet1)
lemma msg_nGet2:
assumes "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "k < n"
shows "msg (Suc 0) (nGet k)"
using assms
by (metis msg_nGet1)
subsection {* Properties of the sheaf of channels nStore *}
lemma fr_nStore_nReturn1:
assumes h0:"Broadcast n nSend recv"
and h1:"inf_disj n nSend"
and h2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
and h5:"t mod cycleLength (nC k) mem schedule (nC k)"
and h6:"k < n"
and h7:"j < n"
and h8:"j \<noteq> k"
shows "nStore j t = nReturn k t"
proof -
from h2 and h6 have sg1:
"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
by auto
from h2 and h7 have sg2:
"FlexRayController (nReturn j) recv (nC j) (nStore j) (nSend j) (nGet j)"
by auto
from sg1 obtain activation1 where
a1:"Scheduler (nC k) activation1" and
a2:"BusInterface activation1 (nReturn k) recv (nStore k) (nSend k) (nGet k)"
by (simp add: FlexRayController_def, auto)
from sg2 obtain activation2 where
a3:"Scheduler (nC j) activation2" and
a4:"BusInterface activation2 (nReturn j) recv (nStore j) (nSend j) (nGet j)"
by (simp add: FlexRayController_def, auto)
from a4 have sg3:"Receive recv (nStore j) activation2"
by (simp add: BusInterface_def)
from this have sg4:
"if activation2 t = [] then nStore j t = recv t else nStore j t = []"
by (simp add: Receive_def)
from a1 and h5 have sg5:"activation1 t \<noteq> []"
by (simp add: Scheduler_L3)
from h4 and h6 and h7 have sg6:"cycleLength (nC k) = cycleLength (nC j)"
by (metis IdenticCycleLength_def)
from h3 and h6 and h7 and h8 have sg7:"disjoint (schedule (nC k)) (schedule (nC j))"
by (simp add: DisjointSchedules_def)
from sg7 and h5 have sg8:"\<not> (t mod (cycleLength (nC k))) mem (schedule (nC j))"
by (simp add: mem_notdisjoint2)
from sg6 and sg8 have sg9:"\<not> (t mod (cycleLength (nC j))) mem (schedule (nC j))"
by simp
from sg9 and a3 have sg10:"activation2 t = []" by (simp add: Scheduler_L2)
from sg10 and sg4 have sg11:"nStore j t = recv t" by simp
from h0 have sg15:
"if \<exists>k<n. nSend k t \<noteq> []
then recv t = nSend (SOME k. k < n \<and> nSend k t \<noteq> []) t
else recv t = []"
by (simp add: Broadcast_def)
show ?thesis
proof (cases "nReturn k t = []")
assume a5: "nReturn k t = []"
from h2 and h3 and h4 and h5 and h6 and a5 have sg16:"\<not> (\<exists>k<n. nSend k t \<noteq> [])"
by (simp add: fr_Send8)
from sg16 and sg15 have sg17:"recv t = []" by simp
from sg11 and sg17 have sg18:"nStore j t = []" by simp
from this and a5 show ?thesis by simp
next
assume a6:"nReturn k t \<noteq> []"
from h2 and h3 and h4 and h5 and h6 and a6 have sg19:"\<exists>k<n. nSend k t \<noteq> []"
by (simp add: fr_Send6)
from h2 and h3 and h4 and h5 and h6 and a6 have sg20:"nSend k t \<noteq> []"
by (simp add: fr_Send3)
from h1 and sg20 and h6 have sg21:"(SOME k. k < n \<and> nSend k t \<noteq> []) = k"
by (simp add: inf_disj_index)
from sg15 and sg19 have sg22:"recv t = nSend (SOME k. k < n \<and> nSend k t \<noteq> []) t"
by simp
from sg22 and sg21 have sg23:"recv t = nSend k t" by simp
from h2 and h3 and h4 and h5 and h6 have sg24:"nSend k t = nReturn k t"
by (simp add: fr_Send2)
from sg11 and sg23 and sg24 show ?thesis by simp
qed
qed
lemma fr_nStore_nReturn2:
assumes h1:"Cable n nSend recv"
and h2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
and h5:"t mod cycleLength (nC k) mem schedule (nC k)"
and h6:"k < n"
and h7:"j < n"
and h8:"j \<noteq> k"
shows "nStore j t = nReturn k t"
proof -
from h1 have sg1:"inf_disj n nSend \<longrightarrow> Broadcast n nSend recv"
by (simp add: Cable_def)
from h3 and h4 and h2 have sg2:"inf_disj n nSend"
by (simp add: disjointFrame_L2)
from sg1 and sg2 have sg3:"Broadcast n nSend recv" by simp
from sg3 and sg2 and assms show ?thesis by (simp add: fr_nStore_nReturn1)
qed
lemma fr_nStore_empty1:
assumes h1:"Cable n nSend recv"
and h2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
and h5:"(t mod cycleLength (nC k) mem schedule (nC k))"
and h6:"k < n"
shows "nStore k t = []"
proof -
from h2 and h6 have sg1:
"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
by auto
from sg1 obtain activation1 where
a1:"Scheduler (nC k) activation1" and
a2:"BusInterface activation1 (nReturn k) recv (nStore k) (nSend k) (nGet k)"
by (simp add: FlexRayController_def, auto)
from a2 have sg2:"Receive recv (nStore k) activation1"
by (simp add: BusInterface_def)
from this have sg3:
"if activation1 t = [] then nStore k t = recv t else nStore k t = []"
by (simp add: Receive_def)
from a1 and h5 have sg4:"activation1 t \<noteq> []"
by (simp add: Scheduler_L3)
from sg3 and sg4 show ?thesis by simp
qed
lemma fr_nStore_nReturn3:
assumes "Cable n nSend recv"
and "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "t mod cycleLength (nC k) mem schedule (nC k)"
and "k < n"
shows "\<forall>j. j < n \<and> j \<noteq> k \<longrightarrow> nStore j t = nReturn k t"
using assms
by (clarify, simp add: fr_nStore_nReturn2)
lemma length_nStore:
assumes h1:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and h2:"DisjointSchedules n nC"
and h3:"IdenticCycleLength n nC"
and h4:"inf_disj n nSend"
and h5:"i < n"
and h6:"\<forall> i<n. msg (Suc 0) (nReturn i)"
and h7:"Broadcast n nSend recv"
shows "length (nStore i t) \<le> Suc 0"
proof -
from h7 have sg1:
"if \<exists>k<n. nSend k t \<noteq> []
then recv t = nSend (SOME k. k < n \<and> nSend k t \<noteq> []) t
else recv t = []"
by (simp add: Broadcast_def)
show ?thesis
proof (cases "\<exists>k<n. nSend k t \<noteq> []")
assume "\<exists>k<n. nSend k t \<noteq> []"
from this obtain k where a2:"k<n" and a3:"nSend k t \<noteq> []" by auto
from h1 and a2 have
"FlexRayController (nReturn k) recv (nC k) (nStore k) (nSend k) (nGet k)"
by auto
then obtain activation1 where
a4:"Scheduler (nC k) activation1" and
a5:"BusInterface activation1 (nReturn k) recv (nStore k) (nSend k) (nGet k)"
by (simp add: FlexRayController_def, auto)
from a5 have sg5:"Send (nReturn k) (nSend k) (nGet k) activation1"
by (simp add: BusInterface_def)
from a5 have sg6:"Receive recv (nStore k) activation1"
by (simp add: BusInterface_def)
from sg5 and a3 have sg7:"(activation1 t) \<noteq> []" by (simp add: Send_L1)
from sg6 have sg8:
"if activation1 t = []
then nStore k t = recv t else nStore k t = []"
by (simp add: Receive_def)
from sg8 and sg7 have sg9:"nStore k t = []" by simp
from a4 and sg7 have sg10:"(t mod (cycleLength (nC k))) mem (schedule (nC k))"
by (simp add: Scheduler_L1)
show ?thesis
proof (cases "i = k")
assume "i = k"
from sg9 and this show ?thesis by simp
next
assume "i \<noteq> k"
from h7 and h4 and h1 and h2 and h3 and sg10 and a2 and h5 and this have sg11:
"nStore i t = nReturn k t"
by (simp add: fr_nStore_nReturn1)
from h6 and a2 have sg12:"msg (Suc 0) (nReturn k)" by auto
from a2 and h6 have sg13:"length (nReturn k t) \<le> Suc 0"
by (simp add: msg_def)
from sg11 and sg13 show ?thesis by simp
qed
next
assume "\<not> (\<exists>k<n. nSend k t \<noteq> [])"
from h7 and this have sg14:"recv t = []" by (simp add: Broadcast_nSend_empty1)
from h1 and h5 have
"FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
by auto
then obtain activation2 where
a11:"Scheduler (nC i) activation2" and
a12:"BusInterface activation2 (nReturn i) recv (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayController_def, auto)
from a12 have "Receive recv (nStore i) activation2"
by (simp add: BusInterface_def)
then have sg17:
"if activation2 t = []
then nStore i t = recv t else nStore i t = []"
by (simp add: Receive_def)
show ?thesis
proof (cases "activation2 t = []")
assume aa3:"activation2 t = []"
from sg17 and aa3 and sg14 have "nStore i t = []" by simp
then show ?thesis by simp
next
assume aa4:"activation2 t \<noteq> []"
from sg17 and aa4 have "nStore i t = []" by simp
then show ?thesis by simp
qed
qed
qed
lemma msg_nStore:
assumes " \<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
and "inf_disj n nSend"
and "i < n"
and "\<forall> i<n. msg (Suc 0) (nReturn i)"
and "Cable n nSend recv"
shows "msg (Suc 0) (nStore i)"
using assms
apply (simp (no_asm) add: msg_def, simp add: Cable_def, clarify)
by (simp add: length_nStore)
subsection {* Refinement Properties *}
lemma fr_refinement_FrameTransmission:
assumes "Cable n nSend recv"
and "\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
and "DisjointSchedules n nC"
and "IdenticCycleLength n nC"
shows "FrameTransmission n nStore nReturn nGet nC"
using assms
apply (simp add: FrameTransmission_def Let_def, auto)
apply (simp add: fr_nGet1)
by (simp add: fr_nStore_nReturn3)
lemma FlexRayArch_CorrectSheaf:
assumes "FlexRayArch n nReturn nC nStore nGet"
shows "CorrectSheaf n"
using assms by (simp add: FlexRayArch_def)
lemma FlexRayArch_FrameTransmission:
assumes h1:"FlexRayArch n nReturn nC nStore nGet"
and h2:"\<forall>i<n. msg (Suc 0) (nReturn i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
shows "FrameTransmission n nStore nReturn nGet nC"
proof -
from assms obtain nSend recv where
a1:"Cable n nSend recv" and
a2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayArch_def FlexRayArchitecture_def, auto)
from a1 and a2 and h3 and h4 show ?thesis
by (rule fr_refinement_FrameTransmission)
qed
lemma FlexRayArch_nGet:
assumes h1:"FlexRayArch n nReturn nC nStore nGet"
and h2:"\<forall>i<n. msg (Suc 0) (nReturn i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
and h5:"i < n"
shows "msg (Suc 0) (nGet i)"
proof -
from assms obtain nSend recv where
a1:"Cable n nSend recv" and
a2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayArch_def FlexRayArchitecture_def, auto)
from a2 and h5 show ?thesis by (rule msg_nGet2)
qed
lemma FlexRayArch_nStore:
assumes h1:"FlexRayArch n nReturn nC nStore nGet"
and h2:"\<forall>i<n. msg (Suc 0) (nReturn i)"
and h3:"DisjointSchedules n nC"
and h4:"IdenticCycleLength n nC"
and h5:"i < n"
shows "msg (Suc 0) (nStore i)"
proof -
from assms obtain nSend recv where
a1:"Cable n nSend recv" and
a2:"\<forall>i<n. FlexRayController (nReturn i) recv (nC i) (nStore i) (nSend i) (nGet i)"
by (simp add: FlexRayArch_def FlexRayArchitecture_def, auto)
from h3 and h4 and a2 have sg1:"inf_disj n nSend" by (simp add: disjointFrame_L2)
from a2 and h3 and h4 and sg1 and h5 and h2 and a1 show ?thesis
by (rule msg_nStore)
qed
theorem main_fr_refinement:
assumes "FlexRayArch n nReturn nC nStore nGet"
shows "FlexRay n nReturn nC nStore nGet"
using assms
by (simp add: FlexRay_def
FlexRayArch_CorrectSheaf
FlexRayArch_FrameTransmission
FlexRayArch_nGet
FlexRayArch_nStore)
end |
[STATEMENT]
lemma (in Square_impl) Square_spec:
shows "\<forall>n. \<Gamma>\<turnstile>\<lbrace>\<acute>N = n\<rbrace> \<acute>R :== PROC Square(\<acute>N) \<lbrace>\<acute>R = n * n\<rbrace>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n. \<Gamma>\<turnstile> \<lbrace>\<acute>N = n\<rbrace> \<acute>R :== PROC Square(\<acute>N) \<lbrace>\<acute>R = n * n\<rbrace>
[PROOF STEP]
by vcg simp |
[STATEMENT]
lemma Nmlize_Trg:
assumes "Arr t"
shows "\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>" and "\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor> &&& \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
2. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
have 1: "Obj (Trg t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Obj (Trg t)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Arr t
goal (1 subgoal):
1. Obj (Trg t)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
Obj (Trg t)
goal (2 subgoals):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
2. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
obtain a where a: "obj a \<and> Trg t = \<^bold>\<langle>a\<^bold>\<rangle>\<^sub>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>a. obj a \<and> Trg t = \<^bold>\<langle>a\<^bold>\<rangle>\<^sub>0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
Obj (Trg t)
goal (1 subgoal):
1. (\<And>a. obj a \<and> Trg t = \<^bold>\<langle>a\<^bold>\<rangle>\<^sub>0 \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases "Trg t", simp_all)
[PROOF STATE]
proof (state)
this:
obj a \<and> Trg t = \<^bold>\<langle>a\<^bold>\<rangle>\<^sub>0
goal (2 subgoals):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
2. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
show "\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
using a
[PROOF STATE]
proof (prove)
using this:
obj a \<and> Trg t = \<^bold>\<langle>a\<^bold>\<rangle>\<^sub>0
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
[PROOF STEP]
thus "\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>"
[PROOF STATE]
proof (prove)
using this:
\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
[PROOF STEP]
using assms Nmlize_in_Hom
[PROOF STATE]
proof (prove)
using this:
\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg t
Arr t
Arr ?t \<Longrightarrow> \<^bold>\<lfloor>?t\<^bold>\<rfloor> \<in> HHom (Src ?t) (Trg ?t)
Arr ?t \<Longrightarrow> \<^bold>\<lfloor>?t\<^bold>\<rfloor> \<in> VHom \<^bold>\<lfloor>Dom ?t\<^bold>\<rfloor> \<^bold>\<lfloor>Cod ?t\<^bold>\<rfloor>
goal (1 subgoal):
1. \<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<^bold>\<lfloor>Trg t\<^bold>\<rfloor> = Trg \<^bold>\<lfloor>t\<^bold>\<rfloor>
goal:
No subgoals!
[PROOF STEP]
qed |
Require Coq.Bool.Bool.
Module BoolNotation.
Infix "||" := orb.
Infix "&&" := andb.
End BoolNotation.
Import BoolNotation.
Definition consider_bool (b:bool) : {b=true}+{b=false}.
Proof. destruct (Bool.bool_dec b true) as [H | H] ; eauto.
apply Bool.not_true_is_false in H ; eauto.
Defined. (* Defined (not Qed) keeps the decision procedure transparent, so it can compute. *)
Definition orf {A} (f:A -> bool) (g:A -> bool) (a:A) : bool := f a || g a.
Definition andf {A} (f:A -> bool) (g:A -> bool) (a:A) : bool := f a && g a.
Lemma bool_conj_true : forall b1 b2, (b1 && b2) = true <-> b1 = true /\ b2 = true.
Proof.
intros ; constructor ; intros.
- destruct b1,b2 ; auto.
- destruct b1,b2 ; auto.
destruct H as [H1 H2] ; discriminate H2.
destruct H as [H1 H2] ; discriminate H1.
destruct H as [H1 H2] ; discriminate H1.
Qed.
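(* Note: this equivalence is also available as Bool.andb_true_iff in the
   standard library; the manual case analysis above is kept for illustration. *)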
|
"""
Demonstration of Tube
"""
print('testing!')
import ctypes
print('imported ctypes')
print(ctypes.__dict__)
print('dict done')
import ctypes.util
print('imported util')
print(ctypes.util.find_library)
import vispy
# vispy.set_log_level('debug')
import sys
from vispy import scene
from vispy.geometry.torusknot import TorusKnot
from colorsys import hsv_to_rgb
import numpy as np
canvas = scene.SceneCanvas(keys='interactive', bgcolor='white')
canvas.unfreeze()
canvas.view = canvas.central_widget.add_view()
points1 = TorusKnot(5, 3).first_component[:-1]
points1[:, 0] -= 20.
points1[:, 2] -= 15.
points2 = points1.copy()
points2[:, 2] += 30.
points3 = points1.copy()
points3[:, 0] += 41.
points3[:, 2] += 30
points4 = points1.copy()
points4[:, 0] += 41.
colors = np.linspace(0, 1, len(points1))
colors = np.array([hsv_to_rgb(c, 1, 1) for c in colors])
vertex_colors = np.random.random(8 * len(points1))
vertex_colors = np.array([hsv_to_rgb(c, 1, 1) for c in vertex_colors])
l1 = scene.visuals.Tube(points1,
shading='flat',
color=colors, # this is overridden by
# the vertex_colors argument
vertex_colors=vertex_colors,
tube_points=8)
l2 = scene.visuals.Tube(points2,
color=['red', 'green', 'blue'],
shading='smooth',
tube_points=8)
l3 = scene.visuals.Tube(points3,
color=colors,
shading='flat',
tube_points=8,
closed=True)
l4 = scene.visuals.Tube(points4,
color=colors,
shading='flat',
tube_points=8,
mode='lines')
canvas.view.add(l1)
canvas.view.add(l2)
canvas.view.add(l3)
canvas.view.add(l4)
canvas.view.camera = scene.TurntableCamera()
# tube does not expose its limits yet
canvas.view.camera.set_range((-20, 20), (-20, 20), (-20, 20))
canvas.show()
if __name__ == '__main__':
if sys.flags.interactive != 1:
canvas.app.run()
|
Google held its annual developer conference, I/O 2015, where it lifted the curtain on many features of the latest version of Android, Android M. Below are the features Google outlined at the conference that will appear in Android M.
According to Google's Vice President of Engineering, Dave Burke, the core theme of the new Android M release is improving the user experience.
1. Android M will improve communication between applications. Previously, when users opened a link in an app, they had to choose between the Chrome browser and other apps. In Android M, the software itself will decide which app is best suited for opening the link.
2. Android M has a new feature called "Doze" that will increase the battery life of your smartphone and tablet. If you forget to close an app and it keeps running in the background, Doze will automatically put it into a passive state, so your battery's standby time will increase more than ever.
3. Android M devices will also adopt the new USB Type-C standard for charging. Because the USB Type-C cable is two-way, you will be able to quickly charge your smartphone or tablet's battery from a friend's smartphone or tablet, provided both devices run Android M.
4. Another native feature of Android M is fingerprint scanner support for your smartphone and tablet. Like the iPhone, Android M users will be able to unlock the phone with their fingerprints, and applications will be able to verify payments with them.
5. For mobile payments, Google will use a new platform, Android Pay. The platform will run on standard NFC, and retail stores and applications will be able to accept payments through it, though there was no mention of Google Wallet, the company's earlier payment service.
6. The standalone Chrome browser will work better on a weak Internet connection. It will check the device's connection itself and, if the connection weakens, remove heavy content such as images so that pages load quickly, much like Apple's Safari Reader and Instapaper.
7. You will now be able to control app permissions individually in Android M. For example, if you want to block an app's access to the Internet, you will be able to disable that permission. Likewise, if you want to use WhatsApp offline, you will be able to turn off its Internet access permission.
In addition, Android M makes some minor changes: the text selection feature has been improved; when a user shares content, the software will remember the most frequently used contacts and apps and suggest them for sharing; and individual volume controls for streams such as alarms, notifications, and ringtones, which were hidden in Lollipop, are back. Android M's developer preview is available right now, while the release for all users will launch in the third quarter of 2015. |
[STATEMENT]
lemma cross_basis: "(axis 1 1) \<times> (axis 2 1) = axis 3 1" "(axis 2 1) \<times> (axis 1 1) = -(axis 3 1)"
"(axis 2 1) \<times> (axis 3 1) = axis 1 1" "(axis 3 1) \<times> (axis 2 1) = -(axis 1 1)"
"(axis 3 1) \<times> (axis 1 1) = axis 2 1" "(axis 1 1) \<times> (axis 3 1) = -(axis 2 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (axis 1 1 \<times> axis 2 1 = axis 3 1 &&& axis 2 1 \<times> axis 1 1 = - axis 3 1 &&& axis 2 1 \<times> axis 3 1 = axis 1 1) &&& axis 3 1 \<times> axis 2 1 = - axis 1 1 &&& axis 3 1 \<times> axis 1 1 = axis 2 1 &&& axis 1 1 \<times> axis 3 1 = - axis 2 1
[PROOF STEP]
using exhaust_3
[PROOF STATE]
proof (prove)
using this:
?x = 1 \<or> ?x = 2 \<or> ?x = 3
goal (1 subgoal):
1. (axis 1 1 \<times> axis 2 1 = axis 3 1 &&& axis 2 1 \<times> axis 1 1 = - axis 3 1 &&& axis 2 1 \<times> axis 3 1 = axis 1 1) &&& axis 3 1 \<times> axis 2 1 = - axis 1 1 &&& axis 3 1 \<times> axis 1 1 = axis 2 1 &&& axis 1 1 \<times> axis 3 1 = - axis 2 1
[PROOF STEP]
by (force simp add: axis_def cross3_simps)+ |
The composition of the sum of two polynomials with a polynomial is equal to the sum of the compositions of each of the two polynomials with that polynomial. |
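In symbols, the identity is
$$(p + q) \circ r = p \circ r + q \circ r,$$
which holds because composing with $r$ in the outer argument is evaluation of the sum at $r$, and evaluation is additive. Note that the reversed form $p \circ (q + r) = p \circ q + p \circ r$ fails in general: already for $p = X^2$ one has $(q + r)^2 \neq q^2 + r^2$ whenever $2qr \neq 0$.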
lemma emeasure_SUP: assumes sets: "\<And>i. i \<in> I \<Longrightarrow> sets (M i) = sets N" "X \<in> sets N" "I \<noteq> {}" shows "emeasure (SUP i\<in>I. M i) X = (SUP J\<in>{J. J \<noteq> {} \<and> finite J \<and> J \<subseteq> I}. emeasure (SUP i\<in>J. M i) X)" |
using DataStructures: OrderedDict
@testset "Datasets" begin
data = [rand(4,5,12), rand(4,5,12), rand(4,5)]
axlist1 = [RangeAxis("XVals",1.0:4.0), CategoricalAxis("YVals",[1,2,3,4,5]), RangeAxis("Time",Date(2001,1,15):Month(1):Date(2001,12,15))]
axlist2 = [RangeAxis("XVals",1.0:4.0), CategoricalAxis("YVals",[1,2,3,4,5])]
props = [Dict("att$i"=>i) for i=1:3]
c1,c2,c3 = (YAXArray(axlist1, data[1], props[1]),
YAXArray(axlist1, data[2], props[2]),
YAXArray(axlist2, data[3], props[3])
)
ds = Dataset(avar = c1, something = c2, smaller = c3)
@testset "Basic functions" begin
b = IOBuffer()
show(b,ds)
s = split(String(take!(b)),"\n")
s2 = """
YAXArray Dataset
Dimensions:
XVals Axis with 4 Elements from 1.0 to 4.0
YVals Axis with 5 elements: 1 2 3 4 5
Time Axis with 12 Elements from 2001-01-15 to 2001-12-15
Variables: avar something smaller """
s2 = split(s2,"\n")
# @test s[[1,2,6]] == s2[[1,2,6]]
# @test all(i->in(i,s2), s[3:5])
for n in [:avar, :something, :smaller, :XVals, :Time, :YVals]
@test n in propertynames(ds)
@test n in propertynames(ds, true)
end
@test :axes ∉ propertynames(ds)
@test :cubes ∉ propertynames(ds)
@test :axes ∈ propertynames(ds, true)
#Test getproperty
@test all(i->in(i,values(ds.axes)),axlist1)
@test collect(keys(ds.cubes)) == [:avar, :something, :smaller]
@test collect(values(ds.cubes)) == [c1,c2,c3]
@test ds.avar === c1
@test ds.something === c2
@test ds.smaller === c3
@test ds[:avar] === c1
ds2 = ds[[:avar, :smaller]]
@test collect(keys(ds2.cubes)) == [:avar, :smaller]
@test collect(values(ds2.cubes)) == [c1,c3]
@test YAXArrays.Datasets.fuzzyfind("hal", ["hallo","bla","something"]) == 1
ds3 = ds[["av", "some"]]
@test collect(keys(ds3.cubes)) == [:av, :some]
@test collect(values(ds3.cubes)) == [c1,c2]
@test ds["avar"] === c1
end
@testset "Dataset interface" begin
struct MockDataset
vars
dims
attrs
path
end
Base.getindex(d::MockDataset,i) = d.vars[i]
Base.haskey(d::MockDataset,i) = haskey(d.vars,i)
YAXArrayBase.get_varnames(d::MockDataset) = (keys(d.vars)...,)
YAXArrayBase.get_var_dims(d::MockDataset,name) = d.dims[name]
YAXArrayBase.get_var_attrs(d::MockDataset, name) = d.attrs[name]
YAXArrayBase.allow_missings(d::MockDataset) = !occursin("nomissings",d.path)
function YAXArrayBase.create_empty(::Type{MockDataset},path)
mkpath(dirname(path))
open(_->nothing, path,"w")
MockDataset(Dict(),Dict(),Dict(),path)
end
function YAXArrayBase.add_var(ds::MockDataset, T, name, s, dimlist, atts;kwargs...)
data = Array{T}(undef,s...)
ds.vars[name] = data
ds.dims[name] = dimlist
ds.attrs[name] = atts
data
end
YAXArrayBase.backendlist[:mock] = MockDataset
push!(YAXArrayBase.backendregex,r".mock$"=>MockDataset)
data1,data2,data3,d1,d2,d3 = (rand(12,5,2),rand(12,5),rand(12,5,2),1:12, 0.1:0.1:0.5, ["One","Two"])
att1 = Dict("att1"=>5,"_ARRAY_OFFSET"=>(2,0,0))
att2 = Dict("att2"=>6,"_ARRAY_OFFSET"=>(2,0))
attd1 = Dict("_ARRAY_OFFSET"=>2, "units"=>"days since 2001-01-01", "calendar"=>"gregorian")
attd2 = Dict("attd"=>"d")
attd3 = Dict("attd"=>"d")
function MockDataset(p)
MockDataset(
OrderedDict("Var1"=>data1, "Var2"=>data2, "Var3"=>data3, "time"=>d1,"d2"=>d2, "d3"=>d3),
Dict("Var1"=>("time","d2","d3"),"Var2"=>("time","d2"),"Var3"=>("time","d2","d3"),"time"=>("time",),"d2"=>["d2"],"d3"=>["d3"]),
Dict("Var1"=>att1,"Var2"=>att2,"Var3"=>att1,"time"=>attd1,"d2"=>attd2,"d3"=>attd3),
p
)
end
m = MockDataset("testpath.mock")
@testset "collectdims" begin
dcollect = YAXArrays.Datasets.collectdims(m)
@test dcollect["time"].ax isa RangeAxis
@test YAXArrays.Cubes.Axes.axname(dcollect["time"].ax) == "time"
@test dcollect["time"].ax.values == DateTime(2001,1,4):Day(1):DateTime(2001,1,13)
@test dcollect["time"].offs == 2
@test dcollect["d2"].ax isa RangeAxis
@test YAXArrays.Cubes.Axes.axname(dcollect["d2"].ax) == "d2"
@test dcollect["d2"].ax.values == 0.1:0.1:0.5
@test dcollect["d2"].offs == 0
@test dcollect["d3"].ax isa CategoricalAxis
@test YAXArrays.Cubes.Axes.axname(dcollect["d3"].ax) == "d3"
@test dcollect["d3"].ax.values == ["One","Two"]
@test dcollect["d3"].offs == 0
a1 = [0.1,0.2,0.3,0.4]
a2 = [0.1,0.21,0.3,0.4]
@test YAXArrays.Datasets.testrange(a1)== 0.1:0.1:0.4
@test YAXArrays.Datasets.testrange(a2) isa Array
@test YAXArrays.Datasets.testrange(a2) == [0.1,0.21,0.3,0.4]
end
@testset "open_dataset" begin
ds = open_dataset("test.mock")
@test size(ds.Var1) == (10,5,2)
@test size(ds.Var2) == (10,5)
@test all(in(keys(ds.axes)),(:time,:d2,:d3))
ar = Cube(ds)
@test ar isa YAXArray
@test size(ar) == (10,5,2,2)
@test YAXArrays.Cubes.Axes.axname.(ar.axes) == ["time","d2","d3","Variable"]
@test ar.axes[4].values == ["Var1","Var3"]
end
@testset "Dataset creation" begin
al = [RangeAxis("Time",Date(2001):Month(1):Date(2001,12,31)), CategoricalAxis("Variable",["A","B"]), RangeAxis("Xvals",1:10)]
#Basic
newds, newds2 = YAXArrays.Datasets.createdataset(MockDataset,al)
@test YAXArrays.Cubes.axsym.(newds2.axes) == [:Time, :Xvals, :Variable]
@test newds2.axes[1].values == Date(2001):Month(1):Date(2001,12,31)
@test newds2.axes[3].values == ["A","B"]
@test newds2.axes[2].values == 1:10
@test newds2.data isa YAXArrays.Cubes.DiskArrayTools.DiskArrayStack
# A bit more advanced
fn = string(tempname(),".mock")
newds, newds2 = YAXArrays.Datasets.createdataset(MockDataset,al,path = fn, persist = false,
chunksize = (4,2,4), chunkoffset = (2,0,3), properties = Dict("att1"=>5), datasetaxis="A")
@test size(newds.data) == (12,2,10)
@test size(newds.data.parent) == (14,2,13)
@test eltype(newds.data) <: Union{Float32,Missing}
@test newds.properties["att1"] == 5
@test isfile(fn)
newds = nothing
newds2 = nothing
# Without missings
fn = string(tempname(),"nomissings.mock")
newds = YAXArrays.Datasets.createdataset(MockDataset,al,path = fn,datasetaxis="A")
end
end
end
|
! RUN: %S/test_errors.sh %s %t %f18
module m
interface
subroutine sub0
end
!ERROR: A PROCEDURE statement is only allowed in a generic interface block
procedure :: sub1, sub2
end interface
contains
subroutine sub1
end
subroutine sub2
end
end
|
lemma measure_restrict_space: assumes "\<Omega> \<inter> space M \<in> sets M" "A \<subseteq> \<Omega>" shows "measure (restrict_space M \<Omega>) A = measure M A" |
if_test/if_test.cpp
if_test/display.cpp
if_test/main.cpp
if_test/stimulus.cpp
|
module Lib.IO where
open import Lib.List
open import Lib.Prelude
{-# IMPORT System.Environment #-}
FilePath = String
postulate
IO : Set -> Set
getLine : IO String
putStrLn : String -> IO Unit
putStr : String -> IO Unit
bindIO : {A B : Set} -> IO A -> (A -> IO B) -> IO B
returnIO : {A : Set} -> A -> IO A
getArgs : IO (List String)
readFile : FilePath -> IO String
writeFile : FilePath -> String -> IO Unit
{-# BUILTIN IO IO #-}
{-# COMPILED_TYPE IO IO #-}
{-# COMPILED putStr putStr #-}
{-# COMPILED putStrLn putStrLn #-}
{-# COMPILED bindIO (\_ _ -> (>>=) :: IO a -> (a -> IO b) -> IO b) #-}
{-# COMPILED returnIO (\_ -> return :: a -> IO a) #-}
-- we need to throw away the type argument to return and bind
-- and resolve the overloading explicitly (since Alonzo
-- output is sprinkled with unsafeCoerce#).
{-# COMPILED getLine getLine #-}
{-# COMPILED getArgs System.Environment.getArgs #-}
{-# COMPILED readFile readFile #-}
{-# COMPILED writeFile writeFile #-}
mapM : {A B : Set} -> (A -> IO B) -> List A -> IO (List B)
mapM f [] = returnIO []
mapM f (x :: xs) = bindIO (f x) \y -> bindIO (mapM f xs) \ys -> returnIO (y :: ys)
mapM₋ : {A : Set} -> (A -> IO Unit) -> List A -> IO Unit
mapM₋ f xs = bindIO (mapM f xs) \_ -> returnIO unit
|
State Before: 𝕜 : Type u_1
E : Type u_2
F : Type ?u.3801506
inst✝⁶ : IsROrC 𝕜
inst✝⁵ : NormedAddCommGroup E
inst✝⁴ : InnerProductSpace 𝕜 E
inst✝³ : NormedAddCommGroup F
inst✝² : InnerProductSpace ℝ F
dec_E : DecidableEq E
ι : Type u_3
dec_ι : DecidableEq ι
G : ι → Type ?u.3801569
inst✝¹ : (i : ι) → NormedAddCommGroup (G i)
inst✝ : (i : ι) → InnerProductSpace 𝕜 (G i)
V✝ : (i : ι) → G i →ₗᵢ[𝕜] E
hV✝ : OrthogonalFamily 𝕜 G V✝
dec_V : (i : ι) → (x : G i) → Decidable (x ≠ 0)
V : ι → Submodule 𝕜 E
hV : OrthogonalFamily 𝕜 (fun i => { x // x ∈ V i }) fun i => Submodule.subtypeₗᵢ (V i)
hV_sum : IsInternal fun i => V i
α : ι → Type u_4
v_family : (i : ι) → Basis (α i) 𝕜 { x // x ∈ V i }
hv_family : ∀ (i : ι), Orthonormal 𝕜 ↑(v_family i)
⊢ Orthonormal 𝕜 ↑(collectedBasis hV_sum v_family) State After: no goals Tactic: simpa only [hV_sum.collectedBasis_coe] using hV.orthonormal_sigma_orthonormal hv_family |
import numpy as np
import PIL.ImageGrab
import cv2
import time
from Keyboard import PressKey,ReleaseKey, W,A,S,D
def main():
for i in list(range(4))[:: -1]:
print(i+1)
time.sleep(1)
last_time = time.time()
while(True):
# PressKey(W)
# ReleaseKey(W)
printscreen_numpy = np.array(PIL.ImageGrab.grab(bbox=(0,40,800,640)))
print("loop take {} seconds".format(time.time()-last_time))
last_time = time.time()
        new_screen = process_img(printscreen_numpy)
        cv2.imshow("windowTest", new_screen)  # show the processed (edge-detected) frame
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
def process_img(image):
    # Convert to grayscale, then run Canny edge detection.
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    return processed_img
if __name__ == '__main__':
main() |
State Before: m n✝ a✝ b✝ c d : ℤ
a b n : ℕ
⊢ ↑a ≡ ↑b [ZMOD ↑n] ↔ a ≡ b [MOD n] State After: m n✝ a✝ b✝ c d : ℤ
a b n : ℕ
⊢ ↑a % ↑n = ↑b % ↑n ↔ a % n = b % n Tactic: unfold ModEq Nat.ModEq State Before: m n✝ a✝ b✝ c d : ℤ
a b n : ℕ
⊢ ↑a % ↑n = ↑b % ↑n ↔ a % n = b % n State After: m n✝ a✝ b✝ c d : ℤ
a b n : ℕ
⊢ ↑a % ↑n = ↑b % ↑n ↔ ↑(a % n) = ↑(b % n) Tactic: rw [← Int.ofNat_inj] State Before: m n✝ a✝ b✝ c d : ℤ
a b n : ℕ
⊢ ↑a % ↑n = ↑b % ↑n ↔ ↑(a % n) = ↑(b % n) State After: no goals Tactic: simp [coe_nat_mod] |
/-
Copyright (c) 2022 Kexing Ying. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kexing Ying, Rémy Degenne
-/
import probability.process.stopping
/-!
# Hitting time
Given a stochastic process, the hitting time provides the first time the process ``hits'' some
subset of the state space. The hitting time is a stopping time in the case that the time index is
discrete and the process is adapted (this is true in a far more general setting however we have
only proved it for the discrete case so far).
## Main definition
* `measure_theory.hitting`: the hitting time of a stochastic process
## Main results
* `measure_theory.hitting_is_stopping_time`: a discrete hitting time of an adapted process is a
stopping time
## Implementation notes
In the definition of the hitting time, we bound the hitting time by an upper and lower bound.
This is to ensure that our result is meaningful in the case we are taking the infimum of an
empty set or the infimum of a set which is unbounded from below. With this, we can talk about
hitting times indexed by the natural numbers or the reals. By taking the bounds to be
`⊤` and `⊥`, we obtain the standard definition in the case that the index is `ℕ∞` or `ℝ≥0∞`.
-/
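
/- Informally, for `n ≤ m` the bounded hitting time defined below reads
     `hitting u s n m ω = Inf (set.Icc n m ∩ {i | u i ω ∈ s})`
   when `u` meets `s` somewhere in the window `[n, m]`, and
     `hitting u s n m ω = m`
   otherwise, so the value always lies in `[n, m]` (see `hitting_mem_Icc`). -/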
open filter order topological_space
open_locale classical measure_theory nnreal ennreal topology big_operators
namespace measure_theory
variables {Ω β ι : Type*} {m : measurable_space Ω}
/-- Hitting time: given a stochastic process `u` and a set `s`, `hitting u s n m` is the first time
`u` is in `s` after time `n` and before time `m` (if `u` does not hit `s` after time `n` and
before `m` then the hitting time is simply `m`).
The hitting time is a stopping time if the process is adapted and discrete. -/
noncomputable def hitting [preorder ι] [has_Inf ι] (u : ι → Ω → β) (s : set β) (n m : ι) : Ω → ι :=
λ x, if ∃ j ∈ set.Icc n m, u j x ∈ s then Inf (set.Icc n m ∩ {i : ι | u i x ∈ s}) else m
section inequalities
variables [conditionally_complete_linear_order ι] {u : ι → Ω → β} {s : set β} {n i : ι} {ω : Ω}
/-- This lemma is strictly weaker than `hitting_of_le`. -/
lemma hitting_of_lt {m : ι} (h : m < n) : hitting u s n m ω = m :=
begin
simp_rw [hitting],
have h_not : ¬ ∃ (j : ι) (H : j ∈ set.Icc n m), u j ω ∈ s,
{ push_neg,
intro j,
rw set.Icc_eq_empty_of_lt h,
simp only [set.mem_empty_iff_false, is_empty.forall_iff], },
simp only [h_not, if_false],
end
lemma hitting_le {m : ι} (ω : Ω) : hitting u s n m ω ≤ m :=
begin
cases le_or_lt n m with h_le h_lt,
{ simp only [hitting],
split_ifs,
{ obtain ⟨j, hj₁, hj₂⟩ := h,
exact (cInf_le (bdd_below.inter_of_left bdd_below_Icc) (set.mem_inter hj₁ hj₂)).trans hj₁.2 },
{ exact le_rfl }, },
{ rw hitting_of_lt h_lt, },
end
lemma not_mem_of_lt_hitting {m k : ι}
(hk₁ : k < hitting u s n m ω) (hk₂ : n ≤ k) :
u k ω ∉ s :=
begin
classical,
intro h,
have hexists : ∃ j ∈ set.Icc n m, u j ω ∈ s,
refine ⟨k, ⟨hk₂, le_trans hk₁.le $ hitting_le _⟩, h⟩,
refine not_le.2 hk₁ _,
simp_rw [hitting, if_pos hexists],
exact cInf_le bdd_below_Icc.inter_of_left ⟨⟨hk₂, le_trans hk₁.le $ hitting_le _⟩, h⟩,
end
lemma hitting_eq_end_iff {m : ι} :
hitting u s n m ω = m ↔ (∃ j ∈ set.Icc n m, u j ω ∈ s) →
Inf (set.Icc n m ∩ {i : ι | u i ω ∈ s}) = m :=
by rw [hitting, ite_eq_right_iff]
lemma hitting_of_le {m : ι} (hmn : m ≤ n) :
hitting u s n m ω = m :=
begin
obtain (rfl | h) := le_iff_eq_or_lt.1 hmn,
{ simp only [hitting, set.Icc_self, ite_eq_right_iff, set.mem_Icc, exists_prop,
forall_exists_index, and_imp],
intros i hi₁ hi₂ hi,
rw [set.inter_eq_left_iff_subset.2, cInf_singleton],
exact set.singleton_subset_iff.2 (le_antisymm hi₂ hi₁ ▸ hi) },
{ exact hitting_of_lt h }
end
lemma le_hitting {m : ι} (hnm : n ≤ m) (ω : Ω) : n ≤ hitting u s n m ω :=
begin
simp only [hitting],
split_ifs,
{ refine le_cInf _ (λ b hb, _),
{ obtain ⟨k, hk_Icc, hk_s⟩ := h,
exact ⟨k, hk_Icc, hk_s⟩, },
{ rw set.mem_inter_iff at hb,
exact hb.1.1, }, },
{ exact hnm },
end
lemma le_hitting_of_exists {m : ι} (h_exists : ∃ j ∈ set.Icc n m, u j ω ∈ s) :
n ≤ hitting u s n m ω :=
begin
refine le_hitting _ ω,
by_contra,
rw set.Icc_eq_empty_of_lt (not_le.mp h) at h_exists,
simpa using h_exists,
end
lemma hitting_mem_Icc {m : ι} (hnm : n ≤ m) (ω : Ω) : hitting u s n m ω ∈ set.Icc n m :=
⟨le_hitting hnm ω, hitting_le ω⟩
lemma hitting_mem_set [is_well_order ι (<)] {m : ι} (h_exists : ∃ j ∈ set.Icc n m, u j ω ∈ s) :
u (hitting u s n m ω) ω ∈ s :=
begin
simp_rw [hitting, if_pos h_exists],
have h_nonempty : (set.Icc n m ∩ {i : ι | u i ω ∈ s}).nonempty,
{ obtain ⟨k, hk₁, hk₂⟩ := h_exists,
exact ⟨k, set.mem_inter hk₁ hk₂⟩, },
have h_mem := Inf_mem h_nonempty,
rw [set.mem_inter_iff] at h_mem,
exact h_mem.2,
end
lemma hitting_mem_set_of_hitting_lt [is_well_order ι (<)] {m : ι}
(hl : hitting u s n m ω < m) :
u (hitting u s n m ω) ω ∈ s :=
begin
by_cases h : ∃ j ∈ set.Icc n m, u j ω ∈ s,
{ exact hitting_mem_set h },
{ simp_rw [hitting, if_neg h] at hl,
exact false.elim (hl.ne rfl) }
end
lemma hitting_le_iff_of_exists [is_well_order ι (<)] {m : ι}
(h_exists : ∃ j ∈ set.Icc n m, u j ω ∈ s) :
hitting u s n m ω ≤ i ↔ ∃ j ∈ set.Icc n i, u j ω ∈ s :=
begin
split; intro h',
{ exact ⟨hitting u s n m ω, ⟨le_hitting_of_exists h_exists, h'⟩, hitting_mem_set h_exists⟩, },
{ have h'' : ∃ k ∈ set.Icc n (min m i), u k ω ∈ s,
{ obtain ⟨k₁, hk₁_mem, hk₁_s⟩ := h_exists,
obtain ⟨k₂, hk₂_mem, hk₂_s⟩ := h',
refine ⟨min k₁ k₂, ⟨le_min hk₁_mem.1 hk₂_mem.1, min_le_min hk₁_mem.2 hk₂_mem.2⟩, _⟩,
exact min_rec' (λ j, u j ω ∈ s) hk₁_s hk₂_s, },
obtain ⟨k, hk₁, hk₂⟩ := h'',
refine le_trans _ (hk₁.2.trans (min_le_right _ _)),
exact hitting_le_of_mem hk₁.1 (hk₁.2.trans (min_le_left _ _)) hk₂, },
end
lemma hitting_le_iff_of_lt [is_well_order ι (<)] {m : ι} (i : ι) (hi : i < m) :
hitting u s n m ω ≤ i ↔ ∃ j ∈ set.Icc n i, u j ω ∈ s :=
begin
by_cases h_exists : ∃ j ∈ set.Icc n m, u j ω ∈ s,
{ rw hitting_le_iff_of_exists h_exists, },
{ simp_rw [hitting, if_neg h_exists],
push_neg at h_exists,
simp only [not_le.mpr hi, set.mem_Icc, false_iff, not_exists, and_imp],
exact λ k hkn hki, h_exists k ⟨hkn, hki.trans hi.le⟩, },
end
lemma hitting_lt_iff [is_well_order ι (<)] {m : ι} (i : ι) (hi : i ≤ m) :
hitting u s n m ω < i ↔ ∃ j ∈ set.Ico n i, u j ω ∈ s :=
begin
split; intro h',
{ have h : ∃ j ∈ set.Icc n m, u j ω ∈ s,
{ by_contra,
simp_rw [hitting, if_neg h, ← not_le] at h',
exact h' hi, },
exact ⟨hitting u s n m ω, ⟨le_hitting_of_exists h, h'⟩, hitting_mem_set h⟩, },
{ obtain ⟨k, hk₁, hk₂⟩ := h',
refine lt_of_le_of_lt _ hk₁.2,
exact hitting_le_of_mem hk₁.1 (hk₁.2.le.trans hi) hk₂, },
end
lemma hitting_eq_hitting_of_exists
{m₁ m₂ : ι} (h : m₁ ≤ m₂) (h' : ∃ j ∈ set.Icc n m₁, u j ω ∈ s) :
hitting u s n m₁ ω = hitting u s n m₂ ω :=
begin
simp only [hitting, if_pos h'],
obtain ⟨j, hj₁, hj₂⟩ := h',
rw if_pos,
{ refine le_antisymm _ (cInf_le_cInf bdd_below_Icc.inter_of_left ⟨j, hj₁, hj₂⟩
(set.inter_subset_inter_left _ (set.Icc_subset_Icc_right h))),
refine le_cInf ⟨j, set.Icc_subset_Icc_right h hj₁, hj₂⟩ (λ i hi, _),
by_cases hi' : i ≤ m₁,
{ exact cInf_le bdd_below_Icc.inter_of_left ⟨⟨hi.1.1, hi'⟩, hi.2⟩ },
{ exact ((cInf_le bdd_below_Icc.inter_of_left ⟨hj₁, hj₂⟩).trans (hj₁.2.trans le_rfl)).trans
(le_of_lt (not_le.1 hi')) } },
exact ⟨j, ⟨hj₁.1, hj₁.2.trans h⟩, hj₂⟩,
end
lemma hitting_mono {m₁ m₂ : ι} (hm : m₁ ≤ m₂) :
hitting u s n m₁ ω ≤ hitting u s n m₂ ω :=
begin
by_cases h : ∃ j ∈ set.Icc n m₁, u j ω ∈ s,
{ exact (hitting_eq_hitting_of_exists hm h).le },
{ simp_rw [hitting, if_neg h],
split_ifs with h',
{ obtain ⟨j, hj₁, hj₂⟩ := h',
refine le_cInf ⟨j, hj₁, hj₂⟩ _,
by_contra hneg, push_neg at hneg,
obtain ⟨i, hi₁, hi₂⟩ := hneg,
exact h ⟨i, ⟨hi₁.1.1, hi₂.le⟩, hi₁.2⟩ },
{ exact hm } }
end
end inequalities
/-- A discrete hitting time is a stopping time. -/
lemma hitting_is_stopping_time
[conditionally_complete_linear_order ι] [is_well_order ι (<)] [countable ι]
[topological_space β] [pseudo_metrizable_space β] [measurable_space β] [borel_space β]
{f : filtration ι m} {u : ι → Ω → β} {s : set β} {n n' : ι}
(hu : adapted f u) (hs : measurable_set s) :
is_stopping_time f (hitting u s n n') :=
begin
intro i,
cases le_or_lt n' i with hi hi,
{ have h_le : ∀ ω, hitting u s n n' ω ≤ i := λ x, (hitting_le x).trans hi,
simp [h_le], },
{ have h_set_eq_Union : {ω | hitting u s n n' ω ≤ i} = ⋃ j ∈ set.Icc n i, u j ⁻¹' s,
{ ext x,
rw [set.mem_set_of_eq, hitting_le_iff_of_lt _ hi],
simp only [set.mem_Icc, exists_prop, set.mem_Union, set.mem_preimage], },
rw h_set_eq_Union,
exact measurable_set.Union (λ j, measurable_set.Union $
λ hj, f.mono hj.2 _ ((hu j).measurable hs)) }
end
lemma stopped_value_hitting_mem [conditionally_complete_linear_order ι] [is_well_order ι (<)]
{u : ι → Ω → β} {s : set β} {n m : ι} {ω : Ω} (h : ∃ j ∈ set.Icc n m, u j ω ∈ s) :
stopped_value u (hitting u s n m) ω ∈ s :=
begin
simp only [stopped_value, hitting, if_pos h],
obtain ⟨j, hj₁, hj₂⟩ := h,
have : Inf (set.Icc n m ∩ {i | u i ω ∈ s}) ∈ set.Icc n m ∩ {i | u i ω ∈ s} :=
Inf_mem (set.nonempty_of_mem ⟨hj₁, hj₂⟩),
exact this.2,
end
/-- The hitting time of a discrete process with the starting time indexed by a stopping time
is a stopping time. -/
lemma is_stopping_time_hitting_is_stopping_time
[conditionally_complete_linear_order ι] [is_well_order ι (<)] [countable ι]
[topological_space ι] [order_topology ι] [first_countable_topology ι]
[topological_space β] [pseudo_metrizable_space β] [measurable_space β] [borel_space β]
{f : filtration ι m} {u : ι → Ω → β} {τ : Ω → ι} (hτ : is_stopping_time f τ)
{N : ι} (hτbdd : ∀ x, τ x ≤ N) {s : set β} (hs : measurable_set s) (hf : adapted f u) :
is_stopping_time f (λ x, hitting u s (τ x) N x) :=
begin
intro n,
have h₁ : {x | hitting u s (τ x) N x ≤ n} =
(⋃ i ≤ n, {x | τ x = i} ∩ {x | hitting u s i N x ≤ n}) ∪
(⋃ i > n, {x | τ x = i} ∩ {x | hitting u s i N x ≤ n}),
{ ext x,
simp [← exists_or_distrib, ← or_and_distrib_right, le_or_lt] },
have h₂ : (⋃ i > n, {x | τ x = i} ∩ {x | hitting u s i N x ≤ n}) = ∅,
{ ext x,
simp only [gt_iff_lt, set.mem_Union, set.mem_inter_iff, set.mem_set_of_eq,
exists_prop, set.mem_empty_iff_false, iff_false, not_exists, not_and, not_le],
rintro m hm rfl,
exact lt_of_lt_of_le hm (le_hitting (hτbdd _) _) },
rw [h₁, h₂, set.union_empty],
exact measurable_set.Union (λ i, measurable_set.Union
(λ hi, (f.mono hi _ (hτ.measurable_set_eq i)).inter (hitting_is_stopping_time hf hs n))),
end
section complete_lattice
variables [complete_lattice ι] {u : ι → Ω → β} {s : set β} {f : filtration ι m}
lemma hitting_eq_Inf (ω : Ω) : hitting u s ⊥ ⊤ ω = Inf {i : ι | u i ω ∈ s} :=
begin
simp only [hitting, set.mem_Icc, bot_le, le_top, and_self, exists_true_left, set.Icc_bot,
set.Iic_top, set.univ_inter, ite_eq_left_iff, not_exists],
intro h_nmem_s,
symmetry,
rw Inf_eq_top,
exact λ i hi_mem_s, absurd hi_mem_s (h_nmem_s i),
end
end complete_lattice
section conditionally_complete_linear_order_bot
variables [conditionally_complete_linear_order_bot ι] [is_well_order ι (<)]
variables {u : ι → Ω → β} {s : set β} {f : filtration ℕ m}
lemma hitting_bot_le_iff {i n : ι} {ω : Ω} (hx : ∃ j, j ≤ n ∧ u j ω ∈ s) :
hitting u s ⊥ n ω ≤ i ↔ ∃ j ≤ i, u j ω ∈ s :=
begin
cases lt_or_le i n with hi hi,
{ rw hitting_le_iff_of_lt _ hi,
simp, },
{ simp only [(hitting_le ω).trans hi, true_iff],
obtain ⟨j, hj₁, hj₂⟩ := hx,
exact ⟨j, hj₁.trans hi, hj₂⟩, },
end
end conditionally_complete_linear_order_bot
end measure_theory
|
subroutine ksxlim (x,xlb,xub,ndv,nside)
implicit double precision (a-h,o-z)
dimension x(*),xlb(*),xub(*)
c
c routine to insure x-vector does not violate
c upper or lower bounds
c
c author - Gregory A. Wrenn
c location - Lockheed Engineering and Sciences Co.
c 144 Research Drive
c Hampton, Va. 23666
c
c last modification - 17 July 1996
c
if (nside .eq. 0) go to 20
do 10 i = 1,ndv
xx = x(i)
xl = xlb(i)
xu = xub(i)
if (xx .lt. xl) xx = xl
if (xx .gt. xu) xx = xu
x(i) = xx
10 continue
20 continue
c
return
end
|
#include <cmath>
//#include <tuple>
#include <armadillo> // required for the arma::Mat/arma::Col types used below
#include <gsl/gsl_sf_bessel.h> // Bessel functions
#include <gsl/gsl_sf_lambert.h> // Lambert W functions
#include "nufftw.h"
namespace nufftw
{
typedef arma::Mat<double> Mat;
typedef arma::Mat<complex> CMat;
typedef arma::Col<double> Vec;
typedef arma::Col<complex> CVec;
/** Evaluate Chebyshev polynomials of degree 0,...,n at x. */
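    // The columns are filled with the three-term recurrence
    // T_{k+1}(x) = 2*x*T_k(x) - T_{k-1}(x), starting from T_0 = 1 and T_1 = x.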
Mat chebT(int n, const Vec& x)
{
int N = x.n_rows;
Mat T = arma::zeros(N,n+1);
T.col(0).ones();
if (n>0) {
T.col(1) = x;
for (int k=1; k<n; ++k) {
T.col(k+1) = 2*x%T.col(k)-T.col(k-1);
}
}
return T;
}
    /** The bivariate Chebyshev coefficients for the function
     *  f(x,y) = exp(-i*x.*y) on the domain [-gam, gam]x[0,2*pi], as given by
     *  Lemma A.2 of Townsend's DPhil thesis:
     *  cfs(p,q) = 4 i^q J_{(p+q)/2}(-gam*pi/2) J_{(q-p)/2}(-gam*pi/2),
     *  nonzero only when p and q have equal parity. */
    CMat besselCoeffs(int K, double gamma)
    {
        const complex I(0,1);
        CMat cfs(K, K, arma::fill::zeros);
        double arg = -0.5*gamma*M_PI;
        for (int p=0; p<K; ++p) {
            for (int q=p%2; q<K; q+=2) {
                // (p+q)/2 and (q-p)/2 are exact integers here (equal parity).
                cfs(p,q) = 4. * std::pow(I,q)
                              * gsl_sf_bessel_Jn((p+q)/2, arg)
                              * gsl_sf_bessel_Jn((q-p)/2, arg);
            }
        }
        // Halve the first row and column (Chebyshev convention for the 0th mode).
        cfs.row(0) *= 0.5;
        cfs.col(0) *= 0.5;
        return cfs;
    }
// arg = -gam*pi/2;
// [pp, qq] = meshgrid(0:K-1);
// cfs = 4*(1i).^qq.*besselj((pp+qq)/2,arg).*besselj((qq-pp)/2, arg);
// cfs(2:2:end,1:2:end) = 0;
// cfs(1:2:end,2:2:end) = 0;
// cfs(1,:) = cfs(1,:)/2;
// cfs(:,1) = cfs(:,1)/2;
// std::tuple<Vec, Vec, double, int> parameters(const Vec& x, double tol)
// {
// int N = x.n_rows;
// Vec s = arma::round(N*x);
// Vec t = s - arma::floor(s/N)*N;
// double gamma = arma::norm(N*x - s, "inf");
// double xi = std::log(std::log(10/tol)/(7*gamma));
// double lw = gsl_sf_lambert_W0(xi);
// int K = std::ceil(5*gamma*std::exp(lw));
// // double log_xi = std::log(xi);
// // double inv_xi = 1.0/xi;
// // double lw = xi - log_xi*(1.0 + inv_xi*(1.0 + inv_xi*(0.5*log_xi - 1.0)));
// return {s, t, gamma, K};
// }
Vec closestGridpoint(const Vec& x)
{
int N = x.n_rows;
return arma::round(N*x);
}
Vec closestFFTSample(const Vec& s)
{
int N = s.n_rows;
return s - arma::floor(s/N)*N;
}
double perturbationParameter(const Vec& x, const Vec& s)
{
int N = x.n_rows;
return arma::norm(N*x-s, "inf");
}
int rankParameter(double gamma, double tol)
{
double xi = std::log(std::log(10./tol)/(7.*gamma));
double lw = gsl_sf_lambert_W0(xi);
return std::ceil(5.*gamma*std::exp(lw));
}
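
    // Illustrative note (our reading of the Ruiz-Antolin & Townsend low-rank
    // NUFFT construction, stated as an assumption rather than a spec): K is
    // the number of Chebyshev/Bessel terms needed for the rank-K factorization
    // to meet the tolerance `tol`; it grows only mildly with gamma and
    // log(1/tol) through the Lambert W term above.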
/** Construct a low rank approximation to A_{jk} = exp(-2*pi*1i*(x_j-s_j/N)*k),
* 0<=j,k<=N-1, where |x_j-j/N|<=gamma<=1/2. */
// std::tuple<Mat<complex>, Mat<complex>> constructUV(const Vec& x, const Vec& omega, int K, double tol)
// {
// int N = omega.n_rows;
// auto [s, t, gamma, K] = parameters(x, tol);
// Vec er = N*x - s;
// CVec scl = arma::exp(-I*M_PI*er);
// CMat u = (chebT(K-1, er/gamma) * besselCoeffs(K, gamma)).each_col() % scl;
// CMat v = chebT(K-1, 2*omega/N-1);
// }
    CMat constructU(const Vec& x, const Vec& s, double gamma, int K)
{
const complex I(0,1);
int N = x.n_rows;
Vec er = N*x - s;
        // Promote real quantities to complex before mixing with complex factors.
        CVec scl = arma::exp(-I*M_PI*arma::conv_to<CVec>::from(er));
        CMat u = arma::conv_to<CMat>::from(chebT(K-1, er/gamma)) * besselCoeffs(K, gamma);
        u.each_col() %= scl; // scale row j by exp(-i*pi*er_j)
        return u;
}
    CMat constructV(int N, int K)
    {
        // Chebyshev polynomials evaluated on the full frequency grid 0..N-1,
        // mapped to [-1, 1].
        return arma::conv_to<CMat>::from(chebT(K-1, 2*arma::linspace(0, N-1, N)/N - 1));
    }
    CMat constructV(const Vec& omega, int K)
    {
        int N = omega.n_rows;
        return arma::conv_to<CMat>::from(chebT(K-1, 2*omega/N - 1));
    }
    // TODO: planned FFTW-style interface, still unimplemented:
    //   plan_nufft2_1d(), execute(), destroy_plan(), plan_nudft2_1d().
    nufftw_plan nufft2(int N, const complex* c, const double* x, complex* f, double tol)
    {
        // Copy the raw sample locations into an armadillo vector.
        Vec xv(x, N);
        // Compute parameters
        Vec s = closestGridpoint(xv);
        Vec t = closestFFTSample(s);
        double gamma = perturbationParameter(xv, s);
        int K = rankParameter(gamma, tol);
        CMat u = constructU(xv, s, gamma, K);
        CMat v = constructV(N, K);
        // The plan is assumed (per nufftw.h) to be callable on the coefficient
        // and output arrays; its body, K diagonally scaled FFTs combined via
        // u and v, is still a TODO in this sketch.
        nufftw_plan p = [u, v, t](const complex* cin, complex* fout) {
            (void)cin; (void)fout; // TODO: apply the rank-K NUFFT using u, v, t
        };
        p(c, f);
        return p;
    }
}
|
theory PALandWiseMenPuzzle2021_4Agents imports Main (* Sebastian Reiche and Christoph Benzmüller, 2021 *)
begin
(* Parameter settings for Nitpick *) nitpick_params[user_axioms=true, format=4, show_all]
typedecl i (* Type of possible worlds *)
type_synonym \<sigma> = "i\<Rightarrow>bool" (* \<D> *)
type_synonym \<tau> = "\<sigma>\<Rightarrow>i\<Rightarrow>bool" (* Type of world depended formulas (truth sets) *)
type_synonym \<alpha> = "i\<Rightarrow>i\<Rightarrow>bool" (* Type of accessibility relations between world *)
(* Some useful relations (for constraining accessibility relations) *)
definition reflexive::"\<alpha>\<Rightarrow>bool" where "reflexive R \<equiv> \<forall>x. R x x"
definition symmetric::"\<alpha>\<Rightarrow>bool" where "symmetric R \<equiv> \<forall>x y. R x y \<longrightarrow> R y x"
definition transitive::"\<alpha>\<Rightarrow>bool" where "transitive R \<equiv> \<forall>x y z. R x y \<and> R y z \<longrightarrow> R x z"
definition euclidean::"\<alpha>\<Rightarrow>bool" where "euclidean R \<equiv> \<forall>x y z. R x y \<and> R x z \<longrightarrow> R y z"
definition intersection_rel::"\<alpha>\<Rightarrow>\<alpha>\<Rightarrow>\<alpha>" where "intersection_rel R Q \<equiv> \<lambda>u v. R u v \<and> Q u v"
definition union_rel::"\<alpha>\<Rightarrow>\<alpha>\<Rightarrow>\<alpha>" where "union_rel R Q \<equiv> \<lambda>u v. R u v \<or> Q u v"
definition sub_rel::"\<alpha>\<Rightarrow>\<alpha>\<Rightarrow>bool" where "sub_rel R Q \<equiv> \<forall>u v. R u v \<longrightarrow> Q u v"
definition inverse_rel::"\<alpha>\<Rightarrow>\<alpha>" where "inverse_rel R \<equiv> \<lambda>u v. R v u"
definition bigunion_rel::"(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<alpha>" ("\<^bold>\<Union>_") where "\<^bold>\<Union> X \<equiv> \<lambda>u v. \<exists>R. (X R) \<and> (R u v)"
definition bigintersection_rel::"(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<alpha>" ("\<^bold>\<Inter>_") where "\<^bold>\<Inter> X \<equiv> \<lambda>u v. \<forall>R. (X R) \<longrightarrow> (R u v)"
(*In HOL the transitive closure of a relation can be defined in a single line.*)
definition tc::"\<alpha>\<Rightarrow>\<alpha>" where "tc R \<equiv> \<lambda>x y.\<forall>Q. transitive Q \<longrightarrow> (sub_rel R Q \<longrightarrow> Q x y)"
(* Lifted HOMML connectives for PAL *)
abbreviation patom::"\<sigma>\<Rightarrow>\<tau>" ("\<^sup>A_"[79]80) where "\<^sup>Ap \<equiv> \<lambda>W w. W w \<and> p w"
abbreviation ptop::"\<tau>" ("\<^bold>\<top>") where "\<^bold>\<top> \<equiv> \<lambda>W w. True"
abbreviation pneg::"\<tau>\<Rightarrow>\<tau>" ("\<^bold>\<not>_"[52]53) where "\<^bold>\<not>\<phi> \<equiv> \<lambda>W w. \<not>(\<phi> W w)"
abbreviation pand::"\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" (infixr"\<^bold>\<and>"51) where "\<phi>\<^bold>\<and>\<psi> \<equiv> \<lambda>W w. (\<phi> W w) \<and> (\<psi> W w)"
abbreviation por::"\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" (infixr"\<^bold>\<or>"50) where "\<phi>\<^bold>\<or>\<psi> \<equiv> \<lambda>W w. (\<phi> W w) \<or> (\<psi> W w)"
abbreviation pimp::"\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" (infixr"\<^bold>\<rightarrow>"49) where "\<phi>\<^bold>\<rightarrow>\<psi> \<equiv> \<lambda>W w. (\<phi> W w) \<longrightarrow> (\<psi> W w)"
abbreviation pequ::"\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" (infixr"\<^bold>\<leftrightarrow>"48) where "\<phi>\<^bold>\<leftrightarrow>\<psi> \<equiv> \<lambda>W w. (\<phi> W w) \<longleftrightarrow> (\<psi> W w)"
abbreviation pknow::"\<alpha>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>K_ _") where "\<^bold>K r \<phi> \<equiv> \<lambda>W w.\<forall>v. (W v \<and> r w v) \<longrightarrow> (\<phi> W v)"
abbreviation ppal::"\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>[\<^bold>!_\<^bold>]_") where "\<^bold>[\<^bold>!\<phi>\<^bold>]\<psi> \<equiv> \<lambda>W w. (\<phi> W w) \<longrightarrow> (\<psi> (\<lambda>z. W z \<and> \<phi> W z) w)"
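(* Note on ppal: announcing \<phi> restricts the set of live worlds W to the \<phi>-worlds,
   mirroring the standard PAL semantics  M,w \<Turnstile> [!\<phi>]\<psi>  iff  (M,w \<Turnstile> \<phi> implies M|\<phi>,w \<Turnstile> \<psi>). *)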
(* Validity of \<tau>-type lifted PAL formulas *)
abbreviation pvalid::"\<tau> \<Rightarrow> bool" ("\<^bold>\<lfloor>_\<^bold>\<rfloor>"[7]8) where "\<^bold>\<lfloor>\<phi>\<^bold>\<rfloor> \<equiv> \<forall>W.\<forall>w. W w \<longrightarrow> \<phi> W w"
(* Agent Knowledge, Mutual Knowledge, Common Knowledge *)
abbreviation "EVR A \<equiv> \<^bold>\<Union> A"
abbreviation "DIS A \<equiv> \<^bold>\<Inter> A"
abbreviation agttknows::"\<alpha>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>K\<^sub>_ _") where "\<^bold>K\<^sub>r \<phi> \<equiv> \<^bold>K r \<phi>"
abbreviation evrknows::"(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>E\<^sub>_ _") where "\<^bold>E\<^sub>A \<phi> \<equiv> \<^bold>K (EVR A) \<phi>"
abbreviation prck::"(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<tau>\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>C\<^sub>_\<^bold>\<lparr>_\<^bold>|_\<^bold>\<rparr>")
where "\<^bold>C\<^sub>A\<^bold>\<lparr>\<phi>\<^bold>|\<psi>\<^bold>\<rparr> \<equiv> \<lambda>W w. \<forall>v. \<not>(tc (intersection_rel (EVR A) (\<lambda>u v. W v \<and> \<phi> W v)) w v) \<or> (\<psi> W v)"
abbreviation pcmn::"(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>C\<^sub>_ _") where "\<^bold>C\<^sub>A \<phi> \<equiv> \<^bold>C\<^sub>A\<^bold>\<lparr>\<^bold>\<top>\<^bold>|\<phi>\<^bold>\<rparr>"
abbreviation disknows :: "(\<alpha>\<Rightarrow>bool)\<Rightarrow>\<tau>\<Rightarrow>\<tau>" ("\<^bold>D\<^sub>_ _") where "\<^bold>D\<^sub>A \<phi> \<equiv> \<^bold>K (DIS A) \<phi>"
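(* Note: common knowledge \<^bold>C\<^sub>A \<phi> is knowledge along the transitive closure of the union
   of the agents' accessibility relations, relativized to the currently live worlds. *)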
(* Introducing "Defs" as the set of the above definitions; useful for convenient unfolding *)
named_theorems Defs
declare reflexive_def[Defs] symmetric_def[Defs] transitive_def[Defs] euclidean_def[Defs]
intersection_rel_def[Defs] union_rel_def[Defs] sub_rel_def[Defs] inverse_rel_def[Defs]
bigunion_rel_def[Defs] tc_def[Defs]
abbreviation "S5Agent i \<equiv> reflexive i \<and> transitive i \<and> euclidean i"
abbreviation "S5Agents A \<equiv> \<forall>i. (A i \<longrightarrow> S5Agent i)"
(***********************************************************************************************)
(***** Wise Men Puzzle *****)
(***********************************************************************************************)
(*** Encoding of the wise men puzzle in PAL ***)
(* Agents *)
consts a::"\<alpha>" b::"\<alpha>" c::"\<alpha>" d::"\<alpha>" (* Agents modeled as accessibility relations *)
abbreviation Agent::"\<alpha>\<Rightarrow>bool" ("\<A>") where "\<A> x \<equiv> x = a \<or> x = b \<or> x = c \<or> x = d"
axiomatization where group_S5: "S5Agents \<A>"
(*** Encoding of the wise men puzzle in PAL ***)
consts ws::"\<alpha>\<Rightarrow>\<sigma>"
axiomatization where WM1: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^sup>Aws a \<^bold>\<or> \<^sup>Aws b \<^bold>\<or> \<^sup>Aws c \<^bold>\<or> \<^sup>Aws d)\<^bold>\<rfloor>"
axiomatization where
 (* Common knowledge: If x does not have a white spot then y knows this *)
WM2ab: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws a) \<^bold>\<rightarrow> (\<^bold>K\<^sub>b (\<^bold>\<not>(\<^sup>Aws a))))\<^bold>\<rfloor>" and
WM2ac: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws a) \<^bold>\<rightarrow> (\<^bold>K\<^sub>c (\<^bold>\<not>(\<^sup>Aws a))))\<^bold>\<rfloor>" and
WM2ad: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws a) \<^bold>\<rightarrow> (\<^bold>K\<^sub>d (\<^bold>\<not>(\<^sup>Aws a))))\<^bold>\<rfloor>" and
WM2ba: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws b) \<^bold>\<rightarrow> (\<^bold>K\<^sub>a (\<^bold>\<not>(\<^sup>Aws b))))\<^bold>\<rfloor>" and
WM2bc: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws b) \<^bold>\<rightarrow> (\<^bold>K\<^sub>c (\<^bold>\<not>(\<^sup>Aws b))))\<^bold>\<rfloor>" and
WM2bd: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws b) \<^bold>\<rightarrow> (\<^bold>K\<^sub>d (\<^bold>\<not>(\<^sup>Aws b))))\<^bold>\<rfloor>" and
WM2ca: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws c) \<^bold>\<rightarrow> (\<^bold>K\<^sub>a (\<^bold>\<not>(\<^sup>Aws c))))\<^bold>\<rfloor>" and
WM2cb: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws c) \<^bold>\<rightarrow> (\<^bold>K\<^sub>b (\<^bold>\<not>(\<^sup>Aws c))))\<^bold>\<rfloor>" and
WM2cd: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws c) \<^bold>\<rightarrow> (\<^bold>K\<^sub>d (\<^bold>\<not>(\<^sup>Aws c))))\<^bold>\<rfloor>" and
WM2da: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws d) \<^bold>\<rightarrow> (\<^bold>K\<^sub>a (\<^bold>\<not>(\<^sup>Aws d))))\<^bold>\<rfloor>" and
WM2db: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws d) \<^bold>\<rightarrow> (\<^bold>K\<^sub>b (\<^bold>\<not>(\<^sup>Aws d))))\<^bold>\<rfloor>" and
WM2dc: "\<^bold>\<lfloor>\<^bold>C\<^sub>\<A> (\<^bold>\<not>(\<^sup>Aws d) \<^bold>\<rightarrow> (\<^bold>K\<^sub>c (\<^bold>\<not>(\<^sup>Aws d))))\<^bold>\<rfloor>"
(* Automated solutions of the Wise Men Puzzle with 4 agents *)
theorem whitespot_c_1: "\<^bold>\<lfloor>\<^bold>[\<^bold>!\<^bold>\<not>\<^bold>K\<^sub>a(\<^sup>Aws a)\<^bold>](\<^bold>[\<^bold>!\<^bold>\<not>\<^bold>K\<^sub>b(\<^sup>Aws b)\<^bold>](\<^bold>[\<^bold>!\<^bold>\<not>\<^bold>K\<^sub>c(\<^sup>Aws c)\<^bold>](\<^bold>K\<^sub>d (\<^sup>Aws d))))\<^bold>\<rfloor>"
using WM1 WM2ba WM2ca WM2cb WM2da WM2db WM2dc
unfolding Defs
by (smt (verit))
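(* Reading: after a, b and c have each publicly announced ignorance of their own spot,
   agent d can infer from WM1 and the WM2 axioms that d's own spot is white. *)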
theorem whitespot_c_2:
"\<^bold>\<lfloor>\<^bold>[\<^bold>!\<^bold>\<not>((\<^bold>K\<^sub>a (\<^sup>Aws a)) \<^bold>\<or> (\<^bold>K\<^sub>a (\<^bold>\<not>\<^sup>Aws a)))\<^bold>](\<^bold>[\<^bold>!\<^bold>\<not>((\<^bold>K\<^sub>b (\<^sup>Aws b)) \<^bold>\<or> (\<^bold>K\<^sub>b (\<^bold>\<not>\<^sup>Aws b)))\<^bold>](\<^bold>[\<^bold>!\<^bold>\<not>((\<^bold>K\<^sub>c (\<^sup>Aws c)) \<^bold>\<or> (\<^bold>K\<^sub>c (\<^bold>\<not>\<^sup>Aws c)))\<^bold>](\<^bold>K\<^sub>d (\<^sup>Aws d))))\<^bold>\<rfloor>"
using whitespot_c_1
unfolding Defs sledgehammer[verbose]()
oops
(* Consistency confirmed by nitpick *)
lemma True nitpick [satisfy] oops (* model found *)
end |
Require Import Coq.Vectors.Vector.
Require Import Coq.PArith.PArith.
Require Import Lia.
Require Import Equations.Prop.Equations.
Set Equations With UIP.
Generalizable All Variables.
Definition obj_idx : Type := positive.
Definition arr_idx (n : nat) : Type := Fin.t n.
Import VectorNotations.
Definition obj_pair := (obj_idx * obj_idx)%type.
Inductive Term {a} (tys : Vector.t (obj_idx * obj_idx) a)
: obj_idx -> obj_idx -> Type :=
| Ident : forall dom, Term tys dom dom
| Morph (f : arr_idx a) : Term tys (fst (tys[@f])) (snd (tys[@f]))
| Comp (dom mid cod : obj_idx)
(f : Term tys mid cod) (g : Term tys dom mid) :
Term tys dom cod.
Arguments Ident {a tys dom}.
Arguments Morph {a tys} f.
Arguments Comp {a tys dom mid cod} f g.
Import Sigma_Notations.
Require Import Wellfounded Relations.
Derive NoConfusion for positive.
Derive EqDec for positive.
Derive Signature NoConfusion Subterm for Term.
Fixpoint term_size
{a : nat} {tys : Vector.t obj_pair a}
{dom cod} (t : @Term a tys dom cod) : nat :=
match t with
| Ident => 1%nat
| Morph _ => 1%nat
| Comp f g => 1%nat + term_size f + term_size g
end.
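(* A small sanity check on a hypothetical one-arrow signature: the identity
   term always has size 1, whatever the type vector is. *)
Example term_size_ident (tys : Vector.t obj_pair 1) :
  term_size (@Ident 1 tys 1%positive) = 1%nat.
Proof. reflexivity. Qed.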
Set Program Mode.
Show Obligation Tactic.
Equations? comp_assoc_simpl_rec {a : nat} {tys dom cod}
(t : @Term a tys dom cod) : {t' : @Term a tys dom cod | term_size t' <= term_size t}
by wf (term_size t) lt :=
comp_assoc_simpl_rec (Comp f g) with comp_assoc_simpl_rec f => {
| exist _ (Comp i j) Hle => Comp i (comp_assoc_simpl_rec (Comp j g));
| x => Comp x (comp_assoc_simpl_rec g)
};
comp_assoc_simpl_rec x := x.
Proof.
1-2,4:lia.
all:(simpl; try Program.Tactics.destruct_call comp_assoc_simpl_rec; simpl in *; try lia).
Time Defined.
Definition comp_assoc_simpl {a}
{tys : Vector.t obj_pair a} {dom cod} (t : Term tys dom cod) : Term tys dom cod :=
comp_assoc_simpl_rec t.
Lemma comp_assoc_simpl_ident {a} {tys : Vector.t obj_pair a} {dom cod} (g : Term tys dom cod) :
comp_assoc_simpl (Comp Ident g) = Comp Ident (comp_assoc_simpl g).
Proof.
unfold comp_assoc_simpl.
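  (* Make the recursor opaque so simpl cannot unfold it; the rewrite below then
     proceeds via the Equations-generated unfolding lemmas. *)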
Opaque comp_assoc_simpl_rec.
autorewrite with comp_assoc_simpl_rec. simpl. reflexivity.
Qed.
Unset Program Mode.
Open Scope program_scope.
Lemma comp_assoc_simpl_comp {a} {tys : Vector.t obj_pair a} {dom mid cod}
(f : Term tys mid cod) (g : Term tys dom mid) :
comp_assoc_simpl (Comp f g) =
match comp_assoc_simpl f in Term _ mid cod return Term tys dom mid -> Term tys dom cod with
| Comp f f' => fun g => Comp f (comp_assoc_simpl (Comp f' g))
| x => fun g => Comp x (comp_assoc_simpl g) end g.
Proof.
unfold comp_assoc_simpl.
simp comp_assoc_simpl_rec.
revert dom g. Tactics.reverse.
let felim := constr:(fun_elim (f := @comp_assoc_simpl_rec)) in
unshelve refine_ho (felim _ _ _ _ _ _); simpl; intros; try reflexivity.
Qed.
|
If $0 \leq x \leq 1$, then the derivative of the function $f(x) = r \cdot \exp(\iota \cdot \text{linepath}(s, t, x))$ at $x$ is $\iota \cdot r \cdot (t - s) \cdot \exp(\iota \cdot \text{linepath}(s, t, x))$.
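A quick chain-rule check, using $\text{linepath}(s, t, x) = (1 - x) \cdot s + x \cdot t$, whose derivative in $x$ is $t - s$:
\[
\frac{d}{dx}\, r\,e^{\iota\,\text{linepath}(s,t,x)}
= r\,e^{\iota\,\text{linepath}(s,t,x)} \cdot \iota\,(t - s)
= \iota\, r\,(t - s)\, e^{\iota\,\text{linepath}(s,t,x)}.
\]
|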
### ABM figure for showing proportion by gear sectors for Alternatives 4 and 5, and maybe 6
## dh 3/1/2018
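# Hypothetical usage (assumed input shape): mratio and mrpn are data frames with
# years as row names and six area columns holding proportions, e.g.
#   mratio <- read.csv("mature_ratio.csv", row.names = 1)
#   mrpn   <- read.csv("mature_rpn.csv",  row.names = 1)
#   app_plot(mratio, mrpn)  # writes ratio_allocation.png and mature_rpn_allocation.png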
app_plot<-function(mratio,mrpn) {
library(ggplot2)
library(ggthemes)
library(extrafont)
library(plyr)
library(scales)
library(reshape2)
mratio_l<-melt(mratio)
mratio_l$Year<-rep(as.numeric(row.names(mratio)),6)
names(mratio_l)<-c("Area","Proportion","Year")
mratio_l$Proportion<-round(mratio_l$Proportion*100,3)
fill <- topo.colors(6)
p4 <- ggplot() +
geom_bar(aes(y = Proportion, x = Year, fill = Area), data = mratio_l, stat="identity") +
# geom_text(data=mratio_l, aes(x = Year, y = Proportion, label = paste0(Proportion,"%")),
# colour="black", family="Tahoma", size=3.5) +
# geom_hline(yintercept = trawl_ratio, colour="dark gray",size=2)+
theme(legend.position="bottom", legend.direction="horizontal",legend.title = element_blank()) +
theme(legend.text=element_text(size=12))+
scale_x_continuous(breaks=seq(2000,as.numeric(row.names(mratio)[nrow(mratio)]),3)) +
scale_y_continuous(labels = dollar_format(suffix = "%", prefix = "")) +
labs(x="Year", y="Percentage",size=14) +
ggtitle("Allocation by ratio of mature females")+
scale_fill_manual(values=fill) +
theme(axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_line(colour = "#d3d3d3"), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank()) +
theme(plot.title = element_text(size = 18, family = "Tahoma", face = "bold"),
text=element_text(family="Tahoma"),
axis.title=element_text(size=16),
axis.text.x=element_text(colour="black", size = 12),
axis.text.y=element_text(colour="black", size = 12))
p4
ggsave("ratio_allocation.png",width=7,height=5,dpi=300)
mratio_l<-melt(mrpn)
# years taken from mrpn's own row names (assumed to match mratio's)
mratio_l$Year<-rep(as.numeric(row.names(mrpn)),6)
names(mratio_l)<-c("Area","Proportion","Year")
mratio_l$Proportion<-round(mratio_l$Proportion*100,3)
fill <- topo.colors(6)
p4 <- ggplot() +
geom_bar(aes(y = Proportion, x = Year, fill = Area), data = mratio_l, stat="identity") +
# geom_text(data=mratio_l, aes(x = Year, y = Proportion, label = paste0(Proportion,"%")),
# colour="black", family="Tahoma", size=3.5) +
# geom_hline(yintercept = trawl_ratio, colour="dark gray",size=2)+
theme(legend.position="bottom", legend.direction="horizontal",legend.title = element_blank()) +
theme(legend.text=element_text(size=12))+
scale_x_continuous(breaks=seq(2000,as.numeric(row.names(mrpn)[nrow(mrpn)]),3)) +
scale_y_continuous(labels = dollar_format(suffix = "%", prefix = "")) +
labs(x="Year", y="Percentage",size=14) +
ggtitle("Allocation by RPN of mature females")+
scale_fill_manual(values=fill) +
theme(axis.line = element_line(size=1, colour = "black"),
panel.grid.major = element_line(colour = "#d3d3d3"), panel.grid.minor = element_blank(),
panel.border = element_blank(), panel.background = element_blank()) +
theme(plot.title = element_text(size = 18, family = "Tahoma", face = "bold"),
text=element_text(family="Tahoma"),
axis.title=element_text(size=16),
axis.text.x=element_text(colour="black", size = 12),
axis.text.y=element_text(colour="black", size = 12))
p4
ggsave("mature_rpn_allocation.png",width=7,height=5,dpi=300)
} |
A set $S$ is Lebesgue measurable with finite measure if and only if the indicator function of $S$ is Lebesgue integrable.
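Finiteness is what drives the equivalence: for any measurable $S$,
\[
\int \mathbf{1}_S \, d\lambda = \lambda(S),
\]
so $\mathbf{1}_S$ is integrable exactly when $S$ is measurable with $\lambda(S) < \infty$; for instance $S = [0, \infty)$ is measurable but $\mathbf{1}_S$ is not integrable.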