text
stringlengths 0
3.34M
|
---|
State Before: q : ℚ
m : ℤ
n : ℕ
x : ℝ
⊢ Irrational (↑m * x) ↔ m ≠ 0 ∧ Irrational x State After: no goals Tactic: rw [← cast_coe_int, irrational_rat_mul_iff, Int.cast_ne_zero] |
Eagle Claw Grandmasters Leung Shum and Lily Lau believe " <unk> Tong " ( the Cantonese rendering of his name ) was a monk who brought young Yue to the Shaolin Monastery and taught him a set of hand techniques , which Yue later adapted to create his Ying Kuen ( Eagle fist ) . Liang <unk> states practitioners of Emei <unk> Qigong believe Yue trained under Zhou as a child and competed to become China 's top fighter at an early age . Their lineage story dictates Zhou also took Yue to a " Buddhist hermit " who taught him said qigong style . Northern Praying Mantis Master Yuen <unk> says Zhou taught Yue the " same school " of martial arts as he did his Water Margin students and that the General was the originator of the praying mantis technique " Black Tiger <unk> [ sic ] Heart " . Although Martial arts historian Stanley Henning admits that Yue 's biographies do not mention boxing , he says " he [ Yue ] almost certainly did practice some form of bare handed fighting " to prepare for his weapons training . But he does not suggest who Yue might have learned it from .
|
import topology.basic
import topology.compact_open
import data.nat.prime
import data.real.basic
import data.real.irrational
import data.complex.basic
import data.fin.basic
import geometry.euclidean.basic
import analysis.inner_product_space.pi_L2
import algebra.group.defs
import algebra.field.basic
import combinatorics.configuration
import ring_theory.polynomial.basic
import group_theory.free_group
import combinatorics.simple_graph.basic
import ring_theory.integral_closure
import data.fintype.card
import category_theory.category.basic
import ring_theory.discrete_valuation_ring
import group_theory.torsion
import linear_algebra.matrix.charpoly.basic
import algebra.order.absolute_value
import analysis.convex.basic
import topology.uniform_space.uniform_convergence_topology
import topology.sequences
import analysis.normed.group.infinite_sum
import data.nat.choose.sum
import group_theory.specific_groups.cyclic
import group_theory.order_of_element
import analysis.mean_inequalities
import analysis.normed_space.banach
import topology.algebra.continuous_monoid_hom
import linear_algebra.matrix.symmetric
import analysis.inner_product_space.spectrum
import ring_theory.class_group
import ring_theory.dedekind_domain.basic
import ring_theory.principal_ideal_domain
import model_theory.satisfiability
import probability.integration
import ring_theory.simple_module
import category_theory.preadditive.schur
import representation_theory.maschke
import topology.paracompact
import combinatorics.simple_graph.coloring
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
--OUTPUT 1
-- NOTE(review): machine-generated placeholder (proof is `sorry`). The class names
-- `fintype_mul_class`/`fintype_add_class` and the notation `≃ₗ[*]` are not mathlib
-- names — this statement presumably does not elaborate; confirm before relying on it.
theorem finite.abelian.hom_iso (G H : Type*) [group G] [group H] [fintype G] [fintype H] [fintype_mul_class G] [fintype_mul_class H]
[fintype_add_class G] [fintype_add_class H] [add_comm_group G] [add_comm_group H] [mul_comm_group G] [mul_comm_group H] :
is_iso (λ g : G ≃ₗ[*] H, G ≃ₗ[*] H) := sorry
/-! #brief Prove that every finite abelian group is isomorphic to a finite direct sum of cyclic groups
-/
--Prove that every finite abelian group is isomorphic to a finite direct sum of cyclic groups
-- NOTE(review): placeholder (`sorry`). The expressions `G ^ n`, `A.prod g` and
-- `is_iso f` look ill-typed for a bare group `G` — verify the intended statement.
theorem finite.abelian.direct_sum_cyclic (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G]
[add_comm_group G] [mul_comm_group G] : ∃ n : ℕ, Π (A : G ^ n), ∃ (g : G), ∃ (f : G ^ n), A.prod g ∧ (is_unit f) ∧ is_iso f := sorry
/-! #brief Prove that any homomorphism between two finite abelian groups can be extended in a uniform way to a homomorphism between their direct sum
-/
--Prove that any homomorphism between two finite abelian groups can be extended in a uniform way to a homomorphism between their direct sum
-- NOTE(review): placeholder (`sorry`). Uses undefined notation `≃ₗ[*]` and `G ^ ∞`;
-- presumably does not elaborate — confirm the intended extension statement.
theorem finite.abelian.ext (G H : Type*) [group G] [group H] [fintype G] [fintype H] [fintype_mul_class G] [fintype_mul_class H]
[fintype_add_class G] [fintype_add_class H] [add_comm_group G] [add_comm_group H] [mul_comm_group G] [mul_comm_group H] :
∀ f : G ≃ₗ[*] H, ∃ g : G ^ ∞ ≃ₗ[*] H ^ ∞, ∀ n: ℕ, ∃ k: ℕ, ∃ h : G ^ n ≃ₗ[*] H ^ k, ∀ (A : G ^ n), f A = f.to_fun A := sorry
/-! #brief Prove that any finite abelian group is cyclically equivalent to an unique group
-/
--Prove that any finite abelian group is cyclically equivalent to an unique group
-- NOTE(review): placeholder (`sorry`); the bound variables `k` and `h` are never
-- used in the conclusion, so the statement is likely not what was intended.
theorem finite.abelian.cyclically_iso (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G]
[add_comm_group G] [mul_comm_group G] : ∃ (n : ℕ) (f : G ≃ₗ[*] G ^ n), ∀ (g: G ^ n), ∃ (k : G ^ n) (h : G ^ n), f g = f.to_fun g := sorry
/-! #brief Prove that any finite abelian group is cyclically iso to a group whose order is a power of a prime
-/
--Prove that any finite abelian group is cyclically iso to a group whose order is a power of a prime
-- NOTE(review): placeholder (`sorry`); `p` is not constrained to be prime here,
-- and `k`/`h` are unused in the conclusion — verify the intended statement.
theorem finite.abelian.cyclically_iso_prime_power (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G]
[add_comm_group G] [mul_comm_group G] : ∃ (p : ℕ) (n : ℕ) (f : G ≃ₗ[*] G ^ p ^ n), ∀ (g : G ^ p ^ n), ∃ (k : G ^ p ^ n) (h : G ^ p ^ n), f g = f.to_fun g := sorry
/-! #brief Prove that Cyclic Groups are Abelian
-/
--Prove that Cyclic Groups are Abelian
-- NOTE(review): placeholder (`sorry`). `group_hom.ker f` is applied to an element
-- `f : G` rather than a homomorphism — presumably does not type-check.
theorem cyclic.abelian (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [mul_comm_group G] :
∀ (g : G), ∃ (n : ℕ), ∃ (f : G), is_unit f ∧ (group_hom.ker f = ⊥) → is_subgroup (subtype.val '' {g | ↑g = 1}) 1 := sorry
/-! #brief Prove that cyclic groups are isomorphic to their duals
-/
--Prove that cyclic groups are isomorphic to their duals
-- NOTE(review): placeholder (`sorry`). Note the duplicated instance binders
-- `[fintype_mul_class G]`/`[fintype_add_class G]`, and `f : G ^ G` has no
-- `to_fun` field — likely does not elaborate.
theorem cyclic.dual (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [add_comm_group G]
[mul_comm_group G] [fintype_mul_class G] [fintype_add_class G] : ∃ (g : G), ∃ (f : G ^ G), f.to_fun g = 1 := sorry
/-! #brief Prove that the finite group G is cyclically isomorphic to a finite group whose elements are powers of a generator
-/
--Prove that the finite group G is cyclically isomorphic to a finite group whose elements are powers of a generator
-- NOTE(review): placeholder (`sorry`); `f.to_fun`/`h.to_fun` are projections of a
-- structure, but `f h : G ^ n` are not structures here — verify intended statement.
theorem cyclic.generator (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [mul_comm_group G] :
∃ (g : G), ∃ (n : ℕ), ∃ (f : G ^ n), ∃ (h : G ^ n), is_unit h ∧ f.to_fun g = 1 ∧ f = h.to_fun g := sorry
/-! #brief Prove that a finite abelian group whose order is a power of a prime is cyclically isomorphic to a finite cyclic group
-/
--Prove that a finite abelian group whose order is a power of a prime is cyclically isomorphic to a finite cyclic group
-- NOTE(review): placeholder (`sorry`). `[fintype_add_class G]` is bound twice and
-- `p` is not required to be prime — verify the intended statement.
theorem cyclic.finite.power_prime (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [fintype_add_class G]
[add_comm_group G] [mul_comm_group G] : ∃ (p : ℕ) (n : ℕ), ∃ (g : G), ∃ (f : G ^ p ^ n), ∃ (h : G ^ p ^ n), is_unit h ∧ f.to_fun g = 1 ∧ f = h.to_fun g := sorry
/-! #brief Prove that the order of an element in a finite abelian group is a power of a prime
-/
--Prove that the order of an element in a finite abelian group is a power of a prime
-- NOTE(review): placeholder (`sorry`); `h : G ^ p ^ n` has no `to_fun` projection,
-- so this presumably does not elaborate — confirm intended statement.
theorem cyclic.finite.el_order_prime_power (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [add_comm_group G]
[mul_comm_group G] : ∀ (g : G), ∃ (p : ℕ) (n : ℕ) (h : G ^ p ^ n), is_unit h ∧ h.to_fun g = 1 := sorry
/-! #brief Prove that a finite group G whose elements are the powers of a generator is cyclically isomorphic to a group of the form G ^ p ^ n
-/
--Prove that a finite group G whose elements are the powers of a generator is cyclically isomorphic to a group of the form G ^ p ^ n
theorem cyclic.finite.gen_iso (G : Type*) [group G] [fintype G] [fintype_mul_class G] [fintype_add_class G] [add_comm_group G]
[mul_comm_group G] : ∀ (g : G), ∃ (n :
--OUTPUT 2
-- NOTE(review): placeholder (`sorry`). `fin_group`, `homomorphism`,
-- `dual_homomorphism` and `dual_fin_group` are not mathlib names — verify they
-- are defined elsewhere before relying on this statement.
theorem my_theorem(g h : fin_group (α)) (hg : g ≃* h): homomorphism g h = dual_homomorphism (dual_fin_group g) (dual_fin_group h) := sorry
--This is definitely not the way to go about proving this, but I am having a lot of trouble figuring out even what the correct statement is, since I am new (in fact this is the first thing I have tried to prove in Lean).
--The problem is that I cannot use the fact that Homomorphisms between finite abelian groups are isomorphic to dual homomorphisms of their duals, so I was wondering if there is a way to show this in mathlib. Thanks!
import algebra.group.hom order.lattice
universes u v w
variables {α : Type u} {β : Type v} {γ : Type w}
-- NOTE(review): mathlib's `equiv` takes two type arguments, but four are supplied
-- here, and `fin_group`/`group_hom.to_hom` are not mathlib names — presumably
-- does not elaborate; treat as a sketch only.
def fin_group_hom_equiv : equiv (fin_group α) (fin_group β) (group_hom α β) (group_hom β α) :=
{ to_fun := λ f, f.to_hom.symm,
inv_fun := λ g, g.to_hom,
left_inv := λ f, by ext; simp [f.to_hom, (f.to_hom).symm],
right_inv := λ g, by ext; simp [g.to_hom, (g.to_hom).symm] }
-- NOTE(review): same issues as the equiv above (four arguments to `equiv`,
-- non-mathlib names), plus placeholder `_` fields in the anonymous constructor —
-- this cannot compile as written; treat as a sketch only.
def group_hom_fin_group_equiv : equiv (group_hom α β) (group_hom β α) (fin_group α) (fin_group β) :=
{ to_fun := λ g, ⟨ker g.to_hom, _⟩,
inv_fun := λ g, ⟨ker g.to_hom, _⟩,
left_inv := λ g, by ext; simp [ker, g.to_hom, smul_eq_mul, mul_comm],
right_inv := λ g, by ext; simp [ker, g.to_hom, smul_eq_mul, mul_comm] }
-- NOTE(review): the second argument `h` is never used in the body — presumably a
-- bug or an unfinished sketch; verify the intended definition.
def dual_homomorphism' : fin_group α → fin_group β → set (α → β) :=
λ g h, (g.to_hom.symm.ker.to_hom.symm).image
-- NOTE(review): body is byte-identical to `dual_homomorphism'` even though the
-- result type is flipped (`set (β → α)`) and `h` is unused — looks like a
-- copy-paste error; verify the intended definition.
def homomorphism' : fin_group α → fin_group β → set (β → α) :=
λ g h, (g.to_hom.symm.ker.to_hom.symm).image
variables (g h : fin_group α)
-- NOTE(review): this proof script is missing its closing `end` in the source as
-- given, and the hypothesis name `h` shadows the group `h` bound above — the
-- lemma cannot compile as written; treat as an unfinished sketch.
lemma homomorphism_eq_dual_homomorphism : (dual_homomorphism g h : set (α → β)) = homomorphism h g :=
begin
ext f,
split,
{ intro h,
let h' := @h _ g.is_group_hom,
simp [h'] at h' ⊢,
exact h' },
{ intro h,
let h' := @h _ (g.is_group_hom.symm),
simp [h'] at h' ⊢,
exact h' }
--OUTPUT 3
-- NOTE(review): missing its closing `end` in the source as given; `hom_gp`,
-- `symm_gp` and `g.eq_ker` are not mathlib names, and `g.ker` is not defined for
-- a `≃*` — this cannot compile as written; treat as an unfinished sketch.
theorem duals.hom_gp_bij : ∀{α}[fintype α] {G H}[decidable_eq α] (f : G → H) (g : G ≃* H),
g.ker = fintype.univ → hom_gp g.symm_gp → (f = g.to_fun) :=
begin
intros α h G H h2 f,
induction h,
intros g h1 h2,
cases h2,
congr,
ext,
simp only [g.eq_ker],
split,
simp only[g.eq_ker],
intros h2 h3,
have h4 := h2.2 h3,
rw finset.mem_univ at h4,
exact h4,
simp only[g.eq_ker],
intros h2 h3,
have h4 := h2.2 h3,
rw finset.mem_univ at h4,
exact h4,
|
\chapter[Future work and Proposed simulations]{Future work and Proposed
simulations}
\section{Summary}
The need for this work has been shown by a summary of the current state of the
art of \gls{MSR} depletion simulator capabilities. The literature review in
Chapter 1 concluded that most \gls{MSR} depletion simulators typically assume
ideal (rather than realistically constrained) poison removal rates for the
nuclear system performance modeling. Moreover, most of the simulators assumed
constant extraction efficiency vectors, which must be determined by the user
in the input file and cannot be a function of other parameters. The Python
toolkit, SaltProc v1.0, will directly couple with the Serpent 2 Monte Carlo
depletion code for liquid-fueled \gls{MSR} depletion simulation to enable
realistic online reprocessing system modeling. The SaltProc v1.0 seeks to be a
universal tool for fuel composition evolution analysis in \glspl{MSR} with
taking into account the complex fuel salt reprocessing system. Such
reprocessing systems may consist of multiple components with variable removal
efficiencies and rates. Moreover, these components can be connected in series,
parallel, or a combination, which will be accurately treated in the SaltProc
v1.0. Section~\ref{sec:reproc-plant} details the generic design of \gls{MSR}
fuel salt reprocessing systems. Section~\ref{sec:tool_design} describes the
SaltProc v1.0 architecture and design that is required to successfully model
comprehensive liquid-fueled \glspl{MSR} with online fuel reprocessing systems.
Figure~\ref{fig:workflow} shows an outline of this work. The current chapter
details each Stage of the proposed work.
\begin{sidewaysfigure}[ht!] % replace 't' with 'b' to force it to
\centering
\includegraphics[width=1.06\textwidth]{progress_chart.pdf}
\caption{Workflow for the simulations proposed in this work.}
\label{fig:workflow}
\end{sidewaysfigure}
\FloatBarrier
\section{Stage 1: Basic online reprocessing demonstration}
In Stage 1, \gls{MSR} online reprocessing simulation capabilities have been
reviewed and summarized (Chapter 1). SaltProc v0.1 was demonstrated for
simplified burnup calculation for the \gls{MSBR} as a part of my M.Sc. thesis
\cite{rykhlevskii_advanced_2018} and published paper
\cite{rykhlevskii_modeling_2019}. These efforts illuminated depletion of the
fuel salt in the \gls{MSBR} for 60 years of operation and took into account
the following processes:
\begin{enumerate}
\item \gls{FP} removal from the salt with fixed, ideal extraction
efficiencies (the fuel reprocessing system removed 100\% of target
poisons).
\item $^{233}$Pa removal (100\%) and feed of an equal mass of $^{233}$U
into the core (instantaneous $^{233}$Pa decay to $^{233}$U was assumed).
\item Fresh fertile material ($^{232}$Th) feed to maintain the constant
fuel salt inventory.
\end{enumerate}
Additionally, the effect of removing fission products from the fuel salt was
investigated separately for a different group of \glspl{FP} (noble gases,
noble and seminoble metals, rare earth elements). As expected, removing
fission products provides significant neutronic benefit and enables a longer
core lifetime. Section~\ref{sec:pre-results-msbr} described key findings after
completing Stage 1.
\section{Stage 2: SaltProc v1.0 demonstration and validation for the TAP}
Simulating a realistic multi-component fuel reprocessing system is important
for calculating an accurate fuel salt composition. SaltProc v0.1 was
completely refactored for modeling a complicated salt reprocessing system. To
demonstrate SaltProc v1.0 capabilities, we have created a full-core \gls{TAP}
\gls{MSR} model in Serpent 2 \cite{chaube_tap_2019} which was described in
detail in Section~\ref{sec:tap_model}. Moreover, the multi-component fuel
reprocessing system of the \gls{TAP} was developed at this stage
(Section~\ref{sec:stage2-demo}). Section~\ref{sec:stage2-demo} also presented
preliminary results of Stage 2. The Stage 2 demonstration case has the following
advantages over Stage 1:
\begin{itemize}
\item SaltProc v0.1 (Stage 1) approximated the fuel salt reprocessing
system
as a single ``black'' box, which removes the entire mass (100\% removal
efficiency) of processed elements at once. In contrast, SaltProc v1.0
treats the fuel reprocessing system as a complex structure of components,
each removing a specific set of elements with specific extraction
efficiency.
\item SaltProc v1.0 inherently checks mass conservation at each depletion
step and dynamically calculates feed stream to maintain the fuel salt
inventory constant.
\item SaltProc v1.0 tracks the waste stream from each component.
\end{itemize}
The foremost future effort in this stage is to enable switching between
multiple Serpent geometries during simulation. For the \gls{TAP} concept, the
number of moderator rods in the core varies from 1332 at the startup to 6700
at the \gls{EOL}. The user will have an option to choose when SaltProc v1.0
should switch to next geometry: (1) after a specific depletion time (e.g., 18
months which is a common maintenance/refueling shutdown interval for
\glspl{LWR}); or (2) when the effective multiplication factor reaches a
specific value (e.g., $1.00<k_{eff} < 1.002$). Additionally, SaltProc v1.0
will correct the total fuel salt inventory in the primary loop to compensate
for the core geometry change. Overall, the adjustable geometry capability will
realistically simulate long-term (60 year) operation of the \gls{TAP} reactor
to obtain accurate fuel salt composition at different moments during operation.
Results obtained in Stage 2 will be used for code-to-code verification with
ChemTriton/Shift results for full-core \gls{TAP} core geometry from the most
recent \gls{ORNL} technical report TM-2017/475 \cite{betzler_assessment_2017}.
Notably, the fuel salt composition evolution during the \gls{TAP} reactor
operation and corresponding core geometry are determinative for all next
stages.
This work is developed with a test-driven development paradigm. Specifically,
before any new functionality is implemented, a suite of tests is written,
which as carefully define its expected behavior as possible. The code is then
written to pass the test suite. In this way, the tool developed in this work
is expected to be comprehensively tested in parallel with its development.
Thus, after code-to-code verification with ChemTriton/Shift multiple-component
integration tests will be added to the test harness to make sure that future
changes in the code will not break previous functionality.
Test problems will help comprehensively define and confirm each unit of the
demonstration functionality. These problems will include fundamental,
information-passing tests as well as more challenging multiple-component
integration tests. Every unit of functionality within the toolkit will be
tested as an integral part of development.
This milestone will result in a processing system model capable of simulating
various liquid-fueled \glspl{MSR} with multi-component fuel reprocessing
systems but with constant separation efficiencies, defined at runtime.
Additionally, this stage will demonstrate a key feature of the \gls{TAP}
\gls{MSR} - adjusting the moderator rod configuration - which is necessary to
maintain the reactor criticality during the 60-year lifetime.
\section{Stage 3: Variable xenon extraction rate} \label{sec:xe-removal-rate}
When Stage 2 is complete, a series of extensions to the Stage 2 model will
be pursued. These will incorporate extraction efficiencies as a function of
many physical system design parameters (e.g., void fraction in the salt,
helium bubble size). Mathematical correlations for the efficiencies will be
taken from relationships in the literature \cite{peebles_removal_1968,
gabbard_development_1974} and CFD simulations currently being conducted
at the University of Illinois at Urbana-Champaign \cite{huff_enabling_2018}.
For demonstration purposes, just xenon removal efficiency will be defined as a
function of many parameters (Section~\ref{sec:gas-separ}) due to
limited data provided in the listed literature. For other fission products
from the \gls{TAP} reprocessing scheme (table~\ref{tab:reprocessing_list}),
removal efficiencies will be defined based on the removal rates from the
table, assuming time-independent extraction efficiency. This milestone will
result in a realistic online reprocessing system model capable of modeling
\gls{MSR} systems with parameterized, realistically achievable process rates,
and extraction efficiencies.
Another anticipated extension will test the \gls{TAP} reactor ability to
operate in a load-following regime. Short-term (3 days) depletion using
SaltProc v1.0 will be performed with the core power changing in the [0, 100\%]
range with a ramp rate 10\%/min (to be competitive with natural gas peaking
plants, which ramp at or above 10\% of their capacity)
\cite{huff_enabling_2018}.
Figure~\ref{fig:load} shows the load curve selected to demonstrate the
worst-case scenario of load-following:
\begin{enumerate}
\item Startup with fresh fuel and operating on 100\% of \gls{HFP}
level
for 40 hours to reach $^{135}$Xe/$^{135}$I equilibrium;
\item Load-following power drop (0.1 \gls{HFP}/min), from \gls{HFP}
to \gls{HZP};
\item Shutdown for 8 hours\footnote{At startup. Time after shutdown when
$^{135}$Xe concentration would reach maximum value greatly depends on
neutron energy spectrum which for the \gls{TAP} concept changes
significantly during operation.} to reach the $^{135}$Xe peak;
\item Load-following power rise (0.1 \gls{HFP}/min), from \gls{HZP}
to \gls{HFP}.
\end{enumerate}
This scenario can be considered as backing up solar power with
nuclear on a high-solar penetration grid (e.g., in California).
\begin{figure}[bth!] % replace 't' with 'b' to
\centering
\includegraphics[width=0.8\textwidth]{load_curve.png}
\caption{Tentative load curve for short-term load-following depletion
simulation for the \gls{TAP} reactor using SaltProc v1.0.}
\label{fig:load}
\end{figure}
The depletion step time for short-term simulation will be varied in a range
from 1 to 60 min to find a compromise between accuracy and computational cost.
It is expected that load-following performance will be better at the \gls{BOL}
because the neutron energy spectrum thermalizes during the reactor operation.
Thus, the short-term load-following simulation will be repeated for the
\gls{BOL}, the middle of life, and the \gls{EOL} to assess the \gls{TAP}
concept performance in a load-following regime during the whole reactor
lifetime.
Additionally, sensitivity analysis of input parameters in the xenon extraction
correlation will be conducted to determine the range of key parameters (e.g.,
mass transfer coefficient, helium sparging rate, gas-liquid interfacial area,
temperature) when load-following is possible for the \gls{TAP} reactor in a
worst-case power demand scenario. These multiple system configurations
incorporating user-parametrized components in the fuel salt processing system
will be collected and published in a \textit{.json}-compatible database for
use with the SaltProc v1.0 to encourage further research in this area.
\section{Stage 4: Prototype design for the Xe removal system}
As the model becomes capable of incorporating user-parametrized components
with correlation-based extraction efficiency for the helium sparging
component, constraints bounding a suitable sparger design will be determined
and described. These design ranges (i.e., helium sparging rate) obtained from
the previous Stage. The ultimate objective of the design is to ensure
load-following operation during most of the operation period when
minimizing the fuel salt inventory. That is, constrained optimization problem
must be solved to minimize total fuel salt volume outside of the core. The
target design parameters for the sparger include: the volume of sparger,
helium flow rate, salt flow rate, and geometry.
Additionally, nuclear criticality safety analysis will be performed using
MCNP6 \cite{werner_mcnp6._2018} to confirm that the selected sparger design
has a subcritical configuration. If the sparger geometry obtained during the
optimization process is supercritical, the fission gas removal system would
contain multiple spargers of smaller size connected in parallel. Total fuel
salt volume and sparger size are expected to be smaller at the \gls{BOL} and
increase steadily as the neutron energy spectrum becomes softer.
\section{Stage 5: \gls{TAP} Safety Analysis}
The objective of this Stage is to characterize neutronics limits related to
load following. High-fidelity simulations will achieve this goal with the
Serpent 2 Monte Carlo code. Specifically, changes in safety parameters
(Section~\ref{sec:safety-param}) will be evaluated for two time frames:
\begin{enumerate}
\item Long-time-scale changes in safety parameters should not compromise
\gls{TAP} \gls{MSR} safety.
\item Load-following operation at key moments in the reactor lifetime
(e.g., at startup, at the middle of life, at the end of life) must not
result in significant changes in safety parameters.
\end{enumerate}
Section~\ref{sec:safety-param-res} showed preliminary calculations of
temperature coefficients and reactivity control system worth for the \gls{TAP}
at startup. The next step will develop an axially discretized core geometry in
Serpent with non-uniform axial density distribution to estimate the axial
power offset. Afterward, safety parameters will be calculated at the
\gls{BOL}, the middle of life, and the \gls{EOL} to capture safety parameter
variation over long time scales. Validation against previous work in a
collaboration between Transatomic Power and ORNL
\cite{betzler_assessment_2017, betzler_fuel_2018} will also be
performed for confidence building. Additionally, analysis for different xenon
removal efficiencies (i.e., in the range from 0 to 100\%) will be performed to
capture the effect of $^{135}$Xe concentration on safety.
To analyze the impact of the load-following operation on \gls{TAP} concept
safety, safety parameter calculations will be repeated for the load-following
transient. The combination of fuel and moderator temperature coefficients must
remain strongly negative, and the reactivity worth of control rods must be
sufficient to shut down the reactor for all times during load-following
operation.
\section{Conclusions}
Details of gas removal and fuel salt processing systems in liquid-fueled
\glspl{MSR} have historically been conceptual rather than concrete. Usually,
researchers assume ideal rather than realistically constrained poison
extraction efficiency for reactor performance calculations. This work will
more realistically model an online molten salt processing system with a focus
on the gas removal system of the prospective \gls{TAP} \gls{MSR}. SaltProc, a
Python toolkit, was developed as a part of this work. SaltProc couples directly
with the Serpent 2 Monte Carlo burnup software to capture the evolution of
fuel salt composition during reactor operation in the context of an online
fuel processing system.
Modeling and simulation of the online reprocessing system in the \gls{MSR}
has shown promise in past research. Our work on simulating online fuel
reprocessing for the thorium-fueled \gls{MSBR} yielded interesting results:
notable neutron energy spectrum shift and corresponding changes in safety
parameters during operation. Additional preliminary work also showed promising
results in modeling a simplified fuel processing system for the \gls{TAP}
\gls{MSR}. These simulations motivate future work in modeling advanced
liquid-fueled \gls{MSR} plant designs.
To establish a feasible system design for molten salt fuel reprocessing, a
more advanced model of the \gls{TAP} \gls{MSR} system with adjustable core
geometry and realistically achievable extraction efficiencies will be
developed. Extended SaltProc v1.0 will realistically capture the dynamics of
fuel salt composition changes with higher accuracy. SaltProc v1.0 will also
be employed to simulate the \gls{TAP} \gls{MSR} behavior in short-term
transients to determine the feasibility of load following. Additionally, input
parameters such as flow rates, bubble size, and the void fraction will be
varied to determine the range of these parameters when the load following is
possible for the \gls{TAP} concept.
In addition to these simulations, several extensions are suggested to
advance our preliminary work. First, the feasible design parameters of the
sparger, critical component of the \gls{TAP} gas removal system, will be
optimized through sensitivity analysis of geometry and system conditions. To
guarantee criticality safety, an MCNP6 simulation will be performed to define
an appropriate sparger geometry. Further effort will focus on safety
parameter evolution in the \gls{TAP} reactor during lifetime (60 years),
when the moderator rod configuration discretely changes. Finally, dynamics of
the safety parameters will be investigated for a short-term case as was
described in section~\ref{sec:xe-removal-rate}: load following over three days
with fixed moderator configuration and worst-case scenario of the power level
change.
|
program test_input3d_layer
  ! Unit test for the 3-d input layer constructor: verifies the layer name,
  ! the initialized flag, the requested layer shape, the absence of an input
  ! layer shape, and that the output buffer is zero-initialized.
  !
  ! Fixes relative to the previous revision:
  !   * the layer_shape failure message said 'input1d layer' (copy-paste from
  !     the 1-d test) — corrected to 'input3d layer';
  !   * removed the unused `use nf_input1d_layer` import.
  use iso_fortran_env, only: stderr => error_unit
  use nf, only: input, layer
  implicit none

  type(layer) :: test_layer
  real, allocatable :: output(:,:,:)
  logical :: ok = .true.

  ! Construct a 3-d input layer of shape [channels, height, width].
  test_layer = input([3, 32, 32])

  if (.not. test_layer % name == 'input') then
    ok = .false.
    write(stderr, '(a)') 'input3d layer has its name set correctly.. failed'
  end if

  if (.not. test_layer % initialized) then
    ok = .false.
    write(stderr, '(a)') 'input3d layer should be marked as initialized.. failed'
  end if

  if (.not. all(test_layer % layer_shape == [3, 32, 32])) then
    ok = .false.
    write(stderr, '(a)') 'input3d layer is created with requested size.. failed'
  end if

  ! An input layer has no upstream layer, so its input shape must be empty.
  if (.not. size(test_layer % input_layer_shape) == 0) then
    ok = .false.
    write(stderr, '(a)') 'input3d layer has no input layer shape.. failed'
  end if

  call test_layer % get_output(output)

  if (.not. all(output == 0)) then
    ok = .false.
    write(stderr, '(a)') 'input3d layer values are all initialized to 0.. failed'
  end if

  if (ok) then
    print '(a)', 'test_input3d_layer: All tests passed.'
  else
    write(stderr, '(a)') 'test_input3d_layer: One or more tests failed.'
    stop 1
  end if
end program test_input3d_layer
|
Require Import ExtLib.Core.RelDec.
Set Implicit Arguments.
Set Strict Implicit.
(* Decidable equality for [unit]: since [unit] has a single inhabitant, the
   decision procedure can unconditionally answer [true]. *)
Global Instance RelDec_eq_unit : RelDec (@eq unit) :=
{ rel_dec := fun _ _ => true }.
(* Correctness of the decider above: destructing both arguments reduces every
   case to [tt = tt], which both directions discharge trivially. *)
Global Instance RelDec_Correct_eq_unit : RelDec_Correct RelDec_eq_unit.
constructor. destruct x; destruct y; auto; simpl. intuition.
Qed.
|
Formal statement is: lemma tendsto_asymp_equiv_cong: assumes "f \<sim>[F] g" shows "(f \<longlongrightarrow> c) F \<longleftrightarrow> (g \<longlongrightarrow> c) F" Informal statement is: If two functions are asymptotically equivalent, then they have the same limit. |
State Before: m n✝ a b c d n : ℕ
⊢ n % 4 = 1 → n % 2 = 1 State After: no goals Tactic: simpa [ModEq, show 2 * 2 = 4 by norm_num] using @ModEq.of_mul_left 2 n 1 2 State Before: m n✝ a b c d n : ℕ
⊢ 2 * 2 = 4 State After: no goals Tactic: norm_num |
# NOTE(review): lone placeholder assignment; its purpose is not evident from the
# surrounding context — confirm whether it is still needed.
baz <- 1
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Examples showing how the case expressions can be used with anonymous
-- pattern-matching lambda abstractions
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
module README.Case where
open import Data.Fin hiding (pred)
open import Data.Maybe hiding (from-just)
open import Data.Nat hiding (pred)
open import Data.List
open import Data.Sum
open import Data.Product
open import Function
open import Relation.Nullary
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
------------------------------------------------------------------------
-- Different types of pattern-matching lambdas
-- absurd pattern
-- Eliminates the uninhabited type `Fin 0` with an absurd pattern-matching
-- lambda: there is no constructor for `Fin 0`, so no clause is needed.
empty : ∀ {a} {A : Set a} → Fin 0 → A
empty i = case i of λ ()
-- {}-delimited and ;-separated list of clauses
-- Note that they do not need to be on different lines
-- Predecessor on ℕ, with `pred zero = zero`, written via `case` applied to a
-- {}-delimited, ;-separated pattern-matching lambda.
pred : ℕ → ℕ
pred n = case n of λ
{ zero → zero
; (suc n) → n
}
-- where-introduced and indentation-identified block of list of clauses
-- Extracts the value from a `just`; the dependent motive `From-just x` makes
-- the `nothing` branch trivially inhabited (filled by `_`). Uses the
-- `λ where`-style block of clauses.
from-just : ∀ {a} {A : Set a} (x : Maybe A) → From-just x
from-just x = case x return From-just of λ where
(just x) → x
nothing → _
------------------------------------------------------------------------
-- We can define some recursive functions with case
-- Addition by recursion on the first argument, written with `case`; the
-- recursive call is on the pattern variable `m`, which the termination
-- checker accepts.
plus : ℕ → ℕ → ℕ
plus m n = case m of λ
{ zero → n
; (suc m) → suc (plus m n)
}
-- Floor division by two: peels off two `suc`s per recursive step, mixing a
-- top-level pattern match with a `case … of λ where` block.
div2 : ℕ → ℕ
div2 zero = zero
div2 (suc m) = case m of λ where
zero → zero
(suc m') → suc (div2 m')
-- Note that some natural uses of case are rejected by the termination
-- checker:
-- module _ {a} {A : Set a} (eq? : Decidable {A = A} _≡_) where
-- pairBy : List A → List (A ⊎ (A × A))
-- pairBy [] = []
-- pairBy (x ∷ []) = inj₁ x ∷ []
-- pairBy (x ∷ y ∷ xs) = case eq? x y of λ where
-- (yes _) → inj₂ (x , y) ∷ pairBy xs
-- (no _) → inj₁ x ∷ pairBy (y ∷ xs)
|
// $Id: pfc_libraries.h 37984 2018-10-27 15:50:30Z p20068 $
// $URL: https://svn01.fh-hagenberg.at/bin/cepheiden/vocational/teaching/ESD/SPS3/2018-WS/ILV/src/Snippets/bitmap-gsl/pfc_libraries.h $
// $Revision: 37984 $
// $Date: 2018-10-27 17:50:30 +0200 (Sa., 27 Okt 2018) $
// $Author: p20068 $
//
// Creator: Peter Kulczycki (peter.kulczycki<AT>fh-hagenberg.at)
// Creation Date:
// Copyright: (c) 2018 Peter Kulczycki (peter.kulczycki<AT>fh-hagenberg.at)
//
// License: This document contains proprietary information belonging to
// University of Applied Sciences Upper Austria, Campus
// Hagenberg. It is distributed under the Boost Software License,
// Version 1.0 (see http://www.boost.org/LICENSE_1_0.txt).
//
// Annotation: This file is part of the code snippets handed out during one
// of my HPC lessons held at the University of Applied Sciences
// Upper Austria, Campus Hagenberg.
#pragma once

#include "./pfc_macros.h"

// -------------------------------------------------------------------------------------------------

// When compiling with nvcc, opt out of every optional library below.
#if defined PFC_DETECTED_COMPILER_NVCC
   #define PFC_DO_NOT_USE_BOOST_UNITS
   #define PFC_DO_NOT_USE_GSL
   #define PFC_DO_NOT_USE_VLD
   #define PFC_DO_NOT_USE_WINDOWS
#endif

// -------------------------------------------------------------------------------------------------

// Visual Leak Detector: included only if the header is available and not
// explicitly disabled; PFC_HAVE_VLD signals availability to client code.
#undef PFC_HAVE_VLD
#undef PFC_VLD_INCLUDED

#if __has_include (<vld.h>) && !defined PFC_DO_NOT_USE_VLD   // Visual Leak Detector (https://kinddragon.github.io/vld)
   #include <vld.h>

   #define PFC_HAVE_VLD
   #define PFC_VLD_INCLUDED

   #pragma message ("PFC: using 'Visual Leak Detector'")
#else
   #pragma message ("PFC: not using 'Visual Leak Detector'")
#endif

// -------------------------------------------------------------------------------------------------

// Microsoft Guideline Support Library: same availability/opt-out pattern.
#undef PFC_HAVE_GSL
#undef PFC_GSL_INCLUDED

#if __has_include (<gsl/gsl>) && !defined PFC_DO_NOT_USE_GSL   // Guideline Support Library (https://github.com/Microsoft/GSL)
   #include <gsl/gsl>

   #define PFC_HAVE_GSL
   #define PFC_GSL_INCLUDED

   #pragma message ("PFC: using 'Guideline Support Library'")
#else
   #pragma message ("PFC: not using 'Guideline Support Library'")
#endif

// -------------------------------------------------------------------------------------------------

// Boost.Units: all three headers must be present before any is included.
#undef PFC_HAVE_BOOST_UNITS
#undef PFC_BOOST_UNITS_INCLUDED

#if __has_include (<boost/units/io.hpp>)
#if __has_include (<boost/units/systems/si/length.hpp>)
#if __has_include (<boost/units/systems/si/prefixes.hpp>) && !defined PFC_DO_NOT_USE_BOOST_UNITS
   #include <boost/units/io.hpp>                    // http://www.boost.org
   #include <boost/units/systems/si/length.hpp>     // https://sourceforge.net/projects/boost/files/boost-binaries
   #include <boost/units/systems/si/prefixes.hpp>   //

   #define PFC_HAVE_BOOST_UNITS
   #define PFC_BOOST_UNITS_INCLUDED

   #pragma message ("PFC: using 'Boost.Units'")
#else
   #pragma message ("PFC: not using 'Boost.Units'")
#endif
#endif
#endif

// -------------------------------------------------------------------------------------------------

// windows.h: the lean-and-mean/STRICT/NOMINMAX macros are forced before
// inclusion to keep the Windows headers minimal and min/max-free.
#undef PFC_HAVE_WINDOWS
#undef PFC_WINDOWS_INCLUDED

#if __has_include (<windows.h>) && !defined PFC_DO_NOT_USE_WINDOWS
   #undef  NOMINMAX
   #define NOMINMAX

   #undef  STRICT
   #define STRICT

   #undef  VC_EXTRALEAN
   #define VC_EXTRALEAN

   #undef  WIN32_LEAN_AND_MEAN
   #define WIN32_LEAN_AND_MEAN

   #include <windows.h>

   #define PFC_HAVE_WINDOWS
   #define PFC_WINDOWS_INCLUDED

   #pragma message ("PFC: using 'windows.h'")
#else
   #pragma message ("PFC: not using 'windows.h'")
#endif
|
[STATEMENT]
lemma trg_dom [simp]:
shows "trg (dom \<mu>) = trg \<mu>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trg (local.dom \<mu>) = trg \<mu>
[PROOF STEP]
by (metis arr_dom_iff_arr ide_char ide_trg trg.is_extensional trg.preserves_dom) |
-- Plug in a no-op keyword filter: the keyword-lexer class is NoKWLexer
-- and the identifier token kind is 0.
%Define
$kw_lexer_class /.NoKWLexer./
$_IDENTIFIER /.0./
%End

%Headers
--
-- Additional methods for the action class not provided in the template.
-- NoKWLexer is a stub keyword lexer: getKeywordKinds() returns null and
-- lexer()/getKind() return 0, so no keywords are ever recognized.
--
/.
    public class NoKWLexer
    {
        public int[] getKeywordKinds() { return null; }

        public int lexer(int curtok, int lasttok) { return 0; }

        public void setInputChars(char[] inputChars) { }

        int getKind(int c) { return 0; }

        public NoKWLexer(char[] inputChars, int identifierKind) { }
    }
./
%End

-- Base lexer mapping shared with the other lexer grammars.
%Import
LexerBasicMapF.gi
%End
|
[GOAL]
n : ℤ
⊢ sqrt (n * n) = ↑(natAbs n)
[PROOFSTEP]
rw [sqrt, ← natAbs_mul_self, toNat_coe_nat, Nat.sqrt_eq]
[GOAL]
x : ℤ
x✝ : ∃ n, n * n = x
n : ℤ
hn : n * n = x
⊢ sqrt x * sqrt x = x
[PROOFSTEP]
rw [← hn, sqrt_eq, ← Int.ofNat_mul, natAbs_mul_self]
|
import SciLean.Core
import SciLean.Functions.EpsNorm
namespace SciLean
variable {X} [Hilbert X]
/-- Regularized power of the norm: `εpow ε x y = (∥x∥² + ε²)^(y/2)`.
For `ε = 0` it reduces to `∥x∥^y` (see `εpow.is_pow_at_zero`); a nonzero `ε`
keeps the expression smooth in `x` (see the `argument x` properties below). -/
def εpow (ε : ℝ) (x : X) (y : ℝ) : ℝ := Math.pow (∥x∥² + ε^2) (y/2)
argument x [Fact (ε≠0)]
  isSmooth   := sorry,
  diff_simp  := y * ⟪dx, x⟫ * εpow ε x (y-2) by sorry,
  hasAdjDiff := by constructor; infer_instance; simp; intro; infer_instance; done,
  adjDiff_simp := (y * (dx' * εpow ε x (y-2))) * x by (simp[adjointDifferential]; unfold hold; simp; unfold hold; simp; done)

-- Defined in EpsLog.lean
-- argument y [Fact (ε≠0)]
--   isSmooth := sorry,
--   diff_simp := dy * (εlog ε x) * εpow ε x y by sorry

-- Notation mirroring the εnorm notation `∥x∥{ε}`.
notation "∥" x "∥^{" y "," ε "}" => εpow ε x y

-- Special values of the exponent.
@[simp]
theorem εpow.is_εnorm_at_one (x : X) (ε : ℝ) : ∥x∥^{1,ε} = ∥x∥{ε} := sorry
@[simp]
theorem εpow.is_pow_at_zero (x : X) (y : ℝ) : ∥x∥^{y,0} = ∥x∥^y := sorry
theorem εpow.is_normSqr_at_two (x : X) (y : ℝ) : ∥x∥^{(2:ℝ),ε} = ∥x∥² + ε^2 := sorry

@[simp]
theorem εpow.recip_εnorm_is_εpow (x : X) (ε : ℝ) (c : ℝ) : c/∥x∥{ε} = c * ∥x∥^{-1,ε} := sorry

-- Sign facts: εpow is always nonnegative, and strictly positive for ε ≠ 0.
instance εpow.isNonNegative (x : X) (y : ℝ) : Fact (∥x∥^{y,ε} ≥ 0) := sorry
instance εpow.isPositive [Fact (ε≠0)] (x : X) (y : ℝ) : Fact (∥x∥^{y,ε} > 0) := sorry

@[simp]
theorem εpow.is_pow_of_εnorm (ε : ℝ) [Fact (ε≠0)] (x : X) (y : ℝ) : ∥x∥{ε}^y = ∥x∥^{y,ε} := sorry
|
! Main program: builds a zero-filled real matrix and prints it row by row
! through subrutina_imprimir_matriz.
program llamar_subrutina
   implicit none
   ! Matrix dimensions as named constants so the declaration and the call
   ! cannot drift apart (previously the literals 10 and 20 were repeated).
   integer, parameter :: num_filas = 10
   integer, parameter :: num_columnas = 20
   real :: matriz(num_filas, num_columnas)

   ! Initialize every element to zero
   matriz(:,:) = 0.0

   call subrutina_imprimir_matriz(num_filas, num_columnas, matriz)
end program llamar_subrutina
! Print a real matrix to standard output, one row per line.
subroutine subrutina_imprimir_matriz(numero_filas, numero_columnas, matriz)
   implicit none
   ! Dummy arguments: the dimensions and the (read-only) matrix itself.
   integer, intent(in) :: numero_filas
   integer, intent(in) :: numero_columnas
   real, intent(in) :: matriz(numero_filas, numero_columnas)
   ! Row counter
   integer :: fila

   do fila = 1, numero_filas
      ! An array section selects the whole row at once
      print *, matriz(fila, :)
   end do
end subroutine subrutina_imprimir_matriz
|
module TypeDrivenDevelopment
||| Map a list of strings to their lengths.
allLengths : List String -> List Nat
allLengths = map length
||| Negate a boolean.
invert : (t : Bool) -> Bool
invert t = if t then False else True
||| Describe whether a list is empty or not.
describeList : List Int -> String
describeList xs = case xs of
                       []        => "Empty"
                       (_ :: _)  => "Non-empty"
-- Exclusive or: True exactly when the two arguments differ.
xor : Bool -> Bool -> Bool
xor x y = if x then not y else y
mutual
  -- Parity of a natural number, by mutual recursion with isOdd.
  isEven : Nat -> Bool
  isEven Z     = True
  isEven (S n) = isOdd n

  -- Parity of a natural number, by mutual recursion with isEven.
  isOdd : Nat -> Bool
  isOdd Z     = False
  isOdd (S n) = isEven n
|
If $f \in L_F(g)$ and $h \in \Theta_F(g)$, then $f \in L_F(h)$. |
theory Extended_Assertion_Exceptions
imports
Abstract_Rbt
Assertion_Tree_Lookup
Utilities_Ext
begin
context rbt_impl
begin
(* If every key of the tree is below kn, then kn can be removed from the
   exception set without changing the extended assertion. *)
lemma rbt_less_value_ex_eq_2 [simp]:
  "rbt_of t |\<guillemotleft> kn \<Longrightarrow> rbt_assn_ext t (ex - {kn}) ti = rbt_assn_ext t ex ti"
proof (induction t arbitrary: ti)
  case ATEmpty
  show ?case by simp
next
  case (ATBranch c k v ci li ki vi ri l r)
  hence "kn \<noteq> k" by auto
  with ATBranch show ?case
    unfolding rbt_assn_ext_unfold
    apply simp
    done
qed

(* Insertion variant: adding such a kn to the exception set is also a no-op. *)
lemma rbt_less_value_ex_eq_1 [simp]:
  "rbt_of t |\<guillemotleft> kn \<Longrightarrow> rbt_assn_ext t (Set.insert kn ex) ti = rbt_assn_ext t ex ti"
  apply (subst rbt_less_value_ex_eq_2[symmetric]) by simp+
(* Symmetric versions: a key below every key of the tree is irrelevant
   to the exception set. *)
lemma rbt_greater_value_ex_eq_2 [simp]:
  "kn \<guillemotleft>| rbt_of t \<Longrightarrow> rbt_assn_ext t (ex - {kn}) ti = rbt_assn_ext t ex ti"
proof (induction t arbitrary: ti)
  case ATEmpty
  show ?case by simp
next
  case (ATBranch c k v ci li ki vi ri l r)
  hence "kn \<noteq> k" by auto
  with ATBranch show ?case
    unfolding rbt_assn_ext_unfold
    apply simp
    done
qed

lemma rbt_greater_value_ex_eq_1 [simp]:
  "kn \<guillemotleft>| rbt_of t \<Longrightarrow> rbt_assn_ext t (Set.insert kn ex) ti = rbt_assn_ext t ex ti"
  apply (subst rbt_greater_value_ex_eq_2[symmetric]) by simp+
(* Recursion equations for value_of_key on a branch: descend left for a
   smaller key, right for a larger one, and hit the stored low-level
   value at the root key itself. *)
lemma [simp]:
  "kn < k \<Longrightarrow> value_of_key (ATBranch c k v ci li ki vi ri l r) kn = value_of_key l kn"
  unfolding value_of_key_def apply (simp add: value_of_key'.simps)
  unfolding value_of_key'.simps[symmetric]
  using value_of_key'_value_of_key_eq by metis

lemma [simp]:
  "k < kn \<Longrightarrow> value_of_key (ATBranch c k v ci li ki vi ri l r) kn = value_of_key r kn"
  unfolding value_of_key_def apply (auto simp add: value_of_key'.simps)
  unfolding value_of_key'.simps[symmetric]
  using value_of_key'_value_of_key_eq by metis

lemma [simp]:
  "value_of_key (ATBranch c k v ci li ki vi ri l r) k = Some vi"
  unfolding value_of_key_def apply (auto simp add: value_of_key'.simps)
  done
(* Splitting entailment: when key kn is present (low-level value vi) and not
   yet excluded, its value assertion can be carved out of the tree assertion
   by adding kn to the exception set. *)
lemma value_ex_split_ent:
  assumes
    "value_of_key t kn = Some vi" and
    "kn \<notin> ex" and
    "rbt_sorted (rbt_of t)"
  shows
    "
    rbt_assn_ext t ex ti \<turnstile> rbt_assn_ext t ({kn} \<union> ex) ti ** \<upharpoonleft>value_assn (the (rbt_lookup (rbt_of t) kn)) vi
    "
  using assms
proof(induction t arbitrary: ti)
  case ATEmpty
  then show ?case by simp
next
  case (ATBranch c k v ci li ki vi ri l r)
  then show ?case
  proof(cases kn k rule: linorder_cases)
    case less
    note ATBranch(1)[isep_dest]
    from less have "k \<noteq> kn" by simp
    moreover from less ATBranch(5) have "kn \<guillemotleft>| rbt_of r"
      using rbt_greater_trans by auto
    ultimately show ?thesis using less ATBranch(3-5)
      unfolding rbt_assn_ext_unfold
      apply simp
      apply (sepwith simp)
      apply simp
      done
  next
    case equal
    with ATBranch show ?thesis
      unfolding rbt_assn_ext_unfold
      apply -
      apply (sepEwith \<open>auto intro: rbt_less_trans rbt_greater_trans\<close>)
      apply simp
      apply sep
      done
  next
    case greater
    note ATBranch(2)[isep_dest]
    from greater have "k \<noteq> kn" by simp
    moreover from greater ATBranch(5) have "rbt_of l |\<guillemotleft> kn"
      using rbt_less_trans by auto
    ultimately show ?thesis using greater ATBranch(3-5)
      unfolding rbt_assn_ext_unfold
      apply auto[]
      apply (sepwith \<open>simp add: order_less_not_sym\<close>)
      apply simp
      done
  qed
qed
(* Reduction-rule packaging of value_ex_split_ent for the sep solver. *)
lemma value_ex_split_red:
  assumes
    "value_of_key t kn = Some vi" and
    "kn \<notin> ex" and
    "rbt_sorted (rbt_of t)"
  shows
    "
    is_sep_red (rbt_assn_ext t (ex \<union> {kn}) ti) \<box> (rbt_assn_ext t ex ti) (\<upharpoonleft>value_assn (the (rbt_lookup (rbt_of t) kn)) vi)
    "
  apply (rule is_sep_redI)
  subgoal premises prems
    apply (sep isep_dest: value_ex_split_ent)
    using assms apply auto
    apply (sep isep_dest: prems[simplified])
    done
  done
(* Joining entailment (inverse of splitting): giving back a value assertion
   for an excluded key kn re-absorbs it into the tree assertion; the
   functional tree is updated at kn and pointer/value maps are unchanged. *)
lemma value_ex_join_ent':
  assumes
    "value_of_key t kn = Some vi" and
    "kn \<in> ex" and
    "rbt_sorted (rbt_of t)"
  shows
    "
    rbt_assn_ext t ex ti ** \<upharpoonleft>value_assn v vi \<turnstile>
    (EXS t_res.
      rbt_assn_ext t_res (ex - {kn}) ti **
      \<up>(rbt_of t_res = rbt_update kn v (rbt_of t)) **
      ctx(rbt_sorted (rbt_of t_res)) **
      \<up>(ptr_of_key t_res ti = ptr_of_key t ti) **
      \<up>(value_of_key t_res = value_of_key t)
    )
    "
  using assms
proof (induction t arbitrary: ti)
  case ATEmpty
  then show ?case by simp
next
  case (ATBranch c k v ci li ki vi ri l r)
  show ?case
  proof(cases kn k rule: linorder_cases)
    case less
    note ATBranch(2)[isep_dest]
    from less have "kn \<noteq> k" by simp
    moreover from ATBranch(5) rbt_greater_trans less have "kn \<guillemotleft>| rbt_of r" by auto
    ultimately show ?thesis using ATBranch(3-5) less
      apply -
      unfolding rbt_assn_ext_unfold
      apply (isep_drule drule: ATBranch(1))
      apply (auto)[3]
      apply (sepEwith \<open>(solves auto)?\<close>)
      apply (simp add: rbt_map_entry_rbt_less rbt_map_entry_rbt_sorted)
      apply (sepEwith \<open>solves pok_solver | solves vok_solver\<close>)
      apply simp
      apply sep
      done
  next
    case equal
    with ATBranch(3-5) show ?thesis
      unfolding rbt_assn_ext_unfold
      apply -
      apply (sepEwith \<open>solves pok_solver | solves vok_solver\<close>)
      apply simp
      apply sep
      done
  next
    case greater
    note ATBranch(2)[isep_dest]
    from greater have "\<not>kn < k" by simp
    moreover from greater have "kn \<noteq> k" by simp
    moreover from ATBranch(5) rbt_less_trans greater have "rbt_of l |\<guillemotleft> kn" by auto
    ultimately show ?thesis using ATBranch(3-5) greater
      apply -
      unfolding rbt_assn_ext_unfold
      apply (isep_drule drule: ATBranch(2))
      apply (auto)[3]
      apply (sepEwith \<open>(solves auto)?\<close>)
      apply (simp add: rbt_map_entry_rbt_greater rbt_map_entry_rbt_sorted)
      apply (sepEwith \<open>solves pok_solver | solves vok_solver\<close>)
      apply simp
      apply sep
      done
  qed
qed

(* Same lemma with the ctx wrapper unfolded. *)
lemmas value_ex_join_ent = value_ex_join_ent'[simplified ctx_def]
(* Reduction-rule packaging of value_ex_join_ent for the sep solver. *)
lemma value_ex_join_red:
  "rbt_sorted (rbt_of t1) \<Longrightarrow> k \<in> ex \<Longrightarrow> k \<notin> ex' \<Longrightarrow> value_of_key t1 k = Some vi \<Longrightarrow>
    is_sep_red
      (EXS t2. rbt_assn_ext t2 (ex - {k}) ti ** \<up>(rbt_of t2 = rbt_update k v (rbt_of t1)))
      (EXS t3. rbt_assn_ext t3 ex' ti)
      (rbt_assn_ext t1 ex ti ** \<upharpoonleft>value_assn v vi)
      (EXS t3. rbt_assn_ext t3 ex' ti)
  "
  apply (rule is_sep_redI)
  subgoal premises prems for Ps Qs
    apply (rule entails_trans[OF _ prems(5)])
    apply (isep_drule drule: value_ex_join_ent)
    using prems(1-4) apply auto[3]
    apply sepE
    done
  done
end
end |
State Before: α : Type u
β : Type v
γ : Type ?u.10908
f✝ g✝ : Ultrafilter α
s t : Set α
p q : α → Prop
u : Ultrafilter α
f g : Filter α
⊢ ¬↑u ≤ f ⊔ g ↔ ¬(↑u ≤ f ∨ ↑u ≤ g) State After: no goals Tactic: simp only [← disjoint_iff_not_le, not_or, disjoint_sup_right] |
(* Title: Metric and semimetric spaces
Author: Tim Makarios <tjm1983 at gmail.com>, 2012
Maintainer: Tim Makarios <tjm1983 at gmail.com>
*)
section "Metric and semimetric spaces"
theory Metric
imports "HOL-Analysis.Multivariate_Analysis"
begin
(* A semimetric: non-negative, zero exactly on the diagonal, and symmetric.
   No triangle inequality is required. *)
locale semimetric =
  fixes dist :: "'p \<Rightarrow> 'p \<Rightarrow> real"
  assumes nonneg [simp]: "dist x y \<ge> 0"
    and eq_0 [simp]: "dist x y = 0 \<longleftrightarrow> x = y"
    and symm: "dist x y = dist y x"
begin
  (* Immediate consequence of eq_0. *)
  lemma refl [simp]: "dist x x = 0"
    by simp
end
(* A metric, axiomatized minimally: identity of indiscernibles plus a
   triangle inequality with the middle point on the left of both summands.
   Non-negativity and symmetry are derived in the sublocale proof below. *)
locale metric =
  fixes dist :: "'p \<Rightarrow> 'p \<Rightarrow> real"
  assumes [simp]: "dist x y = 0 \<longleftrightarrow> x = y"
    and triangle: "dist x z \<le> dist y x + dist y z"
(* Every metric is a semimetric: non-negativity follows from
   triangle [of y y x], symmetry from triangle [of w z z]. *)
sublocale metric < semimetric
proof
  { fix w
    have "dist w w = 0" by simp }
  note [simp] = this
  fix x y
  show "0 \<le> dist x y"
  proof -
    from triangle [of y y x] show "0 \<le> dist x y" by simp
  qed
  show "dist x y = 0 \<longleftrightarrow> x = y" by simp
  show "dist x y = dist y x"
  proof -
    { fix w z
      have "dist w z \<le> dist z w"
      proof -
        from triangle [of w z z] show "dist w z \<le> dist z w" by simp
      qed }
    hence "dist x y \<le> dist y x" and "dist y x \<le> dist x y" by simp+
    thus "dist x y = dist y x" by simp
  qed
qed
(* The distance induced by the norm of a real normed vector space. *)
definition norm_dist :: "('a::real_normed_vector) \<Rightarrow> 'a \<Rightarrow> real" where
  [simp]: "norm_dist x y \<equiv> norm (x - y)"

(* The induced distance satisfies the metric axioms, via the norm's
   triangle inequality and norm_minus_commute. *)
interpretation norm_metric: metric norm_dist
proof
  fix x y
  show "norm_dist x y = 0 \<longleftrightarrow> x = y" by simp
  fix z
  from norm_triangle_ineq [of "x - y" "y - z"] have
    "norm (x - z) \<le> norm (x - y) + norm (y - z)" by simp
  with norm_minus_commute [of x y] show
    "norm_dist x z \<le> norm_dist y x + norm_dist y z" by simp
qed
end
|
=== Flying schools ===
|
# ------------------------------------------------------------------
# Licensed under the ISC License. See LICENSE in the project root.
# ------------------------------------------------------------------
# Connected component of the graph with the given adjacency matrix that
# contains `vertex`. Returns the component's vertices, each exactly once.
function component(adjacency::AbstractMatrix{Int}, vertex::Int)
  frontier = [vertex]
  visited  = Int[]
  # depth-first search (pop! takes from the end of the frontier stack)
  while !isempty(frontier)
    u = pop!(frontier)
    # a vertex reachable along several paths may sit on the frontier more
    # than once; skip it on later pops so `visited` holds no duplicates
    u ∈ visited && continue
    push!(visited, u)
    for v in findall(!iszero, adjacency[u,:])
      if v ∉ visited
        push!(frontier, v)
      end
    end
  end
  visited
end
"""
@metasolver solver solvertype body
A helper macro to create a solver named `solver` of type `solvertype`
with parameters specified in `body`.
## Examples
Create a solver with parameters `mean` and `variogram` for each variable
of the problem, and a global parameter that specifies whether or not
to use the GPU:
```julia
julia> @metasolver MySolver AbstractSimulationSolver begin
@param mean = 0.0
@param variogram = GaussianVariogram()
@jparam rho = 0.7
@global gpu = false
end
```
### Notes
This macro is not intended to be used directly, see other macros defined
below for estimation and simulation solvers.
"""
macro metasolver(solver, solvertype, body)
  # discard any content that doesn't start with @param or @global
  content = filter(arg -> arg isa Expr, body.args)

  # lines starting with @param refer to single variable parameters
  vparams = filter(p -> p.args[1] == Symbol("@param"), content)
  vparams = map(p -> p.args[3], vparams)

  # lines starting with @jparam refer to joint variable parameters
  jparams = filter(p -> p.args[1] == Symbol("@jparam"), content)
  jparams = map(p -> p.args[3], jparams)

  # lines starting with @global refer to global solver parameters
  gparams = filter(p -> p.args[1] == Symbol("@global"), content)
  gparams = map(p -> p.args[3], gparams)

  # add default value of `nothing` if necessary
  gparams = map(p -> p isa Symbol ? :($p = nothing) : p, gparams)

  # replace Expr(:=, a, 2) by Expr(:kw, a, 2) for valid kw args
  gparams = map(p -> Expr(:kw, p.args...), gparams)

  # keyword names
  gkeys = map(p -> p.args[1], gparams)

  # solver parameter type for single variable
  solvervparam = Symbol(solver,"Param")

  # solver parameter type for joint variables
  solverjparam = Symbol(solver,"JointParam")

  # variables are symbols or tuples of symbols
  vtype = Symbol
  jtype = NTuple{<:Any,Symbol}

  esc(quote
    # per-variable parameter struct (fields from the @param lines)
    $Parameters.@with_kw_noshow struct $solvervparam
      __dummy__ = nothing
      $(vparams...)
    end

    # joint-variable parameter struct (fields from the @jparam lines)
    $Parameters.@with_kw_noshow struct $solverjparam
      __dummy__ = nothing
      $(jparams...)
    end

    @doc (@doc $solvervparam) (
    struct $solver <: $solvertype
      vparams::Dict{$vtype,$solvervparam}
      jparams::Dict{$jtype,$solverjparam}
      $(gkeys...)

      # auxiliary fields
      varnames::Vector{Symbol}
      adjacency::Matrix{Int}

      function $solver(vparams::Dict{$vtype,$solvervparam},
                       jparams::Dict{$jtype,$solverjparam},
                       $(gkeys...))
        svars = collect(keys(vparams))
        jvars = collect(keys(jparams))
        lens₁ = length.(jvars)
        lens₂ = length.(unique.(jvars))
        @assert all(lens₁ .== lens₂ .> 1) "invalid joint variable specification"
        varnames  = svars ∪ Iterators.flatten(jvars)
        # adjacency connects variables that appear in a common joint tuple
        adjacency = zeros(Int, length(varnames), length(varnames))
        for (i, u) in enumerate(varnames)
          for vtuple in jvars
            if u ∈ vtuple
              for v in vtuple
                j = indexin([v], varnames)[1]
                i == j || (adjacency[i,j] = 1)
              end
            end
          end
        end
        new(vparams, jparams, $(gkeys...), varnames, adjacency)
      end
    end)

    function $solver(params...; $(gparams...))
      # build dictionaries for inner constructor
      vdict = Dict{$vtype,$solvervparam}()
      jdict = Dict{$jtype,$solverjparam}()

      # convert named tuples to solver parameters
      for (varname, varparams) in params
        kwargs = [k => v for (k,v) in zip(keys(varparams), varparams)]
        if varname isa Symbol
          push!(vdict, varname => $solvervparam(; kwargs...))
        else
          push!(jdict, varname => $solverjparam(; kwargs...))
        end
      end

      $solver(vdict, jdict, $(gkeys...))
    end

    function GeoStatsBase.covariables(var::Symbol, solver::$solver)
      vind = indexin([var], solver.varnames)[1]
      if vind ≠ nothing
        # collect all variables in the same connected component as `var`,
        # together with their single and joint parameters
        comp = GeoStatsBase.component(solver.adjacency, vind)
        vars = Tuple(solver.varnames[sort(comp)])
        params = []
        for v in vars
          push!(params, (v,) => solver.vparams[v])
        end
        for vtuple in keys(solver.jparams)
          if any(v ∈ vars for v in vtuple)
            push!(params, vtuple => solver.jparams[vtuple])
          end
        end
      else
        # default parameter for single variable
        vars = (var,)
        params = [(var,) => $solvervparam()]
      end

      (names=vars, params=Dict(params))
    end

    GeoStatsBase.variables(solver::$solver) = solver.varnames

    # ------------
    # IO methods
    # ------------
    function Base.show(io::IO, solver::$solver)
      print(io, $solver)
    end

    function Base.show(io::IO, ::MIME"text/plain", solver::$solver)
      println(io, solver)
      for (var, varparams) in merge(solver.vparams, solver.jparams)
        if var isa Symbol
          println(io, " └─", var)
        else
          println(io, " └─", join(var, "—"))
        end
        # show every explicitly-set parameter except the dummy placeholder
        pnames = setdiff(fieldnames(typeof(varparams)), [:__dummy__])
        for pname in pnames
          pval = getfield(varparams, pname)
          if pval ≠ nothing
            print(io, " └─", pname, " ⇨ ")
            show(IOContext(io, :compact => true), pval)
            println(io, "")
          end
        end
      end
    end
  end)
end
"""
@estimsolver solver body
A helper macro to create a estimation solver named `solver` with parameters
specified in `body`. For examples, please check the documentation for
`@metasolver`.
"""
macro estimsolver(solver, body)
esc(quote
GeoStatsBase.@metasolver $solver GeoStatsBase.AbstractEstimationSolver $body
end)
end
"""
@estimsolver solver body
A helper macro to create a simulation solver named `solver` with parameters
specified in `body`. For examples, please check the documentation for
`@metasolver`.
"""
macro simsolver(solver, body)
esc(quote
GeoStatsBase.@metasolver $solver GeoStatsBase.AbstractSimulationSolver $body
end)
end
|
## Return closing prices for a set of securities.
##
## security -- identifiers handed to equity.info()
## refresh  -- TRUE: fetch current prices (and optionally save them);
##             FALSE: read previously saved prices back from `file`
## file     -- CSV path used to save (refresh=TRUE) or load (refresh=FALSE)
equity.price <- function(security, refresh=TRUE, file=NA) {
    if (!isTRUE(refresh)) {
        ## read current price info from allprice.csv
        allprice <- read.csv(file)
        rownames(allprice) <- allprice$X
        allprice$X <- NULL
        return(allprice)
    }

    ## get current price info for each security
    allprice <- equity.info(security, extract=c('Name', 'Previous Close'))
    names(allprice) <- c('Date', 'Name', 'Close')
    allprice$Date <- NULL

    ## Some assets will return NA so search for NA and, if it exists, replace with 1 and label it "Cash"
    ## Cash, money markets and individual bonds also fall into this
    ## rename rows with NA for rowname to "Cash"
    rownames(allprice)[rownames(allprice) == "NA"] <- "Cash" # not a true "NA" so not using is.na()
    ## one unit of "Cash" is worth 1
    allprice[rownames(allprice) == "Cash", 2] <- 1

    ## write prices to file
    if (!is.na(file)) write.csv(allprice, file)
    allprice
}
|
open import Prelude
open import Nat
open import List
module contexts where
-- helper function: predecessor-style difference m - (1+ n), given n < m
diff-1 : ∀{n m} → n < m → Nat
diff-1 n<m = difference (n<m→1+n≤m n<m)

---- the core declarations ----
-- TODO move definitions

-- Contexts are association lists whose keys are stored as offsets: each
-- entry's key is relative to (one past) the previous entry's key, as
-- witnessed by the InT constructor of _∈_ below.
_ctx : Set → Set
A ctx = List (Nat ∧ A)

-- nil context
∅ : {A : Set} → A ctx
∅ = []

-- singleton context
■_ : {A : Set} → (Nat ∧ A) → A ctx
■_ (x , a) = (x , a) :: []

infixr 100 ■_

-- context extension/insertion - never use _::_
_,,_ : ∀{A} → A ctx → (Nat ∧ A) → A ctx
[] ,, (x , a) = ■ (x , a)
((hx , ha) :: t) ,, (x , a) with <dec x hx
... | Inl x<hx       = (x , a) :: ((diff-1 x<hx , ha) :: t)
... | Inr (Inl refl) = (x , a) :: t
... | Inr (Inr hx<x) = (hx , ha) :: (t ,, (diff-1 hx<x , a))

infixl 10 _,,_

-- membership, or presence, in a context
data _∈_ : {A : Set} (p : Nat ∧ A) → (Γ : A ctx) → Set where
  InH : {A : Set} {Γ : A ctx} {x : Nat} {a : A} →
          (x , a) ∈ ((x , a) :: Γ)
  InT : {A : Set} {Γ : A ctx} {x s : Nat} {a a' : A} →
          (x , a) ∈ Γ →
          ((x + 1+ s , a)) ∈ ((s , a') :: Γ)

-- the domain of a context
dom : {A : Set} → A ctx → Nat → Set
dom {A} Γ x = Σ[ a ∈ A ] ((x , a) ∈ Γ)

-- apartness for contexts
_#_ : {A : Set} (n : Nat) → (Γ : A ctx) → Set
x # Γ = dom Γ x → ⊥

_##_ : {A : Set} → A ctx → A ctx → Set
Γ1 ## Γ2 = (x : Nat) → dom Γ1 x → x # Γ2

-- extensional agreement: wherever both contexts bind a key, they agree
_≈_ : {A : Set} → A ctx → A ctx → Set
_≈_ {A} Γ1 Γ2 = (x : Nat) (a1 a2 : A) →
                  (x , a1) ∈ Γ1 ∧ (x , a2) ∈ Γ2 →
                  a1 == a2

-- TODO theorems and explanation
ctxmap : {A B : Set} → (A → B) → A ctx → B ctx
ctxmap f Γ = map (λ {(hx , ha) → (hx , f ha)}) Γ

-- TODO theorems
-- returns a list of the values stored in the context
ctx⇒values : {A : Set} → A ctx → List A

-- TODO theorems
-- converts a list of key-value pairs into a context, with later pairs in the list
-- overriding bindings definend by previous pairs
list⇒ctx : {A : Set} → List (Nat ∧ A) → A ctx

-- TODO theorems
-- converts a list of key-value pairs into a multi-context, where each value of
-- the result is the sublist of values from the former that were mapped to by the
-- corresponding key
list⇒list-ctx : {A : Set} → List (Nat ∧ A) → (List A) ctx

-- union merge A B is the union of A and B,
-- with (merge a b) being invoked to handle the mappings they have in common
union : {A : Set} → (A → A → A) → A ctx → A ctx → A ctx

-- The primary way to test membership is to use _∈_,
-- but this can be used in cases where using _∈_
-- would be too verbose or awkward.
-- The lookup theorems prove that they are compatible
_⦃⦃_⦄⦄ : {A : Set} → A ctx → Nat → Maybe A
[] ⦃⦃ x ⦄⦄ = None
((hx , ha) :: t) ⦃⦃ x ⦄⦄ with <dec x hx
... | Inl x<hx       = None
... | Inr (Inl refl) = Some ha
... | Inr (Inr hx<x) = t ⦃⦃ diff-1 hx<x ⦄⦄
---- lemmas ----

-- recovering a stored offset: s is determined by the proof x < s + 1+ x
undiff-1 : (x s : Nat) → (x<s+1+x : x < s + 1+ x) → s == diff-1 x<s+1+x
undiff-1 x s x<s+1+x
  rewrite n+1+m==1+n+m {s} {x} | ! (m-n==1+m-1+n n≤m+n (n<m→1+n≤m x<s+1+x)) | +comm {s} {x}
    = ! (n+m-n==m n≤n+m)

-- an index strictly below the head key cannot be in the context
too-small : {A : Set} {Γ : A ctx} {xl xb : Nat} {a : A} →
              xl < xb →
              dom ((xb , a) :: Γ) xl →
              ⊥
too-small (_ , ne) (_ , InH) = ne refl
too-small (x+1+xb≤xb , x+1+xb==xb) (_ , InT _) =
  x+1+xb==xb (≤antisym x+1+xb≤xb (≤trans (≤1+ ≤refl) n≤m+n))

-- looking up the head key always succeeds
all-not-none : {A : Set} {Γ : A ctx} {x : Nat} {a : A} →
                 None ≠ (((x , a) :: Γ) ⦃⦃ x ⦄⦄)
all-not-none {x = x} rewrite <dec-refl x = λ ()

-- if two equal-headed contexts agree on all lookups, so do their tails
all-bindings-==-rec-eq : {A : Set} {Γ1 Γ2 : A ctx} {x : Nat} {a : A} →
                           ((x' : Nat) → ((x , a) :: Γ1) ⦃⦃ x' ⦄⦄ == ((x , a) :: Γ2) ⦃⦃ x' ⦄⦄) →
                           ((x' : Nat) → Γ1 ⦃⦃ x' ⦄⦄ == Γ2 ⦃⦃ x' ⦄⦄)
all-bindings-==-rec-eq {x = x} h x'
  with h (x' + 1+ x)
... | eq
  with <dec (x' + 1+ x) x
... | Inl x'+1+x<x
  = abort (<antisym x'+1+x<x (n<m→n<s+m n<1+n))
... | Inr (Inl x'+1+x==x)
  = abort ((flip n≠n+1+m) (n+1+m==1+n+m · (+comm {1+ x} · x'+1+x==x)))
... | Inr (Inr x<x'+1+x)
  rewrite ! (undiff-1 x x' x<x'+1+x) = eq

-- if two contexts agree on all lookups, their tails do too (heads may differ)
all-bindings-==-rec : {A : Set} {Γ1 Γ2 : A ctx} {x1 x2 : Nat} {a1 a2 : A} →
                        ((x : Nat) → ((x1 , a1) :: Γ1) ⦃⦃ x ⦄⦄ == ((x2 , a2) :: Γ2) ⦃⦃ x ⦄⦄) →
                        ((x : Nat) → Γ1 ⦃⦃ x ⦄⦄ == Γ2 ⦃⦃ x ⦄⦄)
all-bindings-==-rec {x1 = x1} {x2} h x
  with h x1 | h x2
... | eq1 | eq2
  rewrite <dec-refl x1 | <dec-refl x2
  with <dec x1 x2 | <dec x2 x1
... | Inl _ | _
  = abort (somenotnone eq1)
... | Inr _ | Inl _
  = abort (somenotnone (! eq2))
... | Inr (Inl refl) | Inr (Inl refl)
  rewrite someinj eq1 | someinj eq2
  = all-bindings-==-rec-eq h x
... | Inr (Inl refl) | Inr (Inr x2<x2)
  = abort (<antirefl x2<x2)
... | Inr (Inr x2<x2) | Inr (Inl refl)
  = abort (<antirefl x2<x2)
... | Inr (Inr x2<x1) | Inr (Inr x1<x2)
  = abort (<antisym x1<x2 x2<x1)
---- core theorems ----

-- lookup is decidable
lookup-dec : {A : Set} (Γ : A ctx) (x : Nat) →
               Σ[ a ∈ A ] (Γ ⦃⦃ x ⦄⦄ == Some a) ∨ Γ ⦃⦃ x ⦄⦄ == None
lookup-dec Γ x
  with Γ ⦃⦃ x ⦄⦄
... | Some a = Inl (a , refl)
... | None   = Inr refl

-- The next two theorems show that lookup (_⦃⦃_⦄⦄) is consistent with membership (_∈_)
lookup-cons-1 : {A : Set} {Γ : A ctx} {x : Nat} {a : A} →
                  Γ ⦃⦃ x ⦄⦄ == Some a →
                  (x , a) ∈ Γ
lookup-cons-1 {Γ = []} ()
lookup-cons-1 {Γ = (hx , ha) :: t} {x} h
  with <dec x hx
lookup-cons-1 {_} {(hx , ha) :: t} {x} () | Inl _
lookup-cons-1 {_} {(hx , ha) :: t} {.hx} refl | Inr (Inl refl) = InH
lookup-cons-1 {_} {(hx , ha) :: t} {x} {a = a} h | Inr (Inr hx<x)
  = tr
      (λ y → (y , a) ∈ ((hx , ha) :: t))
      (m-n+n==m (n<m→1+n≤m hx<x))
      (InT (lookup-cons-1 {Γ = t} h))

lookup-cons-2 : {A : Set} {Γ : A ctx} {x : Nat} {a : A} →
                  (x , a) ∈ Γ →
                  Γ ⦃⦃ x ⦄⦄ == Some a
lookup-cons-2 {x = x} InH rewrite <dec-refl x = refl
lookup-cons-2 (InT {Γ = Γ} {x = x} {s} {a} x∈Γ)
  with <dec (x + 1+ s) s
... | Inl x+1+s<s = abort (<antisym x+1+s<s (n<m→n<s+m n<1+n))
... | Inr (Inl x+1+s==s) = abort ((flip n≠n+1+m) (n+1+m==1+n+m · (+comm {1+ s} · x+1+s==s)))
... | Inr (Inr s<x+1+s)
  with lookup-cons-2 x∈Γ
... | h rewrite ! (undiff-1 s x s<x+1+s) = h

-- membership (_∈_) respects insertion (_,,_)
x,a∈Γ,,x,a : {A : Set} {Γ : A ctx} {x : Nat} {a : A} →
               (x , a) ∈ (Γ ,, (x , a))
x,a∈Γ,,x,a {Γ = []} {x} {a} = InH
x,a∈Γ,,x,a {_} {(hx , ha) :: t} {x} {a}
  with <dec x hx
... | Inl _          = InH
... | Inr (Inl refl) = InH
... | Inr (Inr hx<x) =
  tr
    (λ y → (y , a) ∈ ((hx , ha) :: (t ,, (diff-1 hx<x , a))))
    (m-n+n==m (n<m→1+n≤m hx<x))
    (InT (x,a∈Γ,,x,a {Γ = t} {diff-1 hx<x} {a}))

-- insertion can't generate spurious membership
x∈Γ+→x∈Γ : {A : Set} {Γ : A ctx} {x x' : Nat} {a a' : A} →
             x ≠ x' →
             (x , a) ∈ (Γ ,, (x' , a')) →
             (x , a) ∈ Γ
x∈Γ+→x∈Γ {Γ = []} x≠x' InH = abort (x≠x' refl)
x∈Γ+→x∈Γ {Γ = []} x≠x' (InT ())
x∈Γ+→x∈Γ {Γ = (hx , ha) :: t} {x' = x'} x≠x' x∈Γ+
  with <dec x' hx
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = x'} x≠x' InH | Inl x'<hx = abort (x≠x' refl)
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = x'} x≠x' (InT InH) | Inl x'<hx
  rewrite m-n+n==m (n<m→1+n≤m x'<hx) = InH
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = x'} x≠x' (InT (InT {x = x} x∈Γ+)) | Inl x'<hx
  rewrite +assc {x} {1+ (diff-1 x'<hx)} {1+ x'} | m-n+n==m (n<m→1+n≤m x'<hx)
    = InT x∈Γ+
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = .hx} x≠x' InH | Inr (Inl refl) = abort (x≠x' refl)
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = .hx} x≠x' (InT x∈Γ+) | Inr (Inl refl) = InT x∈Γ+
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = x'} x≠x' InH | Inr (Inr hx<x') = InH
x∈Γ+→x∈Γ {_} {(hx , ha) :: t} {x' = x'} x≠x' (InT x∈Γ+) | Inr (Inr hx<x')
  = InT (x∈Γ+→x∈Γ (λ where refl → x≠x' (m-n+n==m (n<m→1+n≤m hx<x'))) x∈Γ+)

-- insertion respects membership
x∈Γ→x∈Γ+ : {A : Set} {Γ : A ctx} {x x' : Nat} {a a' : A} →
             x ≠ x' →
             (x , a) ∈ Γ →
             (x , a) ∈ (Γ ,, (x' , a'))
x∈Γ→x∈Γ+ {x = x} {x'} {a} {a'} x≠x' (InH {Γ = Γ'})
  with <dec x' x
... | Inl x'<x
  = tr
      (λ y → (y , a) ∈ ((x' , a') :: ((diff-1 x'<x , a) :: Γ')))
      (m-n+n==m (n<m→1+n≤m x'<x))
      (InT InH)
... | Inr (Inl refl) = abort (x≠x' refl)
... | Inr (Inr x<x') = InH
x∈Γ→x∈Γ+ {x = .(_ + 1+ _)} {x'} {a} {a'} x≠x' (InT {Γ = Γ} {x} {s} {a' = a''} x∈Γ)
  with <dec x' s
... | Inl x'<s
  = tr
      (λ y → (y , a) ∈ ((x' , a') :: ((diff-1 x'<s , a'') :: Γ)))
      ((+assc {b = 1+ (diff-1 x'<s)}) · (ap1 (_+_ x) (1+ap (m-n+n==m (n<m→1+n≤m x'<s)))))
      (InT (InT x∈Γ))
... | Inr (Inl refl) = InT x∈Γ
... | Inr (Inr s<x') =
  InT (x∈Γ→x∈Γ+ (λ where refl → x≠x' (m-n+n==m (n<m→1+n≤m s<x'))) x∈Γ)

-- Decidability of membership
-- This also packages up an appeal to context membership into a form that
-- lets us retain more information
ctxindirect : {A : Set} (Γ : A ctx) (x : Nat) → dom Γ x ∨ x # Γ
ctxindirect [] x = Inr (λ ())
ctxindirect ((hx , ha) :: t) x
  with <dec x hx
... | Inl x<hx = Inr (too-small x<hx)
... | Inr (Inl refl) = Inl (ha , InH)
... | Inr (Inr hx<x)
  with ctxindirect t (diff-1 hx<x)
ctxindirect ((hx , ha) :: t) x | Inr (Inr hx<x) | Inl (a , rec) =
  Inl (a , tr
             (λ y → (y , a) ∈ ((hx , ha) :: t))
             (m-n+n==m (n<m→1+n≤m hx<x))
             (InT rec))
ctxindirect {A} ((hx , ha) :: t) x | Inr (Inr hx<x) | Inr dne =
  Inr x∉Γ
  where
    x∉Γ : Σ[ a ∈ A ] ((x , a) ∈ ((hx , ha) :: t)) → ⊥
    x∉Γ (_ , x∈Γ) with x∈Γ
    ... | InH = (π2 hx<x) refl
    ... | InT {x = s} x-hx-1∈t
      rewrite ! (undiff-1 hx s hx<x) = dne (_ , x-hx-1∈t)

-- contexts give at most one binding for each variable
ctxunicity : {A : Set} {Γ : A ctx} {x : Nat} {a a' : A} →
               (x , a) ∈ Γ →
               (x , a') ∈ Γ →
               a == a'
ctxunicity ah a'h
  with lookup-cons-2 ah | lookup-cons-2 a'h
... | ah' | a'h' = someinj (! ah' · a'h')
-- everything is apart from the nil context
x#∅ : {A : Set} {x : Nat} → _#_ {A} x ∅
x#∅ (_ , ())

-- if an index is in the domain of a singleton context, it's the only
-- index in the context
lem-dom-eq : {A : Set} {a : A} {n m : Nat} →
               dom (■ (m , a)) n →
               n == m
lem-dom-eq (_ , InH) = refl
lem-dom-eq (_ , InT ())

-- If two contexts are semantically equivalent
-- (i.e. they represent the same mapping from ids to values),
-- then they are physically equal as judged by _==_
ctx-==-eqv : {A : Set} {Γ1 Γ2 : A ctx} →
               ((x : Nat) → Γ1 ⦃⦃ x ⦄⦄ == Γ2 ⦃⦃ x ⦄⦄) →
               Γ1 == Γ2
ctx-==-eqv {Γ1 = []} {[]} all-bindings-== = refl
ctx-==-eqv {Γ1 = []} {(hx2 , ha2) :: t2} all-bindings-==
  = abort (all-not-none {Γ = t2} {x = hx2} (all-bindings-== hx2))
ctx-==-eqv {Γ1 = (hx1 , ha1) :: t1} {[]} all-bindings-==
  = abort (all-not-none {Γ = t1} {x = hx1} (! (all-bindings-== hx1)))
ctx-==-eqv {Γ1 = (hx1 , ha1) :: t1} {(hx2 , ha2) :: t2} all-bindings-==
  rewrite ctx-==-eqv {Γ1 = t1} {t2} (all-bindings-==-rec all-bindings-==)
  with all-bindings-== hx1 | all-bindings-== hx2
... | ha1== | ha2== rewrite <dec-refl hx1 | <dec-refl hx2
  with <dec hx1 hx2 | <dec hx2 hx1
... | Inl hx1<hx2 | _
  = abort (somenotnone ha1==)
... | Inr (Inl refl) | Inl hx2<hx1
  = abort (somenotnone (! ha2==))
... | Inr (Inr hx2<hx1) | Inl hx2<'hx1
  = abort (somenotnone (! ha2==))
... | Inr (Inl refl) | Inr _
  rewrite someinj ha1== = refl
... | Inr (Inr hx2<hx1) | Inr (Inl refl)
  rewrite someinj ha2== = refl
... | Inr (Inr hx2<hx1) | Inr (Inr hx1<hx2)
  = abort (<antisym hx1<hx2 hx2<hx1)

-- equality of contexts is decidable (given decidable equality of values)
ctx-==-dec : {A : Set}
               (Γ1 Γ2 : A ctx) →
               ((a1 a2 : A) → a1 == a2 ∨ a1 ≠ a2) →
               Γ1 == Γ2 ∨ Γ1 ≠ Γ2
ctx-==-dec [] [] _ = Inl refl
ctx-==-dec [] (_ :: _) _ = Inr (λ ())
ctx-==-dec (_ :: _) [] _ = Inr (λ ())
ctx-==-dec ((hx1 , ha1) :: t1) ((hx2 , ha2) :: t2) A==dec
  with natEQ hx1 hx2 | A==dec ha1 ha2 | ctx-==-dec t1 t2 A==dec
... | Inl refl | Inl refl | Inl refl = Inl refl
... | Inl refl | Inl refl | Inr ne = Inr λ where refl → ne refl
... | Inl refl | Inr ne | _ = Inr λ where refl → ne refl
... | Inr ne | _ | _ = Inr λ where refl → ne refl

-- A useful way to destruct context membership. Never destruct a context via _::_
ctx-split : {A : Set} {Γ : A ctx} {n m : Nat} {an am : A} →
              (n , an) ∈ (Γ ,, (m , am)) →
              (n ≠ m ∧ (n , an) ∈ Γ) ∨ (n == m ∧ an == am)
ctx-split {Γ = Γ} {n} {m} {an} {am} n∈Γ+
  with natEQ n m
... | Inl refl = Inr (refl , ctxunicity n∈Γ+ (x,a∈Γ,,x,a {Γ = Γ}))
... | Inr n≠m = Inl (n≠m , x∈Γ+→x∈Γ n≠m n∈Γ+)

-- I'd say "God dammit agda" but AFAICT Coq is terrible about this as well
lemma-bullshit : {A : Set} (Γ' : A ctx) (a : A) (n m : Nat) →
                   Σ[ Γ ∈ A ctx ] (Γ == (n + 1+ m , a) :: Γ')
lemma-bullshit Γ' a n m = ((n + 1+ m , a) :: Γ') , refl

-- Allows the elimination of contexts. Never destruct a context via _::_
ctx-elim : {A : Set} {Γ : A ctx} →
             Γ == ∅
               ∨
             Σ[ n ∈ Nat ] Σ[ a ∈ A ] Σ[ Γ' ∈ A ctx ]
               (Γ == Γ' ,, (n , a) ∧ n # Γ')
ctx-elim {Γ = []} = Inl refl
ctx-elim {Γ = (n , a) :: []} = Inr (_ , _ , _ , refl , x#∅)
ctx-elim {Γ = (n , a) :: ((m , a2) :: Γ'')}
  with lemma-bullshit Γ'' a2 m n
... | Γ' , eq
  = Inr (n , a , Γ' , eqP , not-dom)
  where
    eqP : (n , a) :: ((m , a2) :: Γ'') == Γ' ,, (n , a)
    eqP rewrite eq with <dec n (m + 1+ n)
    ... | Inl n<m+1+n
      rewrite ! (undiff-1 n m n<m+1+n) = refl
    ... | Inr (Inl n==m+1+n)
      = abort (n≠n+1+m (n==m+1+n · (n+1+m==1+n+m · +comm {1+ m})))
    ... | Inr (Inr m+1+n<n)
      = abort (<antisym m+1+n<n (n<m→n<s+m n<1+n))
    not-dom : dom Γ' n → ⊥
    not-dom rewrite eq = λ n∈Γ' →
      too-small (n<m→n<s+m n<1+n) n∈Γ'

-- When using ctx-elim, this theorem is useful for establishing termination
ctx-decreasing : {A : Set} {Γ : A ctx} {n : Nat} {a : A} →
                   n # Γ →
                   ∥ Γ ,, (n , a) ∥ == 1+ ∥ Γ ∥
ctx-decreasing {Γ = []} n#Γ = refl
ctx-decreasing {Γ = (n' , a') :: Γ} {n} n#Γ
  with <dec n n'
... | Inl n<n' = refl
... | Inr (Inl refl) = abort (n#Γ (_ , InH))
... | Inr (Inr n'<n)
  = 1+ap (ctx-decreasing λ {(a , diff∈Γ) →
      n#Γ (a , tr
                 (λ y → (y , a) ∈ ((n' , a') :: Γ))
                 (m-n+n==m (n<m→1+n≤m n'<n))
                 (InT diff∈Γ))})
---- contrapositives of some previous theorems ----
lem-neq-apart : {A : Set} {a : A} {n m : Nat} →
n ≠ m →
n # (■ (m , a))
lem-neq-apart n≠m h = n≠m (lem-dom-eq h)
x#Γ→x#Γ+ : {A : Set} {Γ : A ctx} {x x' : Nat} {a' : A} →
x ≠ x' →
x # Γ →
x # (Γ ,, (x' , a'))
x#Γ→x#Γ+ {Γ = Γ} {x} {x'} {a'} x≠x' x#Γ
with ctxindirect (Γ ,, (x' , a')) x
... | Inl (_ , x∈Γ+) = abort (x#Γ (_ , x∈Γ+→x∈Γ x≠x' x∈Γ+))
... | Inr x#Γ+ = x#Γ+
x#Γ+→x#Γ : {A : Set} {Γ : A ctx} {x x' : Nat} {a' : A} →
x # (Γ ,, (x' , a')) →
x # Γ
x#Γ+→x#Γ {Γ = Γ} {x} {x'} {a'} x#Γ+
with ctxindirect Γ x
... | Inr x#Γ = x#Γ
... | Inl (_ , x∈Γ)
with natEQ x x'
... | Inl refl = abort (x#Γ+ (_ , x,a∈Γ,,x,a {Γ = Γ}))
... | Inr x≠x' = abort (x#Γ+ (_ , x∈Γ→x∈Γ+ x≠x' x∈Γ))
lookup-cp-1 : {A : Set} {Γ : A ctx} {x : Nat} →
x # Γ →
Γ ⦃⦃ x ⦄⦄ == None
lookup-cp-1 {Γ = Γ} {x} x#Γ
with lookup-dec Γ x
... | Inl (_ , x∈Γ) = abort (x#Γ (_ , (lookup-cons-1 x∈Γ)))
... | Inr x#'Γ = x#'Γ
lookup-cp-2 : {A : Set} {Γ : A ctx} {x : Nat} →
Γ ⦃⦃ x ⦄⦄ == None →
x # Γ
lookup-cp-2 {Γ = Γ} {x} x#Γ
with ctxindirect Γ x
... | Inl (_ , x∈Γ) = abort (somenotnone ((! (lookup-cons-2 x∈Γ)) · x#Γ))
... | Inr x#'Γ = x#'Γ
---- some definitions ----
-- merge' f ma1 a2: combine an optional pre-existing binding with a new
-- value. When no previous binding exists the new value wins outright;
-- otherwise the two are combined with f (old value as first argument).
merge' : {A : Set} → (A → A → A) → Maybe A → A → A
merge' merge ma1 a2
with ma1
... | None = a2
... | Some a1 = merge a1 a2
-- union' f Γ1 Γ2 offset: fold the bindings of Γ2 into Γ1, shifting each
-- Γ2 key by the running offset (keys in a ctx are stored as successive
-- differences), resolving collisions with merge' f.
union' : {A : Set} → (A → A → A) → A ctx → A ctx → Nat → A ctx
union' merge Γ1 [] _ = Γ1
union' merge Γ1 ((hx , ha) :: Γ2) offset
= union' merge (Γ1 ,, (hx + offset , merge' merge (Γ1 ⦃⦃ hx + offset ⦄⦄) ha)) Γ2 (1+ hx + offset)
-- union f Γ1 Γ2: union of two contexts, combining clashing values with f.
union merge Γ1 Γ2 = union' merge Γ1 Γ2 0
---- union theorems ----
lemma-math' : ∀{x x1 n} → x ≠ x1 + (n + 1+ x)
lemma-math' {x} {x1} {n}
rewrite ! (+assc {x1} {n} {1+ x})
| n+1+m==1+n+m {x1 + n} {x}
| +comm {1+ x1 + n} {x}
= n≠n+1+m
lemma-union'-0 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x n : Nat} {a : A} →
(x , a) ∈ Γ1 →
(x , a) ∈ union' m Γ1 Γ2 (n + 1+ x)
lemma-union'-0 {Γ2 = []} x∈Γ1 = x∈Γ1
lemma-union'-0 {Γ2 = (x1 , a1) :: Γ2} {x} {n} x∈Γ1
rewrite ! (+assc {1+ x1} {n} {1+ x})
= lemma-union'-0 {Γ2 = Γ2} {n = 1+ x1 + n} (x∈Γ→x∈Γ+ (lemma-math' {x1 = x1} {n}) x∈Γ1)
lemma-union'-1 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x n : Nat} {a : A} →
(x , a) ∈ Γ1 →
(n≤x : n ≤ x) →
(difference n≤x) # Γ2 →
(x , a) ∈ union' m Γ1 Γ2 n
lemma-union'-1 {Γ2 = []} {x} x∈Γ1 n≤x x-n#Γ2 = x∈Γ1
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2
with <dec x (x1 + n)
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2 | Inl x<x1+n
with Γ1 ⦃⦃ x1 + n ⦄⦄
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2 | Inl x<x1+n | Some a'
= tr
(λ y → (x , a) ∈ union' m (Γ1 ,, (x1 + n , m a' a1)) Γ2 y)
(n+1+m==1+n+m {difference (π1 x<x1+n)} · 1+ap (m-n+n==m (π1 x<x1+n)))
(lemma-union'-0 {Γ2 = Γ2} (x∈Γ→x∈Γ+ (π2 x<x1+n) x∈Γ1))
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2 | Inl x<x1+n | None
= tr
(λ y → (x , a) ∈ union' m (Γ1 ,, (x1 + n , a1)) Γ2 y)
(n+1+m==1+n+m {difference (π1 x<x1+n)} · 1+ap (m-n+n==m (π1 x<x1+n)))
(lemma-union'-0 {Γ2 = Γ2} (x∈Γ→x∈Γ+ (π2 x<x1+n) x∈Γ1))
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2 | Inr (Inl refl)
rewrite +comm {x1} {n} | n+m-n==m n≤x
= abort (x-n#Γ2 (_ , InH))
lemma-union'-1 {m = m} {Γ1} {(x1 , a1) :: Γ2} {x} {n} {a} x∈Γ1 n≤x x-n#Γ2 | Inr (Inr x1+n<x)
rewrite (! (a+b==c→a==c-b (+assc {diff-1 x1+n<x} · m-n+n==m (n<m→1+n≤m x1+n<x)) n≤x))
= lemma-union'-1
(x∈Γ→x∈Γ+ (flip (π2 x1+n<x)) x∈Γ1)
(n<m→1+n≤m x1+n<x)
λ {(_ , x-x1-n∈Γ2) → x-n#Γ2 (_ , InT x-x1-n∈Γ2)}
lemma-union'-2 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x n : Nat} {a : A} →
(x + n) # Γ1 →
(x , a) ∈ Γ2 →
(x + n , a) ∈ union' m Γ1 Γ2 n
lemma-union'-2 {Γ1 = Γ1} x+n#Γ1 (InH {Γ = Γ2})
rewrite lookup-cp-1 x+n#Γ1
= lemma-union'-0 {Γ2 = Γ2} {n = Z} (x,a∈Γ,,x,a {Γ = Γ1})
lemma-union'-2 {Γ1 = Γ1} {n = n} x+n#Γ1 (InT {Γ = Γ2} {x = x} {s} x∈Γ2)
rewrite +assc {x} {1+ s} {n}
with Γ1 ⦃⦃ s + n ⦄⦄
... | Some a'
= lemma-union'-2
(λ {(_ , x∈Γ1+) →
x+n#Γ1 (_ , x∈Γ+→x∈Γ (flip (lemma-math' {x1 = Z})) x∈Γ1+)})
x∈Γ2
... | None
= lemma-union'-2
(λ {(_ , x∈Γ1+) →
x+n#Γ1 (_ , x∈Γ+→x∈Γ (flip (lemma-math' {x1 = Z})) x∈Γ1+)})
x∈Γ2
lemma-union'-3 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x n : Nat} {a1 a2 : A} →
(x + n , a1) ∈ Γ1 →
(x , a2) ∈ Γ2 →
(x + n , m a1 a2) ∈ union' m Γ1 Γ2 n
lemma-union'-3 {Γ1 = Γ1} x+n∈Γ1 (InH {Γ = Γ2})
rewrite lookup-cons-2 x+n∈Γ1
= lemma-union'-0 {Γ2 = Γ2} {n = Z} (x,a∈Γ,,x,a {Γ = Γ1})
lemma-union'-3 {Γ1 = Γ1} {n = n} x+n∈Γ1 (InT {Γ = Γ2} {x = x} {s} x∈Γ2)
rewrite +assc {x} {1+ s} {n}
with Γ1 ⦃⦃ s + n ⦄⦄
... | Some a'
= lemma-union'-3 (x∈Γ→x∈Γ+ (flip (lemma-math' {x1 = Z})) x+n∈Γ1) x∈Γ2
... | None
= lemma-union'-3 (x∈Γ→x∈Γ+ (flip (lemma-math' {x1 = Z})) x+n∈Γ1) x∈Γ2
lemma-union'-4 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x n : Nat} →
dom (union' m Γ1 Γ2 n) x →
dom Γ1 x ∨ (Σ[ s ∈ Nat ] (x == n + s ∧ dom Γ2 s))
lemma-union'-4 {Γ2 = []} x∈un = Inl x∈un
lemma-union'-4 {Γ1 = Γ1} {(x1 , a1) :: Γ2} {x} {n} x∈un
with lemma-union'-4 {Γ2 = Γ2} x∈un
... | Inr (s , refl , _ , s∈Γ2)
rewrite +comm {x1} {n}
| ! (n+1+m==1+n+m {n + x1} {s})
| +assc {n} {x1} {1+ s}
| +comm {x1} {1+ s}
| ! (n+1+m==1+n+m {s} {x1})
= Inr (_ , refl , _ , InT s∈Γ2)
... | Inl (_ , x∈Γ1+)
with natEQ x (n + x1)
... | Inl refl = Inr (_ , refl , _ , InH)
... | Inr x≠n+x1
rewrite +comm {x1} {n}
= Inl (_ , x∈Γ+→x∈Γ x≠n+x1 x∈Γ1+)
x,a∈Γ1→x∉Γ2→x,a∈Γ1∪Γ2 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x : Nat} {a : A} →
(x , a) ∈ Γ1 →
x # Γ2 →
(x , a) ∈ union m Γ1 Γ2
x,a∈Γ1→x∉Γ2→x,a∈Γ1∪Γ2 {Γ2 = Γ2} x∈Γ1 x#Γ2
= lemma-union'-1 x∈Γ1 0≤n (tr (λ y → y # Γ2) (! (n+m-n==m 0≤n)) x#Γ2)
x∉Γ1→x,a∈Γ2→x,a∈Γ1∪Γ2 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x : Nat} {a : A} →
x # Γ1 →
(x , a) ∈ Γ2 →
(x , a) ∈ union m Γ1 Γ2
x∉Γ1→x,a∈Γ2→x,a∈Γ1∪Γ2 {Γ1 = Γ1} {x = x} x#Γ1 x∈Γ2
with lemma-union'-2 {n = Z} (tr (λ y → y # Γ1) (! n+Z==n) x#Γ1) x∈Γ2
... | rslt
rewrite n+Z==n {x}
= rslt
x∈Γ1→x∈Γ2→x∈Γ1∪Γ2 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x : Nat} {a1 a2 : A} →
(x , a1) ∈ Γ1 →
(x , a2) ∈ Γ2 →
(x , m a1 a2) ∈ union m Γ1 Γ2
x∈Γ1→x∈Γ2→x∈Γ1∪Γ2 {Γ1 = Γ1} {Γ2} {x} {a1} x∈Γ1 x∈Γ2
with lemma-union'-3 (tr (λ y → (y , a1) ∈ Γ1) (! n+Z==n) x∈Γ1) x∈Γ2
... | rslt
rewrite n+Z==n {x}
= rslt
x∈Γ1∪Γ2→x∈Γ1∨x∈Γ2 : {A : Set} {m : A → A → A} {Γ1 Γ2 : A ctx} {x : Nat} →
dom (union m Γ1 Γ2) x →
dom Γ1 x ∨ dom Γ2 x
x∈Γ1∪Γ2→x∈Γ1∨x∈Γ2 x∈Γ1∪Γ2
with lemma-union'-4 {n = Z} x∈Γ1∪Γ2
x∈Γ1∪Γ2→x∈Γ1∨x∈Γ2 x∈Γ1∪Γ2 | Inl x'∈Γ1 = Inl x'∈Γ1
x∈Γ1∪Γ2→x∈Γ1∨x∈Γ2 x∈Γ1∪Γ2 | Inr (_ , refl , x'∈Γ2) = Inr x'∈Γ2
---- contraction and exchange ----
-- TODO these proofs could use refactoring -
-- contraction should probably make use of ctx-==-dec and
-- exchange is way too long and repetitive
contraction : {A : Set} {Γ : A ctx} {x : Nat} {a a' : A} →
Γ ,, (x , a') ,, (x , a) == Γ ,, (x , a)
contraction {Γ = []} {x} rewrite <dec-refl x = refl
contraction {Γ = (hx , ha) :: t} {x} {a} {a'}
with <dec x hx
... | Inl _ rewrite <dec-refl x = refl
... | Inr (Inl refl) rewrite <dec-refl hx = refl
... | Inr (Inr hx<x)
with <dec x hx
... | Inl x<hx = abort (<antisym x<hx hx<x)
... | Inr (Inl refl) = abort (<antirefl hx<x)
... | Inr (Inr hx<'x)
rewrite diff-proof-irrelevance (n<m→1+n≤m hx<x) (n<m→1+n≤m hx<'x)
| contraction {Γ = t} {diff-1 hx<'x} {a} {a'}
= refl
exchange : {A : Set} {Γ : A ctx} {x1 x2 : Nat} {a1 a2 : A} →
x1 ≠ x2 →
Γ ,, (x1 , a1) ,, (x2 , a2) == Γ ,, (x2 , a2) ,, (x1 , a1)
exchange {A} {Γ} {x1} {x2} {a1} {a2} x1≠x2
= ctx-==-eqv fun
where
fun : (x : Nat) →
(Γ ,, (x1 , a1) ,, (x2 , a2)) ⦃⦃ x ⦄⦄ ==
(Γ ,, (x2 , a2) ,, (x1 , a1)) ⦃⦃ x ⦄⦄
fun x
with natEQ x x1 | natEQ x x2 | ctxindirect Γ x
fun x | Inl refl | Inl refl | _
= abort (x1≠x2 refl)
fun x1 | Inl refl | Inr x≠x2 | Inl (_ , x1∈Γ)
with x,a∈Γ,,x,a {Γ = Γ} {x1} {a1}
... | x∈Γ+1
with x∈Γ→x∈Γ+ {a' = a2} x≠x2 x∈Γ+1 | x,a∈Γ,,x,a {Γ = Γ ,, (x2 , a2)} {x1} {a1}
... | x∈Γ++1 | x∈Γ++2
rewrite lookup-cons-2 x∈Γ++1 | lookup-cons-2 x∈Γ++2 = refl
fun x1 | Inl refl | Inr x≠x2 | Inr x1#Γ
with x,a∈Γ,,x,a {Γ = Γ} {x1} {a1}
... | x∈Γ+1
with x∈Γ→x∈Γ+ {a' = a2} x≠x2 x∈Γ+1 | x,a∈Γ,,x,a {Γ = Γ ,, (x2 , a2)} {x1} {a1}
... | x∈Γ++1 | x∈Γ++2
rewrite lookup-cons-2 x∈Γ++1 | lookup-cons-2 x∈Γ++2 = refl
fun x2 | Inr x≠x1 | Inl refl | Inl (_ , x2∈Γ)
with x,a∈Γ,,x,a {Γ = Γ} {x2} {a2}
... | x∈Γ+2
with x∈Γ→x∈Γ+ {a' = a1} x≠x1 x∈Γ+2 | x,a∈Γ,,x,a {Γ = Γ ,, (x1 , a1)} {x2} {a2}
... | x∈Γ++1 | x∈Γ++2
rewrite lookup-cons-2 x∈Γ++1 | lookup-cons-2 x∈Γ++2 = refl
fun x2 | Inr x≠x1 | Inl refl | Inr x2#Γ
with x,a∈Γ,,x,a {Γ = Γ} {x2} {a2}
... | x∈Γ+2
with x∈Γ→x∈Γ+ {a' = a1} x≠x1 x∈Γ+2 | x,a∈Γ,,x,a {Γ = Γ ,, (x1 , a1)} {x2} {a2}
... | x∈Γ++1 | x∈Γ++2
rewrite lookup-cons-2 x∈Γ++1 | lookup-cons-2 x∈Γ++2 = refl
fun x | Inr x≠x1 | Inr x≠x2 | Inl (_ , x∈Γ)
with x∈Γ→x∈Γ+ {a' = a1} x≠x1 x∈Γ | x∈Γ→x∈Γ+ {a' = a2} x≠x2 x∈Γ
... | x∈Γ+1 | x∈Γ+2
with x∈Γ→x∈Γ+ {a' = a2} x≠x2 x∈Γ+1 | x∈Γ→x∈Γ+ {a' = a1} x≠x1 x∈Γ+2
... | x∈Γ++1 | x∈Γ++2
rewrite lookup-cons-2 x∈Γ++1 | lookup-cons-2 x∈Γ++2 = refl
fun x | Inr x≠x1 | Inr x≠x2 | Inr x#Γ
with x#Γ→x#Γ+ {a' = a1} x≠x1 x#Γ | x#Γ→x#Γ+ {a' = a2} x≠x2 x#Γ
... | x#Γ+1 | x#Γ+2
with x#Γ→x#Γ+ {a' = a2} x≠x2 x#Γ+1 | x#Γ→x#Γ+ {a' = a1} x≠x1 x#Γ+2
... | x#Γ++1 | x#Γ++2
rewrite lookup-cp-1 x#Γ++1 | lookup-cp-1 x#Γ++2 = refl
---- remaining function definitions ----
list⇒ctx = foldl _,,_ ∅
list⇒list-ctx {A} l
= foldl f ∅ (reverse l)
where
f : (List A) ctx → Nat ∧ A → (List A) ctx
f Γ (n , a)
with ctxindirect Γ n
... | Inl (as , n∈Γ)
= Γ ,, (n , a :: as)
... | Inr n#Γ
= Γ ,, (n , a :: [])
ctx⇒values = map π2
|
[GOAL]
a : ℤ
p : ℕ
pp : Fact (Nat.Prime p)
⊢ ↑a = 0 ↔ Int.gcd a ↑p ≠ 1
[PROOFSTEP]
rw [Ne, Int.gcd_comm, Int.gcd_eq_one_iff_coprime, (Nat.prime_iff_prime_int.1 pp.1).coprime_iff_not_dvd,
Classical.not_not, int_cast_zmod_eq_zero_iff_dvd]
|
-----------------------------------------------------------------------------
-- |
-- Module : Berp.Base
-- Copyright : (c) 2010 Bernie Pope
-- License : BSD-style
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : ghc
--
-- This module exports all the primitive functions which are needed by
-- the compiled programs. Avoid putting extraneous exports in this file
-- because it is imported by all compiled programs.
--
-----------------------------------------------------------------------------
module Berp.Base
( module Complex
, int, none, string, true, false, def, lambda, (=:), stmt, ifThenElse, ret, pass, break
, continue, while, whileElse, for, forElse, ifThen, (@@), tailCall, tuple, read, var
, (%), (+), (-), (*), (.), (/), (==), (<), (>), (<=), (>=), and, or, not, klass, setattr, list, dictionary
, subs, try, tryElse, tryFinally, tryElseFinally, except, exceptDefault, raise, reRaise, raiseFrom
, pure, pureObject, yield, generator, returnGenerator, unaryMinus, unaryPlus, invert, runEval
, interpretStmt, topVar, unpack, setitem, Pat (..), complex, set
, importModule, readGlobal, writeGlobal, readLocal, writeLocal, run, runWithGlobals, importAll )
where
import Prelude ()
import Data.Complex as Complex (Complex (..))
import Berp.Base.TopLevel (importModule, importAll, run, runWithGlobals)
import Berp.Base.Prims
( (=:), stmt, ifThenElse, ret, pass, break, continue, while, whileElse, for, forElse, ifThen,
(@@), tailCall, read, var, setattr, subs, try, tryElse, tryFinally, tryElseFinally, except,
exceptDefault, raise, reRaise, raiseFrom, yield, def, lambda, generator, returnGenerator, topVar, pure,
pureObject, unpack, setitem, Pat (..), readGlobal, writeGlobal,
readLocal, writeLocal )
import Berp.Base.Operators
((%), (+), (-), (*), (.), (/), (==), (<), (>), (<=), (>=), and, or, unaryMinus, unaryPlus, invert, not)
import Berp.Base.Monad (runEval, interpretStmt)
import Berp.Base.Class (klass)
import Berp.Base.StdTypes.Integer (int)
import Berp.Base.StdTypes.Tuple (tuple)
import Berp.Base.StdTypes.Bool (true, false)
import Berp.Base.StdTypes.String (string)
import Berp.Base.StdTypes.None (none)
import Berp.Base.StdTypes.List (list)
import Berp.Base.StdTypes.Dictionary (dictionary)
import Berp.Base.StdTypes.Set (set)
import Berp.Base.StdTypes.Complex (complex)
|
{-# OPTIONS --without-K --rewriting #-}
{-
This file contains a bunch of basic stuff which is needed early.
Maybe it should be organised better.
-}
module lib.Base where
{- Universes and typing
Agda has explicit universe polymorphism, which means that there is an actual
type of universe levels on which you can quantify. This type is called [ULevel]
and comes equipped with the following operations:
- [lzero] : [ULevel] (in order to have at least one universe)
- [lsucc] : [ULevel → ULevel] (the [i]th universe level is a term in the
[lsucc i]th universe)
- [lmax] : [ULevel → ULevel → ULevel] (in order to type dependent products (where
the codomain is in a uniform universe level)
This type is postulated below and linked to Agda’s universe polymorphism
mechanism via the built-in module Agda.Primitive (it’s the new way).
In plain Agda, the [i]th universe is called [Set i], which is not a very good
name from the point of view of HoTT, so we define [Type] as a synonym of [Set]
and [Set] should never be used again.
-}
open import Agda.Primitive public using (lzero)
renaming (Level to ULevel; lsuc to lsucc; _⊔_ to lmax)
Type : (i : ULevel) → Set (lsucc i)
Type i = Set i
Type₀ = Type lzero
Type0 = Type lzero
Type₁ = Type (lsucc lzero)
Type1 = Type (lsucc lzero)
{-
There is no built-in or standard way to coerce an ambiguous term to a given type
(like [u : A] in ML), the symbol [:] is reserved, and the Unicode [∶] is really
a bad idea.
So we’re using the symbol [_:>_], which has the advantage that it can mimic
Coq’s [u = v :> A].
-}
of-type : ∀ {i} (A : Type i) (u : A) → A
of-type A u = u
infix 40 of-type
syntax of-type A u = u :> A
{- Instance search -}
⟨⟩ : ∀ {i} {A : Type i} {{a : A}} → A
⟨⟩ {{a}} = a
{- Identity type
The identity type is called [Path] and [_==_] because the symbol [=] is
reserved in Agda.
The constant path is [idp]. Note that all arguments of [idp] are implicit.
-}
infix 30 _==_
data _==_ {i} {A : Type i} (a : A) : A → Type i where
idp : a == a
Path = _==_
{-# BUILTIN EQUALITY _==_ #-}
{- Paulin-Mohring J rule
At the time I’m writing this (July 2013), the identity type is somehow broken in
Agda dev, it behaves more or less as the Martin-Löf identity type instead of
behaving like the Paulin-Mohring identity type.
So here is the Paulin-Mohring J rule -}
J : ∀ {i j} {A : Type i} {a : A} (B : (a' : A) (p : a == a') → Type j) (d : B a idp)
{a' : A} (p : a == a') → B a' p
J B d idp = d
J' : ∀ {i j} {A : Type i} {a : A} (B : (a' : A) (p : a' == a) → Type j) (d : B a idp)
{a' : A} (p : a' == a) → B a' p
J' B d idp = d
{- Rewriting
This is a new pragma added to Agda to help create higher inductive types.
-}
infix 30 _↦_
postulate -- HIT
_↦_ : ∀ {i} {A : Type i} → A → A → Type i
{-# BUILTIN REWRITE _↦_ #-}
{- Unit type
The unit type is defined as record so that we also get the η-rule definitionally.
-}
record ⊤ : Type₀ where
instance constructor unit
Unit = ⊤
{-# BUILTIN UNIT ⊤ #-}
{- Dependent paths
The notion of dependent path is a very important notion.
If you have a dependent type [B] over [A], a path [p : x == y] in [A] and two
points [u : B x] and [v : B y], there is a type [u == v [ B ↓ p ]] of paths from
[u] to [v] lying over the path [p].
By definition, if [p] is a constant path, then [u == v [ B ↓ p ]] is just an
ordinary path in the fiber.
-}
PathOver : ∀ {i j} {A : Type i} (B : A → Type j)
{x y : A} (p : x == y) (u : B x) (v : B y) → Type j
PathOver B idp u v = (u == v)
infix 30 PathOver
syntax PathOver B p u v =
u == v [ B ↓ p ]
{- Ap, coe and transport
Given two fibrations over a type [A], a fiberwise map between the two fibrations
can be applied to any dependent path in the first fibration ([ap↓]).
As a special case, when [A] is [Unit], we find the familiar [ap] ([ap] is
defined in terms of [ap↓] because it shouldn’t change anything for the user
and this is helpful in some rare cases)
-}
ap : ∀ {i j} {A : Type i} {B : Type j} (f : A → B) {x y : A}
→ (x == y → f x == f y)
ap f idp = idp
ap↓ : ∀ {i j k} {A : Type i} {B : A → Type j} {C : A → Type k}
(g : {a : A} → B a → C a) {x y : A} {p : x == y}
{u : B x} {v : B y}
→ (u == v [ B ↓ p ] → g u == g v [ C ↓ p ])
ap↓ g {p = idp} p = ap g p
{-
[apd↓] is defined in lib.PathOver. Unlike [ap↓] and [ap], [apd] is not
definitionally a special case of [apd↓]
-}
apd : ∀ {i j} {A : Type i} {B : A → Type j} (f : (a : A) → B a) {x y : A}
→ (p : x == y) → f x == f y [ B ↓ p ]
apd f idp = idp
{-
An equality between types gives two maps back and forth
-}
coe : ∀ {i} {A B : Type i} (p : A == B) → A → B
coe idp x = x
coe! : ∀ {i} {A B : Type i} (p : A == B) → B → A
coe! idp x = x
{-
The operations of transport forward and backward are defined in terms of [ap]
and [coe], because this is more convenient in practice.
-}
transport : ∀ {i j} {A : Type i} (B : A → Type j) {x y : A} (p : x == y)
→ (B x → B y)
transport B p = coe (ap B p)
transport! : ∀ {i j} {A : Type i} (B : A → Type j) {x y : A} (p : x == y)
→ (B y → B x)
transport! B p = coe! (ap B p)
{- Π-types
Shorter notation for Π-types.
-}
Π : ∀ {i j} (A : Type i) (P : A → Type j) → Type (lmax i j)
Π A P = (x : A) → P x
{- Σ-types
Σ-types are defined as a record so that we have definitional η.
-}
infixr 60 _,_
record Σ {i j} (A : Type i) (B : A → Type j) : Type (lmax i j) where
constructor _,_
field
fst : A
snd : B fst
open Σ public
pair= : ∀ {i j} {A : Type i} {B : A → Type j}
{a a' : A} (p : a == a') {b : B a} {b' : B a'}
(q : b == b' [ B ↓ p ])
→ (a , b) == (a' , b')
pair= idp q = ap (_ ,_) q
pair×= : ∀ {i j} {A : Type i} {B : Type j}
{a a' : A} (p : a == a') {b b' : B} (q : b == b')
→ (a , b) == (a' , b')
pair×= idp q = pair= idp q
{- Empty type
We define the eliminator of the empty type using an absurd pattern. Given that
absurd patterns are not consistent with HIT, we will not use empty patterns
anymore after that.
-}
data ⊥ : Type₀ where
Empty = ⊥
⊥-elim : ∀ {i} {P : ⊥ → Type i} → ((x : ⊥) → P x)
⊥-elim ()
Empty-elim = ⊥-elim
{- Negation and disequality -}
¬ : ∀ {i} (A : Type i) → Type i
¬ A = A → ⊥
_≠_ : ∀ {i} {A : Type i} → (A → A → Type i)
x ≠ y = ¬ (x == y)
{- Natural numbers -}
data ℕ : Type₀ where
O : ℕ
S : (n : ℕ) → ℕ
Nat = ℕ
{-# BUILTIN NATURAL ℕ #-}
{- Lifting to a higher universe level
The operation of lifting enjoys both β and η definitionally.
It’s a bit annoying to use, but it’s not used much (for now).
-}
record Lift {i j} (A : Type i) : Type (lmax i j) where
instance constructor lift
field
lower : A
open Lift public
{- Equational reasoning
Equational reasoning is a way to write readable chains of equalities.
The idea is that you can write the following:
t : a == e
t = a =⟨ p ⟩
b =⟨ q ⟩
c =⟨ r ⟩
d =⟨ s ⟩
e ∎
where [p] is a path from [a] to [b], [q] is a path from [b] to [c], and so on.
You often have to apply some equality in some context, for instance [p] could be
[ap ctx thm] where [thm] is the interesting theorem used to prove that [a] is
equal to [b], and [ctx] is the context.
In such cases, you can use instead [thm |in-ctx ctx]. The advantage is that
[ctx] is usually boring whereas the first word of [thm] is the most interesting
part.
_=⟨_⟩ is not definitionally the same thing as concatenation of paths _∙_ because
we haven’t defined concatenation of paths yet, and also you probably shouldn’t
reason on paths constructed with equational reasoning.
If you do want to reason on paths constructed with equational reasoning, check
out lib.types.PathSeq instead.
-}
infixr 10 _=⟨_⟩_
infix 15 _=∎
_=⟨_⟩_ : ∀ {i} {A : Type i} (x : A) {y z : A} → x == y → y == z → x == z
_ =⟨ idp ⟩ idp = idp
_=∎ : ∀ {i} {A : Type i} (x : A) → x == x
_ =∎ = idp
infixl 40 ap
syntax ap f p = p |in-ctx f
{- Various basic functions and function operations
The identity function on a type [A] is [idf A] and the constant function at some
point [b] is [cst b].
Composition of functions ([_∘_]) can handle dependent functions.
-}
idf : ∀ {i} (A : Type i) → (A → A)
idf A = λ x → x
cst : ∀ {i j} {A : Type i} {B : Type j} (b : B) → (A → B)
cst b = λ _ → b
infixr 80 _∘_
_∘_ : ∀ {i j k} {A : Type i} {B : A → Type j} {C : (a : A) → (B a → Type k)}
→ (g : {a : A} → Π (B a) (C a)) → (f : Π A B) → Π A (λ a → C a (f a))
g ∘ f = λ x → g (f x)
-- Application
infixr 0 _$_
_$_ : ∀ {i j} {A : Type i} {B : A → Type j} → (∀ x → B x) → (∀ x → B x)
f $ x = f x
-- (Un)curryfication
curry : ∀ {i j k} {A : Type i} {B : A → Type j} {C : Σ A B → Type k}
→ (∀ s → C s) → (∀ x y → C (x , y))
curry f x y = f (x , y)
uncurry : ∀ {i j k} {A : Type i} {B : A → Type j} {C : ∀ x → B x → Type k}
→ (∀ x y → C x y) → (∀ s → C (fst s) (snd s))
uncurry f (x , y) = f x y
{- Truncation levels
The type of truncation levels is isomorphic to the type of natural numbers but
"starts at -2".
-}
data TLevel : Type₀ where
⟨-2⟩ : TLevel
S : (n : TLevel) → TLevel
ℕ₋₂ = TLevel
⟨_⟩₋₂ : ℕ → ℕ₋₂
⟨ O ⟩₋₂ = ⟨-2⟩
⟨ S n ⟩₋₂ = S ⟨ n ⟩₋₂
{- Coproducts and case analysis -}
data Coprod {i j} (A : Type i) (B : Type j) : Type (lmax i j) where
inl : A → Coprod A B
inr : B → Coprod A B
infixr 80 _⊔_
_⊔_ = Coprod
Dec : ∀ {i} (P : Type i) → Type i
Dec P = P ⊔ ¬ P
{-
Pointed types and pointed maps.
[A ⊙→ B] was pointed, but it was never used as a pointed type.
-}
infix 60 ⊙[_,_]
record Ptd (i : ULevel) : Type (lsucc i) where
constructor ⊙[_,_]
field
de⊙ : Type i
pt : de⊙
open Ptd public
ptd : ∀ {i} (A : Type i) → A → Ptd i
ptd = ⊙[_,_]
ptd= : ∀ {i} {A A' : Type i} (p : A == A')
{a : A} {a' : A'} (q : a == a' [ idf _ ↓ p ])
→ ⊙[ A , a ] == ⊙[ A' , a' ]
ptd= idp q = ap ⊙[ _ ,_] q
Ptd₀ = Ptd lzero
infixr 0 _⊙→_
_⊙→_ : ∀ {i j} → Ptd i → Ptd j → Type (lmax i j)
⊙[ A , a₀ ] ⊙→ ⊙[ B , b₀ ] = Σ (A → B) (λ f → f a₀ == b₀)
⊙idf : ∀ {i} (X : Ptd i) → X ⊙→ X
⊙idf X = (λ x → x) , idp
⊙cst : ∀ {i j} {X : Ptd i} {Y : Ptd j} → X ⊙→ Y
⊙cst {Y = Y} = (λ x → pt Y) , idp
{-
Used in a hack to make HITs maybe consistent. This is just a parametrized unit
type (positively)
-}
data Phantom {i} {A : Type i} (a : A) : Type₀ where
phantom : Phantom a
{-
Numeric literal overloading
This enables writing numeric literals
-}
record FromNat {i} (A : Type i) : Type (lsucc i) where
field
in-range : ℕ → Type i
read : ∀ n → ⦃ _ : in-range n ⦄ → A
open FromNat ⦃...⦄ public using () renaming (read to from-nat)
{-# BUILTIN FROMNAT from-nat #-}
record FromNeg {i} (A : Type i) : Type (lsucc i) where
field
in-range : ℕ → Type i
read : ∀ n → ⦃ _ : in-range n ⦄ → A
open FromNeg ⦃...⦄ public using () renaming (read to from-neg)
{-# BUILTIN FROMNEG from-neg #-}
instance
ℕ-reader : FromNat ℕ
FromNat.in-range ℕ-reader _ = ⊤
FromNat.read ℕ-reader n = n
TLevel-reader : FromNat TLevel
FromNat.in-range TLevel-reader _ = ⊤
FromNat.read TLevel-reader n = S (S ⟨ n ⟩₋₂)
TLevel-neg-reader : FromNeg TLevel
FromNeg.in-range TLevel-neg-reader O = ⊤
FromNeg.in-range TLevel-neg-reader 1 = ⊤
FromNeg.in-range TLevel-neg-reader 2 = ⊤
FromNeg.in-range TLevel-neg-reader (S (S (S _))) = ⊥
FromNeg.read TLevel-neg-reader O = S (S ⟨-2⟩)
FromNeg.read TLevel-neg-reader 1 = S ⟨-2⟩
FromNeg.read TLevel-neg-reader 2 = ⟨-2⟩
FromNeg.read TLevel-neg-reader (S (S (S _))) ⦃()⦄
|
State Before: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
⊢ SolutionSetCondition G State After: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
⊢ ∃ ι B f, ∀ (X : D) (h : A ⟶ G.obj X), ∃ i g, f i ≫ G.map g = h Tactic: intro A State Before: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
⊢ ∃ ι B f, ∀ (X : D) (h : A ⟶ G.obj X), ∃ i g, f i ≫ G.map g = h State After: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
⊢ ∀ (X : D) (h : A ⟶ G.obj X), ∃ i g, (fun x => (Adjunction.ofRightAdjoint G).unit.app A) i ≫ G.map g = h Tactic: refine'
⟨PUnit, fun _ => (leftAdjoint G).obj A, fun _ => (Adjunction.ofRightAdjoint G).unit.app A, _⟩ State Before: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
⊢ ∀ (X : D) (h : A ⟶ G.obj X), ∃ i g, (fun x => (Adjunction.ofRightAdjoint G).unit.app A) i ≫ G.map g = h State After: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
B : D
h : A ⟶ G.obj B
⊢ ∃ i g, (fun x => (Adjunction.ofRightAdjoint G).unit.app A) i ≫ G.map g = h Tactic: intro B h State Before: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
B : D
h : A ⟶ G.obj B
⊢ ∃ i g, (fun x => (Adjunction.ofRightAdjoint G).unit.app A) i ≫ G.map g = h State After: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
B : D
h : A ⟶ G.obj B
⊢ (fun x => (Adjunction.ofRightAdjoint G).unit.app A) PUnit.unit ≫
G.map (↑(Adjunction.homEquiv (Adjunction.ofRightAdjoint G) A B).symm h) =
h Tactic: refine' ⟨PUnit.unit, ((Adjunction.ofRightAdjoint G).homEquiv _ _).symm h, _⟩ State Before: J : Type v
C : Type u
inst✝² : Category C
D : Type u
inst✝¹ : Category D
G : D ⥤ C
inst✝ : IsRightAdjoint G
A : C
B : D
h : A ⟶ G.obj B
⊢ (fun x => (Adjunction.ofRightAdjoint G).unit.app A) PUnit.unit ≫
G.map (↑(Adjunction.homEquiv (Adjunction.ofRightAdjoint G) A B).symm h) =
h State After: no goals Tactic: rw [← Adjunction.homEquiv_unit, Equiv.apply_symm_apply] |
//=============================================================================
// XOP SPECIFIC RESOURCE : XOPC 1100 - XOP EXTERNAL OPERATION DEFINITION
//=============================================================================
1100 XOPC
BEGIN
//-- kSORT_X2
"sort_x2\0",
XOPOp,
//-- kSORT_X3
"sort_x3\0",
XOPOp,
//-- kSORT_X4
"sort_x4\0",
XOPOp,
0,
END
|
module Fail.BadBuiltinImport where
import Agda.Builtin.Nat
{-# FOREIGN AGDA2HS
import RandomModule (Natural)
import AlsoNotRight (foo, Natural(..))
import AsConstructor (D(Natural))
#-}
|
mutable struct Explicit <: TuningStrategy end
# models! returns all available models in the range at once:
# On the first call (no history yet) propose every model in the range,
# which was stashed in `state`:
MLJTuning.models!(tuning::Explicit, model, history::Nothing,
state, verbosity) = state
# Thereafter propose only the models not yet recorded in the history:
MLJTuning.models!(tuning::Explicit, model, history,
state, verbosity) = state[length(history) + 1:end]
# Default number of models to evaluate: the length of `range` when it is
# defined (a finite iterable of models), and 10 otherwise.
function MLJTuning.default_n(tuning::Explicit, range)
    try
        return length(range)
    catch e
        # The original wrote `catch MethodError`, which merely *binds* the
        # thrown exception to a local variable named `MethodError` — it does
        # not filter by type, so any error raised by `length` was silently
        # swallowed. Only a missing `length` method should trigger the
        # fallback; anything else is a real error and must propagate.
        e isa MethodError || rethrow()
        return 10
    end
end
|
From discprob.basic Require Import base order.
From discprob.prob Require Import prob countable finite stochastic_order.
From discprob.monad.finite Require Import monad.
From mathcomp Require Import ssreflect ssrbool ssrfun eqtype ssrnat seq div choice fintype.
From mathcomp Require Import tuple finfun bigop prime binomial finset.
Require Import Reals Fourier Psatz Omega.
(* output p: the list of result values of the finite distribution p,
   discarding their probabilities. *)
Definition output {A} (p: ldist A) := [seq i.2 | i <- outcomes p].
(* mspec m P: every value in the support of m satisfies P. *)
Definition mspec {A: eqType} (m: ldist A) (P: A → Prop) :=
∀ y, y \in output m → P y.
(* The unit of the monad preserves any predicate already holding of the
   returned value: the support of [mret x] is exactly {x}. *)
Lemma mspec_mret {A: eqType} (x: A) (P: A → Prop):
P x → mspec (mret x) P.
Proof.
intros HP y. rewrite /mret/dist_ret/output//= mem_seq1.
move /eqP => -> //.
Qed.
(* Consequence rule: mspec is monotone in the predicate. *)
Lemma mspec_conseq {A: eqType} (m: ldist A) (P Q: A → Prop):
mspec m P → (∀ a, P a → Q a) → mspec m Q.
Proof.
intros HP HPQ a Hin. apply HPQ; eauto.
Qed.
Lemma output_mbind_in {A B: eqType} b (m: ldist A) (f: A → ldist B):
(b \in output (x ← m; f x)) →
∃ r a, ((r, a)) \in outcomes m ∧ (b \in output (f a)).
Proof.
rewrite /mbind/output.
move /mapP => [[r b' Hin]] => //= ->.
eapply (in_ldist_bind _ r b' m) in Hin as (r'&r''&c'&Hin1&Hin2&Heq).
exists r'', c'; split; auto. apply /mapP; eauto.
Qed.
Lemma mspec_mbind {A B: eqType} (f: A → ldist B) m (P: A → Prop) (Q: B → Prop):
mspec m P →
(∀ a, P a → mspec (f a) Q) →
mspec (mbind f m) Q.
Proof.
intros Hinput Hbody b Hin.
edestruct (output_mbind_in b m f) as (r&?&Hin'&Hout); eauto.
eapply Hbody; eauto. apply Hinput; eauto.
apply /mapP; eauto.
Qed.
Tactic Notation "tbind" open_constr(P) :=
match goal with
| [ |- mspec (mbind ?f ?m) ?Q ] =>
intros; eapply (@mspec_mbind _ _ f m P); auto
end.
Lemma fun_to_mspec {A: eqType} (m: ldist A) (P: A → Prop):
mspec m P → (∀ x, P ((rvar_of_ldist m) x)).
Proof.
rewrite /mspec/output => Hspec /= x.
apply /Hspec/mem_nth. rewrite size_map. inversion x. done.
Qed.
Lemma mspec_range_eq_dist {A: eqType} (m1 m2: ldist A) (P: pred A):
mspec m1 P →
mspec m2 P →
(∀ a, P a → pr_eq (rvar_of_ldist m1) a = pr_eq (rvar_of_ldist m2) a) →
eq_dist (rvar_of_ldist m1) (rvar_of_ldist m2).
Proof.
intros Hm1 Hm2 Hin_eq a.
specialize (Hm1 a). rewrite //= in Hm1.
specialize (Hm2 a). rewrite //= in Hm2.
specialize (Hin_eq a).
case_eq (P a).
- intros HP. apply Hin_eq. auto.
- intros HnP. transitivity 0; last symmetry.
* apply pr_img_nin. intros Hin. move /negP in HnP. apply HnP.
apply Hm1. by rewrite /output -mem_undup -img_rvar_of_ldist'.
* apply pr_img_nin. intros Hin. move /negP in HnP. apply HnP.
apply Hm2. by rewrite /output -mem_undup -img_rvar_of_ldist'.
Qed.
Lemma mspec_eq_dist_ldist_bind_ext {A B: eqType} m P (f g: A → ldist B):
mspec m P →
(∀ a, P a → eq_dist (rvar_of_ldist (f a)) (rvar_of_ldist (g a))) →
eq_dist (rvar_of_ldist (mbind f m)) (rvar_of_ldist (mbind g m)).
Proof.
intros Hspec.
rewrite /eq_dist => Heq b. rewrite ?pr_mbind_ldist1.
eapply eq_bigr => a _. rewrite Heq; first done.
apply Hspec. rewrite /output.
rewrite -mem_undup -img_rvar_of_ldist'.
destruct a as (x&Hin) => //=.
Qed.
Lemma Ex_bound {A : eqType} (X: ldist A) f r:
mspec X (λ x, f x <= r) →
Ex (rvar_comp (rvar_of_ldist X) f) <= r.
Proof.
intros Hmspec. rewrite Ex_fin_comp.
eapply Rle_trans.
{
eapply Rle_bigr => i _.
apply Rmult_le_compat_l; last apply Hmspec.
- apply Rge_le, ge_pr_0.
- destruct i as (?&?) => //=. rewrite /output -mem_undup -img_rvar_of_ldist' //.
}
rewrite -big_distrl //= (pr_sum_all (rvar_of_ldist X)) Rmult_1_l. fourier.
Qed. |
#
# GreenMachine: The GreenMachine: Hyperbolic Groups in GAP
#
# Implementations
#
# Placeholder example function generated with the package skeleton; it only
# prints a notice and is meant to be replaced by real package functionality.
InstallGlobalFunction( GreenMachine_Example,
function()
Print( "This is a placeholder function, replace it with your own code.\n" );
end );
|
[STATEMENT]
lemma BIT_c2: assumes A: "x \<noteq> y"
"init \<in> {[x,y],[y,x]}"
"v \<in> lang (seq [Atom x, Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])"
shows "T\<^sub>p_on_rand' BIT (type0 init x y) v = 0.75 * (length v - 1) - 0.5" (is ?T)
and "config'_rand BIT (type0 init x y) v = (type0 init x y)" (is ?C)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10 &&& Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
2. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
from assms(3)
[PROOF STATE]
proof (chain)
picking this:
v \<in> lang (seq [Atom x, Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])
[PROOF STEP]
obtain w where vw: "v = [x]@w" and
w: "w \<in> lang (seq [Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])"
[PROOF STATE]
proof (prove)
using this:
v \<in> lang (seq [Atom x, Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])
goal (1 subgoal):
1. (\<And>w. \<lbrakk>v = [x] @ w; w \<in> lang (seq [Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(auto)
[PROOF STATE]
proof (state)
this:
v = [x] @ w
w \<in> lang (seq [Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])
goal (2 subgoals):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
2. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
have c1: "config'_rand BIT (type0 init x y) [x] = type0 init x y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Partial_Cost_Model.config'_rand BIT (type0 init x y) [x] = type0 init x y
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
init \<in> {[x, y], [y, x]}
v \<in> lang (seq [Atom x, Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])
goal (1 subgoal):
1. Partial_Cost_Model.config'_rand BIT (type0 init x y) [x] = type0 init x y
[PROOF STEP]
by(auto simp add: oneBIT_step)
[PROOF STATE]
proof (state)
this:
Partial_Cost_Model.config'_rand BIT (type0 init x y) [x] = type0 init x y
goal (2 subgoals):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
2. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
have t1: "T\<^sub>p_on_rand' BIT (type0 init x y) [x] = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T\<^sub>p_on_rand' BIT (type0 init x y) [x] = 0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
x \<noteq> y
init \<in> {[x, y], [y, x]}
v \<in> lang (seq [Atom x, Times (Atom y) (Atom x), Star (Times (Atom y) (Atom x)), Atom x])
goal (1 subgoal):
1. T\<^sub>p_on_rand' BIT (type0 init x y) [x] = 0
[PROOF STEP]
by(auto simp add: costBIT)
[PROOF STATE]
proof (state)
this:
T\<^sub>p_on_rand' BIT (type0 init x y) [x] = 0
goal (2 subgoals):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
2. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
show "T\<^sub>p_on_rand' BIT (type0 init x y) v
= 0.75 * (length v - 1) - 0.5"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
[PROOF STEP]
unfolding vw
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. T\<^sub>p_on_rand' BIT (type0 init x y) ([x] @ w) = 75 / 10\<^sup>2 * real (length ([x] @ w) - 1) - 5 / 10
[PROOF STEP]
apply(simp only: T_on_rand'_append c1 BIT_c[OF assms(1,2) w] t1)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 + (75 / 10\<^sup>2 * real (length w) - 5 / 10) = 75 / 10\<^sup>2 * real (length ([x] @ w) - 1) - 5 / 10
[PROOF STEP]
by (simp)
[PROOF STATE]
proof (state)
this:
T\<^sub>p_on_rand' BIT (type0 init x y) v = 75 / 10\<^sup>2 * real (length v - 1) - 5 / 10
goal (1 subgoal):
1. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
show "config'_rand BIT (type0 init x y) v = (type0 init x y)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
[PROOF STEP]
unfolding vw
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Partial_Cost_Model.config'_rand BIT (type0 init x y) ([x] @ w) = type0 init x y
[PROOF STEP]
by(simp only: config'_rand_append c1 BIT_c[OF assms(1,2) w])
[PROOF STATE]
proof (state)
this:
Partial_Cost_Model.config'_rand BIT (type0 init x y) v = type0 init x y
goal:
No subgoals!
[PROOF STEP]
qed |
using H5PLEXOS

# PLEXOS solution archives to convert (relative to this script's
# directory): the named models plus the numbered Test1..Test9 cases.
zipfiles = ["Model Base_8200 Solution.zip",
            "Model Base_8200 NoInterval Solution.zip",
            "Model DAY_AHEAD_NO_TX Solution.zip",
            "Model DAY_AHEAD_NO_TX Stochastic Solution.zip",
            "Model DAY_AHEAD_ALL_TX Solution.zip",
            "Model DA_h2hybrid_SCUC_select_lines_Test_1day Solution.zip",
            "Model DAY_AHEAD_PRAS Solution.zip"]
append!(zipfiles, ["Model Test$i Solution.zip" for i in 1:9])

# Directory containing this script; replaces the manual
# dirname(@__FILE__) * "/" string concatenation.
testfolder = @__DIR__

for zipfile in zipfiles
    println(zipfile)
    # joinpath is the portable way to build the archive path.
    zippath = joinpath(testfolder, zipfile)
    # TODO: Actually test things
    process(zippath, replace(zippath, ".zip" => ".h5"))
end
|
lemma space_in_measure_of[simp]: "\<Omega> \<in> sets (measure_of \<Omega> M \<mu>)" |
%-------------------------------------------------------
% DOCUMENT CONFIGURATIONS
%-------------------------------------------------------
%-------------------------------------------------------
% START OF IMPLEMENTATIONS
%-------------------------------------------------------
\section{Implementations}
%-------------------------------------------------------
% COMMUNICATION SECTION
%-------------------------------------------------------
\input{./implementations/consensus.tex}
%-------------------------------------------------------
% WEBGATE SECTION
%-------------------------------------------------------
\input{./implementations/webgate.tex}
\clearpage
%-------------------------------------------------------
% END OF IMPLEMENTATIONS
%------------------------------------------------------- |
import data.finset
import algebra.big_operators
import data.fintype
open finset
-- Distinct equivalence classes (elements of the quotient) have disjoint
-- preimages: filtering `univ` by membership in two different classes
-- yields disjoint finsets.
lemma disjoint_equiv_classes (α : Type*) [fintype α] [h : setoid α] [decidable_rel h.r] [decidable_eq α]:
  ∀ x ∈ @finset.univ (quotient h) _, ∀ y ∈ @finset.univ (quotient h) _, x ≠ y →
  (finset.filter (λ b : α, ⟦b⟧ = x) finset.univ) ∩ (finset.filter (λ b : α, ⟦b⟧ = y) finset.univ) = ∅ :=
begin
  -- NOTE(review): the second binder reuses the name `hx`, shadowing the
  -- first hypothesis; harmless here, but renaming to `hy` would be clearer.
  intros x hx y hx hxy,
  rw ←filter_and,
  rw ←filter_false,
  congr,funext,
  -- Membership in both classes is contradictory, since x ≠ y.
  suffices : ⟦a⟧ = x ∧ ⟦a⟧ = y → false,
  simp [this],
  intro H,cases H with Hx Hy, rw Hx at Hy,apply hxy,exact Hy,
  -- Decidability of `false`, needed by `filter_false`.
  intro x,show decidable false, refine is_false id
end
-- Partition a finite sum over `β` by the equivalence classes of `h`:
-- Σ_{b : β} f b = Σ_{x : quotient h} Σ_{b ∈ class x} f b.
lemma sum_equiv_classes {α β : Type*} [add_comm_monoid α] [fintype β] (f : β → α)
  (h : setoid β) [decidable_rel h.r] [decidable_eq β] :
  finset.sum (@finset.univ β _) f = finset.sum finset.univ
    (λ (x : quotient h), finset.sum (filter (λ b : β, ⟦b⟧ = x) finset.univ) f) :=
begin
  -- Rewrite the double sum as a sum over the disjoint union of classes.
  rw ←finset.sum_bind (disjoint_equiv_classes β),
  congr,symmetry,
  -- The union of all classes is the whole type.
  rw eq_univ_iff_forall,
  intro b,
  rw mem_bind,
  existsi ⟦b⟧,
  existsi (mem_univ ⟦b⟧),
  rw mem_filter,
  split,exact mem_univ b,refl
end
-- now let's define the equivalence relation on s by a is related to a and g(a) (and that's it)
-- `g` packaged as a self-map of the subtype ↑s: well-defined because
-- `g` maps `s` into `s` (hypothesis `h₄`).
definition gbar {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) :
  (↑s : set β) → (↑s : set β) :=
--λ ⟨a,ha⟩,⟨g a ha,h₄ a ha⟩
λ x,⟨g x.val x.property, h₄ x.val x.property⟩
-- If `g` is an involution on `s` (hypothesis `h₅`), then the packaged
-- map `gbar g h₄` is an involution on the subtype ↑s.
definition gbar_involution {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a) :
  let gb := gbar g h₄ in
  ∀ x, gb (gb x) = x :=
begin
  intros gb x,
  -- Subtype equality reduces to equality of the underlying values.
  apply subtype.eq,
  have H := h₅ x.val x.property,
  rw ←H,refl,
end
-- Relation identifying `a₁` with itself and with `gb a₂`: the orbits of
-- the involution `gbar g h₄` (each orbit has size 1 or 2).
private definition eqv {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a)
  (a₁ a₂ : (↑s : set β)) : Prop :=
let gb := gbar g h₄ in a₁ = a₂ ∨ a₁ = gb a₂
-- Reflexivity of `eqv`: the left disjunct `a = a` holds definitionally.
private theorem eqv.refl {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a) :
  ∀ a : (↑s : set β), eqv g h₄ h₅ a a := λ a, or.inl rfl
-- Symmetry of `eqv`: equality is symmetric, and `a₁ = gb a₂` flips to
-- `a₂ = gb a₁` because `gb` is an involution.
private theorem eqv.symm {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a) :
  ∀ a₁ a₂ : (↑s : set β), eqv g h₄ h₅ a₁ a₂ → eqv g h₄ h₅ a₂ a₁
| a₁ a₂ (or.inl h) := or.inl h.symm
| a₁ a₂ (or.inr h) := or.inr (by rw h;exact (gbar_involution g h₄ h₅ a₂).symm)
-- Transitivity of `eqv` by case analysis on the two disjunctions; the
-- inr/inr case collapses via the involution: gb (gb a₃) = a₃.
private theorem eqv.trans {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a) :
  ∀ a₁ a₂ a₃: (↑s : set β), eqv g h₄ h₅ a₁ a₂ → eqv g h₄ h₅ a₂ a₃ → eqv g h₄ h₅ a₁ a₃
| a₁ a₂ a₃ (or.inl h12) (or.inl h23) := or.inl (eq.trans h12 h23)
| a₁ a₂ a₃ (or.inl h12) (or.inr h23) := or.inr (h12.symm ▸ h23)
| a₁ a₂ a₃ (or.inr h12) (or.inl h23) := or.inr (h23 ▸ h12)
| a₁ a₂ a₃ (or.inr h12) (or.inr h23) := or.inl (by rw [h12,h23];exact (gbar_involution g h₄ h₅ a₃))
-- `eqv` is an equivalence relation: bundle refl/symm/trans.
private theorem is_equivalence {β : Type*} {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a)
  : equivalence (eqv g h₄ h₅) := ⟨eqv.refl g h₄ h₅,eqv.symm g h₄ h₅,eqv.trans g h₄ h₅⟩
-- Decidability of `eqv`: test the two candidate equalities directly
-- (both decidable because `β` has decidable equality).
instance {β : Type*} [decidable_eq β] {s : finset β} (g : Π a ∈ s, β)
  (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a) : decidable_rel (eqv g h₄ h₅) :=
begin
  intros a₁ a₂,
  by_cases H12 : a₁ = a₂,
  refine is_true (or.inl H12),
  by_cases H12g : a₁ = gbar g h₄ a₂,
  refine is_true (or.inr H12g),
  -- Neither disjunct holds, so the relation is refuted.
  refine is_false _,
  intro H,cases H,
  apply H12,exact H,
  apply H12g,exact H,
end
-- If the elements of `s` pair up under an involution `g` with no fixed
-- points (`h₁`, `h₅`) and `f` cancels on each pair (`h₀`), the sum of
-- `f` over `s` vanishes.  Strategy: move the sum to the subtype ↑s,
-- partition it over the orbit classes of `eqv`, and show each class
-- {b, g b} contributes f b + f (g b) = 0.
lemma sum_keji {α β : Type*} [add_comm_monoid α] [decidable_eq β] {f : β → α}
  {s : finset β} (g : Π a ∈ s, β) (h₀ : ∀ a ha, f a + f (g a ha) = 0)
  (h₁ : ∀ a ha, g a ha ≠ a) (h₂ : ∀ a₁ a₂ ha₁ ha₂, g a₁ ha₁ = g a₂ ha₂ → a₁ = a₂)
  (h₃ : ∀ a ∈ s, ∃ b hb, g b hb = a) (h₄ : ∀ a ha, g a ha ∈ s) (h₅ : ∀ a ha, g (g a ha) (h₄ a ha) = a ) :
  s.sum f = 0 :=
begin
  -- Set up the subtype ↑s together with the orbit equivalence relation.
  let gb := gbar g h₄,
  let β' := ↥(↑s : set β),
  letI fβ' : fintype β' := by apply_instance,
  let inst_2 : fintype β' := by apply_instance,
  let f' : β' → α := λ b,f b,
  let h : setoid β' := {r := eqv g h₄ h₅,iseqv := is_equivalence g h₄ h₅},
  let inst_4 : decidable_eq β' := by apply_instance,
  let inst_3 : decidable_rel h.r := by apply_instance,
  -- Transport the sum over `s` to a sum over the subtype universe.
  have H : s.sum f = sum univ f',
  { let g' : β' → β := λ x, x.val,
    let s' : finset β' := finset.univ,
    have Hinj : ∀ x ∈ s', ∀ y ∈ s', g' x = g' y → x = y,
    { intros x Hx y Hy,exact subtype.eq,
    },
    have H2 := @sum_image β α β' f _ _ _ s' g' Hinj,
    -- The image of the inclusion map is exactly `s`.
    have H3 : image g' s' = s,
    { ext,split,
      { rw finset.mem_image,
        intro Ha,
        cases Ha with b Hb,
        cases Hb with Hb Hg',
        rw ←Hg',
        exact b.property,
      },
      intro Ha,
      rw finset.mem_image,
      existsi (⟨a,Ha⟩ : β'),
      existsi (mem_univ _),refl
    },
    rw ←H3,
    rw H2,
    refl
  },
  rw H,
  -- now finally rewrite sum_equiv_classes
  rw @sum_equiv_classes α β' _ fβ' f' h _ _,
  rw ←sum_const_zero,
  congr,funext,
  -- Each class is {b, gb b}; its contribution is f b + f (g b) = 0.
  let b := quotient.out x,
  suffices : (filter (λ (b : β'), ⟦b⟧ = x) univ) = insert b (finset.singleton (gb b)),
  { rw this,
    -- `b` and `gb b` are distinct, since `g` has no fixed points.
    have H2 : b ∉ finset.singleton (gb b),
    rw mem_singleton,
    intro H3,replace H3 := H3.symm,
    apply h₁ b.val b.property,
    have H4 : (gb b).val = b.val := by rw H3,
    exact H4,
    rw finset.sum_insert H2,
    rw finset.sum_singleton,
    show f b.val + f (g b.val b.property) = 0,
    exact h₀ b.val b.property
  },
  clear H,
  -- Characterise membership in the class of `x`: exactly b or gb b.
  have H : ∀ c : β', ⟦c⟧ = x ↔ c = b ∨ c = gb b,
  { intro c,split,swap,
    intro H2,cases H2,rw H2,simp,
    rw H2,
    suffices : ⟦gb b⟧ = ⟦b⟧, by simp [this],
    rw quotient.eq,
    exact or.inr rfl,
    have H : x = ⟦b⟧ := by simp,
    rw H,rw quotient.eq,
    intro H2, cases H2,left,exact H2,
    right,exact H2,
  },
  ext,
  have H2 : a ∈ insert b (finset.singleton (gb b)) ↔ a = b ∨ a = gb b := by simp,
  rw H2,
  rw ←H a,
  simp,
end
|
{-|
Module : HF.Gauss
Copyright : Copyright (c) David Schlegel
License : BSD
Maintainer : David Schlegel
Stability : experimental
Portability : Haskell
Gaussian Integral evaluation plays an important role in quantum chemistry. Here functions will be provided to compute the most important integrals involving gaussian-type orbitals (GTOs).
An unnormalized primitive cartesian Gaussian Orbital has the form
<<centered_gaussian.svg centered_gaussian>>
where /A/ denotes the center of the orbital.
An unnormalized contracted Gaussian Orbital is a linear combination of primitive Gaussian:
<<centered_contracted_gaussian.svg centered_contracted_gaussian>>
"Gauss" uses the datatype contstructors 'PG' (Primitive Gaussian) and 'Ctr' (Contraction), defined in "HF.Data".
Notice: Formulas of Gaussian Integrals are taken from:
Fundamentals of Molecular Integral Evaluation
by Justin T. Fermann and Edward F. Valeev
-}
module HF.Gauss (
-- * Basic functions
distance, frac, center, itersum, erf, f, factorial2, general_erf,
-- * Normalization
normCtr, normPG,
-- * Integral Evaluation
s_12, t_12,
-- * Contraction operations
zipContractionWith, constr_matrix
) where
import HF.Data
import Numeric.Container hiding (linspace)
import Numeric.LinearAlgebra.Data hiding (linspace)
import Data.Maybe
import Control.DeepSeq
import Debug.Trace
---------------------
---------------------
--Helper Functions---
---------------------
---------------------
-- | Sum a function over a list of sample points: @itersum f xs = Σ f x@.
-- Replaces the original manual recursion, whose @range == []@ test
-- forced a spurious 'Eq' constraint on the element type.
itersum :: Num b => (a -> b) -> [a] -> b
itersum function range = sum (map function range)
-- | Factorial: returns n! for n >= 0 and 0 for negative arguments
-- (preserving the original's sentinel behaviour on negatives).
factorial n = if n < 0 then 0 else product [1 .. n]
-- | Double factorial n!! = n * (n-2) * ... down to 1 or 2, with the
-- conventional base cases (-1)!! = 0!! = 1!! = 1.
-- The unused 'Show' constraint of the original signature is dropped
-- (a relaxation, so all existing callers still type-check).
-- Diverges for other negative or non-integral arguments, as before.
factorial2 :: (Eq a, Num a) => a -> a
factorial2 0 = 1
factorial2 1 = 1
factorial2 (-1) = 1
factorial2 n = n * factorial2 (n-2)
-- | Binomial coefficient "n choose k", via the multiplicative
-- recurrence C(n,k) = n * C(n-1,k-1) / k (the division is exact,
-- so integer 'div' is safe).
binom n 0 = 1
binom 0 k = 0
binom n k = n * binom (n-1) (k-1) `div` k
-- |Computes the distance between two vectors:
--
-- <<norm.svg norm>>
--
-- NOTE(review): 'norm2' is the Euclidean norm |rA - rB|, not its
-- square, yet the Gaussian-integral call sites below use this value
-- where the textbook exponents involve |rA - rB|^2 — confirm intent.
distance :: Vector Double -> Vector Double -> Double
distance rA rB = norm2(rA - rB)
-- | Reduced ("effective") exponent of two Gaussian exponents:
--
-- >>> frac a b = a*b / (a + b)
frac :: Double -> Double -> Double
frac a b = a * b / (a + b)
-- | Exponent-weighted centre of two Gaussians (Gaussian product
-- theorem): P = (alpha*rA + beta*rB) / (alpha + beta).
-- Fixes the original, which combined the two weighted terms with (*)
-- instead of (+) — an element-wise product is not a weighted average.
center :: Double          -- ^ alpha
          -> Vector Double -- ^ rA
          -> Double        -- ^ beta
          -> Vector Double -- ^ rB
          -> Vector Double
center a rA b rB = ((scalar a * rA) + (scalar b * rB)) / scalar (a+b)
-- | Error function via a truncated (101-term) Maclaurin-type series:
-- erf x = 2/sqrt pi * Σ_n [ x/(2n+1) * Π_{k=1..n} (-x²/k) ].
-- Accurate for moderate |x|; the series is cut off at n = 100.
erf :: Double -> Double
erf x = scale * series
  where
    scale  = 2 / sqrt pi
    term n = x/(2*n+1) * product [-x^2/k | k <- [1..n] ]
    series = sum [term n | n <- [0..100]]
--Base-case Boys-type function F_0, identified here with the truncated
--'erf' series above.
-- NOTE(review): the call sites (nuclear, twoelectron) pass
-- Boys-function arguments; plain erf may be missing the
-- sqrt(pi/(4u)) prefactor used in general_erf's base case — confirm.
f_0 :: Double -> Double
f_0 = erf
-- | General error (Boys-type) function F_nu(u), Thijssen Eq. 4.118.
-- Base case:  F_0(u)  = sqrt(pi) * erf(sqrt u) / (2 sqrt u).
-- Recurrence: F_nu(u) = ((2 nu - 1) * F_{nu-1}(u) - exp(-u)) / (2 u).
-- The original applied the 1/(2u) factor only to the first term,
-- leaving "- exp(-u)" outside the bracket; parenthesisation fixed.
general_erf nu u
  | nu == 0 = sqrt pi * erf (sqrt u) * 1/(2*sqrt u)
  | otherwise = 1/(2*u) * ((2*nu - 1) * (general_erf (nu-1) u ) - exp (-u))
--------------------------
---Important Functions----
--------------------------
-- |Calculates normalization factor of contracted Gaussian with arbitrary angular momentum
-- (Fermann & Valeev Eq. 2.11):
-- N = (prefactor * Σ_ij a_i a_j / (α_i + α_j)^(L + 3/2))^(-1/2),
-- where L = l + m + n is the total angular momentum.
normCtr :: Ctr -> Double
normCtr contr = (prefactor * summation)**(-1.0/2.0)
--See also Eq. 2.11
  where
    -- Cartesian angular-momentum components of the contraction.
    (l, m, n) = lmncontract contr
    --(l,m,n) = (fromIntegral l_, fromIntegral m_, fromIntegral n_) --This looks quite dirty
    -- Last primitive index (contraction length minus one).
    n_sum = lengthcontract contr -1
    -- Contraction coefficients.
    a = toList $ coefflist contr
    -- Exponents of the primitive Gaussians.
    alp = [alpha prim | prim <- (gaussians contr)]
    -- Total angular momentum L, as a Double.
    mom = fromIntegral $ momentumctr contr
    prefactor = 1.0/(2.0**mom)*pi**(3.0/2.0)* factorial2 (2*l-1) * factorial2 (2*m-1)* factorial2 (2*n-1)
    -- Double sum over all primitive pairs (i, j).
    summation = sum $ concat $ [[(a !! i)*(a !! j)/((alp !! i +alp !! j)**(mom + 3.0/2.0)) | i <-[0..n_sum]]| j <- [0..n_sum]]
-- |Calculates normalization factor of primitive Gaussian with arbitrary angular momentum
-- N = (2α/π)^(3/4) * sqrt( (8α)^(l+m+n) l! m! n! / ((2l)! (2m)! (2n)!) ).
normPG :: PG -> Double
normPG (PG lmn alpha pos) = (2*alpha/pi)**(3/4) * (a/b)**(1/2)
  where
    -- Angular-momentum components converted to Doubles for (**).
    (l, m, n) = (fromIntegral $ lmn !! 0, fromIntegral $ lmn !! 1, fromIntegral $ lmn !! 2)
    a = (8*alpha)**(l+m+n) * factorial l * factorial m * factorial n
    b = factorial (2*l) * factorial (2*m) * factorial (2*n)
-- |Calculates f_k (l1 l2 pa_x pb_x) used in Gaussian Product Theorem:
-- the coefficient of x^k in (x + PA_x)^l1 (x + PB_x)^l2, i.e.
--   Σ_{i+j=k} C(l1,i) C(l2,j) PA_x^(l1-i) PB_x^(l2-j)
-- (Fermann & Valeev Eq. 2.45).  The original summed over all pairs
-- with i+j <= k, over-counting every lower-order term; the comprehension
-- is restricted to i+j == k here.  Integer '^' replaces '**' since the
-- exponents l1-i, l2-j are non-negative integers.
f :: Int -> Int -> Int -> Double -> Double -> Double
f k l1_ l2_ pa_x pb_x =
  sum [ pa_x ^ (l1_ - i) * pb_x ^ (l2_ - j)
          * fromIntegral (binom l1_ i * binom l2_ j)
      | i <- [0..l1_], j <- [0..l2_], i + j == k ]
-- |Evaluates a given function for two contractions. The operation is symmetric in contraction arguments.
-- Expands the double sum Σ_ij c_i d_j <prim_i | op | prim_j> over all
-- primitive pairs and scales by the two contraction normalizations.
zipContractionWith :: (PG -> PG -> Double) -- ^ Function of two primitive Gaussians
                   -> Ctr                  -- ^ Contraction
                   -> Ctr                  -- ^ Contraction
                   -> Double
zipContractionWith zipfunction (Ctr pglist1 coeffs1) (Ctr pglist2 coeffs2) = pr1 * pr2 * value
  where
    coefflist1 = toList coeffs1
    coefflist2 = toList coeffs2
    -- Last primitive index of each contraction.
    n1 = length coefflist1 -1
    n2 = length coefflist2 -1
    pr1 = normCtr (Ctr pglist1 coeffs1) --I am not so sure about this
    pr2 = normCtr (Ctr pglist2 coeffs2) --I am not so sure about this
    -- Coefficient-weighted sum of the primitive-pair evaluations.
    value = sum $ [(coefflist1 !! i)* (coefflist2 !! j)* (zipfunction (pglist1 !! i) (pglist2 !! j)) | i <- [0..n1], j <- [0..n2]]
-- |Construct a matrix out of a list of Contractions and a function for two primitive gaussians
-- Entry (i,j) is the contraction-level evaluation of 'function' on the
-- i-th and j-th contractions (e.g. the overlap or kinetic matrix).
constr_matrix :: [Ctr]                -- ^ List of Contraction
              -> (PG -> PG -> Double) -- ^ Function of two primitive Gaussians
              -> Matrix Double        -- ^ Matrix of dimensions (length [Ctr]) x (length [Ctr])
constr_matrix contractionlist function = buildMatrix (length contractionlist) (length contractionlist) (\(i,j) -> zipContractionWith function (contractionlist !! i) (contractionlist !! j) )
-- |Calculates overlap integral of two primitive gaussians
-- | <<s_12.svg s_12>>
-- Gaussian product theorem: S = pref * I_x * I_y * I_z, each Cartesian
-- factor per Fermann & Valeev Eq. 3.15.
-- NOTE(review): a Debug.Trace call is left in the hot path and prints
-- on every evaluation — remove for production use.
-- NOTE(review): 'distance' returns |A-B|, while the product-theorem
-- prefactor uses |A-B|^2 — confirm which is intended.
s_12 :: PG -> PG -> Double
s_12 (PG lmn1 alpha1 pos1) (PG lmn2 alpha2 pos2) = trace ("s_12: range " ++ show range ++ " f 0 =" ++ show (factor 0) ++ " f 1 = " ++ show (factor 1)++ " f 2 =" ++ show (factor 2)) $ prefactor * (factor 0) * (factor 1) * (factor 2) --pref * I_x * I_y * I_z
  where
    -- Angular momenta along x, y, z for both primitives.
    (l1, m1, n1) = (lmn1 !! 0, lmn1 !! 1, lmn1 !! 2)
    (l2, m2, n2) = (lmn2 !! 0, lmn2 !! 1, lmn2 !! 2)
    -- Combined exponent gamma = alpha1 + alpha2.
    g = alpha1 + alpha2
    prefactor = exp (-alpha1*alpha2 * (distance pos1 pos2) /g)
    -- Product-Gaussian centre and displacements PA, PB from it.
    p = center alpha1 pos1 alpha2 pos2
    pa = toList $ p - pos1
    pb = toList $ p - pos2
    range = [0..(fromIntegral (l1+l2) /2)] :: (Enum a, Fractional a) => [a]
    -- See also Eq. 3.15
    function k i = force $ f (round (2*i)) l1 l2 (pa !! k) (pb !! k) * (factorial2 (2.0* i -1.0)) * ((2*g)** (- i)) * (pi/g)**(1/2)
    -- I_k: sum over even expansion orders for Cartesian direction k.
    factor k = itersum (function k) range
    --i k = sum $ [f (round (2*i)) l1 l2 (pa !! k) (pb !! k) * factorial2 (2.0* i -1.0) * ((2*g)** (- i)) * (pi/g)**(1/2) | i <- range ]
-- |Calculates kinetic energy integral of two primitive gaussians
-- | <<t_12.svg t_12>>
-- Expressed via overlap integrals with raised/lowered angular momenta
-- (Fermann & Valeev Eq. 4.13); returns I_x + I_y + I_z.
t_12 :: PG -> PG -> Double
t_12 (PG lmn1 alpha1 pos1) (PG lmn2 alpha2 pos2) = (i 0) + (i 1) + (i 2) -- I_x + I_y + I_z
  where
    --lifts or lowers l, m or n, indicated by index k for a lmn-list, respectively
    pp1 list k = [if i == k then list !! i + 1 else list !! i | i<-[0..2]] --lifts
    mm1 list k = [if i == k then list !! i - 1 else list !! i | i<-[0..2]] --lowers
    --overlaps where parts of l, m or n are lifted +1 or lowered -1
    m1m1 k = s_12 (PG (mm1 lmn1 k) alpha1 pos1) (PG (mm1 lmn2 k) alpha2 pos2) -- <-1|-1>_k
    p1p1 k = s_12 (PG (pp1 lmn1 k) alpha1 pos1) (PG (pp1 lmn2 k) alpha2 pos2) -- <+1|+1>_k
    p1m1 k = s_12 (PG (pp1 lmn1 k) alpha1 pos1) (PG (mm1 lmn2 k) alpha2 pos2) -- <+1|-1>_k
    m1p1 k = s_12 (PG (mm1 lmn1 k) alpha1 pos1) (PG (pp1 lmn2 k) alpha2 pos2) -- <-1|+1>_k
    --See also Eq.4.13
    i k = 1/2 * (fromIntegral (lmn1 !! k)) * (fromIntegral (lmn2 !! k)) * (m1m1 k)
          + 2 * alpha1 * alpha2 * (p1p1 k)
          - alpha1 * (fromIntegral (lmn2 !! k)) * (p1m1 k)
          - alpha2 * (fromIntegral (lmn1 !! k)) * (m1p1 k)
{-
-- |Calculates nuclear attraction integral of two primitive gaussians
v_12 :: Double -> Vecotr Double -> PG -> PG -> Double
v_12 z r_c (PG lmn1 alpha1 pos1) (PG lmn2 alpha2 pos2) = z*pi*pre/g * 2* summation
where
ax, ay, az = (fromList pos1) !! 0, (fromList pos1) !! 1, (fromList pos1) !! 2
bx, by, bz = (fromList pos2) !! 0, (fromList pos2) !! 1, (fromList pos2) !! 2
r_cx, r_cy, r_cz = (fromList r_c) !! 0, (fromList r_c) !! 1, (fromList r_c) !! 2
pref = exp (-alpha1*alpha2 * (distance pos1 pos2) /g)
g = alpha1 + alpha2
p = 1/g * (alpha1*pos1 + alpha2*pos2)
px, py, pz = (fromList p) !! 0, (fromList p) !! 1, (fromList p) !! 2
eta = (alpha1 + alpha2)/g
mu_x = l1 + l2 - 2*(ijk1 + ijk2) - (opq1 + opq2)
mu_y = l1 + l2 - 2*(j1 + j2) - (p1 + p2)
mu_z = l1 + l2 - 2*(k1 + k2) - (q1 + q2)
nu = mu_x + mu_y + mu_z - (u + v + w)
summation = general_erf nu (g * distance p r_c) * a_x * a_y * a_z
--ranges:
ijk1_range = [0..floor $ (fromIntegral l1 /2)] :: (Integral a, Enum a) => [a]
ijk2_range = [0..floor $ (fromIntegral l2 /2)] :: (Integral a, Enum a) => [a]
opq1_range ijk1 = [0..floor $ (fromIntegral l1 - 2*ijk1)] :: (Integral a, Enum a) => [a]
opq2_range ijk2 = [0..floor $ (fromIntegral l2 - 2*ijk2)] :: (Integral a, Enum a) => [a]
rst_range opq1 opq2 = [0..floor $ fromIntegral (opq1+l2) /2] :: (Integral a, Enum a) => [a]
u_range mu_x = [0..floor $ (fromIntegral mu_x /2)] :: (Integral a, Enum a) => [a]
a_x = (-1)**(l1+l2) * (factorial l1) * (factorial l2) *
(-1)**(opq2 + r) * factorial (opq1 + opq2) * 1/(4**(ijk1+ijk2+r) * factorial ijk1 * factorial ijk2 * factorial opq1 * factorial opq2 * factorial r) *
alpha1**(opq2-ijk1-r) * alpha2**(opq1-ijk2-r) * (ax-bx)**(opq1+opq2-2*r) * 1/(factorial (l1-2*ijk1-opq1) * factorial (l2-2*ijk2-opq2) * factorial (opq1-2*opq2-2*r)) *
* [ (-1)**u * factorial mu_x * (px -r_cx)**(mu_x - 2*u) * 1/ ((4)**u * factorial u * factorial (mu_x-2*u) * g**(opq1 + opq2 - r + u)) | u <- [0..1/2 * mu_x]]
-}
--v_12_test :: Double -> Vector Double -> PG -> PG -> [Double]
{- v_12_test z r_c (PG lmn1 alpha1 pos1) (PG lmn2 alpha2 pos2) = summation 0
where
--(ax, ay, az) = ((toList pos1) !! 0, (toList pos1) !! 1, (toList pos1) !! 2)
--(bx, by, bz) = ((toList pos2) !! 0, (toList pos2) !! 1, (toList pos2) !! 2)
--(r_cx, r_cy, r_cz) = ((toList r_c) !! 0, (toList r_c) !! 1, (toList r_c) !! 2)
--lmn1_ = fromList lmn1
--lmn2_ = fromList lmn2
pref = exp (-alpha1*alpha2 * (distance pos1 pos2) /g)
g = alpha1 + alpha2
p = scale (1/g) (add (scale alpha1 pos1) (scale alpha2 pos2))
--(px, py, pz) = ((toList p) !! 0, (toList p) !! 1, (toList p) !! 2)
--f_nu ijk1 ijk2 opq1 opq2 = general_erf (nu ijk1 ijk2 opq1 opq2) (g * distance p r_c)
eta = (alpha1 + alpha2)/g
mu ijk1 ijk2 opq1 opq2 s = fromIntegral (atIndex lmn1 s) + fromIntegral (atIndex lmn2 s) - 2*(ijk1 + ijk2) - (opq1 + opq2)
--nu ijk1 ijk2 opq1 opq2 = (mu ijk1 ijk2 opq1 opq2 s) + (mu ijk1 ijk2 opq1 opq2 s) + (mu ijk1 ijk2 opq1 opq2 s) - (u v q)
--ranges alle ranges (X,Y,Z) mit einer klappe schlagen, dafür index s:
ijk1_range s = [0..fromIntegral $ floor $ fromIntegral (lmn1 !! s) /2] :: [Double]
ijk2_range s = [0..fromIntegral $ floor $ fromIntegral (lmn2 !! s) /2] :: [Double]
opq1_range ijk1 s = [0..fromIntegral $ floor $ (fromIntegral (lmn1 !! s) - 2*ijk1)] :: [Double]
opq2_range ijk2 s = [0..fromIntegral $ floor $ (fromIntegral (lmn2 !! s) - 2*ijk2)] :: [Double]
rst_range opq1 opq2 s = [0..fromIntegral $ floor $ (opq1+opq2) /2] :: [Double]
uvw_range ijk1 ijk2 opq1 opq2 s = [0..fromIntegral $ floor $ ((mu ijk1 ijk2 opq1 opq2 s) /2)] :: [Double]
--a_x :: Num a => a -> a -> a -> a -> a -> Double
--(-1)**(l1+l2) * (factorial fromIntegral (lmn1 !! s)) * (factorial fromIntegral (lmn2 !! s)) *
function ijk1 ijk2 opq1 opq2 rst s = (-1)**(opq2 + rst) * factorial (opq1 + opq2) * 1/(4**(ijk1+ijk2+rst) * factorial ijk1 * factorial ijk2 * factorial opq1 * factorial opq2 * factorial rst) * alpha1**(opq2-ijk1-rst) * alpha2**(opq1-ijk2-rst) * (atIndex pos1 s - atIndex pos2 s)**(opq1+opq2-2*rst) * 1/(factorial ((lmn1 !! s)-2*ijk1-opq1) * factorial ((lmn2 !! s)-2*ijk2-opq2) * factorial (opq1-2*opq2-2*rst)) * sum $ [ (-1)**uvw * factorial (mu ijk1 ijk2 opq1 opq2 s) * ((atIndex p s) - (atIndex r_c s))**((mu ijk1 ijk2 opq1 opq2 s) - 2*uvw) * 1/ ((4)**uvw * factorial uvw * factorial ((mu ijk1 ijk2 opq1 opq2 s)-2*uvw) * g**(opq1 + opq2 - rst + uvw)) | uvw <- (uvw_range ijk1 ijk2 opq1 opq2 s)]
summation s = (-1)**(lmn1 !! s + lmn2 !! s) * (factorial fromIntegral (lmn1 !! s)) * (factorial fromIntegral (lmn2 !! s)) * sum $ concat $ concat $ [[[function ijk1 ijk2 opq1 opq2 rst s | rst <- (rst_range opq1 opq2 s)] | opq1 <- (opq1_range ijk1 s), opq2 <- (opq2_range ijk2 s)]| ijk1 <- ijk1_range s, ijk2 <- ijk2_range s]
-}
-----------------
--Deprecated-----
-----------------
-- Here all importand integrals involving gaussian functions will be evaluated.
--Overlap integral for s orbitals
--calculates <1s,alpha, A | 1s, beta, B>
-- (Deprecated: s-orbital-only special case of s_12.)
overlaps :: Double -> Double -> Vector Double -> Vector Double -> Double
overlaps alpha beta rA rB = prefactor * exp exponent
  where prefactor = (pi/(alpha + beta))**(3/2)
        -- NOTE(review): textbook exponent uses |rA-rB|^2; 'distance'
        -- returns the unsquared norm — confirm.
        exponent = - (frac alpha beta * distance rA rB)
--Kinetic integral
--calculates <1s,alpha, A | - Laplace | 1s, beta, B>
-- (Deprecated: s-orbital-only special case of t_12.)
kinetic :: Double -> Double -> Vector Double -> Vector Double -> Double
kinetic alpha beta rA rB = prefactor * exp(exponent)
  where prefactor = (frac alpha beta)
                    *(6 -4 *(frac alpha beta) * distance rA rB)
                    *(pi/(alpha + beta))**(3/2)
        exponent = - ((frac alpha beta) * distance rA rB)
--Nuclear interaction integral
--calculates <1s,alpha, A | - Z/r_C | 1s, beta, B>
-- (Deprecated: s-orbital-only case.)  Uses the Boys base function f_0.
nuclear :: Double -> Double -> Vector Double -> Vector Double -> Vector Double -> Double -> Double
nuclear alpha beta rA rB rC z = pref1 * pref2 * exp(exponent)
  where pref1 = -2*pi*z/(alpha + beta)
        pref2 = f_0 arg
        -- NOTE(review): the Boys argument is usually (α+β)|P-C|^2;
        -- here it is |P-C| * (α*β) — confirm against the reference.
        arg = (distance (center alpha rA beta rB) rC ) * (alpha*beta)
        exponent = - ((frac alpha beta) * (distance rA rB))
--two-electron integral
--important!
--calculates <1s,alpha, A ; 1s, beta, B | 1s,gamma, C ; 1s, delta, D >
-- (Deprecated: s-orbital-only electron-repulsion integral, evaluated
-- via the Boys base function f_0.)
twoelectron :: Double -> Double -> Double -> Double -> Vector Double -> Vector Double -> Vector Double -> Vector Double -> Double
twoelectron a b g d rA rB rC rD = pref * exp(exponent) * (f_0 arg)
  where pref = (2*pi**(5/2))/
               ( (a+g)*(b+d)*(a+b+g+d)**(1/2) )
        exponent = -a*g/(a+g) * (distance rA rC)
                   -b*d/(b+d) * (distance rB rD)
        arg = (a+g)*(b+d)/(a+b+g+d) * ( distance (center a rA g rC) (center b rB d rD) )
--Overlap Matrix S_pq = <p|q>
--overlap p q = t
--where
-- t = buildMatrix (n*n) (n*n) (\(i,j) -> overlaps alpha beta rA rB )
|
#ifndef __COMANDO_R__
#define __COMANDO_R__
struct Comando {
int num_param;
char **params;
char *string;
int (*executar)(void *this, void *controlador);
};
#endif /* __COMANDO_R__ */ |
#! /usr/local/bin/RScript

library(data.table)
library(ggplot2)
library(ggthemes)

# Set working directories
# NOTE(review): hard-coded user-specific paths; parameterize before sharing.
setwd("~/bu/Desktop/MCMonitor")
figures.dir = "~/bu/Desktop/MCMonitor/Plots/"

# Read data (gzipped CSV of objective values over time).
dd = data.table(read.delim("./Plots_data/time_evolution.csv.gz",
                sep=","))
# Normalize factor columns to plain character vectors.
dd[, item_distribution := as.character(item_distribution)]
dd[, graph := as.character(graph)]

# Upper x-axis limit for the integer time breaks below.
max_time = max(dd$t)+1

# Time evolution plot: mean objective value per time step, one line per
# item distribution, faceted by graph x item distribution.
# NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3 (use `fun`).
p = ggplot(data=dd, aes(x=t, y=objective_value))
p = p + stat_summary(aes(y=objective_value, color=item_distribution, group=item_distribution), geom="line", fun.y=mean)
p = p + scale_x_continuous(name="Time", breaks=seq(0,max_time,1))
p = p + scale_y_continuous(name="Baseline Objective")
p = p + facet_grid(graph~item_distribution)
p = p + theme_bw()
p = p + theme(strip.background = element_blank())
p
ggsave(paste0(figures.dir, "time_evolution.pdf"), w=16, h=16)
Require Import Coq.ZArith.ZArith.
Require Import Coq.micromega.Lia.
Require Import Crypto.Util.ZRange.
Require Import Crypto.Util.ZRange.BasicLemmas.
Require Import Crypto.Util.ZRange.Operations.
Require Import Crypto.Util.ZUtil.Tactics.DivModToQuotRem.
Require Import Crypto.Util.ZUtil.Tactics.LtbToLt.
Require Import Crypto.Util.ZUtil.Tactics.PeelLe.
Require Import Crypto.Util.ZUtil.Tactics.ReplaceNegWithPos.
Require Import Crypto.Util.ZUtil.Hints.Core.
Require Import Crypto.Util.ZUtil.ZSimplify.
Require Import Crypto.Util.ZUtil.ZSimplify.Core.
Require Import Crypto.Util.ZUtil.Modulo.
Require Import Crypto.Util.ZUtil.Div.
Require Import Crypto.Util.Prod.
Require Import Crypto.Util.Tactics.BreakMatch.
Require Import Crypto.Util.Tactics.DestructHead.
Local Open Scope Z_scope.
Module ZRange.
  (* Splitting a bounded value at a positive modulus [m]: if [x] lies in
     range [r], then [x mod m] and [x / m] lie in the corresponding
     components of [split_bounds_pos r m]. *)
  Lemma is_bounded_by_bool_split_bounds_pos x r m (Hm : 0 < m)
    : is_bounded_by_bool x r = true
      -> andb (is_bounded_by_bool (x mod m) (fst (Operations.ZRange.split_bounds_pos r m)))
              (is_bounded_by_bool (x / m) (snd (Operations.ZRange.split_bounds_pos r m))) = true.
  Proof.
    cbv [is_bounded_by_bool Operations.ZRange.split_bounds_pos andb].
    (* Exhaustive case analysis plus (non)linear integer arithmetic. *)
    repeat first [ progress intros
                 | break_innermost_match_step
                 | break_innermost_match_hyps_step
                 | progress subst
                 | progress cbn [fst snd lower upper] in *
                 | reflexivity
                 | discriminate
                 | lia
                 | progress Z.ltb_to_lt
                 | progress Z.peel_le
                 | match goal with
                   | [ r : zrange |- _ ] => let l := fresh "l" in let u := fresh "u" in destruct r as [l u]
                   | [ H : (?x < ?m)%Z, H' : (?m * ?q + _ <= ?x)%Z |- _ ]
                     => assert (q < 0 \/ q = 0 \/ 0 < q)%Z by nia; destruct_head'_or;
                        [ assert (m * q < 0)%Z by nia; nia | progress subst | assert (0 < m * q)%Z by nia; nia ]
                   end
                 | progress autorewrite with zsimplify_const in *
                 | progress Z.div_mod_to_quot_rem
                 | nia ].
  Qed.

  (* Projection of the previous lemma onto the [mod] component. *)
  Lemma is_bounded_by_bool_fst_split_bounds_pos x r m (Hm : 0 < m)
    : is_bounded_by_bool x r = true
      -> (is_bounded_by_bool (x mod m) (fst (Operations.ZRange.split_bounds_pos r m))) = true.
  Proof. intro H; pose proof (@is_bounded_by_bool_split_bounds_pos x r m Hm H); Bool.split_andb; assumption. Qed.

  (* Projection of the first lemma onto the [div] component. *)
  Lemma is_bounded_by_bool_snd_split_bounds_pos x r m (Hm : 0 < m)
    : is_bounded_by_bool x r = true
      -> (is_bounded_by_bool (x / m) (snd (Operations.ZRange.split_bounds_pos r m))) = true.
  Proof. intro H; pose proof (@is_bounded_by_bool_split_bounds_pos x r m Hm H); Bool.split_andb; assumption. Qed.

  (* Version for [split_bounds], which also covers non-positive [m]:
     reduce to the positive case, negating where necessary. *)
  Lemma is_bounded_by_bool_split_bounds x r m
    : is_bounded_by_bool x r = true
      -> andb (is_bounded_by_bool (x mod m) (fst (Operations.ZRange.split_bounds r m)))
              (is_bounded_by_bool (x / m) (snd (Operations.ZRange.split_bounds r m))) = true.
  Proof.
    intro; cbv [ZRange.split_bounds]; eta_expand; break_match; cbn [fst snd] in *.
    all: Z.ltb_to_lt.
    all: Z.replace_all_neg_with_pos.
    - now apply is_bounded_by_bool_split_bounds_pos.
    - autorewrite with zsimplify_const. now rewrite 1?Bool.andb_comm.
    - rewrite Z.div_opp_r, Z.mod_opp_r, ZRange.is_bounded_by_bool_opp.
      now apply is_bounded_by_bool_split_bounds_pos; [lia|rewrite ZRange.is_bounded_by_bool_opp].
  Qed.

  (* Conjunction form of [is_bounded_by_bool_split_bounds]. *)
  Lemma is_bounded_by_bool_split_bounds_and x r m
    : is_bounded_by_bool x r = true
      -> and (is_bounded_by_bool (x mod m) (fst (Operations.ZRange.split_bounds r m)) = true)
             (is_bounded_by_bool (x / m) (snd (Operations.ZRange.split_bounds r m)) = true).
  Proof.
    intro H; pose proof (@is_bounded_by_bool_split_bounds x r m H).
    Bool.split_andb; split; assumption.
  Qed.
End ZRange.
|
[STATEMENT]
lemma amtx_get_aref:
"(uncurry (mtx_get M), uncurry (RETURN oo op_mtx_get)) \<in> [\<lambda>(_,(i,j)). i<N \<and> j<M]\<^sub>a (is_amtx N M)\<^sup>k *\<^sub>a (prod_assn nat_assn nat_assn)\<^sup>k \<rightarrow> id_assn"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (uncurry (mtx_get M), uncurry (RETURN \<circ>\<circ> op_mtx_get)) \<in> [\<lambda>(uu_, i, j). i < N \<and> j < M]\<^sub>a (is_amtx N M)\<^sup>k *\<^sub>a (nat_assn \<times>\<^sub>a nat_assn)\<^sup>k \<rightarrow> id_assn
[PROOF STEP]
apply rule
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>c a. case a of (uu_, i, j) \<Rightarrow> i < N \<and> j < M \<Longrightarrow> hn_refine (fst ((is_amtx N M)\<^sup>k *\<^sub>a (nat_assn \<times>\<^sub>a nat_assn)\<^sup>k) a c) (uncurry (mtx_get M) c) (snd ((is_amtx N M)\<^sup>k *\<^sub>a (nat_assn \<times>\<^sub>a nat_assn)\<^sup>k) a c) id_assn (uncurry (RETURN \<circ>\<circ> op_mtx_get) a)
[PROOF STEP]
apply rule
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>c a. \<lbrakk>case a of (uu_, i, j) \<Rightarrow> i < N \<and> j < M; nofail (uncurry (RETURN \<circ>\<circ> op_mtx_get) a)\<rbrakk> \<Longrightarrow> <fst ((is_amtx N M)\<^sup>k *\<^sub>a (nat_assn \<times>\<^sub>a nat_assn)\<^sup>k) a c> uncurry (mtx_get M) c <\<lambda>r. snd ((is_amtx N M)\<^sup>k *\<^sub>a (nat_assn \<times>\<^sub>a nat_assn)\<^sup>k) a c * (\<exists>\<^sub>Ax. id_assn x r * \<up> (RETURN x \<le> uncurry (RETURN \<circ>\<circ> op_mtx_get) a))>\<^sub>t
[PROOF STEP]
apply (sep_auto simp: pure_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
State Before: a b : ℤ
⊢ card (Ioo a b) = toNat (b - a - 1) State After: a b : ℤ
⊢ card (map (Function.Embedding.trans Nat.castEmbedding (addLeftEmbedding (a + 1))) (range (toNat (b - a - 1)))) =
toNat (b - a - 1) Tactic: change (Finset.map _ _).card = _ State Before: a b : ℤ
⊢ card (map (Function.Embedding.trans Nat.castEmbedding (addLeftEmbedding (a + 1))) (range (toNat (b - a - 1)))) =
toNat (b - a - 1) State After: no goals Tactic: rw [Finset.card_map, Finset.card_range] |
%23: 7, Munkres \textsection 24: 1, 3, 10e
% v0.04 by Eric J. Malm, 10 Mar 2005
\documentclass[12pt,letterpaper,boxed]{article}
% set 1-inch margins in the document
\usepackage{enumerate}
\usepackage{amsthm}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{marginnote}
\usepackage{float}
\input{macros.tex}
% \newtheorem{lemma}[section]{Lemma}
\usepackage{graphicx}
\usepackage{float}
% Note: for other writers, please take a look at the shortcuts I have already defined above.
\author{Samuel Stewart}
\title{Recognizing Mathematical Symbols with a Sparse CNN}
% TODO: employ roman numerals in the problem
\begin{document}
\maketitle
\section{Problem}
Goal: beat prediction accuracy \emph{and} speed of Haskell backend using dynamic time warp.
Hypothesis: Convolution neural network is better at recognizing characters. Sparsity enables speedup.
\section{Introduction to Neural Networks}
\subsection{Starting with a Single Neuron}
Abstractly, a neuron receives an input vector $\overline{x} \in \R^n$ and outputs a real number: a large positive number indicates activation, and a small negative number indicates deactivation. A neural network consists of millions of such neurons, strung together carefully.
% picture of neuron from neural network? Hand drawn or in Omnigraffle.
A neuron has the following parameters
\begin{enumerate}
\item A shift vector $\overline{b}$ for the input vector
\item A vector $\overline{w}$ of weights for the input vector
\item A mapping $f : \R \to \R$ that describes the output of the neuron (intuitively, when the neuron \textbf{activates}).
\end{enumerate}
The output of the neuron is then simply
\[
f(\overline{w} \cdot (\overline{x} + \overline{b})).
\]
A single neuron is quite powerful and a good place to begin. One can rephrase other classifiers \cite{andrej2017} within this framework. For example, if one chooses
\[
f(t) = \frac{1}{1 + e^{-t}}
\]
% include graph of logistic function on the right and the equation on the left
and tune the parameters appropriately, then one is performing \textit{logistic regression}. For real neural networks, the function
\[
f(t) = \max(0, t)
\]
% picture of max function on the right and equation on the left
is more accurate (\cite{andrej2017}).
As a simple example, consider the problem of predicting whether a given height measurement is of a man or a woman. With the logistic function as above, one can build a simple classifier in Mathematica.
% Get some height data (or generate it) in Mathematica. Compute the loss function and generate a graph.
\subsection{A Geometric Viewpoint}
Note: one can view classification problems as really just trying to decompose image into basis elements. Since each row of W is a dot product against the input data, we are really detecting the high dimensional *angle* and offset. Everything is really just high dimensional geometry.
\subsection{Neural Networks as Nonlinear Combinations of Matrix Multiplications}
\subsection{Graph Representation}
\section{Previous Work}
\section{The Problem}
\section{Methodology}
We requested the full $210,454$ samples from the author of the popular online tool Detexify, which converts hand-drawn \LaTeX{} characters to codes [cite] using an algorithm called dynamic time warp [cite dynamic time warp]. Each sample consists of a classification (the \LaTeX{} symbol code) and an array of timestamped coordinates representing the stroke pattern for that sample. Preprocessing required converting each sample to a $200 \times 200$ grayscale image by rendering a thick line connecting the sampled points via the Python Imaging Library [cite].
Using the Python frameworks \textbf{Lasagne} and \textbf{nolearn}, we implemented a network with the following structure
% Diagram of the network we implemented. Can we just do a network with no hidden layers? Probably
We reserved one quarter of the data to test generalizability and trained the network on the remainder. The following figure shows our loss function on the training data
% Loss function figure
The accuracy on the out of sample data was $100\%$
\subsection{Evaluation of a network with linear algebra}
\subsection{Density of neural networks}
\section{Convolution Neural Networks}
\subsection{Why are CNNs different from vanilla neural networks?}
1. "better" in some ill-defined sense. I assume classification accuracy?
2. General idea appears to be that
\section{Exploiting Sparsity in Convolutional Neural Networks}
\subsection{Training the Network}
\subsection{Cost of Accuracy}
\section{Questions while learning}
1. How to select proper activation function?
2. How can one rephrase this problem mathematically?
3. Why can't neurons have multiple outputs?
4. Are there results connecting the number of samples with the accuracy / generalizability of the network?
\section{Reproducible Research Questions}
1. What did I do?
2. Why did I do it?
3. How did I set up everything at the time of the analysis?
4. When did I make changes, and what were they?
5. Who needs to access it, and how can I get it to them?
\section{References}
ConvNetJS (playground for neural nets)
http://cs.stanford.edu/people/karpathy/convnetjs/
andrej2017
Andrej Karpathy. http://cs231n.github.io/neural-networks-1/
(Spatially-sparse convolutional neural networks) https://arxiv.org/abs/1409.6070
VisTrails workflow management
https://www.vistrails.org/index.php/Main_Page
Proof of universality of neural networks:
http://neuralnetworksanddeeplearning.com/chap4.html
Pandas for data cleaning
http://t-redactyl.io/blog/2016/10/a-crash-course-in-reproducible-research-in-python.html
IPython
https://ipython.org/documentation.html
\end{document}
|
If $f$ is a measurable function on the real line, then $\int f(x)\,dx = |c| \int f(t + cx)\,dx$ for any $c \neq 0$.
// Copyright (c) 2005 Daniel Wallin and Arvid Norberg
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
// TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
// PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
// SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
// ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
// ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
// OR OTHER DEALINGS IN THE SOFTWARE.
#ifndef LUABIND_OBJECT_050419_HPP
#define LUABIND_OBJECT_050419_HPP
#include <boost/implicit_cast.hpp> // detail::push()
#include <boost/ref.hpp> // detail::push()
#include <boost/mpl/bool.hpp> // value_wrapper_traits specializations
#include <boost/mpl/apply_wrap.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/optional.hpp>
#include <luabind/nil.hpp>
#include <luabind/value_wrapper.hpp>
#include <luabind/detail/pcall.hpp>
#include <luabind/handle.hpp>
#include <luabind/from_stack.hpp>
#include <luabind/detail/policy.hpp>
#include <luabind/detail/stack_utils.hpp>
#include <luabind/detail/convert_to_lua.hpp> // REFACTOR
#include <luabind/typeid.hpp>
#include <boost/iterator/iterator_facade.hpp> // iterator
#include <boost/preprocessor/iteration/iterate.hpp>
#include <boost/utility/enable_if.hpp>
namespace luabind {
namespace detail
{
namespace mpl = boost::mpl;
// Push a C++ value onto the Lua stack, converting it with the converter
// selected by ConverterGenerator.  boost::reference_wrapper<T> arguments
// are unwrapped first so the converter sees the underlying T&.
template<class T, class ConverterGenerator>
void push_aux(lua_State* interpreter, T& value, ConverterGenerator*)
{
    // Strip a possible boost::ref() wrapper from T.
    typedef typename boost::mpl::if_<
        boost::is_reference_wrapper<T>
      , BOOST_DEDUCED_TYPENAME boost::unwrap_reference<T>::type&
      , T
    >::type unwrapped_type;

    // Instantiate the cpp->lua converter for the unwrapped type.
    typename mpl::apply_wrap2<
        ConverterGenerator,unwrapped_type,cpp_to_lua
    >::type cv;

    // Leaves exactly one new value on the Lua stack.
    cv.apply(
        interpreter
      , boost::implicit_cast<
            BOOST_DEDUCED_TYPENAME boost::unwrap_reference<T>::type&
        >(value)
    );
}

// Push `value` using the conversion policy found in `Policies`
// (policy slot 0, the result/self slot).
template<class T, class Policies>
void push(lua_State* interpreter, T& value, Policies const&)
{
    typedef typename find_conversion_policy<
        0
      , Policies
    >::type converter_policy;

    push_aux(interpreter, value, (converter_policy*)0);
}

// Convenience overload: push with the default (null) policy list.
template<class T>
void push(lua_State* interpreter, T& value)
{
    push(interpreter, value, null_type());
}
} // namespace detail
namespace adl
{
namespace mpl = boost::mpl;
template <class T>
class object_interface;
// Classic sizeof-based trait: detects whether T derives from
// object_interface<X> (a pre-C++11 is_base_of substitute).
namespace is_object_interface_aux
{
  typedef char (&yes)[1];
  typedef char (&no)[2];

  // Overload resolution picks `yes` iff T* converts to object_interface<X>*.
  template <class T>
  yes check(object_interface<T>*);
  no check(void*);

  template <class T>
  struct impl
  {
      BOOST_STATIC_CONSTANT(bool, value =
          sizeof(is_object_interface_aux::check((T*)0)) == sizeof(yes)
      );

      typedef mpl::bool_<value> type;
  };
} // namespace is_object_interface_aux

// Compile-time predicate: true iff T models object_interface.
template <class T>
struct is_object_interface
  : is_object_interface_aux::impl<T>::type
{};

// SFINAE helper for the binary operators below: the operator takes part
// in overload resolution only when at least one operand is a luabind
// object type.  Falls back to an unconditional typedef for compilers
// without SFINAE support.
template <class R, class T, class U>
struct enable_binary
# ifndef BOOST_NO_SFINAE
  : boost::enable_if<
        mpl::or_<
            is_object_interface<T>
          , is_object_interface<U>
        >
      , R
    >
{};
# else
{
    typedef R type;
};
# endif
// Determine the lua_State on which to compare lhs/rhs when BOTH operands
// are value wrappers.  Returns 0 when the comparison should proceed on L,
// 1 when both wrappers are empty (treated as equal), and -1 when the two
// wrappers belong to different interpreters (treated as unequal).
template<class T, class U>
int binary_interpreter(lua_State*& L, T const& lhs, U const& rhs
  , boost::mpl::true_, boost::mpl::true_)
{
    L = value_wrapper_traits<T>::interpreter(lhs);
    lua_State* L2 = value_wrapper_traits<U>::interpreter(rhs);

    // you are comparing objects with different interpreters
    // that's not allowed.
    assert(L == L2 || L == 0 || L2 == 0);

    // if the two objects we compare have different interpreters
    // then they can never compare equal; if neither has an
    // interpreter, both are empty and compare equal.
    if (L != L2) return -1;
    if (L == 0) return 1;

    return 0;
}

// Only the left operand is a value wrapper: use its interpreter.
template<class T, class U>
int binary_interpreter(lua_State*& L, T const& x, U const&
  , boost::mpl::true_, boost::mpl::false_)
{
    L = value_wrapper_traits<T>::interpreter(x);
    return 0;
}

// Only the right operand is a value wrapper: use its interpreter.
template<class T, class U>
int binary_interpreter(lua_State*& L, T const&, U const& x, boost::mpl::false_, boost::mpl::true_)
{
    L = value_wrapper_traits<U>::interpreter(x);
    return 0;
}

// Dispatcher: tag-dispatches on whether each operand is a value wrapper.
template<class T, class U>
int binary_interpreter(lua_State*& L, T const& x, U const& y)
{
    return binary_interpreter(
        L
      , x
      , y
      , is_value_wrapper<T>()
      , is_value_wrapper<U>()
    );
}
// Generates operator== and operator< for any operand pair where at least
// one side is a luabind value wrapper.  Both operands are pushed onto the
// Lua stack and compared with the given Lua C API primitive; the two
// stack_pop guards restore the stack on scope exit.  binary_interpreter's
// 1 / -1 results short-circuit the empty-wrapper and interpreter-mismatch
// cases before anything touches the stack.
#define LUABIND_BINARY_OP_DEF(op, fn) \
    template<class LHS, class RHS> \
    typename enable_binary<bool,LHS,RHS>::type \
    operator op(LHS const& lhs, RHS const& rhs) \
    { \
        lua_State* L = 0; \
        switch (binary_interpreter(L, lhs, rhs)) \
        { \
        case 1: \
            return true; \
        case -1: \
            return false; \
        } \
 \
        assert(L); \
 \
        detail::stack_pop pop1(L, 1); \
        detail::push(L, lhs); \
        detail::stack_pop pop2(L, 1); \
        detail::push(L, rhs); \
 \
        return fn(L, -1, -2) != 0; \
    }

// Note: lua_equal / lua_lessthan are Lua 5.1 API functions.
LUABIND_BINARY_OP_DEF(==, lua_equal)
LUABIND_BINARY_OP_DEF(<, lua_lessthan)
// Stream a wrapped Lua value by converting it with lua_tostring.
// NOTE(review): lua_tostring returns 0 for values that are neither
// strings nor numbers -- presumably callers only stream stringifiable
// values; confirm before relying on this for tables/userdata.
template<class ValueWrapper>
std::ostream& operator<<(std::ostream& os
  , object_interface<ValueWrapper> const& v)
{
    using namespace luabind;
    lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
        static_cast<ValueWrapper const&>(v));
    // Guard declared before unwrap pushes: pops the value on scope exit.
    detail::stack_pop pop(interpreter, 1);
    value_wrapper_traits<ValueWrapper>::unwrap(interpreter
      , static_cast<ValueWrapper const&>(v));
    char const* p = lua_tostring(interpreter, -1);
    std::size_t len = lua_strlen(interpreter, -1);
    std::copy(p, p + len, std::ostream_iterator<char>(os));
    return os;
}

#undef LUABIND_BINARY_OP_DEF
// Derived comparisons, expressed in terms of the primitive operator< and
// operator== generated above.  Enabled only when at least one operand is
// a luabind value wrapper (see enable_binary).

template<class LHS, class RHS>
typename enable_binary<bool,LHS,RHS>::type
operator>(LHS const& lhs, RHS const& rhs)
{
    // a > b  <=>  !(a < b) && !(a == b)   (De Morgan of !(a < b || a == b))
    return !(lhs < rhs) && !(lhs == rhs);
}

template<class LHS, class RHS>
typename enable_binary<bool,LHS,RHS>::type
operator<=(LHS const& lhs, RHS const& rhs)
{
    // a <= b  <=>  a < b, or failing that, a == b
    if (lhs < rhs)
        return true;
    return lhs == rhs;
}

template<class LHS, class RHS>
typename enable_binary<bool,LHS,RHS>::type
operator>=(LHS const& lhs, RHS const& rhs)
{
    // a >= b  <=>  not (a < b)
    return (lhs < rhs) ? false : true;
}

template<class LHS, class RHS>
typename enable_binary<bool,LHS,RHS>::type
operator!=(LHS const& lhs, RHS const& rhs)
{
    // a != b  <=>  not (a == b)
    return (lhs == rhs) ? false : true;
}
template<class ValueWrapper, class Arguments>
struct call_proxy;
template<class Next>
class index_proxy;
class object;
// CRTP base for everything that behaves like a Lua value (object,
// argument, index/iterator proxies).  Provides operator() call syntax
// and a safe-bool conversion.  Derived must model the value_wrapper
// concept (be able to push itself onto its interpreter's stack).
template<class Derived>
class object_interface
{
    struct safe_bool_type {};
public:
    ~object_interface() {}

    // Call the wrapped Lua value with no arguments.  The call is lazy:
    // call_proxy fires it on conversion to object or at destruction.
    call_proxy<Derived, boost::tuples::tuple<> > operator()();

    // One-argument call; arguments are captured by pointer until the
    // proxy fires, so they must outlive the proxy (temporaries do, for
    // the usual full-expression lifetime).
    template<class A0>
    call_proxy<
        Derived
      , boost::tuples::tuple<A0 const*>
    > operator()(A0 const& a0)
    {
        typedef boost::tuples::tuple<A0 const*> arguments;
        return call_proxy<Derived, arguments>(
            derived()
          , arguments(&a0)
        );
    }

    // Two-argument call.
    template<class A0, class A1>
    call_proxy<
        Derived
      , boost::tuples::tuple<A0 const*, A1 const*>
    > operator()(A0 const& a0, A1 const& a1)
    {
        typedef boost::tuples::tuple<A0 const*, A1 const*> arguments;
        return call_proxy<Derived, arguments>(
            derived()
          , arguments(&a0, &a1)
        );
    }

    // The rest of the overloads are PP-generated.
#define BOOST_PP_ITERATION_PARAMS_1 (3, \
    (3, LUABIND_MAX_ARITY, <luabind/detail/object_call.hpp>))
#include BOOST_PP_ITERATE()

    // Safe-bool idiom: true iff the wrapper is non-empty and the wrapped
    // value tests true in Lua (i.e. is neither nil nor false).
    operator safe_bool_type*() const
    {
        lua_State* L = value_wrapper_traits<Derived>::interpreter(derived());

        if (!L)
            return 0;

        value_wrapper_traits<Derived>::unwrap(L, derived());
        detail::stack_pop pop(L, 1);

        return lua_toboolean(L, -1) == 1 ? (safe_bool_type*)1 : 0;
    }

private:
    Derived& derived()
    {
        return *static_cast<Derived*>(this);
    }

    Derived const& derived() const
    {
        return *static_cast<Derived const*>(this);
    }
};
#ifdef LUABIND_USE_VALUE_WRAPPER_TAG
struct iterator_proxy_tag;
#endif

// Proxy returned when dereferencing a luabind table iterator.  Keeps the
// table and the current key on the Lua stack (addressed by absolute
// index) for its whole lifetime; assignment writes through to the table
// entry via AccessPolicy (metamethod-respecting or raw access).
template<class AccessPolicy>
class iterator_proxy
  : public object_interface<iterator_proxy<AccessPolicy> >
{
public:
#ifdef LUABIND_USE_VALUE_WRAPPER_TAG
    typedef iterator_proxy_tag value_wrapper_tag;
#endif

    // Pushes table and key; remembers their absolute stack indices.
    iterator_proxy(lua_State* interpreter, handle const& table, handle const& key)
      : m_interpreter(interpreter)
      , m_table_index(lua_gettop(interpreter) + 1)
      , m_key_index(m_table_index + 1)
    {
        table.push(m_interpreter);
        key.push(m_interpreter);
    }

    // Ownership-transferring copy: the source gives up its interpreter
    // pointer so the two stack slots are popped exactly once.
    iterator_proxy(iterator_proxy const& other)
      : m_interpreter(other.m_interpreter)
      , m_table_index(other.m_table_index)
      , m_key_index(other.m_key_index)
    {
        other.m_interpreter = 0;
    }

    ~iterator_proxy()
    {
        if (m_interpreter)
            lua_pop(m_interpreter, 2);
    }

    // this will set the value to nil
    iterator_proxy & operator=(luabind::detail::nil_type)
    {
        lua_pushvalue(m_interpreter, m_key_index);
        lua_pushnil(m_interpreter);
        AccessPolicy::set(m_interpreter, m_table_index);
        return *this;
    }

    // Write `value` into table[key] through the access policy.
    template<class T>
    iterator_proxy& operator=(T const& value)
    {
        lua_pushvalue(m_interpreter, m_key_index);
        detail::push(m_interpreter, value);
        AccessPolicy::set(m_interpreter, m_table_index);
        return *this;
    }

    // Nested indexing: (*it)[key].
    template<class Key>
    index_proxy<iterator_proxy<AccessPolicy> > operator[](Key const& key)
    {
        return index_proxy<iterator_proxy<AccessPolicy> >(
            *this, m_interpreter, key
        );
    }

    // This is non-const to prevent conversion on lvalues.
    operator object();

    lua_State* interpreter() const
    {
        return m_interpreter;
    }

    // TODO: Why is it non-const?
    // Pushes the current value (table[key]) onto the stack.
    void push(lua_State* interpreter)
    {
        assert(interpreter == m_interpreter);

        lua_pushvalue(m_interpreter, m_key_index);
        AccessPolicy::get(m_interpreter, m_table_index);
    }

private:
    // mutable: zeroed in the copy constructor to transfer ownership.
    mutable lua_State* m_interpreter;
    int m_table_index;
    int m_key_index;
};
} // namespace adl
namespace detail
{
// Table-access strategies used by basic_iterator / iterator_proxy.
// Each expects the key (and for set(), the value) on top of the stack
// and the table at the given stack index.

// Access that respects metamethods (__index / __newindex).
struct basic_access
{
    static void set(lua_State* interpreter, int table_index)
    {
        lua_settable(interpreter, table_index);
    }

    static void get(lua_State* interpreter, int table_index)
    {
        lua_gettable(interpreter, table_index);
    }
};

// Raw access that bypasses metamethods.
struct raw_access
{
    static void set(lua_State* interpreter, int table_index)
    {
        lua_rawset(interpreter, table_index);
    }

    static void get(lua_State* interpreter, int table_index)
    {
        lua_rawget(interpreter, table_index);
    }
};
// Single-pass iterator over a Lua table, driven by lua_next().  Holds
// registry handles to the table and the current key; the end iterator
// is represented by a null m_interpreter.
template<class AccessPolicy>
class basic_iterator
  : public boost::iterator_facade<
        basic_iterator<AccessPolicy>
      , adl::iterator_proxy<AccessPolicy>
      , boost::single_pass_traversal_tag
      , adl::iterator_proxy<AccessPolicy>
    >
{
public:
    // End iterator.
    basic_iterator()
      : m_interpreter(0)
    {}

    // Begin iterator: positions on the first key/value pair of
    // `value_wrapper` (a table); becomes end() for an empty table.
    template<class ValueWrapper>
    explicit basic_iterator(ValueWrapper const& value_wrapper)
      : m_interpreter(
            value_wrapper_traits<ValueWrapper>::interpreter(value_wrapper)
        )
    {
        detail::stack_pop pop(m_interpreter, 1);
        value_wrapper_traits<ValueWrapper>::unwrap(m_interpreter, value_wrapper);

        // lua_next pops the nil "start" key and pushes key + value
        // (or pushes nothing and returns 0 when the table is empty).
        lua_pushnil(m_interpreter);
        if (lua_next(m_interpreter, -2) != 0)
        {
            detail::stack_pop pop(m_interpreter, 2);
            handle(m_interpreter, -2).swap(m_key);
        }
        else
        {
            m_interpreter = 0;
            return;
        }

        handle(m_interpreter, -1).swap(m_table);
    }

    // The current key as an object (defined after class object).
    adl::object key() const;

private:
    friend class boost::iterator_core_access;

    // Advance with lua_next; on exhaustion release both handles and
    // degrade to the end iterator.
    void increment()
    {
        m_table.push(m_interpreter);
        m_key.push(m_interpreter);

        detail::stack_pop pop(m_interpreter, 1);

        if (lua_next(m_interpreter, -2) != 0)
        {
            m_key.replace(m_interpreter, -2);
            lua_pop(m_interpreter, 2);
        }
        else
        {
            m_interpreter = 0;
            handle().swap(m_table);
            handle().swap(m_key);
        }
    }

    // Equal iff both are end(), or they share an interpreter and their
    // current keys compare equal in Lua.
    bool equal(basic_iterator const& other) const
    {
        if (m_interpreter == 0 && other.m_interpreter == 0)
            return true;

        if (m_interpreter != other.m_interpreter)
            return false;

        detail::stack_pop pop(m_interpreter, 2);
        m_key.push(m_interpreter);
        other.m_key.push(m_interpreter);

        return lua_equal(m_interpreter, -2, -1) != 0;
    }

    // Dereference yields a write-through proxy for table[current key].
    adl::iterator_proxy<AccessPolicy> dereference() const
    {
        return adl::iterator_proxy<AccessPolicy>(m_interpreter, m_table, m_key);
    }

    lua_State* m_interpreter;
    handle m_table;
    handle m_key;
};
// Needed because of some strange ADL issues.
#define LUABIND_OPERATOR_ADL_WKND(op) \
inline bool operator op( \
basic_iterator<basic_access> const& x \
, basic_iterator<basic_access> const& y) \
{ \
return boost::operator op(x, y); \
} \
\
inline bool operator op( \
basic_iterator<raw_access> const& x \
, basic_iterator<raw_access> const& y) \
{ \
return boost::operator op(x, y); \
}
LUABIND_OPERATOR_ADL_WKND(==)
LUABIND_OPERATOR_ADL_WKND(!=)
#undef LUABIND_OPERATOR_ADL_WKND
} // namespace detail
namespace adl
{
#ifdef LUABIND_USE_VALUE_WRAPPER_TAG
struct index_proxy_tag;
#endif

// Proxy returned by operator[] on objects and proxies.  Keeps the pushed
// key on the Lua stack for its lifetime; reading converts table[key] to
// an object, assigning writes through with lua_settable.  `Next` is the
// enclosing value wrapper (object, argument, another proxy, ...).
template<class Next>
class index_proxy
  : public object_interface<index_proxy<Next> >
{
public:
#ifdef LUABIND_USE_VALUE_WRAPPER_TAG
    typedef index_proxy_tag value_wrapper_tag;
#endif

    typedef index_proxy<Next> this_type;

    // Pushes `key`; its absolute stack index is recorded in m_key_index.
    template<class Key>
    index_proxy(Next const& next, lua_State* interpreter, Key const& key)
      : m_interpreter(interpreter)
      , m_key_index(lua_gettop(interpreter) + 1)
      , m_next(next)
    {
        detail::push(m_interpreter, key);
    }

    // Ownership-transferring copy (cf. iterator_proxy): the source stops
    // owning the key slot so it is popped exactly once.
    index_proxy(index_proxy const& other)
      : m_interpreter(other.m_interpreter)
      , m_key_index(other.m_key_index)
      , m_next(other.m_next)
    {
        other.m_interpreter = 0;
    }

    ~index_proxy()
    {
        if (m_interpreter)
            lua_pop(m_interpreter, 1);
    }

    // This is non-const to prevent conversion on lvalues.
    operator object();

    // this will set the value to nil
    this_type& operator=(luabind::detail::nil_type)
    {
        value_wrapper_traits<Next>::unwrap(m_interpreter, m_next);
        detail::stack_pop pop(m_interpreter, 1);

        lua_pushvalue(m_interpreter, m_key_index);
        lua_pushnil(m_interpreter);
        lua_settable(m_interpreter, -3);
        return *this;
    }

    // table[key] = value.
    template<class T>
    this_type& operator=(T const& value)
    {
        value_wrapper_traits<Next>::unwrap(m_interpreter, m_next);
        detail::stack_pop pop(m_interpreter, 1);

        lua_pushvalue(m_interpreter, m_key_index);
        detail::push(m_interpreter, value);
        lua_settable(m_interpreter, -3);
        return *this;
    }

    // Assigning from another proxy copies the *referred* value, not the
    // proxy itself.
    this_type& operator=(this_type const& value)
    {
        value_wrapper_traits<Next>::unwrap(m_interpreter, m_next);
        detail::stack_pop pop(m_interpreter, 1);

        lua_pushvalue(m_interpreter, m_key_index);
        detail::push(m_interpreter, value);
        lua_settable(m_interpreter, -3);
        return *this;
    }

    // Chained indexing: obj[k1][k2].
    template<class T>
    index_proxy<this_type> operator[](T const& key)
    {
        return index_proxy<this_type>(*this, m_interpreter, key);
    }

    // Pushes the referred value (table[key]); defined out of line below.
    void push(lua_State* interpreter);

    lua_State* interpreter() const
    {
        return m_interpreter;
    }

private:
    struct hidden_type {};

//    this_type& operator=(index_proxy<Next> const&);

    // mutable: zeroed on copy to transfer ownership of the key slot.
    mutable lua_State* m_interpreter;
    int m_key_index;
    Next const& m_next;
};
} // namespace adl
typedef detail::basic_iterator<detail::basic_access> iterator;
typedef detail::basic_iterator<detail::raw_access> raw_iterator;
// value_wrapper_traits teaches generic code how to obtain an interpreter
// from, and push, an index_proxy.  With LUABIND_USE_VALUE_WRAPPER_TAG the
// specialization is keyed on the nested tag type instead of the template.
#ifndef LUABIND_USE_VALUE_WRAPPER_TAG
template<class T>
struct value_wrapper_traits<adl::index_proxy<T> >
#else
template<>
struct value_wrapper_traits<adl::index_proxy_tag>
#endif
{
    typedef boost::mpl::true_ is_specialized;

    template<class Next>
    static lua_State* interpreter(adl::index_proxy<Next> const& proxy)
    {
        return proxy.interpreter();
    }

    template<class Next>
    static void unwrap(lua_State* interpreter, adl::index_proxy<Next> const& proxy)
    {
        // push() is declared non-const; the proxy's logical state is
        // unchanged, hence the const_cast.
        const_cast<adl::index_proxy<Next>&>(proxy).push(interpreter);
    }
};

// Same for iterator_proxy (dereferenced table iterators).
#ifndef LUABIND_USE_VALUE_WRAPPER_TAG
template<class AccessPolicy>
struct value_wrapper_traits<adl::iterator_proxy<AccessPolicy> >
#else
template<>
struct value_wrapper_traits<adl::iterator_proxy_tag>
#endif
{
    typedef boost::mpl::true_ is_specialized;

    template<class Proxy>
    static lua_State* interpreter(Proxy const& p)
    {
        return p.interpreter();
    }

    template<class Proxy>
    static void unwrap(lua_State* interpreter, Proxy const& p)
    {
        // TODO: Why const_cast?
        const_cast<Proxy&>(p).push(interpreter);
    }
};
namespace adl
{
// An object holds a reference to a Lua value residing
// in the registry.
// Owning reference to a Lua value, kept alive through the Lua registry
// (see class handle).  A default-constructed object is "invalid" (no
// interpreter) and must not be pushed.
class object : public object_interface<object>
{
public:
    object()
    {}

    explicit object(handle const& other)
      : m_handle(other)
    {}

    // Refer to the value at `index` on the stack (does not pop it).
    explicit object(from_stack const& stack_reference)
      : m_handle(stack_reference.interpreter, stack_reference.index)
    {
    }

    // Convert a C++ value to Lua and capture the result.
    template<class T>
    object(lua_State* interpreter, T const& value)
    {
        detail::push(interpreter, value);
        detail::stack_pop pop(interpreter, 1);
        handle(interpreter, -1).swap(m_handle);
    }

    // Convert with an explicit conversion policy list.
    template<class T, class Policies>
    object(lua_State* interpreter, T const& value, Policies const&)
    {
        detail::push(interpreter, value, Policies());
        detail::stack_pop pop(interpreter, 1);
        handle(interpreter, -1).swap(m_handle);
    }

    void push(lua_State* interpreter) const;
    lua_State* interpreter() const;
    bool is_valid() const;

    // obj[key] indexing; yields a read/write proxy.
    template<class T>
    index_proxy<object> operator[](T const& key) const
    {
        return index_proxy<object>(
            *this, m_handle.interpreter(), key
        );
    }

    void swap(object& other)
    {
        m_handle.swap(other.m_handle);
    }

private:
    handle m_handle;
};

// Push the referenced value onto `interpreter`'s stack.
inline void object::push(lua_State* interpreter) const
{
    m_handle.push(interpreter);
}

inline lua_State* object::interpreter() const
{
    return m_handle.interpreter();
}

// True iff this object actually references a value.
inline bool object::is_valid() const
{
    return m_handle.interpreter() != 0;
}
// Non-owning reference to a value at a fixed position on the Lua stack,
// typically an argument passed from Lua into bound C++ code.  The
// referenced slot must stay alive for the lifetime of this wrapper.
class argument : public object_interface<argument>
{
public:
    argument(from_stack const& stack_reference)
      : m_interpreter(stack_reference.interpreter)
      , m_index(stack_reference.index)
    {
        // Normalize a relative (negative) index to an absolute one so the
        // reference stays valid while the stack grows.  Per the Lua C API
        // the absolute index is gettop + idx + 1 (e.g. -1 -> gettop).
        // The previous formula, gettop - idx + 1, pointed past the top.
        if (m_index < 0)
            m_index = lua_gettop(m_interpreter) + m_index + 1;
    }

    // obj[key] indexing on the referenced (table) value.
    template<class T>
    index_proxy<argument> operator[](T const& key) const
    {
        return index_proxy<argument>(*this, m_interpreter, key);
    }

    // Push a copy of the referenced slot.
    void push(lua_State* L) const
    {
        lua_pushvalue(L, m_index);
    }

    lua_State* interpreter() const
    {
        return m_interpreter;
    }

private:
    lua_State* m_interpreter;
    int m_index;
};
} // namespace adl
using adl::object;
using adl::argument;
// Traits for call_proxy: unwrapping a call proxy *performs the deferred
// call* and leaves its single result on the stack.
#ifndef LUABIND_USE_VALUE_WRAPPER_TAG
template <class ValueWrapper, class Arguments>
struct value_wrapper_traits<adl::call_proxy<ValueWrapper, Arguments> >
#else
template<>
struct value_wrapper_traits<adl::call_proxy_tag>
#endif
{
    typedef boost::mpl::true_ is_specialized;

    template<class W, class A>
    static lua_State* interpreter(adl::call_proxy<W,A> const& proxy)
    {
        return value_wrapper_traits<W>::interpreter(*proxy.value_wrapper);
    }

    template<class W, class A>
    static void unwrap(lua_State*, adl::call_proxy<W,A> const& proxy)
    {
        // Converting the proxy to object fires the call; const_cast
        // because the conversion consumes the proxy.
        object result = const_cast<adl::call_proxy<W,A>&>(proxy);
        result.push(result.interpreter());
    }
};

// Traits for object: push the registry-held value.
template<>
struct value_wrapper_traits<object>
{
    typedef boost::mpl::true_ is_specialized;

    static lua_State* interpreter(object const& value)
    {
        return value.interpreter();
    }

    static void unwrap(lua_State* interpreter, object const& value)
    {
        value.push(interpreter);
    }

    // Any Lua value qualifies as an object.
    static bool check(...)
    {
        return true;
    }
};

// Traits for argument: push a copy of the referenced stack slot.
template<>
struct value_wrapper_traits<argument>
{
    typedef boost::mpl::true_ is_specialized;

    static lua_State* interpreter(argument const& value)
    {
        return value.interpreter();
    }

    static void unwrap(lua_State* interpreter, argument const& value)
    {
        value.push(interpreter);
    }

    static bool check(...)
    {
        return true;
    }
};
// Push the value this proxy refers to: unwrap the parent, look up the
// key, then remove the parent so only the value remains on the stack.
template<class Next>
inline void adl::index_proxy<Next>::push(lua_State* interpreter)
{
    assert(interpreter == m_interpreter);

    value_wrapper_traits<Next>::unwrap(m_interpreter, m_next);

    lua_pushvalue(m_interpreter, m_key_index);
    lua_gettable(m_interpreter, -2);
    lua_remove(m_interpreter, -2);
}

// Materialize the referred value as an owning object.
template<class Next>
inline adl::index_proxy<Next>::operator object()
{
    detail::stack_pop pop(m_interpreter, 1);
    push(m_interpreter);
    return object(from_stack(m_interpreter, -1));
}

// Materialize table[current key] as an owning object.
template<class AccessPolicy>
adl::iterator_proxy<AccessPolicy>::operator object()
{
    lua_pushvalue(m_interpreter, m_key_index);
    AccessPolicy::get(m_interpreter, m_table_index);
    detail::stack_pop pop(m_interpreter, 1);
    return object(from_stack(m_interpreter, -1));
}

// The iterator's current key as an owning object.
template<class AccessPolicy>
object detail::basic_iterator<AccessPolicy>::key() const
{
    return object(m_key);
}
namespace detail
{
// Shared implementation of object_cast / object_cast_nothrow.  Unwraps
// the value, matches it against the lua->cpp converter selected by
// Policies, then either applies the conversion or delegates failure to
// ErrorPolicy (throwing vs. boost::none).  The trailing pointer
// parameters exist only to carry template arguments.
template<
    class T
  , class ValueWrapper
  , class Policies
  , class ErrorPolicy
  , class ReturnType
>
ReturnType object_cast_aux(
    ValueWrapper const& value_wrapper
  , T*
  , Policies*
  , ErrorPolicy*
  , ReturnType*
)
{
    lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
        value_wrapper
    );

#ifndef LUABIND_NO_ERROR_CHECKING
    // An empty/invalid wrapper has no interpreter: report immediately.
    if (!interpreter)
        return ErrorPolicy::handle_error(interpreter, typeid(void));
#endif

    value_wrapper_traits<ValueWrapper>::unwrap(interpreter, value_wrapper);
    detail::stack_pop pop(interpreter, 1);

    typedef typename detail::find_conversion_policy<
        0
      , Policies
    >::type converter_generator;

    typename mpl::apply_wrap2<converter_generator, T, lua_to_cpp>::type cv;

    // A negative match score means the value cannot convert to T.
    if (cv.match(interpreter, LUABIND_DECORATE_TYPE(T), -1) < 0)
    {
        return ErrorPolicy::handle_error(interpreter, typeid(T));
    }

    return cv.apply(interpreter, LUABIND_DECORATE_TYPE(T), -1);
}
# ifdef BOOST_MSVC
#  pragma warning(push)
#  pragma warning(disable:4702) // unreachable code
# endif

// Error policy for object_cast: report failure by throwing cast_failed
// (or, with exceptions disabled, invoking the registered callback and
// terminating).
template<class T>
struct throw_error_policy
{
    static T handle_error(lua_State* interpreter, type_id const& type_info)
    {
#ifndef LUABIND_NO_EXCEPTIONS
        throw cast_failed(interpreter, type_info);
#else
        cast_failed_callback_fun e = get_cast_failed_callback();
        if (e) e(interpreter, type_info);

        assert(0 && "object_cast failed. If you want to handle this error use "
            "luabind::set_error_callback()");
        std::terminate();
#endif
        // Never reached (throw/terminate above); this null dereference
        // exists only to silence missing-return warnings.
        return *(typename boost::remove_reference<T>::type*)0;
    }
};

# ifdef BOOST_MSVC
#  pragma warning(pop)
# endif

// Error policy for object_cast_nothrow: failure yields an empty optional.
template<class T>
struct nothrow_error_policy
{
    static boost::optional<T> handle_error(lua_State*, type_id const&)
    {
        return boost::optional<T>();
    }
};
} // namespace detail
// Convert a wrapped Lua value to T; throws luabind::cast_failed on
// mismatch (default conversion policies).
template<class T, class ValueWrapper>
T object_cast(ValueWrapper const& value_wrapper)
{
    return detail::object_cast_aux(
        value_wrapper
      , (T*)0
      , (detail::null_type*)0
      , (detail::throw_error_policy<T>*)0
      , (T*)0
    );
}

// As above, with an explicit conversion policy list.
template<class T, class ValueWrapper, class Policies>
T object_cast(ValueWrapper const& value_wrapper, Policies const&)
{
    return detail::object_cast_aux(
        value_wrapper
      , (T*)0
      , (Policies*)0
      , (detail::throw_error_policy<T>*)0
      , (T*)0
    );
}

// Non-throwing variant: returns boost::none on conversion failure.
template<class T, class ValueWrapper>
boost::optional<T> object_cast_nothrow(ValueWrapper const& value_wrapper)
{
    return detail::object_cast_aux(
        value_wrapper
      , (T*)0
      , (detail::null_type*)0
      , (detail::nothrow_error_policy<T>*)0
      , (boost::optional<T>*)0
    );
}

// Non-throwing variant with explicit policies.
template<class T, class ValueWrapper, class Policies>
boost::optional<T> object_cast_nothrow(ValueWrapper const& value_wrapper, Policies const&)
{
    return detail::object_cast_aux(
        value_wrapper
      , (T*)0
      , (Policies*)0
      , (detail::nothrow_error_policy<T>*)0
      , (boost::optional<T>*)0
    );
}
namespace detail
{
// Recursively pushes each element of a boost::tuple of argument pointers
// onto the Lua stack, converting through the policy for parameter slot
// `Index`.  Recursion terminates on the tuple's null_type tail.
template<int Index>
struct push_args_from_tuple
{
    template<class H, class T, class Policies>
    inline static void apply(lua_State* L, const boost::tuples::cons<H, T>& x, const Policies& p)
    {
        convert_to_lua_p<Index>(L, *x.get_head(), p);
        push_args_from_tuple<Index+1>::apply(L, x.get_tail(), p);
    }

    // Policy-less variant using the default conversion.
    template<class H, class T>
    inline static void apply(lua_State* L, const boost::tuples::cons<H, T>& x)
    {
        convert_to_lua(L, *x.get_head());
        push_args_from_tuple<Index+1>::apply(L, x.get_tail());
    }

    // Base cases: empty tail, nothing left to push.
    template<class Policies>
    inline static void apply(lua_State*, const boost::tuples::null_type&, const Policies&) {}

    inline static void apply(lua_State*, const boost::tuples::null_type&) {}
};
} // namespace detail
namespace adl
{
// Deferred Lua function call.  obj(args...) returns a call_proxy that
// captures the callee and pointers to the arguments; the call executes
// when the proxy converts to object, is indexed with a policy list, or
// -- if the result is ignored -- when the proxy is destroyed.
template<class ValueWrapper, class Arguments>
struct call_proxy
{
    call_proxy(ValueWrapper& value_wrapper, Arguments arguments)
      : value_wrapper(&value_wrapper)
      , arguments(arguments)
    {}

    // Ownership-transferring copy: only one proxy ever fires the call.
    call_proxy(call_proxy const& other)
      : value_wrapper(other.value_wrapper)
      , arguments(other.arguments)
    {
        other.value_wrapper = 0;
    }

    // Fire the call if nobody consumed the proxy.
    ~call_proxy()
    {
        if (value_wrapper)
            call((detail::null_type*)0);
    }

    operator object()
    {
        return call((detail::null_type*)0);
    }

    // proxy[policies] executes the call under the given policy list.
    template<class Policies>
    object operator[](Policies const&)
    {
        return call((Policies*)0);
    }

    // Push callee and arguments, pcall expecting one result, and wrap
    // that result; raises luabind::error (or runs the registered
    // callback and terminates) on a Lua error.
    template<class Policies>
    object call(Policies*)
    {
        lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
            *value_wrapper
        );

        value_wrapper_traits<ValueWrapper>::unwrap(
            interpreter
          , *value_wrapper
        );

        // Mark the call as fired so the destructor stays quiet.
        value_wrapper = 0;

        detail::push_args_from_tuple<1>::apply(interpreter, arguments, Policies());

        if (detail::pcall(interpreter, boost::tuples::length<Arguments>::value, 1))
        {
#ifndef LUABIND_NO_EXCEPTIONS
            throw luabind::error(interpreter);
#else
            error_callback_fun e = get_error_callback();
            if (e) e(interpreter);

            assert(0 && "the lua function threw an error and exceptions are disabled."
                "if you want to handle this error use luabind::set_error_callback()");
            std::terminate();
#endif
        }

        detail::stack_pop pop(interpreter, 1);
        return object(from_stack(interpreter, -1));
    }

    // mutable pointer: zeroed when the call has been (or will be) fired.
    mutable ValueWrapper* value_wrapper;
    Arguments arguments;
};
// Zero-argument call; declared inside object_interface, defined here
// where call_proxy is complete.
template<class Derived>
call_proxy<Derived, boost::tuples::tuple<> >
object_interface<Derived>::operator()()
{
    return call_proxy<Derived, boost::tuples::tuple<> >(
        derived()
      , boost::tuples::tuple<>()
    );
}

// Simple value_wrapper adaptor with the sole purpose of helping with
// overload resolution. Use this as a function parameter type instead
// of "object" or "argument" to restrict the parameter to Lua tables.
template <class Base = object>
struct table : Base
{
    table(from_stack const& stack_reference)
      : Base(stack_reference)
    {}
};
} // namespace adl
using adl::table;
// table<Base> behaves like Base, but its check() additionally requires
// the value to be a Lua table (used during overload resolution).
template <class Base>
struct value_wrapper_traits<adl::table<Base> >
  : value_wrapper_traits<Base>
{
    static bool check(lua_State* L, int idx)
    {
        return value_wrapper_traits<Base>::check(L, idx) &&
            lua_istable(L, idx);
    }
};
// Create a fresh empty table and return an owning reference to it.
inline object newtable(lua_State* interpreter)
{
    lua_newtable(interpreter);
    detail::stack_pop pop(interpreter, 1);
    return object(from_stack(interpreter, -1));
}

// this could be optimized by returning a proxy
// The globals table (via the Lua 5.1 LUA_GLOBALSINDEX pseudo-index).
inline object globals(lua_State* interpreter)
{
    lua_pushvalue(interpreter, LUA_GLOBALSINDEX);
    detail::stack_pop pop(interpreter, 1);
    return object(from_stack(interpreter, -1));
}

// this could be optimized by returning a proxy
// The Lua registry table.
inline object registry(lua_State* interpreter)
{
    lua_pushvalue(interpreter, LUA_REGISTRYINDEX);
    detail::stack_pop pop(interpreter, 1);
    return object(from_stack(interpreter, -1));
}
// table[key], respecting metamethods; returns an owning object.
template<class ValueWrapper, class K>
inline object gettable(ValueWrapper const& table, K const& key)
{
    lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
        table
    );

    value_wrapper_traits<ValueWrapper>::unwrap(interpreter, table);
    detail::stack_pop pop(interpreter, 2); // pops the table and the result
    detail::push(interpreter, key);
    lua_gettable(interpreter, -2);         // replaces key with table[key]
    return object(from_stack(interpreter, -1));
}

// table[key] = value, respecting metamethods.
template<class ValueWrapper, class K, class T>
inline void settable(ValueWrapper const& table, K const& key, T const& value)
{
    lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
        table
    );

    // TODO: Exception safe?
    value_wrapper_traits<ValueWrapper>::unwrap(interpreter, table);
    detail::stack_pop pop(interpreter, 1); // pops the table
    detail::push(interpreter, key);
    detail::push(interpreter, value);
    lua_settable(interpreter, -3);         // consumes key and value
}
// Look up table[key] WITHOUT invoking metamethods (raw access) and
// return the result as an object. Counterpart of gettable() above.
template<class ValueWrapper, class K>
inline object rawget(ValueWrapper const& table, K const& key)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
table
);
// Push the table (1 slot); lua_rawget replaces the pushed key with the
// fetched value, so the guard pops 2 slots: table + value.
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, table);
detail::stack_pop pop(interpreter, 2);
detail::push(interpreter, key);
lua_rawget(interpreter, -2);
return object(from_stack(interpreter, -1));
}
// Perform table[key] = value WITHOUT invoking metamethods (raw access).
// Counterpart of settable() above.
template<class ValueWrapper, class K, class T>
inline void rawset(ValueWrapper const& table, K const& key, T const& value)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
table
);
// TODO: Exception safe?
// Push the table (1 slot). lua_rawset consumes key and value, so only
// the table itself remains for the guard to pop.
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, table);
detail::stack_pop pop(interpreter, 1);
detail::push(interpreter, key);
detail::push(interpreter, value);
lua_rawset(interpreter, -3);
}
template<class ValueWrapper>
inline int type(ValueWrapper const& value)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
value
);
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, value);
detail::stack_pop pop(interpreter, 1);
return lua_type(interpreter, -1);
}
// Return the metatable of obj as an object.
// NOTE(review): the guard pops 2 slots (obj + metatable), but if obj has
// no metatable, lua_getmetatable pushes nothing and returns 0, in which
// case one slot too many would be popped -- confirm callers only use
// this on values that actually carry a metatable.
template <class ValueWrapper>
inline object getmetatable(ValueWrapper const& obj)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
obj
);
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, obj);
detail::stack_pop pop(interpreter, 2);
lua_getmetatable(interpreter, -1);
return object(from_stack(interpreter, -1));
}
// Install `metatable` as the metatable of `obj`.
// Both wrappers must refer to the same lua_State; the interpreter is
// taken from `obj`.
template <class ValueWrapper1, class ValueWrapper2>
inline void setmetatable(
ValueWrapper1 const& obj, ValueWrapper2 const& metatable)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper1>::interpreter(
obj
);
// Push obj (1 slot); lua_setmetatable consumes the pushed metatable,
// leaving just obj for the guard to pop.
value_wrapper_traits<ValueWrapper1>::unwrap(interpreter, obj);
detail::stack_pop pop(interpreter, 1);
value_wrapper_traits<ValueWrapper2>::unwrap(interpreter, metatable);
lua_setmetatable(interpreter, -2);
}
// Extract the raw lua_CFunction stored in `value`. The wrapped value is
// pushed, converted, then removed from the stack by the guard.
template <class ValueWrapper>
inline lua_CFunction tocfunction(ValueWrapper const& value)
{
    lua_State* L = value_wrapper_traits<ValueWrapper>::interpreter(value);
    value_wrapper_traits<ValueWrapper>::unwrap(L, value);
    detail::stack_pop guard(L, 1);
    return lua_tocfunction(L, -1);
}
template <class T, class ValueWrapper>
inline T* touserdata(ValueWrapper const& value)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
value
);
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, value);
detail::stack_pop pop(interpreter, 1);
return static_cast<T*>(lua_touserdata(interpreter, -1));
}
// Return the `index`-th upvalue of the function wrapped by `value`.
// NOTE(review): the guard pops 2 slots (function + upvalue); if `index`
// is out of range, lua_getupvalue pushes nothing and returns NULL, in
// which case one slot too many would be popped -- confirm callers pass
// valid indices.
template <class ValueWrapper>
inline object getupvalue(ValueWrapper const& value, int index)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper>::interpreter(
value
);
value_wrapper_traits<ValueWrapper>::unwrap(interpreter, value);
detail::stack_pop pop(interpreter, 2);
lua_getupvalue(interpreter, -1, index);
return object(from_stack(interpreter, -1));
}
// Assign `value` to the `index`-th upvalue of the wrapped `function`.
// Both wrappers must refer to the same lua_State; the interpreter is
// taken from `function`.
template <class ValueWrapper1, class ValueWrapper2>
inline void setupvalue(
ValueWrapper1 const& function, int index, ValueWrapper2 const& value)
{
lua_State* interpreter = value_wrapper_traits<ValueWrapper1>::interpreter(
function
);
// Push the function (1 slot); lua_setupvalue consumes the pushed
// value, leaving just the function for the guard to pop.
value_wrapper_traits<ValueWrapper1>::unwrap(interpreter, function);
detail::stack_pop pop(interpreter, 1);
value_wrapper_traits<ValueWrapper2>::unwrap(interpreter, value);
lua_setupvalue(interpreter, -2, index);
}
// Build a read-only property object from a getter. The property_tag
// closure captures two upvalues: the getter and a nil placeholder in
// the setter slot, which marks the property as having no setter.
template <class GetValueWrapper>
object property(GetValueWrapper const& get)
{
lua_State* interpreter = value_wrapper_traits<GetValueWrapper>::interpreter(
get
);
value_wrapper_traits<GetValueWrapper>::unwrap(interpreter, get);
lua_pushnil(interpreter);
// lua_pushcclosure consumes both upvalues and pushes the closure (1
// slot), which the guard removes after the object copies it.
lua_pushcclosure(interpreter, &detail::property_tag, 2);
detail::stack_pop pop(interpreter, 1);
return object(from_stack(interpreter, -1));
}
// Build a read/write property object from a getter and a setter; both
// are captured as the two upvalues of the property_tag closure.
// Both wrappers must refer to the same lua_State (taken from `get`).
template <class GetValueWrapper, class SetValueWrapper>
object property(GetValueWrapper const& get, SetValueWrapper const& set)
{
lua_State* interpreter = value_wrapper_traits<GetValueWrapper>::interpreter(
get
);
value_wrapper_traits<GetValueWrapper>::unwrap(interpreter, get);
value_wrapper_traits<SetValueWrapper>::unwrap(interpreter, set);
// lua_pushcclosure consumes both upvalues and pushes the closure (1
// slot), which the guard removes after the object copies it.
lua_pushcclosure(interpreter, &detail::property_tag, 2);
detail::stack_pop pop(interpreter, 1);
return object(from_stack(interpreter, -1));
}
} // namespace luabind
#endif // LUABIND_OBJECT_050419_HPP
|
# Data Wrangling in R
# Social Security Disability Case Study
# Load the tidyverse (readr/dplyr/tidyr) plus date and string helpers
library(tidyverse)
library(lubridate)
library(stringr)
# Read in the Social Security disability applications dataset
# (one row per fiscal year, one column per month; fetched over HTTP)
ssa <- read_csv("http://594442.youcanlearnit.net/ssadisability.csv")
# Take a look at how this was imported
glimpse(ssa)
# Make the dataset long: collapse the month columns into month/applications
# pairs, keeping Fiscal_Year as the identifier
ssa_long <- gather(ssa, month, applications, -Fiscal_Year)
# And what do we get?
print(ssa_long, n=20)
|
State Before: k l x : ℕ
w : 0 < x + 1
⊢ (x + 1) ^ k ∣ (x + 1) ^ l ↔ (x + 1) ^ k ≤ (x + 1) ^ l State After: case mp
k l x : ℕ
w : 0 < x + 1
⊢ (x + 1) ^ k ∣ (x + 1) ^ l → (x + 1) ^ k ≤ (x + 1) ^ l
case mpr
k l x : ℕ
w : 0 < x + 1
⊢ (x + 1) ^ k ≤ (x + 1) ^ l → (x + 1) ^ k ∣ (x + 1) ^ l Tactic: constructor State Before: case mp
k l x : ℕ
w : 0 < x + 1
⊢ (x + 1) ^ k ∣ (x + 1) ^ l → (x + 1) ^ k ≤ (x + 1) ^ l State After: case mp
k l x : ℕ
w : 0 < x + 1
a : (x + 1) ^ k ∣ (x + 1) ^ l
⊢ (x + 1) ^ k ≤ (x + 1) ^ l Tactic: intro a State Before: case mp
k l x : ℕ
w : 0 < x + 1
a : (x + 1) ^ k ∣ (x + 1) ^ l
⊢ (x + 1) ^ k ≤ (x + 1) ^ l State After: no goals Tactic: exact le_of_dvd (pow_pos (succ_pos x) l) a State Before: case mpr
k l x : ℕ
w : 0 < x + 1
⊢ (x + 1) ^ k ≤ (x + 1) ^ l → (x + 1) ^ k ∣ (x + 1) ^ l State After: case mpr
k l x : ℕ
w : 0 < x + 1
a : (x + 1) ^ k ≤ (x + 1) ^ l
⊢ (x + 1) ^ k ∣ (x + 1) ^ l Tactic: intro a State Before: case mpr
k l x : ℕ
w : 0 < x + 1
a : (x + 1) ^ k ≤ (x + 1) ^ l
⊢ (x + 1) ^ k ∣ (x + 1) ^ l State After: case mpr.zero
k l : ℕ
w : 0 < zero + 1
a : (zero + 1) ^ k ≤ (zero + 1) ^ l
⊢ (zero + 1) ^ k ∣ (zero + 1) ^ l
case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
⊢ (succ x + 1) ^ k ∣ (succ x + 1) ^ l Tactic: cases' x with x State Before: case mpr.zero
k l : ℕ
w : 0 < zero + 1
a : (zero + 1) ^ k ≤ (zero + 1) ^ l
⊢ (zero + 1) ^ k ∣ (zero + 1) ^ l State After: no goals Tactic: simp State Before: case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
⊢ (succ x + 1) ^ k ∣ (succ x + 1) ^ l State After: case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
le : k ≤ l
⊢ (succ x + 1) ^ k ∣ (succ x + 1) ^ l Tactic: have le := (pow_le_iff_le_right (Nat.le_add_left _ _)).mp a State Before: case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
le : k ≤ l
⊢ (succ x + 1) ^ k ∣ (succ x + 1) ^ l State After: case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
le : k ≤ l
⊢ (succ x + 1) ^ l = (succ x + 1) ^ k * (x + 2) ^ (l - k) Tactic: use (x + 2) ^ (l - k) State Before: case mpr.succ
k l x : ℕ
w : 0 < succ x + 1
a : (succ x + 1) ^ k ≤ (succ x + 1) ^ l
le : k ≤ l
⊢ (succ x + 1) ^ l = (succ x + 1) ^ k * (x + 2) ^ (l - k) State After: no goals Tactic: rw [← pow_add, add_comm k, tsub_add_cancel_of_le le] |
= = Central Park Zoo conditions = =
|
lemma multiplicity_characterization'_nat: "finite {p. 0 < f (p::nat)} \<longrightarrow> (\<forall>p. 0 < f p \<longrightarrow> prime p) \<longrightarrow> prime p \<longrightarrow> multiplicity p (\<Prod>p | 0 < f p. p ^ f p) = f p" |
(* Title: SSA_CFG_code.thy
Author: Denis Lohner, Sebastian Ullrich
*)
theory SSA_CFG_code imports
SSA_CFG
Mapping_Exts
"HOL-Library.Product_Lexorder"
begin
(* Union_of f A is the union of the images f x for x in A; the alternative
   form below is the one used when deriving executable code equations. *)
definition Union_of :: "('a \<Rightarrow> 'b set) \<Rightarrow> 'a set \<Rightarrow> 'b set"
where "Union_of f A \<equiv> \<Union>(f ` A)"
lemma Union_of_alt_def: "Union_of f A = (\<Union>x \<in> A. f x)"
unfolding Union_of_def by simp
(* Executable phi-function map: keyed by (node, defined value), giving the
   list of phi arguments. *)
type_synonym ('node, 'val) phis_code = "('node \<times> 'val, 'val list) mapping"
(* Executable computation of the use sites of each variable: useNodes_of g
   maps a variable to the set of nodes in which it is used. *)
context CFG_base begin
(* addN g n: record node n as a use site of every variable used at n.
   Folding over a sorted list keeps the result independent of set order. *)
definition addN :: "'g \<Rightarrow> 'node \<Rightarrow> ('var, 'node set) mapping \<Rightarrow> ('var, 'node set) mapping"
where "addN g n \<equiv> fold (\<lambda>v. Mapping.map_default v {} (insert n)) (sorted_list_of_set (uses g n))"
(* addN' is the raw-function counterpart of addN, used for transfer. *)
definition "addN' g n = fold (\<lambda>v m. m(v \<mapsto> case_option {n} (insert n) (m v))) (sorted_list_of_set (uses g n))"
lemma addN_transfer [transfer_rule]:
"rel_fun (=) (rel_fun (=) (rel_fun (pcr_mapping (=) (=)) (pcr_mapping (=) (=)))) addN' addN"
unfolding addN_def [abs_def] addN'_def [abs_def]
Mapping.map_default_def [abs_def] Mapping.default_def
apply (auto simp: mapping.pcr_cr_eq rel_fun_def cr_mapping_def)
apply transfer
apply (rule fold_cong)
apply simp
apply simp
apply (intro ext)
by auto
definition "useNodes_of g = fold (addN g) (\<alpha>n g) Mapping.empty"
lemmas useNodes_of_code = useNodes_of_def [unfolded addN_def [abs_def]]
declare useNodes_of_code [code]
(* Characterization of lookups in useNodes_of, assuming finite use sets. *)
lemma lookup_useNodes_of':
assumes [simp]: "\<And>n. finite (uses g n)"
shows "Mapping.lookup (useNodes_of g) v =
(if (\<exists>n \<in> set (\<alpha>n g). v \<in> uses g n) then Some {n \<in> set (\<alpha>n g). v \<in> uses g n} else None)"
proof -
{ fix m n xs v
have "Mapping.lookup (fold (\<lambda>v. Mapping.map_default (v::'var) {} (insert (n::'node))) xs m) v=
(case Mapping.lookup m v of None \<Rightarrow> (if v \<in> set xs then Some {n} else None)
| Some N \<Rightarrow> (if v \<in> set xs then Some (insert n N) else Some N))"
by (induction xs arbitrary: m) (auto simp: Mapping_lookup_map_default split: option.splits)
}
note addN_conv = this [of n "sorted_list_of_set (uses g n)" for g n, folded addN_def, simplified]
{ fix xs m v
have "Mapping.lookup (fold (addN g) xs m) v = (case Mapping.lookup m v of None \<Rightarrow> if (\<exists>n\<in>set xs. v \<in> uses g n) then Some {n\<in>set xs. v \<in> uses g n} else None
| Some N \<Rightarrow> Some ({n\<in>set xs. v \<in> uses g n} \<union> N))"
by (induction xs arbitrary: m) (auto split: option.splits simp: addN_conv)
}
note this [of "\<alpha>n g" Mapping.empty, simp]
show ?thesis unfolding useNodes_of_def
by (auto split: option.splits simp: lookup_empty)
qed
end
(* Abstract (lifted) definition of the use-site map, shown equal to the
   executable fold-based useNodes_of above. *)
context CFG begin
lift_definition useNodes_of' :: "'g \<Rightarrow> ('var, 'node set) mapping"
is "\<lambda>g v. if (\<exists>n \<in> set (\<alpha>n g). v \<in> uses g n) then Some {n \<in> set (\<alpha>n g). v \<in> uses g n} else None" .
lemma useNodes_of': "useNodes_of' = useNodes_of"
proof
fix g
{ fix m n xs
have "fold (\<lambda>v m. m(v::'var \<mapsto> case m v of None \<Rightarrow> {n::'node} | Some x \<Rightarrow> insert n x)) xs m =
(\<lambda>v. case m v of None \<Rightarrow> (if v \<in> set xs then Some {n} else None)
| Some N \<Rightarrow> (if v \<in> set xs then Some (insert n N) else Some N))"
by (induction xs arbitrary: m)(auto split: option.splits)
}
note addN'_conv = this [of n "sorted_list_of_set (uses g n)" for g n, folded addN'_def, simplified]
{ fix xs m
have "fold (addN' g) xs m = (\<lambda>v. case m v of None \<Rightarrow> if (\<exists>n\<in>set xs. v \<in> uses g n) then Some {n\<in>set xs. v \<in> uses g n} else None
| Some N \<Rightarrow> Some ({n\<in>set xs. v \<in> uses g n} \<union> N))"
by (induction xs arbitrary: m) (auto 4 4 split: option.splits if_splits simp: addN'_conv intro!: ext)
}
note this [of "\<alpha>n g" Map.empty, simp]
show "useNodes_of' g = useNodes_of g"
unfolding mmap_def useNodes_of_def
by (transfer fixing: g) auto
qed
(* Re-export the transfer rule in terms of the executable constant. *)
declare useNodes_of'.transfer [unfolded useNodes_of', transfer_rule]
end
(* Executable maps over the phi functions: which node defines each phi
   value (phidefNodes) and at which (node, value) pairs a value occurs as
   a phi argument (phiNodes_of). *)
context CFG_SSA_base begin
(* phis_addN g n: record phi-entry n as a use site of each of its args. *)
definition phis_addN
where "phis_addN g n = fold (\<lambda>v. Mapping.map_default v {} (insert n)) (case_option [] id (phis g n))"
definition phidefNodes where [code]:
"phidefNodes g = fold (\<lambda>(n,v). Mapping.update v n) (sorted_list_of_set (dom (phis g))) Mapping.empty"
(* The keys of phidefNodes are exactly the phi-defined values. *)
lemma keys_phidefNodes:
assumes "finite (dom (phis g))"
shows "Mapping.keys (phidefNodes g) = snd ` dom (phis g)"
proof -
{ fix xs m x
have "fold (\<lambda>(a,b) m. m(b \<mapsto> a)) (xs::('node \<times> 'val) list) m x = (if x \<in> snd ` set xs then (Some \<circ> fst) (last [(b,a)\<leftarrow>xs. a = x]) else m x)"
by (induction xs arbitrary: m) (auto split: if_splits simp: filter_empty_conv intro: rev_image_eqI)
}
from this [of "sorted_list_of_set (dom (phis g))" Map.empty] assms
show ?thesis
unfolding phidefNodes_def keys_dom_lookup
by (transfer fixing: g phis) (auto simp: dom_def intro: rev_image_eqI)
qed
definition phiNodes_of :: "'g \<Rightarrow> ('val, ('node \<times> 'val) set) mapping"
where "phiNodes_of g = fold (phis_addN g) (sorted_list_of_set (dom (phis g))) Mapping.empty"
(* Characterization of lookups in phiNodes_of for finite phi domains. *)
lemma lookup_phiNodes_of:
assumes [simp]: "finite (dom (phis g))"
shows "Mapping.lookup (phiNodes_of g) v =
(if (\<exists>n \<in> dom (phis g). v \<in> set (the (phis g n))) then Some {n \<in> dom (phis g). v \<in> set (the (phis g n))} else None)"
proof -
{
fix m n xs v
have "Mapping.lookup (fold (\<lambda>v. Mapping.map_default v {} (insert (n::'node \<times> 'val))) xs (m::('val, ('node \<times> 'val) set) mapping)) v =
(case Mapping.lookup m v of None \<Rightarrow> (if v \<in> set xs then Some {n} else None)
| Some N \<Rightarrow> (if v \<in> set xs then Some (insert n N) else Some N))"
by (induction xs arbitrary: m) (auto simp: Mapping_lookup_map_default split: option.splits)
}
note phis_addN_conv = this [of n "case_option [] id (phis g n)" for n, folded phis_addN_def]
{
fix xs m v
have "Mapping.lookup (fold (phis_addN g) xs m) v =
(case Mapping.lookup m v of None \<Rightarrow> if (\<exists>n \<in> set xs. v \<in> set (case_option [] id (phis g n))) then Some {n \<in> set xs. v \<in> set (case_option [] id (phis g n))} else None
| Some N \<Rightarrow> Some ({n \<in> set xs. v \<in> set (case_option [] id (phis g n))} \<union> N))"
by (induction xs arbitrary: m) (auto simp: phis_addN_conv split: option.splits if_splits)+
}
note this [of "sorted_list_of_set (dom (phis g))", simp]
show ?thesis
unfolding phiNodes_of_def
by (force split: option.splits simp: lookup_empty)
qed
lemmas phiNodes_of_code = phiNodes_of_def [unfolded phis_addN_def [abs_def]]
declare phiNodes_of_code [code]
(* phis viewed as a Mapping, for the Lifting/Transfer package. *)
lemma phis_transfer [transfer_rule]:
includes lifting_syntax
shows "((=) ===> pcr_mapping (=) (=)) phis (\<lambda>g. Mapping.Mapping (phis g))"
by (auto simp: mapping.pcr_cr_eq rel_fun_def cr_mapping_def Mapping.Mapping_inverse)
end
(* With finiteness of the phi domain available, turn the lookup
   characterizations into default simp rules. *)
context CFG_SSA begin
declare lookup_phiNodes_of [OF phis_finite, simp]
declare keys_phidefNodes [OF phis_finite, simp]
end
(* Extension of CFG_SSA_base with a generic node-indexed cache, realized
   as a Mapping tabulated over the node list of the graph. *)
locale CFG_SSA_ext_base = CFG_SSA_base \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val set"
and phis :: "'g \<Rightarrow> ('node, 'val) phis"
begin
abbreviation "cache g f \<equiv> Mapping.tabulate (\<alpha>n g) f"
(* Cache lookups agree with the tabulated function on graph nodes. *)
lemma lookup_cache[simp]: "n \<in> set (\<alpha>n g) \<Longrightarrow> Mapping.lookup (cache g f) n = Some (f n)"
by transfer (auto simp: Map.map_of_map_restrict)
lemma lookup_cacheD [dest]: "Mapping.lookup (cache g f) x = Some y \<Longrightarrow> y = f x"
by transfer (auto simp: Map.map_of_map_restrict restrict_map_def split: if_splits)
lemma lookup_cache_usesD: "Mapping.lookup (cache g (uses g)) n = Some vs \<Longrightarrow> vs = uses g n"
by blast
end
(* usesOf turns a Mapping of use sets into a total function, defaulting
   to the empty set for absent keys. *)
definition[simp]: "usesOf m n \<equiv> case_option {} id (Mapping.lookup m n)"
(* CFG_SSA_ext combines the cache machinery with the CFG_SSA assumptions;
   in particular, caching the use sets is transparent (usesOf_cache). *)
locale CFG_SSA_ext = CFG_SSA_ext_base \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+ CFG_SSA \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val set"
and phis :: "'g \<Rightarrow> ('node, 'val) phis"
begin
lemma usesOf_cache[abs_def, simp]: "usesOf (cache g (uses g)) n = uses g n"
by (auto simp: uses_in_\<alpha>n dest: lookup_cache_usesD split: option.split)
end
(* Code-generation variant of CFG_SSA_base: uses and phis are stored as
   Mappings, and the derived notions get [code] equations. *)
locale CFG_SSA_base_code = CFG_SSA_ext_base \<alpha>e \<alpha>n invar inEdges' Entry "defs" "usesOf \<circ> uses" "\<lambda>g. Mapping.lookup (phis g)"
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> ('node, 'val set) mapping"
and phis :: "'g \<Rightarrow> ('node, 'val) phis_code"
begin
declare phis_transfer [simplified, transfer_rule]
(* Executable phiDefs: the phi-defined values at node n are the second
   components of the phi keys whose node component is n. *)
lemma phiDefs_code [code]:
"phiDefs g n = snd ` Set.filter (\<lambda>(n',v). n' = n) (Mapping.keys (phis g))"
unfolding phiDefs_def
by transfer (auto 4 3 intro: rev_image_eqI simp: Set.filter_def)
lemmas phiUses_code [code] = phiUses_def [folded Union_of_alt_def]
declare allUses_def [code]
lemmas allVars_code [code] = allVars_def [folded Union_of_alt_def]
end
(* CFG_SSA_code: the executable representation together with the full
   CFG_SSA_ext assumptions (no new body; the combination is the point). *)
locale CFG_SSA_code = CFG_SSA_base_code \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+ CFG_SSA_ext \<alpha>e \<alpha>n invar inEdges' Entry "defs" "usesOf \<circ> uses" "\<lambda>g. Mapping.lookup (phis g)"
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> ('node, 'val set) mapping"
and phis :: "'g \<Rightarrow> ('node, 'val) phis_code"
(* the_trivial v vs: single-pass decision procedure for trivial phis.
   Scans the argument list vs; arguments equal to v are ignored, and the
   fold tracks (good, v') where v' is the first other value seen and good
   records that no second distinct other value occurred.  Returns Some v'
   iff vs mentions exactly one value besides v (i.e. the phi is trivial). *)
definition "the_trivial v vs = (case (foldl (\<lambda>(good,v') w. if w = v then (good,v')
else case v' of Some v' \<Rightarrow> (good \<and> w = v', Some v')
| None \<Rightarrow> (good, Some w))
(True, None) vs)
of (False, _) \<Rightarrow> None | (True,v) \<Rightarrow> v)"
lemma the_trivial_Nil [simp]: "the_trivial x [] = None"
unfolding the_trivial_def by simp
(* Introduction rule: if vs only mentions v and one other value v'. *)
lemma the_trivialI:
assumes "set vs \<subseteq> {v, v'}"
and "v' \<noteq> v"
shows "the_trivial v vs = (if set vs \<subseteq> {v} then None else Some v')"
proof -
{ fix vx
have "\<lbrakk> set vs \<subseteq> {v, v'}; v' \<noteq> v; vx \<in> {None, Some v'} \<rbrakk>
\<Longrightarrow> (case foldl (\<lambda>(good, v') w.
if w = v then (good, v')
else case v' of None \<Rightarrow> (good, Some w) | Some v' \<Rightarrow> (good \<and> w = v', Some v'))
(True, vx) vs of
(True, x) \<Rightarrow> x | (False, x) \<Rightarrow> None) = (if set vs \<subseteq> {v} then vx else Some v')"
by (induction vs arbitrary: vx; case_tac vx; auto)
}
with assms show ?thesis unfolding the_trivial_def by simp
qed
(* Declarative characterization of the_trivial via a definite description. *)
lemma the_trivial_conv:
shows "the_trivial v vs = (if \<exists>v' \<in> set vs. v' \<noteq> v \<and> set vs - {v'} \<subseteq> {v} then Some (THE v'. v' \<in> set vs \<and> v' \<noteq> v \<and> set vs - {v'} \<subseteq> {v}) else None)"
proof -
{ fix b a vs
have "a \<noteq> v
\<Longrightarrow> foldl (\<lambda>(good, v') w.
if w = v then (good, v')
else case v' of None \<Rightarrow> (good, Some w) | Some v' \<Rightarrow> (good \<and> w = v', Some v'))
(b, Some a) vs =
(b \<and> set vs \<subseteq> {v, a}, Some a)"
by (induction vs arbitrary: b; clarsimp)
}
note this[simp]
{ fix b vx
have "\<lbrakk> vx \<in> insert None (Some ` set vs); case_option True (\<lambda>vx. vx \<noteq> v) vx \<rbrakk>
\<Longrightarrow> foldl (\<lambda>(good, v') w.
if w = v then (good, v')
else case v' of None \<Rightarrow> (good, Some w) | Some v' \<Rightarrow> (good \<and> w = v', Some v'))
(b, vx) vs = (b \<and> (case vx of Some w \<Rightarrow> set vs \<subseteq> {v, w} | None \<Rightarrow> \<exists>w. set vs \<subseteq> {v, w}),
(case vx of Some w \<Rightarrow> Some w | None \<Rightarrow> if (\<exists>v'\<in>set vs. v' \<noteq> v) then Some (hd (filter (\<lambda>v'. v' \<noteq> v) vs)) else None))"
by (induction vs arbitrary: b vx; auto)
}
hence "the_trivial v vs = (if \<exists>v' \<in> set vs. v' \<noteq> v \<and> set vs - {v'} \<subseteq> {v} then Some (hd (filter (\<lambda>v'. v' \<noteq> v) vs)) else None)"
unfolding the_trivial_def by (auto split: bool.splits)
thus ?thesis
apply (auto split: if_splits)
apply (rule the_equality [THEN sym])
by (thin_tac "P" for P, (induction vs; auto))+
qed
(* Elimination: a Some result means vs is {v'} or {v, v'} with v' ≠ v. *)
lemma the_trivial_SomeE:
assumes "the_trivial v vs = Some v'"
obtains "v \<noteq> v'" and "set vs = {v'}" | "v \<noteq> v'" and "set vs = {v,v'}"
using assms
apply atomize_elim
apply (subst(asm) the_trivial_conv)
apply (split if_splits; simp)
by (subgoal_tac "(THE v'. v' \<in> set vs \<and> v' \<noteq> v \<and> set vs - {v'} \<subseteq> {v}) = hd (filter (\<lambda>v'. v' \<noteq> v) vs)")
(fastforce simp: set_double_filter_hd set_single_hd set_minus_one)+
(* Executable notions of triviality and redundancy, built on the_trivial:
   a phi is trivial_code iff the_trivial finds a witness, and a graph is
   redundant_code iff it contains some trivial phi. *)
locale CFG_SSA_wf_base_code = CFG_SSA_base_code \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+ CFG_SSA_wf_base \<alpha>e \<alpha>n invar inEdges' Entry "defs" "usesOf \<circ> uses" "\<lambda>g. Mapping.lookup (phis g)"
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> ('node, 'val set) mapping"
and phis :: "'g \<Rightarrow> ('node, 'val) phis_code"
begin
definition [code]:
"trivial_code (v::'val) vs = (the_trivial v vs \<noteq> None)"
definition[code]: "trivial_phis g = Set.filter (\<lambda>(n,v). trivial_code v (the (Mapping.lookup (phis g) (n,v)))) (Mapping.keys (phis g))"
definition [code]: "redundant_code g = (trivial_phis g \<noteq> {})"
end
(* Correctness of the executable triviality/redundancy tests: under the
   well-formedness assumptions they coincide with the abstract notions. *)
locale CFG_SSA_wf_code = CFG_SSA_code \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+ CFG_SSA_wf_base_code \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+ CFG_SSA_wf \<alpha>e \<alpha>n invar inEdges' Entry "defs" "usesOf \<circ> uses" "\<lambda>g. Mapping.lookup (phis g)"
for \<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set"
and \<alpha>n :: "'g \<Rightarrow> 'node list"
and invar :: "'g \<Rightarrow> bool"
and inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list"
and Entry :: "'g \<Rightarrow> 'node"
and "defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set"
and "uses" :: "'g \<Rightarrow> ('node, 'val set) mapping"
and phis :: "'g \<Rightarrow> ('node, 'val) phis_code"
begin
(* trivial_code agrees with trivial on actual phi definitions. *)
lemma trivial_code:
"phi g v = Some vs \<Longrightarrow> trivial g v = trivial_code v vs"
unfolding trivial_def trivial_code_def
apply (auto split: option.splits simp: isTrivialPhi_def)
apply (clarsimp simp: the_trivial_conv split: if_splits)
apply (clarsimp simp: the_trivial_conv split: if_splits)
apply (erule the_trivial_SomeE)
apply simp
apply (rule phiArg_in_allVars; auto simp: phiArg_def)
apply (rename_tac v')
apply (rule_tac x=v' in bexI)
apply simp
apply (rule phiArg_in_allVars; auto simp: phiArg_def)
done
(* trivial_phis collects exactly the (node, value) keys of trivial phis. *)
lemma trivial_phis:
"trivial_phis g = {(n,v). Mapping.lookup (phis g) (n,v) \<noteq> None \<and> trivial g v}"
unfolding trivial_phis_def Set.filter_def
apply (auto simp add: phi_def keys_dom_lookup)
apply (subst trivial_code)
apply (auto simp: image_def trivial_in_allVars phis_phi)
apply (frule trivial_phi)
apply (auto simp add: trivial_code phi_def[symmetric] phis_phi)
done
(* redundant_code agrees with the abstract redundancy predicate. *)
lemma redundant_code:
"redundant g = redundant_code g"
unfolding redundant_def redundant_code_def trivial_phis[of g]
apply (auto simp: image_def trivial_in_allVars)
apply (frule trivial_phi)
apply (auto simp: phi_def)
done
(* Triviality is preserved under value substitutions that fix v and do
   not collapse all other arguments onto v. *)
lemma trivial_code_mapI:
"\<lbrakk> trivial_code v vs; f ` (set vs - {v}) \<noteq> {v} ; f v = v \<rbrakk> \<Longrightarrow> trivial_code v (map f vs)"
unfolding trivial_code_def the_trivial_conv
by (auto split: if_splits)
lemma trivial_code_map_conv:
"f v = v \<Longrightarrow> trivial_code v (map f vs) \<longleftrightarrow> (\<exists>v'\<in>set vs. f v' \<noteq> v \<and> (f ` set vs) - {f v'} \<subseteq> {v})"
unfolding trivial_code_def the_trivial_conv
by auto
end
(* Executable variant of CFG_SSA_Transformed.  The extra assumption
   dom_uses_in_graph pins the keys of the uses Mapping to graph nodes,
   which the abstract (function-based) locale could not express. *)
locale CFG_SSA_Transformed_code = ssa: CFG_SSA_wf_code \<alpha>e \<alpha>n invar inEdges' Entry "defs" "uses" phis
+
CFG_SSA_Transformed \<alpha>e \<alpha>n invar inEdges' Entry oldDefs oldUses "defs" "usesOf \<circ> uses" "\<lambda>g. Mapping.lookup (phis g)" var
for
\<alpha>e :: "'g \<Rightarrow> ('node::linorder \<times> 'edgeD \<times> 'node) set" and
\<alpha>n :: "'g \<Rightarrow> 'node list" and
invar :: "'g \<Rightarrow> bool" and
inEdges' :: "'g \<Rightarrow> 'node \<Rightarrow> ('node \<times> 'edgeD) list" and
Entry::"'g \<Rightarrow> 'node" and
oldDefs :: "'g \<Rightarrow> 'node \<Rightarrow> 'var::linorder set" and
oldUses :: "'g \<Rightarrow> 'node \<Rightarrow> 'var set" and
"defs" :: "'g \<Rightarrow> 'node \<Rightarrow> 'val::linorder set" and
"uses" :: "'g \<Rightarrow> ('node, 'val set) mapping" and
phis :: "'g \<Rightarrow> ('node, 'val) phis_code" and
var :: "'g \<Rightarrow> 'val \<Rightarrow> 'var"
+
assumes dom_uses_in_graph: "Mapping.keys (uses g) \<subseteq> set (\<alpha>n g)"
end
|
Most of these great items come from my Twitter feed or Facebook news feed. Follow me on Twitter, Tumblr, and Facebook for more fascinating videos, articles, essays and criticism.
Your blog seems to have a fairly eclectic approach. As regards spending some time with the blues, you must try “East West” by the Paul Butterfield Blues Band and hear the stunning guitar playing of Mike Bloomfield and Elvin Bishop — 13 minutes of pure genius.
Formal statement is: corollary Cauchy_theorem_global_outside: assumes "open S" "f holomorphic_on S" "valid_path \<gamma>" "pathfinish \<gamma> = pathstart \<gamma>" "path_image \<gamma> \<subseteq> S" "\<And>w. w \<notin> S \<Longrightarrow> w \<in> outside(path_image \<gamma>)" shows "(f has_contour_integral 0) \<gamma>" Informal statement is: If $f$ is holomorphic on an open set $S$ and $\gamma$ is a closed path in $S$, then $\int_\gamma f(z) dz = 0$. |
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
CSpace refinement
*)
theory CSpace_AI
imports ArchCSpacePre_AI
begin
(* Pull architecture-specific constants and facts into the generic
   namespace so the rest of the theory can stay architecture-neutral. *)
context begin interpretation Arch .
requalify_consts
irq_state_update
irq_state
final_matters_arch
ups_of_heap
requalify_facts
is_derived_arch_non_arch
ups_of_heap_non_arch_upd
master_arch_cap_obj_refs
master_arch_cap_cap_class
same_aobject_as_commute
arch_derive_cap_inv
loadWord_inv
valid_global_refsD2
arch_derived_is_device
update_cnode_cap_data_def
safe_parent_for_arch_not_arch
safe_parent_cap_range_arch
valid_arch_mdb_simple
set_cap_update_free_index_valid_arch_mdb
set_untyped_cap_as_full_valid_arch_mdb
valid_arch_mdb_updates
safe_parent_arch_is_parent
safe_parent_for_arch_not_arch'
safe_parent_for_arch_no_obj_refs
valid_arch_mdb_same_master_cap
valid_arch_mdb_null_filter
valid_arch_mdb_untypeds
end
declare set_cap_update_free_index_valid_arch_mdb[wp]
(* Proofs don't want to see these details. *)
declare update_cnode_cap_data_def [simp]
(* Simplification rules for membership in the capability badge ordering,
   covering the None/Some 0/general Some cases. *)
lemma capBadge_ordefield_simps[simp]:
"(None, y) \<in> capBadge_ordering fb"
"((y, None) \<in> capBadge_ordering fb) = (y = None)"
"((y, y) \<in> capBadge_ordering fb) = (fb \<longrightarrow> (y = None \<or> y = Some 0))"
"((Some x, Some z) \<in> capBadge_ordering fb) = (x = 0 \<or> (\<not> fb \<and> x = z))"
"(y, Some 0) \<in> capBadge_ordering fb = (y = None \<or> y = Some 0)"
by (simp add: capBadge_ordering_def disj.commute
| simp add: eq_commute image_def
| fastforce)+
(* The badge ordering composes transitively, even across the two flag
   variants (the second relation's flag wins). *)
lemma capBadge_ordering_trans:
"\<lbrakk> (x, y) \<in> capBadge_ordering v; (y, z) \<in> capBadge_ordering v2 \<rbrakk>
\<Longrightarrow> (x, z) \<in> capBadge_ordering v2"
by (auto simp: capBadge_ordering_def split: if_split_asm)
(* A predicate is irq_state independent if it is preserved by arbitrary
   updates of the machine's irq_state counter. *)
definition "irq_state_independent_A (P :: 'z state \<Rightarrow> bool) \<equiv>
\<forall>(f :: nat \<Rightarrow> nat) (s :: 'z state). P s \<longrightarrow> P (s\<lparr>machine_state := machine_state s
\<lparr>irq_state := f (irq_state (machine_state s))\<rparr>\<rparr>)"
lemma irq_state_independent_AI[intro!, simp]:
"\<lbrakk>\<And>s f. P (s\<lparr>machine_state := machine_state s
\<lparr>irq_state := f (irq_state (machine_state s))\<rparr>\<rparr>) = P s\<rbrakk>
\<Longrightarrow> irq_state_independent_A P"
by (simp add: irq_state_independent_A_def)
(* FIXME: Move. *)
(* irq_state independence is closed under both forms of conjunction. *)
lemma irq_state_independent_A_conjI[intro!]:
"\<lbrakk>irq_state_independent_A P; irq_state_independent_A Q\<rbrakk>
\<Longrightarrow> irq_state_independent_A (P and Q)"
"\<lbrakk>irq_state_independent_A P; irq_state_independent_A Q\<rbrakk>
\<Longrightarrow> irq_state_independent_A (\<lambda>s. P s \<and> Q s)"
by (auto simp: irq_state_independent_A_def)
(* FIXME: move *)
(* Writing back the machine state just read is a no-op in the monad. *)
lemma gets_machine_state_modify:
"do x \<leftarrow> gets machine_state;
u \<leftarrow> modify (machine_state_update (\<lambda>y. x));
f x
od =
gets machine_state >>= f"
by (simp add: bind_def split_def simpler_gets_def simpler_modify_def)
(* Architecture interface: getActiveIRQ preserves every irq_state
   independent predicate.  Instantiated per architecture. *)
locale CSpace_AI_getActiveIRQ_wp =
fixes state_ext_t :: "'state_ext::state_ext itself"
assumes getActiveIRQ_wp[wp]:
"\<And>P :: 'state_ext state \<Rightarrow> bool.
irq_state_independent_A P \<Longrightarrow> valid P (do_machine_op (getActiveIRQ in_kernel)) (\<lambda>_. P)"
(* A Hoare triple for the nondeterministic alternative also covers the
   deterministic OR_choiceE selection. *)
lemma OR_choiceE_weak_wp:
"\<lbrace>P\<rbrace> f \<sqinter> g \<lbrace>Q\<rbrace> \<Longrightarrow> \<lbrace>P\<rbrace> OR_choiceE b f g \<lbrace>Q\<rbrace>"
apply (fastforce simp add: OR_choiceE_def alternative_def valid_def bind_def
select_f_def gets_def return_def get_def liftE_def lift_def bindE_def
split: option.splits if_split_asm)
done
context CSpace_AI_getActiveIRQ_wp begin
(* preemption_point preserves predicates that are both irq_state
   independent and invariant under trans_state updates. *)
lemma preemption_point_inv:
fixes P :: "'state_ext state \<Rightarrow> bool"
shows
"\<lbrakk>irq_state_independent_A P; \<And>f s. P (trans_state f s) = P s\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> preemption_point \<lbrace>\<lambda>_. P\<rbrace>"
apply (intro impI conjI | simp add: preemption_point_def o_def
| wp hoare_post_imp[OF _ getActiveIRQ_wp] OR_choiceE_weak_wp alternative_wp[where P=P]
| wpc)+
done
end
(* Under valid_objs, any capability read from a slot is itself valid. *)
lemma get_cap_valid [wp]:
"\<lbrace> valid_objs \<rbrace> get_cap addr \<lbrace> valid_cap \<rbrace>"
apply (wp get_cap_wp)
apply (auto dest: cte_wp_at_valid_objs_valid_cap)
done
(* Well-formedness follows from validity (valid_cap_def2). *)
lemma get_cap_wellformed:
"\<lbrace>valid_objs\<rbrace> get_cap slot \<lbrace>\<lambda>cap s. wellformed_cap cap\<rbrace>"
apply (rule hoare_strengthen_post, rule get_cap_valid)
apply (simp add: valid_cap_def2)
done
(* Updating the CDT preserves valid_mdb if the updated CDT already does. *)
lemma update_cdt_cdt:
"\<lbrace>\<lambda>s. valid_mdb (cdt_update (\<lambda>_. (m (cdt s))) s)\<rbrace> update_cdt m \<lbrace>\<lambda>_. valid_mdb\<rbrace>"
by (simp add: update_cdt_def set_cdt_def) wp
(* FIXME: rename *)
(* Quantifier reshuffling: eliminate the existential witness. *)
lemma unpleasant_helper:
"(\<forall>a b. (\<exists>c. a = f c \<and> b = g c \<and> P c) \<longrightarrow> Q a b) = (\<forall>c. P c \<longrightarrow> Q (f c) (g c))"
by blast
(* get_object is deterministic and does not change the state. *)
lemma get_object_det:
"(r,s') \<in> fst (get_object p s) \<Longrightarrow> get_object p s = ({(r,s)}, False)"
by (auto simp: in_monad get_object_def bind_def gets_def get_def return_def)
(* Any property of a fetched object holds as an obj_at assertion. *)
lemma get_object_at_obj:
"\<lbrakk> (r,s') \<in> fst (get_object p s); P r \<rbrakk> \<Longrightarrow> obj_at P p s"
by (auto simp: get_object_def obj_at_def in_monad)
(* A successful get_cap witnesses that the slot is a real CTE. *)
lemma get_cap_cte_at:
"(r,s') \<in> fst (get_cap p s) \<Longrightarrow> cte_at p s"
unfolding cte_at_def by (auto dest: get_cap_det)
(* resolve_address_bits returns a slot to which a P-satisfying cap exists,
   provided the starting CNode cap's cte_refs are all so covered and P holds
   of every CNode cap.  Proved by induction on the recursion structure of
   resolve_address_bits' (spec_validE form, needed for the recursive call). *)
lemma rab_cte_cap_to':
"s \<turnstile> \<lbrace>\<lambda>s. (is_cnode_cap (fst args) \<longrightarrow> (\<forall>r\<in>cte_refs (fst args) (interrupt_irq_node s). ex_cte_cap_wp_to P r s))
\<and> (\<forall>cap. is_cnode_cap cap \<longrightarrow> P cap)\<rbrace>
resolve_address_bits args
\<lbrace>\<lambda>rv. ex_cte_cap_wp_to P (fst rv)\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>"
unfolding resolve_address_bits_def
proof (induct args arbitrary: s rule: resolve_address_bits'.induct)
case (1 z cap cref s')
(* Postcondition-weakening rule specialised to spec_validE, used to apply
   the induction hypothesis under a weaker postcondition. *)
have P:
"\<And>P' Q args adm s.
\<lbrakk> s \<turnstile> \<lbrace>P'\<rbrace> resolve_address_bits' z args \<lbrace>\<lambda>rv. ex_cte_cap_wp_to P (fst rv)\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>;
\<And>rv s. ex_cte_cap_wp_to P (fst rv) s \<Longrightarrow> Q rv s \<rbrakk> \<Longrightarrow>
s \<turnstile> \<lbrace>P'\<rbrace> resolve_address_bits' z args \<lbrace>Q\<rbrace>,\<lbrace>\<top>\<top>\<rbrace>"
unfolding spec_validE_def
apply (fold validE_R_def)
apply (erule hoare_post_imp_R)
apply simp
done
show ?case
apply (subst resolve_address_bits'.simps)
(* All non-CNode caps fail immediately; only the CNode case recurses. *)
apply (cases cap, simp_all split del: if_split)
defer 6 (* CNode *)
apply (wp+)[11]
apply (simp add: split_def cong: if_cong split del: if_split)
apply (rule hoare_pre_spec_validE)
apply (wp P [OF "1.hyps"], (simp add: in_monad | rule conjI refl)+)
apply (wp | simp | rule get_cap_wp)+
apply (fastforce simp: ex_cte_cap_wp_to_def elim!: cte_wp_at_weakenE)
done
qed
(* Plain validE_R form of the above, via use_spec. *)
lemmas rab_cte_cap_to = use_spec(2) [OF rab_cte_cap_to']
(* On success, resolve_address_bits yields a slot inside a real CNode
   (real_cte_at), assuming the starting cap is valid.  Direct induction on
   the recursion, unfolding the monad semantics by hand. *)
lemma resolve_address_bits_real_cte_at:
"\<lbrace> valid_objs and valid_cap (fst args) \<rbrace>
resolve_address_bits args
\<lbrace>\<lambda>rv. real_cte_at (fst rv)\<rbrace>, -"
unfolding resolve_address_bits_def
proof (induct args rule: resolve_address_bits'.induct)
case (1 z cap cref)
show ?case
apply (clarsimp simp add: validE_R_def validE_def valid_def split: sum.split)
apply (subst (asm) resolve_address_bits'.simps)
apply (cases cap)
defer 6 (* cnode *)
(* non-CNode caps: the call fails, so the claim holds vacuously *)
apply (auto simp: in_monad)[11]
apply (rename_tac obj_ref nat list)
apply (simp only: cap.simps)
(* zero-sized lookup is a fail *)
apply (case_tac "nat + length list = 0")
apply (simp add: fail_def)
apply (simp only: if_False)
apply (simp only: K_bind_def in_bindE_R)
apply (elim conjE exE)
apply (simp only: split: if_split_asm)
(* terminal case: the slot addressed is inside this (valid) CNode *)
apply (clarsimp simp add: in_monad)
apply (clarsimp simp add: valid_cap_def)
(* recursive case: apply the induction hypothesis after re-validating
   the cap fetched from the intermediate slot *)
apply (simp only: K_bind_def in_bindE_R)
apply (elim conjE exE)
apply (simp only: split: if_split_asm)
apply (frule (8) "1.hyps")
apply (clarsimp simp: in_monad validE_def validE_R_def valid_def)
apply (frule in_inv_by_hoareD [OF get_cap_inv])
apply simp
apply (frule (1) post_by_hoare [OF get_cap_valid])
apply (erule allE, erule impE, blast)
apply (clarsimp simp: in_monad split: cap.splits)
apply (drule (1) bspec, simp)+
apply (clarsimp simp: in_monad)
apply (frule in_inv_by_hoareD [OF get_cap_inv])
apply (clarsimp simp add: valid_cap_def)
done
qed
(* Weaker corollary: the resulting slot at least exists. *)
lemma resolve_address_bits_cte_at:
"\<lbrace> valid_objs and valid_cap (fst args) \<rbrace>
resolve_address_bits args
\<lbrace>\<lambda>rv. cte_at (fst rv)\<rbrace>, -"
apply (rule hoare_post_imp_R, rule resolve_address_bits_real_cte_at)
apply (erule real_cte_at_cte)
done
(* Lifted to lookup_slot_for_thread: the thread's ctable cap is valid under
   valid_objs, so the resolved slot is a real CNode slot. *)
lemma lookup_slot_real_cte_at_wp [wp]:
"\<lbrace> valid_objs \<rbrace> lookup_slot_for_thread t addr \<lbrace>\<lambda>rv. real_cte_at (fst rv)\<rbrace>,-"
apply (simp add: lookup_slot_for_thread_def)
apply wp
apply (rule resolve_address_bits_real_cte_at)
apply simp
apply wp
apply clarsimp
apply (erule(1) objs_valid_tcb_ctable)
done
(* Corresponding cte_at version. *)
lemma lookup_slot_cte_at_wp[wp]:
"\<lbrace> valid_objs \<rbrace> lookup_slot_for_thread t addr \<lbrace>\<lambda>rv. cte_at (fst rv)\<rbrace>,-"
by (strengthen real_cte_at_cte, wp)
(* Constructive witness: in a heap containing a single suitably shaped CNode,
   get_cap succeeds and returns the stored cap. *)
lemma get_cap_success:
fixes s cap ptr offset
defines "s' \<equiv> s\<lparr>kheap := [ptr \<mapsto> CNode (length offset) (\<lambda>x. if length x = length offset then Some cap else None)]\<rparr>"
shows "(cap, s') \<in> fst (get_cap (ptr, offset) s')"
by (simp add: get_cap_def get_object_def
in_monad s'_def well_formed_cnode_n_def length_set_helper dom_def
split: Structures_A.kernel_object.splits)
(* If ys is a drop-suffix of xs with n = length xs, then ys is no longer
   than xs.  Arithmetic helper for drop_postfixD below. *)
lemma len_drop_lemma:
assumes drop: "drop (n - length ys) xs = ys"
assumes l: "n = length xs"
shows "length ys \<le> n"
proof -
from drop
have "length (drop (n - length ys) xs) = length ys"
by simp
with l
have "length ys = n - (n - length ys)"
by simp
thus ?thesis by arith
qed
(* A drop-suffix is an append-suffix: split on whether ys has the same
   length as the whole list or is strictly shorter. *)
lemma drop_postfixD:
"(drop (length xs - length ys) xs = ys) \<Longrightarrow> (\<exists>zs. xs = zs @ ys)"
proof (induct xs arbitrary: ys)
case Nil thus ?case by simp
next
case (Cons x xs)
from Cons.prems
have "length ys \<le> length (x # xs)"
by (rule len_drop_lemma) simp
moreover
{ assume "length ys = length (x # xs)"
with Cons.prems
have ?case by simp
}
moreover {
assume "length ys < length (x # xs)"
hence "length ys \<le> length xs" by simp
hence "drop (length xs - length ys) xs =
drop (length (x # xs) - length ys) (x # xs)"
by (simp add: Suc_diff_le)
with Cons.prems
have ?case by (auto dest: Cons.hyps)
}
ultimately
show ?case by (auto simp: order_le_less)
qed
(* Equivalence form of the previous lemma. *)
lemma drop_postfix_eq:
"n = length xs \<Longrightarrow> (drop (n - length ys) xs = ys) = (\<exists>zs. xs = zs @ ys)"
by (auto dest: drop_postfixD)
(* Converse direction: an append-suffix is recovered by drop. *)
lemma postfix_dropD:
"xs = zs @ ys \<Longrightarrow> drop (length xs - length ys) xs = ys"
by simp
(* Bundle of capability classification definitions. *)
lemmas is_cap_defs = is_arch_cap_def is_zombie_def
(* The bit-level guard check performed during CNode lookup (shift right then
   mask) is equivalent to the guard being a list prefix of the remaining
   cref bits, given that cref is a suffix of cref's bit representation. *)
lemma guard_mask_shift:
fixes cref' :: "'a::len word"
assumes postfix: "to_bl cref' = xs @ cref"
shows
"(length guard \<le> length cref \<and>
(cref' >> (length cref - length guard)) && mask (length guard) = of_bl guard)
= (guard \<le> cref)" (is "(_ \<and> ?l = ?r) = _ " is "(?len \<and> ?shift) = ?prefix")
proof
let ?w_len = "len_of TYPE('a)"
(* forward direction: from the shift/mask equality recover the prefix *)
from postfix
have "length (to_bl cref') = length xs + length cref" by simp
hence w_len: "?w_len = \<dots>" by simp
assume "?len \<and> ?shift"
hence shift: ?shift and c_len: ?len by auto
from w_len c_len have "length guard \<le> ?w_len" by simp
with shift
have "(replicate (?w_len - length guard) False) @ guard = to_bl ?l"
by (simp add: word_rep_drop)
also
have "\<dots> = replicate (?w_len - length guard) False @
drop (?w_len - length guard) (to_bl (cref' >> (length cref - length guard)))"
by (simp add: bl_and_mask)
also
from c_len
have "\<dots> = replicate (?w_len - length guard) False @ take (length guard) cref"
by (simp add: bl_shiftr w_len word_size postfix)
finally
have "take (length guard) cref = guard" by simp
thus ?prefix by (simp add: take_prefix)
next
let ?w_len = "len_of TYPE('a)"
(* converse direction: a prefix satisfies the shift/mask equation *)
assume ?prefix
then obtain zs where cref: "cref = guard @ zs"
by (auto simp: prefix_def less_eq_list_def)
with postfix
have to_bl_c: "to_bl cref' = xs @ guard @ zs" by simp
hence "length (to_bl cref') = length \<dots>" by simp
hence w_len: "?w_len = \<dots>" by simp
from cref have c_len: "length guard \<le> length cref" by simp
from cref
have "length cref - length guard = length zs" by simp
hence "to_bl ?l = replicate (?w_len - length guard) False @
drop (?w_len - length guard) (to_bl (cref' >> (length zs)))"
by (simp add: bl_and_mask)
also
have "drop (?w_len - length guard) (to_bl (cref' >> (length zs))) = guard"
by (simp add: bl_shiftr word_size w_len to_bl_c)
finally
have "to_bl ?l = to_bl ?r"
by (simp add: word_rep_drop w_len)
with c_len
show "?len \<and> ?shift" by simp
qed
(* Taking a prefix of a bit list corresponds to shifting the word right. *)
lemma of_bl_take:
"length xs < len_of TYPE('a) \<Longrightarrow> of_bl (take n xs) = ((of_bl xs) >> (length xs - n) :: ('a :: len) word)"
apply (clarsimp simp: bang_eq and_bang test_bit_of_bl
rev_take conj_comms nth_shiftr)
apply auto
done
(* For an existing TCB, reading its ctable field via gets_the equals
   reading cap slot 0 of the TCB's CNode via get_cap. *)
lemma gets_the_tcb_get_cap:
"tcb_at t s \<Longrightarrow> liftM tcb_ctable (gets_the (get_tcb t)) s = get_cap (t, tcb_cnode_index 0) s"
apply (clarsimp simp add: tcb_at_def liftM_def bind_def assert_opt_def
gets_the_def simpler_gets_def return_def)
apply (clarsimp dest!: get_tcb_SomeD
simp add: get_cap_def tcb_cnode_map_def
get_object_def bind_def simpler_gets_def
return_def assert_def fail_def assert_opt_def)
done
(* Updating an unrelated object in the heap preserves cte_wp_at. *)
lemma upd_other_cte_wp_at:
"\<lbrakk> cte_wp_at P p s; fst p \<noteq> ptr \<rbrakk> \<Longrightarrow>
cte_wp_at P p (kheap_update (\<lambda>ps. (kheap s)(ptr \<mapsto> ko)) s)"
by (auto elim!: cte_wp_atE intro: cte_wp_at_cteI cte_wp_at_tcbI)
(* get_cap establishes cte_wp_at for exactly the returned cap. *)
lemma get_cap_cte_wp_at:
"\<lbrace>\<top>\<rbrace> get_cap p \<lbrace>\<lambda>rv. cte_wp_at (\<lambda>c. c = rv) p\<rbrace>"
apply (wp get_cap_wp)
apply (clarsimp elim!: cte_wp_at_weakenE)
done
(* Strongest-postcondition variant: get_cap preserves P and adds cte_wp_at. *)
lemma get_cap_sp:
"\<lbrace>P\<rbrace> get_cap p \<lbrace>\<lambda>rv. P and cte_wp_at (\<lambda>c. c = rv) p\<rbrace>"
by (wp get_cap_cte_wp_at)
(* In a well-formed n-sized CNode, every populated index has length n. *)
lemma wf_cs_nD:
"\<lbrakk> f x = Some y; well_formed_cnode_n n f \<rbrakk> \<Longrightarrow> length x = n"
unfolding well_formed_cnode_n_def by blast
(* set_cdt does not touch the heap, so valid_pspace is preserved. *)
lemma set_cdt_valid_pspace:
"\<lbrace>valid_pspace\<rbrace> set_cdt m \<lbrace>\<lambda>_. valid_pspace\<rbrace>"
unfolding set_cdt_def
apply simp
apply wp
apply (erule valid_pspace_eqI)
apply clarsimp
done
(* A cte_wp_at slot implies its containing object is in the heap. *)
lemma cte_at_pspace:
"cte_wp_at P p s \<Longrightarrow> \<exists>ko. kheap s (fst p) = Some ko"
by (auto simp: cte_wp_at_cases)
(* TCB cap slot indices all have bit-length 3. *)
lemma tcb_cap_cases_length:
"x \<in> dom tcb_cap_cases \<Longrightarrow> length x = 3"
by (auto simp add: tcb_cap_cases_def tcb_cnode_index_def to_bl_1)
(* Two slots in the same object have indices of equal length
   (CNode radix or the fixed TCB index length). *)
lemma cte_at_cref_len:
"\<lbrakk>cte_at (p, c) s; cte_at (p, c') s\<rbrakk> \<Longrightarrow> length c = length c'"
apply (clarsimp simp: cte_at_cases)
apply (erule disjE)
prefer 2
apply (clarsimp simp: tcb_cap_cases_length)
apply clarsimp
apply (drule (1) wf_cs_nD)
apply (drule (1) wf_cs_nD)
apply simp
done
(* valid_objs guarantees every CNode in the heap is well-formed. *)
lemma well_formed_cnode_invsI:
"\<lbrakk> valid_objs s; kheap s x = Some (CNode sz cs) \<rbrakk>
\<Longrightarrow> well_formed_cnode_n sz cs"
apply (erule(1) valid_objsE)
apply (clarsimp simp: well_formed_cnode_n_def valid_obj_def valid_cs_def valid_cs_size_def
length_set_helper)
done
(* After set_cap c p', cte_wp_at P at p holds iff P c (when p = p') or the
   old cte_wp_at held (otherwise); also the target slot must have existed. *)
lemma set_cap_cte_eq:
"(x,t) \<in> fst (set_cap c p' s) \<Longrightarrow>
cte_at p' s \<and> cte_wp_at P p t = (if p = p' then P c else cte_wp_at P p s)"
apply (cases p)
apply (cases p')
apply (auto simp: set_cap_def2 split_def in_monad cte_wp_at_cases
get_object_def set_object_def wf_cs_upd
split: Structures_A.kernel_object.splits if_split_asm
option.splits,
auto simp: tcb_cap_cases_def split: if_split_asm)
done
(* A CDT descendant slot exists: mdb_cte_at (from valid_mdb) gives a cap
   at both ends of every parent edge; here the child end. *)
lemma descendants_of_cte_at:
"\<lbrakk> p \<in> descendants_of x (cdt s); valid_mdb s \<rbrakk>
\<Longrightarrow> cte_at p s"
apply (simp add: descendants_of_def)
apply (drule tranclD2)
apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def
simp del: split_paired_All)
apply (fastforce elim: cte_wp_at_weakenE)
done
(* Same, but for the ancestor end of the chain. *)
lemma descendants_of_cte_at2:
"\<lbrakk> p \<in> descendants_of x (cdt s); valid_mdb s \<rbrakk>
\<Longrightarrow> cte_at x s"
apply (simp add: descendants_of_def)
apply (drule tranclD)
apply (clarsimp simp: cdt_parent_defs valid_mdb_def mdb_cte_at_def
simp del: split_paired_All)
apply (fastforce elim: cte_wp_at_weakenE)
done
(* set_cap neither creates nor destroys slots. *)
lemma in_set_cap_cte_at:
"(x, s') \<in> fst (set_cap c p' s) \<Longrightarrow> cte_at p s' = cte_at p s"
by (fastforce simp: cte_at_cases set_cap_def split_def wf_cs_upd
in_monad get_object_def set_object_def
split: Structures_A.kernel_object.splits if_split_asm)
(* Point-free (swp) version of the previous lemma. *)
lemma in_set_cap_cte_at_swp:
"(x, s') \<in> fst (set_cap c p' s) \<Longrightarrow> swp cte_at s' = swp cte_at s"
by (simp add: swp_def in_set_cap_cte_at)
(* FIXME: move *)
(* Equal-length takes of word bit representations force the take counts
   to agree (both are within the word size). *)
lemma take_to_bl_len:
fixes a :: "'a::len word"
fixes b :: "'a::len word"
assumes t: "take x (to_bl a) = take y (to_bl b)"
assumes x: "x \<le> size a"
assumes y: "y \<le> size b"
shows "x = y"
proof -
from t
have "length (take x (to_bl a)) = length (take y (to_bl b))"
by simp
also
from x have "length (take x (to_bl a)) = x"
by (simp add: word_size)
also
from y have "length (take y (to_bl b)) = y"
by (simp add: word_size)
finally
show ?thesis .
qed
(* Whether being the "final" capability to an object matters for this cap
   type, i.e. whether deleting the final cap triggers object cleanup.
   Notably False for Null, Untyped, Domain, Reply and IRQControl caps;
   delegated to final_matters_arch for architecture-specific caps. *)
definition
final_matters :: "cap \<Rightarrow> bool"
where
"final_matters cap \<equiv> case cap of
Structures_A.NullCap \<Rightarrow> False
| Structures_A.UntypedCap dev p b f \<Rightarrow> False
| Structures_A.EndpointCap r badge rights \<Rightarrow> True
| Structures_A.NotificationCap r badge rights \<Rightarrow> True
| Structures_A.CNodeCap r bits guard \<Rightarrow> True
| Structures_A.ThreadCap r \<Rightarrow> True
| Structures_A.DomainCap \<Rightarrow> False
| Structures_A.ReplyCap r master rights \<Rightarrow> False
| Structures_A.IRQControlCap \<Rightarrow> False
| Structures_A.IRQHandlerCap irq \<Rightarrow> True
| Structures_A.Zombie r b n \<Rightarrow> True
| Structures_A.ArchObjectCap ac \<Rightarrow> final_matters_arch ac"
(* One simp rule per constructor, derived from the case definition. *)
lemmas final_matters_simps[simp]
= final_matters_def[split_simps cap.split]
(* A boolean list contains no True iff every element is False. *)
lemma no_True_set_nth:
"(True \<notin> set xs) = (\<forall>n < length xs. xs ! n = False)"
apply (induct xs)
apply simp
apply (case_tac a, simp_all)
apply (rule_tac x=0 in exI)
apply simp
apply safe
apply (case_tac n, simp_all)[1]
apply (case_tac na, simp_all)[1]
apply (erule_tac x="Suc n" in allE)
apply clarsimp
done
(* Monadic form: after set_cap, caps_of_state is the pointwise update. *)
lemma set_cap_caps_of_state_monad:
"(v, s') \<in> fst (set_cap cap p s) \<Longrightarrow> caps_of_state s' = (caps_of_state s (p \<mapsto> cap))"
apply (drule use_valid)
apply (rule set_cap_caps_of_state [where P="(=) (caps_of_state s (p\<mapsto>cap))"])
apply (rule refl)
apply simp
done
(* A node has no descendants iff it is not the parent of anything. *)
lemma descendants_of_empty:
"(descendants_of d m = {}) = (\<forall>c. \<not>m \<turnstile> d cdt_parent_of c)"
apply (simp add: descendants_of_def)
apply (rule iffI)
apply clarsimp
apply (erule allE, erule allE)
apply (erule notE)
apply fastforce
apply clarsimp
apply (drule tranclD)
apply clarsimp
done
(* A node is a descendant of nothing iff it has no CDT parent. *)
lemma descendants_of_None:
"(\<forall>c. d \<notin> descendants_of c m) = (m d = None)"
apply (simp add: descendants_of_def cdt_parent_defs)
apply (rule iffI)
prefer 2
apply clarsimp
apply (drule tranclD2)
apply clarsimp
apply (erule contrapos_pp)
apply fastforce
done
(* A NullCap is never considered a suitable parent. *)
lemma not_should_be_parent_Null [simp]:
"should_be_parent_of cap.NullCap a b c = False"
by (simp add: should_be_parent_of_def)
(* No parent edge can end at a node absent from the CDT. *)
lemma mdb_None_no_parent:
"m p = None \<Longrightarrow> m \<Turnstile> c \<leadsto> p = False"
by (clarsimp simp: cdt_parent_defs)
(* Effect on descendant sets of making dest its own parent, when dest had
   no descendants: dest's descendants become {dest}; elsewhere dest is
   removed from descendant sets. *)
lemma descendants_of_self:
assumes "descendants_of dest m = {}"
shows "descendants_of x (m(dest \<mapsto> dest)) =
(if x = dest then {dest} else descendants_of x m - {dest})" using assms
apply (clarsimp simp: descendants_of_def cdt_parent_defs)
apply (rule conjI)
apply clarsimp
apply (fastforce split: if_split_asm elim: trancl_into_trancl trancl_induct)
apply clarsimp
apply (rule set_eqI)
apply clarsimp
apply (rule iffI)
apply (erule trancl_induct)
apply fastforce
apply clarsimp
apply (erule trancl_into_trancl)
apply clarsimp
apply clarsimp
apply (rule_tac P="(a,b) \<noteq> dest" in mp)
prefer 2
apply assumption
apply (thin_tac "(a,b) \<noteq> dest")
apply (erule trancl_induct)
apply fastforce
apply (fastforce split: if_split_asm elim: trancl_into_trancl)
done
(* Refinement of the previous lemma: if dest also had no parent, the
   subtraction of {dest} is vacuous. *)
lemma descendants_of_self_None:
assumes "descendants_of dest m = {}"
assumes n: "m dest = None"
shows "descendants_of x (m(dest \<mapsto> dest)) =
(if x = dest then {dest} else descendants_of x m)"
apply (subst descendants_of_self[OF assms(1)])
apply clarsimp
apply (subgoal_tac "dest \<notin> descendants_of x m")
apply simp
apply (insert n)
apply (simp add: descendants_of_None [symmetric] del: split_paired_All)
done
(* Inserting a new edge dest -> src cannot remove src from any descendant
   set, given dest is fresh (no descendants) and distinct from src.
   The auxiliary form r generalises over the inserted target to make the
   transitive-closure induction go through. *)
lemma descendants_insert_evil_trancl_induct:
assumes "src \<noteq> dest"
assumes d: "descendants_of dest m = {}"
assumes "src \<in> descendants_of x m"
shows "src \<in> descendants_of x (m (dest \<mapsto> src))"
proof -
have r: "\<And>t. \<lbrakk> src \<in> descendants_of x m; t = src \<rbrakk> \<Longrightarrow> src \<noteq> dest \<longrightarrow> src \<in> descendants_of x (m (dest \<mapsto> t))"
unfolding descendants_of_def cdt_parent_defs
apply (simp (no_asm_use) del: fun_upd_apply)
apply (erule trancl_induct)
apply clarsimp
apply (rule r_into_trancl)
apply clarsimp
apply (rule impI)
apply (erule impE)
apply (insert d)[1]
apply (clarsimp simp: descendants_of_def cdt_parent_defs)
apply fastforce
apply (simp del: fun_upd_apply)
apply (erule trancl_into_trancl)
apply clarsimp
done
show ?thesis using assms
apply -
apply (rule r [THEN mp])
apply assumption
apply (rule refl)
apply assumption
done
qed
(* Main characterisation of descendant sets after inserting the child edge
   dest -> src into a CDT where dest has no descendants: nodes with src in
   their subtree (or src itself) gain dest; all others lose it.
   The proof is a three-way case split, each side argued by transitive-
   closure induction on the parent relation. *)
lemma descendants_of_insert_child:
assumes d: "descendants_of dest m = {}"
assumes s: "src \<noteq> dest"
shows
"descendants_of x (m (dest \<mapsto> src)) =
(if src \<in> descendants_of x m \<or> x = src
then descendants_of x m \<union> {dest} else descendants_of x m - {dest})"
using assms
apply (simp add: descendants_of_def cdt_parent_defs del: fun_upd_apply)
apply (rule conjI)
(* case 1: src is already a descendant of x *)
apply clarify
apply (rule set_eqI)
apply (simp del: fun_upd_apply)
apply (rule iffI)
apply (simp only: disj_imp)
apply (erule_tac b="xa" in trancl_induct)
apply fastforce
apply clarsimp
apply (erule impE)
apply fastforce
apply (rule trancl_into_trancl)
prefer 2
apply simp
apply assumption
apply (erule disjE)
apply (drule descendants_insert_evil_trancl_induct [OF _ d])
apply (simp add: descendants_of_def cdt_parent_defs)
apply (simp add: descendants_of_def cdt_parent_defs del: fun_upd_apply)
apply (erule trancl_into_trancl)
apply fastforce
apply (case_tac "xa = dest")
apply (simp del: fun_upd_apply)
apply (drule descendants_insert_evil_trancl_induct [OF _ d])
apply (simp add: descendants_of_def cdt_parent_defs)
apply (simp add: descendants_of_def cdt_parent_defs del: fun_upd_apply)
apply (erule trancl_into_trancl)
apply fastforce
apply (rule_tac P="xa \<noteq> dest" in mp)
prefer 2
apply assumption
apply (erule_tac b=xa in trancl_induct)
apply fastforce
apply (clarsimp simp del: fun_upd_apply)
apply (erule impE)
apply fastforce
apply (fastforce elim: trancl_into_trancl)
apply (rule conjI)
(* case 2: x = src *)
apply (clarsimp simp del: fun_upd_apply)
apply (rule set_eqI)
apply (simp del: fun_upd_apply)
apply (rule iffI)
apply (simp only: disj_imp)
apply (erule_tac b="xa" in trancl_induct)
apply fastforce
apply clarsimp
apply (erule impE)
apply fastforce
apply (rule trancl_into_trancl)
prefer 2
apply simp
apply assumption
apply (erule disjE)
apply fastforce
apply (case_tac "xa = dest")
apply fastforce
apply (rule_tac P="xa \<noteq> dest" in mp)
prefer 2
apply assumption
apply (erule_tac b=xa in trancl_induct)
apply fastforce
apply (clarsimp simp del: fun_upd_apply)
apply (erule impE)
apply fastforce
apply (fastforce elim: trancl_into_trancl)
(* case 3: src is not below x, and x differs from src *)
apply clarify
apply (rule set_eqI)
apply (simp del: fun_upd_apply)
apply (rule iffI)
apply (erule trancl_induct)
apply (fastforce split: if_split_asm)
apply (clarsimp split: if_split_asm)
apply (fastforce elim: trancl_into_trancl)
apply (elim conjE)
apply (rule_tac P="xa \<noteq> dest" in mp)
prefer 2
apply assumption
apply (erule_tac b=xa in trancl_induct)
apply fastforce
apply (clarsimp simp del: fun_upd_apply)
apply (erule impE)
apply fastforce
apply (erule trancl_into_trancl)
apply fastforce
done
(* A parentless node cannot be a descendant (destruction rule form). *)
lemma descendants_of_NoneD:
"\<lbrakk> m p = None; p \<in> descendants_of x m \<rbrakk> \<Longrightarrow> False"
by (simp add: descendants_of_None [symmetric] del: split_paired_All)
(* Simplified insert-child characterisation when dest is entirely fresh
   (no parent either): no subtraction needed. *)
lemma descendants_of_insert_child':
assumes d: "descendants_of dest m = {}"
assumes s: "src \<noteq> dest"
assumes m: "m dest = None"
shows
"descendants_of x (m (dest \<mapsto> src)) =
(if src \<in> descendants_of x m \<or> x = src
then descendants_of x m \<union> {dest} else descendants_of x m)"
apply (subst descendants_of_insert_child [OF d s])
apply clarsimp
apply (subgoal_tac "dest \<notin> descendants_of x m")
apply clarsimp
apply (rule notI)
apply (rule descendants_of_NoneD, rule m, assumption)
done
(* Locale packaging a state with valid_mdb, abbreviating its CDT (m)
   and its caps_of_state (cs) for the MDB lemmas below. *)
locale vmdb_abs =
fixes s m cs
assumes valid_mdb: "valid_mdb s"
defines "m \<equiv> cdt s"
defines "cs \<equiv> caps_of_state s"
context vmdb_abs begin
(* The CDT has no loops through its parent relation. *)
lemma no_mloop [intro!]: "no_mloop m"
using valid_mdb by (simp add: valid_mdb_def m_def)
(* Hence no node is its own (transitive) descendant. *)
lemma no_loops [simp, intro!]: "\<not>m \<Turnstile> p \<rightarrow> p"
using no_mloop by (cases p) (simp add: no_mloop_def)
(* In particular no node is its own direct parent. *)
lemma no_mdb_loop [simp, intro!]: "m p \<noteq> Some p"
proof
assume "m p = Some p"
hence "m \<Turnstile> p \<leadsto> p" by (simp add: cdt_parent_of_def)
hence "m \<Turnstile> p \<rightarrow> p" ..
thus False by simp
qed
(* Projections of valid_mdb used by later proofs. *)
lemma untyped_inc:
"untyped_inc m cs"
using valid_mdb by (simp add: valid_mdb_def m_def cs_def)
lemma untyped_mdb:
"untyped_mdb m cs"
using valid_mdb by (simp add: valid_mdb_def m_def cs_def)
(* A NullCap slot participates in no CDT ancestry either way. *)
lemma null_no_mdb:
"cs p = Some cap.NullCap \<Longrightarrow> \<not> m \<Turnstile> p \<rightarrow> p' \<and> \<not> m \<Turnstile> p' \<rightarrow> p"
using valid_mdb_no_null [OF valid_mdb]
by (simp add: m_def cs_def)
end
(* the_arch_cap inverts the ArchObjectCap constructor. *)
lemma the_arch_cap_ArchObjectCap[simp]:
"the_arch_cap (cap.ArchObjectCap cap) = cap"
by (simp add: the_arch_cap_def)
(* cap_master_cap per constructor: it erases badges, rights, guards and
   free-index information, keeping only object identity and size. *)
lemma cap_master_cap_simps:
"cap_master_cap (cap.EndpointCap ref bdg rghts) = cap.EndpointCap ref 0 UNIV"
"cap_master_cap (cap.NotificationCap ref bdg rghts) = cap.NotificationCap ref 0 UNIV"
"cap_master_cap (cap.CNodeCap ref bits gd) = cap.CNodeCap ref bits []"
"cap_master_cap (cap.ThreadCap ref) = cap.ThreadCap ref"
"cap_master_cap (cap.NullCap) = cap.NullCap"
"cap_master_cap (cap.DomainCap) = cap.DomainCap"
"cap_master_cap (cap.UntypedCap dev r n f) = cap.UntypedCap dev r n 0"
"cap_master_cap (cap.ReplyCap r m rights) = cap.ReplyCap r True UNIV"
"cap_master_cap (cap.IRQControlCap) = cap.IRQControlCap"
"cap_master_cap (cap.IRQHandlerCap irq) = cap.IRQHandlerCap irq"
"cap_master_cap (cap.Zombie r a b) = cap.Zombie r a b"
"cap_master_cap (ArchObjectCap ac) = ArchObjectCap (cap_master_arch_cap ac)"
by (simp_all add: cap_master_cap_def)
(* set_cap leaves the original-cap (revocation) map unchanged. *)
lemma is_original_cap_set_cap:
"(x,s') \<in> fst (set_cap p c s) \<Longrightarrow> is_original_cap s' = is_original_cap s"
by (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def
split: if_split_asm Structures_A.kernel_object.splits)
(* set_cap leaves the CDT unchanged. *)
lemma mdb_set_cap:
"(x,s') \<in> fst (set_cap p c s) \<Longrightarrow> cdt s' = cdt s"
by (clarsimp simp: set_cap_def in_monad split_def get_object_def set_object_def
split: if_split_asm Structures_A.kernel_object.splits)
(* Caps with equal masters agree on the quantities the master preserves:
   object refs, untyped ranges, overall cap range, ep/ntfn/zombie-ness,
   and (outside Zombie) zombie-object refs. *)
lemma master_cap_obj_refs:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> obj_refs c = obj_refs c'"
by (clarsimp simp add: cap_master_cap_def
intro!: master_arch_cap_obj_refs[THEN arg_cong[where f=set_option]]
split: cap.splits)
lemma master_cap_untyped_range:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> untyped_range c = untyped_range c'"
by (simp add: cap_master_cap_def split: cap.splits)
lemma master_cap_cap_range:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> cap_range c = cap_range c'"
by (simp add: cap_range_def cong: master_cap_untyped_range master_cap_obj_refs)
lemma master_cap_ep:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> is_ep_cap c = is_ep_cap c'"
by (simp add: cap_master_cap_def is_cap_simps split: cap.splits)
lemma master_cap_ntfn:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> is_ntfn_cap c = is_ntfn_cap c'"
by (simp add: cap_master_cap_def is_cap_simps split: cap.splits)
lemma cap_master_cap_zombie:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> is_zombie c = is_zombie c'"
by (simp add: cap_master_cap_def is_cap_simps split: cap.splits)
(* zobj_refs equals obj_refs except for Zombie caps, where it is empty. *)
lemma zobj_refs_def2:
"zobj_refs c = (case c of Zombie _ _ _ \<Rightarrow> {} | _ \<Rightarrow> obj_refs c)"
by (cases c; simp)
lemma cap_master_cap_zobj_refs:
"cap_master_cap c = cap_master_cap c' \<Longrightarrow> zobj_refs c = zobj_refs c'"
by (clarsimp simp add: cap_master_cap_def
intro!: master_arch_cap_obj_refs[THEN arg_cong[where f=set_option]]
split: cap.splits)
(* Any object referenced by a stored cap exists in the heap, via the
   validity of that cap under valid_objs. *)
lemma caps_of_state_obj_refs:
"\<lbrakk> caps_of_state s p = Some cap; r \<in> obj_refs cap; valid_objs s \<rbrakk>
\<Longrightarrow> \<exists>ko. kheap s r = Some ko"
apply (subgoal_tac "valid_cap cap s")
prefer 2
apply (rule cte_wp_valid_cap)
apply (erule caps_of_state_cteD)
apply assumption
apply (cases cap, auto simp: valid_cap_def obj_at_def
dest: obj_ref_is_arch
split: option.splits)
done
(* Locale for inserting a cap at a fresh slot dest as a CHILD of src:
   dest is distinct from src, parentless, and has no descendants. *)
locale mdb_insert_abs =
fixes m src dest
assumes neq: "src \<noteq> dest"
assumes dest: "m dest = None"
assumes desc: "descendants_of dest m = {}"
(* Variant for inserting dest as a SIBLING of src: n gives dest the same
   parent as src. *)
locale mdb_insert_abs_sib = mdb_insert_abs +
fixes n
defines "n \<equiv> m(dest := m src)"
context mdb_insert_abs begin
(* Freshness of dest rephrased: it heads no chain, parents nothing,
   and is reached by nothing. *)
lemma dest_no_parent_trancl [iff]:
"(m \<Turnstile> dest \<rightarrow> p) = False" using desc
by (simp add: descendants_of_def del: split_paired_All)
lemma dest_no_parent [iff]:
"(m \<Turnstile> dest \<leadsto> p) = False"
by (fastforce dest: r_into_trancl)
lemma dest_no_parent_d [iff]:
"(m p = Some dest) = False"
apply clarsimp
apply (fold cdt_parent_defs)
apply simp
done
lemma dest_no_child [iff]:
"(m \<Turnstile> p \<leadsto> dest) = False" using dest
by (simp add: cdt_parent_defs)
lemma dest_no_child_trancl [iff]:
"(m \<Turnstile> p \<rightarrow> dest) = False"
by (clarsimp dest!: tranclD2)
(* Child insertion: descendant sets gain dest exactly below src. *)
lemma descendants_child:
"descendants_of x (m(dest \<mapsto> src)) =
(if src \<in> descendants_of x m \<or> x = src
then descendants_of x m \<union> {dest} else descendants_of x m)"
apply (rule descendants_of_insert_child')
apply (rule desc)
apply (rule neq)
apply (rule dest)
done
(* descendants_inc (class/range monotonicity along the CDT) is preserved
   by inserting a derived cap as a child of src. *)
lemma descendants_inc:
assumes dinc: "descendants_inc m cs"
assumes src: "cs src = Some c"
assumes type: "cap_class cap = cap_class c \<and> cap_range cap \<subseteq> cap_range c"
shows "descendants_inc (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
using dinc src type
apply (simp add:descendants_inc_def del:split_paired_All)
apply (intro allI conjI)
apply (intro impI allI)
apply (simp add:descendants_child split:if_splits del:split_paired_All)
apply (erule disjE)
apply (drule spec)+
apply (erule(1) impE)
apply simp
apply blast
apply simp
apply (simp add:descendants_of_def)
apply (intro impI allI)
apply (rule conjI)
apply (intro impI)
apply (simp add:descendants_child
split:if_splits del:split_paired_All)
apply (simp add: descendants_of_def)
apply (cut_tac p = p in dest_no_parent_trancl,simp)
apply (simp add:descendants_of_def)
apply (intro impI)
apply (simp add:descendants_child split:if_splits del:split_paired_All)
done
end
(* Set-theoretic helper: an option's set avoids Y iff its value (if any)
   avoids Y. *)
lemma set_option_empty_inter:
"(set_option X \<inter> Y = {}) = (\<forall>x. X = Some x \<longrightarrow> x \<notin> Y)"
by blast
context mdb_insert_abs_sib begin
(* In the sibling-updated CDT n, dest still parents nothing. *)
lemma dest_no_parent_d_n [iff]:
"(n p = Some dest) = False"
by (simp add: n_def)
lemma dest_no_parent_n [iff]:
"n \<Turnstile> dest \<leadsto> z = False"
by (simp add: cdt_parent_defs)
lemma dest_no_parent_n_trancl [iff]:
"n \<Turnstile> dest \<rightarrow> z = False"
by (clarsimp dest!: tranclD)
(* An n-edge into dest corresponds to an m-edge into src (same parent). *)
lemma n_to_dest [iff]:
"(n \<Turnstile> p \<leadsto> dest) = (m \<Turnstile> p \<leadsto> src)"
by (simp add: n_def cdt_parent_defs)
(* Every n-chain to p' translates to an m-chain: to src when p' = dest,
   otherwise unchanged. *)
lemma parent_n:
"n \<Turnstile> p \<rightarrow> p' \<Longrightarrow> if p' = dest then m \<Turnstile> p \<rightarrow> src else m \<Turnstile> p \<rightarrow> p'"
apply (erule trancl_induct)
apply simp
apply (rule conjI)
apply (rule impI)
apply simp
apply (clarsimp simp: n_def cdt_parent_defs)
apply fastforce
apply (simp split: if_split_asm)
apply (rule conjI)
apply (rule impI)
apply simp
apply (rule impI)
apply (erule trancl_into_trancl)
apply (clarsimp simp: n_def cdt_parent_defs)
done
lemma dest_neq_Some [iff]:
"(m dest = Some p) = False" using dest
by simp
(* Conversely, m-chains survive into n unchanged... *)
lemma parent_m:
"m \<Turnstile> p \<rightarrow> p' \<Longrightarrow> n \<Turnstile> p \<rightarrow> p'"
apply (erule trancl_induct)
apply (rule r_into_trancl)
apply (simp add: n_def cdt_parent_defs)
apply (rule impI)
apply simp
apply (erule trancl_into_trancl)
apply (simp add: n_def cdt_parent_defs)
apply (rule impI)
apply simp
done
(* ...and any m-chain to src extends to an n-chain to dest. *)
lemma parent_m_dest:
"m \<Turnstile> p \<rightarrow> src \<Longrightarrow> n \<Turnstile> p \<rightarrow> dest"
apply (erule converse_trancl_induct)
apply (rule r_into_trancl)
apply (clarsimp simp: n_def cdt_parent_defs)
apply (rule trancl_trans)
prefer 2
apply assumption
apply (rule r_into_trancl)
apply (simp add: n_def cdt_parent_defs)
apply (rule impI)
apply simp
done
(* Exact characterisation of ancestry in n in terms of m. *)
lemma parent_n_eq:
"n \<Turnstile> p \<rightarrow> p' = (if p' = dest then m \<Turnstile> p \<rightarrow> src else m \<Turnstile> p \<rightarrow> p')"
apply (rule iffI)
apply (erule parent_n)
apply (simp split: if_split_asm)
apply (erule parent_m_dest)
apply (erule parent_m)
done
(* Sibling insertion: descendant sets gain dest wherever they contain src. *)
lemma descendants:
"descendants_of p n =
descendants_of p m \<union> (if src \<in> descendants_of p m then {dest} else {})"
by (rule set_eqI) (simp add: descendants_of_def parent_n_eq)
end
(* untyped_mdb is preserved when a cap derived from the cap at src is
   inserted at dest as a child of src.  The proof case-splits over which of
   the two quantified slots in untyped_mdb is the new slot dest, each time
   appealing to the original untyped_mdb instance via the is_derived facts. *)
lemma (in mdb_insert_abs) untyped_mdb:
assumes u: "untyped_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "untyped_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding untyped_mdb_def
apply (intro allI impI)
apply (simp add: descendants_child)
apply (rule conjI)
(* subcase: the untyped slot is dest *)
apply (rule impI)
apply (rule disjCI2)
apply simp
apply (case_tac "ptr = dest")
apply simp
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule impE, rule src)
apply (erule impE)
apply (insert d)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: cap.splits)
apply (drule cap_master_cap_eqDs)
apply fastforce
apply (erule (1) impE)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (simp add: descendants_of_def)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply fastforce
apply (rule conjI)
(* subcase: the overlapping slot is dest *)
apply (rule impI)
apply (rule disjCI2)
apply (simp add: neq)
apply (insert u src)[1]
apply simp
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule impE, rule src)
apply (erule impE)
subgoal by (clarsimp simp: is_cap_simps is_derived_def same_object_as_def
split: cap.splits)
apply (erule (1) impE)
subgoal by simp
(* subcase: neither slot is dest; reduce to the original instance *)
apply (rule impI)
apply (erule conjE)
apply (simp split: if_split_asm)
subgoal by (clarsimp simp: is_cap_simps)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule impE, rule src)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (erule (1) impE)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule (1) impE)
apply (erule (1) impE)
apply (erule impE, rule src)
apply (erule impE)
apply (clarsimp simp: is_derived_def
split: if_split_asm)
apply (drule master_cap_obj_refs)
apply (fastforce dest: master_cap_obj_refs)
subgoal by (clarsimp simp:is_cap_simps cap_master_cap_def dest!: master_arch_cap_obj_refs split:cap.splits)
subgoal by simp
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply fastforce
done
(* Equal master caps have equal capability classes. *)
lemma master_cap_class:
"cap_master_cap a = cap_master_cap b
\<Longrightarrow> cap_class a = cap_class b"
apply (case_tac a)
apply (clarsimp simp: cap_master_cap_simps dest!:cap_master_cap_eqDs master_arch_cap_cap_class)+
done
(* A derived cap has the same class as, and range within, its source. *)
lemma is_derived_cap_class_range:
"is_derived m src cap capa
\<Longrightarrow> cap_class cap = cap_class capa \<and> cap_range cap \<subseteq> cap_range capa"
apply (clarsimp simp:is_derived_def split:if_splits)
apply (frule master_cap_cap_range)
apply (drule master_cap_class)
apply simp
apply (frule master_cap_cap_range)
apply (drule master_cap_class)
apply simp
done
(* descendants_inc is preserved by sibling insertion of a cap whose class
   equals and range lies within those of the cap at src. *)
lemma (in mdb_insert_abs_sib) descendants_inc:
assumes dinc: "descendants_inc m cs"
assumes src: "cs src = Some c"
assumes d: "cap_class cap = cap_class c \<and> cap_range cap \<subseteq> cap_range c"
shows "descendants_inc n (cs(dest \<mapsto> cap))"
using dinc src d
apply (simp add:descendants_inc_def del:split_paired_All)
apply (intro allI conjI)
apply (intro impI allI)
apply (simp add:descendants_child descendants split:if_splits del:split_paired_All)
apply (drule spec)+
apply (erule(1) impE)
apply simp
apply blast
apply simp
apply (simp add:descendants_of_def)
apply (intro impI allI)
apply (rule conjI)
apply (intro impI)
apply (simp add:descendants_child descendants
split:if_splits del:split_paired_All)
apply (simp add: descendants_of_def)
apply (simp add:descendants_of_def descendants)
apply (intro impI)
apply (simp add: descendants del:split_paired_All split:if_splits)
done
(* Sibling insertion preserves untyped_mdb: inserting cap at dest as a
   sibling of src (dest previously Null, cap derived from the cap c at src)
   keeps the untyped-descendants invariant.  The proof repeatedly
   instantiates the untyped_mdb assumption u for the various combinations
   of "slot is dest" vs "slot is an old slot". *)
lemma (in mdb_insert_abs_sib) untyped_mdb_sib:
assumes u: "untyped_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "untyped_mdb n (cs(dest \<mapsto> cap))"
unfolding untyped_mdb_def
apply (intro allI impI)
apply (simp add: descendants)
apply (rule conjI)
(* first conjunct: untyped cap above, object refs of cap below *)
apply (rule impI, rule disjCI)
apply (simp split: if_split_asm)
(* subcase: the lower slot is dest — derive facts about cap from c via d *)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule impE, rule src)
apply (erule impE)
apply (insert d)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (erule (1) impE)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (simp add: descendants_of_def)
(* subcase: both slots are old — direct from u *)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply fastforce
(* second conjunct: symmetric direction of the invariant *)
apply (rule impI)
apply (simp split: if_split_asm)
apply (clarsimp simp: is_cap_simps)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule impE, rule src)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: cap.splits)
apply (fastforce dest: cap_master_cap_eqDs)
apply (erule (1) impE)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm)
apply (fastforce dest: cap_master_cap_eqDs)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm)
apply (fastforce dest: cap_master_cap_eqDs)
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply (erule allE)+
apply (erule (1) impE)
apply (erule (1) impE)
apply (erule impE, rule src)
apply (erule impE)
apply (clarsimp simp: is_cap_simps is_derived_def cap_master_cap_simps
split: if_split_asm cap.splits dest!:cap_master_cap_eqDs)
apply (blast dest: master_cap_obj_refs)
apply simp
apply (insert u)[1]
apply (unfold untyped_mdb_def)
apply fastforce
done
(* Under mdb_cte_at, a slot holding NullCap has no entry in the CDT:
   mdb_cte_at requires a non-Null cap at any slot with a parent. *)
lemma mdb_cte_at_Null_None:
"\<lbrakk> cs p = Some cap.NullCap;
mdb_cte_at (\<lambda>p. \<exists>c. cs p = Some c \<and> cap.NullCap \<noteq> c) m \<rbrakk>
\<Longrightarrow> m p = None"
apply (simp add: mdb_cte_at_def)
apply (rule classical)
apply fastforce
done
(* Under mdb_cte_at, a slot holding NullCap has no descendants in the CDT.
   Any descendant path would start with a parent edge from p, contradicting
   the non-Null requirement on parents. *)
lemma mdb_cte_at_Null_descendants:
"\<lbrakk> cs p = Some cap.NullCap;
mdb_cte_at (\<lambda>p. \<exists>c. cs p = Some c \<and> cap.NullCap \<noteq> c) m \<rbrakk>
\<Longrightarrow> descendants_of p m = {}"
apply (simp add: mdb_cte_at_def descendants_of_def)
apply clarsimp
apply (drule tranclD)
apply (clarsimp simp: cdt_parent_of_def)
apply (cases p)
apply fastforce
done
(* Characterisation of the parent relation after inserting dest as a child
   of src: p reaches p' in the new tree iff it already did, except that any
   slot at or below src additionally reaches dest. *)
lemma (in mdb_insert_abs) parency:
"(m (dest \<mapsto> src) \<Turnstile> p \<rightarrow> p') =
(if m \<Turnstile> p \<rightarrow> src \<or> p = src then p' = dest \<or> m \<Turnstile> p \<rightarrow> p' else m \<Turnstile> p \<rightarrow> p')"
using descendants_child [where x=p]
unfolding descendants_of_def
by simp fastforce
(* Enter the mdb_insert_abs locale; make the symmetric forms of the
   src/dest inequalities available to the simplifier. *)
context mdb_insert_abs begin
lemmas mis_neq_simps [simp] = neq [symmetric]
(* Child insertion preserves untyped_inc under explicit side conditions:
   the new cap either has exactly the untyped range of the src cap or is
   not untyped; if it is untyped then so is the src cap, src has no
   descendants, and the src cap's usable range is empty. *)
lemma untyped_inc_simple:
assumes u: "untyped_inc m cs"
assumes src: "cs src = Some c"
assumes al: "cap_aligned c"
assumes dst: "cs dest = Some cap.NullCap"
assumes ut: "untyped_range cap = untyped_range c \<or> \<not>is_untyped_cap cap"
assumes ut': "is_untyped_cap cap \<longrightarrow> is_untyped_cap c"
assumes dsc: "is_untyped_cap cap \<longrightarrow> descendants_of src m = {}"
assumes usable:"is_untyped_cap cap \<longrightarrow> is_untyped_cap c \<longrightarrow> usable_untyped_range c = {}"
shows "untyped_inc (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
proof -
(* auxiliary fact: any cap with the same untyped range as c has empty
   usable range, obtained by instantiating untyped_inc at src *)
have no_usage:"\<And>p c'. is_untyped_cap cap \<longrightarrow> cs p = Some c' \<longrightarrow> untyped_range c = untyped_range c' \<longrightarrow> usable_untyped_range c' = {}"
using src u
unfolding untyped_inc_def
apply (erule_tac x = src in allE)
apply (intro impI)
apply (erule_tac x = p in allE)
apply (case_tac "is_untyped_cap c")
apply simp
apply (case_tac "is_untyped_cap c'")
apply simp
using dsc ut usable
apply clarsimp
apply (elim disjE)
apply clarsimp+
using al
apply (case_tac c',simp_all add:is_cap_simps untyped_range_non_empty)
using ut'
apply (clarsimp simp:is_cap_simps)
done
from ut ut' dsc dst
show ?thesis using u src desc
unfolding untyped_inc_def
apply (simp del: fun_upd_apply split_paired_All)
apply (intro allI)
(* case analysis on whether either quantified slot is dest or src *)
apply (case_tac "p = dest")
apply (case_tac "p' = dest")
apply (clarsimp simp:src dst)
apply (case_tac "p'=src")
apply (erule_tac x=src in allE)
apply (erule_tac x=p' in allE)
apply (cut_tac p = src and c' = c in no_usage)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (erule_tac x=src in allE)
apply (erule_tac x=p' in allE)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (erule_tac x=p in allE)
apply (case_tac "p'=dest")
apply (case_tac "p'=src")
apply (erule_tac x=src in allE)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (erule_tac x=src in allE)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (cut_tac p = "(a,b)" and c' = ca in no_usage)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (case_tac "p' = src")
apply (erule_tac x = src in allE)
apply (clarsimp simp del:split_paired_All split del:if_split simp: descendants_child)
apply (erule_tac x = p' in allE)
apply (clarsimp simp del:split_paired_All simp: descendants_child)
done
qed
(* Child insertion preserves untyped_inc when the new cap is derived from
   the src cap and the src cap's usable range is empty (if untyped).
   The side conditions of untyped_inc_simple are discharged from
   is_derived. *)
lemma untyped_inc:
assumes u: "untyped_inc m cs"
assumes src: "cs src = Some c"
assumes al: "cap_aligned c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
assumes usable:"is_untyped_cap c \<longrightarrow> usable_untyped_range c = {}"
shows "untyped_inc (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
proof -
from d
have "untyped_range cap = untyped_range c"
by (clarsimp simp: is_derived_def cap_master_cap_def is_cap_simps
split: cap.split_asm if_split_asm)
moreover
from d
have "is_untyped_cap cap \<longrightarrow> descendants_of src m = {}"
by (auto simp: is_derived_def cap_master_cap_def is_cap_simps
split: if_split_asm cap.splits)
moreover
from d
have "is_untyped_cap cap \<longrightarrow> is_untyped_cap c"
by (auto simp: is_derived_def cap_master_cap_def is_cap_simps
split: if_split_asm cap.splits)
ultimately
show ?thesis using assms
by (auto intro!: untyped_inc_simple)
qed
end
(* In the insertion locale, no CDT entry maps to dest (dest is fresh). *)
lemma (in mdb_insert_abs) m_Some_not_dest:
"m p = Some p' \<Longrightarrow> p' \<noteq> dest"
by clarsimp
(* Child insertion preserves reply_caps_mdb: a derived cap cannot be a
   reply cap with new semantics (is_derived rules out the problem cases),
   and old reply caps keep their witness, which cannot be dest. *)
lemma (in mdb_insert_abs) reply_caps_mdb:
assumes r: "reply_caps_mdb m cs"
assumes src: "cs src = Some c"
assumes d: "is_derived m src cap c"
shows "reply_caps_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding reply_caps_mdb_def
using r d
apply (intro allI impI)
apply (simp add: desc neq split: if_split_asm del: split_paired_Ex)
apply (fastforce simp: src is_derived_def is_cap_simps cap_master_cap_def)
apply (erule(1) reply_caps_mdbE)
apply (fastforce dest:m_Some_not_dest)
done
(* Child insertion preserves reply_masters_mdb: descendants of a reply
   master are unchanged except possibly dest, and dest held NullCap before
   the insertion, so the master-reply-cap conditions carry over. *)
lemma (in mdb_insert_abs) reply_masters_mdb:
assumes r: "reply_masters_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "reply_masters_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding reply_masters_mdb_def
using r d
apply (intro allI impI)
apply (simp add: descendants_child)
apply (simp add: neq desc split: if_split_asm)
apply (clarsimp simp: src is_derived_def is_cap_simps cap_master_cap_def)
apply (unfold reply_masters_mdb_def)
apply (intro conjI)
apply (erule allE)+
apply (erule(1) impE)
apply simp
apply (rule impI)
apply (erule conjE)
apply (drule_tac x=src in bspec, assumption)
apply (clarsimp simp: src is_derived_def is_cap_simps)
apply (erule allE)+
apply (erule(1) impE)
apply (rule impI, simp)
apply (clarsimp simp: src is_derived_def is_cap_simps cap_master_cap_def)
apply (erule allE)+
apply (erule(1) impE)
apply (rule impI, simp, rule impI)
apply (erule conjE)
apply (drule_tac x=dest in bspec, assumption)
apply (simp add: dst)
done
(* reply_mdb is the conjunction of reply_caps_mdb and reply_masters_mdb;
   combine the two preservation lemmas above. *)
lemma (in mdb_insert_abs) reply_mdb:
assumes r: "reply_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "reply_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
using r unfolding reply_mdb_def
by (simp add: reply_caps_mdb reply_masters_mdb src dst d)
(* Sibling insertion preserves reply_caps_mdb.  The should_be_parent_of
   negation together with rev rules out inserting a reply cap whose master
   is at src; existing reply-cap witnesses are kept via n_def. *)
lemma (in mdb_insert_abs_sib) reply_caps_mdb_sib:
assumes r: "reply_caps_mdb m cs"
assumes p: "\<not>should_be_parent_of c r cap f"
assumes rev: "is_master_reply_cap c \<longrightarrow> r"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "reply_caps_mdb n (cs(dest \<mapsto> cap))"
unfolding reply_caps_mdb_def
using r p d
apply (intro allI impI)
apply (simp add: desc neq split: if_split_asm del: split_paired_Ex)
apply (clarsimp simp: should_be_parent_of_def is_derived_def is_cap_simps
cap_master_cap_def rev)
apply (unfold reply_caps_mdb_def)[1]
apply (erule allE)+
apply (erule(1) impE)
apply (erule exEI)
apply (simp add: n_def)
apply blast
done
(* Sibling insertion preserves reply_masters_mdb: the descendant sets of
   reply masters are unchanged (dest gains no children), and dest's prior
   NullCap discharges the conditions for the new slot. *)
lemma (in mdb_insert_abs_sib) reply_masters_mdb_sib:
assumes r: "reply_masters_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
shows "reply_masters_mdb n (cs(dest \<mapsto> cap))"
unfolding reply_masters_mdb_def
using r d
apply (intro allI impI)
apply (simp add: descendants)
apply (simp add: neq desc split: if_split_asm)
apply (clarsimp simp: is_derived_def is_cap_simps cap_master_cap_def)
apply (unfold reply_masters_mdb_def)
apply (intro conjI)
apply (erule allE)+
apply (erule(1) impE)
apply simp
apply (rule impI)
apply (erule conjE)
apply (drule_tac x=src in bspec, assumption)
apply (clarsimp simp: src is_derived_def is_cap_simps)
apply (erule allE)+
apply (erule(1) impE)
apply (rule impI, simp add: n_def)
apply (rule impI, erule conjE)
apply (drule_tac x=dest in bspec, assumption)
apply (simp add: dst)
done
(* Combine the two sibling-insertion preservation lemmas into reply_mdb. *)
lemma (in mdb_insert_abs_sib) reply_mdb_sib:
assumes r: "reply_mdb m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes d: "is_derived m src cap c"
assumes p: "\<not>should_be_parent_of c r cap f"
assumes rev: "is_master_reply_cap c \<longrightarrow> r"
shows "reply_mdb n (cs(dest \<mapsto> cap))"
using r src dst d p rev unfolding reply_mdb_def
by (simp add: reply_caps_mdb_sib reply_masters_mdb_sib)
(* If a derivation does not make c the parent of c' (and untyped caps at c
   would force revocability r), then c cannot be untyped: an untyped c
   with a derived aligned cap would have to be a parent. *)
lemma not_parent_not_untyped:
assumes p: "\<not>should_be_parent_of c r c' f" "is_derived m p c' c" "cap_aligned c'"
assumes r: "is_untyped_cap c \<longrightarrow> r"
shows "\<not>is_untyped_cap c" using p r
apply (clarsimp simp: cap_master_cap_def should_be_parent_of_def is_cap_simps is_derived_def
split: if_split_asm cap.splits)
apply (simp add: cap_aligned_def is_physical_def)
apply (elim conjE)
apply (drule is_aligned_no_overflow, simp)
done
(* Sibling insertion preserves untyped_inc: by not_parent_not_untyped the
   src cap is not untyped, hence (via is_derived) neither is the new cap,
   so the untyped-inclusion conditions are vacuous for the new slot. *)
context mdb_insert_abs_sib begin
lemma untyped_inc:
assumes u: "untyped_inc m cs"
assumes d: "is_derived m src cap c"
assumes p: "\<not>should_be_parent_of c r cap f" "cap_aligned cap"
assumes r: "is_untyped_cap c \<longrightarrow> r"
shows "untyped_inc n (cs(dest \<mapsto> cap))"
proof -
from p d r
have u1: "\<not>is_untyped_cap c" by - (rule not_parent_not_untyped)
moreover
with d
have u2: "\<not>is_untyped_cap cap"
by (auto simp: is_derived_def cap_master_cap_def is_cap_simps
split: if_split_asm cap.splits)
ultimately
show ?thesis using u desc
unfolding untyped_inc_def
by (auto simp: descendants split: if_split_asm)
qed
end
(* IRQControlCap is never a derived cap (direct from is_derived_def). *)
lemma IRQ_not_derived [simp]:
"\<not>is_derived m src cap.IRQControlCap cap"
by (simp add: is_derived_def)
(* mdb_cte_at is insensitive to updates of the is_original_cap field:
   neither cte_wp_at nor the cdt depend on it. *)
lemma update_original_mdb_cte_at:
"mdb_cte_at (swp (cte_wp_at P) (s\<lparr>is_original_cap := x\<rparr>))
(cdt (s\<lparr>is_original_cap := x\<rparr>)) =
mdb_cte_at (swp (cte_wp_at P) s) (cdt s)"
by (clarsimp simp:mdb_cte_at_def)
(* Hoare triple: updating one CDT entry preserves mdb_cte_at, provided any
   newly-installed parent edge connects two slots already satisfying P. *)
lemma update_cdt_mdb_cte_at:
"\<lbrace>\<lambda>s. mdb_cte_at (swp (cte_wp_at P) s) (cdt s) \<and>
(case (f (cdt s)) of Some p \<Rightarrow> cte_wp_at P p s \<and> cte_wp_at P c s
| None \<Rightarrow> True)\<rbrace>
update_cdt (\<lambda>cdt. cdt(c := (f cdt)))
\<lbrace>\<lambda>xc s. mdb_cte_at (swp (cte_wp_at P) s) (cdt s)\<rbrace>"
apply (clarsimp simp: update_cdt_def gets_def get_def set_cdt_def
put_def bind_def return_def valid_def)
apply (clarsimp simp: mdb_cte_at_def split:option.splits)+
done
(* Hoare triple: set_cap preserves mdb_cte_at, provided the new cap
   satisfies P whenever the target slot participates in the CDT (as parent
   or child).  set_cap does not modify the cdt (mdb_set_cap). *)
lemma set_cap_mdb_cte_at:
"\<lbrace>\<lambda>s. mdb_cte_at (swp (cte_wp_at P) s) (cdt s) \<and>
(dest \<in> dom (cdt s)\<union> ran (cdt s) \<longrightarrow> P new_cap)\<rbrace>
set_cap new_cap dest
\<lbrace>\<lambda>xc s. mdb_cte_at (swp (cte_wp_at P) s) (cdt s)\<rbrace>"
apply (clarsimp simp:mdb_cte_at_def cte_wp_at_caps_of_state valid_def)
apply (simp add:mdb_set_cap)
apply (intro conjI)
apply (erule use_valid[OF _ set_cap_caps_of_state])
apply simp
apply (rule impI)
apply (erule_tac P = "x\<in> ran G" for x G in mp)
apply (rule ranI,simp)
apply (erule use_valid[OF _ set_cap_caps_of_state])
apply (drule spec)+
apply (drule_tac P = "cdt x y = z" for x y z in mp)
apply simp+
apply clarsimp
done
(* Any slot appearing in the CDT (as child or parent) satisfies the
   mdb_cte_at predicate P. *)
lemma mdb_cte_at_cte_wp_at:
"\<lbrakk>mdb_cte_at (swp (cte_wp_at P) s) (cdt s);
src \<in> dom (cdt s) \<or> src \<in> ran (cdt s)\<rbrakk>
\<Longrightarrow> cte_wp_at P src s"
apply (case_tac src)
apply (auto simp:mdb_cte_at_def ran_def)
done
(* A loop-free CDT has no self-parent edge: m a = Some a would give a
   one-step cycle in the parent relation. *)
lemma no_mloop_weaken:
"\<lbrakk>no_mloop m\<rbrakk> \<Longrightarrow> m a \<noteq> Some a"
apply (clarsimp simp:no_mloop_def cdt_parent_rel_def)
apply (subgoal_tac "(a,a)\<in> {(x, y). is_cdt_parent m x y}")
apply (drule r_into_trancl')
apply (drule_tac x = "fst a" in spec)
apply (drule_tac x = "snd a" in spec)
apply clarsimp
apply(simp add:is_cdt_parent_def)
done
(* Corollary of no_mloop_weaken: a child is distinct from its parent. *)
lemma no_mloop_neq:
"\<lbrakk>no_mloop m;m a = Some b\<rbrakk> \<Longrightarrow> a\<noteq> b"
apply (rule ccontr)
apply (auto simp:no_mloop_weaken)
done
(* A derived cap is never NullCap (direct from is_derived_def). *)
lemma is_derived_not_Null:
"is_derived (cdt s) src cap capa \<Longrightarrow> cap \<noteq> cap.NullCap"
by (simp add:is_derived_def)
(* A slot whose cap is NullCap has no CDT parent entry, since mdb_cte_at
   would require a non-Null cap there. *)
lemma mdb_cte_at_cdt_null:
"\<lbrakk>caps_of_state s p = Some cap.NullCap;
mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrakk>
\<Longrightarrow> (cdt s) p = None"
apply (rule ccontr)
apply (clarsimp)
apply (drule_tac src=p in mdb_cte_at_cte_wp_at)
apply (fastforce)
apply (clarsimp simp:cte_wp_at_caps_of_state)
done
(* set_untyped_cap_as_full does not change the cdt (it only performs a
   set_cap, which leaves the cdt untouched). *)
lemma set_untyped_cap_as_full_cdt[wp]:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> set_untyped_cap_as_full src_cap cap src \<lbrace>\<lambda>_ s'. P (cdt s')\<rbrace>"
apply (clarsimp simp:set_untyped_cap_as_full_def)
apply (wp set_cap_rvk_cdt_ct_ms)
done
(* set_untyped_cap_as_full preserves mdb_cte_at for any slot predicate P
   that is insensitive to free_index updates (congruence assumption
   localcong) — the operation only bumps the src cap's free index. *)
lemma mdb_cte_at_set_untyped_cap_as_full:
assumes localcong:"\<And>a cap. P (cap\<lparr>free_index:= a\<rparr>) = P cap"
shows "
\<lbrace>\<lambda>s. mdb_cte_at (swp (cte_wp_at P) s) (cdt s) \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s'. mdb_cte_at (swp (cte_wp_at P) s') (cdt s') \<rbrace>"
apply (clarsimp simp:set_untyped_cap_as_full_def split del:if_split)
apply (rule hoare_pre)
apply (wp set_cap_mdb_cte_at)
apply clarsimp
apply (unfold mdb_cte_at_def)
apply (intro conjI impI,elim allE domE ranE impE,simp)
apply (clarsimp simp:cte_wp_at_caps_of_state cong:local.localcong)
apply (elim allE ranE impE,simp)
apply (clarsimp simp:cte_wp_at_caps_of_state cong:local.localcong)
done
(* set_untyped_cap_as_full does not change the is_original_cap map. *)
lemma set_untyped_cap_as_full_is_original[wp]:
"\<lbrace>\<lambda>s. P (is_original_cap s)\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s'. P (is_original_cap s') \<rbrace>"
apply (simp add:set_untyped_cap_as_full_def split del:if_split)
apply (rule hoare_pre)
apply wp
apply auto
done
(* ut_revocable is unaffected by updating the free index of an existing
   cap: untypedness and revocability do not depend on free_index. *)
lemma free_index_update_ut_revocable[simp]:
"ms src = Some src_cap \<Longrightarrow>
ut_revocable P (ms (src \<mapsto> (src_cap\<lparr>free_index:=a\<rparr>))) = ut_revocable P ms"
unfolding ut_revocable_def
apply (rule iffI)
apply clarify
apply (drule_tac x = p in spec)
apply (case_tac "p = src")
apply clarsimp+
done
(* irq_revocable is unaffected by a free-index update: a free_index update
   cannot turn a cap into (or out of) IRQControlCap. *)
lemma free_index_update_irq_revocable[simp]:
"ms src = Some src_cap \<Longrightarrow>
irq_revocable P (ms(src \<mapsto> src_cap\<lparr>free_index:=a\<rparr>)) = irq_revocable P ms"
unfolding irq_revocable_def
apply (rule iffI)
apply clarify
apply (drule_tac x = p in spec)
apply (case_tac "p = src")
apply (clarsimp simp:free_index_update_def)+
apply (simp add: free_index_update_def split:cap.splits)
done
(* reply_master_revocable is unaffected by a free-index update: the update
   cannot create or destroy a master reply cap. *)
lemma free_index_update_reply_master_revocable[simp]:
"ms src = Some src_cap \<Longrightarrow>
reply_master_revocable P (ms(src \<mapsto> src_cap\<lparr>free_index:=a\<rparr>)) =
reply_master_revocable P ms"
unfolding reply_master_revocable_def
apply (rule iffI)
apply clarify
apply (drule_tac x = p in spec)
apply (case_tac "p = src")
apply (clarsimp simp:free_index_update_def is_master_reply_cap_def
split:cap.splits)+
done
(* Contrapositive in rule form, used to reverse implications in proofs. *)
lemma imp_rev: "\<lbrakk>a\<longrightarrow>b;\<not>b\<rbrakk> \<Longrightarrow> \<not> a" by auto
(* Automatically derive that update_cdt and set_original preserve
   cte_wp_at P p (neither touches the cap storage). *)
crunch cte_wp_at[wp]: update_cdt, set_original "\<lambda>s. cte_wp_at P p s"
(wp: crunch_wps)
(* cte_wp_at after cap_insert: at dest the inserted cap must satisfy P;
   any other slot except src keeps its property.  (Weak: says nothing
   about src, whose untyped cap may have its free index updated.) *)
lemma cap_insert_weak_cte_wp_at:
"\<lbrace>(\<lambda>s. if p = dest then P cap else p \<noteq> src \<and> cte_wp_at P p s)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>uu. cte_wp_at P p\<rbrace>"
unfolding cap_insert_def error_def set_untyped_cap_as_full_def
apply (simp add: bind_assoc split del: if_split )
apply (wp set_cap_cte_wp_at hoare_vcg_if_lift hoare_vcg_imp_lift get_cap_wp | simp | intro conjI impI allI)+
apply (auto simp: cte_wp_at_def)
done
(* mdb_cte_at ignores the extended (trans_state) component of the state. *)
lemma mdb_cte_at_more_swp[simp]: "mdb_cte_at
(swp (cte_wp_at P)
(trans_state f s)) =
mdb_cte_at
(swp (cte_wp_at P)
(s))"
apply (simp add: swp_def)
done
(* cap_insert preserves mdb_cte_at for the non-Null predicate: after the
   insertion both endpoints of every CDT edge still hold non-Null caps.
   Requires a loop-free cdt, a valid inserted cap derived from src's cap,
   and distinct src/dest slots. *)
lemma cap_insert_mdb_cte_at:
"\<lbrace>(\<lambda>s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)) and (\<lambda>s. no_mloop (cdt s))
and valid_cap cap and
(\<lambda>s. cte_wp_at (is_derived (cdt s) src cap) src s) and
K (src \<noteq> dest) \<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_ s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrace>"
unfolding cap_insert_def
apply (wp | simp cong: update_original_mdb_cte_at split del: if_split)+
apply (wp update_cdt_mdb_cte_at set_cap_mdb_cte_at[simplified swp_def] | simp split del: if_split)+
apply wps
apply (wp valid_case_option_post_wp hoare_vcg_if_lift hoare_impI mdb_cte_at_set_untyped_cap_as_full[simplified swp_def]
set_cap_cte_wp_at get_cap_wp)+
apply (clarsimp simp:free_index_update_def split:cap.splits)
apply (wp)+
apply (clarsimp simp:conj_comms split del:if_split cong:prod.case_cong_weak)
apply (wps)
apply (wp valid_case_option_post_wp get_cap_wp hoare_vcg_if_lift
hoare_impI set_untyped_cap_as_full_cte_wp_at )+
(* discharge the verification conditions: every slot touched by the new
   CDT edge holds a non-Null cap, using is_derived_not_Null for dest *)
apply (unfold swp_def)
apply (intro conjI | clarify)+
apply (clarsimp simp:free_index_update_def split:cap.splits)
apply (drule mdb_cte_at_cte_wp_at[simplified swp_def])
apply simp
apply (simp add:cte_wp_at_caps_of_state)
apply (clarsimp split del: if_split split:option.splits
simp: cte_wp_at_caps_of_state not_sym[OF is_derived_not_Null] neq_commute)+
apply (drule imp_rev)
apply (clarsimp split:if_splits cap.splits
simp:free_index_update_def is_cap_simps masked_as_full_def)
apply (subst (asm) mdb_cte_at_def,elim allE impE,simp,clarsimp simp:cte_wp_at_caps_of_state)+
apply (clarsimp split: if_splits cap.splits
simp: free_index_update_def is_cap_simps masked_as_full_def)
apply (subst (asm) mdb_cte_at_def,elim allE impE,simp,clarsimp simp:cte_wp_at_caps_of_state)+
done
(* Rephrase mdb_cte_at from the cte_wp_at formulation to a direct
   caps_of_state formulation. *)
lemma mdb_cte_at_rewrite:
"\<lbrakk>mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrakk>
\<Longrightarrow> mdb_cte_at (\<lambda>p. \<exists>c. (caps_of_state s) p = Some c \<and> cap.NullCap \<noteq> c)
(cdt s)"
apply (clarsimp simp:mdb_cte_at_def)
apply (drule spec)+
apply (erule impE)
apply simp
apply (clarsimp simp:cte_wp_at_caps_of_state)
done
(* untyped_mdb is invariant under updating the free index of an existing
   cap: untyped ranges and object refs do not depend on free_index. *)
lemma untyped_mdb_update_free_index:
"\<lbrakk>m src = Some capa;m' = m (src\<mapsto> capa\<lparr>free_index :=x\<rparr>) \<rbrakk> \<Longrightarrow>
untyped_mdb c (m') = untyped_mdb c (m)"
apply (rule iffI)
apply (clarsimp simp:untyped_mdb_def)
apply (drule_tac x = a in spec)
apply (drule_tac x = b in spec)
apply (drule_tac x = aa in spec)
apply (drule_tac x = ba in spec)
apply (case_tac "src = (a,b)")
apply (case_tac "src = (aa,ba)")
apply (clarsimp simp:is_cap_simps free_index_update_def)
apply (drule_tac x = "capa\<lparr>free_index :=x\<rparr>" in spec)
apply (clarsimp simp:is_cap_simps free_index_update_def)
apply (drule_tac x = cap' in spec)
apply (clarsimp split:if_split_asm)+
apply (clarsimp simp:untyped_mdb_def)
apply (case_tac "src = (a,b)")
apply (clarsimp simp:is_cap_simps free_index_update_def split:cap.split_asm)+
done
(* Setting an untyped cap's free index to the maximum makes its usable
   range empty: everything is considered allocated. *)
lemma usable_untyped_range_empty[simp]:
"is_untyped_cap cap \<Longrightarrow> usable_untyped_range (max_free_index_update cap) = {}"
by (clarsimp simp:is_cap_simps free_index_update_def cap_aligned_def max_free_index_def)
(* untyped_inc is preserved by maximising a cap's free index: the untyped
   range is unchanged and the usable range only shrinks (to empty). *)
lemma untyped_inc_update_free_index:
"\<lbrakk>m src = Some cap; m' = m (src \<mapsto> (max_free_index_update cap));
untyped_inc c m\<rbrakk> \<Longrightarrow>
untyped_inc c m'"
apply (unfold untyped_inc_def)
apply (intro allI impI)
apply (drule_tac x = p in spec)
apply (drule_tac x = p' in spec)
apply (case_tac "p = src")
apply (simp del:fun_upd_apply split_paired_All)
apply (clarsimp split:if_splits)+
done
(* A free-index update cannot create or destroy a ReplyCap at any slot:
   membership of ReplyCap is unchanged by the update. *)
lemma reply_cap_id_free_index:
"\<lbrakk>m src = Some capa; m' = m (src \<mapsto> capa\<lparr>free_index :=x\<rparr>)\<rbrakk> \<Longrightarrow>
m' ptr = Some (ReplyCap t master rights) \<longleftrightarrow> m ptr = Some (ReplyCap t master rights)"
by (rule iffI)
(clarsimp simp add: free_index_update_def split:if_splits cap.splits)+
(* reply_mdb is invariant under a free-index update, since both of its
   conjuncts only talk about ReplyCaps (reply_cap_id_free_index). *)
lemma reply_mdb_update_free_index:
"\<lbrakk>m src = Some capa; m' = m (src \<mapsto> capa\<lparr>free_index :=x\<rparr>)\<rbrakk> \<Longrightarrow>
reply_mdb c m' = reply_mdb c m"
by (rule iffI)
(simp only: reply_mdb_def reply_caps_mdb_def reply_masters_mdb_def reply_cap_id_free_index)+
(* set_untyped_cap_as_full preserves valid_mdb: the only change is bumping
   the src cap's free index to maximum, and every component of valid_mdb
   is invariant under free-index updates (lemmas above). *)
lemma set_untyped_cap_as_full_valid_mdb:
"\<lbrace>valid_mdb and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap c src
\<lbrace>\<lambda>rv. valid_mdb\<rbrace>"
apply (simp add:valid_mdb_def set_untyped_cap_as_full_def split del: if_split)
apply (wp set_cap_mdb_cte_at)
apply (wps set_cap_rvk_cdt_ct_ms)
apply wpsimp+
apply (intro conjI impI)
apply (clarsimp simp:is_cap_simps free_index_update_def split:cap.splits)+
apply (simp_all add:cte_wp_at_caps_of_state)
unfolding fun_upd_def[symmetric]
apply (simp_all add: untyped_mdb_update_free_index reply_mdb_update_free_index
untyped_inc_update_free_index valid_arch_mdb_updates)
apply (erule descendants_inc_minor)
apply (clarsimp simp:cte_wp_at_caps_of_state swp_def)
apply (clarsimp simp: free_index_update_def cap_range_def split:cap.splits)
done
(* Setting the free index of an untyped cap to any idx between the current
   index and 2^cap_bits preserves valid_mdb.  Each conjunct of valid_mdb is
   discharged separately in the Isar proof below; the key fact is that
   raising the free index keeps the untyped range fixed and only shrinks
   the usable range (subset_range). *)
lemma set_free_index_valid_mdb:
"\<lbrace>\<lambda>s. valid_objs s \<and> valid_mdb s \<and> cte_wp_at ((=) cap ) cref s \<and>
(free_index_of cap \<le> idx \<and> is_untyped_cap cap \<and> idx \<le> 2^cap_bits cap)\<rbrace>
set_cap (free_index_update (\<lambda>_. idx) cap) cref
\<lbrace>\<lambda>rv s'. valid_mdb s'\<rbrace>"
apply (simp add:valid_mdb_def)
apply (rule hoare_pre)
apply (wp set_cap_mdb_cte_at)
apply (wps set_cap_rvk_cdt_ct_ms)
apply wp
apply (clarsimp simp: cte_wp_at_caps_of_state is_cap_simps free_index_of_def
reply_master_revocable_def irq_revocable_def reply_mdb_def
simp del: untyped_range.simps usable_untyped_range.simps)
unfolding fun_upd_def[symmetric]
apply (simp)
apply (frule(1) caps_of_state_valid)
proof(intro conjI impI)
fix s bits f r dev
(* untyped_mdb: the untyped range is unchanged by the index update *)
assume mdb:"untyped_mdb (cdt s) (caps_of_state s)"
assume cstate:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)" (is "?m cref = Some ?srccap")
show "untyped_mdb (cdt s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
apply (rule untyped_mdb_update_free_index
[where capa = ?srccap and m = "caps_of_state s" and src = cref,
unfolded free_index_update_def,simplified,THEN iffD2])
apply (simp add:cstate mdb)+
done
(* valid_arch_mdb: by the corresponding arch-level update lemma *)
assume arch_mdb:"valid_arch_mdb (is_original_cap s) (caps_of_state s)"
show "valid_arch_mdb (is_original_cap s) (caps_of_state s(cref \<mapsto> UntypedCap dev r bits idx))"
apply (rule valid_arch_mdb_updates(1)[where capa = ?srccap
and m="caps_of_state s" and src=cref,
unfolded free_index_update_def, simplified, THEN iffD2])
apply (simp add: cstate arch_mdb)+
done
assume inc: "untyped_inc (cdt s) (caps_of_state s)"
have untyped_range_simp: "untyped_range (cap.UntypedCap dev r bits f) = untyped_range (cap.UntypedCap dev r bits idx)"
by simp
assume valid: "s \<turnstile> cap.UntypedCap dev r bits f"
assume cmp: "f \<le> idx" "idx \<le> 2 ^ bits"
(* raising the free index can only shrink the usable range *)
have subset_range: "usable_untyped_range (cap.UntypedCap dev r bits idx) \<subseteq> usable_untyped_range (cap.UntypedCap dev r bits f)"
using cmp valid
apply (clarsimp simp:valid_cap_def cap_aligned_def)
apply (rule word_plus_mono_right)
apply (rule of_nat_mono_maybe_le[THEN iffD1])
apply (subst word_bits_def[symmetric])
apply (erule less_le_trans[OF _ power_increasing])
apply simp
apply simp
apply (subst word_bits_def[symmetric])
apply (erule le_less_trans)
apply (erule less_le_trans[OF _ power_increasing])
apply simp+
apply (erule is_aligned_no_wrap')
apply (rule word_of_nat_less)
apply (simp add: word_bits_def)
done
note blah[simp del] = untyped_range.simps usable_untyped_range.simps
(* untyped_inc: split on whether each quantified slot is cref *)
show "untyped_inc (cdt s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
using inc cstate
apply (unfold untyped_inc_def)
apply (intro allI impI)
apply (drule_tac x = p in spec)
apply (drule_tac x = p' in spec)
apply (case_tac "p = cref")
apply (simp)
apply (case_tac "p' = cref")
apply simp
apply (simp add:untyped_range_simp)
apply (intro conjI impI)
apply (simp)
apply (elim conjE)
apply (drule disjoint_subset2[OF subset_range,rotated])
apply simp+
using subset_range
apply clarsimp
apply (case_tac "p' = cref")
apply simp
apply (intro conjI)
apply (elim conjE)
apply (thin_tac "P\<longrightarrow>Q" for P Q)+
apply (simp add:untyped_range_simp)+
apply (intro impI)
apply (elim conjE | simp)+
apply (drule disjoint_subset2[OF subset_range,rotated])
apply simp
apply (intro impI)
apply (elim conjE | simp add:untyped_range_simp)+
apply (intro impI)
apply (elim conjE | simp add:untyped_range_simp)+
using subset_range
apply clarsimp+
done
(* ut_revocable, reply_caps_mdb, reply_masters_mdb: the updated cap stays
   an UntypedCap, so the reply/revocable structure is untouched *)
assume "ut_revocable (is_original_cap s) (caps_of_state s)"
thus "ut_revocable (is_original_cap s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
using cstate
by (fastforce simp:ut_revocable_def)
assume "reply_caps_mdb (cdt s) (caps_of_state s)"
thus "reply_caps_mdb (cdt s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
using cstate
apply (simp add:reply_caps_mdb_def del:split_paired_All split_paired_Ex)
apply (intro allI impI conjI)
apply (drule spec)+
apply (erule(1) impE)
apply (erule exE)+
apply (rule_tac x = ptr' in exI)
apply simp+
apply fastforce
done
assume "reply_masters_mdb (cdt s) (caps_of_state s)"
thus "reply_masters_mdb (cdt s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
apply (simp add:reply_masters_mdb_def del:split_paired_All split_paired_Ex)
apply (intro allI impI ballI)
apply (erule exE)
apply (elim allE impE)
apply fastforce
using cstate
apply fastforce
done
(* descendants_inc: cap_range is invariant under the index update *)
assume mdb:"mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)"
and desc_inc:"descendants_inc (cdt s) (caps_of_state s)"
and cte:"caps_of_state s cref = Some (cap.UntypedCap dev r bits f)"
show "descendants_inc (cdt s) (caps_of_state s(cref \<mapsto> cap.UntypedCap dev r bits idx))"
using mdb cte
apply (clarsimp simp:swp_def cte_wp_at_caps_of_state)
apply (erule descendants_inc_minor[OF desc_inc])
apply (clarsimp simp:cap_range_def untyped_range.simps)
done
qed
(* Writing an arbitrary cap into a slot that currently holds NullCap preserves
   descendants_inc: mdb_cte_at guarantees every cdt edge has non-Null caps at
   both ends, so a NullCap slot can appear on neither side of the parent
   relation and its cap contents are never compared. *)
lemma descendants_inc_upd_nullcap:
"\<lbrakk> mdb_cte_at (\<lambda>p. \<exists>c. cs p = Some c \<and> cap.NullCap \<noteq> c) m;
descendants_inc m cs;
cs slot = Some cap.NullCap\<rbrakk>
\<Longrightarrow> descendants_inc m (cs(slot \<mapsto> cap))"
apply (simp add:descendants_inc_def descendants_of_def del:split_paired_All)
apply (intro allI impI)
apply (rule conjI)
(* case 1: slot cannot be a cdt descendant — an incoming edge into a
   NullCap slot would contradict mdb_cte_at *)
apply (intro allI impI)
apply (drule spec)+
apply (erule(1) impE)
apply (drule tranclD2)
apply (clarsimp simp:cdt_parent_rel_def is_cdt_parent_def)
apply (drule(1) mdb_cte_atD)
apply clarsimp
(* case 2: symmetric — slot cannot be a cdt ancestor either *)
apply (intro allI impI)
apply (drule spec)+
apply (erule(1) impE)
apply (drule tranclD)
apply (clarsimp simp:cdt_parent_rel_def is_cdt_parent_def)
apply (drule(1) mdb_cte_atD)
apply clarsimp
done
(* The free_index field is payload only; updating it cannot break alignment. *)
lemma cap_aligned_free_index_update[simp]:
"cap_aligned capa \<Longrightarrow> cap_aligned (capa\<lparr>free_index :=x\<rparr>)"
apply (case_tac capa)
apply (clarsimp simp: cap_aligned_def free_index_update_def)+
done
(* Function updates at distinct keys commute. *)
lemma upd_commute:
"src \<noteq> dest \<Longrightarrow> (m(dest \<mapsto> cap, src \<mapsto> capa))
= (m(src \<mapsto> capa, dest \<mapsto> cap))"
apply (rule ext)
apply clarsimp
done
(* cap_class is insensitive to the free index. *)
lemma cap_class_free_index_upd[simp]:
"cap_class (free_index_update f cap) = cap_class cap"
by (simp add:free_index_update_def split:cap.splits)
(* FIXME: Move To CSpace_I *)
(* cap_range is insensitive to the free index (record-update form). *)
lemma cap_range_free_index_update[simp]:
"cap_range (capa\<lparr>free_index:=x\<rparr>) = cap_range capa"
by(auto simp:cap_range_def free_index_update_def split:cap.splits)
(* FIXME: Move To CSpace_I *)
(* cap_range is insensitive to the free index (functional-update form). *)
lemma cap_range_free_index_update2[simp]:
"cap_range (free_index_update f cap) = cap_range cap"
by (auto simp:cap_range_def free_index_update_def split:cap.splits)
(* Hoare triple: cap_insert preserves valid_mdb, provided the inserted cap is
   valid and is_derived from the cap at src, and src \<noteq> dest. The proof
   splits on should_be_parent_of: dest is inserted either as a child of src
   (mdb_insert_abs) or as a sibling (mdb_insert_abs_sib), and each mdb
   invariant conjunct is discharged for both cases, additionally accounting
   for the possible max_free_index update of the source untyped cap. *)
lemma cap_insert_mdb [wp]:
"\<lbrace>valid_mdb and valid_cap cap and valid_objs and
(\<lambda>s. cte_wp_at (is_derived (cdt s) src cap) src s)
and K (src \<noteq> dest) \<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_. valid_mdb\<rbrace>"
apply (simp add:valid_mdb_def)
apply (wp cap_insert_mdb_cte_at)
apply (simp add: cap_insert_def set_untyped_cap_as_full_def update_cdt_def set_cdt_def bind_assoc)
apply (wp | simp del: fun_upd_apply split del: if_split)+
apply (rule hoare_lift_Pf3[where f="is_original_cap"])
apply (wp set_cap_caps_of_state2 get_cap_wp |simp del: fun_upd_apply split del: if_split)+
apply (clarsimp simp: cte_wp_at_caps_of_state split del: if_split)
(* establish the two insertion locales used below *)
apply (subgoal_tac "mdb_insert_abs (cdt s) src dest")
prefer 2
apply (rule mdb_insert_abs.intro,simp+)
apply (erule mdb_cte_at_cdt_null,simp)
apply (rule mdb_cte_at_Null_descendants)
apply (assumption)
apply (simp add:mdb_cte_at_rewrite)
apply (subgoal_tac "mdb_insert_abs_sib (cdt s) src dest")
prefer 2
apply (erule mdb_insert_abs_sib.intro)
(* case split: dest becomes a child of src, or a sibling *)
apply (case_tac "should_be_parent_of capa (is_original_cap s src) cap (is_cap_revocable cap capa)")
apply simp
apply (frule (4) mdb_insert_abs.untyped_mdb)
apply (frule (4) mdb_insert_abs.reply_mdb)
apply (simp)
apply (rule conjI)
apply (simp add: no_mloop_def mdb_insert_abs.parency)
apply (intro allI impI conjI)
apply (rule_tac m1 = "caps_of_state s(dest\<mapsto> cap)"
and src1 = src in iffD2[OF untyped_mdb_update_free_index,rotated,rotated])
apply (simp add:fun_upd_twist)+
apply (drule_tac cs' = "caps_of_state s(src \<mapsto> max_free_index_update capa)" in descendants_inc_minor)
apply (clarsimp simp:cte_wp_at_caps_of_state swp_def)
apply clarsimp
apply (subst upd_commute)
apply simp
apply (erule(1) mdb_insert_abs.descendants_inc)
apply simp
apply (clarsimp dest!:is_derived_cap_class_range)
apply (rule notI)
apply (simp add: mdb_insert_abs.dest_no_parent_trancl)
apply (erule mdb_insert_abs.untyped_inc_simple)
apply (rule_tac m = "caps_of_state s" and src = src in untyped_inc_update_free_index)
apply (simp add:fun_upd_twist)+
apply (frule_tac p = src in caps_of_state_valid,assumption)
apply (clarsimp simp:valid_cap_def)
apply clarsimp+
apply (clarsimp simp:is_cap_simps)+
apply (simp add:is_derived_def)
apply (clarsimp simp:is_cap_simps)
apply (clarsimp simp:ut_revocable_def is_cap_simps is_cap_revocable_def)
apply (clarsimp simp: irq_revocable_def is_cap_revocable_def)
apply (intro impI conjI)
apply (clarsimp simp:is_cap_simps free_index_update_def)+
apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def is_cap_revocable_def)
apply clarsimp
apply (rule_tac m1 = "caps_of_state s(dest\<mapsto> cap)"
and src1 = src in reply_mdb_update_free_index[THEN iffD2])
apply ((simp add:fun_upd_twist)+)[3]
apply (clarsimp simp:is_cap_simps is_cap_revocable_def)
apply (rule valid_arch_mdb_updates, simp add: is_cap_simps)
apply simp
(* still the child case, but without the free-index update of src *)
apply (simp add: no_mloop_def mdb_insert_abs.parency)
apply (intro impI conjI allI)
apply (erule(1) mdb_insert_abs.descendants_inc)
apply simp
apply (clarsimp dest!:is_derived_cap_class_range)
apply (rule notI)
apply (simp add: mdb_insert_abs.dest_no_parent_trancl)
apply (frule_tac p = src in caps_of_state_valid,assumption)
apply (erule mdb_insert_abs.untyped_inc)
apply simp+
apply (simp add:valid_cap_def)
apply simp+
apply (clarsimp simp:is_derived_def is_cap_simps cap_master_cap_simps dest!:cap_master_cap_eqDs)
apply (clarsimp simp:ut_revocable_def is_cap_simps,simp add:is_cap_revocable_def)
apply (clarsimp simp: irq_revocable_def is_cap_revocable_def)
apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def is_cap_revocable_def)
apply (erule (1) valid_arch_mdb_updates)
(* sibling case: dest inherits src's parent (mdb_insert_abs_sib) *)
apply (clarsimp)
apply (intro impI conjI allI)
apply (rule_tac m1 = "caps_of_state s(dest\<mapsto> cap)"
and src1 = src in iffD2[OF untyped_mdb_update_free_index,rotated,rotated])
apply (frule mdb_insert_abs_sib.untyped_mdb_sib)
apply (simp add:fun_upd_twist)+
apply (drule_tac cs' = "caps_of_state s(src \<mapsto> max_free_index_update capa)" in descendants_inc_minor)
apply (clarsimp simp:cte_wp_at_caps_of_state swp_def)
apply clarsimp
apply (subst upd_commute)
apply simp
apply (erule(1) mdb_insert_abs_sib.descendants_inc)
apply simp
apply (clarsimp dest!:is_derived_cap_class_range)
apply (simp add: no_mloop_def)
apply (simp add: mdb_insert_abs_sib.parent_n_eq)
apply (simp add: mdb_insert_abs.dest_no_parent_trancl)
apply (rule_tac m = "caps_of_state s(dest\<mapsto> cap)" and src = src in untyped_inc_update_free_index)
apply (simp add:fun_upd_twist)+
apply (frule(3) mdb_insert_abs_sib.untyped_inc)
apply (frule_tac p = src in caps_of_state_valid,assumption)
apply (simp add:valid_cap_def)
apply (simp add:valid_cap_def,
clarsimp simp:ut_revocable_def,case_tac src,
clarsimp,simp)
apply (clarsimp simp:ut_revocable_def is_cap_simps is_cap_revocable_def)
apply (clarsimp simp: irq_revocable_def is_cap_revocable_def)
apply (intro impI conjI)
apply (clarsimp simp:is_cap_simps free_index_update_def)+
apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def is_cap_revocable_def)
apply (rule_tac m1 = "caps_of_state s(dest\<mapsto> cap)"
and src1 = src in iffD2[OF reply_mdb_update_free_index,rotated,rotated])
apply (frule mdb_insert_abs_sib.reply_mdb_sib,simp+)
apply (clarsimp simp:ut_revocable_def,case_tac src,clarsimp,simp)
apply (simp add:fun_upd_twist)+
apply (erule (1) valid_arch_mdb_updates, clarsimp)
(* sibling case without the free-index update *)
apply (frule mdb_insert_abs_sib.untyped_mdb_sib)
apply (simp add:fun_upd_twist)+
apply (erule(1) mdb_insert_abs_sib.descendants_inc)
apply simp
apply (clarsimp dest!: is_derived_cap_class_range)
apply (simp add: no_mloop_def)
apply (simp add: mdb_insert_abs_sib.parent_n_eq)
apply (simp add: mdb_insert_abs.dest_no_parent_trancl)
apply (frule(3) mdb_insert_abs_sib.untyped_inc)
apply (simp add:valid_cap_def)
apply (case_tac src,clarsimp simp:ut_revocable_def)
apply simp
apply (clarsimp simp:ut_revocable_def is_cap_simps,simp add: is_cap_revocable_def)
apply (clarsimp simp: irq_revocable_def is_cap_revocable_def)
apply (clarsimp simp: reply_master_revocable_def is_derived_def is_master_reply_cap_def)
apply (frule mdb_insert_abs_sib.reply_mdb_sib,simp+)
apply (clarsimp simp:reply_master_revocable_def,case_tac src,clarsimp)
apply simp
apply (erule (1) valid_arch_mdb_updates)
done
(* cte_at only inspects the kheap, so updating the cdt leaves it unchanged. *)
lemma swp_cte_at_cdt_update [iff]:
"swp cte_at (cdt_update f s) = swp cte_at s"
by (simp add: swp_def)
(* Likewise for updates to the is_original_cap map. *)
lemma swp_cte_at_mdb_rev_update [iff]:
"swp cte_at (is_original_cap_update f s) = swp cte_at s"
by (simp add: swp_def)
(* NullCap is never derived from anything, nor is anything derived from it. *)
lemma derived_not_Null [simp]:
"\<not>is_derived m p c cap.NullCap"
"\<not>is_derived m p cap.NullCap c"
by (auto simp: is_derived_def cap_master_cap_simps dest!: cap_master_cap_eqDs)
(* After set_untyped_cap_as_full, the slot holds masked_as_full of the old cap:
   either unchanged, or the untyped cap with its free index maxed out. *)
lemma set_untyped_cap_as_full_impact:
"\<lbrace>cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap c src
\<lbrace>\<lambda>r. cte_wp_at ((=) (masked_as_full src_cap c)) src\<rbrace>"
apply (simp only: set_untyped_cap_as_full_def)
apply (rule hoare_pre)
apply (wp set_cap_cte_wp_at)
apply (auto simp:masked_as_full_def elim:cte_wp_at_weakenE split:if_splits)
done
(* Derivation is insensitive to masking: maxing the free index of the source
   untyped cap does not change whether c is derived from it. *)
lemma is_derived_masked_as_full[simp]:
"is_derived (cdt a) src c (masked_as_full src_cap c) =
is_derived (cdt a) src c src_cap"
apply (case_tac c)
apply (simp_all add:masked_as_full_def)
apply (clarsimp simp:is_cap_simps split:if_splits)
apply (auto simp add:is_derived_def cap_master_cap_simps is_cap_simps intro!: is_derived_arch_non_arch)
done
(* Masking only touches the free index, which cap_range ignores. *)
lemma cap_range_maskedAsFull[simp]:
"cap_range (masked_as_full src_cap cap) = cap_range src_cap"
apply (clarsimp simp:masked_as_full_def is_cap_simps split:cap.splits if_splits)
done
(* Relations that agree on single steps have the same transitive closure. *)
lemma connect_eqv_singleE:
assumes single:"\<And>p p'. ((p,p') \<in> m) = ((p,p')\<in> m')"
shows "((p,p')\<in> m\<^sup>+) = ((p,p')\<in> m'\<^sup>+)"
apply (rule iffI)
(* m\<^sup>+ \<subseteq> m'\<^sup>+ by induction, transferring each step via `single` *)
apply (erule trancl_induct)
apply (rule r_into_trancl)
apply (clarsimp simp:single)
apply (drule iffD1[OF single])
apply simp
(* converse direction, symmetric argument *)
apply (erule trancl_induct)
apply (rule r_into_trancl)
apply (clarsimp simp:single)
apply (drule iffD2[OF single])
apply simp
done
(* Same statement for the reflexive-transitive closure. *)
lemma connect_eqv_singleE':
assumes single:"\<And>p p'. ((p,p') \<in> m) = ((p,p')\<in> m')"
shows "((p,p')\<in> m\<^sup>*) = ((p,p')\<in> m'\<^sup>*)"
apply (case_tac "p = p'")
apply simp
apply (rule iffI)
(* p \<noteq> p', so rtrancl membership reduces to trancl membership *)
apply (drule rtranclD)
apply clarsimp
apply (rule trancl_into_rtrancl)
apply (simp add:connect_eqv_singleE[OF single])
apply (drule rtranclD)
apply clarsimp
apply (rule trancl_into_rtrancl)
apply (simp add:connect_eqv_singleE[OF single])
done
(* Eta-style rewriting of an equality predicate into lambda form. *)
lemma identity_eq :"((=) x) = (\<lambda>c. c = x)"
by (rule ext) auto
(* Pointwise-equal predicates have equal universal closures. *)
lemma forall_eq: "(\<forall>x. P x = Q x) \<Longrightarrow> (\<forall>x. P x) = (\<forall>b. Q b)"
by auto
(* Quantification over a map's range equals quantification over its domain. *)
lemma ran_dom:"(\<forall>x\<in> ran m. P x) = (\<forall>y\<in> dom m. P (the (m y)))"
by (auto simp:ran_def dom_def)
(* Existence of a value at a key is membership in the domain. *)
lemma dom_in:
"(\<exists>x. c a = Some x) = (a\<in> dom c)"
by auto
(* Region comparison ignores the free index, so masking is invisible to it. *)
lemma same_region_as_masked_as_full[simp]:
"same_region_as (masked_as_full src_cap c) = same_region_as src_cap"
apply (rule ext)+
apply (case_tac src_cap;
clarsimp simp:masked_as_full_def is_cap_simps free_index_update_def split:if_splits)
done
(* Likewise, the parenthood test is insensitive to masking. *)
lemma should_be_parent_of_masked_as_full[simp]:
"should_be_parent_of (masked_as_full src_cap c) = should_be_parent_of src_cap"
apply (rule ext)+
apply (clarsimp simp:should_be_parent_of_def)
apply (case_tac src_cap; simp add:masked_as_full_def is_cap_simps free_index_update_def)
done
(* A cte being present means get_cap can return some cap at that slot. *)
lemma cte_at_get_cap:
"cte_at p s \<Longrightarrow> \<exists>c. (c, s) \<in> fst (get_cap p s)"
by (clarsimp simp add: cte_wp_at_def)
(* Strengthened form: the returned cap is the one cte_wp_at describes. *)
lemma cte_at_get_cap_wp:
"cte_at p s \<Longrightarrow> \<exists>c. (c, s) \<in> fst (get_cap p s) \<and> cte_wp_at ((=) c) p s"
by (clarsimp simp: cte_wp_at_def)
(* s_d_swap exchanges src and dest, leaving all other slots fixed; it is an
   involution used to describe the cdt after a cap swap. *)
definition
"s_d_swap p src dest \<equiv>
if p = src then dest
else if p = dest then src
else p"
(* Basic algebraic facts about s_d_swap. *)
lemma s_d_swap_0 [simp]: "\<lbrakk> a \<noteq>0; b \<noteq> 0 \<rbrakk> \<Longrightarrow> s_d_swap 0 a b = 0"
by (simp add: s_d_swap_def)
(* Swapping twice is the identity. *)
lemma s_d_swap_inv [simp]: "s_d_swap (s_d_swap p a b) a b = p"
by (simp add: s_d_swap_def)
lemma s_d_fst [simp]:
"s_d_swap b a b = a" by (simp add: s_d_swap_def)
lemma s_d_snd [simp]:
"s_d_swap a a b = b" by (simp add: s_d_swap_def)
lemma s_d_swap_0_eq [simp]:
"\<lbrakk> src \<noteq> 0; dest \<noteq> 0 \<rbrakk> \<Longrightarrow> (s_d_swap c src dest = 0) = (c = 0)"
by (simp add: s_d_swap_def)
(* Slots other than src and dest are untouched. *)
lemma s_d_swap_other:
"\<lbrakk> p \<noteq> src; p \<noteq> dest \<rbrakk> \<Longrightarrow> s_d_swap p src dest = p"
by (simp add: s_d_swap_def)
lemma s_d_swap_eq_src [simp]:
"(s_d_swap p src dest = src) = (p = dest)"
by (auto simp: s_d_swap_def)
lemma s_d_swap_eq_dest:
"src \<noteq> dest \<Longrightarrow> (s_d_swap p src dest = dest) = (p = src)"
by (simp add: s_d_swap_def)
(* s_d_swap is injective (it is a bijection). *)
lemma s_d_swap_inj [simp]:
"(s_d_swap p src dest = s_d_swap p' src dest) = (p = p')"
by (simp add: s_d_swap_def)
(* Locale capturing the cdt transformation performed by a cap swap between
   two distinct existing slots src and dest, in a state with valid_mdb.
   n' redirects parent pointers between src and dest; n additionally swaps
   the parents OF src and dest themselves. *)
locale mdb_swap_abs =
fixes m src dest s s'
fixes n'
defines "n' \<equiv> \<lambda>n. if m n = Some src then Some dest
else if m n = Some dest then Some src
else m n"
fixes n
defines "n \<equiv> n' (src := n' dest, dest := n' src)"
assumes valid_mdb: "valid_mdb s"
assumes src: "cte_at src s"
assumes dest: "cte_at dest s"
assumes m: "m = cdt s"
assumes neq [simp]: "src \<noteq> dest"
context mdb_swap_abs begin
lemmas neq' [simp] = neq [symmetric]
(* The cdt is loop-free, by valid_mdb. *)
lemma no_mloop:
"no_mloop m"
using valid_mdb
by (simp add: valid_mdb_def m)
(* Hence no slot is its own transitive ancestor ... *)
lemma no_loops [iff]:
"m \<Turnstile> p \<rightarrow> p = False"
using no_mloop
by (cases p) (clarsimp simp add: no_mloop_def)
(* ... nor its own direct parent. *)
lemma no_loops_d [iff]:
"m \<Turnstile> p \<leadsto> p = False"
by (fastforce dest: r_into_trancl)
lemma no_loops_m [iff]:
"(m p = Some p) = False"
apply clarsimp
apply (fold cdt_parent_defs)
apply simp
done
(* Abbreviation: swap restricted to this locale's src/dest. *)
definition
"s_d_swp p \<equiv> s_d_swap p src dest"
declare s_d_swp_def [simp]
(* Parenthood in the swapped cdt n is parenthood in m with both endpoints
   swapped. Proved in each direction by induction on the trancl. *)
lemma parency_m_n:
assumes "m \<Turnstile> p \<rightarrow> p'"
shows "n \<Turnstile> s_d_swp p \<rightarrow> s_d_swp p'" using assms
proof induct
case (base y)
thus ?case
apply (simp add: s_d_swap_def)
apply safe
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs)+
done
next
case (step x y)
thus ?case
apply -
apply (erule trancl_trans)
apply (simp add: s_d_swap_def split: if_split_asm)
apply safe
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs)+
done
qed
(* Converse direction: n-ancestry maps back to m-ancestry under the swap. *)
lemma parency_n_m:
assumes "n \<Turnstile> p \<rightarrow> p'"
shows "m \<Turnstile> s_d_swp p \<rightarrow> s_d_swp p'" using assms
proof induct
case (base y)
thus ?case
apply (simp add: s_d_swap_def)
apply safe
apply (rule r_into_trancl|
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)+
done
next
case (step x y)
thus ?case
apply -
apply (erule trancl_trans)
apply (simp add: s_d_swap_def split: if_split_asm)
apply safe
apply (simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
apply (rule r_into_trancl,
simp add: n_def n'_def cdt_parent_defs split: if_split_asm)
done
qed
(* Instantiate parency_m_n at swapped arguments; with s_d_swap_inv this
   gives the reverse implication needed for the iff below. *)
lemmas parency_m_n' =
parency_m_n [where p="s_d_swp p" and p'="s_d_swp p'" for p p', simplified, folded s_d_swp_def]
(* Characterisation of ancestry in the swapped cdt. *)
lemma parency:
"n \<Turnstile> p \<rightarrow> p' = m \<Turnstile> s_d_swp p \<rightarrow> s_d_swp p'"
by (blast intro: parency_n_m parency_m_n')
(* Descendant sets after the swap: take the pre-swap descendants of the
   swapped slot and exchange src/dest within the result set. *)
lemma descendants:
"descendants_of p n =
(let swap = \<lambda>S. S - {src,dest} \<union>
(if src \<in> S then {dest} else {}) \<union>
(if dest \<in> S then {src} else {}) in
swap (descendants_of (s_d_swp p) m))"
apply (simp add: Let_def parency descendants_of_def s_d_swap_def)
apply auto
done
end
(* NOTE: the following lemmata are currently not used: >>> *)
(* same_object_as is symmetric. *)
lemma same_object_as_commute:
"same_object_as c' c = same_object_as c c'"
apply (subgoal_tac "!c c'. same_object_as c' c --> same_object_as c c'")
apply (rule iffI)
apply (erule_tac x=c in allE, erule_tac x=c' in allE, simp)
apply (erule_tac x=c' in allE, erule_tac x=c in allE, simp)
by (auto simp:same_object_as_def bits_of_def same_aobject_as_commute split: cap.splits)
(* copy_of is symmetric. *)
lemma copy_of_commute:
"copy_of c' c = copy_of c c'"
apply (subgoal_tac "!c c'. copy_of c' c --> copy_of c c'")
apply (rule iffI)
apply (erule_tac x=c in allE, erule_tac x=c' in allE, simp)
apply (erule_tac x=c' in allE, erule_tac x=c in allE, simp)
apply clarsimp
apply (clarsimp simp: copy_of_def is_reply_cap_def is_master_reply_cap_def
same_object_as_commute
split: if_splits cap.splits)
by (simp_all add: same_object_as_def split: cap.splits)
(* weak_derived is symmetric, by symmetry of copy_of. *)
lemma weak_derived_commute:
"weak_derived c' c = weak_derived c c'"
by (auto simp: weak_derived_def copy_of_commute split: if_splits)
(* <<< END unused lemmata *)
(* weak_derived relates NullCap only to NullCap. *)
lemma weak_derived_Null:
"weak_derived c' c \<Longrightarrow> (c' = cap.NullCap) = (c = cap.NullCap)"
apply (clarsimp simp: weak_derived_def)
apply (erule disjE)
apply (clarsimp simp: copy_of_def split: if_split_asm)
apply (auto simp: is_cap_simps same_object_as_def
split: cap.splits)[1]
apply simp
done
(* weak_derived is reflexive. *)
lemma weak_derived_refl [intro!, simp]:
"weak_derived c c"
by (simp add: weak_derived_def)
(* ensure_no_children is equivalent to a direct test on descendants_of:
   success iff the slot has no cdt descendants, RevokeFirst otherwise. *)
lemma ensure_no_children_descendants:
"ensure_no_children p =
(\<lambda>s. if descendants_of p (cdt s) = {}
then returnOk () s
else throwError ExceptionTypes_A.RevokeFirst s)"
apply (rule ext)
apply (simp add: ensure_no_children_def bindE_def liftE_def gets_def
get_def bind_def return_def lift_def whenE_def)
apply (rule conjI)
(* empty-descendants case: no slot has p as parent *)
apply (clarsimp simp: descendants_of_def cdt_parent_defs)
apply fastforce
(* non-empty case: peel the first step of the trancl to find a direct child *)
apply (clarsimp simp: descendants_of_def cdt_parent_defs)
apply (drule tranclD)
apply clarsimp
done
(* Locale capturing the cdt transformation of a cap move from src to the
   (currently Null) slot dest: m'' deletes src after giving dest src's
   parent; m' additionally redirects children of src to dest. *)
locale mdb_move_abs =
fixes src dest and m :: cdt and s' s
fixes m''
defines "m'' \<equiv> \<lambda>r. if r = src then None else (m(dest := m src)) r"
fixes m'
defines "m' \<equiv> \<lambda>r. if m'' r = Some src
then Some dest
else (m(dest := m src, src := None)) r"
assumes valid_mdb: "valid_mdb s"
assumes dest_null: "cte_wp_at ((=) cap.NullCap) dest s"
assumes m: "m = cdt s"
assumes neq [simp]: "src \<noteq> dest"
context mdb_move_abs begin
(* dest holds NullCap, so by mdb_cte_at it has no parent in the cdt. *)
lemma dest_None:
"m dest = None"
using valid_mdb dest_null
unfolding valid_mdb_def mdb_cte_at_def
apply (clarsimp simp: m [symmetric])
apply (cases dest)
apply (rule classical)
apply (clarsimp simp: cte_wp_at_def)
apply fastforce
done
(* Consequently dest is nobody's descendant ... *)
lemma desc_dest [intro?, simp]:
"dest \<notin> descendants_of x m"
using dest_None
apply (clarsimp simp add: descendants_of_def)
apply (drule tranclD2)
apply (clarsimp simp: cdt_parent_of_def)
done
(* ... and has no descendants of its own. *)
lemma dest_desc:
"descendants_of dest m = {}"
using valid_mdb dest_null
unfolding valid_mdb_def mdb_cte_at_def
apply (clarsimp simp add: descendants_of_def m[symmetric])
apply (drule tranclD)
apply (clarsimp simp: cdt_parent_of_def)
apply (cases dest)
apply (clarsimp simp: cte_wp_at_def)
apply fastforce
done
lemmas neq' [simp] = neq [symmetric]
lemma no_mloop:
"no_mloop m"
using valid_mdb by (simp add: m valid_mdb_def)
lemma no_loops [iff]:
"m \<Turnstile> p \<rightarrow> p = False"
using no_mloop by (cases p) (clarsimp simp add: no_mloop_def)
(* In the post-move cdt m', src is fully disconnected: no children ... *)
lemma no_src_parent' [iff]:
"m' \<Turnstile> src \<leadsto> p = False"
by (simp add: m'_def m''_def cdt_parent_defs)
lemma no_src_parent_trans' [iff]:
"m' \<Turnstile> src \<rightarrow> p = False"
by (clarsimp dest!: tranclD)
(* Pre-move, dest has no descendants (restatements of dest_desc). *)
lemma no_dest_parent_trans [iff]:
"m \<Turnstile> dest \<rightarrow> p = False"
using dest_desc
by (fastforce simp add: descendants_of_def cdt_parent_defs)
lemma no_dest_parent [iff]:
"m \<turnstile> dest cdt_parent_of p = False"
by (fastforce dest: r_into_trancl)
lemma no_dest_parent_unfold [iff]:
"(m x = Some dest) = False"
using no_dest_parent
unfolding cdt_parent_defs
by simp
(* ... and no parent either, in m'. *)
lemma no_src_child [iff]:
"m' \<turnstile> p cdt_parent_of src = False"
by (simp add: cdt_parent_defs m'_def m''_def)
lemma no_src_child_trans [iff]:
"m' \<turnstile> p cdt_parent_of\<^sup>+ src = False"
by (clarsimp dest!: tranclD2)
lemma direct_src_loop_unfolded [iff]:
"(m src = Some src) = False"
by (fold cdt_parent_defs) (fastforce dest: r_into_trancl)
lemma mdb_cte_at:
"mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) m"
using valid_mdb by (simp add: valid_mdb_def m)
lemma dest_no_child [iff]:
"(m dest = Some x) = False"
using dest_None by simp
(* Single-step edges into/out of dest in m' correspond to edges of src in m. *)
lemma to_dest_direct [simp]:
"m' \<Turnstile> x \<leadsto> dest = m \<Turnstile> x \<leadsto> src"
by (clarsimp simp add: m'_def m''_def cdt_parent_defs)
lemma from_dest_direct [simp]:
"m' \<Turnstile> dest \<leadsto> x = m \<Turnstile> src \<leadsto> x"
by (clarsimp simp add: m'_def m''_def cdt_parent_defs)
(* Ancestry from a slot p outside {src,dest} transfers from m to m';
   targets equal to src are renamed to dest. Proof by trancl induction. *)
lemma parent_m_m':
assumes p_neq: "p \<noteq> dest" "p \<noteq> src"
assumes px: "m \<Turnstile> p \<rightarrow> x"
shows "if x = src then m' \<Turnstile> p \<rightarrow> dest else m' \<Turnstile> p \<rightarrow> x" using px
proof induct
case (base y)
thus ?case using p_neq
apply simp
apply (rule conjI)
apply (fastforce simp add: cdt_parent_defs m'_def m''_def)
apply clarsimp
apply (rule r_into_trancl)
apply (clarsimp simp add: cdt_parent_defs m'_def m''_def)
done
next
case (step y z)
thus ?case
apply simp
apply (rule conjI)
apply (clarsimp split: if_split_asm)
apply (fastforce intro: trancl_trans)
apply (clarsimp split: if_split_asm)
apply (fastforce intro: trancl_trans)
apply (erule trancl_trans)
apply (rule r_into_trancl)
apply (simp add: cdt_parent_defs m'_def m''_def)
apply clarsimp
done
qed
(* Converse transfer, m' back to m; targets equal to dest rename to src. *)
lemma parent_m'_m:
assumes p_neq: "p \<noteq> dest" "p \<noteq> src"
assumes px: "m' \<Turnstile> p \<rightarrow> x"
shows "if x = dest then m \<Turnstile> p \<rightarrow> src else m \<Turnstile> p \<rightarrow> x" using px
proof induct
case (base y)
thus ?case using p_neq
apply simp
apply (rule conjI)
apply (fastforce simp add: cdt_parent_defs m'_def m''_def)
apply clarsimp
apply (rule r_into_trancl)
apply (clarsimp simp add: cdt_parent_defs m'_def m''_def split: if_split_asm)
done
next
case (step y z)
thus ?case
apply simp
apply (rule conjI)
apply (clarsimp split: if_split_asm)
apply (fastforce intro: trancl_trans)
apply (clarsimp split: if_split_asm)
apply (fastforce intro: trancl_trans)
apply (erule trancl_trans)
apply (rule r_into_trancl)
apply (simp add: cdt_parent_defs m'_def m''_def split: if_split_asm)
done
qed
(* Descendants of dest in the post-move cdt were descendants of src before. *)
lemma src_dest:
assumes d: "m' \<Turnstile> dest \<rightarrow> x"
shows "m \<Turnstile> src \<rightarrow> x" using d
proof induct
case (base y)
thus ?case
by (fastforce simp add: cdt_parent_defs m'_def m''_def split: if_split_asm)
next
(* step case analysed manually: the m'-edge y \<leadsto> z unfolds into the
   three ways m could relate y and z after the pointer rewiring *)
fix y z
assume dest: "m' \<Turnstile> dest \<rightarrow> y"
assume y: "m' \<Turnstile> y \<leadsto> z"
assume src: "m \<Turnstile> src \<rightarrow> y"
from src
have "y \<noteq> src" by clarsimp
moreover {
assume "m z = Some src"
with src
have "m \<Turnstile> src \<rightarrow> z" by (fastforce simp add: cdt_parent_defs)
}
moreover {
(* impossible: would create a loop src \<rightarrow> src in m *)
assume "m src = Some y"
hence "m \<Turnstile> y \<rightarrow> src"
by (fastforce simp add: cdt_parent_defs)
with src
have "m \<Turnstile> src \<rightarrow> src" by (rule trancl_trans)
hence False ..
hence "m \<Turnstile> src \<rightarrow> z" ..
}
moreover {
assume "m z = Some y"
hence "m \<Turnstile> y \<rightarrow> z" by (fastforce simp add: cdt_parent_defs)
with src
have "m \<Turnstile> src \<rightarrow> z" by (rule trancl_trans)
}
ultimately
show "m \<Turnstile> src \<rightarrow> z" using y
by (simp add: cdt_parent_defs m'_def m''_def split: if_split_asm)
qed
(* Converse: pre-move descendants of src become descendants of dest. *)
lemma dest_src:
assumes "m \<Turnstile> src \<rightarrow> x"
shows "m' \<Turnstile> dest \<rightarrow> x" using assms
proof induct
case (base y)
thus ?case
by (fastforce simp add: cdt_parent_defs m'_def m''_def)
next
case (step y z)
thus ?case
apply -
apply (erule trancl_trans)
apply (rule r_into_trancl)
apply (simp (no_asm) add: cdt_parent_defs m'_def m''_def)
apply (rule conjI)
apply (clarsimp simp: cdt_parent_defs)
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (drule trancl_trans, erule r_into_trancl)
apply simp
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (subgoal_tac "y = src")
apply simp
apply (clarsimp simp: cdt_parent_defs)
apply (clarsimp simp: cdt_parent_defs)
done
qed
(* Descendant sets after the move: src loses everything, dest takes over
   src's descendants, and in all other sets src is renamed to dest. *)
lemma descendants:
"descendants_of p m' =
(if p = src
then {}
else if p = dest
then descendants_of src m
else descendants_of p m - {src} \<union>
(if src \<in> descendants_of p m then {dest} else {}))" (is "?d = ?d'")
proof (rule set_eqI)
fix x
show "(x \<in> ?d) = (x \<in> ?d')"
apply (simp add: descendants_of_def)
apply safe
apply (fastforce simp: parent_m'_m)
apply (fastforce simp: parent_m_m')
apply (fastforce simp: parent_m_m')
apply (erule src_dest)
apply (erule dest_src)
apply (fastforce dest!: parent_m'_m split: if_split_asm)
apply (fastforce simp: parent_m_m')
done
qed
(* Pointwise ancestry characterisation, derived from the set equality. *)
lemma parency:
"(m' \<Turnstile> p \<rightarrow> p') =
(p \<noteq> src \<and> p' \<noteq> src \<and>
(if p = dest then m \<Turnstile> src \<rightarrow> p'
else m \<Turnstile> p \<rightarrow> p' \<or> (m \<Turnstile> p \<rightarrow> src \<and> p' = dest)))"
using descendants [where p=p]
apply (simp add: descendants_of_def cong: if_cong)
apply (drule eqset_imp_iff [where x=p'])
apply clarsimp
apply fastforce
done
end
(* A copy of an untyped cap is the cap itself (untyped caps are never
   related to a distinct cap by copy_of), in either argument position. *)
lemma copy_untyped1:
"\<lbrakk> copy_of cap cap'; is_untyped_cap cap' \<rbrakk> \<Longrightarrow> cap' = cap"
by (simp add: copy_of_def)
lemma copy_untyped2:
"\<lbrakk> copy_of cap cap'; is_untyped_cap cap \<rbrakk> \<Longrightarrow> cap' = cap"
apply (cases cap)
apply (auto simp: copy_of_def same_object_as_def is_cap_simps
split: if_split_asm cap.splits)
done
(* NullCap is not a copy of anything, in either direction. *)
lemma copy_of_Null [simp]:
"\<not>copy_of cap.NullCap c"
by (auto simp add: copy_of_def same_object_as_def is_cap_simps
split: cap.splits)
lemma copy_of_Null2 [simp]:
"\<not>copy_of c cap.NullCap"
by (auto simp add: copy_of_def same_object_as_def is_cap_simps)
(* Architecture-generic interface locale: the assumptions are the
   arch-dependent facts about weak_derived/copy_of that each architecture
   must discharge when instantiating this theory. *)
locale CSpace_AI_weak_derived =
fixes state_ext_t :: "'state_ext::state_ext itself"
assumes weak_derived_valid_cap:
"\<And>(s:: 'state_ext state) c c'. \<lbrakk> s \<turnstile> c; wellformed_cap c'; weak_derived c' c\<rbrakk> \<Longrightarrow> s \<turnstile> c'"
assumes copy_obj_refs:
"\<And>cap cap'. copy_of cap cap' \<Longrightarrow> obj_refs cap' = obj_refs cap"
assumes weak_derived_cap_class[simp]:
"\<And>cap src_cap. weak_derived cap src_cap \<Longrightarrow> cap_class cap = cap_class src_cap"
assumes weak_derived_obj_refs:
"\<And>dcap cap. weak_derived dcap cap \<Longrightarrow> obj_refs dcap = obj_refs cap"
assumes weak_derived_obj_ref_of:
"\<And>dcap cap. weak_derived dcap cap \<Longrightarrow> obj_ref_of dcap = obj_ref_of cap"
(* weak_derived caps cover the same untyped range (arch-independent). *)
lemma weak_derived_untyped_range:
"weak_derived dcap cap \<Longrightarrow> untyped_range dcap = untyped_range cap"
by (cases dcap, auto simp: is_cap_simps weak_derived_def copy_of_def
same_object_as_def
split: if_split_asm cap.splits)
context CSpace_AI_weak_derived begin
(* Combines the range facts: weak_derived caps have equal cap_range. *)
lemma weak_derived_cap_range:
"\<And>dcap cap. weak_derived dcap cap \<Longrightarrow> cap_range dcap = cap_range cap"
by (simp add:cap_range_def weak_derived_untyped_range weak_derived_obj_refs)
end
(* mdb_move_abs extended with the arch-generic weak_derived interface, so
   move lemmas may use the cap_range/obj_refs facts from that locale. *)
locale mdb_move_abs_gen
= mdb_move_abs src dest m s' s m'' m'
+ CSpace_AI_weak_derived state_ext_t
for state_ext_t :: "'state_ext::state_ext itself"
and src dest m
and s' :: "'state_ext state"
and s :: "'state_ext state"
and m'' m'
context mdb_move_abs_gen begin
(* Moving a weak_derived cap from src to an empty dest preserves
   descendants_inc, since weak_derived caps agree on cap_range. *)
lemma descendants_inc:
notes split_paired_All[simp del]
assumes dc: "descendants_inc m cs"
assumes s: "cs src = Some src_cap"
assumes d: "cs dest = Some cap.NullCap"
assumes c: "weak_derived cap src_cap"
shows "descendants_inc m' (cs (dest \<mapsto> cap, src \<mapsto> cap.NullCap))"
using dc s d c
apply (simp add: descendants_inc_def descendants)
apply (intro allI conjI)
apply (intro impI allI)
apply (drule spec)+
apply (erule(1) impE)
apply (simp add:weak_derived_cap_range)
apply (simp add:descendants_of_def)
apply (intro impI)
apply (drule spec)+
apply (erule(1) impE)
apply (simp add:weak_derived_cap_range)
done
(* Moving a weak_derived cap preserves untyped_inc: weak_derived caps agree
   on untypedness, untyped range and usable range, so the slot-by-slot case
   analysis reduces to the original invariant instantiated at src. *)
lemma untyped_inc:
assumes ut: "untyped_inc m cs"
assumes s: "cs src = Some src_cap"
assumes d: "cs dest = Some cap.NullCap"
assumes c: "weak_derived cap src_cap"
shows "untyped_inc m' (cs (dest \<mapsto> cap, src \<mapsto> cap.NullCap))"
proof -
(* the three agreement facts that make cap interchangeable with src_cap *)
from c
have "is_untyped_cap cap = is_untyped_cap src_cap"
"untyped_range cap = untyped_range src_cap"
"is_untyped_cap cap \<longrightarrow> usable_untyped_range cap = usable_untyped_range src_cap"
by (auto simp: copy_of_def same_object_as_def is_cap_simps weak_derived_def
split: if_split_asm cap.splits)
with ut s d
show ?thesis
apply (simp add: untyped_inc_def descendants del: split_paired_All split del: if_split)
apply (intro allI)
(* case analysis on whether each quantified slot is src or dest *)
apply (case_tac "p = src")
apply (simp del: split_paired_All split del: if_split)
apply (simp del: split_paired_All split del: if_split)
apply (case_tac "p = dest")
apply (simp del: split_paired_All split del: if_split)
apply (case_tac "p' = src")
apply (simp del: split_paired_All split del: if_split)+
apply (case_tac "p' = dest")
apply (simp del:split_paired_All split del:if_split)+
apply (intro impI allI conjI)
apply ((erule_tac x=src in allE,erule_tac x=p' in allE,simp)+)[5]
apply (erule_tac x=src in allE)
apply (erule_tac x=p' in allE)
apply simp
apply (intro conjI impI)
apply (simp del:split_paired_All split del:if_split)+
apply (case_tac "p' = src")
apply (simp del: split_paired_All split del: if_split)+
apply (case_tac "p' = dest")
apply (simp del:split_paired_All split del:if_split)+
apply (intro impI allI conjI)
apply (erule_tac x=p in allE,erule_tac x=src in allE)
apply simp
apply (intro conjI impI)
apply (simp del:split_paired_All split del:if_split)+
apply (intro conjI impI allI)
apply (erule_tac x=p in allE,erule_tac x=p' in allE)
apply simp
done
qed
end
(* weak_derived fixes an untyped cap completely. *)
lemma weak_derived_untyped2:
"\<lbrakk> weak_derived cap cap'; is_untyped_cap cap \<rbrakk> \<Longrightarrow> cap' = cap"
by (auto simp: weak_derived_def copy_untyped2)
(* NullCap is weak_derived only from/to NullCap. *)
lemma weak_derived_Null_eq [simp]:
"(weak_derived NullCap cap) = (cap = NullCap)"
by (auto simp: weak_derived_def)
lemma weak_derived_eq_Null [simp]:
"(weak_derived cap NullCap) = (cap = NullCap)"
by (auto simp: weak_derived_def)
(* weak_derived preserves untypedness. *)
lemma weak_derived_is_untyped:
"weak_derived dcap cap \<Longrightarrow> is_untyped_cap dcap = is_untyped_cap cap"
by (cases dcap, auto simp: is_cap_simps weak_derived_def copy_of_def
same_object_as_def
split: if_split_asm cap.splits)
(* IRQControlCap is weak_derived only from itself. *)
lemma weak_derived_irq [simp]:
"weak_derived IRQControlCap cap = (cap = IRQControlCap)"
by (auto simp add: weak_derived_def copy_of_def same_object_as_def
split: cap.splits)
(* Bundle of range-preservation facts, usable inside the interface locale. *)
lemmas (in CSpace_AI_weak_derived) weak_derived_ranges =
weak_derived_is_untyped
weak_derived_untyped_range
weak_derived_obj_refs
(* weak_derived preserves reply-cap and master-reply-cap status. *)
lemma weak_derived_is_reply:
"weak_derived dcap cap \<Longrightarrow> is_reply_cap dcap = is_reply_cap cap"
by (auto simp: weak_derived_def copy_of_def
same_object_as_def is_cap_simps
split: if_split_asm cap.split_asm)
lemma weak_derived_is_reply_master:
"weak_derived dcap cap \<Longrightarrow> is_master_reply_cap dcap = is_master_reply_cap cap"
by (auto simp: weak_derived_def copy_of_def
same_object_as_def is_cap_simps
split: if_split_asm cap.split_asm)
(* Anonymous Arch context: cap_asid/cap_asid_base/cap_vptr are
   arch-defined, but all yield None on non-arch caps. *)
context begin interpretation Arch .
lemma non_arch_cap_asid_vptr_None:
assumes "\<not> is_arch_cap cap"
shows "cap_asid cap = None"
and "cap_asid_base cap = None"
and "cap_vptr cap = None"
using assms by (cases cap; simp add: is_cap_simps cap_asid_def cap_asid_base_def cap_vptr_def)+
end
(* A cap is weak_derived from a ReplyCap iff it is the same ReplyCap up to
   its rights field R. *)
lemma weak_derived_Reply:
"weak_derived (cap.ReplyCap t m R) c = (\<exists> R'. (c = cap.ReplyCap t m R'))"
"weak_derived c (cap.ReplyCap t m R) = (\<exists> R'. (c = cap.ReplyCap t m R'))"
by (auto simp: weak_derived_def copy_of_def
same_object_as_def is_cap_simps
non_arch_cap_asid_vptr_None[simplified is_cap_simps]
split: if_split_asm cap.split_asm)
(* Bundle of reply-cap preservation facts for the interface locale. *)
lemmas (in CSpace_AI_weak_derived) weak_derived_replies =
weak_derived_is_reply
weak_derived_is_reply_master
weak_derived_obj_ref_of
(* weak_derived reply caps refer to the same object, in both directions. *)
lemma weak_derived_reply_equiv:
"\<lbrakk> weak_derived c c'; is_reply_cap c \<rbrakk> \<Longrightarrow> obj_ref_of c = obj_ref_of c' \<and> is_reply_cap c'"
"\<lbrakk> weak_derived c c'; is_reply_cap c' \<rbrakk> \<Longrightarrow> obj_ref_of c = obj_ref_of c' \<and> is_reply_cap c"
by (auto simp: weak_derived_def copy_of_def
same_object_as_def is_cap_simps
split: if_split_asm cap.split_asm)
context mdb_move_abs begin
(* Moving a weak_derived cap from src to dest preserves reply_caps_mdb:
   any ReplyCap at the moved slot is the same ReplyCap up to rights
   (weak_derived_Reply), so the master-cap witness is unaffected. *)
lemma reply_caps_mdb:
assumes r: "reply_caps_mdb m cs"
assumes s: "cs src = Some src_cap"
assumes c: "weak_derived cap src_cap"
shows "reply_caps_mdb
m'
(cs (dest \<mapsto> cap, src \<mapsto> cap.NullCap))"
unfolding reply_caps_mdb_def m'_def m''_def
using r c s
apply (intro allI impI)
apply (simp split: if_split_asm del: split_paired_Ex)
(* the slot holding the reply cap is dest: transfer via weak_derived_Reply *)
apply (simp add: weak_derived_Reply del: split_paired_Ex)
apply (erule exE)
apply (simp del: split_paired_Ex)
apply (unfold reply_caps_mdb_def)[1]
apply (erule allE)+
apply (simp del: split_paired_Ex)
apply (erule(1) impE)
apply (erule exEI)
apply simp
apply blast
(* the slot is any other slot: reuse the original witness, adjusting for
   the possibility that the witness was src (now at dest) *)
apply (rule conjI)
apply (unfold reply_caps_mdb_def)[1]
apply (erule allE)+
apply (erule(1) impE)
apply (clarsimp simp: weak_derived_Reply)
apply (rule impI)
apply (unfold reply_caps_mdb_def)[1]
apply (erule allE)+
apply (erule(1) impE)
apply (erule exEI)
apply blast
done
lemma reply_masters_mdb:
assumes r: "reply_masters_mdb m cs"
assumes s: "cs src = Some src_cap"
assumes d: "cs dest = Some cap.NullCap"
assumes c: "weak_derived cap src_cap"
shows "reply_masters_mdb m' (cs (dest \<mapsto> cap, src \<mapsto> cap.NullCap))"
unfolding reply_masters_mdb_def
using r c s d
apply (intro allI impI)
apply (subst descendants)
unfolding m'_def m''_def
apply (simp split: if_split_asm)
apply (hypsubst)
apply (simp only: weak_derived_Reply reply_masters_mdb_def)
apply fastforce
apply (simp only: reply_masters_mdb_def)
apply (simp del: split_paired_All split_paired_Ex)
apply (intro conjI impI)
apply (fastforce simp add:weak_derived_Reply)
apply fastforce+
done
lemma reply_mdb:
assumes r: "reply_mdb m cs"
assumes s: "cs src = Some src_cap"
assumes d: "cs dest = Some cap.NullCap"
assumes c: "weak_derived cap src_cap"
shows "reply_mdb m' (cs (dest \<mapsto> cap, src \<mapsto> cap.NullCap))"
using r c s d unfolding reply_mdb_def
by (simp add: reply_caps_mdb reply_masters_mdb)
end
declare is_master_reply_cap_NullCap [simp]
context CSpace_AI_weak_derived begin
(* Any mdb_move_abs interpretation on a 'state_ext state also satisfies the
   generic locale mdb_move_abs_gen. *)
lemma mdb_move_abs_gen:
"\<And>src dest m (s::'state_ext state).
mdb_move_abs src dest m s \<Longrightarrow> mdb_move_abs_gen src dest m s"
apply (unfold_locales)
apply (unfold mdb_move_abs_def)
by auto
(* Main MDB preservation result for cap_move: moving a non-null cap that is
   weakly derived from the source cap into an empty destination slot
   preserves valid_mdb. The proof discharges each conjunct of valid_mdb
   (untyped_mdb, descendants_inc, no_mloop, untyped_inc, ut_revocable,
   irq_revocable, reply_master_revocable, reply_mdb, valid_arch_mdb) via
   the mdb_move_abs / mdb_move_abs_gen locale lemmas. *)
lemma cap_move_mdb [wp]:
fixes dest cap src
shows
"\<lbrace>valid_mdb and cte_wp_at ((=) cap.NullCap) dest and
cte_wp_at (\<lambda>c. weak_derived cap c \<and> c \<noteq> cap.NullCap) src\<rbrace>
cap_move cap src dest
\<lbrace>\<lambda>_. valid_mdb :: 'state_ext state \<Rightarrow> bool\<rbrace>"
apply (simp add: cap_move_def set_cdt_def valid_mdb_def2
pred_conj_def cte_wp_at_caps_of_state)
apply (wp update_cdt_cdt | simp split del: if_split)+
apply (rule hoare_lift_Pf3[where f="is_original_cap"])
apply (wp set_cap_caps_of_state2 | simp split del: if_split)+
apply (clarsimp simp: mdb_cte_at_def fun_upd_def[symmetric]
simp del: fun_upd_apply)
apply (rule conjI)
apply (cases src, cases dest)
apply (subgoal_tac "cap.NullCap \<noteq> cap")
apply (intro allI conjI)
apply fastforce
apply (clarsimp split del: if_split)
apply (rule conjI)
apply fastforce
apply clarsimp
apply fastforce
(* establish the mdb_move_abs locale to reuse its descendants lemmas *)
apply (subgoal_tac "mdb_move_abs src dest (cdt s) s")
prefer 2
apply (rule mdb_move_abs.intro)
apply (simp add: valid_mdb_def swp_def cte_wp_at_caps_of_state
mdb_cte_at_def)
apply (simp add: cte_wp_at_caps_of_state)
apply (rule refl)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (frule mdb_move_abs_gen)
apply (rule conjI)
apply (simp add: untyped_mdb_def mdb_move_abs.descendants)
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (rule conjI)
apply clarsimp
apply (rule conjI, clarsimp simp: is_cap_simps)
apply (clarsimp simp: descendants_of_def)
apply (drule tranclD)
apply (clarsimp simp: cdt_parent_of_def mdb_cte_at_def)
apply fastforce
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (rule conjI, clarsimp simp: is_cap_simps)
apply clarsimp
apply (drule (1) weak_derived_untyped2)
apply (cases src)
apply clarsimp
apply clarsimp
apply (drule weak_derived_obj_refs)
apply clarsimp
apply (cases src)
apply clarsimp
apply (rule conjI)
apply (erule(4) mdb_move_abs_gen.descendants_inc)
apply (rule conjI)
apply (simp add: no_mloop_def mdb_move_abs.parency)
apply (simp add: mdb_move_abs.desc_dest [unfolded descendants_of_def, simplified])
apply (rule conjI)
apply (erule (4) mdb_move_abs_gen.untyped_inc)
apply (rule conjI)
apply (simp add: ut_revocable_def weak_derived_is_untyped del: split_paired_All)
apply (rule conjI)
apply (simp add: irq_revocable_def del: split_paired_All)
apply clarsimp
apply (metis surj_pair)
apply (rule conjI)
apply (simp add: reply_master_revocable_def del: split_paired_All)
apply (drule_tac x=src in spec, drule_tac x=capa in spec)
apply (intro impI)
apply (simp add: weak_derived_is_reply_master)
apply (rule conjI, erule (4) mdb_move_abs.reply_mdb)
apply (erule (2) valid_arch_mdb_updates)
done
end
(* cap_move does not change object types at any address. *)
lemma cap_move_typ_at:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> cap_move cap ptr ptr' \<lbrace>\<lambda>rv s. P (typ_at T p s)\<rbrace>"
apply (simp add: cap_move_def set_cdt_def)
apply (wp set_cap_typ_at | simp)+
done
(* set_cdt only touches the CDT, so pspace-level invariants are preserved. *)
lemma set_cdt_pspace:
"\<lbrace>valid_pspace\<rbrace> set_cdt m \<lbrace>\<lambda>_. valid_pspace\<rbrace>"
apply (simp add: set_cdt_def)
apply wp
apply (auto intro: valid_pspace_eqI)
done
(* set_cdt preserves the current-TCB invariant. *)
lemma set_cdt_cur:
"\<lbrace>cur_tcb\<rbrace> set_cdt m \<lbrace>\<lambda>_. cur_tcb\<rbrace>"
apply (simp add: set_cdt_def)
apply wp
apply (simp add: cur_tcb_def)
done
(* set_cdt preserves cte_at (proved via typ_at preservation). *)
lemma set_cdt_cte_at:
"\<lbrace>cte_at x\<rbrace> set_cdt m \<lbrace>\<lambda>_. cte_at x\<rbrace>"
by (simp add: valid_cte_at_typ set_cdt_typ_at [where P="\<lambda>x. x"])
(* set_cdt preserves cap validity. *)
lemma set_cdt_valid_cap:
"\<lbrace>valid_cap c\<rbrace> set_cdt m \<lbrace>\<lambda>_. valid_cap c\<rbrace>"
by (rule set_cdt_inv) simp
(* set_cdt preserves if_live_then_nonz_cap (liveness implies a cap exists). *)
lemma set_cdt_iflive[wp]:
"\<lbrace>if_live_then_nonz_cap\<rbrace> set_cdt m \<lbrace>\<lambda>_. if_live_then_nonz_cap\<rbrace>"
by (simp add: set_cdt_def, wp, simp add: if_live_then_nonz_cap_def ex_nonz_cap_to_def)
(* set_untyped_cap_as_full (which at most updates the free index of the
   source untyped cap) preserves if_live_then_nonz_cap. *)
lemma set_untyped_cap_as_full_cap_to:
shows
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. if_live_then_nonz_cap s\<rbrace>"
apply (clarsimp simp:if_live_then_nonz_cap_def set_untyped_cap_as_full_def
split del: if_split)
apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift set_cap_cap_to)+
apply (auto simp add:cte_wp_at_caps_of_state)
done
(* A cap held in a TCB slot satisfies the slot's tcb_cap_cases check
   (follows from valid_objs on the TCB object). *)
lemma tcb_cap_slot_regular:
"\<lbrakk>caps_of_state s cref = Some cap; valid_objs s; kheap s (fst cref) = Some (TCB tcb)\<rbrakk>
\<Longrightarrow> \<exists>f upd check. tcb_cap_cases (snd cref) = Some (f, upd, check) \<and>
check (fst cref) (tcb_state tcb) cap "
apply (case_tac cref)
apply (clarsimp simp: caps_of_state_def gets_the_def return_def assert_def
assert_opt_def tcb_at_def get_cap_def
bind_def get_object_def simpler_gets_def
dest!: get_tcb_SomeD
split: if_splits)
apply (clarsimp simp: fail_def return_def split: option.splits)
apply (erule valid_objsE)
apply assumption
apply (simp add: valid_obj_def valid_tcb_def)
apply (clarsimp simp: tcb_cnode_map_tcb_cap_cases)
apply (drule bspec(1))
apply (erule ranI)
apply clarsimp
done
(* Raising the free index of an untyped cap (within its size bound)
   preserves valid_pspace: the untyped's covered region only shrinks, so
   validity, liveness and zombie-finality all carry over. *)
lemma set_free_index_valid_pspace:
"\<lbrace>\<lambda>s. valid_pspace s \<and> cte_wp_at ((=) cap) cref s \<and>
(free_index_of cap \<le> idx \<and> is_untyped_cap cap \<and>idx \<le> 2^ cap_bits cap)\<rbrace>
set_cap (free_index_update (\<lambda>_. idx) cap) cref
\<lbrace>\<lambda>rv s'. valid_pspace s'\<rbrace>"
apply (clarsimp simp: valid_pspace_def)
apply (wp set_cap_valid_objs update_cap_iflive set_cap_zombies')
apply (clarsimp simp:cte_wp_at_caps_of_state is_cap_simps)+
apply (frule(1) caps_of_state_valid)
apply (clarsimp simp:valid_cap_def cap_aligned_def free_index_update_def)
apply (intro conjI)
(* word arithmetic: the new free region is a subset of the old one *)
apply (clarsimp simp: valid_untyped_def)
apply (elim impE allE)
apply assumption+
apply (clarsimp simp: free_index_of_def)
apply (erule disjoint_subset[rotated])
apply clarsimp
apply (rule word_plus_mono_right)
apply (rule of_nat_mono_maybe_le[THEN iffD1])
apply (subst word_bits_def[symmetric])
apply (erule less_le_trans[OF _ power_increasing])
apply simp
apply simp
apply (subst word_bits_def[symmetric])
apply (erule le_less_trans)
apply (erule less_le_trans[OF _ power_increasing])
apply simp+
apply (erule is_aligned_no_wrap')
apply (rule word_of_nat_less)
apply (simp add: word_bits_def)
(* tcb_cap_valid for the slot holding the untyped cap *)
apply (clarsimp simp add: pred_tcb_at_def tcb_cap_valid_def obj_at_def is_tcb valid_ipc_buffer_cap_def
split: option.split)
apply (frule tcb_cap_slot_regular)
apply simp+
apply (clarsimp simp: is_nondevice_page_cap_simps)
done
(* Arch-parametric assumption: raising the free index of a known untyped
   cap preserves all invariants. Discharged per architecture. *)
locale CSpace_AI_set_free_index_invs =
fixes state_ext_t :: "'state_ext::state_ext itself"
assumes set_free_index_invs_known_cap:
"\<And>cap idx.
\<lbrace>\<lambda>s::'state_ext state. (free_index_of cap \<le> idx \<and> is_untyped_cap cap \<and> idx \<le> 2^cap_bits cap)
\<and> invs s \<and> cte_wp_at ((=) cap ) cref s\<rbrace>
set_cap (free_index_update (\<lambda>_. idx) cap) cref
\<lbrace>\<lambda>rv s'. invs s'\<rbrace>"
(* Variant of the locale assumption that only requires the slot to hold an
   untyped cap agreeing with cap on everything but the free index. *)
lemma (in CSpace_AI_set_free_index_invs) set_free_index_invs:
"\<lbrace>\<lambda>s::'state_ext state. (free_index_of cap \<le> idx \<and> is_untyped_cap cap \<and> idx \<le> 2^cap_bits cap) \<and>
invs s \<and> cte_wp_at (\<lambda>cp. \<exists>ptr sz dev idx' idx''. idx' \<le> idx
\<and> cp = UntypedCap dev ptr sz idx' \<and> cap = UntypedCap dev ptr sz idx'') cref s\<rbrace>
set_cap (free_index_update (\<lambda>_. idx) cap) cref
\<lbrace>\<lambda>rv s'. invs s'\<rbrace>"
apply (rule hoare_name_pre_state)
apply (simp add: cte_wp_at_caps_of_state)
apply clarify
apply (cut_tac cap="the (caps_of_state s cref)"
in set_free_index_invs_known_cap)
apply clarsimp
apply (erule hoare_pre)
apply (clarsimp simp: cte_wp_at_caps_of_state free_index_of_def)
done
(* set_untyped_cap_as_full preserves zombies_final: the updated cap only
   differs from the old one in its free index, never in zombie status. *)
lemma set_untyped_cap_as_full_cap_zombies_final:
"\<lbrace>zombies_final and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. zombies_final s\<rbrace>"
apply (clarsimp simp:set_untyped_cap_as_full_def
split:if_split_asm | rule conjI | wp set_cap_zombies )+
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (rule zombies_finalD2)
apply (simp add:get_cap_caps_of_state)
apply (rule sym,simp)
apply (simp add:get_cap_caps_of_state)
apply (rule sym,simp)
apply simp+
apply (clarsimp simp:is_cap_simps free_index_update_def)+
apply wp
apply simp
done
(* FIXME: MOVE *)
(* set_untyped_cap_as_full preserves valid_pspace, combining the
   valid_objs, liveness and zombies_final preservation results above. *)
lemma set_untyped_cap_as_full_valid_pspace:
"\<lbrace>valid_pspace and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. valid_pspace s \<rbrace>"
apply (clarsimp simp:valid_pspace_def)
apply (clarsimp | wp set_untyped_cap_full_valid_objs
set_untyped_cap_as_full_cap_to set_untyped_cap_as_full_cap_zombies_final )+
done
(* cap_insert preserves valid_pspace, given a valid, TCB-slot-compatible
   cap, an empty destination and a zombie-compatibility side condition on
   every other slot sharing an object reference with the inserted cap. *)
lemma cap_insert_valid_pspace:
"\<lbrace>valid_pspace and cte_wp_at ((=) cap.NullCap) dest
and valid_cap cap and tcb_cap_valid cap dest
and (\<lambda>s. \<forall>r\<in>obj_refs cap. \<forall>p'. dest \<noteq> p' \<and> cte_wp_at (\<lambda>cap'. r \<in> obj_refs cap') p' s
\<longrightarrow> (cte_wp_at (Not \<circ> is_zombie) p' s \<and> \<not> is_zombie cap))\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_. valid_pspace\<rbrace>"
unfolding cap_insert_def
apply (simp add: update_cdt_def)
apply (wp new_cap_valid_pspace set_cdt_valid_pspace set_cdt_cte_at
set_untyped_cap_as_full_cte_wp_at set_untyped_cap_as_full_valid_cap
set_cdt_valid_cap hoare_drop_imps set_untyped_cap_as_full_tcb_cap_valid
set_untyped_cap_as_full_valid_pspace | simp split del: if_split)+
apply (wp hoare_vcg_ball_lift hoare_vcg_all_lift hoare_vcg_imp_lift)
apply clarsimp
apply (wp hoare_vcg_disj_lift set_untyped_cap_as_full_cte_wp_at_neg
set_untyped_cap_as_full_cte_wp_at get_cap_wp)+
apply (intro allI impI conjI)
apply (clarsimp simp: cte_wp_at_caps_of_state)+
apply (rule ccontr)
apply clarsimp
apply (drule bspec)
apply simp
apply (drule_tac x = xa in spec,drule_tac x = xb in spec)
apply (subgoal_tac "(xa,xb) = src")
apply (clarsimp simp: masked_as_full_def if_distrib split:if_splits)
apply clarsimp
done
(* set_cdt preserves valid_idle. *)
lemma set_cdt_idle [wp]:
"\<lbrace>valid_idle\<rbrace> set_cdt m \<lbrace>\<lambda>rv. valid_idle\<rbrace>"
by (simp add: set_cdt_def, wp,
auto simp: valid_idle_def pred_tcb_at_def)
(* cap_insert leaves global refs, arch state and idle thread unchanged. *)
crunches cap_insert
for refs[wp]: "\<lambda>s. P (global_refs s)"
and arch [wp]: "\<lambda>s. P (arch_state s)"
and it [wp]: "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
lemma cap_insert_idle [wp]:
"\<lbrace>valid_idle\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>_. valid_idle\<rbrace>"
by (rule valid_idle_lift; wp)
crunch reply[wp]: set_cdt "valid_reply_caps"
(* set_untyped_cap_as_full preserves existence of a reply cap for t:
   the slot identified by has_reply_cap still holds a reply cap, since
   only an untyped cap's free index can change. *)
lemma set_untyped_cap_as_full_has_reply_cap:
"\<lbrace>\<lambda>s. (has_reply_cap t s) \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. (has_reply_cap t s)\<rbrace>"
apply (clarsimp simp:has_reply_cap_def is_reply_cap_to_def)
apply (wp hoare_vcg_ex_lift)
apply (wp set_untyped_cap_as_full_cte_wp_at)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (rule_tac x = a in exI)
apply (rule_tac x = b in exI)
apply clarsimp
done
(* Dual direction: absence of a reply cap for t is also preserved. *)
lemma set_untyped_cap_as_full_has_reply_cap_neg:
"\<lbrace>\<lambda>s. \<not> (has_reply_cap t s) \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. \<not> (has_reply_cap t s)\<rbrace>"
apply (clarsimp simp:has_reply_cap_def is_reply_cap_to_def)
apply (wp hoare_vcg_all_lift)
apply (wp set_untyped_cap_as_full_cte_wp_at_neg)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply (drule_tac x = x in spec)
apply (drule_tac x = xa in spec)
apply (clarsimp simp:masked_as_full_def free_index_update_def is_cap_simps split:cap.splits if_splits)
done
(* Negated form of cte_wp_at for a specific cap, as a caps_of_state fact. *)
lemma caps_of_state_cte_wp_at_neq:
"(caps_of_state s slot \<noteq> Some capa) = (\<not> cte_wp_at ((=) capa) slot s)"
by (clarsimp simp:cte_wp_at_caps_of_state)
(* max_free_index_update keeps an untyped cap untyped. *)
lemma max_free_index_update_preserve_untyped:
"is_untyped_cap c \<Longrightarrow> is_untyped_cap ( max_free_index_update c)"
by simp
(* set_untyped_cap_as_full preserves uniqueness of reply caps: the only
   possible change is to an untyped cap's free index, which is not a
   reply cap. *)
lemma set_untyped_cap_as_full_unique_reply_caps:
"\<lbrace>\<lambda>s. unique_reply_caps (caps_of_state s) \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. unique_reply_caps (caps_of_state s)\<rbrace>"
apply (simp add: unique_reply_caps_def set_untyped_cap_as_full_def)
apply (rule conjI)
apply clarify
apply wp
apply (clarsimp simp: is_cap_simps)
apply wpsimp
by blast
(* set_untyped_cap_as_full preserves valid_reply_masters. *)
lemma set_untyped_cap_as_full_valid_reply_masters:
"\<lbrace>\<lambda>s. valid_reply_masters s \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. valid_reply_masters s \<rbrace>"
apply (clarsimp simp: set_untyped_cap_as_full_def)
apply (intro conjI impI)
apply wp
apply (clarsimp simp: cte_wp_at_caps_of_state free_index_update_def split:cap.splits)
apply wp
apply clarsimp
done
(* set_untyped_cap_as_full preserves valid_global_refs (cap ranges are
   unchanged by a free-index update). *)
lemma set_untyped_cap_as_full_valid_global_refs[wp]:
"\<lbrace>valid_global_refs and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>r. valid_global_refs\<rbrace>"
apply (simp add:valid_global_refs_def valid_refs_def)
apply (wp hoare_vcg_all_lift set_untyped_cap_as_full_cte_wp_at_neg| wps)+
apply (clarsimp simp:cte_wp_at_caps_of_state)
done
(* cap_insert preserves valid_reply_caps: if the inserted cap is a
   (non-master) reply cap for t, the thread must be awaiting a reply and
   no reply cap for t may already exist. *)
lemma cap_insert_reply [wp]:
"\<lbrace>valid_reply_caps and cte_at dest and
(\<lambda>s. \<forall>t R. cap = cap.ReplyCap t False R \<longrightarrow>
st_tcb_at awaiting_reply t s \<and> \<not> has_reply_cap t s)\<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>_. valid_reply_caps\<rbrace>"
apply (simp add: cap_insert_def update_cdt_def)
apply (wp
| simp split del: if_split
| rule hoare_drop_imp
| clarsimp simp: valid_reply_caps_def)+
apply (wp hoare_vcg_all_lift hoare_vcg_imp_lift set_untyped_cap_as_full_has_reply_cap_neg
set_untyped_cap_as_full_unique_reply_caps set_untyped_cap_as_full_cte_wp_at get_cap_wp)+
apply (clarsimp simp:cte_wp_at_caps_of_state valid_reply_caps_def)+
done
crunch reply_masters[wp]: set_cdt "valid_reply_masters"
(* cap_insert preserves valid_reply_masters provided the inserted cap is
   not a master reply cap. *)
lemma cap_insert_reply_masters [wp]:
"\<lbrace>valid_reply_masters and cte_at dest and K (\<not> is_master_reply_cap cap) \<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
apply (simp add: cap_insert_def update_cdt_def)
apply (wp hoare_drop_imp set_untyped_cap_as_full_valid_reply_masters
set_untyped_cap_as_full_cte_wp_at get_cap_wp
| simp add: is_cap_simps split del: if_split)+
apply (clarsimp simp:cte_wp_at_caps_of_state)
done
(* cap_insert does not affect obj_at for arch-object predicates. *)
lemma cap_insert_aobj_at:
"arch_obj_pred P' \<Longrightarrow> \<lbrace>\<lambda>s. P (obj_at P' pd s)\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>r s. P (obj_at P' pd s)\<rbrace>"
unfolding cap_insert_def update_cdt_def set_cdt_def set_untyped_cap_as_full_def
by (wpsimp wp: set_cap.aobj_at get_cap_wp)
(* Hence cap_insert preserves valid_arch_state. *)
lemma cap_insert_valid_arch [wp]:
"\<lbrace>valid_arch_state\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>_. valid_arch_state\<rbrace>"
by (rule valid_arch_state_lift_aobj_at; wp cap_insert_aobj_at)
(* update_cdt leaves caps_of_state and the IRQ node unchanged. *)
crunch caps [wp]: update_cdt "\<lambda>s. P (caps_of_state s)"
crunch irq_node [wp]: update_cdt "\<lambda>s. P (interrupt_irq_node s)"
(* update_cdt preserves valid_global_refs (lifted via the cte rules). *)
lemma update_cdt_global [wp]:
"\<lbrace>valid_global_refs\<rbrace> update_cdt m \<lbrace>\<lambda>_. valid_global_refs\<rbrace>"
by (rule valid_global_refs_cte_lift; wp)
(* cap_insert preserves valid_global_refs if the inserted cap's range is
   contained in the source cap's range. *)
lemma cap_insert_valid_global_refs[wp]:
"\<lbrace>valid_global_refs and (\<lambda>s. cte_wp_at (\<lambda>scap. cap_range cap \<subseteq> cap_range scap) src s)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_. valid_global_refs\<rbrace>"
apply (simp add: cap_insert_def)
apply (wp get_cap_wp|simp split del: if_split)+
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (simp add: valid_global_refs_def valid_refs_def2)
apply blast
done
(* cap_insert preserves the IRQ node and valid_vspace_objs. *)
crunches cap_insert
for irq_node[wp]: "\<lambda>s. P (interrupt_irq_node s)"
and vspace_objs [wp]: "valid_vspace_objs"
(wp: crunch_wps)
crunch arch_caps[wp]: update_cdt "valid_arch_caps"
(* is_derived caps share the same object references. *)
lemma is_derived_obj_refs:
"is_derived m p cap cap' \<Longrightarrow> obj_refs cap = obj_refs cap'"
apply (clarsimp simp: is_derived_def is_cap_simps cap_master_cap_simps
split: if_split_asm dest!:cap_master_cap_eqDs)
apply (clarsimp simp: cap_master_cap_def)
apply (auto split: cap.split_asm dest: master_arch_cap_obj_refs)
done
(* Arch-parametric assumptions about set_untyped_cap_as_full:
   preservation of valid_arch_caps and of no_cap_to_obj_with_diff_ref. *)
locale CSpace_AI_set_untyped_cap_as_full =
fixes state_ext_t :: "'state_ext::state_ext itself"
assumes set_untyped_cap_as_full_valid_arch_caps:
"\<And>src_cap src cap.
\<lbrace>valid_arch_caps and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>ya. valid_arch_caps :: 'state_ext state \<Rightarrow> bool\<rbrace>"
assumes set_untyped_cap_as_full[wp]:
"\<And>src_cap a b src cap.
\<lbrace>\<lambda>s::'state_ext state. no_cap_to_obj_with_diff_ref a b s \<and> cte_wp_at ((=) src_cap) src s\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. no_cap_to_obj_with_diff_ref a b s\<rbrace>"
(* set_untyped_cap_as_full preserves finality of any cap. *)
lemma set_untyped_cap_as_full_is_final_cap':
"\<lbrace>is_final_cap' cap' and cte_wp_at ((=) src_cap) src\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. is_final_cap' cap' s\<rbrace>"
apply (simp add:set_untyped_cap_as_full_def)
apply (intro conjI impI)
apply (wp set_cap_final_cap_at)
apply (clarsimp simp:cte_wp_at_caps_of_state)
apply wp
apply simp
done
(* set_untyped_cap_as_full leaves the vs_lookup relations unchanged. *)
lemma set_untyped_cap_as_full_access[wp]:
"\<lbrace>(\<lambda>s. P (vs_lookup s))\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>r s. P (vs_lookup s)\<rbrace>"
by (clarsimp simp:set_untyped_cap_as_full_def, wp)+
lemma set_untyped_cap_as_full_vs_lookup_pages[wp]:
"\<lbrace>(\<lambda>s. P (vs_lookup_pages s))\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>r s. P (vs_lookup_pages s)\<rbrace>"
by (clarsimp simp:set_untyped_cap_as_full_def, wp)+
(* FIXME: remove *)
lemmas set_untyped_cap_as_full_access2[wp]
= set_untyped_cap_as_full_vs_lookup_pages
(* obj_at predicates that cannot hold caps are unaffected. *)
lemma set_untyped_cap_as_full_obj_at_impossible:
"\<lbrace>\<lambda>s. P (obj_at P' p s) \<and> (\<forall>ko. P' ko \<longrightarrow> caps_of ko = {})\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. P (obj_at P' p s)\<rbrace>"
apply (clarsimp simp:set_untyped_cap_as_full_def)
apply (intro conjI impI)
apply (wp set_cap_obj_at_impossible)+
apply clarsimp
done
(* caps_of_state with an extra predicate, as a cte_wp_at fact. *)
lemma caps_of_state_cteD':
"(caps_of_state m p = Some x \<and> P x) = cte_wp_at ((=) x and P) p m"
by (clarsimp simp:cte_wp_at_caps_of_state)
(* is_derived caps have equal master caps. *)
lemma derived_cap_master_cap_eq: "is_derived m n b c \<Longrightarrow> cap_master_cap b = cap_master_cap c"
by (clarsimp simp:is_derived_def split:if_splits)
(* Arch-parametric assumptions about cap_insert: preservation of
   valid_arch_caps, cap_refs_in_kernel_window and valid_ioports,
   under the usual derivation/range side conditions. *)
locale CSpace_AI_cap_insert =
fixes state_ext_t :: "'state_ext::state_ext itself"
assumes cap_insert_valid_arch_caps:
"\<And>src cap dest.
\<lbrace>valid_arch_caps and (\<lambda>s::'state_ext state. cte_wp_at (is_derived (cdt s) src cap) src s)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. valid_arch_caps\<rbrace>"
assumes cap_insert_cap_refs_in_kernel_window[wp]:
"\<And>cap src dest.
\<lbrace>cap_refs_in_kernel_window
and cte_wp_at (\<lambda>c. cap_range cap \<subseteq> cap_range c) src\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. cap_refs_in_kernel_window :: 'state_ext state \<Rightarrow> bool\<rbrace>"
assumes cap_insert_derived_ioports:
"\<And>src cap dest.
\<lbrace>valid_ioports and (\<lambda>s::'state_ext state. cte_wp_at (is_derived (cdt s) src cap) src s)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. valid_ioports\<rbrace>"
(* Maxing out the free index does not change an untyped cap's device flag. *)
lemma cap_is_device_free_index_update_simp[simp]:
"is_untyped_cap c \<Longrightarrow> cap_is_device (max_free_index_update c) = cap_is_device c"
by (case_tac c,simp_all add:is_cap_simps)
(* Grouped crunch lemmas: cap_insert preserves a collection of state
   predicates that do not depend on caps or the CDT. *)
locale cap_insert_crunches begin
crunch arch_obj_at[wp]: cap_insert "ko_at (ArchObj ao) p"
(ignore: set_object set_cap wp: set_cap_obj_at_impossible crunch_wps
simp: caps_of_def cap_of_def)
crunch empty_table_at[wp]: cap_insert "obj_at (empty_table S) p"
(ignore: set_object set_cap wp: set_cap_obj_at_impossible crunch_wps
simp: empty_table_caps_of)
crunches cap_insert
for valid_global_objs[wp]: "valid_global_objs"
and global_vspace_mappings[wp]: "valid_global_vspace_mappings"
and v_ker_map[wp]: "valid_kernel_mappings"
and asid_map[wp]: valid_asid_map
and only_idle[wp]: only_idle
and equal_ker_map[wp]: "equal_kernel_mappings"
and pspace_in_kernel_window[wp]: "pspace_in_kernel_window"
(wp: get_cap_wp simp: crunch_simps)
crunch cap_refs_in_kernel_window[wp]: update_cdt "cap_refs_in_kernel_window"
end
crunch pspace_respects_device_region[wp]: cap_insert "pspace_respects_device_region"
(wp: crunch_wps)
crunch cap_refs_respects_device_region[wp]: update_cdt "cap_refs_respects_device_region"
(* cap_insert preserves cap_refs_respects_device_region when the inserted
   cap's range and device flag are compatible with the source cap. *)
lemma cap_insert_cap_refs_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region
and cte_wp_at (\<lambda>c. cap_range cap \<subseteq> cap_range c \<and> ((cap_range cap \<noteq> {}) \<longrightarrow> cap_is_device cap = cap_is_device c)) src\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: cap_insert_def set_untyped_cap_as_full_def)
apply (wp get_cap_wp set_cap_cte_wp_at' set_cap_cap_refs_respects_device_region_spec[where ptr = src]
| simp split del: if_split)+
apply (clarsimp simp: cte_wp_at_caps_of_state is_derived_def)
done
(* is_derived caps have identical cap ranges. *)
lemma is_derived_cap_range:
"is_derived m srcptr cap cap'
\<Longrightarrow> cap_range cap' = cap_range cap"
by (clarsimp simp: is_derived_def cap_range_def is_cap_simps dest!: master_cap_cap_range
split: if_split_asm)
(* is_derived caps have identical device flags. *)
lemma is_derived_cap_is_device:
"\<lbrakk>is_derived m srcptr cap cap'\<rbrakk>
\<Longrightarrow> cap_is_device cap' = cap_is_device cap"
apply (case_tac cap)
apply (clarsimp simp: is_derived_def
cap_range_def is_cap_simps cap_master_cap_def
split: if_split_asm cap.splits )+
apply (drule arch_derived_is_device[rotated])
apply simp+
done
(* set_cdt / update_cdt preserve valid_ioc (original-cap tracking). *)
lemma set_cdt_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_cdt t \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
by (simp add: set_cdt_def, wp) (simp add: valid_ioc_def)
crunch valid_ioc[wp]: update_cdt valid_ioc
(* FIXME: we could weaken this. *)
(* Marking a slot as original preserves valid_ioc provided the slot is
   non-null whenever the flag is set. *)
lemma set_original_valid_ioc[wp]:
"\<lbrace>valid_ioc and cte_wp_at (\<lambda>x. val \<longrightarrow> x \<noteq> cap.NullCap) slot\<rbrace>
set_original slot val
\<lbrace>\<lambda>_. valid_ioc\<rbrace>"
by (simp add: set_original_def, wp) (clarsimp simp: valid_ioc_def)
(* Under valid_ioc, a NullCap slot is never flagged as original. *)
lemma valid_ioc_NullCap_not_original:
"\<lbrakk>valid_ioc s; cte_wp_at ((=) cap.NullCap) slot s\<rbrakk>
\<Longrightarrow> \<not> is_original_cap s slot"
by (cases slot) (fastforce simp add: cte_wp_at_caps_of_state valid_ioc_def)
(* cap_insert preserves valid_ioc. *)
lemma cap_insert_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: cap_insert_def set_untyped_cap_as_full_def)
apply (wp set_object_valid_ioc_caps set_cap_cte_wp_at get_cap_wp
| clarsimp simp:is_cap_simps is_cap_revocable_def split del: if_split)+
apply (auto simp: valid_ioc_NullCap_not_original elim: cte_wp_cte_at)
done
(* set_cdt / update_cdt / cap_insert preserve valid_machine_state. *)
lemma set_cdt_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_cdt t \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
by (simp add: set_cdt_def, wp) (simp add: valid_machine_state_def)
crunch vms[wp]: update_cdt valid_machine_state
lemma cap_insert_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> cap_insert cap src dest \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: cap_insert_def set_object_def set_untyped_cap_as_full_def)
apply (wp get_object_wp get_cap_wp| simp only: vms_ioc_update | rule hoare_drop_imp | simp split del: if_split)+
done
(* valid_irq_states is insensitive to CDT, original-cap and exst updates. *)
lemma valid_irq_states_cdt_update[simp]:
"valid_irq_states (s\<lparr>cdt := x\<rparr>) = valid_irq_states s"
by(auto simp: valid_irq_states_def)
lemma valid_irq_states_is_original_cap_update[simp]:
"valid_irq_states (s\<lparr>is_original_cap := x\<rparr>) = valid_irq_states s"
by(auto simp: valid_irq_states_def)
crunch valid_irq_states[wp]: cap_insert "valid_irq_states"
(wp: crunch_wps simp: crunch_simps)
lemma valid_irq_states_exst_update[simp]:
"valid_irq_states (s\<lparr>exst := x\<rparr>) = valid_irq_states s"
by(auto simp: valid_irq_states_def)
context CSpace_AI_cap_insert begin
interpretation cap_insert_crunches .
(* Top-level invariant preservation for cap_insert: combines the
   individual preservation lemmas above into invs preservation, under the
   full set of preconditions (empty dest slot, valid derived cap,
   reply-cap side conditions, no master reply cap, etc.). *)
lemma cap_insert_invs[wp]:
"\<And>dest cap src.
\<lbrace>invs and cte_wp_at (\<lambda>c. c=Structures_A.NullCap) dest
and valid_cap cap and tcb_cap_valid cap dest
and ex_cte_cap_wp_to (appropriate_cte_cap cap) dest
and (\<lambda>s. \<forall>r\<in>obj_refs cap. \<forall>p'. dest \<noteq> p' \<and> cte_wp_at (\<lambda>cap'. r \<in> obj_refs cap') p' s
\<longrightarrow> (cte_wp_at (Not \<circ> is_zombie) p' s \<and> \<not> is_zombie cap))
and (\<lambda>s. cte_wp_at (is_derived (cdt s) src cap) src s)
and (\<lambda>s. cte_wp_at (\<lambda>cap'. \<forall>irq \<in> cap_irqs cap - cap_irqs cap'. irq_issued irq s) src s)
and (\<lambda>s. \<forall>t R. cap = cap.ReplyCap t False R \<longrightarrow>
st_tcb_at awaiting_reply t s \<and> \<not> has_reply_cap t s)
and K (\<not> is_master_reply_cap cap)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>rv. invs :: 'state_ext state \<Rightarrow> bool\<rbrace>"
apply (simp add: invs_def valid_state_def)
apply (rule hoare_pre)
apply (wp cap_insert_valid_pspace cap_insert_ifunsafe cap_insert_idle
valid_irq_node_typ cap_insert_valid_arch_caps cap_insert_derived_ioports)
apply (auto simp: cte_wp_at_caps_of_state is_derived_cap_is_device
is_derived_cap_range valid_pspace_def)
done
end
(* Trivial restatement (premise and conclusion coincide); kept for use as
   a named rule. *)
lemma prop_is_preserved_imp:
"\<lbrace>P and Q\<rbrace> f \<lbrace>\<lambda>rv. P\<rbrace> \<Longrightarrow> \<lbrace>P and Q\<rbrace> f \<lbrace>\<lambda>rv. P\<rbrace>"
by simp
(* derive_cap does not change the state (only inspects it). *)
lemma derive_cap_inv[wp]:
"\<lbrace>P\<rbrace> derive_cap slot c \<lbrace>\<lambda>rv. P\<rbrace>"
apply (case_tac c, simp_all add: derive_cap_def ensure_no_children_def whenE_def is_zombie_def, wp+)
apply clarsimp
apply (wp arch_derive_cap_inv | simp)+
done
(* A CNode of the given size has a CTE at the all-zero index. *)
lemma cte_at_0:
"cap_table_at bits oref s
\<Longrightarrow> cte_at (oref, replicate bits False) s"
by (clarsimp simp: obj_at_def is_cap_table
cte_at_cases well_formed_cnode_n_def length_set_helper)
(* Every TCB has CTEs at cnode indices 0 and 1. *)
lemma tcb_at_cte_at_0:
"tcb_at tcb s \<Longrightarrow> cte_at (tcb, tcb_cnode_index 0) s"
by (auto simp: obj_at_def cte_at_cases is_tcb)
lemma tcb_at_cte_at_1:
"tcb_at tcb s \<Longrightarrow> cte_at (tcb, tcb_cnode_index 1) s"
by (auto simp: obj_at_def cte_at_cases is_tcb)
(* set_cdt preserves valid_objs (object validity ignores the CDT). *)
lemma set_cdt_valid_objs:
"\<lbrace>valid_objs\<rbrace> set_cdt m \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: set_cdt_def)
apply wp
apply (fastforce intro: valid_objs_pspaceI)
done
(* After a successful get_cap, the slot is known to contain a CTE. *)
lemma get_cap_cte:
"\<lbrace>\<top>\<rbrace> get_cap y \<lbrace>\<lambda>rv. cte_at y\<rbrace>"
apply (clarsimp simp: valid_def)
apply (frule get_cap_cte_at)
apply (drule state_unchanged [OF get_cap_inv])
apply simp
done
(* cap_swap does not change object types. *)
lemma cap_swap_typ_at:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> cap_swap c x c' y \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
apply (simp add: cap_swap_def)
apply (wp set_cdt_typ_at set_cap_typ_at
|simp split del: if_split)+
done
(* Hence cap_swap preserves cap validity and cte_at. *)
lemma cap_swap_valid_cap:
"\<lbrace>valid_cap c\<rbrace> cap_swap cap x cap' y \<lbrace>\<lambda>_. valid_cap c\<rbrace>"
by (simp add: cap_swap_typ_at valid_cap_typ)
lemma cap_swap_cte_at:
"\<lbrace>cte_at p\<rbrace> cap_swap c x c' y \<lbrace>\<lambda>_. cte_at p\<rbrace>"
by (simp add: valid_cte_at_typ cap_swap_typ_at [where P="\<lambda>x. x"])
(* Lifting rule: tcb_cap_valid is preserved by any f that preserves TCB
   typ_at, thread states and TCB IPC buffers. *)
lemma tcb_cap_valid_typ_st:
notes hoare_pre [wp_pre del]
assumes x: "\<And>P t. \<lbrace>\<lambda>s. P (typ_at ATCB t s)\<rbrace> f \<lbrace>\<lambda>rv s. P (typ_at ATCB t s)\<rbrace>"
and y: "\<And>P t. \<lbrace>st_tcb_at P t\<rbrace> f \<lbrace>\<lambda>rv. st_tcb_at P t\<rbrace>"
and z: "\<And>P t. \<lbrace>\<lambda>s. \<forall>tcb. ko_at (TCB tcb) t s \<longrightarrow> P (tcb_ipc_buffer tcb)\<rbrace>
f \<lbrace>\<lambda>rv s. \<forall>tcb. ko_at (TCB tcb) t s \<longrightarrow> P (tcb_ipc_buffer tcb)\<rbrace>"
shows "\<lbrace>\<lambda>s. tcb_cap_valid cap p s\<rbrace> f \<lbrace>\<lambda>rv s. tcb_cap_valid cap p s\<rbrace>"
apply (simp add: tcb_cap_valid_def)
apply (simp only: imp_conv_disj tcb_at_typ)
apply (wp hoare_vcg_disj_lift x y)
apply (simp add: z)
done
(* set_cap leaves TCB IPC buffer fields unchanged. *)
lemma set_cap_tcb_ipc_buffer:
"\<lbrace>\<lambda>s. \<forall>tcb. ko_at (TCB tcb) t s \<longrightarrow> P (tcb_ipc_buffer tcb)\<rbrace>
set_cap cap p
\<lbrace>\<lambda>rv s. \<forall>tcb. ko_at (TCB tcb) t s \<longrightarrow> P (tcb_ipc_buffer tcb)\<rbrace>"
apply (simp add: set_cap_def split_def set_object_def)
apply (wp get_object_wp | wpc)+
apply (clarsimp simp: obj_at_def)
done
(* Instantiation of the lifting rule for set_cap. *)
lemmas set_cap_tcb_cap[wp]
= tcb_cap_valid_typ_st [OF set_cap_typ_at set_cap_pred_tcb set_cap_tcb_ipc_buffer]
(* cap_swap preserves valid_objs when both caps are valid and compatible
   with their new (swapped) slots. *)
lemma cap_swap_valid_objs:
"\<lbrace>valid_objs and valid_cap c and valid_cap c'
and tcb_cap_valid c' x
and tcb_cap_valid c y\<rbrace>
cap_swap c x c' y
\<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: cap_swap_def)
apply (wp set_cdt_valid_objs set_cap_valid_objs set_cap_valid_cap
|simp split del: if_split)+
done
(* Master locale collecting all arch-parametric CSpace assumptions.
   Each architecture provides an interpretation discharging these. *)
locale CSpace_AI
= CSpace_AI_getActiveIRQ_wp state_ext_t
+ CSpace_AI_weak_derived state_ext_t
+ CSpace_AI_set_free_index_invs state_ext_t
+ CSpace_AI_set_untyped_cap_as_full state_ext_t
+ CSpace_AI_cap_insert state_ext_t
for state_ext_t :: "'state_ext::state_ext itself" +
assumes mask_cap_valid[simp]:
"\<And>(s::'state_ext state) c R. s \<turnstile> c \<Longrightarrow> s \<turnstile> mask_cap R c"
assumes mask_cap_objrefs[simp]:
"\<And>rs cap. obj_refs (mask_cap rs cap) = obj_refs cap"
assumes mask_cap_zobjrefs[simp]:
"\<And>rs cap. zobj_refs (mask_cap rs cap) = zobj_refs cap"
assumes derive_cap_valid_cap:
"\<And>cap slot.
\<lbrace>valid_cap cap :: 'state_ext state \<Rightarrow> bool\<rbrace> derive_cap slot cap \<lbrace>valid_cap\<rbrace>,-"
assumes valid_cap_update_rights[simp]:
"\<And>cap (s::'state_ext state) cr.
valid_cap cap s \<Longrightarrow> valid_cap (cap_rights_update cr cap) s"
assumes update_cap_data_validI:
"\<And>(s::'state_ext state) cap p d.
s \<turnstile> cap \<Longrightarrow> s \<turnstile> update_cap_data p d cap"
assumes tcb_cnode_index_def2:
"\<And>n. tcb_cnode_index n = nat_to_cref 3 n"
assumes ex_nonz_tcb_cte_caps:
"\<And>t (s::'state_ext state) ref cp.
\<lbrakk>ex_nonz_cap_to t s; tcb_at t s; valid_objs s; ref \<in> dom tcb_cap_cases\<rbrakk>
\<Longrightarrow> ex_cte_cap_wp_to (appropriate_cte_cap cp) (t, ref) s"
assumes setup_reply_master_arch_caps[wp]:
"\<And>t.
\<lbrace>valid_arch_caps and tcb_at t and valid_objs and pspace_aligned\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. valid_arch_caps :: 'state_ext state \<Rightarrow> bool\<rbrace>"
assumes setup_reply_master_cap_refs_in_kernel_window[wp]:
"\<And>t.
\<lbrace>cap_refs_in_kernel_window and tcb_at t and pspace_in_kernel_window\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. cap_refs_in_kernel_window :: 'state_ext state \<Rightarrow> bool\<rbrace>"
(* FIXME: prove same_region_as_def2 instead or change def *)
assumes same_region_as_Untyped2:
"\<And>pcap cap.
\<lbrakk> is_untyped_cap pcap; same_region_as pcap cap \<rbrakk>
\<Longrightarrow> (is_physical cap \<and> cap_range cap \<noteq> {} \<and> cap_range cap \<subseteq> cap_range pcap)"
assumes same_region_as_cap_class:
"\<And>a b. same_region_as a b \<Longrightarrow> cap_class a = cap_class b"
assumes setup_reply_master_ioports[wp]:
"\<And>t.
\<lbrace>valid_ioports\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. valid_ioports :: 'state_ext state \<Rightarrow> bool\<rbrace>"
(* Any capability returned by a successful lookup_cap is valid,
   assuming all objects are valid. *)
lemma lookup_cap_valid:
"\<lbrace>valid_objs\<rbrace> lookup_cap t c \<lbrace>\<lambda>rv. valid_cap rv\<rbrace>,-"
by (simp add: lookup_cap_def split_def) wp
(* Masking rights never turns a cap into (or out of) a zombie. *)
lemma mask_cap_is_zombie[simp]:
"is_zombie (mask_cap rs cap) = is_zombie cap"
by (cases cap, simp_all add: mask_cap_def cap_rights_update_def is_zombie_def split:bool.split)
(* Every zombie-excluded object referenced by a cap read with get_cap has a
   non-null cap to it, witnessed by the slot the cap was read from. *)
lemma get_cap_exists[wp]:
"\<lbrace>\<top>\<rbrace> get_cap sl \<lbrace>\<lambda>rv s. \<forall>r\<in>zobj_refs rv. ex_nonz_cap_to r s\<rbrace>"
apply (wp get_cap_wp)
apply (cases sl)
apply (fastforce simp: ex_nonz_cap_to_def elim!: cte_wp_at_weakenE)
done
(* Same existence property as get_cap_exists, lifted through lookup_cap. *)
lemma lookup_cap_ex_cap[wp]:
"\<lbrace>\<top>\<rbrace> lookup_cap t ref \<lbrace>\<lambda>rv s. \<forall>r\<in>zobj_refs rv. ex_nonz_cap_to r s\<rbrace>,-"
apply (simp add: lookup_cap_def split_def)
apply wp
done
(* Wrapping lookup_cap in null_cap_on_failure still yields a valid cap:
   on failure the result is NullCap, which is trivially valid. *)
lemma guarded_lookup_valid_cap:
"\<lbrace>valid_objs\<rbrace> null_cap_on_failure (lookup_cap t c) \<lbrace>\<lambda>rv. valid_cap rv \<rbrace>"
apply (simp add: null_cap_on_failure_def)
apply wp
apply (rule hoare_strengthen_post)
apply (rule lookup_cap_valid [unfolded validE_R_def validE_def])
apply (simp split: sum.splits)
apply assumption
done
crunch inv[wp]: lookup_slot_for_cnode_op "P"
(wp: simp: crunch_simps)
lemma lsfco_cte_at[wp]:
"\<lbrace>invs and valid_cap cap\<rbrace>
lookup_slot_for_cnode_op bl cap ref depth
\<lbrace>\<lambda>rv. cte_at rv\<rbrace>,-"
apply (simp add: lookup_slot_for_cnode_op_def split_def unlessE_def whenE_def
split del: if_split cong: if_cong)
apply (wp | wpc | simp)+
apply (wp hoare_drop_imps resolve_address_bits_cte_at)+
apply auto
done
lemma lookup_slot_for_cnode_op_cap_to[wp]:
"\<lbrace>\<lambda>s. \<forall>r\<in>cte_refs croot (interrupt_irq_node s). ex_cte_cap_to r s\<rbrace>
lookup_slot_for_cnode_op is_src croot ptr depth
\<lbrace>\<lambda>rv. ex_cte_cap_to rv\<rbrace>,-"
proof -
have x: "\<And>x f g. (case x of [] \<Rightarrow> f | _ \<Rightarrow> g) = (if x = [] then f else g)"
by (simp split: list.splits)
show ?thesis
apply (simp add: lookup_slot_for_cnode_op_def split_def x
split del: if_split cong: if_cong)
apply (rule hoare_pre)
apply (wp | simp)+
apply (rule hoare_drop_imps)
apply (unfold unlessE_def whenE_def)
apply (wp rab_cte_cap_to)+
apply clarsimp
done
qed
(* Reading a cap transfer out of memory words changes no abstract state. *)
lemma ct_from_words_inv [wp]:
"\<lbrace>P\<rbrace> captransfer_from_words ws \<lbrace>\<lambda>rv. P\<rbrace>"
by (simp add: captransfer_from_words_def | wp dmo_inv loadWord_inv)+
(* FIXME: move *)
crunch inv[wp]: stateAssert P
(* NullCap is always valid, so guarding validity by non-nullness is vacuous. *)
lemma not_Null_valid_imp [simp]:
"(cap \<noteq> cap.NullCap \<longrightarrow> s \<turnstile> cap) = (s \<turnstile> cap)"
by (auto simp: valid_cap_def)
(* ensure_no_children only inspects the CDT; it preserves any predicate. *)
lemma enc_inv [wp]:
"\<lbrace>P\<rbrace> ensure_no_children slot \<lbrace>\<lambda>rv. P\<rbrace>"
unfolding ensure_no_children_def whenE_def
apply wp
apply simp
done
(* Updating a cap's badge does not affect validity in any state. *)
lemma badge_update_valid [iff]:
"valid_cap (badge_update d cap) = valid_cap cap"
by (rule ext, cases cap)
(auto simp: badge_update_def valid_cap_def cap_aligned_def)
(* FIXME: remove *)
lemmas ensure_no_children_inv = enc_inv[of P ptr for P and ptr]
(* ensure_empty only reads the slot; it preserves any predicate. *)
lemma ensure_empty_inv[wp]:
"\<lbrace>P\<rbrace> ensure_empty p \<lbrace>\<lambda>rv. P\<rbrace>"
by (simp add: ensure_empty_def whenE_def | wp)+
lemma get_cap_cte_wp_at3:
"\<lbrace>not cte_wp_at (not P) p\<rbrace> get_cap p \<lbrace>\<lambda>rv s. P rv\<rbrace>"
apply (rule hoare_post_imp [where Q="\<lambda>rv. cte_wp_at (\<lambda>c. c = rv) p and not cte_wp_at (not P) p"])
apply (clarsimp simp: cte_wp_at_def pred_neg_def)
apply (wp get_cap_cte_wp_at)
done
(* Stronger rule for ensure_empty: the postcondition need only be
   established when the slot really does hold NullCap. *)
lemma ensure_empty_stronger:
"\<lbrace>\<lambda>s. cte_wp_at (\<lambda>c. c = cap.NullCap) p s \<longrightarrow> P s\<rbrace> ensure_empty p \<lbrace>\<lambda>rv. P\<rbrace>,-"
apply (simp add: ensure_empty_def whenE_def)
apply wp
apply simp
apply (simp only: imp_conv_disj)
apply (rule hoare_vcg_disj_lift)
apply (wp get_cap_cte_wp_at3)+
apply (simp add: pred_neg_def)
done
(* if_unsafe_then_cap only depends on the cap layout, not the CDT,
   so replacing the CDT preserves it. *)
lemma set_cdt_ifunsafe[wp]:
"\<lbrace>if_unsafe_then_cap\<rbrace> set_cdt m \<lbrace>\<lambda>rv. if_unsafe_then_cap\<rbrace>"
apply (simp add: set_cdt_def)
apply wp
apply (clarsimp elim!: ifunsafe_pspaceI)
done
(* Cap existence is likewise independent of the CDT. *)
lemma set_cdt_ex_cap[wp]:
"\<lbrace>ex_cte_cap_to p\<rbrace> set_cdt m \<lbrace>\<lambda>rv. ex_cte_cap_to p\<rbrace>"
by (wp ex_cte_cap_to_pres set_cdt_cte_wp_at)
(* Updating the revocable (original-cap) flags does not affect
   cte-cap existence. *)
lemma ex_cte_wp_revokable[simp]:
"ex_cte_cap_wp_to P p (is_original_cap_update f s)
= ex_cte_cap_wp_to P p s"
by (simp add: ex_cte_cap_wp_to_def)
(* FIXME: move to StateRelation? *)
(* Partial map from the kernel heap: a pointer maps to Some sz exactly when
   it holds a well-formed CNode of size sz; everything else maps to None. *)
definition
"cns_of_heap h \<equiv> \<lambda>p.
case h p of Some (CNode sz cs) \<Rightarrow> if well_formed_cnode_n sz cs
then Some sz else None
| _ \<Rightarrow> None"
crunches setup_reply_master
for irq_node[wp]: "\<lambda>s. P (interrupt_irq_node s)"
and irq_states[wp]: "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps simp: crunch_simps)
lemma cns_of_heap_typ_at:
"cns_of_heap (kheap s) p = Some n \<longleftrightarrow> typ_at (ACapTable n) p s"
by (auto simp: typ_at_eq_kheap_obj(4) cns_of_heap_def
wf_unique wf_cs_n_unique
split: option.splits Structures_A.kernel_object.splits)
lemma ups_of_heap_TCB_upd[simp]:
"h x = Some (TCB tcb) \<Longrightarrow> ups_of_heap (h(x \<mapsto> TCB y)) = ups_of_heap h"
by (erule ups_of_heap_non_arch_upd) auto
lemma ups_of_heap_CNode_upd[simp]:
"h x = Some (CNode sz cs) \<Longrightarrow> ups_of_heap (h(x \<mapsto> CNode sz y)) = ups_of_heap h"
by (erule ups_of_heap_non_arch_upd) auto
lemma set_cap_ups_of_heap[wp]:
"\<lbrace>\<lambda>s. P (ups_of_heap (kheap s))\<rbrace> set_cap cap sl
\<lbrace>\<lambda>_ s. P (ups_of_heap (kheap s))\<rbrace>"
apply (simp add: set_cap_def split_def set_object_def)
apply (rule hoare_seq_ext [OF _ get_object_sp])
apply (case_tac obj)
by (auto simp: valid_def in_monad obj_at_def get_object_def)
lemma cns_of_heap_TCB_upd[simp]:
"h x = Some (TCB tcb) \<Longrightarrow> cns_of_heap (h(x \<mapsto> TCB y)) = cns_of_heap h"
by (rule ext) (simp add: cns_of_heap_def)
lemma cns_of_heap_CNode_upd[simp]:
"\<lbrakk>h a = Some (CNode sz cs); cs bl = Some cap; well_formed_cnode_n sz cs\<rbrakk>
\<Longrightarrow> cns_of_heap (h(a \<mapsto> CNode sz (cs(bl \<mapsto> cap')))) = cns_of_heap h"
apply (rule ext)
apply (auto simp add: cns_of_heap_def wf_unique)
apply (clarsimp simp add: well_formed_cnode_n_def dom_def Collect_eq)
apply (frule_tac x=bl in spec)
apply (erule_tac x=aa in allE)
apply (clarsimp split: if_split_asm)
done
lemma set_cap_cns_of_heap[wp]:
"\<lbrace>\<lambda>s. P (cns_of_heap (kheap s))\<rbrace> set_cap cap sl
\<lbrace>\<lambda>_ s. P (cns_of_heap (kheap s))\<rbrace>"
apply (simp add: set_cap_def split_def set_object_def)
apply (rule hoare_seq_ext [OF _ get_object_sp])
apply (case_tac obj)
apply (auto simp: valid_def in_monad obj_at_def get_object_def)
done
lemma no_reply_caps_for_thread:
"\<lbrakk> invs s; tcb_at t s; cte_wp_at (\<lambda>c. c = cap.NullCap) (t, tcb_cnode_index 2) s \<rbrakk>
\<Longrightarrow> \<forall>sl m R. \<not> cte_wp_at (\<lambda>c. c = cap.ReplyCap t m R) sl s"
apply clarsimp
apply (case_tac m, simp_all)
apply (fastforce simp: invs_def valid_state_def valid_reply_masters_def
cte_wp_at_caps_of_state is_master_reply_cap_to_def)
apply (subgoal_tac "st_tcb_at halted t s")
apply (fastforce simp: invs_def valid_state_def valid_reply_caps_def
has_reply_cap_def cte_wp_at_caps_of_state st_tcb_def2
is_reply_cap_to_def)
apply (thin_tac "cte_wp_at _ (a, b) s")
apply (fastforce simp: pred_tcb_at_def obj_at_def is_tcb valid_obj_def
valid_tcb_def cte_wp_at_cases tcb_cap_cases_def
dest: invs_valid_objs)
done
crunches setup_reply_master
for tcb[wp]: "tcb_at t"
and idle[wp]: "valid_idle"
(wp: set_cap_tcb simp: crunch_simps)
lemma setup_reply_master_pspace[wp]:
"\<lbrace>valid_pspace and tcb_at t\<rbrace> setup_reply_master t \<lbrace>\<lambda>rv. valid_pspace\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp get_cap_wp set_cap_valid_pspace)
apply clarsimp
apply (rule conjI, clarsimp elim!: cte_wp_at_weakenE)
apply (rule conjI, simp add: valid_cap_def cap_aligned_def word_bits_def)
apply (clarsimp simp: tcb_at_def valid_pspace_def pspace_aligned_def)
apply (fastforce dest: get_tcb_SomeD elim: my_BallE [where y=t])
apply (clarsimp simp: tcb_cap_valid_def is_cap_simps tcb_at_st_tcb_at)
done
lemma setup_reply_master_mdb[wp]:
"\<lbrace>valid_mdb\<rbrace> setup_reply_master t \<lbrace>\<lambda>rv. valid_mdb\<rbrace>"
apply (simp add: setup_reply_master_def valid_mdb_def2 reply_mdb_def)
apply (wp set_cap_caps_of_state2 get_cap_wp)
apply (clarsimp simp add: cte_wp_at_caps_of_state simp del: fun_upd_apply)
apply (rule conjI)
apply (clarsimp simp: mdb_cte_at_def simp del: split_paired_All)
apply (rule conjI, fastforce simp: untyped_mdb_def)
apply (rule conjI, rule descendants_inc_upd_nullcap)
apply simp+
apply (rule conjI, fastforce simp: untyped_inc_def)
apply (rule conjI, fastforce simp: ut_revocable_def)
apply (rule conjI, fastforce simp: irq_revocable_def)
apply (rule conjI, fastforce simp: reply_master_revocable_def)
apply (rule conjI)
apply (fastforce simp: reply_caps_mdb_def
simp del: split_paired_All split_paired_Ex
elim!: allEI exEI)
apply (unfold reply_masters_mdb_def)[1]
apply (rule conjI, fastforce split: if_split_asm
dest: mdb_cte_at_Null_None mdb_cte_at_Null_descendants
elim!: allEI)
apply (erule valid_arch_mdb_updates)
done
(* Unfolded characterisation: a cte cap is appropriate for cap unless cap
   identifies an IRQ handler, in which case only Null/Notification caps
   exempt it from the IRQ restriction. *)
lemma appropriate_cte_cap_def2:
"appropriate_cte_cap cap cte_cap =
(cap = NullCap
\<or> (\<exists> ref badge rights. cap = NotificationCap ref badge rights)
\<or> (\<forall> irq. cte_cap \<noteq> IRQHandlerCap irq))"
by (clarsimp simp: appropriate_cte_cap_def cap_irqs_def cap_irq_opt_def split: cap.split)
context CSpace_AI begin
lemma setup_reply_master_ifunsafe[wp]:
"\<And>t.
\<lbrace>if_unsafe_then_cap and tcb_at t and ex_nonz_cap_to t and valid_objs\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. if_unsafe_then_cap :: 'state_ext state \<Rightarrow> bool\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp new_cap_ifunsafe get_cap_wp)
apply (fastforce elim: ex_nonz_tcb_cte_caps)
done
end
lemma setup_reply_master_reply[wp]:
"\<lbrace>valid_reply_caps and tcb_at t\<rbrace> setup_reply_master t \<lbrace>\<lambda>rv. valid_reply_caps\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp hoare_drop_imps | simp add: if_fun_split)+
apply (fastforce elim: tcb_at_cte_at)
done
lemma setup_reply_master_reply_masters[wp]:
"\<lbrace>valid_reply_masters and tcb_at t\<rbrace>
setup_reply_master t \<lbrace>\<lambda>rv. valid_reply_masters\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp hoare_drop_imps | simp add: if_fun_split)+
apply (fastforce elim: tcb_at_cte_at)
done
lemma setup_reply_master_globals[wp]:
"\<lbrace>valid_global_refs and ex_nonz_cap_to t\<rbrace> setup_reply_master t \<lbrace>\<lambda>rv. valid_global_refs\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp hoare_drop_imps | simp add: if_fun_split)+
apply (clarsimp simp: ex_nonz_cap_to_def cte_wp_at_caps_of_state
cap_range_def
dest: valid_global_refsD2)
done
crunches setup_reply_master
for arch[wp]: "valid_arch_state"
and vspace_objs[wp]: "valid_vspace_objs"
(simp: crunch_simps)
lemma setup_reply_master_irq_handlers[wp]:
"\<lbrace>valid_irq_handlers and tcb_at t\<rbrace> setup_reply_master t \<lbrace>\<lambda>rv. valid_irq_handlers\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp set_cap_irq_handlers hoare_drop_imps | simp add: if_fun_split)+
apply (fastforce elim: tcb_at_cte_at)
done
crunches setup_reply_master
for typ_at[wp]: "\<lambda>s. P (typ_at T p s)"
and cur[wp]: "cur_tcb"
and arch_state[wp]: "\<lambda>s. P (arch_state s)"
and valid_global_objs[wp]: "valid_global_objs"
and global_vspace_mappings[wp]: "valid_global_vspace_mappings"
and v_ker_map[wp]: "valid_kernel_mappings"
and eq_ker_map[wp]: "equal_kernel_mappings"
and asid_map[wp]: valid_asid_map
and only_idle[wp]: only_idle
and pspace_in_kernel_window[wp]: "pspace_in_kernel_window"
and pspace_respects_device_region[wp]: "pspace_respects_device_region"
(simp: crunch_simps)
crunch arch_ko_at: setup_reply_master "ko_at (ArchObj ao) p"
(ignore: set_cap wp: set_cap_obj_at_impossible crunch_wps
simp: if_apply_def2 caps_of_def cap_of_def)
crunch empty_table_at[wp]: setup_reply_master "obj_at (empty_table S) p"
(ignore: set_cap wp: set_cap_obj_at_impossible crunch_wps
simp: if_apply_def2 empty_table_caps_of)
lemmas setup_reply_master_valid_vso_at[wp]
= valid_vso_at_lift [OF setup_reply_master_typ_at setup_reply_master_arch_ko_at]
lemma setup_reply_master_cap_refs_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region and tcb_at t and pspace_in_kernel_window\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp get_cap_wp set_cap_cap_refs_respects_device_region)
apply (clarsimp simp: obj_at_def
cap_range_def)
apply (auto simp: cte_wp_at_caps_of_state)
done
lemma set_original_set_cap_comm:
"(set_original slot val >>= (\<lambda>_. set_cap cap slot)) =
(set_cap cap slot >>= (\<lambda>_. set_original slot val))"
apply (rule ext)
apply (clarsimp simp: bind_def split_def set_cap_def set_original_def
get_object_def set_object_def get_def put_def
simpler_gets_def simpler_modify_def
assert_def return_def fail_def)
apply (case_tac y;
simp add: return_def fail_def)
done
lemma setup_reply_master_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> setup_reply_master t \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: setup_reply_master_def set_original_set_cap_comm)
apply (wp get_cap_wp set_cap_cte_wp_at)
apply (simp add: valid_ioc_def cte_wp_cte_at)
done
lemma setup_reply_master_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> setup_reply_master t \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: setup_reply_master_def)
apply (wp get_cap_wp)
apply (simp add: valid_machine_state_def)
done
crunch valid_irq_states[wp]: setup_reply_master "valid_irq_states"
(wp: crunch_wps simp: crunch_simps)
context CSpace_AI begin
lemma setup_reply_master_invs[wp]:
"\<And>t.
\<lbrace>invs and tcb_at t and ex_nonz_cap_to t\<rbrace>
setup_reply_master t
\<lbrace>\<lambda>rv. invs :: 'state_ext state \<Rightarrow> bool\<rbrace>"
apply (simp add: invs_def valid_state_def)
apply (wp valid_irq_node_typ
| simp add: valid_pspace_def)+
done
end
context CSpace_AI begin
lemma safe_parent_cap_range:
"\<And> m p cap pcap. safe_parent_for m p cap pcap \<Longrightarrow> cap_range cap \<subseteq> cap_range pcap"
apply (clarsimp simp: safe_parent_for_def)
apply (erule disjE)
apply (clarsimp simp: cap_range_def)
apply (erule disjE)
apply clarsimp
apply (drule (1) same_region_as_Untyped2)
apply blast
apply (drule safe_parent_cap_range_arch, clarsimp simp: subset_iff)
done
end
(* NullCap can never act as a safe parent. *)
lemma safe_parent_not_Null [simp]:
"safe_parent_for m p cap cap.NullCap = False"
by (simp add: safe_parent_for_def)
lemma safe_parent_is_parent:
"\<lbrakk> safe_parent_for m p cap pcap; caps_of_state s p = Some pcap; valid_mdb s \<rbrakk>
\<Longrightarrow> should_be_parent_of pcap (is_original_cap s p) cap f"
apply (clarsimp simp: should_be_parent_of_def safe_parent_for_def valid_mdb_def)
apply (erule disjE)
apply clarsimp
apply (erule (1) irq_revocableD)
apply (erule disjE)
apply clarsimp
apply (drule (2) ut_revocableD)
apply (clarsimp simp: is_cap_simps)
apply (drule (2) safe_parent_arch_is_parent[where f=f])
apply (clarsimp simp: is_cap_simps should_be_parent_of_def)
done
context CSpace_AI begin
lemma safe_parent_ut_descendants:
"\<And>m p cap pcap.
\<lbrakk> safe_parent_for m p cap pcap; is_untyped_cap pcap \<rbrakk>
\<Longrightarrow> descendants_of p m = {} \<and> obj_refs cap \<subseteq> untyped_range pcap"
apply (rule conjI)
apply (clarsimp simp: safe_parent_for_def)
apply (safe; clarsimp simp: is_cap_simps safe_parent_for_arch_not_arch')
apply (drule safe_parent_cap_range)
apply (clarsimp simp: is_cap_simps cap_range_def)
apply (drule (1) subsetD)
apply simp
done
lemma safe_parent_refs_or_descendants:
fixes m p cap pcap
shows
"safe_parent_for m p cap pcap \<Longrightarrow>
(obj_refs cap \<subseteq> obj_refs pcap) \<or> (descendants_of p m = {} \<and> obj_refs cap \<subseteq> untyped_range pcap) \<or> safe_parent_for_arch cap pcap"
apply (cases "is_untyped_cap pcap")
apply (drule (1) safe_parent_ut_descendants)
apply simp
apply (rule disjI1)
apply (drule safe_parent_cap_range)
apply (simp add: cap_range_def)
apply (drule not_is_untyped_no_range)
apply simp
done
end
lemma (in mdb_insert_abs) untyped_mdb_simple:
assumes u: "untyped_mdb m cs"
assumes inc: "untyped_inc m cs"
assumes src: "cs src = Some c"
assumes dst: "cs dest = Some cap.NullCap"
assumes ut: "\<not>is_untyped_cap cap"
assumes cr: "(obj_refs cap \<subseteq> obj_refs c) \<or>
(descendants_of src m = {} \<and> obj_refs cap \<subseteq> untyped_range c) \<or> safe_parent_for_arch cap c"
shows "untyped_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding untyped_mdb_def
using u ut cr src dst
apply (intro allI impI)
apply (simp add: descendants_child)
apply (case_tac "ptr = dest", simp)
apply simp
apply (case_tac "ptr' = dest")
apply simp
apply (rule impI)
apply (elim conjE)
apply (simp add: descendants_of_def del: split_paired_All)
apply (erule disjE)
apply (drule_tac ptr=ptr and ptr'=src in untyped_mdbD, assumption+)
apply blast
apply assumption
apply (simp add: descendants_of_def)
apply (erule disjE)
apply (elim conjE)
apply (case_tac "untyped_range c = {}", simp)
apply (frule_tac p=src and p'=ptr in untyped_incD [rotated -1, OF inc])
apply fastforce
apply assumption+
apply (simp add: descendants_of_def del: split_paired_All)
apply (elim conjE)
apply (erule disjE, fastforce)
apply (erule disjE, fastforce)
apply blast
apply (clarsimp dest!: int_not_emptyD safe_parent_for_arch_no_obj_refs)
apply (simp add: untyped_mdbD del: split_paired_All)
apply (intro impI)
apply (frule_tac ptr=src and ptr'=ptr' in untyped_mdbD)
apply clarsimp
apply assumption
apply clarsimp
apply assumption
apply simp
done
lemma (in mdb_insert_abs) reply_caps_mdb_simple:
assumes u: "reply_caps_mdb m cs"
assumes src: "cs src = Some c"
assumes sr: "\<not>is_reply_cap c \<and> \<not>is_master_reply_cap c"
assumes nr: "\<not>is_reply_cap cap \<and> \<not>is_master_reply_cap cap"
shows "reply_caps_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding reply_caps_mdb_def
using u src sr nr
apply (intro allI impI)
apply (simp add: descendants_child del: split_paired_Ex)
apply (case_tac "ptr = dest", simp add: is_cap_simps)
apply (simp del: split_paired_Ex)
apply (unfold reply_caps_mdb_def)
apply (elim allE)
apply (erule(1) impE)
apply (erule exEI)
apply simp
apply blast
done
lemma (in mdb_insert_abs) reply_masters_mdb_simple:
assumes u: "reply_masters_mdb m cs"
assumes src: "cs src = Some c"
assumes sr: "\<not>is_reply_cap c \<and> \<not>is_master_reply_cap c"
assumes nr: "\<not>is_reply_cap cap \<and> \<not>is_master_reply_cap cap"
shows "reply_masters_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
unfolding reply_masters_mdb_def
using u src sr nr
apply (intro allI impI)
apply (simp add: descendants_child del: split_paired_Ex)
apply (case_tac "ptr = dest", simp add: is_cap_simps)
apply (simp del: split_paired_Ex)
apply (unfold reply_masters_mdb_def)
apply (elim allE)
apply (erule(1) impE)
apply (elim conjE, simp add: neq is_cap_simps)
apply (intro conjI impI)
apply fastforce
apply (rule ccontr, simp)
apply (rule ballI, rule ccontr, simp add: descendants_of_def)
done
(* A safe parent always covers the same region as the child cap. *)
lemma safe_parent_same_region:
"safe_parent_for m p cap pcap \<Longrightarrow> same_region_as pcap cap"
by (simp add: safe_parent_for_def)
lemma (in mdb_insert_abs) reply_mdb_simple:
assumes u: "reply_mdb m cs"
assumes src: "cs src = Some c"
assumes sr: "\<not>is_reply_cap c \<and> \<not>is_master_reply_cap c"
assumes nr: "\<not>is_reply_cap cap \<and> \<not>is_master_reply_cap cap"
shows "reply_mdb (m(dest \<mapsto> src)) (cs(dest \<mapsto> cap))"
using u src sr nr unfolding reply_mdb_def
by (simp add: reply_caps_mdb_simple reply_masters_mdb_simple)
context CSpace_AI begin
lemma cap_insert_simple_mdb:
fixes dest src cap
shows
"\<lbrace>valid_mdb and valid_objs and
cte_wp_at (\<lambda>c. c = cap.NullCap) dest and
(\<lambda>s. cte_wp_at (safe_parent_for (cdt s) src cap) src s) and
K (is_simple_cap cap)\<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>rv. valid_mdb :: 'state_ext state \<Rightarrow> bool\<rbrace>"
apply (simp add: cap_insert_def valid_mdb_def2 update_cdt_def set_cdt_def set_untyped_cap_as_full_def)
apply (wp set_cap_caps_of_state2 get_cap_wp|simp del: fun_upd_apply split del: if_split)+
apply (clarsimp simp: cte_wp_at_caps_of_state safe_parent_is_parent valid_mdb_def2
simp del: fun_upd_apply
split del: if_split)
apply (rule conjI)
apply (cases src, cases dest)
apply (clarsimp simp: mdb_cte_at_def is_simple_cap_def split del: if_split)
apply (subgoal_tac "mdb_insert_abs (cdt s) src dest")
prefer 2
apply (rule mdb_insert_abs.intro)
apply clarsimp
apply (erule (1) mdb_cte_at_Null_None)
apply (erule (1) mdb_cte_at_Null_descendants)
apply (intro conjI impI)
apply (clarsimp simp:mdb_cte_at_def is_simple_cap_def split del:if_split)
apply (fastforce split:if_split_asm)
apply (erule (4) mdb_insert_abs.untyped_mdb_simple)
apply (simp add: is_simple_cap_def)
apply (erule safe_parent_refs_or_descendants)
apply (erule(1) mdb_insert_abs.descendants_inc)
apply simp
apply (simp add:safe_parent_cap_range)
apply (clarsimp simp:safe_parent_for_def same_region_as_cap_class)
apply (frule mdb_insert_abs.neq)
apply (simp add: no_mloop_def mdb_insert_abs.parency)
apply (intro allI impI)
apply (rule notI)
apply (simp add: mdb_insert_abs.dest_no_parent_trancl)
apply (erule(2) mdb_insert_abs.untyped_inc_simple)
apply (drule(1) caps_of_state_valid)+
apply (simp add:valid_cap_aligned)
apply (simp add:is_simple_cap_def)+
apply (clarsimp simp: ut_revocable_def is_simple_cap_def)
apply (clarsimp simp: irq_revocable_def is_simple_cap_def)
apply (clarsimp simp: reply_master_revocable_def is_simple_cap_def)
apply (erule(2) mdb_insert_abs.reply_mdb_simple)
apply (fastforce simp: is_simple_cap_def safe_parent_for_def is_cap_simps)
apply (clarsimp simp: is_simple_cap_def)
apply (erule (2) valid_arch_mdb_simple)
done
end
lemma set_untyped_cap_as_full_caps_of_state_diff:
"\<lbrace>\<lambda>s. src \<noteq> dest \<and> P (caps_of_state s dest)\<rbrace>
set_untyped_cap_as_full src_cap cap src
\<lbrace>\<lambda>rv s. P (caps_of_state s dest)\<rbrace>"
apply (clarsimp simp:set_untyped_cap_as_full_def)
apply (intro conjI impI allI)
apply (wp|clarsimp)+
done
lemma safe_parent_for_masked_as_full[simp]:
"safe_parent_for m src a (masked_as_full src_cap b) =
safe_parent_for m src a src_cap"
apply (clarsimp simp:safe_parent_for_def)
apply (rule iffI)
apply (auto simp: masked_as_full_def free_index_update_def safe_parent_for_arch_not_arch' is_cap_simps
split: if_splits cap.splits)+
done
lemma lookup_cnode_slot_real_cte [wp]:
"\<lbrace>valid_objs and valid_cap croot\<rbrace> lookup_slot_for_cnode_op s croot ptr depth \<lbrace>\<lambda>rv. real_cte_at rv\<rbrace>, -"
apply (simp add: lookup_slot_for_cnode_op_def split_def unlessE_whenE cong: if_cong split del: if_split)
apply (rule hoare_pre)
apply (wp hoare_drop_imps resolve_address_bits_real_cte_at whenE_throwError_wp
|wpc|simp)+
done
(* Changing a cap's rights leaves its cte references unchanged. *)
lemma cte_refs_rights_update [simp]:
"cte_refs (cap_rights_update R cap) x = cte_refs cap x"
by (force simp: cap_rights_update_def split: cap.splits bool.split)
lemmas set_cap_typ_ats [wp] = abs_typ_at_lifts [OF set_cap_typ_at]
lemma lookup_slot_for_cnode_op_cap_to2[wp]:
"\<lbrace>\<lambda>s. (is_cnode_cap croot \<longrightarrow>
(\<forall>r\<in>cte_refs croot (interrupt_irq_node s). ex_cte_cap_wp_to P r s))
\<and> (\<forall>cp. is_cnode_cap cp \<longrightarrow> P cp)\<rbrace>
lookup_slot_for_cnode_op is_src croot ptr depth
\<lbrace>\<lambda>rv. ex_cte_cap_wp_to P rv\<rbrace>,-"
proof -
have x: "\<And>x f g. (case x of [] \<Rightarrow> f | _ \<Rightarrow> g) = (if x = [] then f else g)"
by (simp split: list.splits)
show ?thesis
apply (simp add: lookup_slot_for_cnode_op_def split_def x
split del: if_split cong: if_cong)
apply (wp | simp)+
apply (rule hoare_drop_imps)
apply (unfold unlessE_def whenE_def)
apply (wp rab_cte_cap_to)+
apply clarsimp
done
qed
end
|
[STATEMENT]
(* The Euler-Maclaurin remainder of the identically-zero function is zero. *)
lemma EM_remainder_0 [simp]: "EM_remainder n (\<lambda>x. 0) a = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. EM_remainder n (\<lambda>x. 0::'a) a = (0::'a)
[PROOF STEP]
by (rule EM_remainder_eqI) (simp add: EM_remainder'_def)
% Plot/export phase-transition results for the noiseless MMV experiment
% (S = 2), recovered with CoSaMP.  Reads the saved .mat produced by the
% corresponding experiment run and writes figures into bin/.
close all;
clear all;
clc;
% Saved experiment data for this configuration.
data_file_path = 'bin/mmv_phase_transition_noiseless_s_2.mat';
% Export figures to bin/ under the given base name.
options.export = true;
options.export_dir = 'bin';
options.export_name = 'mmv_noiseless_s_2';
% Sparsity levels K highlighted in the plots.
options.chosen_ks = [2, 4, 8, 16, 32, 64];
options.subtitle = 'Noiseless, S = 2';
spx.pursuit.PhaseTransitionAnalysis.print_results(data_file_path, ...
'CoSaMP', options);
|
\documentclass[abstract=true]{scrartcl}
%set letterpaper, 10pt options for the American (US letter) page layout
\usepackage{amsmath, amssymb, amsthm} %necessary math packages
\usepackage{verbatim} %if you need not to interpret latex
\usepackage{graphicx} %insert figures
\usepackage{booktabs} %nice tables
\usepackage[colorlinks]{hyperref} %links
\usepackage{xcolor} %definecolors
\usepackage{enumerate} %enumerate
\usepackage{natbib} %bib organization
\usepackage[affil-it]{authblk} %better maketitle
\usepackage{mdframed} %frame example
\usepackage{microtype} %small improvement
\usepackage{caption} %caption
\usepackage{style/matheusfarias} %my style
%\usepackage[body={4.8in,7.5in},
% top=1.2in, left=1.8in]{geometry} %american page layout
\begin{document}
\title{Template}
\date{Fall, 2021}
\author{Matheus S. Farias%
\thanks{E-mail address: \href{mailto:[email protected]}{[email protected]}}}
\affil{School of Engineering and Applied Sciences, Harvard University}
\maketitle
\begin{abstract}
Here is the abstract with a citation \cite{shannon48}.
\end{abstract}
\tableofcontents
\section{First Section}
See Example \ref{ex:ref}.
\begin{example}
First example.
\label{ex:ref}
\end{example}
This is an \emph{example} of emphasize and here a \textbf{bold text}.
\subsection{First Subsection}
Table \ref{tab:ref} is the first one.
\begin{table}[h]
\centering
\caption{\label{tab:ref}First table.}
\vspace{5pt}
\begin{tabular}{@{}lll@{}}
\toprule
\textbf{Quadrant} & $\mathbf{5}$ \textbf{m} & $\mathbf{10}$ \textbf{m} \\
\midrule
Quadrant I & $8.63\%$ & $9.11\%$ \\
Quadrant II & $5.63\%$ & $7.77\%$ \\
\bottomrule
\end{tabular}
\end{table}
Figure \ref{fig:ref} is the first one.
\begin{figure}
\centering
\includegraphics[width = \textwidth]{logo.pdf}
\caption{First figure.}
\label{fig:ref}
\end{figure}
\bibliographystyle{apalike}
\bibliography{bib/mybib.bib} %my bib
\end{document} |
#include <iostream>
#include <chrono>
#include <thread>
#include <Eigen/Eigen>
#include <pangolin/pangolin.h>
#include <SceneGraph/SceneGraph.h>
using namespace std;
// Demo: build a SceneGraph scene (grid, axes, spiral, floating text and, on
// desktop GL, movable widgets) and browse it interactively in a Pangolin
// window until the user quits (Esc).
int main( int /*argc*/, char** /*argv[]*/ )
{
// Create OpenGL window in single line thanks to GLUT
pangolin::CreateWindowAndBind("Main",640,480);
SceneGraph::GLSceneGraph::ApplyPreferredGlSettings();
// Scenegraph to hold GLObjects and relative transformations
SceneGraph::GLSceneGraph glGraph;
// Define grid object
SceneGraph::GLGrid glGrid( 50, 2.0, true );
glGraph.AddChild(&glGrid);
// Define axis object, and set its pose
SceneGraph::GLAxis glAxis;
glAxis.SetPose( 0, 0, 0, 0, 0, 0);
glAxis.SetScale(0.25);
glGraph.AddChild(&glAxis);
// Define 3D spiral using a GLCachedPrimitives object
// (unit circle offset to (2,2), descending 0.1 per radian, 5 full turns)
SceneGraph::GLCachedPrimitives glSpiral(GL_LINE_STRIP, SceneGraph::GLColor(1.0f,0.7f,0.2f) );
for(double t=0; t < 10*M_PI; t+= M_PI/50) {
glSpiral.AddVertex(Eigen::Vector3d(cos(t)+2, sin(t)+2, -0.1*t) );
}
glGraph.AddChild(&glSpiral);
// Define 3D floating text object
SceneGraph::GLText glText3d("3D Floating Text", -1, 1, -1);
glGraph.AddChild(&glText3d);
// Interactive widgets below are unavailable on OpenGL ES builds.
#ifndef HAVE_GLES
SceneGraph::GLMovableAxis glMovableAxis;
glMovableAxis.SetPosition(-3,3,-1);
glGraph.AddChild(&glMovableAxis);
SceneGraph::GLAxisAlignedBox glBox;
glBox.SetResizable();
glMovableAxis.AddChild(&glBox);
// Define movable waypoint object with velocity
SceneGraph::GLWayPoint glWaypoint;
glWaypoint.SetPose(0.5,0.5,-0.1,0,0,0);
glGraph.AddChild(&glWaypoint);
// Optionally clamp waypoint to specific plane
glWaypoint.ClampToPlane(Eigen::Vector4d(0,0,1,0));
#endif
// Define Camera Render Object (for view / scene browsing)
pangolin::OpenGlRenderState stacks3d(
pangolin::ProjectionMatrix(640,480,420,420,320,240,0.1,1000),
pangolin::ModelViewLookAt(0,-2,-4, 0,1,0, pangolin::AxisNegZ)
);
// We define a new view which will reside within the container.
pangolin::View view3d;
// We set the views location on screen and add a handler which will
// let user input update the model_view matrix (stacks3d) and feed through
// to our scenegraph
view3d.SetBounds(0.0, 1.0, 0.0, 1.0, 640.0f/480.0f)
.SetHandler(new SceneGraph::HandlerSceneGraph(glGraph,stacks3d,pangolin::AxisNegZ))
.SetDrawFunction(SceneGraph::ActivateDrawFunctor(glGraph, stacks3d));
// Add our views as children to the base container.
pangolin::DisplayBase().AddDisplay(view3d);
// Default hooks for exiting (Esc) and fullscreen (tab).
while( !pangolin::ShouldQuit() )
{
// Clear whole screen
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Swap frames and Process Events
pangolin::FinishFrame();
// Pause for 1/60th of a second.
std::this_thread::sleep_for(std::chrono::milliseconds(1000/60));
}
return 0;
}
|
------------------------------------------------------------------------------
-- Reasoning partially about functions
------------------------------------------------------------------------------
{-# OPTIONS --allow-unsolved-metas #-}
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
-- We cannot reasoning partially about partial functions intended to
-- operate in total values.
module FOT.FOTC.Data.Nat.AddPartialRightIdentity where
open import FOTC.Base
open import FOTC.Data.Nat
------------------------------------------------------------------------------
-- Open question: how to proceed?  In FOTC, terms may be partial, so the
-- usual induction on a total natural number n is not available for an
-- arbitrary n; the hole below records the unsolved right-identity goal.
+-partialRightIdentity : ∀ n → n + zero ≡ n
+-partialRightIdentity n = {!!}
|
#
# This file is part of the Actors.jl Julia package,
# MIT license, part of https://github.com/JuliaActors
#
# Test the Actors.jl checkpointing actor: take checkpoints, restore them,
# persist them to disk, shut the agent down, and reload from file.
include("delays.jl")
using Actors, Test, .Delays
# File the checkpoints are saved to and reloaded from; removed at the end.
const fname = "test.x"
# Expected checkpoint dictionary after the updates below
# (:test is overwritten once, so only its latest value remains).
d = Dict(
:test => (4, 5, 6),
:test1 => ("a", "b", "c")
)
# Start a level-1 checkpointing agent backed by fname.
cp = checkpointing(1, fname)
checkpoint(cp, :test, 1,2,3)
@test restore(cp, :test) == (1,2,3)
# Overwriting the same key replaces the stored tuple.
checkpoint(cp, :test, 4,5,6)
@test restore(cp, :test) == (4,5,6)
checkpoint(cp, :test1, "a","b","c")
@test get_checkpoints(cp) == d
# Persist to disk; @delayed retries until the async write is visible.
save_checkpoints(cp)
@test @delayed isfile(fname)
exit!(cp)
@test @delayed info(cp) == :done
# A fresh agent must recover the saved state from file.
cp = checkpointing(1, fname)
load_checkpoints(cp, fname)
@test get_checkpoints(cp) == d
rm(fname)
|
\paragraph{Warnings}
\begin{enumerate}
\item It is recommended you do not modify the \texttt{data} directory. Modifying the files in the data directory risks
making your assignment submissions incorrect.
\item You cannot rename the \texttt{solver.py} file or the \texttt{solve\_it()} method.
\item Be careful when using global variables in your implementation. The \texttt{solve\_it()} method will be run repeatedly and it is your job to clear the global data between runs.
\item \texttt{solver.py} must remain in the same directory as \texttt{submit.py}.
\end{enumerate}
|
# Build the work buffer and the planned real FFT used by `_fft_with_plan`.
# The buffer is twice the grid size: the grid data goes in the lower half,
# the upper half stays zero (zero padding).
# Returns the tuple `(buffer, plan)`.
function make_fft_plan(x)
    padded = zeros(2 * length(x))
    plan = plan_rfft(padded)
    return (padded, plan)
end
# Apply the planned real FFT `p` to `x` placed in the lower half of the
# work buffer `r`, and return the negated imaginary part of the spectrum
# with the last bin dropped (an rfft of a length-2n real vector yields
# n + 1 complex bins, so n values remain).
# NOTE(review): assumes the upper half of `r` still holds the zeros it was
# created with by `make_fft_plan` — confirm it is never written elsewhere.
function _fft_with_plan(x, r, p)
    n = length(x)
    r[1:n] = x                  # overwrite the lower half; upper half keeps its zeros
    spectrum = p * r            # allocates the complex spectrum
    conj!(spectrum)             # flips the sign of every imaginary part in place
    sine_part = imag(spectrum)  # so this equals -imag(p * r)
    return sine_part[1:(end - 1)]
end
# Forward radial transform (the "oz" pair, presumably for an
# Ornstein–Zernike-style solver — TODO confirm against callers):
# pre-multiplies the input by its zero-based grid index, applies the
# planned sine-type FFT via `_fft_with_plan`, then divides the index back
# out with a (4*rmax^3 / nr^2) scale.  Entry 1 (zero index) is left as
# produced by the transform.
# NOTE(review): mutates `f` in place even though the name carries no `!`.
function fft_oz(f, r, rmax, nr, p)
    for k in 1:nr
        f[k] *= (k - 1)
    end
    transformed = _fft_with_plan(f, r, p)
    scale = (4.0 * rmax^3) / nr^2
    for k in 2:nr
        transformed[k] *= scale / (k - 1)
    end
    return transformed
end
# Inverse radial transform, mirroring `fft_oz` with the reciprocal scale
# nr * (0.5 / rmax^3): pre-multiplies by the zero-based grid index,
# applies the planned sine-type FFT, then divides the index back out.
# NOTE(review): mutates `f` in place even though the name carries no `!`.
function ifft_oz(f, r, rmax, nr, p)
    for k in 1:nr
        f[k] *= (k - 1)
    end
    transformed = _fft_with_plan(f, r, p)
    scale = nr * (0.5 / rmax^3)
    for k in 2:nr
        transformed[k] *= scale / (k - 1)
    end
    return transformed
end
|
# SW is identified with the boundary of the prime simplex: membership,
# equality, random generation and the phi map on nonempty subsets are
# aliases of the corresponding prime_simplex_boundary operations.
`is_element/SW` := eval(`is_element/prime_simplex_boundary`);
`is_equal/SW` := eval(`is_equal/prime_simplex_boundary`);
# No partial order on SW is provided.
`is_leq/SW` := NULL;
`random_element/SW` := eval(`random_element/prime_simplex_boundary`);
# Enumeration and counting are deliberately left undefined (NULL).
`list_elements/SW` := NULL;
`count_elements/SW` := NULL;
`phi/nonempty_subsets/SW` := eval(`phi/nonempty_subsets/prime_simplex_boundary`);
|
[STATEMENT]
lemma pst_getmin_ismin:
"invpst t \<Longrightarrow> t\<noteq>Leaf \<Longrightarrow> is_min2 (pst_getmin t) (set_tree t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>invpst t; t \<noteq> \<langle>\<rangle>\<rbrakk> \<Longrightarrow> is_min2 (pst_getmin t) (Tree2.set_tree t)
[PROOF STEP]
by (cases t rule: pst_getmin.cases) auto |
[STATEMENT]
lemma type_induct [case_names Fun]:
assumes
"(\<And>T. (\<And>T1 T2. T = T1 \<rightarrow> T2 \<Longrightarrow> P T1) \<Longrightarrow>
(\<And>T1 T2. T = T1 \<rightarrow> T2 \<Longrightarrow> P T2) \<Longrightarrow> P T)"
shows "P T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P T
[PROOF STEP]
proof (induct T)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. P \<B>
2. \<And>T1 T2. \<lbrakk>P T1; P T2\<rbrakk> \<Longrightarrow> P (T1 \<rightarrow> T2)
[PROOF STEP]
case \<B>
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. P \<B>
2. \<And>T1 T2. \<lbrakk>P T1; P T2\<rbrakk> \<Longrightarrow> P (T1 \<rightarrow> T2)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<B>
[PROOF STEP]
by (rule assms) simp_all
[PROOF STATE]
proof (state)
this:
P \<B>
goal (1 subgoal):
1. \<And>T1 T2. \<lbrakk>P T1; P T2\<rbrakk> \<Longrightarrow> P (T1 \<rightarrow> T2)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>T1 T2. \<lbrakk>P T1; P T2\<rbrakk> \<Longrightarrow> P (T1 \<rightarrow> T2)
[PROOF STEP]
case Fun
[PROOF STATE]
proof (state)
this:
P T1_
P T2_
goal (1 subgoal):
1. \<And>T1 T2. \<lbrakk>P T1; P T2\<rbrakk> \<Longrightarrow> P (T1 \<rightarrow> T2)
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P (T1_ \<rightarrow> T2_)
[PROOF STEP]
by (rule assms) (insert Fun, simp_all)
[PROOF STATE]
proof (state)
this:
P (T1_ \<rightarrow> T2_)
goal:
No subgoals!
[PROOF STEP]
qed |
(* Title: OPnet_Lifting.thy
License: BSD 2-Clause. See LICENSE.
Author: Timothy Bourke
*)
section "Lifting rules for (open) partial networks"
theory OPnet_Lifting
imports ONode_Lifting OAWN_SOS OPnet
begin
(* Custom induction principle for states reachable in an open parallel
   composition p1 \<parallel> p2: every reachable state has the form
   (\<sigma>, SubnetS s t), and P \<sigma> s t follows from cases for initial
   states (init), environment steps (other), and local transitions (local). *)
lemma oreachable_par_subnet_induct [consumes, case_names init other local]:
  assumes "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U"
      and init: "\<And>\<sigma> s t. (\<sigma>, SubnetS s t) \<in> init (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) \<Longrightarrow> P \<sigma> s t"
      and other: "\<And>\<sigma> s t \<sigma>'. \<lbrakk> (\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U;
                               U \<sigma> \<sigma>'; P \<sigma> s t \<rbrakk> \<Longrightarrow> P \<sigma>' s t"
      and local: "\<And>\<sigma> s t \<sigma>' s' t' a. \<lbrakk> (\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U;
                   ((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2));
                   S \<sigma> \<sigma>' a; P \<sigma> s t \<rbrakk> \<Longrightarrow> P \<sigma>' s' t'"
    shows "P \<sigma> s t"
  using assms(1) proof (induction "(\<sigma>, SubnetS s t)" arbitrary: s t \<sigma>)
    (* Initial states: discharged directly by the init case. *)
    fix s t \<sigma>
    assume "(\<sigma>, SubnetS s t) \<in> init (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
    with init show "P \<sigma> s t" .
  next
    (* Local transition: recover the SubnetS shape of the pre-state,
       then apply the local case. *)
    fix st a s' t' \<sigma>'
    assume "st \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U"
       and tr: "(st, a, (\<sigma>', SubnetS s' t')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
       and "S (fst st) (fst (\<sigma>', SubnetS s' t')) a"
       and IH: "\<And>s t \<sigma>. st = (\<sigma>, SubnetS s t) \<Longrightarrow> P \<sigma> s t"
    from this(1) obtain s t \<sigma> where "st = (\<sigma>, SubnetS s t)"
                                and "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U"
      by (metis net_par_oreachable_is_subnet prod.collapse)
    note this(2)
    moreover from tr and \<open>st = (\<sigma>, SubnetS s t)\<close>
      have "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))" by simp
    moreover from \<open>S (fst st) (fst (\<sigma>', SubnetS s' t')) a\<close> and \<open>st = (\<sigma>, SubnetS s t)\<close>
      have "S \<sigma> \<sigma>' a" by simp
    moreover from IH and \<open>st = (\<sigma>, SubnetS s t)\<close> have "P \<sigma> s t" .
    ultimately show "P \<sigma>' s' t'" by (rule local)
  next
    (* Environment step: only the environment part \<sigma> changes. *)
    fix st \<sigma>' s t
    assume "st \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U"
       and "U (fst st) \<sigma>'"
       and "snd st = SubnetS s t"
       and IH: "\<And>s t \<sigma>. st = (\<sigma>, SubnetS s t) \<Longrightarrow> P \<sigma> s t"
    from this(1,3) obtain \<sigma> where "st = (\<sigma>, SubnetS s t)"
                              and "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) S U"
      by (metis prod.collapse)
    note this(2)
    moreover from \<open>U (fst st) \<sigma>'\<close> and \<open>st = (\<sigma>, SubnetS s t)\<close> have "U \<sigma> \<sigma>'" by simp
    moreover from IH and \<open>st = (\<sigma>, SubnetS s t)\<close> have "P \<sigma> s t" .
    ultimately show "P \<sigma>' s t" by (rule other)
  qed
(* An environment ("other") step over the combined address set of
   p1 \<parallel> p2 is also an "other" step over the addresses of p1 alone,
   provided U is reflexive (needed for p2's addresses, whose states are
   unchanged by the combined step). *)
lemma other_net_tree_ips_par_left:
  assumes "other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) \<sigma> \<sigma>'"
      and "\<And>\<xi>. U \<xi> \<xi>"
    shows "other U (net_tree_ips p\<^sub>1) \<sigma> \<sigma>'"
proof -
  from assms(1) obtain ineq: "\<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). \<sigma>' i = \<sigma> i"
                    and outU: "\<forall>j. j\<notin>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> U (\<sigma> j) (\<sigma>' j)" ..
  show ?thesis
  proof (rule otherI)
    fix i
    assume "i\<in>net_tree_ips p\<^sub>1"
    hence "i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)" by simp
    with ineq show "\<sigma>' i = \<sigma> i" ..
  next
    fix j
    assume "j\<notin>net_tree_ips p\<^sub>1"
    show "U (\<sigma> j) (\<sigma>' j)"
    proof (cases "j\<in>net_tree_ips p\<^sub>2")
      (* j belongs to p2: its state is unchanged, so U holds by reflexivity. *)
      assume "j\<in>net_tree_ips p\<^sub>2"
      hence "j\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)" by simp
      with ineq have "\<sigma>' j = \<sigma> j" ..
      thus "U (\<sigma> j) (\<sigma>' j)"
        by simp (rule \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)
    next
      (* j is outside the whole network: U holds by the assumption. *)
      assume "j\<notin>net_tree_ips p\<^sub>2"
      with \<open>j\<notin>net_tree_ips p\<^sub>1\<close> have "j\<notin>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)" by simp
      with outU show "U (\<sigma> j) (\<sigma>' j)" by simp
    qed
  qed
qed
(* Symmetric variant of other_net_tree_ips_par_left, obtained by
   commuting the address sets of the two subnets. *)
lemma other_net_tree_ips_par_right:
  assumes "other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) \<sigma> \<sigma>'"
      and "\<And>\<xi>. U \<xi> \<xi>"
    shows "other U (net_tree_ips p\<^sub>2) \<sigma> \<sigma>'"
proof -
  from assms(1) have "other U (net_tree_ips (p\<^sub>2 \<parallel> p\<^sub>1)) \<sigma> \<sigma>'"
    by (subst net_tree_ips_commute)
  thus ?thesis using \<open>\<And>\<xi>. U \<xi> \<xi>\<close>
    by (rule other_net_tree_ips_par_left)
qed
(* A step invariant assumed under the plain oarrivemsg assumption can be
   applied to a step from a state reachable under the stronger assumption
   otherwith S IPS (oarrivemsg I), whenever the action itself satisfies
   oarrivemsg I \<sigma>. *)
lemma ostep_arrive_invariantD [elim]:
  assumes "p \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, U \<rightarrow>) P"
      and "(\<sigma>, s) \<in> oreachable p (otherwith S IPS (oarrivemsg I)) U"
      and "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans p"
      and "oarrivemsg I \<sigma> a"
    shows "P ((\<sigma>, s), a, (\<sigma>', s'))"
proof -
  (* Weaken the reachability assumption to the one used by the invariant. *)
  from assms(2) have "(\<sigma>, s) \<in> oreachable p (\<lambda>\<sigma> _ a. oarrivemsg I \<sigma> a) U"
    by (rule oreachable_weakenE) auto
  thus "P ((\<sigma>, s), a, (\<sigma>', s'))"
    using assms(3-4) by (rule ostep_invariantD [OF assms(1)])
qed
(* Decomposition of reachability for a parallel composition: if the combined
   network is reachable under the arrive assumption and interleavings that
   respect U over all of its addresses, then each subnet is reachable under
   the corresponding assumptions for its own addresses, and the two address
   sets are disjoint.  Requires U reflexive and the synchronisation
   invariants act1/act2 on the subnets' cast/deliver/\<tau> actions. *)
lemma opnet_sync_action_subnet_oreachable:
  assumes "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))
                                 (\<lambda>\<sigma> _. oarrivemsg I \<sigma>) (other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)))"
    (is "_ \<in> oreachable _ (?S (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))")
      and "\<And>\<xi>. U \<xi> \<xi>"
      and act1: "opnet onp p\<^sub>1 \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U (net_tree_ips p\<^sub>1) \<rightarrow>)
                   globala (\<lambda>(\<sigma>, a, \<sigma>'). castmsg (I \<sigma>) a
                              \<and> (a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow>
                                   ((\<forall>i\<in>net_tree_ips p\<^sub>1. U (\<sigma> i) (\<sigma>' i))
                                     \<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' i = \<sigma> i))))"
      and act2: "opnet onp p\<^sub>2 \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U (net_tree_ips p\<^sub>2) \<rightarrow>)
                   globala (\<lambda>(\<sigma>, a, \<sigma>'). castmsg (I \<sigma>) a
                              \<and> (a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow>
                                   ((\<forall>i\<in>net_tree_ips p\<^sub>2. U (\<sigma> i) (\<sigma>' i))
                                     \<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' i = \<sigma> i))))"
    shows "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (\<lambda>\<sigma> _. oarrivemsg I \<sigma>) (other U (net_tree_ips p\<^sub>1))
           \<and> (\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (\<lambda>\<sigma> _. oarrivemsg I \<sigma>) (other U (net_tree_ips p\<^sub>2))
           \<and> net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}"
  using assms(1)
proof (induction rule: oreachable_par_subnet_induct)
  (* Initial states: subnet membership is immediate, and disjointness of
     the address sets follows from disjointness of the initial node sets. *)
  case (init \<sigma> s t)
  hence sinit: "(\<sigma>, s) \<in> init (opnet onp p\<^sub>1)"
    and tinit: "(\<sigma>, t) \<in> init (opnet onp p\<^sub>2)"
    and "net_ips s \<inter> net_ips t = {}" by auto
  moreover from sinit have "net_ips s = net_tree_ips p\<^sub>1"
    by (rule opnet_net_ips_net_tree_ips_init)
  moreover from tinit have "net_ips t = net_tree_ips p\<^sub>2"
    by (rule opnet_net_ips_net_tree_ips_init)
  ultimately show ?case by (auto elim: oreachable_init)
next
  (* Environment step: restrict the "other" assumption to each subnet. *)
  case (other \<sigma> s t \<sigma>')
  hence "other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) \<sigma> \<sigma>'"
    and IHs: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
    and IHt: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
    and "net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}" by auto
  have "(\<sigma>', s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
  proof -
    from \<open>?U (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>'\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> have "?U p\<^sub>1 \<sigma> \<sigma>'"
      by (rule other_net_tree_ips_par_left)
    with IHs show ?thesis by - (erule(1) oreachable_other')
  qed
  moreover have "(\<sigma>', t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
  proof -
    from \<open>?U (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>'\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> have "?U p\<^sub>2 \<sigma> \<sigma>'"
      by (rule other_net_tree_ips_par_right)
    with IHt show ?thesis by - (erule(1) oreachable_other')
  qed
  ultimately show ?case using \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close> by simp
next
  (* Local step: case analysis on the synchronising opnet_sos rule. *)
  case (local \<sigma> s t \<sigma>' s' t' a)
  hence stor: "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) (?S (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))"
    and tr: "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
    and "oarrivemsg I \<sigma> a"
    and sor: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
    and tor: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
    and "net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}" by auto
  from tr have "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t'))
                  \<in> opnet_sos (trans (opnet onp p\<^sub>1)) (trans (opnet onp p\<^sub>2))" by simp
  hence "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)
         \<and> (\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
  proof (cases)
    (* Both subnets receive the same message. *)
    fix H K m H' K'
    assume "a = (H \<union> H')\<not>(K \<union> K'):arrive(m)"
       and str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
       and ttr: "((\<sigma>, t), H'\<not>K':arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    from this(1) and \<open>oarrivemsg I \<sigma> a\<close> have "I \<sigma> m" by simp
    with sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    moreover from \<open>I \<sigma> m\<close> tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Left subnet casts; right subnet receives; act1 gives I \<sigma> m. *)
    fix R m H K
    assume str: "((\<sigma>, s), R:*cast(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
       and ttr: "((\<sigma>, t), H\<not>K:arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    from sor str have "I \<sigma> m"
      by - (drule(1) ostep_invariantD [OF act1], simp_all)
    with sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    moreover from \<open>I \<sigma> m\<close> tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Right subnet casts; left subnet receives; act2 gives I \<sigma> m. *)
    fix R m H K
    assume str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
       and ttr: "((\<sigma>, t), R:*cast(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    from tor ttr have "I \<sigma> m"
      by - (drule(1) ostep_invariantD [OF act2], simp_all)
    with sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    moreover from \<open>I \<sigma> m\<close> tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Topology change: connect, performed by both subnets. *)
    fix i i'
    assume str: "((\<sigma>, s), connect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
       and ttr: "((\<sigma>, t), connect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    with sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    moreover from tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Topology change: disconnect, performed by both subnets. *)
    fix i i'
    assume str: "((\<sigma>, s), disconnect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
       and ttr: "((\<sigma>, t), disconnect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    with sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    moreover from tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Deliver in the left subnet; the right subnet sees an "other" step,
       justified by act1's U/unchanged guarantees and disjointness. *)
    fix i d
    assume "t' = t"
       and str: "((\<sigma>, s), i:deliver(d), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
    from sor str have "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j"
      by - (drule(1) ostep_invariantD [OF act1], simp_all)
    moreover with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
    have "\<forall>j. j\<in>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
    moreover from sor str have "\<forall>j\<in>net_tree_ips p\<^sub>1. U (\<sigma> j) (\<sigma>' j)"
      by - (drule(1) ostep_invariantD [OF act1], simp_all)
    ultimately have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      using tor \<open>t' = t\<close> by (clarsimp elim!: oreachable_other')
                            (metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
    moreover from sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis by (rule conjI [rotated])
  next
    (* Deliver in the right subnet; symmetric to the previous case. *)
    fix i d
    assume "s' = s"
       and ttr: "((\<sigma>, t), i:deliver(d), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    from tor ttr have "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j"
      by - (drule(1) ostep_invariantD [OF act2], simp_all)
    moreover with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
    have "\<forall>j. j\<in>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
    moreover from tor ttr have "\<forall>j\<in>net_tree_ips p\<^sub>2. U (\<sigma> j) (\<sigma>' j)"
      by - (drule(1) ostep_invariantD [OF act2], simp_all)
    ultimately have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      using sor \<open>s' = s\<close> by (clarsimp elim!: oreachable_other')
                           (metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
    moreover from tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  next
    (* Internal (\<tau>) step in the left subnet; analogous to deliver-left. *)
    assume "t' = t"
       and str: "((\<sigma>, s), \<tau>, (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
    from sor str have "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j"
      by - (drule(1) ostep_invariantD [OF act1], simp_all)
    moreover with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
    have "\<forall>j. j\<in>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
    moreover from sor str have "\<forall>j\<in>net_tree_ips p\<^sub>1. U (\<sigma> j) (\<sigma>' j)"
      by - (drule(1) ostep_invariantD [OF act1], simp_all)
    ultimately have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      using tor \<open>t' = t\<close> by (clarsimp elim!: oreachable_other')
                            (metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
    moreover from sor str
    have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis by (rule conjI [rotated])
  next
    (* Internal (\<tau>) step in the right subnet; analogous to deliver-right. *)
    assume "s' = s"
       and ttr: "((\<sigma>, t), \<tau>, (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
    from tor ttr have "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j"
      by - (drule(1) ostep_invariantD [OF act2], simp_all)
    moreover with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
    have "\<forall>j. j\<in>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
    moreover from tor ttr have "\<forall>j\<in>net_tree_ips p\<^sub>2. U (\<sigma> j) (\<sigma>' j)"
      by - (drule(1) ostep_invariantD [OF act2], simp_all)
    ultimately have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
      using sor \<open>s' = s\<close> by (clarsimp elim!: oreachable_other')
                           (metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
    moreover from tor ttr
    have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
      by - (erule(1) oreachable_local, auto)
    ultimately show ?thesis ..
  qed
  with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close> show ?case by simp
qed
text \<open>
`Splitting' reachability is trivial when there are no assumptions on interleavings, but
this is useless for showing non-trivial properties, since the interleaving steps can do
anything at all. This lemma is too weak.
\<close>
(* "Splitting" subnet reachability under trivially-true assumptions.
   As the surrounding text notes, this lemma is too weak to be useful:
   unconstrained interleavings admit arbitrary environment behaviour. *)
lemma subnet_oreachable_true_true:
  assumes "(\<sigma>, SubnetS s\<^sub>1 s\<^sub>2) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) (\<lambda>_ _ _. True) (\<lambda>_ _. True)"
    shows "(\<sigma>, s\<^sub>1) \<in> oreachable (opnet onp p\<^sub>1) (\<lambda>_ _ _. True) (\<lambda>_ _. True)"
          "(\<sigma>, s\<^sub>2) \<in> oreachable (opnet onp p\<^sub>2) (\<lambda>_ _ _. True) (\<lambda>_ _. True)"
    (is "_ \<in> ?oreachable p\<^sub>2")
  using assms proof -
    from assms have "(\<sigma>, s\<^sub>1) \<in> ?oreachable p\<^sub>1 \<and> (\<sigma>, s\<^sub>2) \<in> ?oreachable p\<^sub>2"
    proof (induction rule: oreachable_par_subnet_induct)
      (* Initial states split directly. *)
      fix \<sigma> s\<^sub>1 s\<^sub>2
      assume "(\<sigma>, SubnetS s\<^sub>1 s\<^sub>2) \<in> init (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
      thus "(\<sigma>, s\<^sub>1) \<in> ?oreachable p\<^sub>1 \<and> (\<sigma>, s\<^sub>2) \<in> ?oreachable p\<^sub>2"
        by (auto dest: oreachable_init)
    next
      (* Local step: case analysis on the synchronising opnet_sos rule;
         the remaining cases are discharged by the terminal qed method. *)
      case (local \<sigma> s\<^sub>1 s\<^sub>2 \<sigma>' s\<^sub>1' s\<^sub>2' a)
      hence "(\<sigma>, SubnetS s\<^sub>1 s\<^sub>2) \<in> ?oreachable (p\<^sub>1 \<parallel> p\<^sub>2)"
        and sr1: "(\<sigma>, s\<^sub>1) \<in> ?oreachable p\<^sub>1"
        and sr2: "(\<sigma>, s\<^sub>2) \<in> ?oreachable p\<^sub>2"
        and "((\<sigma>, SubnetS s\<^sub>1 s\<^sub>2), a, (\<sigma>', SubnetS s\<^sub>1' s\<^sub>2')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))" by auto
      from this(4)
      have "((\<sigma>, SubnetS s\<^sub>1 s\<^sub>2), a, (\<sigma>', SubnetS s\<^sub>1' s\<^sub>2'))
              \<in> opnet_sos (trans (opnet onp p\<^sub>1)) (trans (opnet onp p\<^sub>2))" by simp
      thus "(\<sigma>', s\<^sub>1') \<in> ?oreachable p\<^sub>1 \<and> (\<sigma>', s\<^sub>2') \<in> ?oreachable p\<^sub>2"
      proof cases
        (* Cast on the left synchronised with an arrival on the right. *)
        fix R m H K
        assume "a = R:*cast(m)"
           and tr1: "((\<sigma>, s\<^sub>1), R:*cast(m), (\<sigma>', s\<^sub>1')) \<in> trans (opnet onp p\<^sub>1)"
           and tr2: "((\<sigma>, s\<^sub>2), H\<not>K:arrive(m), (\<sigma>', s\<^sub>2')) \<in> trans (opnet onp p\<^sub>2)"
        from sr1 and tr1 and TrueI have "(\<sigma>', s\<^sub>1') \<in> ?oreachable p\<^sub>1"
          by (rule oreachable_local')
        moreover from sr2 and tr2 and TrueI have "(\<sigma>', s\<^sub>2') \<in> ?oreachable p\<^sub>2"
          by (rule oreachable_local')
        ultimately show ?thesis ..
      next
        (* Internal step on the left; the right side takes an "other" step. *)
        assume "a = \<tau>"
           and "s\<^sub>2' = s\<^sub>2"
           and tr1: "((\<sigma>, s\<^sub>1), \<tau>, (\<sigma>', s\<^sub>1')) \<in> trans (opnet onp p\<^sub>1)"
        from sr2 and this(2) have "(\<sigma>', s\<^sub>2') \<in> ?oreachable p\<^sub>2" by auto
        moreover have "(\<lambda>_ _. True) \<sigma> \<sigma>'" by (rule TrueI)
        ultimately have "(\<sigma>', s\<^sub>2') \<in> ?oreachable p\<^sub>2"
          by (rule oreachable_other')
        moreover from sr1 and tr1 and TrueI have "(\<sigma>', s\<^sub>1') \<in> ?oreachable p\<^sub>1"
          by (rule oreachable_local')
      qed (insert sr1 sr2, simp_all, (metis (no_types) oreachable_local'
                                            oreachable_other')+)
    qed auto
    thus "(\<sigma>, s\<^sub>1) \<in> ?oreachable p\<^sub>1"
         "(\<sigma>, s\<^sub>2) \<in> ?oreachable p\<^sub>2" by auto
  qed
text \<open>
It may also be tempting to try splitting from the assumption
@{term "(\<sigma>, SubnetS s\<^sub>1 s\<^sub>2) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) (\<lambda>_ _ _. True) (\<lambda>_ _. False)"},
where the environment step would be trivially true (since the assumption is false), but the
lemma cannot be shown when only one side acts, since it must guarantee the assumption for
the other side.
\<close>
lemma lift_opnet_sync_action:
assumes "\<And>\<xi>. U \<xi> \<xi>"
and act1: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, _). castmsg (I \<sigma>) a)"
and act2: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a \<noteq> \<tau> \<and> (\<forall>d. a \<noteq> i:deliver(d)) \<longrightarrow> S (\<sigma> i) (\<sigma>' i)))"
and act3: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a = \<tau> \<or> (\<exists>d. a = i:deliver(d)) \<longrightarrow> U (\<sigma> i) (\<sigma>' i)))"
shows "opnet onp p \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U (net_tree_ips p) \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). castmsg (I \<sigma>) a
\<and> (a \<noteq> \<tau> \<and> (\<forall>i d. a \<noteq> i:deliver(d)) \<longrightarrow>
(\<forall>i\<in>net_tree_ips p. S (\<sigma> i) (\<sigma>' i)))
\<and> (a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow>
((\<forall>i\<in>net_tree_ips p. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p \<longrightarrow> \<sigma>' i = \<sigma> i))))"
(is "opnet onp p \<Turnstile>\<^sub>A (?I, ?U p \<rightarrow>) ?inv (net_tree_ips p)")
proof (induction p)
fix i R
show "opnet onp \<langle>i; R\<rangle> \<Turnstile>\<^sub>A (?I, ?U \<langle>i; R\<rangle> \<rightarrow>) ?inv (net_tree_ips \<langle>i; R\<rangle>)"
proof (rule ostep_invariantI, simp only: opnet.simps net_tree_ips.simps)
fix \<sigma> s a \<sigma>' s'
assume sor: "(\<sigma>, s) \<in> oreachable (\<langle>i : onp i : R\<rangle>\<^sub>o) (\<lambda>\<sigma> _. oarrivemsg I \<sigma>) (other U {i})"
and str: "((\<sigma>, s), a, (\<sigma>', s')) \<in> trans (\<langle>i : onp i : R\<rangle>\<^sub>o)"
and oam: "oarrivemsg I \<sigma> a"
hence "castmsg (I \<sigma>) a"
by - (drule(2) ostep_invariantD [OF act1], simp)
moreover from sor str oam have "a \<noteq> \<tau> \<and> (\<forall>i d. a \<noteq> i:deliver(d)) \<longrightarrow> S (\<sigma> i) (\<sigma>' i)"
by - (drule(2) ostep_invariantD [OF act2], simp)
moreover have "a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow> U (\<sigma> i) (\<sigma>' i)"
proof -
from sor str oam have "a = \<tau> \<or> (\<exists>d. a = i:deliver(d)) \<longrightarrow> U (\<sigma> i) (\<sigma>' i)"
by - (drule(2) ostep_invariantD [OF act3], simp)
moreover from sor str oam have "\<forall>j. j\<noteq>i \<longrightarrow> (\<forall>d. a \<noteq> j:deliver(d))"
by - (drule(2) ostep_invariantD [OF node_local_deliver], simp)
ultimately show ?thesis
by clarsimp metis
qed
moreover from sor str oam have "\<forall>j. j\<noteq>i \<longrightarrow> (\<forall>d. a \<noteq> j:deliver(d))"
by - (drule(2) ostep_invariantD [OF node_local_deliver], simp)
moreover from sor str oam have "a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow> (\<forall>j. j\<noteq>i \<longrightarrow> \<sigma>' j = \<sigma> j)"
by - (drule(2) ostep_invariantD [OF node_tau_deliver_unchanged], simp)
ultimately show "?inv {i} ((\<sigma>, s), a, (\<sigma>', s'))" by simp
qed
next
fix p\<^sub>1 p\<^sub>2
assume inv1: "opnet onp p\<^sub>1 \<Turnstile>\<^sub>A (?I, ?U p\<^sub>1 \<rightarrow>) ?inv (net_tree_ips p\<^sub>1)"
and inv2: "opnet onp p\<^sub>2 \<Turnstile>\<^sub>A (?I, ?U p\<^sub>2 \<rightarrow>) ?inv (net_tree_ips p\<^sub>2)"
show "opnet onp (p\<^sub>1 \<parallel> p\<^sub>2) \<Turnstile>\<^sub>A (?I, ?U (p\<^sub>1 \<parallel> p\<^sub>2) \<rightarrow>) ?inv (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2))"
proof (rule ostep_invariantI)
fix \<sigma> st a \<sigma>' st'
assume "(\<sigma>, st) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) ?I (?U (p\<^sub>1 \<parallel> p\<^sub>2))"
and "((\<sigma>, st), a, (\<sigma>', st')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
and "oarrivemsg I \<sigma> a"
from this(1) obtain s t
where "st = SubnetS s t"
and *: "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) ?I (?U (p\<^sub>1 \<parallel> p\<^sub>2))"
by - (frule net_par_oreachable_is_subnet, metis)
from this(2) and inv1 and inv2
obtain sor: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) ?I (?U p\<^sub>1)"
and tor: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) ?I (?U p\<^sub>2)"
and "net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}"
by - (drule opnet_sync_action_subnet_oreachable [OF _ \<open>\<And>\<xi>. U \<xi> \<xi>\<close>], auto)
from * and \<open>((\<sigma>, st), a, (\<sigma>', st')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))\<close> and \<open>st = SubnetS s t\<close>
obtain s' t' where "st' = SubnetS s' t'"
and "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t'))
\<in> opnet_sos (trans (opnet onp p\<^sub>1)) (trans (opnet onp p\<^sub>2))"
by clarsimp (frule opartial_net_preserves_subnets, metis)
from this(2)
have"castmsg (I \<sigma>) a
\<and> (a \<noteq> \<tau> \<and> (\<forall>i d. a \<noteq> i:deliver(d)) \<longrightarrow> (\<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). S (\<sigma> i) (\<sigma>' i)))
\<and> (a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow> (\<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> \<sigma>' i = \<sigma> i))"
proof cases
fix R m H K
assume "a = R:*cast(m)"
and str: "((\<sigma>, s), R:*cast(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), H\<not>K:arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from sor and str have "I \<sigma> m \<and> (\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i))"
by (auto dest: ostep_invariantD [OF inv1])
moreover with tor and ttr have "\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv2])
ultimately show ?thesis
using \<open>a = R:*cast(m)\<close> by auto
next
fix R m H K
assume "a = R:*cast(m)"
and str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), R:*cast(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from tor and ttr have "I \<sigma> m \<and> (\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i))"
by (auto dest: ostep_invariantD [OF inv2])
moreover with sor and str have "\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv1])
ultimately show ?thesis
using \<open>a = R:*cast(m)\<close> by auto
next
fix H K m H' K'
assume "a = (H \<union> H')\<not>(K \<union> K'):arrive(m)"
and str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), H'\<not>K':arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from this(1) and \<open>oarrivemsg I \<sigma> a\<close> have "I \<sigma> m" by simp
with sor and str have "\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv1])
moreover from tor and ttr and \<open>I \<sigma> m\<close> have "\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv2])
ultimately show ?thesis
using \<open>a = (H \<union> H')\<not>(K \<union> K'):arrive(m)\<close> by auto
next
fix i d
assume "a = i:deliver(d)"
and str: "((\<sigma>, s), i:deliver(d), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
with sor have "((\<forall>i\<in>net_tree_ips p\<^sub>1. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' i = \<sigma> i))"
by (auto dest!: ostep_invariantD [OF inv1])
with \<open>a = i:deliver(d)\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> show ?thesis
by auto
next
fix i d
assume "a = i:deliver(d)"
and ttr: "((\<sigma>, t), i:deliver(d), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
with tor have "((\<forall>i\<in>net_tree_ips p\<^sub>2. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' i = \<sigma> i))"
by (auto dest!: ostep_invariantD [OF inv2])
with \<open>a = i:deliver(d)\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> show ?thesis
by auto
next
assume "a = \<tau>"
and str: "((\<sigma>, s), \<tau>, (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
with sor have "((\<forall>i\<in>net_tree_ips p\<^sub>1. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' i = \<sigma> i))"
by (auto dest!: ostep_invariantD [OF inv1])
with \<open>a = \<tau>\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> show ?thesis
by auto
next
assume "a = \<tau>"
and ttr: "((\<sigma>, t), \<tau>, (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
with tor have "((\<forall>i\<in>net_tree_ips p\<^sub>2. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' i = \<sigma> i))"
by (auto dest!: ostep_invariantD [OF inv2])
with \<open>a = \<tau>\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> show ?thesis
by auto
next
fix i i'
assume "a = connect(i, i')"
and str: "((\<sigma>, s), connect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), connect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from sor and str have "\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv1])
moreover from tor and ttr have "\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv2])
ultimately show ?thesis
using \<open>a = connect(i, i')\<close> by auto
next
fix i i'
assume "a = disconnect(i, i')"
and str: "((\<sigma>, s), disconnect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), disconnect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from sor and str have "\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv1])
moreover from tor and ttr have "\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i)"
by (auto dest: ostep_invariantD [OF inv2])
ultimately show ?thesis
using \<open>a = disconnect(i, i')\<close> by auto
qed
thus "?inv (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) ((\<sigma>, st), a, (\<sigma>', st'))" by simp
qed
qed
(* Decomposition of open reachability over parallel composition: any reachable
   state of the composed network "opnet onp (p1 || p2)" projects to reachable
   states of the two subnets under their own (component-local) assumptions,
   and the two subnets' address sets are disjoint.  The node1-node3 premises
   are the per-node guarantees that are lifted to the network level:
   node1 - every message cast satisfies the message invariant I;
   node2 - synchronising (non-tau, non-deliver) steps respect S locally;
   node3 - internal (tau/deliver) steps respect U locally. *)
theorem subnet_oreachable:
assumes "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))
(otherwith S (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) (oarrivemsg I))
(other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)))"
(is "_ \<in> oreachable _ (?S (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))")
and "\<And>\<xi>. S \<xi> \<xi>"
and "\<And>\<xi>. U \<xi> \<xi>"
and node1: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, _). castmsg (I \<sigma>) a)"
and node2: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a \<noteq> \<tau> \<and> (\<forall>d. a \<noteq> i:deliver(d)) \<longrightarrow> S (\<sigma> i) (\<sigma>' i)))"
and node3: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a = \<tau> \<or> (\<exists>d. a = i:deliver(d)) \<longrightarrow> U (\<sigma> i) (\<sigma>' i)))"
shows "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1)
(otherwith S (net_tree_ips p\<^sub>1) (oarrivemsg I))
(other U (net_tree_ips p\<^sub>1))
\<and> (\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2)
(otherwith S (net_tree_ips p\<^sub>2) (oarrivemsg I))
(other U (net_tree_ips p\<^sub>2))
\<and> net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}"
using assms(1) proof (induction rule: oreachable_par_subnet_induct)
(* Base case: an initial composed state projects to initial component states,
   whose address sets coincide with the respective net_tree_ips. *)
case (init \<sigma> s t)
hence sinit: "(\<sigma>, s) \<in> init (opnet onp p\<^sub>1)"
and tinit: "(\<sigma>, t) \<in> init (opnet onp p\<^sub>2)"
and "net_ips s \<inter> net_ips t = {}" by auto
moreover from sinit have "net_ips s = net_tree_ips p\<^sub>1"
by (rule opnet_net_ips_net_tree_ips_init)
moreover from tinit have "net_ips t = net_tree_ips p\<^sub>2"
by (rule opnet_net_ips_net_tree_ips_init)
ultimately show ?case by (auto elim: oreachable_init)
next
(* Environment ("other") step: an interleaved environment change for the
   whole subnet is also an environment change for each component. *)
case (other \<sigma> s t \<sigma>')
hence "other U (net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2)) \<sigma> \<sigma>'"
and IHs: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
and IHt: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
and "net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}" by auto
have "(\<sigma>', s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
proof -
from \<open>?U (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>'\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> have "?U p\<^sub>1 \<sigma> \<sigma>'"
by (rule other_net_tree_ips_par_left)
with IHs show ?thesis by - (erule(1) oreachable_other')
qed
moreover have "(\<sigma>', t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
proof -
from \<open>?U (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>'\<close> and \<open>\<And>\<xi>. U \<xi> \<xi>\<close> have "?U p\<^sub>2 \<sigma> \<sigma>'"
by (rule other_net_tree_ips_par_right)
with IHt show ?thesis by - (erule(1) oreachable_other')
qed
ultimately show ?case using \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close> by simp
next
(* Local step: case analysis on which opnet_sos rule produced the composed
   transition; each case re-establishes reachability of both components. *)
case (local \<sigma> s t \<sigma>' s' t' a)
hence stor: "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) (?S (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))"
and tr: "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t')) \<in> trans (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))"
and "?S (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>' a"
and sor: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
and tor: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
and "net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}" by auto
(* Lifted per-step invariant for any subnet: casts carry I, synchronising
   steps respect S on the subnet's addresses, internal steps respect U and
   leave all other addresses untouched. *)
have act: "\<And>p. opnet onp p \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U (net_tree_ips p) \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). castmsg (I \<sigma>) a
\<and> (a \<noteq> \<tau> \<and> (\<forall>i d. a \<noteq> i:deliver(d)) \<longrightarrow>
(\<forall>i\<in>net_tree_ips p. S (\<sigma> i) (\<sigma>' i)))
\<and> (a = \<tau> \<or> (\<exists>i d. a = i:deliver(d)) \<longrightarrow>
((\<forall>i\<in>net_tree_ips p. U (\<sigma> i) (\<sigma>' i))
\<and> (\<forall>i. i\<notin>net_tree_ips p \<longrightarrow> \<sigma>' i = \<sigma> i))))"
by (rule lift_opnet_sync_action [OF assms(3-6)])
from \<open>?S (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>' a\<close> have "\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
and "oarrivemsg I \<sigma> a"
by (auto elim!: otherwithE)
from tr have "((\<sigma>, SubnetS s t), a, (\<sigma>', SubnetS s' t'))
\<in> opnet_sos (trans (opnet onp p\<^sub>1)) (trans (opnet onp p\<^sub>2))" by simp
hence "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)
\<and> (\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
proof (cases)
(* Both subnets receive the same arriving message. *)
fix H K m H' K'
assume "a = (H \<union> H')\<not>(K \<union> K'):arrive(m)"
and str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), H'\<not>K':arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from this(1) and \<open>?S (p\<^sub>1 \<parallel> p\<^sub>2) \<sigma> \<sigma>' a\<close> have "I \<sigma> m" by auto
with sor str have "\<forall>i\<in>net_tree_ips p\<^sub>1. S (\<sigma> i) (\<sigma>' i)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
moreover from \<open>I \<sigma> m\<close> tor ttr have "\<forall>i\<in>net_tree_ips p\<^sub>2. S (\<sigma> i) (\<sigma>' i)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
ultimately have "\<forall>i. S (\<sigma> i) (\<sigma>' i)"
using \<open>\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)\<close> by auto
with \<open>I \<sigma> m\<close> sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover from \<open>\<forall>i. S (\<sigma> i) (\<sigma>' i)\<close> \<open>I \<sigma> m\<close> tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
ultimately show ?thesis ..
next
(* Left subnet casts; right subnet receives.  node1 (via act) yields I on
   the cast message. *)
fix R m H K
assume str: "((\<sigma>, s), R:*cast(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), H\<not>K:arrive(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from sor str have "I \<sigma> m"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
with sor str tor ttr have "\<forall>i. S (\<sigma> i) (\<sigma>' i)"
using \<open>\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)\<close>
by (fastforce dest!: ostep_arrive_invariantD [OF act] ostep_arrive_invariantD [OF act])
with \<open>I \<sigma> m\<close> sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover from \<open>\<forall>i. S (\<sigma> i) (\<sigma>' i)\<close> \<open>I \<sigma> m\<close> tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
ultimately show ?thesis ..
next
(* Symmetric case: right subnet casts; left subnet receives. *)
fix R m H K
assume str: "((\<sigma>, s), H\<not>K:arrive(m), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), R:*cast(m), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from tor ttr have "I \<sigma> m"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
with sor str tor ttr have "\<forall>i. S (\<sigma> i) (\<sigma>' i)"
using \<open>\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)\<close>
by (fastforce dest!: ostep_arrive_invariantD [OF act] ostep_arrive_invariantD [OF act])
with \<open>I \<sigma> m\<close> sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover from \<open>\<forall>i. S (\<sigma> i) (\<sigma>' i)\<close> \<open>I \<sigma> m\<close> tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
ultimately show ?thesis ..
next
(* Topology change: connect is observed by both subnets simultaneously. *)
fix i i'
assume str: "((\<sigma>, s), connect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), connect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
with sor tor have "\<forall>i. S (\<sigma> i) (\<sigma>' i)"
using \<open>\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)\<close>
by (fastforce dest!: ostep_arrive_invariantD [OF act] ostep_arrive_invariantD [OF act])
with sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover from \<open>\<forall>i. S (\<sigma> i) (\<sigma>' i)\<close> tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
ultimately show ?thesis ..
next
(* Topology change: disconnect, handled exactly like connect. *)
fix i i'
assume str: "((\<sigma>, s), disconnect(i, i'), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
and ttr: "((\<sigma>, t), disconnect(i, i'), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
with sor tor have "\<forall>i. S (\<sigma> i) (\<sigma>' i)"
using \<open>\<forall>j. j \<notin> net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2) \<longrightarrow> S (\<sigma> j) (\<sigma>' j)\<close>
by (fastforce dest!: ostep_arrive_invariantD [OF act] ostep_arrive_invariantD [OF act])
with sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover from \<open>\<forall>i. S (\<sigma> i) (\<sigma>' i)\<close> tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
ultimately show ?thesis ..
next
(* Left subnet delivers (internal step); the right subnet does not move, so
   its reachability is preserved via an "other" (environment) step using
   that the left step only changes addresses inside net_tree_ips p1. *)
fix i d
assume "t' = t"
and str: "((\<sigma>, s), i:deliver(d), (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
from sor str have "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
hence "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
by (auto intro: \<open>\<And>\<xi>. S \<xi> \<xi>\<close>)
with sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
proof -
from \<open>\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j\<close> and \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
have "\<forall>j. j\<in>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
moreover from sor str have "\<forall>j\<in>net_tree_ips p\<^sub>1. U (\<sigma> j) (\<sigma>' j)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
ultimately show ?thesis
using tor \<open>t' = t\<close> \<open>\<forall>j. j \<notin> net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j\<close>
by (clarsimp elim!: oreachable_other')
(metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
qed
ultimately show ?thesis ..
next
(* Right subnet delivers; symmetric to the previous case. *)
fix i d
assume "s' = s"
and ttr: "((\<sigma>, t), i:deliver(d), (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from tor ttr have "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
hence "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
by (auto intro: \<open>\<And>\<xi>. S \<xi> \<xi>\<close>)
with tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
moreover have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
proof -
from \<open>\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j\<close> and \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
have "\<forall>j. j\<in>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
moreover from tor ttr have "\<forall>j\<in>net_tree_ips p\<^sub>2. U (\<sigma> j) (\<sigma>' j)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
ultimately show ?thesis
using sor \<open>s' = s\<close> \<open>\<forall>j. j \<notin> net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j\<close>
by (clarsimp elim!: oreachable_other')
(metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
qed
ultimately show ?thesis by - (rule conjI)
next
(* Right subnet takes an internal tau step; left subnet is stationary. *)
assume "s' = s"
and ttr: "((\<sigma>, t), \<tau>, (\<sigma>', t')) \<in> trans (opnet onp p\<^sub>2)"
from tor ttr have "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
hence "\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
by (auto intro: \<open>\<And>\<xi>. S \<xi> \<xi>\<close>)
with tor ttr
have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
by - (erule(1) oreachable_local, auto)
moreover have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
proof -
from \<open>\<forall>j. j\<notin>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j\<close> and \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
have "\<forall>j. j\<in>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
moreover from tor ttr have "\<forall>j\<in>net_tree_ips p\<^sub>2. U (\<sigma> j) (\<sigma>' j)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
ultimately show ?thesis
using sor \<open>s' = s\<close> \<open>\<forall>j. j \<notin> net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j\<close>
by (clarsimp elim!: oreachable_other')
(metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
qed
ultimately show ?thesis by - (rule conjI)
next
(* Left subnet takes an internal tau step; right subnet is stationary. *)
assume "t' = t"
and str: "((\<sigma>, s), \<tau>, (\<sigma>', s')) \<in> trans (opnet onp p\<^sub>1)"
from sor str have "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
hence "\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> S (\<sigma> j) (\<sigma>' j)"
by (auto intro: \<open>\<And>\<xi>. S \<xi> \<xi>\<close>)
with sor str
have "(\<sigma>', s') \<in> oreachable (opnet onp p\<^sub>1) (?S p\<^sub>1) (?U p\<^sub>1)"
by - (erule(1) oreachable_local, auto)
moreover have "(\<sigma>', t') \<in> oreachable (opnet onp p\<^sub>2) (?S p\<^sub>2) (?U p\<^sub>2)"
proof -
from \<open>\<forall>j. j\<notin>net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j\<close> and \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close>
have "\<forall>j. j\<in>net_tree_ips p\<^sub>2 \<longrightarrow> \<sigma>' j = \<sigma> j" by auto
moreover from sor str have "\<forall>j\<in>net_tree_ips p\<^sub>1. U (\<sigma> j) (\<sigma>' j)"
by - (drule(1) ostep_arrive_invariantD [OF act], simp_all)
ultimately show ?thesis
using tor \<open>t' = t\<close> \<open>\<forall>j. j \<notin> net_tree_ips p\<^sub>1 \<longrightarrow> \<sigma>' j = \<sigma> j\<close>
by (clarsimp elim!: oreachable_other')
(metis otherI \<open>\<And>\<xi>. U \<xi> \<xi>\<close>)+
qed
ultimately show ?thesis ..
qed
with \<open>net_tree_ips p\<^sub>1 \<inter> net_tree_ips p\<^sub>2 = {}\<close> show ?case by simp
qed
(* Destruction-rule projections of subnet_oreachable: reachability of the
   left component, of the right component, and disjointness of the two
   subnets' address sets, respectively.  [rotated 1] moves the reachability
   premise into major-premise position for use with [dest]. *)
lemmas subnet_oreachable1 [dest] = subnet_oreachable [THEN conjunct1, rotated 1]
lemmas subnet_oreachable2 [dest] = subnet_oreachable [THEN conjunct2, THEN conjunct1, rotated 1]
lemmas subnet_oreachable_disjoint [dest] = subnet_oreachable
[THEN conjunct2, THEN conjunct2, rotated 1]
(* Main lifting result: a per-node open invariant P (premise 1), together
   with the per-node step guarantees node1-node3, lifts by structural
   induction over the network tree p to a global invariant of the whole
   open network: P i holds for every address i in net_tree_ips p. *)
corollary pnet_lift:
assumes "\<And>ii R\<^sub>i. \<langle>ii : onp ii : R\<^sub>i\<rangle>\<^sub>o
\<Turnstile> (otherwith S {ii} (oarrivemsg I), other U {ii} \<rightarrow>) global (P ii)"
and "\<And>\<xi>. S \<xi> \<xi>"
and "\<And>\<xi>. U \<xi> \<xi>"
and node1: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, _). castmsg (I \<sigma>) a)"
and node2: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a \<noteq> \<tau> \<and> (\<forall>d. a \<noteq> i:deliver(d)) \<longrightarrow> S (\<sigma> i) (\<sigma>' i)))"
and node3: "\<And>i R. \<langle>i : onp i : R\<rangle>\<^sub>o \<Turnstile>\<^sub>A (\<lambda>\<sigma> _. oarrivemsg I \<sigma>, other U {i} \<rightarrow>)
globala (\<lambda>(\<sigma>, a, \<sigma>'). (a = \<tau> \<or> (\<exists>d. a = i:deliver(d)) \<longrightarrow> U (\<sigma> i) (\<sigma>' i)))"
shows "opnet onp p \<Turnstile> (otherwith S (net_tree_ips p) (oarrivemsg I),
other U (net_tree_ips p) \<rightarrow>) global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips p. P i \<sigma>)"
(is "_ \<Turnstile> (?owS p, ?U p \<rightarrow>) _")
proof (induction p)
(* Base case: a single node; the invariant is exactly premise 1. *)
fix ii R\<^sub>i
from assms(1) show "opnet onp \<langle>ii; R\<^sub>i\<rangle> \<Turnstile> (?owS \<langle>ii; R\<^sub>i\<rangle>, ?U \<langle>ii; R\<^sub>i\<rangle> \<rightarrow>)
global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips \<langle>ii; R\<^sub>i\<rangle>. P i \<sigma>)" by auto
next
(* Inductive case: combine the two subnet invariants via subnet_oreachable,
   which splits a reachable composed state into reachable component states. *)
fix p\<^sub>1 p\<^sub>2
assume ih1: "opnet onp p\<^sub>1 \<Turnstile> (?owS p\<^sub>1, ?U p\<^sub>1 \<rightarrow>) global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips p\<^sub>1. P i \<sigma>)"
and ih2: "opnet onp p\<^sub>2 \<Turnstile> (?owS p\<^sub>2, ?U p\<^sub>2 \<rightarrow>) global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips p\<^sub>2. P i \<sigma>)"
show "opnet onp (p\<^sub>1 \<parallel> p\<^sub>2) \<Turnstile> (?owS (p\<^sub>1 \<parallel> p\<^sub>2), ?U (p\<^sub>1 \<parallel> p\<^sub>2) \<rightarrow>)
global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). P i \<sigma>)"
unfolding oinvariant_def
proof
fix pq
assume "pq \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2)) (?owS (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))"
moreover then obtain \<sigma> s t where "pq = (\<sigma>, SubnetS s t)"
by (metis net_par_oreachable_is_subnet surjective_pairing)
ultimately have "(\<sigma>, SubnetS s t) \<in> oreachable (opnet onp (p\<^sub>1 \<parallel> p\<^sub>2))
(?owS (p\<^sub>1 \<parallel> p\<^sub>2)) (?U (p\<^sub>1 \<parallel> p\<^sub>2))" by simp
then obtain sor: "(\<sigma>, s) \<in> oreachable (opnet onp p\<^sub>1) (?owS p\<^sub>1) (?U p\<^sub>1)"
and tor: "(\<sigma>, t) \<in> oreachable (opnet onp p\<^sub>2) (?owS p\<^sub>2) (?U p\<^sub>2)"
by - (drule subnet_oreachable [OF _ _ _ node1 node2 node3], auto intro: assms(2-3))
from sor have "\<forall>i\<in>net_tree_ips p\<^sub>1. P i \<sigma>"
by (auto dest: oinvariantD [OF ih1])
moreover from tor have "\<forall>i\<in>net_tree_ips p\<^sub>2. P i \<sigma>"
by (auto dest: oinvariantD [OF ih2])
ultimately have "\<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). P i \<sigma>" by auto
with \<open>pq = (\<sigma>, SubnetS s t)\<close> show "global (\<lambda>\<sigma>. \<forall>i\<in>net_tree_ips (p\<^sub>1 \<parallel> p\<^sub>2). P i \<sigma>) pq" by simp
qed
qed
end
|
State Before: p : ℕ
R : Type ?u.75316
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (wittAdd p n) = 0 State After: p : ℕ
R : Type ?u.75316
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (X 0 + X 1) = 0 Tactic: apply constantCoeff_wittStructureInt p _ _ n State Before: p : ℕ
R : Type ?u.75316
hp : Fact (Nat.Prime p)
inst✝ : CommRing R
n : ℕ
⊢ ↑constantCoeff (X 0 + X 1) = 0 State After: no goals Tactic: simp only [add_zero, RingHom.map_add, constantCoeff_X] |
/-
Copyright (c) 2021 Jireh Loreaux. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jireh Loreaux
! This file was ported from Lean 3 source module analysis.normed_space.spectrum
! leanprover-community/mathlib commit f2ce6086713c78a7f880485f7917ea547a215982
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.Algebra.Spectrum
import Mathbin.Analysis.SpecialFunctions.Pow
import Mathbin.Analysis.Complex.Liouville
import Mathbin.Analysis.Complex.Polynomial
import Mathbin.Analysis.Analytic.RadiusLiminf
import Mathbin.Topology.Algebra.Module.CharacterSpace
import Mathbin.Analysis.NormedSpace.Exponential
/-!
# The spectrum of elements in a complete normed algebra
This file contains the basic theory for the resolvent and spectrum of a Banach algebra.
## Main definitions
* `spectral_radius : ℝ≥0∞`: supremum of `‖k‖₊` for all `k ∈ spectrum 𝕜 a`
* `normed_ring.alg_equiv_complex_of_complete`: **Gelfand-Mazur theorem** For a complex
Banach division algebra, the natural `algebra_map ℂ A` is an algebra isomorphism whose inverse
is given by selecting the (unique) element of `spectrum ℂ a`
## Main statements
* `spectrum.is_open_resolvent_set`: the resolvent set is open.
* `spectrum.is_closed`: the spectrum is closed.
* `spectrum.subset_closed_ball_norm`: the spectrum is a subset of closed disk of radius
equal to the norm.
* `spectrum.is_compact`: the spectrum is compact.
* `spectrum.spectral_radius_le_nnnorm`: the spectral radius is bounded above by the norm.
* `spectrum.has_deriv_at_resolvent`: the resolvent function is differentiable on the resolvent set.
* `spectrum.pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius`: Gelfand's formula for the
spectral radius in Banach algebras over `ℂ`.
* `spectrum.nonempty`: the spectrum of any element in a complex Banach algebra is nonempty.
## TODO
* compute all derivatives of `resolvent a`.
-/
open ENNReal NNReal
/-- The *spectral radius* is the supremum of the `nnnorm` (`‖⬝‖₊`) of elements in the spectrum,
    coerced into an element of `ℝ≥0∞`. Note that it is possible for `spectrum 𝕜 a = ∅`. In this
    case, `spectral_radius a = 0`. It is also possible that `spectrum 𝕜 a` be unbounded (though
    not for Banach algebras, see `spectrum.is_bounded`, below). In this case,
    `spectral_radius a = ∞`. -/
-- The bounded `⨆` over an empty index set is `⊥ = 0`, which gives the empty-spectrum
-- convention described above without any special-casing.
noncomputable def spectralRadius (𝕜 : Type _) {A : Type _} [NormedField 𝕜] [Ring A] [Algebra 𝕜 A]
    (a : A) : ℝ≥0∞ :=
  ⨆ k ∈ spectrum 𝕜 a, ‖k‖₊
#align spectral_radius spectralRadius
variable {𝕜 : Type _} {A : Type _}
namespace spectrum
-- Basic metric/topological properties of the spectrum in a normed algebra:
-- openness of the resolvent set, closedness/boundedness/compactness of the
-- spectrum, and quantitative bounds on the spectral radius.
section SpectrumCompact
open Filter
variable [NormedField 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A]
-- mathport name: exprσ
local notation "σ" => spectrum 𝕜
-- mathport name: exprρ
local notation "ρ" => resolventSet 𝕜
-- mathport name: «expr↑ₐ»
local notation "↑ₐ" => algebraMap 𝕜 A
-- Degenerate cases: trivial algebra and the zero element both have spectral radius 0.
@[simp]
theorem SpectralRadius.of_subsingleton [Subsingleton A] (a : A) : spectralRadius 𝕜 a = 0 := by
simp [spectralRadius]
#align spectrum.spectral_radius.of_subsingleton spectrum.SpectralRadius.of_subsingleton
@[simp]
theorem spectralRadius_zero : spectralRadius 𝕜 (0 : A) = 0 :=
by
nontriviality A
simp [spectralRadius]
#align spectrum.spectral_radius_zero spectrum.spectralRadius_zero
-- Any scalar with norm strictly above the spectral radius lies in the resolvent set.
theorem mem_resolventSet_of_spectralRadius_lt {a : A} {k : 𝕜} (h : spectralRadius 𝕜 a < ‖k‖₊) :
k ∈ ρ a :=
Classical.not_not.mp fun hn => h.not_le <| le_supᵢ₂ k hn
#align spectrum.mem_resolvent_set_of_spectral_radius_lt spectrum.mem_resolventSet_of_spectralRadius_lt
-- Completeness is needed from here on (units form an open set in a Banach algebra).
variable [CompleteSpace A]
theorem isOpen_resolventSet (a : A) : IsOpen (ρ a) :=
Units.isOpen.Preimage ((continuous_algebraMap 𝕜 A).sub continuous_const)
#align spectrum.is_open_resolvent_set spectrum.isOpen_resolventSet
protected theorem isClosed (a : A) : IsClosed (σ a) :=
(isOpen_resolventSet a).isClosed_compl
#align spectrum.is_closed spectrum.isClosed
-- Norm criterion for membership in the resolvent set; the `‖(1 : A)‖` factor
-- accommodates normed rings that are not `NormOneClass`.
theorem mem_resolventSet_of_norm_lt_mul {a : A} {k : 𝕜} (h : ‖a‖ * ‖(1 : A)‖ < ‖k‖) : k ∈ ρ a :=
by
rw [resolventSet, Set.mem_setOf_eq, Algebra.algebraMap_eq_smul_one]
nontriviality A
have hk : k ≠ 0 :=
ne_zero_of_norm_ne_zero ((mul_nonneg (norm_nonneg _) (norm_nonneg _)).trans_lt h).ne'
let ku := Units.map ↑ₐ.toMonoidHom (Units.mk0 k hk)
rw [← inv_inv ‖(1 : A)‖,
mul_inv_lt_iff (inv_pos.2 <| norm_pos_iff.2 (one_ne_zero : (1 : A) ≠ 0))] at h
have hku : ‖-a‖ < ‖(↑ku⁻¹ : A)‖⁻¹ := by simpa [ku, norm_algebraMap] using h
simpa [ku, sub_eq_add_neg, Algebra.algebraMap_eq_smul_one] using (ku.add (-a) hku).IsUnit
#align spectrum.mem_resolvent_set_of_norm_lt_mul spectrum.mem_resolventSet_of_norm_lt_mul
theorem mem_resolventSet_of_norm_lt [NormOneClass A] {a : A} {k : 𝕜} (h : ‖a‖ < ‖k‖) : k ∈ ρ a :=
mem_resolventSet_of_norm_lt_mul (by rwa [norm_one, mul_one])
#align spectrum.mem_resolvent_set_of_norm_lt spectrum.mem_resolventSet_of_norm_lt
-- Contrapositives: elements of the spectrum are norm-bounded by the element's norm.
theorem norm_le_norm_mul_of_mem {a : A} {k : 𝕜} (hk : k ∈ σ a) : ‖k‖ ≤ ‖a‖ * ‖(1 : A)‖ :=
le_of_not_lt <| mt mem_resolventSet_of_norm_lt_mul hk
#align spectrum.norm_le_norm_mul_of_mem spectrum.norm_le_norm_mul_of_mem
theorem norm_le_norm_of_mem [NormOneClass A] {a : A} {k : 𝕜} (hk : k ∈ σ a) : ‖k‖ ≤ ‖a‖ :=
le_of_not_lt <| mt mem_resolventSet_of_norm_lt hk
#align spectrum.norm_le_norm_of_mem spectrum.norm_le_norm_of_mem
theorem subset_closedBall_norm_mul (a : A) : σ a ⊆ Metric.closedBall (0 : 𝕜) (‖a‖ * ‖(1 : A)‖) :=
fun k hk => by simp [norm_le_norm_mul_of_mem hk]
#align spectrum.subset_closed_ball_norm_mul spectrum.subset_closedBall_norm_mul
theorem subset_closedBall_norm [NormOneClass A] (a : A) : σ a ⊆ Metric.closedBall (0 : 𝕜) ‖a‖ :=
fun k hk => by simp [norm_le_norm_of_mem hk]
#align spectrum.subset_closed_ball_norm spectrum.subset_closedBall_norm
theorem is_bounded (a : A) : Metric.Bounded (σ a) :=
(Metric.bounded_iff_subset_ball 0).mpr ⟨‖a‖ * ‖(1 : A)‖, subset_closedBall_norm_mul a⟩
#align spectrum.is_bounded spectrum.is_bounded
-- Closed + bounded = compact, in a proper scalar field (Heine-Borel).
protected theorem isCompact [ProperSpace 𝕜] (a : A) : IsCompact (σ a) :=
Metric.isCompact_of_isClosed_bounded (spectrum.isClosed a) (is_bounded a)
#align spectrum.is_compact spectrum.isCompact
theorem spectralRadius_le_nnnorm [NormOneClass A] (a : A) : spectralRadius 𝕜 a ≤ ‖a‖₊ :=
by
refine' supᵢ₂_le fun k hk => _
exact_mod_cast norm_le_norm_of_mem hk
#align spectrum.spectral_radius_le_nnnorm spectrum.spectralRadius_le_nnnorm
-- On a compact, nonempty spectrum the supremum defining the spectral radius
-- is attained by some spectral value.
theorem exists_nnnorm_eq_spectralRadius_of_nonempty [ProperSpace 𝕜] {a : A} (ha : (σ a).Nonempty) :
∃ k ∈ σ a, (‖k‖₊ : ℝ≥0∞) = spectralRadius 𝕜 a :=
by
obtain ⟨k, hk, h⟩ := (spectrum.isCompact a).exists_forall_ge ha continuous_nnnorm.continuous_on
exact ⟨k, hk, le_antisymm (le_supᵢ₂ k hk) (supᵢ₂_le <| by exact_mod_cast h)⟩
#align spectrum.exists_nnnorm_eq_spectral_radius_of_nonempty spectrum.exists_nnnorm_eq_spectralRadius_of_nonempty
theorem spectralRadius_lt_of_forall_lt_of_nonempty [ProperSpace 𝕜] {a : A} (ha : (σ a).Nonempty)
{r : ℝ≥0} (hr : ∀ k ∈ σ a, ‖k‖₊ < r) : spectralRadius 𝕜 a < r :=
supₛ_image.symm.trans_lt <|
((spectrum.isCompact a).supₛ_lt_iff_of_continuous ha
(ENNReal.continuous_coe.comp continuous_nnnorm).ContinuousOn (r : ℝ≥0∞)).mpr
(by exact_mod_cast hr)
#align spectrum.spectral_radius_lt_of_forall_lt_of_nonempty spectrum.spectralRadius_lt_of_forall_lt_of_nonempty
open ENNReal Polynomial
variable (𝕜)
-- Power estimate toward Gelfand's formula: for every n,
-- spectralRadius a ≤ ‖a^(n+1)‖₊^(1/(n+1)) * ‖1‖₊^(1/(n+1)).
theorem spectralRadius_le_pow_nnnorm_pow_one_div (a : A) (n : ℕ) :
spectralRadius 𝕜 a ≤ ‖a ^ (n + 1)‖₊ ^ (1 / (n + 1) : ℝ) * ‖(1 : A)‖₊ ^ (1 / (n + 1) : ℝ) :=
by
refine' supᵢ₂_le fun k hk => _
-- apply easy direction of the spectral mapping theorem for polynomials
have pow_mem : k ^ (n + 1) ∈ σ (a ^ (n + 1)) := by
simpa only [one_mul, Algebra.algebraMap_eq_smul_one, one_smul, aeval_monomial, one_mul,
eval_monomial] using subset_polynomial_aeval a (monomial (n + 1) (1 : 𝕜)) ⟨k, hk, rfl⟩
-- power of the norm is bounded by norm of the power
have nnnorm_pow_le : (↑(‖k‖₊ ^ (n + 1)) : ℝ≥0∞) ≤ ‖a ^ (n + 1)‖₊ * ‖(1 : A)‖₊ := by
simpa only [Real.toNNReal_mul (norm_nonneg _), norm_toNNReal, nnnorm_pow k (n + 1),
ENNReal.coe_mul] using coe_mono (Real.toNNReal_mono (norm_le_norm_mul_of_mem pow_mem))
-- take (n + 1)ᵗʰ roots and clean up the left-hand side
have hn : 0 < ((n + 1 : ℕ) : ℝ) := by exact_mod_cast Nat.succ_pos'
convert monotone_rpow_of_nonneg (one_div_pos.mpr hn).le nnnorm_pow_le
erw [coe_pow, ← rpow_nat_cast, ← rpow_mul, mul_one_div_cancel hn.ne', rpow_one]
rw [Nat.cast_succ, ENNReal.coe_mul_rpow]
#align spectrum.spectral_radius_le_pow_nnnorm_pow_one_div spectrum.spectralRadius_le_pow_nnnorm_pow_one_div
-- Liminf form of the previous estimate: the ‖1‖₊-factor is absorbed in the
-- limit, since ‖1‖₊^(1/n) → 1 (handled via `eventually_pow_one_div_le`).
theorem spectralRadius_le_liminf_pow_nnnorm_pow_one_div (a : A) :
spectralRadius 𝕜 a ≤ atTop.liminf fun n : ℕ => (‖a ^ n‖₊ : ℝ≥0∞) ^ (1 / n : ℝ) :=
by
refine' ENNReal.le_of_forall_lt_one_mul_le fun ε hε => _
by_cases ε = 0
· simp only [h, MulZeroClass.zero_mul, zero_le']
have hε' : ε⁻¹ ≠ ∞ := fun h' =>
h (by simpa only [inv_inv, inv_top] using congr_arg (fun x : ℝ≥0∞ => x⁻¹) h')
simp only [ENNReal.mul_le_iff_le_inv h (hε.trans_le le_top).Ne, mul_comm ε⁻¹,
liminf_eq_supr_infi_of_nat', ENNReal.supᵢ_mul, ENNReal.infᵢ_mul hε']
rw [← ENNReal.inv_lt_inv, inv_one] at hε
obtain ⟨N, hN⟩ :=
eventually_at_top.mp
(ENNReal.eventually_pow_one_div_le (ENNReal.coe_ne_top : ↑‖(1 : A)‖₊ ≠ ∞) hε)
refine' le_trans _ (le_supᵢ _ (N + 1))
refine' le_infᵢ fun n => _
simp only [← add_assoc]
refine' (spectral_radius_le_pow_nnnorm_pow_one_div 𝕜 a (n + N)).trans _
norm_cast
exact mul_le_mul_left' (hN (n + N + 1) (by linarith)) _
#align spectrum.spectral_radius_le_liminf_pow_nnnorm_pow_one_div spectrum.spectralRadius_le_liminf_pow_nnnorm_pow_one_div
end SpectrumCompact
-- Differentiability and decay of the resolvent `z ↦ (algebraMap 𝕜 A z - a)⁻¹`
-- on a complete normed algebra.
section resolvent
open Filter Asymptotics
variable [NontriviallyNormedField 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A] [CompleteSpace A]
-- mathport name: exprρ
local notation "ρ" => resolventSet 𝕜
-- mathport name: «expr↑ₐ»
local notation "↑ₐ" => algebraMap 𝕜 A
-- On the resolvent set, the resolvent is differentiable with derivative
-- `-(resolvent a k) ^ 2` (chain rule through `Ring.inverse`).
theorem hasDerivAt_resolvent {a : A} {k : 𝕜} (hk : k ∈ ρ a) :
HasDerivAt (resolvent a) (-resolvent a k ^ 2) k :=
by
have H₁ : HasFderivAt Ring.inverse _ (↑ₐ k - a) := hasFderivAt_ring_inverse hk.unit
have H₂ : HasDerivAt (fun k => ↑ₐ k - a) 1 k := by
simpa using (Algebra.linearMap 𝕜 A).HasDerivAt.sub_const a
simpa [resolvent, sq, hk.unit_spec, ← Ring.inverse_unit hk.unit] using H₁.comp_has_deriv_at k H₂
#align spectrum.has_deriv_at_resolvent spectrum.hasDerivAt_resolvent
/- TODO: Once there is sufficient API for bornology, we should get a nice filter / asymptotics
version of this, for example: `tendsto (resolvent a) (cobounded 𝕜) (𝓝 0)` or more specifically
`(resolvent a) =O[cobounded 𝕜] (λ z, z⁻¹)`. -/
-- The resolvent vanishes at infinity: for each `ε > 0` there is `R > 0` such
-- that `‖resolvent a z‖ ≤ ε` whenever `R ≤ ‖z‖`.
theorem norm_resolvent_le_forall (a : A) :
∀ ε > 0, ∃ R > 0, ∀ z : 𝕜, R ≤ ‖z‖ → ‖resolvent a z‖ ≤ ε :=
by
obtain ⟨c, c_pos, hc⟩ := (@NormedRing.inverse_one_sub_norm A _ _).exists_pos
rw [is_O_with_iff, eventually_iff, Metric.mem_nhds_iff] at hc
rcases hc with ⟨δ, δ_pos, hδ⟩
simp only [CstarRing.norm_one, mul_one] at hδ
intro ε hε
have ha₁ : 0 < ‖a‖ + 1 := lt_of_le_of_lt (norm_nonneg a) (lt_add_one _)
have min_pos : 0 < min (δ * (‖a‖ + 1)⁻¹) (ε * c⁻¹) :=
lt_min (mul_pos δ_pos (inv_pos.mpr ha₁)) (mul_pos hε (inv_pos.mpr c_pos))
refine' ⟨(min (δ * (‖a‖ + 1)⁻¹) (ε * c⁻¹))⁻¹, inv_pos.mpr min_pos, fun z hz => _⟩
have hnz : z ≠ 0 := norm_pos_iff.mp (lt_of_lt_of_le (inv_pos.mpr min_pos) hz)
replace hz := inv_le_of_inv_le min_pos hz
rcases(⟨Units.mk0 z hnz, Units.val_mk0 hnz⟩ : IsUnit z) with ⟨z, rfl⟩
have lt_δ : ‖z⁻¹ • a‖ < δ :=
by
rw [Units.smul_def, norm_smul, Units.val_inv_eq_inv_val, norm_inv]
calc
‖(z : 𝕜)‖⁻¹ * ‖a‖ ≤ δ * (‖a‖ + 1)⁻¹ * ‖a‖ :=
mul_le_mul_of_nonneg_right (hz.trans (min_le_left _ _)) (norm_nonneg _)
_ < δ :=
by
conv =>
rw [mul_assoc]
rhs
rw [(mul_one δ).symm]
exact
mul_lt_mul_of_pos_left
((inv_mul_lt_iff ha₁).mpr ((mul_one (‖a‖ + 1)).symm ▸ lt_add_one _)) δ_pos
rw [← inv_smul_smul z (resolvent a (z : 𝕜)), units_smul_resolvent_self, resolvent,
Algebra.algebraMap_eq_smul_one, one_smul, Units.smul_def, norm_smul, Units.val_inv_eq_inv_val,
norm_inv]
calc
_ ≤ ε * c⁻¹ * c :=
mul_le_mul (hz.trans (min_le_right _ _)) (hδ (mem_ball_zero_iff.mpr lt_δ)) (norm_nonneg _)
(mul_pos hε (inv_pos.mpr c_pos)).le
_ = _ := inv_mul_cancel_right₀ c_pos.ne.symm ε
#align spectrum.norm_resolvent_le_forall spectrum.norm_resolvent_le_forall
end resolvent
-- Power-series representation and differentiability of `z ↦ (1 - z • a)⁻¹`.
section OneSubSmul
open ContinuousMultilinearMap ENNReal FormalMultilinearSeries
open NNReal ENNReal
variable [NontriviallyNormedField 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A]
variable (𝕜)
/-- In a Banach algebra `A` over a nontrivially normed field `𝕜`, for any `a : A` the
power series with coefficients `a ^ n` represents the function `(1 - z • a)⁻¹` in a disk of
radius `‖a‖₊⁻¹`. -/
theorem hasFpowerSeriesOnBallInverseOneSubSmul [CompleteSpace A] (a : A) :
HasFpowerSeriesOnBall (fun z : 𝕜 => Ring.inverse (1 - z • a))
(fun n => ContinuousMultilinearMap.mkPiField 𝕜 (Fin n) (a ^ n)) 0 ‖a‖₊⁻¹ :=
{ r_le :=
by
refine'
le_of_forall_nnreal_lt fun r hr => le_radius_of_bound_nnreal _ (max 1 ‖(1 : A)‖₊) fun n => _
rw [← norm_toNNReal, norm_mk_pi_field, norm_toNNReal]
cases n
· simp only [le_refl, mul_one, or_true_iff, le_max_iff, pow_zero]
· refine'
le_trans (le_trans (mul_le_mul_right' (nnnorm_pow_le' a n.succ_pos) (r ^ n.succ)) _)
(le_max_left _ _)
· by_cases ‖a‖₊ = 0
· simp only [h, MulZeroClass.zero_mul, zero_le', pow_succ]
· rw [← coe_inv h, coe_lt_coe, NNReal.lt_inv_iff_mul_lt h] at hr
simpa only [← mul_pow, mul_comm] using pow_le_one' hr.le n.succ
r_pos := ENNReal.inv_pos.mpr coe_ne_top
HasSum := fun y hy =>
by
-- inside the ball the geometric series for `(1 - y • a)⁻¹` converges
have norm_lt : ‖y • a‖ < 1 := by
by_cases h : ‖a‖₊ = 0
· simp only [nnnorm_eq_zero.mp h, norm_zero, zero_lt_one, smul_zero]
· have nnnorm_lt : ‖y‖₊ < ‖a‖₊⁻¹ := by
simpa only [← coe_inv h, mem_ball_zero_iff, Metric.emetric_ball_nnreal] using hy
rwa [← coe_nnnorm, ← Real.lt_toNNReal_iff_coe_lt, Real.toNNReal_one, nnnorm_smul, ←
NNReal.lt_inv_iff_mul_lt h]
simpa [← smul_pow, (NormedRing.summable_geometric_of_norm_lt_1 _ norm_lt).hasSum_iff] using
(NormedRing.inverse_oneSub _ norm_lt).symm }
#align spectrum.has_fpower_series_on_ball_inverse_one_sub_smul spectrum.hasFpowerSeriesOnBallInverseOneSubSmul
variable {𝕜}
-- `1 - z • a` is a unit as soon as `‖z‖₊` is below the inverse spectral radius.
theorem isUnit_one_sub_smul_of_lt_inv_radius {a : A} {z : 𝕜} (h : ↑‖z‖₊ < (spectralRadius 𝕜 a)⁻¹) :
IsUnit (1 - z • a) := by
by_cases hz : z = 0
· simp only [hz, isUnit_one, sub_zero, zero_smul]
· let u := Units.mk0 z hz
suffices hu : IsUnit (u⁻¹ • 1 - a)
· rwa [IsUnit.smul_sub_iff_sub_inv_smul, inv_inv u] at hu
· rw [Units.smul_def, ← Algebra.algebraMap_eq_smul_one, ← mem_resolvent_set_iff]
refine' mem_resolvent_set_of_spectral_radius_lt _
rwa [Units.val_inv_eq_inv_val, nnnorm_inv,
coe_inv (nnnorm_ne_zero_iff.mpr (Units.val_mk0 hz ▸ hz : (u : 𝕜) ≠ 0)), lt_inv_iff_lt_inv]
#align spectrum.is_unit_one_sub_smul_of_lt_inv_radius spectrum.isUnit_one_sub_smul_of_lt_inv_radius
/-- In a Banach algebra `A` over `𝕜`, for `a : A` the function `λ z, (1 - z • a)⁻¹` is
differentiable on any closed ball centered at zero of radius `r < (spectral_radius 𝕜 a)⁻¹`. -/
theorem differentiableOn_inverse_one_sub_smul [CompleteSpace A] {a : A} {r : ℝ≥0}
(hr : (r : ℝ≥0∞) < (spectralRadius 𝕜 a)⁻¹) :
DifferentiableOn 𝕜 (fun z : 𝕜 => Ring.inverse (1 - z • a)) (Metric.closedBall 0 r) :=
by
intro z z_mem
apply DifferentiableAt.differentiableWithinAt
have hu : IsUnit (1 - z • a) :=
by
refine' is_unit_one_sub_smul_of_lt_inv_radius (lt_of_le_of_lt (coe_mono _) hr)
simpa only [norm_toNNReal, Real.toNNReal_coe] using
Real.toNNReal_mono (mem_closed_ball_zero_iff.mp z_mem)
have H₁ : Differentiable 𝕜 fun w : 𝕜 => 1 - w • a := (differentiable_id.smul_const a).const_sub 1
exact DifferentiableAt.comp z (differentiableAt_inverse hu.unit) H₁.differentiable_at
#align spectrum.differentiable_on_inverse_one_sub_smul spectrum.differentiableOn_inverse_one_sub_smul
end OneSubSmul
-- Gelfand's formula for the spectral radius over `ℂ`.
section GelfandFormula
open Filter ENNReal ContinuousMultilinearMap
open Topology
variable [NormedRing A] [NormedAlgebra ℂ A] [CompleteSpace A]
/-- The `limsup` relationship for the spectral radius used to prove `spectrum.gelfand_formula`. -/
theorem limsup_pow_nnnorm_pow_one_div_le_spectralRadius (a : A) :
limsup (fun n : ℕ => ↑‖a ^ n‖₊ ^ (1 / n : ℝ)) atTop ≤ spectralRadius ℂ a :=
by
refine' ennreal.inv_le_inv.mp (le_of_forall_pos_nnreal_lt fun r r_pos r_lt => _)
simp_rw [inv_limsup, ← one_div]
let p : FormalMultilinearSeries ℂ ℂ A := fun n =>
ContinuousMultilinearMap.mkPiField ℂ (Fin n) (a ^ n)
suffices h : (r : ℝ≥0∞) ≤ p.radius
· convert h
simp only [p.radius_eq_liminf, ← norm_toNNReal, norm_mk_pi_field]
congr
ext n
rw [norm_toNNReal, ENNReal.coe_rpow_def ‖a ^ n‖₊ (1 / n : ℝ), if_neg]
exact fun ha => by linarith [ha.2, (one_div_nonneg.mpr n.cast_nonneg : 0 ≤ (1 / n : ℝ))]
-- the radius bound comes from differentiability of `z ↦ (1 - z • a)⁻¹`
· have H₁ := (differentiable_on_inverse_one_sub_smul r_lt).HasFpowerSeriesOnBall r_pos
exact ((has_fpower_series_on_ball_inverse_one_sub_smul ℂ a).exchangeRadius H₁).r_le
#align spectrum.limsup_pow_nnnorm_pow_one_div_le_spectral_radius spectrum.limsup_pow_nnnorm_pow_one_div_le_spectralRadius
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `‖a ^ n‖₊ ^ (1 / n)` -/
theorem pow_nnnorm_pow_one_div_tendsto_nhds_spectralRadius (a : A) :
Tendsto (fun n : ℕ => (‖a ^ n‖₊ ^ (1 / n : ℝ) : ℝ≥0∞)) atTop (𝓝 (spectralRadius ℂ a)) :=
tendsto_of_le_liminf_of_limsup_le (spectralRadius_le_liminf_pow_nnnorm_pow_one_div ℂ a)
(limsup_pow_nnnorm_pow_one_div_le_spectralRadius a)
#align spectrum.pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius spectrum.pow_nnnorm_pow_one_div_tendsto_nhds_spectralRadius
/- This is the same as `pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius` but for `norm`
instead of `nnnorm`. -/
/-- **Gelfand's formula**: Given an element `a : A` of a complex Banach algebra, the
`spectral_radius` of `a` is the limit of the sequence `‖a ^ n‖₊ ^ (1 / n)` -/
theorem pow_norm_pow_one_div_tendsto_nhds_spectralRadius (a : A) :
Tendsto (fun n : ℕ => ENNReal.ofReal (‖a ^ n‖ ^ (1 / n : ℝ))) atTop (𝓝 (spectralRadius ℂ a)) :=
by
convert pow_nnnorm_pow_one_div_tendsto_nhds_spectral_radius a
ext1
rw [← of_real_rpow_of_nonneg (norm_nonneg _) _, ← coe_nnnorm, coe_nnreal_eq]
exact one_div_nonneg.mpr (by exact_mod_cast zero_le _)
#align spectrum.pow_norm_pow_one_div_tendsto_nhds_spectral_radius spectrum.pow_norm_pow_one_div_tendsto_nhds_spectralRadius
end GelfandFormula
-- Nonemptiness of the spectrum in a nontrivial complex Banach algebra, and
-- consequences (attained spectral radius, spectral mapping for polynomials).
section NonemptySpectrum
variable [NormedRing A] [NormedAlgebra ℂ A] [CompleteSpace A] [Nontrivial A] (a : A)
/-- In a (nontrivial) complex Banach algebra, every element has nonempty spectrum. -/
protected theorem nonempty : (spectrum ℂ a).Nonempty :=
by
/- Suppose `σ a = ∅`, then resolvent set is `ℂ`, any `(z • 1 - a)` is a unit, and `resolvent`
is differentiable on `ℂ`. -/
rw [Set.nonempty_iff_ne_empty]
by_contra h
have H₀ : resolventSet ℂ a = Set.univ := by rwa [spectrum, Set.compl_empty_iff] at h
have H₁ : Differentiable ℂ fun z : ℂ => resolvent a z := fun z =>
(has_deriv_at_resolvent (H₀.symm ▸ Set.mem_univ z : z ∈ resolventSet ℂ a)).DifferentiableAt
/- The norm of the resolvent is small for all sufficently large `z`, and by compactness and
continuity it is bounded on the complement of a large ball, thus uniformly bounded on `ℂ`.
By Liouville's theorem `λ z, resolvent a z` is constant -/
have H₂ := norm_resolvent_le_forall a
have H₃ : ∀ z : ℂ, resolvent a z = resolvent a (0 : ℂ) :=
by
refine' fun z => H₁.apply_eq_apply_of_bounded (bounded_iff_forall_norm_le.mpr _) z 0
rcases H₂ 1 zero_lt_one with ⟨R, R_pos, hR⟩
rcases(ProperSpace.isCompact_closedBall (0 : ℂ) R).exists_bound_of_continuousOn
H₁.continuous.continuous_on with
⟨C, hC⟩
use max C 1
rintro _ ⟨w, rfl⟩
refine' Or.elim (em (‖w‖ ≤ R)) (fun hw => _) fun hw => _
· exact (hC w (mem_closed_ball_zero_iff.mpr hw)).trans (le_max_left _ _)
· exact (hR w (not_le.mp hw).le).trans (le_max_right _ _)
-- `resolvent a 0 = 0`, which is a contradition because it isn't a unit.
have H₅ : resolvent a (0 : ℂ) = 0 :=
by
refine' norm_eq_zero.mp (le_antisymm (le_of_forall_pos_le_add fun ε hε => _) (norm_nonneg _))
rcases H₂ ε hε with ⟨R, R_pos, hR⟩
simpa only [H₃ R] using
(zero_add ε).symm.subst (hR R (by exact_mod_cast (Real.norm_of_nonneg R_pos.lt.le).symm.le))
-- `not_is_unit_zero` is where we need `nontrivial A`, it is unavoidable.
exact
not_isUnit_zero
(H₅.subst (is_unit_resolvent.mp (mem_resolvent_set_iff.mp (H₀.symm ▸ Set.mem_univ 0))))
#align spectrum.nonempty spectrum.nonempty
/-- In a complex Banach algebra, the spectral radius is always attained by some element of the
spectrum. -/
theorem exists_nnnorm_eq_spectralRadius : ∃ z ∈ spectrum ℂ a, (‖z‖₊ : ℝ≥0∞) = spectralRadius ℂ a :=
exists_nnnorm_eq_spectralRadius_of_nonempty (spectrum.nonempty a)
#align spectrum.exists_nnnorm_eq_spectral_radius spectrum.exists_nnnorm_eq_spectralRadius
/-- In a complex Banach algebra, if every element of the spectrum has norm strictly less than
`r : ℝ≥0`, then the spectral radius is also strictly less than `r`. -/
theorem spectralRadius_lt_of_forall_lt {r : ℝ≥0} (hr : ∀ z ∈ spectrum ℂ a, ‖z‖₊ < r) :
spectralRadius ℂ a < r :=
spectralRadius_lt_of_forall_lt_of_nonempty (spectrum.nonempty a) hr
#align spectrum.spectral_radius_lt_of_forall_lt spectrum.spectralRadius_lt_of_forall_lt
open Polynomial
open Polynomial
/-- The **spectral mapping theorem** for polynomials in a Banach algebra over `ℂ`. -/
theorem map_polynomial_aeval (p : ℂ[X]) :
spectrum ℂ (aeval a p) = (fun k => eval k p) '' spectrum ℂ a :=
map_polynomial_aeval_of_nonempty a p (spectrum.nonempty a)
#align spectrum.map_polynomial_aeval spectrum.map_polynomial_aeval
/-- A specialization of the spectral mapping theorem for polynomials in a Banach algebra over `ℂ`
to monic monomials. -/
protected theorem map_pow (n : ℕ) : spectrum ℂ (a ^ n) = (fun x => x ^ n) '' spectrum ℂ a := by
simpa only [aeval_X_pow, eval_pow, eval_X] using map_polynomial_aeval a (X ^ n)
#align spectrum.map_pow spectrum.map_pow
end NonemptySpectrum
-- Gelfand–Mazur: a complex Banach algebra whose nonzero elements are exactly
-- the units is isomorphic (as a `ℂ`-algebra) to `ℂ`.
section GelfandMazurIsomorphism
variable [NormedRing A] [NormedAlgebra ℂ A] (hA : ∀ {a : A}, IsUnit a ↔ a ≠ 0)
include hA
-- mathport name: exprσ
local notation "σ" => spectrum ℂ
-- In such an algebra, a spectral value `z` of `a` forces `algebraMap ℂ A z = a`,
-- since `algebraMap ℂ A z - a` is a non-unit, hence zero by `hA`.
theorem algebraMap_eq_of_mem {a : A} {z : ℂ} (h : z ∈ σ a) : algebraMap ℂ A z = a := by
rwa [mem_iff, hA, Classical.not_not, sub_eq_zero] at h
#align spectrum.algebra_map_eq_of_mem spectrum.algebraMap_eq_of_mem
/-- **Gelfand-Mazur theorem**: For a complex Banach division algebra, the natural `algebra_map ℂ A`
is an algebra isomorphism whose inverse is given by selecting the (unique) element of
`spectrum ℂ a`. In addition, `algebra_map_isometry` guarantees this map is an isometry.
Note: because `normed_division_ring` requires the field `norm_mul' : ∀ a b, ‖a * b‖ = ‖a‖ * ‖b‖`, we
don't use this type class and instead opt for a `normed_ring` in which the nonzero elements are
precisely the units. This allows for the application of this isomorphism in broader contexts, e.g.,
to the quotient of a complex Banach algebra by a maximal ideal. In the case when `A` is actually a
`normed_division_ring`, one may fill in the argument `hA` with the lemma `is_unit_iff_ne_zero`. -/
@[simps]
noncomputable def NormedRing.algEquivComplexOfComplete [CompleteSpace A] : ℂ ≃ₐ[ℂ] A :=
let nt : Nontrivial A := ⟨⟨1, 0, hA.mp ⟨⟨1, 1, mul_one _, mul_one _⟩, rfl⟩⟩⟩
{ Algebra.ofId ℂ A with
toFun := algebraMap ℂ A
invFun := fun a => (@spectrum.nonempty _ _ _ _ nt a).some
left_inv := fun z => by
simpa only [@scalar_eq _ _ _ _ _ nt _] using
(@spectrum.nonempty _ _ _ _ nt <| algebraMap ℂ A z).some_mem
right_inv := fun a => algebraMap_eq_of_mem (@hA) (@spectrum.nonempty _ _ _ _ nt a).some_mem }
#align normed_ring.alg_equiv_complex_of_complete NormedRing.algEquivComplexOfComplete
end GelfandMazurIsomorphism
-- Spectral mapping for the exponential over `ℝ` or `ℂ`.
section ExpMapping
-- mathport name: «expr↑ₐ»
local notation "↑ₐ" => algebraMap 𝕜 A
/-- For `𝕜 = ℝ` or `𝕜 = ℂ`, `exp 𝕜` maps the spectrum of `a` into the spectrum of `exp 𝕜 a`. -/
theorem exp_mem_exp [IsROrC 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A] [CompleteSpace A] (a : A) {z : 𝕜}
(hz : z ∈ spectrum 𝕜 a) : exp 𝕜 z ∈ spectrum 𝕜 (exp 𝕜 a) :=
by
-- factor `exp 𝕜 a` through the commuting element `↑ₐ (exp 𝕜 z)`
have hexpmul : exp 𝕜 a = exp 𝕜 (a - ↑ₐ z) * ↑ₐ (exp 𝕜 z) := by
rw [algebraMap_exp_comm z, ← exp_add_of_commute (Algebra.commutes z (a - ↑ₐ z)).symm,
sub_add_cancel]
-- `b` is the tail of the exponential series, so that `exp (a - ↑ₐ z) = 1 + (a - ↑ₐ z) * b`
let b := ∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐ z) ^ n
have hb : Summable fun n : ℕ => ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐ z) ^ n :=
by
refine' summable_of_norm_bounded_eventually _ (Real.summable_pow_div_factorial ‖a - ↑ₐ z‖) _
filter_upwards [Filter.eventually_cofinite_ne 0]with n hn
rw [norm_smul, mul_comm, norm_inv, IsROrC.norm_eq_abs, IsROrC.abs_cast_nat, ← div_eq_mul_inv]
exact
div_le_div (pow_nonneg (norm_nonneg _) n) (norm_pow_le' (a - ↑ₐ z) (zero_lt_iff.mpr hn))
(by exact_mod_cast Nat.factorial_pos n)
(by exact_mod_cast Nat.factorial_le (lt_add_one n).le)
have h₀ : (∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐ z) ^ (n + 1)) = (a - ↑ₐ z) * b := by
simpa only [mul_smul_comm, pow_succ] using hb.tsum_mul_left (a - ↑ₐ z)
have h₁ : (∑' n : ℕ, ((n + 1).factorial⁻¹ : 𝕜) • (a - ↑ₐ z) ^ (n + 1)) = b * (a - ↑ₐ z) := by
simpa only [pow_succ', Algebra.smul_mul_assoc] using hb.tsum_mul_right (a - ↑ₐ z)
have h₃ : exp 𝕜 (a - ↑ₐ z) = 1 + (a - ↑ₐ z) * b :=
by
rw [exp_eq_tsum]
convert tsum_eq_zero_add (exp_series_summable' (a - ↑ₐ z))
simp only [Nat.factorial_zero, Nat.cast_one, inv_one, pow_zero, one_smul]
exact h₀.symm
-- since `a - ↑ₐ z` is a non-unit, so is `↑ₐ (exp 𝕜 z) - exp 𝕜 a`
rw [spectrum.mem_iff, IsUnit.sub_iff, ← one_mul (↑ₐ (exp 𝕜 z)), hexpmul, ← _root_.sub_mul,
Commute.isUnit_mul_iff (Algebra.commutes (exp 𝕜 z) (exp 𝕜 (a - ↑ₐ z) - 1)).symm,
sub_eq_iff_eq_add'.mpr h₃, Commute.isUnit_mul_iff (h₀ ▸ h₁ : (a - ↑ₐ z) * b = b * (a - ↑ₐ z))]
exact not_and_of_not_left _ (not_and_of_not_left _ ((not_iff_not.mpr IsUnit.sub_iff).mp hz))
#align spectrum.exp_mem_exp spectrum.exp_mem_exp
end ExpMapping
end spectrum
-- Automatic continuity and norm bounds for algebra homomorphisms into the
-- base field of a complete normed algebra.
namespace AlgHom
section NormedField
variable {F : Type _} [NormedField 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A] [CompleteSpace A]
-- mathport name: «expr↑ₐ»
local notation "↑ₐ" => algebraMap 𝕜 A
/-- An algebra homomorphism into the base field, as a continuous linear map (since it is
automatically bounded). See note [lower instance priority] -/
instance (priority := 100) [AlgHomClass F 𝕜 A 𝕜] : ContinuousLinearMapClass F 𝕜 A 𝕜 :=
{ AlgHomClass.linearMapClass with
map_continuous := fun φ =>
AddMonoidHomClass.continuous_of_bound φ ‖(1 : A)‖ fun a =>
mul_comm ‖a‖ ‖(1 : A)‖ ▸ spectrum.norm_le_norm_mul_of_mem (apply_mem_spectrum φ _) }
/-- An algebra homomorphism into the base field, as a continuous linear map (since it is
automatically bounded). -/
def toContinuousLinearMap (φ : A →ₐ[𝕜] 𝕜) : A →L[𝕜] 𝕜 :=
{ φ.toLinearMap with cont := map_continuous φ }
#align alg_hom.to_continuous_linear_map AlgHom.toContinuousLinearMap
@[simp]
theorem coe_toContinuousLinearMap (φ : A →ₐ[𝕜] 𝕜) : ⇑φ.toContinuousLinearMap = φ :=
rfl
#align alg_hom.coe_to_continuous_linear_map AlgHom.coe_toContinuousLinearMap
-- `‖f a‖ ≤ ‖a‖ * ‖1‖`: values of `f` lie in the spectrum of `a`.
theorem norm_apply_le_self_mul_norm_one [AlgHomClass F 𝕜 A 𝕜] (f : F) (a : A) :
‖f a‖ ≤ ‖a‖ * ‖(1 : A)‖ :=
spectrum.norm_le_norm_mul_of_mem (apply_mem_spectrum f _)
#align alg_hom.norm_apply_le_self_mul_norm_one AlgHom.norm_apply_le_self_mul_norm_one
-- With `‖1‖ = 1` the bound sharpens to `‖f a‖ ≤ ‖a‖`.
theorem norm_apply_le_self [NormOneClass A] [AlgHomClass F 𝕜 A 𝕜] (f : F) (a : A) : ‖f a‖ ≤ ‖a‖ :=
spectrum.norm_le_norm_of_mem (apply_mem_spectrum f _)
#align alg_hom.norm_apply_le_self AlgHom.norm_apply_le_self
end NormedField
section NontriviallyNormedField
variable [NontriviallyNormedField 𝕜] [NormedRing A] [NormedAlgebra 𝕜 A] [CompleteSpace A]
-- mathport name: «expr↑ₐ»
local notation "↑ₐ" => algebraMap 𝕜 A
-- The operator norm of such an algebra homomorphism is exactly `1`
-- (attained at `1 : A` since `φ 1 = 1`).
@[simp]
theorem toContinuousLinearMap_norm [NormOneClass A] (φ : A →ₐ[𝕜] 𝕜) :
‖φ.toContinuousLinearMap‖ = 1 :=
ContinuousLinearMap.op_norm_eq_of_bounds zero_le_one
(fun a => (one_mul ‖a‖).symm ▸ spectrum.norm_le_norm_of_mem (apply_mem_spectrum φ _))
fun _ _ h => by simpa only [coe_to_continuous_linear_map, map_one, norm_one, mul_one] using h 1
#align alg_hom.to_continuous_linear_map_norm AlgHom.toContinuousLinearMap_norm
end NontriviallyNormedField
end AlgHom
-- The character space of a complete normed algebra is in bijection with the
-- algebra homomorphisms into the base field.
namespace WeakDual
namespace CharacterSpace
variable [NontriviallyNormedField 𝕜] [NormedRing A] [CompleteSpace A]
variable [NormedAlgebra 𝕜 A]
/-- The equivalence between characters and algebra homomorphisms into the base field. -/
def equivAlgHom : characterSpace 𝕜 A ≃ (A →ₐ[𝕜] 𝕜)
where
toFun := toAlgHom
invFun f :=
{ val := f.toContinuousLinearMap
property := by
rw [eq_set_map_one_map_mul]
exact ⟨map_one f, map_mul f⟩ }
left_inv f := Subtype.ext <| ContinuousLinearMap.ext fun x => rfl
right_inv f := AlgHom.ext fun x => rfl
#align weak_dual.character_space.equiv_alg_hom WeakDual.characterSpace.equivAlgHom
@[simp]
theorem equivAlgHom_coe (f : characterSpace 𝕜 A) : ⇑(equivAlgHom f) = f :=
rfl
#align weak_dual.character_space.equiv_alg_hom_coe WeakDual.characterSpace.equivAlgHom_coe
@[simp]
theorem equivAlgHom_symm_coe (f : A →ₐ[𝕜] 𝕜) : ⇑(equivAlgHom.symm f) = f :=
rfl
#align weak_dual.character_space.equiv_alg_hom_symm_coe WeakDual.characterSpace.equivAlgHom_symm_coe
end CharacterSpace
end WeakDual
|
Margaret Roberts was appointed as the City of Davis City Clerk on January 17, 2006.
Prior to coming to the City of Davis, Ms. Roberts was City Clerk for the City of Rio Vista and has over 16 years of local government experience.
She currently serves as the Northern Division Professional Development Representative for the City Clerks Association of California and Vice President of the Northern California City Clerks Association. She has served as a Northern Division board member for several years, working on a variety of issues related to city clerks and local government.
|
lemma closed_insert [continuous_intros, simp]: assumes "closed S" shows "closed (insert a S)" |
{-# OPTIONS --without-K #-}
open import HoTT.Base
open import HoTT.HLevel
open import HoTT.HLevel.Truncate
open import HoTT.Logic
open import HoTT.Identity
open import HoTT.Identity.Identity
open import HoTT.Identity.Coproduct
open import HoTT.Identity.Sigma
open import HoTT.Identity.Pi
open import HoTT.Identity.Universe
open import HoTT.Equivalence
open import HoTT.Equivalence.Lift
open import HoTT.Sigma.Transport
module HoTT.Exercises.Chapter3 where
-- Exercise 3.1: an equivalence transports the property of being a set.
module Exercise1 {i} {A B : 𝒰 i} (e : A ≃ B) (A-set : isSet A)
where
□ : isSet B
□ {x} {y} p q = ap⁻¹ (ap≃ (e ⁻¹ₑ) x y) (A-set (ap g p) (ap g q))
where open Iso (eqv→iso e)
-- Exercise 3.2: a coproduct of two sets is a set (case split on injections).
module Exercise2 {i} {A B : 𝒰 i} (A-set : isSet A) (B-set : isSet B)
where
□ : isSet (A + B)
□ {inl x} {inl y} p q = ap⁻¹ =+-equiv
(ap lift (A-set (lower (=+-elim p)) (lower (=+-elim q))))
□ {inl x} {inr y} p q = 𝟎-rec (=+-elim p)
□ {inr x} {inl y} p q = 𝟎-rec (=+-elim p)
□ {inr x} {inr y} p q = ap⁻¹ =+-equiv
(ap lift (B-set (lower (=+-elim p)) (lower (=+-elim q))))
-- Exercise 3.3: a Σ-type over a set with set-valued fibers is a set.
module Exercise3
{i} {A : 𝒰 i} (A-set : isSet A)
{j} {B : A → 𝒰 j} (B-set : {x : A} → isSet (B x))
where
□ : isSet (Σ A B)
□ {x = x@(x₁ , x₂)} {y = y@(y₁ , y₂)} p q =
ap⁻¹ =Σ-equiv (lemma (pr₁ =Σ-equiv p) (pr₁ =Σ-equiv q))
where
lemma : (p q : Σ (x₁ == y₁) λ p₁ → (transport B p₁ x₂ == y₂)) → p == q
lemma (p₁ , p₂) (q₁ , q₂) = pair⁼ (r₁ , r₂)
where
r₁ = A-set p₁ q₁
r₂ = B-set (transport _ r₁ p₂) q₂
-- Exercise 3.4: A is a mere proposition iff A → A is contractible.
module Exercise4 {i} {A : 𝒰 i} where
_ : isProp A → isContr (A → A)
_ = λ A-prop → id , λ f → funext λ x → A-prop x (f x)
_ : isContr (A → A) → isProp A
_ = λ where
(f , contr) x y → happly (contr (const x) ⁻¹ ∙ contr (const y)) x
-- Exercise 3.5: isProp A is equivalent to (A → isContr A).
module Exercise5 {i} {A : 𝒰 i} where
open import HoTT.Pi.Transport
open import HoTT.Sigma.Transport
_ : isProp A ≃ (A → isContr A)
_ = f , qinv→isequiv (g , η , ε)
where
f : isProp A → (A → isContr A)
f A-prop x = x , A-prop x
g : (A → isContr A) → isProp A
g h x y = let contr = pr₂ (h x) in contr x ⁻¹ ∙ contr y
η : g ∘ f ~ id
η _ = isProp-prop _ _
ε : f ∘ g ~ id
ε h = funext λ _ → isContr-prop _ _
-- Exercise 3.6: for a mere proposition A, the type A + ¬ A is a proposition.
module Exercise6 {i} {A : 𝒰 i} where
instance
LEM-prop : ⦃ hlevel 1 A ⦄ → hlevel 1 (A + ¬ A)
LEM-prop = isProp→hlevel1 λ where
(inl a) (inl a') → ap inl center
(inl a) (inr f) → 𝟎-rec (f a)
(inr f) (inl b') → 𝟎-rec (f b')
(inr f) (inr f') → ap inr center
-- Exercise 3.7: if A and B are disjoint propositions, A + B is a proposition.
module Exercise7
{i} {A : 𝒰 i} {A-prop : isProp A}
{j} {B : 𝒰 j} {B-prop : isProp B}
where
□ : ¬ (A × B) → isProp (A + B)
□ f = λ where
(inl x) (inl y) → ap inl (A-prop _ _)
(inl x) (inr y) → 𝟎-rec (f (x , y))
(inr x) (inl y) → 𝟎-rec (f (y , x))
(inr x) (inr y) → ap inr (B-prop _ _)
-- Exercise 3.8: qinv f and ∥ qinv f ∥ are interderivable, and the truncation
-- is a mere proposition.
module Exercise8 {i j} {A : 𝒰 i} {B : 𝒰 j} {f : A → B} where
open import HoTT.Equivalence.Proposition
prop₁ : qinv f → ∥ qinv f ∥
prop₁ e = ∣ e ∣
prop₂ : ∥ qinv f ∥ → qinv f
prop₂ e = isequiv→qinv (∥-rec ⦃ isProp→hlevel1 isequiv-prop ⦄ qinv→isequiv e)
prop₃ : isProp ∥ qinv f ∥
prop₃ = hlevel1→isProp
-- Exercise 3.9: assuming LEM, the type of propositions is equivalent to 𝟐.
module Exercise9 {i} (lem : LEM {i}) where
open import HoTT.Logic
open import HoTT.Equivalence.Lift
_ : Prop𝒰 i ≃ 𝟐
_ = f , qinv→isequiv (g , η , ε)
where
f : Prop𝒰 i → 𝟐
f P with lem P
... | inl _ = 0₂
... | inr _ = 1₂
g : 𝟐 → Prop𝒰 i
g 0₂ = ⊤
g 1₂ = ⊥
η : g ∘ f ~ id
η P with lem P
... | inl t = hlevel⁼ (ua (prop-equiv (const t) (const ★)))
... | inr f = hlevel⁼ (ua (prop-equiv 𝟎-rec (𝟎-rec ∘ f)))
ε : f ∘ g ~ id
ε 0₂ with lem (g 0₂)
... | inl _ = refl
... | inr f = 𝟎-rec (f ★)
ε 1₂ with lem (g 1₂)
... | inl ()
... | inr _ = refl
-- Exercise 3.10 is solved in its own file.
import HoTT.Exercises.Chapter3.Exercise10
-- Exercise 3.11: there is no global section ∥ A ∥ → A for all types A
-- (shown by transporting along the `not` automorphism of 𝟐 via univalence).
module Exercise11 where
open variables
open import HoTT.Pi.Transport
open import HoTT.Identity.Boolean
prop : ¬ ((A : 𝒰₀) → ∥ A ∥ → A)
prop f = 𝟎-rec (g (f 𝟐 ∣ 0₂ ∣) p)
where
not = 𝟐-rec 1₂ 0₂
e : 𝟐 ≃ 𝟐
e = not , qinv→isequiv (not , 𝟐-ind _ refl refl , 𝟐-ind _ refl refl)
g : (x : 𝟐) → ¬ (not x == x)
g 0₂ = 𝟎-rec ∘ pr₁ =𝟐-equiv
g 1₂ = 𝟎-rec ∘ pr₁ =𝟐-equiv
p : not (f 𝟐 ∣ 0₂ ∣) == f 𝟐 ∣ 0₂ ∣
p =
not (f 𝟐 ∣ 0₂ ∣)
=⟨ ap (λ e → pr₁ e (f 𝟐 ∣ 0₂ ∣)) (=𝒰-β e) ⁻¹ ⟩
transport id (ua e) (f 𝟐 ∣ 0₂ ∣)
=⟨ ap (λ x → transport id (ua e) (f 𝟐 x)) center ⟩
transport id (ua e) (f 𝟐 (transport ∥_∥ (ua e ⁻¹) ∣ 0₂ ∣))
=⟨ happly (transport-→ ∥_∥ id (ua e) (f 𝟐) ⁻¹) ∣ 0₂ ∣ ⟩
transport (λ A → ∥ A ∥ → A) (ua e) (f 𝟐) ∣ 0₂ ∣
=⟨ happly (apd f (ua e)) ∣ 0₂ ∣ ⟩
f 𝟐 ∣ 0₂ ∣
∎
where open =-Reasoning
-- Exercise 3.12: assuming LEM, ∥ (∥ A ∥ → A) ∥ holds for every A.
module Exercise12 {i} {A : 𝒰 i} (lem : LEM {i}) where
open variables using (B ; j)
□ : ∥ (∥ A ∥ → A) ∥
□ with lem (type ∥ A ∥)
... | inl x = swap ∥-rec x λ x → ∣ const x ∣
... | inr f = ∣ 𝟎-rec ∘ f ∣
-- Exercise 3.13: untruncated LEM (LEM∞) implies the axiom of choice.
module Exercise13 {i} (lem : LEM∞ {i}) where
□ : AC {i} {i} {i}
□ {X = X} {A = A} {P = P} f = ∣ pr₁ ∘ g , pr₂ ∘ g ∣
where
g : (x : X) → Σ (A x) (P x)
g x with lem (Σ (A x) (P x))
... | inl t = t
... | inr b = ∥-rec ⦃ hlevel-in (λ {x} → 𝟎-rec (b x)) ⦄ id (f x)
-- Exercise 3.14: with LEM, double negation ¬ ¬ A behaves like the
-- propositional truncation ∥ A ∥ and is in fact equivalent to it.
module Exercise14 (lem : ∀ {i} → LEM {i}) where
open import HoTT.Sigma.Universal
open variables
∥_∥' : 𝒰 i → 𝒰 i
∥ A ∥' = ¬ ¬ A
∣_∣' : A → ∥ A ∥'
∣ a ∣' f = f a
∥'-hlevel : hlevel 1 ∥ A ∥'
∥'-hlevel = ⟨⟩
∥'-rec : ⦃ hlevel 1 B ⦄ → (f : A → B) →
Σ[ g ∶ (∥ A ∥' → B) ] Π[ a ∶ A ] g ∣ a ∣' == f a
∥'-rec f = λ where
.pr₁ a → +-rec id (λ b → 𝟎-rec (a (𝟎-rec ∘ b ∘ f))) (lem (type _))
.pr₂ _ → center
_ : ∥ A ∥' ≃ ∥ A ∥
_ = let open Iso in iso→eqv λ where
.f → pr₁ (∥'-rec ∣_∣)
.g → ∥-rec ∣_∣'
.η _ → center
.ε _ → center
-- Exercise 3.15: propositional resizing gives an impredicative encoding of
-- the propositional truncation with a (propositional, not definitional) β-rule.
module Exercise15
(LiftProp-isequiv : ∀ {i j} → isequiv (LiftProp {i} {j}))
where
open import HoTT.Equivalence.Transport
open variables
open module LiftProp-qinv {i} {j} = qinv (isequiv→qinv (LiftProp-isequiv {i} {j}))
renaming (g to LowerProp ; η to LiftProp-η ; ε to LiftProp-ε)
∥_∥' : 𝒰 (lsuc i) → 𝒰 (lsuc i)
∥_∥' {i} A = (P : Prop𝒰 i) → (A → P ty) → P ty
∣_∣' : A → ∥ A ∥'
∣ a ∣' _ f = f a
∥'-hlevel : hlevel 1 ∥ A ∥'
∥'-hlevel = ⟨⟩
∥'-rec : {A : 𝒰 (lsuc i)} {(type B) : Prop𝒰 (lsuc i)} → (f : A → B) →
Σ (∥ A ∥' → B) λ g → (a : A) → g ∣ a ∣' == f a
∥'-rec {_} {_} {B} f = let p = ap _ty (LiftProp-ε B) in λ where
.pr₁ a → transport id p (lift (a (LowerProp B) (lower ∘ transport id (p ⁻¹) ∘ f)))
-- We do not get a definitional equality since our propositional
-- resizing equivalence does not give us definitionally equal
-- types, i.e. LowerProp B ≢ B. If it did, then we could write
--
-- ∥'-rec f a :≡ a (LowerProp B) f
--
-- and then we'd have ∥'-rec f ∣a∣' ≡ f a.
.pr₂ a → Eqv.ε (transport-equiv p) (f a)
-- Exercise 3.16: with LEM, double negation commutes with Π over a set.
module Exercise16
{i} {(type X) : Set𝒰 i}
{j} {Y : X → Prop𝒰 j}
(lem : ∀ {i} → LEM {i})
where
_ : Π[ x ∶ X ] ¬ ¬ Y x ty ≃ ¬ ¬ (Π[ x ∶ X ] Y x ty)
_ = let open Iso in iso→eqv λ where
.f s t → t λ x → +-rec id (𝟎-rec ∘ s x) (lem (Y x))
.g s x y → 𝟎-rec (s λ f → 𝟎-rec (y (f x)))
.η _ → center
.ε _ → center
-- Exercise 3.17: the induction principle for ∥_∥ into propositional families
-- follows from its recursion principle.
module Exercise17
{i} {A : 𝒰 i}
{j} {B : ∥ A ∥ → Prop𝒰 j}
where
∥-ind : ((a : A) → B ∣ a ∣ ty) → (x : ∥ A ∥) → B x ty
∥-ind f x = ∥-rec (transport (_ty ∘ B) center ∘ f) x
where instance _ = λ {x} → hlevel𝒰.h (B x)
-- Exercise 3.18: LEM and the law of double negation (LDN) are equivalent.
module Exercise18 {i} where
open Exercise6
_ : LEM {i} → LDN {i}
_ = λ lem P f → +-rec id (𝟎-rec ∘ f) (lem P)
_ : LDN {i} → LEM {i}
_ = λ ldn P → ldn (type (P ty + ¬ P ty)) λ f → f (inr (f ∘ inl))
-- Exercise 3.19: for a decidable family P over ℕ, the truncation
-- ∥ Σ ℕ P ∥ can be untruncated by locating the least witness.
module Exercise19
{i} {P : ℕ → 𝒰 i}
(P-lem : (n : ℕ) → P n + ¬ P n)
-- Do not assume that all P n are mere propositions so we can
-- reuse this solution for exercise 23.
where
open import HoTT.NaturalNumber renaming (_+_ to _+ₙ_ ; +-comm to +ₙ-comm)
open import HoTT.Identity.NaturalNumber
open import HoTT.Transport.Identity
open import HoTT.Base.Inspect
open import HoTT.Identity.Product
open import HoTT.Equivalence.Sigma
open import HoTT.Equivalence.Coproduct
open import HoTT.Equivalence.Empty
-- P n does not hold for any m < n
ℕ* : ℕ → 𝒰 i
ℕ* zero = 𝟏
ℕ* (succ n) = ¬ P n × ℕ* n
-- Selects the canonical witness of P n produced by the decision procedure.
P* : {n : ℕ} → P n → 𝒰 i
P* {n} p = inl p == P-lem n
-- ℕ* is the product of 𝟏 and some ¬ P n, all of which are
-- propositions, so it is a proposition as well.
instance ℕ*-hlevel : {n : ℕ} → hlevel 1 (ℕ* n)
ℕ*-hlevel {zero} = 𝟏-hlevel
ℕ*-hlevel {succ n} = ×-hlevel
P*-hlevel : {n : ℕ} → hlevel 1 (Σ (P n) P*)
hlevel-out (P*-hlevel {n}) {p , _} = ⟨⟩
where
e : P n ≃ P n + ¬ P n
e = +-empty₂ ⁻¹ₑ ∙ₑ +-equiv reflₑ (𝟎-equiv (_$ p))
instance
_ = =-contrᵣ (P-lem n)
_ = raise ⦃ equiv-hlevel (Σ-equiv₁ e ⁻¹ₑ) ⦄
instance _ = P*-hlevel
-- Extract evidence that ¬ P m for some m < n
extract : {m n : ℕ} → m < n → ℕ* n → ¬ P m
extract {m} (k , p) = pr₁ ∘ weaken ∘ transport ℕ* p'
where
p' = p ⁻¹ ∙ +ₙ-comm (succ m) k
weaken : {k : ℕ} → ℕ* (k +ₙ succ m) → ℕ* (succ m)
weaken {zero} = id
weaken {succ k} = weaken ∘ pr₂
-- The smallest n such that P n holds
Pₒ = Σ (Σ ℕ λ n → Σ (P n) P*) (ℕ* ∘ pr₁)
Pₒ-prop : isProp Pₒ
Pₒ-prop ((n , p , _) , n*) ((m , q , _) , m*) =
pair⁼ (pair⁼ (n=m , center) , center)
where
n=m : n == m
n=m with n <=> m
-- If n = m, then there is nothing to do.
... | inl n=m = n=m
-- If n < m, we have P n and we know ℕ* m contains ¬ P n
-- somewhere inside, so we can just extract it to find a
-- contradiction.
... | inr (inl n<m) = 𝟎-rec (extract n<m m* p)
-- The m < n case is symmetrical.
... | inr (inr m<n) = 𝟎-rec (extract m<n n* q)
instance _ = isProp→hlevel1 Pₒ-prop
-- Use the decidability of P to search for an instance of P in
-- some finite range of natural numbers, keeping track of evidence
-- that P does not hold for lower n.
find-P : (n : ℕ) → Pₒ + ℕ* n
find-P zero = inr ★
find-P (succ n) with find-P n | P-lem n | inspect P-lem n
... | inl x | _ | _ = inl x
... | inr n* | inl p | [ p* ] = inl ((n , p , p*) , n*)
... | inr n* | inr ¬p | _ = inr (¬p , n*)
-- If we know P holds for some n, then we have an upper bound for
-- which to search for the smallest n. If we do not find any other
-- n' ≤ n such that P n', then we have a contradiction.
to-Pₒ : Σ ℕ P → Pₒ
to-Pₒ (n , p) with find-P (succ n)
... | inl x = x
... | inr (¬p , _) = 𝟎-rec (¬p p)
from-Pₒ : Pₒ → Σ ℕ P
from-Pₒ ((n , p , p*) , n*) = n , p
-- Pₒ is a proposition, so ∥-rec applies; compose to untruncate.
_ : ∥ Σ ℕ P ∥ → Σ ℕ P
_ = from-Pₒ ∘ ∥-rec to-Pₒ
-- Exercise 3.20: Σ over a contractible base projects onto the fiber at the
-- center of contraction.
module Exercise20 where
open variables
-- See HoTT.HLevel
_ : ⦃ _ : hlevel 0 A ⦄ → Σ A P ≃ P center
_ = Σ-contr₁
-- Exercise 3.21: isProp P is equivalent to P ≃ ∥ P ∥.
module Exercise21 {i} {P : 𝒰 i} where
open import HoTT.Equivalence.Proposition
_ : isProp P ≃ (P ≃ ∥ P ∥)
_ = prop-equiv ⦃ isProp-hlevel1 ⦄ f g
where
f : isProp P → P ≃ ∥ P ∥
f p = prop-equiv ∣_∣ (∥-rec id)
where instance _ = isProp→hlevel1 p
g : P ≃ ∥ P ∥ → isProp P
g e = hlevel1→isProp ⦃ equiv-hlevel (e ⁻¹ₑ) ⦄
instance
_ = Σ-hlevel ⦃ Π-hlevel ⦄ ⦃ isProp→hlevel1 isequiv-prop ⦄
-- Exercise 3.22: the axiom of choice holds for finite index types Fin n,
-- by induction on n.
module Exercise22 where
Fin : ℕ → 𝒰₀
Fin zero = 𝟎
Fin (succ n) = Fin n + 𝟏
□ : ∀ {i} (n : ℕ) {A : Fin n → 𝒰 i} {P : (x : Fin n) → A x → 𝒰 i} →
((x : Fin n) → ∥ Σ (A x) (P x) ∥) →
∥ Σ ((x : Fin n) → A x) (λ g → ((x : Fin n) → P x (g x))) ∥
□ zero _ = ∣ 𝟎-ind , 𝟎-ind ∣
□ (succ n) {A} {P} f =
swap ∥-rec (f (inr ★)) λ zₛ →
swap ∥-rec (□ n (f ∘ inl)) λ zₙ →
let f = f' zₛ zₙ in ∣ pr₁ ∘ f , pr₂ ∘ f ∣
where
f' : _ → _ → (x : Fin (succ n)) → Σ (A x) (P x)
f' (_ , _) (g , h) (inl m) = g m , h m
f' (x , y) (_ , _) (inr ★) = x , y
-- Exercise 3.23: least-witness extraction for decidable P, without assuming
-- each P n is a proposition.
module Exercise23 where
-- The solution for Exercise19 covers the case where P is not
-- necessarily a mere proposition.
open Exercise19
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% NOTE: this script and its pricing function are works in progress and
% have not been well tested.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Forward value of European calls under the SABR model, computed with
% Antonov's mapping strategy.

T   = 1;    % time to maturity (years)
r   = 0.00; % risk-free interest rate
F_0 = 1.1;  % initial forward value

% SABR model parameters (consumed by the pricer via the ModParams struct)
ModParams.v0    = 0.2;  % initial volatility
ModParams.beta  = 0.7;  % CEV exponent
ModParams.alpha = 0.08; % vol-of-vol
ModParams.rho   = 0;    % spot/vol correlation

% Alternative parameter set, kept for experimentation:
% ModParams.v0 = 0.25; ModParams.beta = 0.6;
% ModParams.alpha = 0.3; ModParams.rho = -0.5;

% Strike grid expressed as multiples of the initial forward
Kvec = F_0*[0.6 0.8 0.90 0.95 0.999 1.05 1.10 1.2 1.4];
call = 0;   % option-type flag forwarded to the pricer

%%% Price every strike and print each result
prices = zeros(numel(Kvec),1);
for k = 1:numel(Kvec)
    K = Kvec(k);
    prices(k) = SABR_European_AntonovApprox(F_0,K,T,call,r,ModParams);
    fprintf('%.8f\n', prices(k));
end
|
using Base: @deprecate
# Deprecation shims: each old public name forwards to its replacement and
# emits a deprecation warning when called.
@deprecate ci(args...) confint(args...)
@deprecate MultinomialLRT MultinomialLRTest
@deprecate OneSampleHotellingT2 OneSampleHotellingT2Test
@deprecate EqualCovHotellingT2 EqualCovHotellingT2Test
@deprecate UnequalCovHotellingT2 UnequalCovHotellingT2Test
@deprecate BartlettsTest BartlettTest
# Old positional-`alpha` confint API forwarded to the keyword form,
# translating alpha into a confidence level of 1 - alpha.
@deprecate confint(x::HypothesisTest, alpha::Real; kwargs...) confint(x; level=1-alpha, kwargs...)
/**
*
* @file core_zlacpy_pivot.c
*
* PLASMA core_blas kernel
* PLASMA is a software package provided by Univ. of Tennessee,
* Univ. of California Berkeley and Univ. of Colorado Denver
*
* @version 2.6.0
* @author Mathieu Faverge
* @date 2013-02-01
* @precisions normal z -> c d s
*
**/
#include <lapacke.h>
#include "common.h"
#define A(m, n) BLKADDR(descA, PLASMA_Complex64_t, m, n)
/***************************************************************************//**
*
* @ingroup CORE_PLASMA_Complex64_t
*
* CORE_zlacpy_pivot extracts the original version of the rows selected by the
* ipiv array and copies them into a new buffer.
*
* This kernel is used by tournament pivoting algorithms, to extract the
* selected rows from the original matrix that will make it to the next level
* of the tournament.
*
*******************************************************************************
*
* @param[in] descA
* The descriptor of the matrix A in which the kernel will extract the
* original rows.
*
* @param[in] direct
* @arg PlasmaRowwise: The extracted rows are stored in column major layout.
* @arg PlasmaColumnwise: The extracted rows are store in row major layout.
*
* @param[in] k1
* The first element of IPIV for which a row interchange will
* be done.
*
* @param[in] k2
* The last element of IPIV for which a row interchange will
* be done.
*
* @param[in] ipiv
* The pivot indices; Only the element in position k1 to k2
* are accessed. The pivots should be included in the interval 1 to A.m
*
* @param[in,out] rankin
* On entry, the global indices relative to the full matrix A
* factorized, in the local sub-matrix. If init == 1, rankin is
* initialized to A.i, .. A.i+descA.m
* On exit, rows are permutted according to ipiv.
*
* @param[out] rankout
* On exit, contains the global indices of the first (k2-k1+1) rows.
*
* @param[out] A
* An lda-by-descA.n matrix. On exit, A contains the original version
* of the rows selected by the pivoting process.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,(k2-k1+1)).
*
* @param[in] init
* True if rankin needs to be initialized.
* False, if rankin contains already initialized data.
*
*******************************************************************************
*
* @return
* \retval PLASMA_SUCCESS successful exit
* \retval <0 if INFO = -k, the k-th argument had an illegal value
*
******************************************************************************/
#if defined(PLASMA_HAVE_WEAK)
#pragma weak CORE_zlacpy_pivot = PCORE_zlacpy_pivot
#define CORE_zlacpy_pivot PCORE_zlacpy_pivot
#endif
int CORE_zlacpy_pivot( const PLASMA_desc descA,
PLASMA_enum direct, int k1, int k2, const int *ipiv,
int *rankin, int *rankout,
PLASMA_Complex64_t *A, int lda,
int init)
{
int i, ip, it, ir, ld;
const int *lpiv;
int *ro = rankout;
/* Init rankin if first step */
/* rankin[i] tracks the global row index currently held at local row i;
* seeded with the identity permutation descA.i .. descA.i+descA.m-1. */
if ( init ) {
int val = descA.i;
for(i=0; i<descA.m; i++, val++) {
rankin[i] = val;
}
}
/* Generate the rankout */
/* Replay the interchanges ipiv[k1-1 .. k2-1] on rankin (LASWP-style),
* recording in rankout the global index of each selected pivot row.
* Note: ipiv entries are 1-based, per the function contract above. */
ro = rankout;
lpiv = ipiv;
for(i=k1-1; i<k2; i++, ro++, lpiv++) {
*ro = rankin[ (*lpiv) - 1 ];
rankin[ (*lpiv) - 1 ] = rankin[ i ];
}
/* Extract the rows */
if (direct == PlasmaRowwise) {
/* Selected rows stored in column-major layout: row i of the
* lda-by-descA.n output, i.e. destination stride lda. */
ro = rankout;
for(i=k1-1; i<k2; i++, ro++) {
ip = (*ro) - descA.i; /* local row index inside the sub-matrix */
it = ip / descA.mb; /* tile row holding that row */
ir = ip % descA.mb; /* row offset within the tile */
ld = BLKLDD(descA, it);
cblas_zcopy(descA.n, A(it, 0) + ir, ld,
A + i, lda );
}
}
else {
/* Selected rows stored in row-major layout: each row written
* contiguously at offset i*lda (destination stride 1). */
ro = rankout;
for(i=k1-1; i<k2; i++, ro++) {
ip = (*ro) - descA.i;
it = ip / descA.mb;
ir = ip % descA.mb;
ld = BLKLDD(descA, it);
cblas_zcopy(descA.n, A(it, 0) + ir, ld,
A + i*lda, 1 );
}
}
return PLASMA_SUCCESS;
}
|
(* Chap 15 Extraction *)
(* Chap 15.1 Basic Extraction *)
Require Coq.extraction.Extraction.
Extraction Language OCaml.
From Coq Require Import Arith.Arith.
From Coq Require Import Init.Nat.
From Coq Require Import Arith.EqNat.
From LF Require Import ImpCEvalFun.
(* Extract the step-indexed evaluator with default type mappings. *)
Extraction "test1.ml" ceval_step.
(* Chap 15.2 Controlling Extraction of Specific Types *)
Extract Inductive bool => "bool" [ "true" "false" ].
(* Map Coq's unary nat onto OCaml's native int.  The last argument is the
   OCaml code used to pattern-match on a nat; it is emitted verbatim into
   the extracted file, so it must be valid OCaml.  FIX: the function arrow
   was written with the Unicode character "→", which OCaml rejects; it must
   be the ASCII "->" or test2.ml fails to compile. *)
Extract Inductive nat => "int"
  [ "0" "(fun x -> x + 1)" ]
  "(fun zero succ n ->
      if n=0 then zero () else succ (n-1))".
(* Map the arithmetic primitives directly onto OCaml operators. *)
Extract Constant plus => "( + )".
Extract Constant mult => "( * )".
Extract Constant eqb => "( = )".
Extract Constant minus => "( - )".
Extraction "test2.ml" ceval_step.
(* Chap 15.3 A Complete Example *)
Require Import ExtrOcamlBasic.
Require Import ExtrOcamlString.
Extract Inductive sumbool => "bool" ["true" "false"].
From LF Require Import Imp.
From LF Require Import ImpParser.
From LF Require Import Maps.
Extraction "test.ml" empty_st ceval_step parse.
(* Chap 15.4 Discussion *)
(* Skipped *)
(* Chap 15.5 Going Further *)
(* End *)
|
{-# OPTIONS --without-K #-}
open import lib.Basics
open import lib.types.Group
open import lib.types.Bool
open import lib.types.Nat
open import lib.types.Pi
open import lib.types.Sigma
open import lib.groups.Homomorphisms
open import lib.groups.Lift
open import lib.groups.Unit
module lib.groups.GroupProduct where
{- binary product -}
-- Pointwise group structure on a binary product of carriers: identity,
-- inverse and composition act componentwise, and each group law is the
-- pairing (pair×=) of the componentwise proofs.
×-group-struct : ∀ {i j} {A : Type i} {B : Type j}
(GS : GroupStructure A) (HS : GroupStructure B)
→ GroupStructure (A × B)
×-group-struct GS HS = record {
ident = (ident GS , ident HS);
inv = λ {(g , h) → (inv GS g , inv HS h)};
comp = λ {(g₁ , h₁) (g₂ , h₂) → (comp GS g₁ g₂ , comp HS h₁ h₂)};
unitl = λ {(g , h) → pair×= (unitl GS g) (unitl HS h)};
unitr = λ {(g , h) → pair×= (unitr GS g) (unitr HS h)};
assoc = λ {(g₁ , h₁) (g₂ , h₂) (g₃ , h₃) →
pair×= (assoc GS g₁ g₂ g₃) (assoc HS h₁ h₂ h₃)};
invl = λ {(g , h) → pair×= (invl GS g) (invl HS h)};
invr = λ {(g , h) → pair×= (invr GS g) (invr HS h)}}
where open GroupStructure
-- Binary product of groups: product carrier, product truncation level,
-- product structure.
_×ᴳ_ : ∀ {i j} → Group i → Group j → Group (lmax i j)
_×ᴳ_ (group A A-level A-struct) (group B B-level B-struct) =
group (A × B) (×-level A-level B-level) (×-group-struct A-struct B-struct)
{- general product -}
-- Pointwise group structure on a dependent function type; the group
-- laws hold pointwise and are promoted to equalities of functions via
-- function extensionality (λ=).
Π-group-struct : ∀ {i j} {I : Type i} {A : I → Type j}
(FS : (i : I) → GroupStructure (A i))
→ GroupStructure (Π I A)
Π-group-struct FS = record {
ident = ident ∘ FS;
inv = λ f i → inv (FS i) (f i);
comp = λ f g i → comp (FS i) (f i) (g i);
unitl = λ f → (λ= (λ i → unitl (FS i) (f i)));
unitr = λ f → (λ= (λ i → unitr (FS i) (f i)));
assoc = λ f g h → (λ= (λ i → assoc (FS i) (f i) (g i) (h i)));
invl = λ f → (λ= (λ i → invl (FS i) (f i)));
invr = λ f → (λ= (λ i → invr (FS i) (f i)))}
where open GroupStructure
-- Product of an I-indexed family of groups.
Πᴳ : ∀ {i j} (I : Type i) (F : I → Group j) → Group (lmax i j)
Πᴳ I F = group (Π I (El ∘ F)) (Π-level (λ i → El-level (F i)))
(Π-group-struct (group-struct ∘ F))
where open Group
{- the product of abelian groups is abelian -}
-- Commutativity of the product follows componentwise.
×ᴳ-abelian : ∀ {i j} {G : Group i} {H : Group j}
→ is-abelian G → is-abelian H → is-abelian (G ×ᴳ H)
×ᴳ-abelian aG aH (g₁ , h₁) (g₂ , h₂) = pair×= (aG g₁ g₂) (aH h₁ h₂)
-- Likewise, a product of abelian groups over any index type is abelian.
Πᴳ-abelian : ∀ {i j} {I : Type i} {F : I → Group j}
→ (∀ i → is-abelian (F i)) → is-abelian (Πᴳ I F)
Πᴳ-abelian aF f₁ f₂ = λ= (λ i → aF i (f₁ i) (f₂ i))
{- defining a homomorphism into a binary product -}
-- Pairing of homomorphisms: the universal map into a binary product.
×ᴳ-hom-in : ∀ {i j k} {G : Group i} {H : Group j} {K : Group k}
→ (G →ᴳ H) → (G →ᴳ K) → (G →ᴳ H ×ᴳ K)
×ᴳ-hom-in (group-hom h h-comp) (group-hom k k-comp) = record {
f = λ x → (h x , k x);
pres-comp = λ x y → pair×= (h-comp x y) (k-comp x y)}
{- projection homomorphisms -}
×ᴳ-fst : ∀ {i j} {G : Group i} {H : Group j} → (G ×ᴳ H →ᴳ G)
×ᴳ-fst = record {f = fst; pres-comp = λ _ _ → idp}
×ᴳ-snd : ∀ {i j} {G : Group i} {H : Group j} → (G ×ᴳ H →ᴳ H)
×ᴳ-snd = record {f = snd; pres-comp = λ _ _ → idp}
-- Evaluation at a fixed index is a homomorphism out of a Πᴳ-group.
Πᴳ-proj : ∀ {i j} {I : Type i} {F : I → Group j} (i : I)
→ (Πᴳ I F →ᴳ F i)
Πᴳ-proj i = record {
f = λ f → f i;
pres-comp = λ _ _ → idp}
{- injection homomorphisms -}
-- Injections pair the identity with the constant (trivial) homomorphism.
module _ {i j} {G : Group i} {H : Group j} where
×ᴳ-inl : G →ᴳ G ×ᴳ H
×ᴳ-inl = ×ᴳ-hom-in (idhom G) cst-hom
×ᴳ-inr : H →ᴳ G ×ᴳ H
×ᴳ-inr = ×ᴳ-hom-in (cst-hom {H = G}) (idhom H)
-- Diagonal homomorphism g ↦ (g , g).
×ᴳ-diag : ∀ {i} {G : Group i} → (G →ᴳ G ×ᴳ G)
×ᴳ-diag = ×ᴳ-hom-in (idhom _) (idhom _)
{- when G is abelian, we can define a map H×K → G as a sum of maps
- H → G and K → G (that is, the product behaves as a sum) -}
module _ {i j k} {G : Group i} {H : Group j} {K : Group k}
(G-abelian : is-abelian G) where
private
module G = Group G
module H = Group H
module K = Group K
-- Interchange law (g₁g₂)(g₃g₄) == (g₁g₃)(g₂g₄), valid because G is
-- abelian; proved by equational reasoning with associativity and
-- commutativity.
lemma : (g₁ g₂ g₃ g₄ : G.El) →
G.comp (G.comp g₁ g₂) (G.comp g₃ g₄)
== G.comp (G.comp g₁ g₃) (G.comp g₂ g₄)
lemma g₁ g₂ g₃ g₄ =
(g₁ □ g₂) □ (g₃ □ g₄)
=⟨ G.assoc g₁ g₂ (g₃ □ g₄) ⟩
g₁ □ (g₂ □ (g₃ □ g₄))
=⟨ G-abelian g₃ g₄ |in-ctx (λ w → g₁ □ (g₂ □ w)) ⟩
g₁ □ (g₂ □ (g₄ □ g₃))
=⟨ ! (G.assoc g₂ g₄ g₃) |in-ctx (λ w → g₁ □ w) ⟩
g₁ □ ((g₂ □ g₄) □ g₃)
=⟨ G-abelian (g₂ □ g₄) g₃ |in-ctx (λ w → g₁ □ w) ⟩
g₁ □ (g₃ □ (g₂ □ g₄))
=⟨ ! (G.assoc g₁ g₃ (g₂ □ g₄)) ⟩
(g₁ □ g₃) □ (g₂ □ g₄) ∎
where _□_ = G.comp
-- The "sum" (h , k) ↦ φ(h)·ψ(k); a homomorphism by the interchange law.
×ᴳ-sum-hom : (H →ᴳ G) → (K →ᴳ G) → (H ×ᴳ K →ᴳ G)
×ᴳ-sum-hom φ ψ = record {
f = λ {(h , k) → G.comp (φ.f h) (ψ.f k)};
pres-comp = λ {(h₁ , k₁) (h₂ , k₂) →
G.comp (φ.f (H.comp h₁ h₂)) (ψ.f (K.comp k₁ k₂))
=⟨ φ.pres-comp h₁ h₂ |in-ctx (λ w → G.comp w (ψ.f (K.comp k₁ k₂))) ⟩
G.comp (G.comp (φ.f h₁) (φ.f h₂)) (ψ.f (K.comp k₁ k₂))
=⟨ ψ.pres-comp k₁ k₂
|in-ctx (λ w → G.comp (G.comp (φ.f h₁) (φ.f h₂)) w) ⟩
G.comp (G.comp (φ.f h₁) (φ.f h₂)) (G.comp (ψ.f k₁) (ψ.f k₂))
=⟨ lemma (φ.f h₁) (φ.f h₂) (ψ.f k₁) (ψ.f k₂) ⟩
G.comp (G.comp (φ.f h₁) (ψ.f k₁)) (G.comp (φ.f h₂) (ψ.f k₂)) ∎}}
where
module φ = GroupHom φ
module ψ = GroupHom ψ
abstract
-- η-rule: the identity on G ×ᴳ H is the sum of the two injections.
×ᴳ-sum-hom-η : ∀ {i j} (G : Group i) (H : Group j)
(aGH : is-abelian (G ×ᴳ H))
→ idhom (G ×ᴳ H) == ×ᴳ-sum-hom aGH (×ᴳ-inl {G = G}) (×ᴳ-inr {G = G})
×ᴳ-sum-hom-η G H aGH = hom= _ _ $ λ= $ λ {(g , h) →
! (pair×= (Group.unitr G g) (Group.unitl H h))}
-- Postcomposition distributes over the sum of homomorphisms.
∘-×ᴳ-sum-hom : ∀ {i j k l}
{G : Group i} {H : Group j} {K : Group k} {L : Group l}
(aK : is-abelian K) (aL : is-abelian L)
(φ : K →ᴳ L) (ψ : G →ᴳ K) (χ : H →ᴳ K)
→ ×ᴳ-sum-hom aL (φ ∘ᴳ ψ) (φ ∘ᴳ χ) == φ ∘ᴳ (×ᴳ-sum-hom aK ψ χ)
∘-×ᴳ-sum-hom aK aL φ ψ χ = hom= _ _ $ λ= $ λ {(g , h) →
! (GroupHom.pres-comp φ (GroupHom.f ψ g) (GroupHom.f χ h))}
{- define a homomorphism G₁ × G₂ → H₁ × H₂ from homomorphisms
- G₁ → H₁ and G₂ → H₂ -}
-- Functoriality of the product: apply φ and ψ in parallel, componentwise.
×ᴳ-parallel-hom : ∀ {i j k l} {G₁ : Group i} {G₂ : Group j}
{H₁ : Group k} {H₂ : Group l}
→ (G₁ →ᴳ H₁) → (G₂ →ᴳ H₂) → (G₁ ×ᴳ G₂ →ᴳ H₁ ×ᴳ H₂)
×ᴳ-parallel-hom φ ψ = record {
f = λ {(h₁ , h₂) → (φ.f h₁ , ψ.f h₂)};
pres-comp = λ {(h₁ , h₂) (h₁' , h₂') →
pair×= (φ.pres-comp h₁ h₁') (ψ.pres-comp h₂ h₂')}}
where
module φ = GroupHom φ
module ψ = GroupHom ψ
{- 0ᴳ is a unit for product -}
-- The trivial group is a left unit for ×ᴳ; the equality is obtained by
-- univalence for groups (group-ua) from the projection equivalence.
×ᴳ-unit-l : ∀ {i} {G : Group i} → 0ᴳ {i} ×ᴳ G == G
×ᴳ-unit-l = group-ua
(×ᴳ-snd {G = 0ᴳ} ,
is-eq snd (λ g → (lift unit , g)) (λ _ → idp) (λ _ → idp))
-- Symmetrically, the trivial group is a right unit for ×ᴳ.
×ᴳ-unit-r : ∀ {i} {G : Group i} → G ×ᴳ 0ᴳ {i} == G
×ᴳ-unit-r = group-ua
(×ᴳ-fst , (is-eq fst (λ g → (g , lift unit)) (λ _ → idp) (λ _ → idp)))
{- A product Πᴳ indexed by Bool is the same as a binary product -}
module _ {i} (Pick : Lift {j = i} Bool → Group i) where
-- A Bool-indexed product of groups is the binary product of its two
-- components; witnessed by the pairing of the two projections.
Πᴳ-Bool-is-×ᴳ :
Πᴳ (Lift Bool) Pick == (Pick (lift true)) ×ᴳ (Pick (lift false))
Πᴳ-Bool-is-×ᴳ = group-ua (φ , e)
where
φ = ×ᴳ-hom-in (Πᴳ-proj {F = Pick} (lift true))
(Πᴳ-proj {F = Pick} (lift false))
-- The inverse maps a pair back to a function on the two indices.
e : is-equiv (GroupHom.f φ)
e = is-eq _ (λ {(g , h) → λ {(lift true) → g; (lift false) → h}})
(λ _ → idp)
(λ _ → λ= (λ {(lift true) → idp; (lift false) → idp}))
{- Commutativity of ×ᴳ -}
-- Swapping components is a group isomorphism.
×ᴳ-comm : ∀ {i j} (H : Group i) (K : Group j) → H ×ᴳ K ≃ᴳ K ×ᴳ H
×ᴳ-comm H K =
(record {
f = λ {(h , k) → (k , h)};
pres-comp = λ _ _ → idp} ,
snd (equiv _ (λ {(k , h) → (h , k)}) (λ _ → idp) (λ _ → idp)))
{- Associativity of ×ᴳ -}
-- Reassociating the pairing is an isomorphism, turned into an equality
-- of groups by group-ua.
×ᴳ-assoc : ∀ {i j k} (G : Group i) (H : Group j) (K : Group k)
→ ((G ×ᴳ H) ×ᴳ K) == (G ×ᴳ (H ×ᴳ K))
×ᴳ-assoc G H K = group-ua
(record {
f = λ {((g , h) , k) → (g , (h , k))};
pres-comp = λ _ _ → idp} ,
snd (equiv _ (λ {(g , (h , k)) → ((g , h) , k)}) (λ _ → idp) (λ _ → idp)))
module _ {i} where
-- n-fold product (power) of a group, with H ^ᴳ 0 the trivial group.
_^ᴳ_ : Group i → ℕ → Group i
H ^ᴳ O = 0ᴳ
H ^ᴳ (S n) = H ×ᴳ (H ^ᴳ n)
-- Powers are additive: H^m × H^n == H^(m+n), by induction on m using
-- the unit and associativity laws proved above.
^ᴳ-sum : (H : Group i) (m n : ℕ) → (H ^ᴳ m) ×ᴳ (H ^ᴳ n) == H ^ᴳ (m + n)
^ᴳ-sum H O n = ×ᴳ-unit-l {G = H ^ᴳ n}
^ᴳ-sum H (S m) n =
×ᴳ-assoc H (H ^ᴳ m) (H ^ᴳ n) ∙ ap (λ K → H ×ᴳ K) (^ᴳ-sum H m n)
|
### Example of principal component analysis
### - prefecture-level data on living environment in Japan
## Load packages
require(MASS)
require(tidyverse)
require(ggfortify)
require(GGally)
## Read the data (uses "jpamenity.csv")
raw <- read.csv(file="data/jpamenity.csv") # read the data
scan(file="data/jpamenity.txt",what=character(),sep=";") # show the dataset description
## Reshape the data
mydata <- raw[-1,-c(1,2)] # drop unneeded rows and columns
names(mydata) <- names(read.csv("data/jpamenityitem.csv")) # abbreviate the variable names
rownames(mydata) <- raw[-1,1] # use prefecture names as row names
# Region labels (Hokkaido, Tohoku, Kanto, Chubu, Kinki, Chugoku, Shikoku, Kyushu)
areaname <- c("北海道","東北","関東","中部","近畿","中国","四国","九州")
area <- rep(areaname,c(1,6,7,9,7,5,4,8))
## Display the data
## print(mydata) # print all rows
head(mydata) # show the first 6 rows
## tail(mydata) # show the last 6 rows
## Scatter plot matrix of the data (selected items only): figure (a)
item <- c(1,7,8,18,19,20)
## print(names(mydata)[item])
ggscatmat(data.frame(mydata,area),
columns=item, color="area", alpha=.5) +
theme(text=element_text(family="HiraMaruProN-W4"))
## ## When using ggpairs instead (no legend is drawn)
## ggpairs(data.frame(mydata,area),
## columns=item, mapping=aes(colour=area)) +
## theme(text=element_text(family="HiraMaruProN-W4"))
## Principal component analysis
model <-princomp(mydata,cor=TRUE)
## model <-prcomp(mydata,scale.=TRUE) # when using prcomp instead
## Display the fitted model
print(model)
## Contribution ratios (normalized): figure (b)
plot(model)
## PC scores (scale=1) [default]: figure (c)
autoplot(model, data=mydata, shape=FALSE,
label=TRUE, label.family="HiraMaruProN-W4", label.size=3,
loadings=TRUE, loadings.colour="blue",
loadings.label=TRUE, loadings.label.family="HiraMaruProN-W4",
loadings.label.size=4, loadings.label.colour="blue",
main="県別の生活環境") +
theme(text=element_text(family="HiraMaruProN-W4"))
## Zoom in on the central region: figure (d)
autoplot(model, data=mydata, shape=FALSE,
xlim=c(-.3,.3), ylim=c(-.3,.3),
label=TRUE, label.family="HiraMaruProN-W4", label.size=3,
loadings=TRUE, loadings.colour="blue",
loadings.label=TRUE, loadings.label.family="HiraMaruProN-W4",
loadings.label.size=4, loadings.label.colour="blue",
main="県別の生活環境 (中心を拡大)") +
theme(text=element_text(family="HiraMaruProN-W4"))
## PC scores (scale=0): figure (e)
autoplot(model, data=mydata, scale=0, shape=FALSE,
label=TRUE, label.family="HiraMaruProN-W4", label.size=3,
loadings=TRUE, loadings.colour="blue",
loadings.label=TRUE, loadings.label.family="HiraMaruProN-W4",
loadings.label.size=4, loadings.label.colour="blue",
main="scale=0での表示") +
theme(text=element_text(family="HiraMaruProN-W4"))
## PC scores (scale=1/2): figure (f)
autoplot(model, data=mydata, scale=1/2, shape=FALSE,
label=TRUE, label.family="HiraMaruProN-W4", label.size=3,
loadings=TRUE, loadings.colour="blue",
loadings.label=TRUE, loadings.label.family="HiraMaruProN-W4",
loadings.label.size=4, loadings.label.colour="blue",
main="scale=1/2での表示") +
theme(text=element_text(family="HiraMaruProN-W4"))
|
# Bilateral anatomical structures of the frontal lobe.
const FrontalPole = BilateralStructure("frontal pole")
const FrontopolarCortex = BilateralStructure("frontopolar cortex")
const SuperiorFrontalGyrus = BilateralStructure("superior frontal gyrus")
const MiddleFrontalGyrus = BilateralStructure("middle frontal gyrus")
const RostralMiddleFrontal = BilateralStructure("rostral middle frontal gyrus")
const CaudalMiddleFrontalGyrus = BilateralStructure("caudal middle frontal gyrus")
const ParsOrbitalis = BilateralStructure("pars orbitalis")
const ParsTriangularis = BilateralStructure("pars triangularis")
const ParsOpercularis = BilateralStructure("pars opercularis")
const InferiorFrontalGyrus = BilateralStructure("inferior frontal gyrus")
const PrecentralGyrus = BilateralStructure("precentral gyrus")
const MedialFrontalGyrus = BilateralStructure("medial frontal gyrus")
# update SupplementaryMotorArea name
const ParacentralLobule = BilateralStructure("paracentral lobule")
const OrbitalGyrus = BilateralStructure("orbital gyrus")
# FIX: label had a stray underscore ("lateral_orbitofrontal gyrus"),
# inconsistent with every other label in this list.
const LateralOrbitofrontalGyrus = BilateralStructure("lateral orbitofrontal gyrus")
# FIX: copy-paste bug — the medial constant was labelled
# "lateral orbitofrontal gyrus".
const MedialOrbitofrontalGyrus = BilateralStructure("medial orbitofrontal gyrus")
const FrontoMarginalGyrus = BilateralStructure("fronto-marginal gyrus")
const GyrusRectus = BilateralStructure("gyrus rectus")
const RostralGyrus = BilateralStructure("rostral gyrus")
const FrontalLobe = BilateralStructure("frontal lobe")
#const FrontoMarginalSulcus = Sulcus("fronto-marginal_sulcus")
|
<div style="border: 1px solid #C1E1FF; padding: 20px; background-color: #F3F6FF; border-radius:5px;">
<center>
<h1>Detección de cáncer de mama utilizando `sklearn`</h1><br/><br/>
<span style="color:#222"><b>Diego García Morate<b></span><br/>
<span style="color:#222">diegogm at unlimiteck.com</span><br/>
<br/>
<span style="color:#666">Meetup Data Science with Python</span><br/>
<span style="color:#888">Miércoles, 7 de Marzo de 2018 @ TheCubeMadrid</span><br/>
</center>
</div>
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Los problemas de clasificación</span>
</div>
Imagínate que tenemos un dataset con datos de distintos sensores de un coche (temperatura del motor, nivel de aceite...), y queremos saber a partir de esos sensores si el coche está funcionando bien o mal. ¿Cómo podríamos hacerlo?
La manera tradicional consiste en crear una serie de reglas que relacionen los distintos sensores y que definan que rangos de funcionamiento son buenos o malos. De esta manera podríamos definir si el funcionamiento es `bueno` o `malo`.
Cada una de las reglas podría ser de este estilo:
<pre style="background-color:#efefef; padding: 20px;">
si (temp_motor > 100ºC) y (nivel_aceite < 30) entonces estado malo
si (temp_motor > 30ºC) y (nivel_aceite > 30) entonces estado bueno
</pre>
El problema de este enfoque es que nos obliga a conocer las relaciones que existen entre cada una de las variables y esto puede ser muy complicado.
La premisa de la que parte el aprendizaje automático es la siguiente: **¿Podríamos a partir de un conjunto de datos ya etiquetados generar automáticamente esas `reglas`?**
La respuesta es sí, y eso es una de los principales problemas a resolver en Machine Learning.
Antes de empezar:
* Llamaremos a esas `reglas` que queremos generar: **`modelo`**.
* Llamaremos al conjunto de datos previamente etiquetados: `conjunto de entrenamiento` o `dataset`.
* A cada uno de los casos individuales a evaluar lo llamaremos `instancia`.
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Objetivo de este challenge</span>
</div>
El objetivo de este challenge es entrenar un **modelo** capaz de determinar de forma automática si un tumor es `benigno` o `maligno`.
De paso, aprenderemos algunas técnicas de evaluación algoritmos de clasificación.
### Configuración del entorno
Antes de empezar vamos a cargar unos cuantos paquetes de python. Entre ellos usaremos `sklearn` como biblioteca de ML sobre la cual entrenar nuestro modelo.
```python
%matplotlib inline
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score
```
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Dataset</span>
</div>
En este caso vamos a utilizar un dataset real de análisis de cáncer de mama del Breast Cancer Center de Wisconsin. Este dataset está disponible en `sklearn` así que cargarlo es tan fácil como:
```python
from sklearn import datasets
dataset = sklearn.datasets.load_breast_cancer()
```
Como siempre, una vez cargado debemos inspeccionar y comprender el dataset:
```python
print(dataset.keys())
```
dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names'])
```python
print(dataset.DESCR)
```
Breast Cancer Wisconsin (Diagnostic) Database
=============================================
Notes
-----
Data Set Characteristics:
:Number of Instances: 569
:Number of Attributes: 30 numeric, predictive attributes and the class
:Attribute Information:
- radius (mean of distances from center to points on the perimeter)
- texture (standard deviation of gray-scale values)
- perimeter
- area
- smoothness (local variation in radius lengths)
- compactness (perimeter^2 / area - 1.0)
- concavity (severity of concave portions of the contour)
- concave points (number of concave portions of the contour)
- symmetry
- fractal dimension ("coastline approximation" - 1)
The mean, standard error, and "worst" or largest (mean of the three
largest values) of these features were computed for each image,
resulting in 30 features. For instance, field 3 is Mean Radius, field
13 is Radius SE, field 23 is Worst Radius.
- class:
- WDBC-Malignant
- WDBC-Benign
:Summary Statistics:
===================================== ====== ======
Min Max
===================================== ====== ======
radius (mean): 6.981 28.11
texture (mean): 9.71 39.28
perimeter (mean): 43.79 188.5
area (mean): 143.5 2501.0
smoothness (mean): 0.053 0.163
compactness (mean): 0.019 0.345
concavity (mean): 0.0 0.427
concave points (mean): 0.0 0.201
symmetry (mean): 0.106 0.304
fractal dimension (mean): 0.05 0.097
radius (standard error): 0.112 2.873
texture (standard error): 0.36 4.885
perimeter (standard error): 0.757 21.98
area (standard error): 6.802 542.2
smoothness (standard error): 0.002 0.031
compactness (standard error): 0.002 0.135
concavity (standard error): 0.0 0.396
concave points (standard error): 0.0 0.053
symmetry (standard error): 0.008 0.079
fractal dimension (standard error): 0.001 0.03
radius (worst): 7.93 36.04
texture (worst): 12.02 49.54
perimeter (worst): 50.41 251.2
area (worst): 185.2 4254.0
smoothness (worst): 0.071 0.223
compactness (worst): 0.027 1.058
concavity (worst): 0.0 1.252
concave points (worst): 0.0 0.291
symmetry (worst): 0.156 0.664
fractal dimension (worst): 0.055 0.208
===================================== ====== ======
:Missing Attribute Values: None
:Class Distribution: 212 - Malignant, 357 - Benign
:Creator: Dr. William H. Wolberg, W. Nick Street, Olvi L. Mangasarian
:Donor: Nick Street
:Date: November, 1995
This is a copy of UCI ML Breast Cancer Wisconsin (Diagnostic) datasets.
https://goo.gl/U2Uwz2
Features are computed from a digitized image of a fine needle
aspirate (FNA) of a breast mass. They describe
characteristics of the cell nuclei present in the image.
Separating plane described above was obtained using
Multisurface Method-Tree (MSM-T) [K. P. Bennett, "Decision Tree
Construction Via Linear Programming." Proceedings of the 4th
Midwest Artificial Intelligence and Cognitive Science Society,
pp. 97-101, 1992], a classification method which uses linear
programming to construct a decision tree. Relevant features
were selected using an exhaustive search in the space of 1-4
features and 1-3 separating planes.
The actual linear program used to obtain the separating plane
in the 3-dimensional space is that described in:
[K. P. Bennett and O. L. Mangasarian: "Robust Linear
Programming Discrimination of Two Linearly Inseparable Sets",
Optimization Methods and Software 1, 1992, 23-34].
This database is also available through the UW CS ftp server:
ftp ftp.cs.wisc.edu
cd math-prog/cpo-dataset/machine-learn/WDBC/
References
----------
- W.N. Street, W.H. Wolberg and O.L. Mangasarian. Nuclear feature extraction
for breast tumor diagnosis. IS&T/SPIE 1993 International Symposium on
Electronic Imaging: Science and Technology, volume 1905, pages 861-870,
San Jose, CA, 1993.
- O.L. Mangasarian, W.N. Street and W.H. Wolberg. Breast cancer diagnosis and
prognosis via linear programming. Operations Research, 43(4), pages 570-577,
July-August 1995.
- W.H. Wolberg, W.N. Street, and O.L. Mangasarian. Machine learning techniques
to diagnose breast cancer from fine-needle aspirates. Cancer Letters 77 (1994)
163-171.
Como se indica en la descripción del dataset, éste consta de 569 instancias con 30 atributos numéricos cada uno.
```python
dataset_X = dataset.data
```
```python
dataset_X.shape
```
(569, 30)
```python
dataset_X[0]
```
array([ 1.79900000e+01, 1.03800000e+01, 1.22800000e+02,
1.00100000e+03, 1.18400000e-01, 2.77600000e-01,
3.00100000e-01, 1.47100000e-01, 2.41900000e-01,
7.87100000e-02, 1.09500000e+00, 9.05300000e-01,
8.58900000e+00, 1.53400000e+02, 6.39900000e-03,
4.90400000e-02, 5.37300000e-02, 1.58700000e-02,
3.00300000e-02, 6.19300000e-03, 2.53800000e+01,
1.73300000e+01, 1.84600000e+02, 2.01900000e+03,
1.62200000e-01, 6.65600000e-01, 7.11900000e-01,
2.65400000e-01, 4.60100000e-01, 1.18900000e-01])
Este dataset clasifica los casos en cáncer maligno ($0$) o benigno ($1$).
```python
print(dataset.target_names)
```
['malignant' 'benign']
```python
dataset_y = dataset.target
```
```python
dataset_y.shape
```
(569,)
```python
print(dataset_y)
```
[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 1 0 1 1 1 1 1 0 0 1 0 0 1 1 1 1 0 1 0 0 1 1 1 1 0 1 0 0
1 0 1 0 0 1 1 1 0 0 1 0 0 0 1 1 1 0 1 1 0 0 1 1 1 0 0 1 1 1 1 0 1 1 0 1 1
1 1 1 1 1 1 0 0 0 1 0 0 1 1 1 0 0 1 0 1 0 0 1 0 0 1 1 0 1 1 0 1 1 1 1 0 1
1 1 1 1 1 1 1 1 0 1 1 1 1 0 0 1 0 1 1 0 0 1 1 0 0 1 1 1 1 0 1 1 0 0 0 1 0
1 0 1 1 1 0 1 1 0 0 1 0 0 0 0 1 0 0 0 1 0 1 0 1 1 0 1 0 0 0 0 1 1 0 0 1 1
1 0 1 1 1 1 1 0 0 1 1 0 1 1 0 0 1 0 1 1 1 1 0 1 1 1 1 1 0 1 0 0 0 0 0 0 0
0 0 0 0 0 0 0 1 1 1 1 1 1 0 1 0 1 1 0 1 1 0 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1
1 0 1 1 0 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 0 1 0 1 1 1 1 0 0 0 1 1
1 1 0 1 0 1 0 1 1 1 0 1 1 1 1 1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 1 0 0
0 1 0 0 1 1 1 1 1 0 1 1 1 1 1 0 1 1 1 0 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 1 1
1 0 1 1 1 1 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 1 0 1 1 1 1 1 0 1 1
0 1 0 1 1 0 1 0 1 1 1 1 1 1 1 1 0 0 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 1
1 1 1 1 1 1 0 1 0 1 1 0 1 1 1 1 1 0 0 1 0 1 0 1 1 1 1 1 0 1 1 0 1 0 1 0 0
1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 0 1 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 0 0 0 0 0 0 1]
### Preguntas sobre el dataset
#### ¿Está balanceado o desbalanceado?
Un dataset se denomina balanceado cuando el número de instancias de cada clase es similar.
```python
np.bincount(dataset.target)
```
array([212, 357])
Como podemos ver hay 212 casos malignos y 357 benignos.
```python
np.bincount(dataset.target)[0] / dataset.target.shape[0]
```
0.37258347978910367
El 37% de los casos son malignos y el 63% son benignos, el dataset está desbalanceado pero no mucho.
#### ¿Es suficientemente extenso para entrenar un modelo?
Tiene 569 instancias, con 30 atributos cada una.
* Si cada atributo fuera una variable binaria totalmente independiente necesitaríamos $2^{30}$ = 1.000 millones de instancias para recubrir todas las posibles combinaciones
No existe una respuesta fácil a esta pregunta, dependerá de la redundancia interna entre variables, la relación entre las mismas y la aportación de cada variable a la variable objetivo.
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Métricas de evaluación</span>
</div>
Ya tenemos cargado el dataset y, antes de entrenar un modelo, tenemos que definir una **función objetivo**. En este caso lo que tenemos que hacer es establecer una métrica de evaluación que generalmente tendremos que maximizar.
La métrica más sencilla es el porcentaje acierto:
$$\%_{acierto} = \frac{{Casos \space bien \space clasificados}}{{Casos \space totales}}$$
### ¿Cual sería el clasificador más sencillo con mejor % de acierto que se te ocurre? ¿es útil en la práctica?
#### a) Moneda al aire
Una moneda al aire acertaría el 50% de los casos positivos y 50% de los negativos. Es decir:
$$
\begin{align}
P(acertar) &= 50\% * P(Malignos) + 50\% * P(Benignos) \\
P(acertar) &= 50\% * 37\% + 50\% * 63\% \\
P(acertar) &= 18,5\% + 31,5\% \\
P(acertar) &= 50\%
\end{align}
$$
#### b) Quedarse siempre con la clase más frecuente
Clasificar todos los casos como benignos que es la clase más frecuente (63%)
```python
from sklearn.dummy import DummyClassifier
dc_most_frequent = DummyClassifier(strategy='most_frequent', random_state=None)
```
```python
dc_most_frequent.fit(dataset_X, dataset_y)
dc_most_frequent.score(dataset_X, dataset_y)
```
0.62741652021089633
### ¿Estos clasificadores son útiles en la práctica?
No mucho, porque en el fondo el incremento de información que proporciona este clasificador sobre el problema es 0.
Aunque sí son útiles para determinar el porcentaje de acierto mínimo que debería cumplir un clasificador. En este caso, todo lo que esté por debajo de un 63% no resulta muy interesante.
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Conjunto de entrenamiento y test</span>
</div>
Algo fundamental cuando entrenamos un modelo es hacer una división del conjunto de entrenamiento y de test. No tendría sentido entrenar el modelo con los mismos datos que luego vamos a utilizar en su evaluación.
Podemos partir el dataset en conjunto de entrenamiento y test con `sklearn` con la siguiente función:
```python
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(dataset_X, dataset_y, test_size=.3, random_state=42)
```
```python
X_train.shape
```
(398, 30)
```python
X_test.shape
```
(171, 30)
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Entrena un modelo</span>
</div>
Una vez que tenemos el conjunto de entrenamiento y test vamos a entrenar el modelo. En este caso vamos a utilizar un algoritmo k-vecinos (`KNeighborsClassifier`) como clasificador.
Podemos instanciar este modelo con `sklearn` de la siguiente forma:
```python
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
```
Una vez instanciado, lo podemos entrenar con la función `fit` utilizando el conjunto de entrenamiento:
```python
knn.fit(X_train, y_train)
```
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=5, p=2,
weights='uniform')
Una vez entrenado lo podemos utilizar para predecir las instancias del conjunto de test.
```python
y_predicted = knn.predict(X_test)
```
```python
y_predicted
```
array([1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0,
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1,
1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1,
0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1,
1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1,
1, 1, 1, 0, 1, 1, 1, 1, 0, 1])
Y podemos calcular el porcentaje de acierto de la siguiente manera:
```python
(knn.predict(X_test) == y_test).mean()
```
0.95906432748538006
También existe la función `score` que nos devuelve automáticamente el porcentaje de acierto:
```python
knn.score(X_test, y_test)
```
0.95906432748538006
El porcentaje de acierto es un 95%. **¿Es bueno o malo?**
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Evaluación de resultados</span>
</div>
## Matriz de confusión
Aunque el porcentaje de acierto es sencillo de entender muestra una realidad muy parcial del problema.
Imagínate que en nuestro ejemplo del automóvil tuviéramos un 83% de acierto. Podría ocurrir que nuestro modelo clasificara bien todos los casos en los que el coche estuviera bien, pero ninguno en los que estuviera mal. Es decir, podemos tener un buen porcentaje de acierto y que, aun así, el clasificador funcione mal.
Un método un poco más complejo, pero muy efectivo es utilizar la `matriz de confusión` esta matriz nos indica por cada categoría a clasificar (lo que se denomina `clase`) cuantos casos han sido clasificados bien y cuantos mal.
```python
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_predicted)
```
array([[ 57, 6],
[ 1, 107]])
Podemos dibujar la matriz de confusión con `sklearn` y el paquete `scikitplot` de la siguiente manera:
```python
import scikitplot as skplt
skplt.metrics.plot_confusion_matrix(y_test, y_predicted)
```
### Si tengo un paciente con un cáncer maligno (0), ¿qué probabilidad hay de que el modelo lo clasifique como cáncer maligno (0)?
La probabilidad de que lo clasifique bien es:
```python
57 / (57+6)
```
0.9047619047619048
### Si tengo un paciente con un cáncer benigno (1), ¿qué probabilidad hay de que el modelo lo clasifique como cáncer benigno (1)?
```python
107 / (107 + 1)
```
0.9907407407407407
En este caso esta medida se conoce como `recall`, también lo podemos calcular como:
```python
recall_score(y_test, y_predicted)
```
0.9907407407407407
```python
skplt.metrics.plot_confusion_matrix(y_test, y_predicted)
```
### Si tengo un paciente con un cáncer maligno (0), ¿qué probabilidad hay de que el modelo lo clasifique como cáncer benigno (1)?
```python
6 / (57+6)
```
### Si tengo un paciente con un cáncer benigno (1), ¿qué probabilidad hay de que el modelo lo clasifique como cáncer maligno (0)?
```python
1 / (107+1)
```
<div style="background-color:#009fdf; color:white; padding:30px; border-radius:5px; font-size:28px; font-weight:500;">
<span>Conclusiones</span>
</div>
## ¿Qué podrías concluir del clasificador?
Está bien, pero sería mejor que aumentara el % de acierto de los cánceres malignos. Porque a esa gente es mejor identificarla y no fallar nunca en su identificación.
A los falsos positivos les puedo repetir la prueba, pero a los falsos negativos no.
## ¿Se te ocurre alguna manera de mejorarlo?
* Utilizar modelos más potentes
* Ampliar el dataset
* Penalizar al modelo los falsos negativos con una matriz de coste
## ¿Es útil este modelo en la práctica?
Habría que contrastar los resultados con los clasificadores actuales (médicos) y también habría que identificar y cuantificar el coste de la extracción y preprocesamiento de los datos.
|
/-
Copyright (c) 2015 Robert Y. Lewis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Robert Y. Lewis
The real numbers, constructed as equivalence classes of Cauchy sequences of rationals.
This construction follows Bishop and Bridges (1985).
The construction of the reals is arranged in four files.
- basic.lean proves properties about regular sequences of rationals in the namespace rat_seq,
defines ℝ to be the quotient type of regular sequences mod equivalence, and shows ℝ is a ring
in namespace real. No classical axioms are used.
- order.lean defines an order on regular sequences and lifts the order to ℝ. In the namespace real,
ℝ is shown to be an ordered ring. No classical axioms are used.
- division.lean defines the inverse of a regular sequence and lifts this to ℝ. If a sequence is
equivalent to the 0 sequence, its inverse is the zero sequence. In the namespace real, ℝ is shown
to be an ordered field. This construction is classical.
- complete.lean shows ℝ is complete, i.e. that Cauchy sequences of reals converge.
-/
import data.nat data.rat.order data.pnat
open nat eq pnat
open - [coercion] rat
local postfix `⁻¹` := pnat.inv
-- small helper lemmas
-- Algebraic helper: pull a common factor n⁻¹ out of a sum of inverses of
-- products, using (a * n)⁻¹ = a⁻¹ * n⁻¹ on positive naturals.
private theorem s_mul_assoc_lemma_3 (a b n : ℕ+) (p : ℚ) :
p * ((a * n)⁻¹ + (b * n)⁻¹) = p * (a⁻¹ + b⁻¹) * n⁻¹ :=
by rewrite [rat.mul_assoc, right_distrib, *pnat.inv_mul_eq_mul_inv]
-- If n ≥ ⌈q / ε⌉ then q * n⁻¹ ≤ ε (for positive ε and q): choosing the index
-- at least the ceiling of q / ε makes the scaled inverse small enough.
private theorem s_mul_assoc_lemma_4 {n : ℕ+} {ε q : ℚ} (Hε : ε > 0) (Hq : q > 0)
(H : n ≥ pceil (q / ε)) :
q * n⁻¹ ≤ ε :=
begin
note H2 := pceil_helper H (div_pos_of_pos_of_pos Hq Hε),
note H3 := mul_le_of_le_div (div_pos_of_pos_of_pos Hq Hε) H2,
rewrite -(one_mul ε),
apply mul_le_mul_of_mul_div_le,
repeat assumption
end
-- For any positive b there is an n with a + 3·n⁻¹ < a + b:
-- take n = ⌈4 / b⌉, so that 3·n⁻¹ < 4·n⁻¹ ≤ b.
private theorem find_thirds (a b : ℚ) (H : b > 0) : ∃ n : ℕ+, a + n⁻¹ + n⁻¹ + n⁻¹ < a + b :=
let n := pceil (of_nat 4 / b) in
have of_nat 3 * n⁻¹ < b, from calc
of_nat 3 * n⁻¹ < of_nat 4 * n⁻¹
: mul_lt_mul_of_pos_right dec_trivial !pnat.inv_pos
... ≤ of_nat 4 * (b / of_nat 4)
: mul_le_mul_of_nonneg_left (!inv_pceil_div dec_trivial H) !of_nat_nonneg
... = b / of_nat 4 * of_nat 4 : mul.comm
... = b : !div_mul_cancel dec_trivial,
exists.intro n (calc
a + n⁻¹ + n⁻¹ + n⁻¹ = a + (1 + 1 + 1) * n⁻¹ : by rewrite [+right_distrib, +rat.one_mul, -+add.assoc]
... = a + of_nat 3 * n⁻¹ : {show 1+1+1=of_nat 3, from dec_trivial}
... < a + b : rat.add_lt_add_left this a)
-- Squeeze principle over pnat inverses: if a ≤ b + 3·j⁻¹ for every positive
-- natural j, then a ≤ b. Proof by contradiction using find_thirds to find a
-- j for which the hypothesis would fail.
private theorem squeeze {a b : ℚ} (H : ∀ j : ℕ+, a ≤ b + j⁻¹ + j⁻¹ + j⁻¹) : a ≤ b :=
begin
apply le_of_not_gt,
intro Hb,
cases exists_add_lt_and_pos_of_lt Hb with [c, Hc],
cases find_thirds b c (and.right Hc) with [j, Hbj],
have Ha : a > b + j⁻¹ + j⁻¹ + j⁻¹, from lt.trans Hbj (and.left Hc),
apply (not_le_of_gt Ha) !H
end
-- Telescoping identity for differences of products; lets a product difference
-- be bounded by bounding each factor's difference separately.
private theorem rewrite_helper (a b c d : ℚ) : a * b - c * d = a * (b - d) + (a - c) * d :=
by rewrite [mul_sub_left_distrib, mul_sub_right_distrib, add_sub, sub_add_cancel]
-- Regroup a distributed product minus a sum of products into two
-- product differences; used in the distributivity proof (s_distrib).
private theorem rewrite_helper3 (a b c d e f g: ℚ) : a * (b + c) - (d * e + f * g) =
(a * b - d * e) + (a * c - f * g) :=
by rewrite [left_distrib, add_sub_comm]
-- Insert the cross term a * d to split a product difference in two.
private theorem rewrite_helper4 (a b c d : ℚ) : a * b - c * d = (a * b - a * d) + (a * d - c * d) :=
by rewrite[add_sub, sub_add_cancel]
-- Three-step telescoping of a difference through intermediate points x and y.
private theorem rewrite_helper5 (a b x y : ℚ) : a - b = (a - x) + (x - y) + (y - b) :=
by rewrite[*add_sub, *sub_add_cancel]
private theorem rewrite_helper7 (a b c d x : ℚ) :
a * b * c - d = (b * c) * (a - x) + (x * b * c - d) :=
begin
have ∀ (a b c : ℚ), a * b * c = b * c * a,
begin
intros a b c,
rewrite (mul.right_comm b c a),
rewrite (mul.comm b a)
end,
rewrite [mul_sub_left_distrib, add_sub],
calc
a * b * c - d = a * b * c - x * b * c + x * b * c - d : sub_add_cancel
... = b * c * a - b * c * x + x * b * c - d :
begin
rewrite [this a b c, this x b c]
end
end
private theorem ineq_helper (a b : ℚ) (k m n : ℕ+) (H : a ≤ (k * 2 * m)⁻¹ + (k * 2 * n)⁻¹)
(H2 : b ≤ (k * 2 * m)⁻¹ + (k * 2 * n)⁻¹) :
(rat_of_pnat k) * a + b * (rat_of_pnat k) ≤ m⁻¹ + n⁻¹ :=
have H3 : (k * 2 * m)⁻¹ + (k * 2 * n)⁻¹ = (2 * k)⁻¹ * (m⁻¹ + n⁻¹),
begin
rewrite [left_distrib, *pnat.inv_mul_eq_mul_inv],
rewrite (mul.comm k⁻¹)
end,
have H' : a ≤ (2 * k)⁻¹ * (m⁻¹ + n⁻¹),
begin
rewrite H3 at H,
exact H
end,
have H2' : b ≤ (2 * k)⁻¹ * (m⁻¹ + n⁻¹),
begin
rewrite H3 at H2,
exact H2
end,
have a + b ≤ k⁻¹ * (m⁻¹ + n⁻¹), from calc
a + b ≤ (2 * k)⁻¹ * (m⁻¹ + n⁻¹) + (2 * k)⁻¹ * (m⁻¹ + n⁻¹) : add_le_add H' H2'
... = ((2 * k)⁻¹ + (2 * k)⁻¹) * (m⁻¹ + n⁻¹) : by rewrite right_distrib
... = k⁻¹ * (m⁻¹ + n⁻¹) : by rewrite (pnat.add_halves k),
calc (rat_of_pnat k) * a + b * (rat_of_pnat k)
= (rat_of_pnat k) * a + (rat_of_pnat k) * b : by rewrite (mul.comm b)
... = (rat_of_pnat k) * (a + b) : left_distrib
... ≤ (rat_of_pnat k) * (k⁻¹ * (m⁻¹ + n⁻¹)) :
iff.mp (!le_iff_mul_le_mul_left !rat_of_pnat_is_pos) this
... = m⁻¹ + n⁻¹ :
by rewrite[-mul.assoc, pnat.inv_cancel_left, one_mul]
private theorem factor_lemma (a b c d e : ℚ) : abs (a + b + c - (d + (b + e))) = abs ((a - d) + (c - e)) :=
!congr_arg (calc
a + b + c - (d + (b + e)) = a + b + c - (d + b + e) : rat.add_assoc
... = a + b - (d + b) + (c - e) : add_sub_comm
... = a + b - b - d + (c - e) : sub_add_eq_sub_sub_swap
... = a - d + (c - e) : add_sub_cancel)
-- Commutative regrouping of a four-term sum; companion to factor_lemma in
-- the associativity proof for sequence addition.
private theorem factor_lemma_2 (a b c d : ℚ) : (a + b) + (c + d) = (a + c) + (d + b) :=
begin
note H := (binary.comm4 add.comm add.assoc a b c d),
rewrite [add.comm b d at H],
exact H
end
--------------------------------------
-- define cauchy sequences and equivalence. show equivalence actually is one
namespace rat_seq
notation `seq` := ℕ+ → ℚ
-- A sequence is regular (Bishop-Bridges) when |s m - s n| ≤ m⁻¹ + n⁻¹ for all
-- indices; this is a Cauchy condition with an explicit modulus of convergence.
definition regular (s : seq) := ∀ m n : ℕ+, abs (s m - s n) ≤ m⁻¹ + n⁻¹
-- Two sequences are equivalent when their pointwise difference is bounded by
-- n⁻¹ + n⁻¹; regular sequences will be identified modulo this relation.
definition equiv (s t : seq) := ∀ n : ℕ+, abs (s n - t n) ≤ n⁻¹ + n⁻¹
infix `≡` := equiv
-- Reflexivity of ≡: the difference s n - s n is zero, and inverses are nonneg.
theorem equiv.refl (s : seq) : s ≡ s :=
begin
intros,
rewrite [sub_self, abs_zero],
apply add_invs_nonneg
end
-- Symmetry of ≡: |t n - s n| = |s n - t n|, so the same bound applies.
theorem equiv.symm (s t : seq) (H : s ≡ t) : t ≡ s :=
begin
intros,
rewrite [-abs_neg, neg_sub],
exact H n
end
-- From s ≡ t, past index 2 * j the pointwise difference is within j⁻¹:
-- (2j)⁻¹ + (2j)⁻¹ = j⁻¹, and n ≥ 2j only shrinks n⁻¹ + n⁻¹ further.
theorem bdd_of_eq {s t : seq} (H : s ≡ t) :
∀ j : ℕ+, ∀ n : ℕ+, n ≥ 2 * j → abs (s n - t n) ≤ j⁻¹ :=
begin
intros [j, n, Hn],
apply le.trans,
apply H,
rewrite -(pnat.add_halves j),
apply add_le_add,
apply inv_ge_of_le Hn,
apply inv_ge_of_le Hn
end
theorem eq_of_bdd {s t : seq} (Hs : regular s) (Ht : regular t)
(H : ∀ j : ℕ+, ∃ Nj : ℕ+, ∀ n : ℕ+, Nj ≤ n → abs (s n - t n) ≤ j⁻¹) : s ≡ t :=
begin
intros,
have Hj : (∀ j : ℕ+, abs (s n - t n) ≤ n⁻¹ + n⁻¹ + j⁻¹ + j⁻¹ + j⁻¹), begin
intros,
cases H j with [Nj, HNj],
rewrite [-(sub_add_cancel (s n) (s (max j Nj))), +sub_eq_add_neg,
add.assoc (s n + -s (max j Nj)), ↑regular at *],
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply rat.le_trans,
apply add_le_add,
apply Hs,
rewrite [-(sub_add_cancel (s (max j Nj)) (t (max j Nj))), add.assoc],
apply abs_add_le_abs_add_abs,
apply rat.le_trans,
apply rat.add_le_add_left,
apply add_le_add,
apply HNj (max j Nj) (le_max_right j Nj),
apply Ht,
have hsimp : ∀ m : ℕ+, n⁻¹ + m⁻¹ + (j⁻¹ + (m⁻¹ + n⁻¹)) = n⁻¹ + n⁻¹ + j⁻¹ + (m⁻¹ + m⁻¹),
from λm, calc
n⁻¹ + m⁻¹ + (j⁻¹ + (m⁻¹ + n⁻¹)) = n⁻¹ + (j⁻¹ + (m⁻¹ + n⁻¹)) + m⁻¹ : add.right_comm
... = n⁻¹ + (j⁻¹ + m⁻¹ + n⁻¹) + m⁻¹ : add.assoc
... = n⁻¹ + (n⁻¹ + (j⁻¹ + m⁻¹)) + m⁻¹ : add.comm
... = n⁻¹ + n⁻¹ + j⁻¹ + (m⁻¹ + m⁻¹) :
by rewrite[-*add.assoc],
rewrite hsimp,
have Hms : (max j Nj)⁻¹ + (max j Nj)⁻¹ ≤ j⁻¹ + j⁻¹, begin
apply add_le_add,
apply inv_ge_of_le (le_max_left j Nj),
apply inv_ge_of_le (le_max_left j Nj),
end,
apply (calc
n⁻¹ + n⁻¹ + j⁻¹ + ((max j Nj)⁻¹ + (max j Nj)⁻¹) ≤ n⁻¹ + n⁻¹ + j⁻¹ + (j⁻¹ + j⁻¹) :
rat.add_le_add_left Hms
... = n⁻¹ + n⁻¹ + j⁻¹ + j⁻¹ + j⁻¹ : by rewrite *rat.add_assoc)
end,
apply squeeze Hj
end
theorem eq_of_bdd_var {s t : seq} (Hs : regular s) (Ht : regular t)
(H : ∀ ε : ℚ, ε > 0 → ∃ Nj : ℕ+, ∀ n : ℕ+, Nj ≤ n → abs (s n - t n) ≤ ε) : s ≡ t :=
begin
apply eq_of_bdd,
repeat assumption,
intros,
apply H,
apply pnat.inv_pos
end
theorem bdd_of_eq_var {s t : seq} (Hs : regular s) (Ht : regular t) (Heq : s ≡ t) :
∀ ε : ℚ, ε > 0 → ∃ Nj : ℕ+, ∀ n : ℕ+, Nj ≤ n → abs (s n - t n) ≤ ε :=
begin
intro ε Hε,
cases pnat_bound Hε with [N, HN],
existsi 2 * N,
intro n Hn,
apply rat.le_trans,
apply bdd_of_eq Heq N n Hn,
exact HN -- assumption -- TODO: something funny here; what is 11.source.to_has_le_2?
end
theorem equiv.trans (s t u : seq) (Hs : regular s) (Ht : regular t) (Hu : regular u)
(H : s ≡ t) (H2 : t ≡ u) : s ≡ u :=
begin
apply eq_of_bdd Hs Hu,
intros,
existsi 2 * (2 * j),
intro n Hn,
rewrite [-sub_add_cancel (s n) (t n), *sub_eq_add_neg, add.assoc],
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
have Hst : abs (s n - t n) ≤ (2 * j)⁻¹, from bdd_of_eq H _ _ Hn,
have Htu : abs (t n - u n) ≤ (2 * j)⁻¹, from bdd_of_eq H2 _ _ Hn,
rewrite -(pnat.add_halves j),
apply add_le_add,
exact Hst, exact Htu
end
-----------------------------------
-- define operations on cauchy sequences. show operations preserve regularity
-- Canonical positive-natural bound for a sequence, derived from an integer
-- upper bound on |s pone|; canon_bound shows it bounds every |s n|.
private definition K (s : seq) : ℕ+ := pnat.pos (ubound (abs (s pone)) + 1 + 1) dec_trivial
-- Every regular sequence is uniformly bounded by K s: regularity gives
-- |s n - s pone| ≤ n⁻¹ + 1 ≤ 2, so |s n| ≤ |s pone| + 2 ≤ rat_of_pnat (K s).
private theorem canon_bound {s : seq} (Hs : regular s) (n : ℕ+) : abs (s n) ≤ rat_of_pnat (K s) :=
calc
abs (s n) = abs (s n - s pone + s pone) : by rewrite sub_add_cancel
... ≤ abs (s n - s pone) + abs (s pone) : abs_add_le_abs_add_abs
... ≤ n⁻¹ + pone⁻¹ + abs (s pone) : add_le_add_right !Hs
... = n⁻¹ + (1 + abs (s pone)) : by rewrite [pone_inv, rat.add_assoc]
... ≤ 1 + (1 + abs (s pone)) : add_le_add_right (inv_le_one n)
... = abs (s pone) + (1 + 1) :
by rewrite [add.comm 1 (abs (s pone)), add.comm 1, rat.add_assoc]
... ≤ of_nat (ubound (abs (s pone))) + (1 + 1) : add_le_add_right (!ubound_ge)
... = of_nat (ubound (abs (s pone)) + (1 + 1)) : of_nat_add
... = of_nat (ubound (abs (s pone)) + 1 + 1) : add.assoc
... = rat_of_pnat (K s) : by esimp
theorem bdd_of_regular {s : seq} (H : regular s) : ∃ b : ℚ, ∀ n : ℕ+, s n ≤ b :=
begin
existsi rat_of_pnat (K s),
intros,
apply rat.le_trans,
apply le_abs_self,
apply canon_bound H
end
theorem bdd_of_regular_strict {s : seq} (H : regular s) : ∃ b : ℚ, ∀ n : ℕ+, s n < b :=
begin
cases bdd_of_regular H with [b, Hb],
existsi b + 1,
intro n,
apply rat.lt_of_le_of_lt,
apply Hb,
apply lt_add_of_pos_right,
apply zero_lt_one
end
-- Joint bound for two sequences: the larger of their canonical bounds.
definition K₂ (s t : seq) := max (K s) (K t)
private theorem K₂_symm (s t : seq) : K₂ s t = K₂ t s :=
!max.comm
theorem canon_2_bound_left (s t : seq) (Hs : regular s) (n : ℕ+) :
abs (s n) ≤ rat_of_pnat (K₂ s t) :=
calc
abs (s n) ≤ rat_of_pnat (K s) : canon_bound Hs n
... ≤ rat_of_pnat (K₂ s t) : rat_of_pnat_le_of_pnat_le (!le_max_left)
theorem canon_2_bound_right (s t : seq) (Ht : regular t) (n : ℕ+) :
abs (t n) ≤ rat_of_pnat (K₂ s t) :=
calc
abs (t n) ≤ rat_of_pnat (K t) : canon_bound Ht n
... ≤ rat_of_pnat (K₂ s t) : rat_of_pnat_le_of_pnat_le (!le_max_right)
-- Pointwise sum of sequences, sampled at index 2 * n so the two halves of the
-- regularity bound (2n)⁻¹ + (2n)⁻¹ recombine to n⁻¹ (see reg_add_reg).
definition sadd (s t : seq) : seq := λ n, (s (2 * n)) + (t (2 * n))
theorem reg_add_reg {s t : seq} (Hs : regular s) (Ht : regular t) : regular (sadd s t) :=
begin
rewrite [↑regular at *, ↑sadd],
intros,
rewrite add_sub_comm,
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
rewrite add_halves_double,
apply add_le_add,
apply Hs,
apply Ht
end
-- Product of sequences, sampled at index K₂ s t * 2 * n: the extra factor of
-- the joint bound K₂ compensates for the bounds on |s| and |t| when proving
-- the product regular (see reg_mul_reg and ineq_helper).
definition smul (s t : seq) : seq := λ n : ℕ+, (s ((K₂ s t) * 2 * n)) * (t ((K₂ s t) * 2 * n))
theorem reg_mul_reg {s t : seq} (Hs : regular s) (Ht : regular t) : regular (smul s t) :=
begin
rewrite [↑regular at *, ↑smul],
intros,
rewrite rewrite_helper,
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply rat.le_trans,
apply add_le_add,
rewrite abs_mul,
apply mul_le_mul_of_nonneg_right,
apply canon_2_bound_left s t Hs,
apply abs_nonneg,
rewrite abs_mul,
apply mul_le_mul_of_nonneg_left,
apply canon_2_bound_right s t Ht,
apply abs_nonneg,
apply ineq_helper,
apply Ht,
apply Hs
end
-- Pointwise negation of a sequence.
definition sneg (s : seq) : seq := λ n : ℕ+, - (s n)
theorem reg_neg_reg {s : seq} (Hs : regular s) : regular (sneg s) :=
begin
rewrite [↑regular at *, ↑sneg],
intros,
rewrite [-abs_neg, neg_sub, sub_neg_eq_add, add.comm],
apply Hs
end
-----------------------------------
-- show properties of +, *, -
-- The constant-zero sequence; additive identity for sadd up to ≡.
definition zero : seq := λ n, 0
-- The constant-one sequence; multiplicative identity for smul up to ≡.
definition one : seq := λ n, 1
theorem s_add_comm (s t : seq) : sadd s t ≡ sadd t s :=
begin
esimp [sadd],
intro n,
rewrite [sub_add_eq_sub_sub, add_sub_cancel, sub_self, abs_zero],
apply add_invs_nonneg
end
theorem s_add_assoc (s t u : seq) (Hs : regular s) (Hu : regular u) :
sadd (sadd s t) u ≡ sadd s (sadd t u) :=
begin
rewrite [↑sadd, ↑equiv, ↑regular at *],
intros,
rewrite factor_lemma,
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply rat.le_trans,
rotate 1,
apply add_le_add_right,
apply inv_two_mul_le_inv,
rewrite [-(pnat.add_halves (2 * n)), -(pnat.add_halves n), factor_lemma_2],
apply add_le_add,
apply Hs,
apply Hu
end
theorem s_mul_comm (s t : seq) : smul s t ≡ smul t s :=
begin
rewrite ↑smul,
intros n,
rewrite [*(K₂_symm s t), rat.mul_comm, sub_self, abs_zero],
apply add_invs_nonneg
end
private definition DK (s t : seq) := (K₂ s t) * 2
private theorem DK_rewrite (s t : seq) : (K₂ s t) * 2 = DK s t := rfl
private definition TK (s t u : seq) := (DK (λ (n : ℕ+), s (mul (DK s t) n) * t (mul (DK s t) n)) u)
private theorem TK_rewrite (s t u : seq) :
(DK (λ (n : ℕ+), s (mul (DK s t) n) * t (mul (DK s t) n)) u) = TK s t u := rfl
private theorem s_mul_assoc_lemma (s t u : seq) (a b c d : ℕ+) :
abs (s a * t a * u b - s c * t d * u d) ≤ abs (t a) * abs (u b) * abs (s a - s c) +
abs (s c) * abs (t a) * abs (u b - u d) + abs (s c) * abs (u d) * abs (t a - t d) :=
begin
rewrite (rewrite_helper7 _ _ _ _ (s c)),
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
rewrite rat.add_assoc,
apply add_le_add,
rewrite 2 abs_mul,
apply le.refl,
rewrite [*rat.mul_assoc, -mul_sub_left_distrib, -left_distrib, abs_mul],
apply mul_le_mul_of_nonneg_left,
rewrite rewrite_helper,
apply le.trans,
apply abs_add_le_abs_add_abs,
apply add_le_add,
rewrite abs_mul, apply rat.le_refl,
rewrite [abs_mul, rat.mul_comm], apply rat.le_refl,
apply abs_nonneg
end
-- Strict rational bound: one more than the canonical bound, so that it is
-- both positive (Kq_bound_pos) and strictly above every |s n|.
private definition Kq (s : seq) := rat_of_pnat (K s) + 1
private theorem Kq_bound {s : seq} (H : regular s) : ∀ n, abs (s n) ≤ Kq s :=
begin
intros,
apply le_of_lt,
apply lt_of_le_of_lt,
apply canon_bound H,
apply lt_add_of_pos_right,
apply zero_lt_one
end
private theorem Kq_bound_nonneg {s : seq} (H : regular s) : 0 ≤ Kq s :=
le.trans !abs_nonneg (Kq_bound H 2)
private theorem Kq_bound_pos {s : seq} (H : regular s) : 0 < Kq s :=
have H1 : 0 ≤ rat_of_pnat (K s), from rat.le_trans (!abs_nonneg) (canon_bound H 2),
add_pos_of_nonneg_of_pos H1 rat.zero_lt_one
private theorem s_mul_assoc_lemma_5 {s t u : seq} (Hs : regular s) (Ht : regular t) (Hu : regular u)
(a b c : ℕ+) : abs (t a) * abs (u b) * abs (s a - s c) ≤ (Kq t) * (Kq u) * (a⁻¹ + c⁻¹) :=
begin
repeat apply mul_le_mul,
apply Kq_bound Ht,
apply Kq_bound Hu,
apply abs_nonneg,
apply Kq_bound_nonneg Ht,
apply Hs,
apply abs_nonneg,
apply rat.mul_nonneg,
apply Kq_bound_nonneg Ht,
apply Kq_bound_nonneg Hu,
end
private theorem s_mul_assoc_lemma_2 {s t u : seq} (Hs : regular s) (Ht : regular t)
(Hu : regular u) (a b c d : ℕ+) :
abs (t a) * abs (u b) * abs (s a - s c) + abs (s c) * abs (t a) * abs (u b - u d)
+ abs (s c) * abs (u d) * abs (t a - t d) ≤
(Kq t) * (Kq u) * (a⁻¹ + c⁻¹) + (Kq s) * (Kq t) * (b⁻¹ + d⁻¹) + (Kq s) * (Kq u) * (a⁻¹ + d⁻¹) :=
begin
apply add_le_add_three,
repeat (assumption | apply mul_le_mul | apply Kq_bound | apply Kq_bound_nonneg |
apply abs_nonneg),
apply Hs,
apply abs_nonneg,
apply rat.mul_nonneg,
repeat (assumption | apply mul_le_mul | apply Kq_bound | apply Kq_bound_nonneg |
apply abs_nonneg),
apply Hu,
apply abs_nonneg,
apply rat.mul_nonneg,
repeat (assumption | apply mul_le_mul | apply Kq_bound | apply Kq_bound_nonneg |
apply abs_nonneg),
apply Ht,
apply abs_nonneg,
apply rat.mul_nonneg,
repeat (apply Kq_bound_nonneg; assumption)
end
theorem s_mul_assoc {s t u : seq} (Hs : regular s) (Ht : regular t) (Hu : regular u) :
smul (smul s t) u ≡ smul s (smul t u) :=
begin
apply eq_of_bdd_var,
repeat apply reg_mul_reg,
apply Hs,
apply Ht,
apply Hu,
apply reg_mul_reg Hs,
apply reg_mul_reg Ht Hu,
intros,
apply exists.intro,
intros,
rewrite [↑smul, *DK_rewrite, *TK_rewrite, -*pnat.mul_assoc, -*mul.assoc],
apply rat.le_trans,
apply s_mul_assoc_lemma,
apply rat.le_trans,
apply s_mul_assoc_lemma_2,
apply Hs,
apply Ht,
apply Hu,
rewrite [*s_mul_assoc_lemma_3, -distrib_three_right],
apply s_mul_assoc_lemma_4,
apply a,
repeat apply add_pos,
repeat apply mul_pos,
apply Kq_bound_pos Ht,
apply Kq_bound_pos Hu,
apply add_pos,
repeat apply pnat.inv_pos,
repeat apply rat.mul_pos,
apply Kq_bound_pos Hs,
apply Kq_bound_pos Ht,
apply add_pos,
repeat apply pnat.inv_pos,
repeat apply rat.mul_pos,
apply Kq_bound_pos Hs,
apply Kq_bound_pos Hu,
apply add_pos,
repeat apply pnat.inv_pos,
apply a_1
end
theorem zero_is_reg : regular zero :=
begin
rewrite [↑regular, ↑zero],
intros,
rewrite [sub_zero, abs_zero],
apply add_invs_nonneg
end
theorem s_zero_add (s : seq) (H : regular s) : sadd zero s ≡ s :=
begin
rewrite [↑sadd, ↑zero, ↑equiv, ↑regular at H],
intros,
rewrite [rat.zero_add],
apply rat.le_trans,
apply H,
apply add_le_add,
apply inv_two_mul_le_inv,
apply rat.le_refl
end
theorem s_add_zero (s : seq) (H : regular s) : sadd s zero ≡ s :=
begin
rewrite [↑sadd, ↑zero, ↑equiv, ↑regular at H],
intros,
rewrite [rat.add_zero],
apply rat.le_trans,
apply H,
apply add_le_add,
apply inv_two_mul_le_inv,
apply rat.le_refl
end
theorem s_neg_cancel (s : seq) (H : regular s) : sadd (sneg s) s ≡ zero :=
begin
rewrite [↑sadd, ↑sneg, ↑regular at H, ↑zero, ↑equiv],
intros,
rewrite [neg_add_eq_sub, sub_self, sub_zero, abs_zero],
apply add_invs_nonneg
end
theorem neg_s_cancel (s : seq) (H : regular s) : sadd s (sneg s) ≡ zero :=
begin
apply equiv.trans,
rotate 3,
apply s_add_comm,
apply s_neg_cancel s H,
repeat (apply reg_add_reg | apply reg_neg_reg | assumption),
apply zero_is_reg
end
theorem add_well_defined {s t u v : seq} (Hs : regular s) (Ht : regular t) (Hu : regular u)
(Hv : regular v) (Esu : s ≡ u) (Etv : t ≡ v) : sadd s t ≡ sadd u v :=
begin
rewrite [↑sadd, ↑equiv at *],
intros,
rewrite [add_sub_comm, add_halves_double],
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply add_le_add,
apply Esu,
apply Etv
end
set_option tactic.goal_names false
private theorem mul_bound_helper {s t : seq} (Hs : regular s) (Ht : regular t) (a b c : ℕ+)
(j : ℕ+) :
∃ N : ℕ+, ∀ n : ℕ+, n ≥ N → abs (s (a * n) * t (b * n) - s (c * n) * t (c * n)) ≤ j⁻¹ :=
begin
existsi pceil (((rat_of_pnat (K s)) * (b⁻¹ + c⁻¹) + (a⁻¹ + c⁻¹) *
(rat_of_pnat (K t))) * (rat_of_pnat j)),
intros n Hn,
rewrite rewrite_helper4,
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply rat.le_trans,
rotate 1,
show n⁻¹ * ((rat_of_pnat (K s)) * (b⁻¹ + c⁻¹)) +
n⁻¹ * ((a⁻¹ + c⁻¹) * (rat_of_pnat (K t))) ≤ j⁻¹, begin
rewrite -left_distrib,
apply rat.le_trans,
apply mul_le_mul_of_nonneg_right,
apply pceil_helper Hn,
{ repeat (apply mul_pos | apply add_pos | apply rat_of_pnat_is_pos |
apply pnat.inv_pos) },
apply rat.le_of_lt,
apply add_pos,
apply rat.mul_pos,
apply rat_of_pnat_is_pos,
apply add_pos,
apply pnat.inv_pos,
apply pnat.inv_pos,
apply rat.mul_pos,
apply add_pos,
apply pnat.inv_pos,
apply pnat.inv_pos,
apply rat_of_pnat_is_pos,
have H : (rat_of_pnat (K s) * (b⁻¹ + c⁻¹) + (a⁻¹ + c⁻¹) * rat_of_pnat (K t)) ≠ 0, begin
apply ne_of_gt,
repeat (apply mul_pos | apply add_pos | apply rat_of_pnat_is_pos | apply pnat.inv_pos),
end,
rewrite (!div_helper H),
apply rat.le_refl
end,
apply add_le_add,
rewrite [-mul_sub_left_distrib, abs_mul],
apply rat.le_trans,
apply mul_le_mul,
apply canon_bound,
apply Hs,
apply Ht,
apply abs_nonneg,
apply rat.le_of_lt,
apply rat_of_pnat_is_pos,
rewrite [*pnat.inv_mul_eq_mul_inv, -right_distrib, -rat.mul_assoc, rat.mul_comm],
apply mul_le_mul_of_nonneg_left,
apply rat.le_refl,
apply rat.le_of_lt,
apply pnat.inv_pos,
rewrite [-mul_sub_right_distrib, abs_mul],
apply rat.le_trans,
apply mul_le_mul,
apply Hs,
apply canon_bound,
apply Ht,
apply abs_nonneg,
apply add_invs_nonneg,
rewrite [*pnat.inv_mul_eq_mul_inv, -right_distrib, mul.comm _ n⁻¹, rat.mul_assoc],
apply mul_le_mul,
repeat apply rat.le_refl,
apply rat.le_of_lt,
apply rat.mul_pos,
apply add_pos,
repeat apply pnat.inv_pos,
apply rat_of_pnat_is_pos,
apply rat.le_of_lt,
apply pnat.inv_pos
end
theorem s_distrib {s t u : seq} (Hs : regular s) (Ht : regular t) (Hu : regular u) :
smul s (sadd t u) ≡ sadd (smul s t) (smul s u) :=
begin
apply eq_of_bdd,
repeat (assumption | apply reg_add_reg | apply reg_mul_reg),
intros,
let exh1 := λ a b c, mul_bound_helper Hs Ht a b c (2 * j),
apply exists.elim,
apply exh1,
rotate 3,
intros N1 HN1,
let exh2 := λ d e f, mul_bound_helper Hs Hu d e f (2 * j),
apply exists.elim,
apply exh2,
rotate 3,
intros N2 HN2,
existsi max N1 N2,
intros n Hn,
rewrite [↑sadd at *, ↑smul, rewrite_helper3, -pnat.add_halves j, -*pnat.mul_assoc at *],
apply rat.le_trans,
apply abs_add_le_abs_add_abs,
apply add_le_add,
apply HN1,
apply le.trans,
apply le_max_left N1 N2,
apply Hn,
apply HN2,
apply le.trans,
apply le_max_right N1 N2,
apply Hn
end
theorem mul_zero_equiv_zero {s t : seq} (Hs : regular s) (Ht : regular t) (Htz : t ≡ zero) :
smul s t ≡ zero :=
begin
apply eq_of_bdd_var,
apply reg_mul_reg Hs Ht,
apply zero_is_reg,
intro ε Hε,
let Bd := bdd_of_eq_var Ht zero_is_reg Htz (ε / (Kq s))
(div_pos_of_pos_of_pos Hε (Kq_bound_pos Hs)),
cases Bd with [N, HN],
existsi N,
intro n Hn,
rewrite [↑equiv at Htz, ↑zero at *, sub_zero, ↑smul, abs_mul],
apply le.trans,
apply mul_le_mul,
apply Kq_bound Hs,
have HN' : ∀ (n : ℕ+), N ≤ n → abs (t n) ≤ ε / Kq s,
from λ n, (eq.subst (sub_zero (t n)) (HN n)),
apply HN',
apply le.trans Hn,
apply pnat.mul_le_mul_left,
apply abs_nonneg,
apply le_of_lt (Kq_bound_pos Hs),
rewrite (mul_div_cancel' (ne.symm (ne_of_lt (Kq_bound_pos Hs)))),
apply le.refl
end
private theorem neg_bound_eq_bound (s : seq) : K (sneg s) = K s :=
by rewrite [↑K, ↑sneg, abs_neg]
private theorem neg_bound2_eq_bound2 (s t : seq) : K₂ s (sneg t) = K₂ s t :=
by rewrite [↑K₂, neg_bound_eq_bound]
private theorem sneg_def (s : seq) : (λ (n : ℕ+), -(s n)) = sneg s := rfl
theorem mul_neg_equiv_neg_mul {s t : seq} : smul s (sneg t) ≡ sneg (smul s t) :=
begin
rewrite [↑equiv, ↑smul],
intros,
rewrite [↑sneg, *sub_neg_eq_add, -neg_mul_eq_mul_neg, add.comm],
rewrite [*sneg_def t, *neg_bound2_eq_bound2, add.right_inv, abs_zero],
apply add_invs_nonneg
end
theorem equiv_of_diff_equiv_zero {s t : seq} (Hs : regular s) (Ht : regular t)
(H : sadd s (sneg t) ≡ zero) : s ≡ t :=
begin
have hsimp : ∀ a b c d e : ℚ, a + b + c + (d + e) = b + d + a + e + c, from
λ a b c d e, calc
a + b + c + (d + e) = a + b + (d + e) + c : add.right_comm
... = a + (b + d) + e + c : by rewrite[-*add.assoc]
... = b + d + a + e + c : add.comm,
apply eq_of_bdd Hs Ht,
intros,
note He := bdd_of_eq H,
existsi 2 * (2 * (2 * j)),
intros n Hn,
rewrite (rewrite_helper5 _ _ (s (2 * n)) (t (2 * n))),
apply rat.le_trans,
apply abs_add_three,
apply rat.le_trans,
apply add_le_add_three,
apply Hs,
rewrite [↑sadd at He, ↑sneg at He, ↑zero at He],
let He' := λ a b c, eq.subst !sub_zero (He a b c),
apply (He' _ _ Hn),
apply Ht,
rewrite [hsimp, pnat.add_halves, -(pnat.add_halves j), -(pnat.add_halves (2 * j)), -*rat.add_assoc],
apply add_le_add_right,
apply add_le_add_three,
repeat (apply rat.le_trans; apply inv_ge_of_le Hn; apply inv_two_mul_le_inv)
end
theorem s_sub_cancel (s : seq) : sadd s (sneg s) ≡ zero :=
begin
rewrite [↑equiv, ↑sadd, ↑sneg, ↑zero],
intros,
rewrite [sub_zero, add.right_inv, abs_zero],
apply add_invs_nonneg
end
theorem diff_equiv_zero_of_equiv {s t : seq} (Hs : regular s) (Ht : regular t) (H : s ≡ t) :
sadd s (sneg t) ≡ zero :=
begin
apply equiv.trans,
rotate 4,
apply s_sub_cancel t,
rotate 2,
apply zero_is_reg,
apply add_well_defined,
repeat (assumption | apply reg_neg_reg),
apply equiv.refl,
repeat (assumption | apply reg_add_reg | apply reg_neg_reg)
end
private theorem mul_well_defined_half1 {s t u : seq} (Hs : regular s) (Ht : regular t)
(Hu : regular u) (Etu : t ≡ u) : smul s t ≡ smul s u :=
begin
apply equiv_of_diff_equiv_zero,
rotate 2,
apply equiv.trans,
rotate 3,
apply equiv.symm,
apply add_well_defined,
rotate 4,
apply equiv.refl,
apply mul_neg_equiv_neg_mul,
apply equiv.trans,
rotate 3,
apply equiv.symm,
apply s_distrib,
rotate 3,
apply mul_zero_equiv_zero,
rotate 2,
apply diff_equiv_zero_of_equiv,
repeat (assumption | apply reg_mul_reg | apply reg_neg_reg | apply reg_add_reg |
apply zero_is_reg)
end
private theorem mul_well_defined_half2 {s t u : seq} (Hs : regular s) (Ht : regular t)
(Hu : regular u) (Est : s ≡ t) : smul s u ≡ smul t u :=
begin
apply equiv.trans,
rotate 3,
apply s_mul_comm,
apply equiv.trans,
rotate 3,
apply mul_well_defined_half1,
rotate 2,
apply Ht,
rotate 1,
apply s_mul_comm,
repeat (assumption | apply reg_mul_reg)
end
theorem mul_well_defined {s t u v : seq} (Hs : regular s) (Ht : regular t) (Hu : regular u)
(Hv : regular v) (Esu : s ≡ u) (Etv : t ≡ v) : smul s t ≡ smul u v :=
begin
apply equiv.trans,
exact reg_mul_reg Hs Ht,
exact reg_mul_reg Hs Hv,
exact reg_mul_reg Hu Hv,
apply mul_well_defined_half1,
repeat assumption,
apply mul_well_defined_half2,
repeat assumption
end
theorem neg_well_defined {s t : seq} (Est : s ≡ t) : sneg s ≡ sneg t :=
begin
rewrite [↑sneg, ↑equiv at *],
intros,
rewrite [-abs_neg, neg_sub, sub_neg_eq_add, add.comm],
apply Est
end
theorem one_is_reg : regular one :=
begin
rewrite [↑regular, ↑one],
intros,
rewrite [sub_self, abs_zero],
apply add_invs_nonneg
end
theorem s_one_mul {s : seq} (H : regular s) : smul one s ≡ s :=
begin
intros,
rewrite [↑smul, ↑one, rat.one_mul],
apply rat.le_trans,
apply H,
apply add_le_add_right,
apply pnat.inv_mul_le_inv
end
theorem s_mul_one {s : seq} (H : regular s) : smul s one ≡ s :=
begin
apply equiv.trans,
apply reg_mul_reg H one_is_reg,
rotate 2,
apply s_mul_comm,
apply s_one_mul H,
apply reg_mul_reg one_is_reg H,
apply H
end
theorem zero_nequiv_one : ¬ zero ≡ one :=
begin
intro Hz,
rewrite [↑equiv at Hz, ↑zero at Hz, ↑one at Hz],
note H := Hz (2 * 2),
rewrite [zero_sub at H, abs_neg at H, pnat.add_halves at H],
have H' : pone⁻¹ ≤ 2⁻¹, from calc
pone⁻¹ = 1 : by rewrite -pone_inv
... = abs 1 : abs_of_pos zero_lt_one
... ≤ 2⁻¹ : H,
let H'' := ge_of_inv_le H',
apply absurd (one_lt_two) (not_lt_of_ge H'')
end
---------------------------------------------
-- constant sequences
-- The constant sequence at a; embeds ℚ into the regular sequences.
definition const (a : ℚ) : seq := λ n, a
theorem const_reg (a : ℚ) : regular (const a) :=
begin
intros,
rewrite [↑const, sub_self, abs_zero],
apply add_invs_nonneg
end
theorem add_consts (a b : ℚ) : sadd (const a) (const b) ≡ const (a + b) :=
by apply equiv.refl
theorem mul_consts (a b : ℚ) : smul (const a) (const b) ≡ const (a * b) :=
by apply equiv.refl
theorem neg_const (a : ℚ) : sneg (const a) ≡ const (-a) :=
by apply equiv.refl
section
open rat
lemma eq_of_const_equiv {a b : ℚ} (H : const a ≡ const b) : a = b :=
have H₁ : ∀ n : ℕ+, abs (a - b) ≤ n⁻¹ + n⁻¹, from H,
eq_of_forall_abs_sub_le
(take ε,
suppose ε > 0,
have ε / 2 > 0, begin exact div_pos_of_pos_of_pos this two_pos end,
obtain n (Hn : n⁻¹ ≤ ε / 2), from pnat_bound this,
show abs (a - b) ≤ ε, from calc
abs (a - b) ≤ n⁻¹ + n⁻¹ : H₁ n
... ≤ ε / 2 + ε / 2 : add_le_add Hn Hn
... = ε : add_halves)
end
---------------------------------------------
-- create the type of regular sequences and lift theorems
-- A sequence packaged with its proof of regularity; ℝ is the quotient of
-- this type by requiv.
record reg_seq : Type :=
(sq : seq) (is_reg : regular sq)
-- Equivalence of regular sequences: ≡ on the underlying sequences.
definition requiv (s t : reg_seq) := (reg_seq.sq s) ≡ (reg_seq.sq t)
definition requiv.refl (s : reg_seq) : requiv s s := equiv.refl (reg_seq.sq s)
definition requiv.symm (s t : reg_seq) (H : requiv s t) : requiv t s :=
equiv.symm (reg_seq.sq s) (reg_seq.sq t) H
definition requiv.trans (s t u : reg_seq) (H : requiv s t) (H2 : requiv t u) : requiv s u :=
equiv.trans _ _ _ (reg_seq.is_reg s) (reg_seq.is_reg t) (reg_seq.is_reg u) H H2
definition radd (s t : reg_seq) : reg_seq :=
reg_seq.mk (sadd (reg_seq.sq s) (reg_seq.sq t))
(reg_add_reg (reg_seq.is_reg s) (reg_seq.is_reg t))
infix + := radd
definition rmul (s t : reg_seq) : reg_seq :=
reg_seq.mk (smul (reg_seq.sq s) (reg_seq.sq t))
(reg_mul_reg (reg_seq.is_reg s) (reg_seq.is_reg t))
infix * := rmul
definition rneg (s : reg_seq) : reg_seq :=
reg_seq.mk (sneg (reg_seq.sq s)) (reg_neg_reg (reg_seq.is_reg s))
prefix - := rneg
definition radd_well_defined {s t u v : reg_seq} (H : requiv s u) (H2 : requiv t v) :
requiv (s + t) (u + v) :=
add_well_defined (reg_seq.is_reg s) (reg_seq.is_reg t) (reg_seq.is_reg u) (reg_seq.is_reg v) H H2
definition rmul_well_defined {s t u v : reg_seq} (H : requiv s u) (H2 : requiv t v) :
requiv (s * t) (u * v) :=
mul_well_defined (reg_seq.is_reg s) (reg_seq.is_reg t) (reg_seq.is_reg u) (reg_seq.is_reg v) H H2
definition rneg_well_defined {s t : reg_seq} (H : requiv s t) : requiv (-s) (-t) :=
neg_well_defined H
theorem requiv_is_equiv : equivalence requiv :=
mk_equivalence requiv requiv.refl requiv.symm requiv.trans
definition reg_seq.to_setoid [instance] : setoid reg_seq :=
⦃setoid, r := requiv, iseqv := requiv_is_equiv⦄
definition r_zero : reg_seq :=
reg_seq.mk (zero) (zero_is_reg)
definition r_one : reg_seq :=
reg_seq.mk (one) (one_is_reg)
theorem r_add_comm (s t : reg_seq) : requiv (s + t) (t + s) :=
s_add_comm (reg_seq.sq s) (reg_seq.sq t)
theorem r_add_assoc (s t u : reg_seq) : requiv (s + t + u) (s + (t + u)) :=
s_add_assoc (reg_seq.sq s) (reg_seq.sq t) (reg_seq.sq u) (reg_seq.is_reg s) (reg_seq.is_reg u)
theorem r_zero_add (s : reg_seq) : requiv (r_zero + s) s :=
s_zero_add (reg_seq.sq s) (reg_seq.is_reg s)
theorem r_add_zero (s : reg_seq) : requiv (s + r_zero) s :=
s_add_zero (reg_seq.sq s) (reg_seq.is_reg s)
theorem r_neg_cancel (s : reg_seq) : requiv (-s + s) r_zero :=
s_neg_cancel (reg_seq.sq s) (reg_seq.is_reg s)
theorem r_mul_comm (s t : reg_seq) : requiv (s * t) (t * s) :=
s_mul_comm (reg_seq.sq s) (reg_seq.sq t)
theorem r_mul_assoc (s t u : reg_seq) : requiv (s * t * u) (s * (t * u)) :=
s_mul_assoc (reg_seq.is_reg s) (reg_seq.is_reg t) (reg_seq.is_reg u)
theorem r_mul_one (s : reg_seq) : requiv (s * r_one) s :=
s_mul_one (reg_seq.is_reg s)
theorem r_one_mul (s : reg_seq) : requiv (r_one * s) s :=
s_one_mul (reg_seq.is_reg s)
theorem r_distrib (s t u : reg_seq) : requiv (s * (t + u)) (s * t + s * u) :=
s_distrib (reg_seq.is_reg s) (reg_seq.is_reg t) (reg_seq.is_reg u)
theorem r_zero_nequiv_one : ¬ requiv r_zero r_one :=
zero_nequiv_one
definition r_const (a : ℚ) : reg_seq := reg_seq.mk (const a) (const_reg a)
theorem r_add_consts (a b : ℚ) : requiv (r_const a + r_const b) (r_const (a + b)) := add_consts a b
theorem r_mul_consts (a b : ℚ) : requiv (r_const a * r_const b) (r_const (a * b)) := mul_consts a b
theorem r_neg_const (a : ℚ) : requiv (-r_const a) (r_const (-a)) := neg_const a
end rat_seq
----------------------------------------------
-- take quotients to get ℝ and show it's a comm ring
open rat_seq
-- The real numbers: regular Cauchy sequences of rationals modulo requiv.
definition real := quot reg_seq.to_setoid
namespace real
notation `ℝ` := real
protected definition prio := num.pred rat.prio
protected definition add (x y : ℝ) : ℝ :=
(quot.lift_on₂ x y (λ a b, quot.mk (a + b))
(take a b c d : reg_seq, take Hab : requiv a c, take Hcd : requiv b d,
quot.sound (radd_well_defined Hab Hcd)))
--infix [priority real.prio] + := add
protected definition mul (x y : ℝ) : ℝ :=
(quot.lift_on₂ x y (λ a b, quot.mk (a * b))
(take a b c d : reg_seq, take Hab : requiv a c, take Hcd : requiv b d,
quot.sound (rmul_well_defined Hab Hcd)))
--infix [priority real.prio] * := mul
protected definition neg (x : ℝ) : ℝ :=
(quot.lift_on x (λ a, quot.mk (-a)) (take a b : reg_seq, take Hab : requiv a b,
quot.sound (rneg_well_defined Hab)))
--prefix [priority real.prio] `-` := neg
definition real_has_add [instance] [priority real.prio] : has_add real :=
has_add.mk real.add
definition real_has_mul [instance] [priority real.prio] : has_mul real :=
has_mul.mk real.mul
definition real_has_neg [instance] [priority real.prio] : has_neg real :=
has_neg.mk real.neg
protected definition sub [reducible] (a b : ℝ) : real := a + (-b)
definition real_has_sub [instance] [priority real.prio] : has_sub real :=
has_sub.mk real.sub
open rat -- no coercions before
definition of_rat [coercion] (a : ℚ) : ℝ := quot.mk (r_const a)
definition of_int [coercion] (i : ℤ) : ℝ := i
definition of_nat [coercion] (n : ℕ) : ℝ := n
definition of_num [coercion] [reducible] (n : num) : ℝ := of_rat (rat.of_num n)
definition real_has_zero [reducible] : has_zero real := has_zero.mk (of_rat 0)
local attribute real_has_zero [instance] [priority real.prio]
definition real_has_one [reducible] : has_one real := has_one.mk (of_rat 1)
local attribute real_has_one [instance] [priority real.prio]
theorem real_zero_eq_rat_zero : (0:real) = of_rat (0:rat) :=
rfl
theorem real_one_eq_rat_one : (1:real) = of_rat (1:rat) :=
rfl
-- the commutative-ring axioms hold in ℝ: each proof descends to
-- representatives via quot.induction_on and applies the reg_seq lemma
protected theorem add_comm (x y : ℝ) : x + y = y + x :=
quot.induction_on₂ x y (λ s t, quot.sound (r_add_comm s t))
protected theorem add_assoc (x y z : ℝ) : x + y + z = x + (y + z) :=
quot.induction_on₃ x y z (λ s t u, quot.sound (r_add_assoc s t u))
protected theorem zero_add (x : ℝ) : 0 + x = x :=
quot.induction_on x (λ s, quot.sound (r_zero_add s))
protected theorem add_zero (x : ℝ) : x + 0 = x :=
quot.induction_on x (λ s, quot.sound (r_add_zero s))
protected theorem neg_cancel (x : ℝ) : -x + x = 0 :=
quot.induction_on x (λ s, quot.sound (r_neg_cancel s))
protected theorem mul_assoc (x y z : ℝ) : x * y * z = x * (y * z) :=
quot.induction_on₃ x y z (λ s t u, quot.sound (r_mul_assoc s t u))
protected theorem mul_comm (x y : ℝ) : x * y = y * x :=
quot.induction_on₂ x y (λ s t, quot.sound (r_mul_comm s t))
protected theorem one_mul (x : ℝ) : 1 * x = x :=
quot.induction_on x (λ s, quot.sound (r_one_mul s))
protected theorem mul_one (x : ℝ) : x * 1 = x :=
quot.induction_on x (λ s, quot.sound (r_mul_one s))
protected theorem left_distrib (x y z : ℝ) : x * (y + z) = x * y + x * z :=
quot.induction_on₃ x y z (λ s t u, quot.sound (r_distrib s t u))
-- right distributivity follows from the left one by commutativity
protected theorem right_distrib (x y z : ℝ) : (x + y) * z = x * z + y * z :=
by rewrite [real.mul_comm, real.left_distrib, {x * _}real.mul_comm, {y * _}real.mul_comm]
-- nontriviality, transported from the sequence level via quot.exact
protected theorem zero_ne_one : ¬ (0 : ℝ) = 1 :=
take H : 0 = 1,
absurd (quot.exact H) (r_zero_nequiv_one)
-- assemble the theorems above into the comm_ring structure on ℝ
protected definition comm_ring [reducible] : comm_ring ℝ :=
begin
fapply comm_ring.mk,
exact real.add,
exact real.add_assoc,
exact 0,
exact real.zero_add,
exact real.add_zero,
exact real.neg,
exact real.neg_cancel,
exact real.add_comm,
exact real.mul,
exact real.mul_assoc,
apply 1,
apply real.one_mul,
apply real.mul_one,
apply real.left_distrib,
apply real.right_distrib,
apply real.mul_comm
end
-- the ℤ and ℕ coercions factor definitionally through the ℚ coercion
theorem of_int_eq (a : ℤ) : of_int a = of_rat (rat.of_int a) := rfl
theorem of_nat_eq (a : ℕ) : of_nat a = of_rat (rat.of_nat a) := rfl
-- the coercions ℚ → ℝ, ℤ → ℝ, ℕ → ℝ are injective
theorem of_rat.inj {x y : ℚ} (H : of_rat x = of_rat y) : x = y :=
eq_of_const_equiv (quot.exact H)
theorem eq_of_of_rat_eq_of_rat {x y : ℚ} (H : of_rat x = of_rat y) : x = y :=
of_rat.inj H
theorem of_rat_eq_of_rat_iff (x y : ℚ) : of_rat x = of_rat y ↔ x = y :=
iff.intro eq_of_of_rat_eq_of_rat !congr_arg
theorem of_int.inj {a b : ℤ} (H : of_int a = of_int b) : a = b :=
rat.of_int.inj (of_rat.inj H)
theorem eq_of_of_int_eq_of_int {a b : ℤ} (H : of_int a = of_int b) : a = b :=
of_int.inj H
theorem of_int_eq_of_int_iff (a b : ℤ) : of_int a = of_int b ↔ a = b :=
iff.intro of_int.inj !congr_arg
theorem of_nat.inj {a b : ℕ} (H : of_nat a = of_nat b) : a = b :=
int.of_nat.inj (of_int.inj H)
theorem eq_of_of_nat_eq_of_nat {a b : ℕ} (H : of_nat a = of_nat b) : a = b :=
of_nat.inj H
theorem of_nat_eq_of_nat_iff (a b : ℕ) : of_nat a = of_nat b ↔ a = b :=
iff.intro of_nat.inj !congr_arg
-- of_rat is a ring homomorphism: it commutes with +, - and *
theorem of_rat_add (a b : ℚ) : of_rat (a + b) = of_rat a + of_rat b :=
quot.sound (r_add_consts a b)
theorem of_rat_neg (a : ℚ) : of_rat (-a) = -of_rat a :=
eq.symm (quot.sound (r_neg_const a))
theorem of_rat_mul (a b : ℚ) : of_rat (a * b) = of_rat a * of_rat b :=
quot.sound (r_mul_consts a b)
theorem of_rat_zero : of_rat 0 = 0 := rfl
theorem of_rat_one : of_rat 1 = 1 := rfl
open int
-- the homomorphism properties transfer to the ℤ and ℕ coercions
theorem of_int_add (a b : ℤ) : of_int (a + b) = of_int a + of_int b :=
by rewrite [of_int_eq, rat.of_int_add, of_rat_add]
theorem of_int_neg (a : ℤ) : of_int (-a) = -of_int a :=
by rewrite [of_int_eq, rat.of_int_neg, of_rat_neg]
theorem of_int_mul (a b : ℤ) : of_int (a * b) = of_int a * of_int b :=
by rewrite [of_int_eq, rat.of_int_mul, of_rat_mul]
theorem of_nat_add (a b : ℕ) : of_nat (a + b) = of_nat a + of_nat b :=
by rewrite [of_nat_eq, rat.of_nat_add, of_rat_add]
theorem of_nat_mul (a b : ℕ) : of_nat (a * b) = of_nat a * of_nat b :=
by rewrite [of_nat_eq, rat.of_nat_mul, of_rat_mul]
-- halves of a positive-rational reciprocal add back to the whole
theorem add_half_of_rat (n : ℕ+) : of_rat (2 * n)⁻¹ + of_rat (2 * n)⁻¹ = of_rat (n⁻¹) :=
by rewrite [-of_rat_add, pnat.add_halves]
theorem one_add_one : 1 + 1 = (2 : ℝ) := rfl
end real
|
using GOF3R
using Test
@testset "GOF3R.jl" begin
    # Resolve the JLL-provided gof3r binary and confirm it executes.
    GOF3R.s3gof3r_jll.gof3r() do gof3r_exe
        version_cmd = `$gof3r_exe --version`
        @test success(version_cmd)
    end
end
|
import data.multiset data.finset .to_multiset algebra.big_operators
open classical multiset finset
local attribute [instance] prop_decidable
universe u
variable α : Type u
-- A multiset is zero iff its finset of distinct members is empty.
lemma eq_zero_iff_to_finset_eq_empty {g : multiset α} : g = 0 ↔ g.to_finset = ∅ :=
begin
apply iff.intro,
{
-- forward: if g = 0 then membership in g.to_finset is everywhere false
intro h1,
rw finset.ext,
intro a,
simp [*]
},
{
-- backward: a nonzero multiset has some member m, but m would then lie
-- in the empty finset — contradiction
intro h1,
by_contradiction h2,
rcases (exists_mem_of_ne_zero h2) with ⟨m, h3⟩,
rw [←mem_to_finset, h1] at h3,
have : ¬ m ∈ ∅,
from finset.not_mem_empty m,
contradiction
}
end
-- In an integral domain, the product of g over a finset is nonzero when
-- g maps nonzero elements to nonzero values and every member of the
-- finset is nonzero.  Proved by induction on the finset.
lemma prod_ne_zero_of_forall_mem_ne_zero' {β : Type u} [has_zero α] [integral_domain β] {f : finset α } {g : α → β}
(ha : ∀ x : α, x ≠ 0 → g x ≠ 0) (hb : (0 : β) ≠ 1) : (∀ x ∈ f, x ≠ (0 :α)) → (finset.prod f g ≠ 0) :=
begin
apply finset.induction_on f,
{
-- base case: the empty product is 1, nonzero by hb
simp *,
},
{
intros a s h1 h2 h3,
-- members of s are nonzero, so the inductive hypothesis applies
have h4 : (∀ (x : α), x ∈ s → x ≠ 0),
{
intros x h4,
simp *,
},
have h5 : finset.prod s g ≠ 0,
from h2 h4,
-- the newly inserted element a is nonzero, hence so is g a
have h6 : a ≠ 0,
{
apply h3,
simp,
},
have h7 : g a ≠ 0,
from ha _ h6,
-- finish: a product of two nonzero factors is nonzero (integral domain)
rw finset.prod_insert h1,
exact mul_ne_zero h7 h5,
}
end
|
{-# OPTIONS --rewriting #-}
module Properties.TypeNormalization where
open import Luau.Type using (Type; Scalar; nil; number; string; boolean; never; unknown; _⇒_; _∪_; _∩_)
open import Luau.Subtyping using (Tree; Language; ¬Language; function; scalar; unknown; left; right; function-ok₁; function-ok₂; function-err; function-tgt; scalar-function; scalar-function-ok; scalar-function-err; scalar-function-tgt; function-scalar; _,_)
open import Luau.TypeNormalization using (_∪ⁿ_; _∩ⁿ_; _∪ᶠ_; _∪ⁿˢ_; _∩ⁿˢ_; normalize)
open import Luau.Subtyping using (_<:_; _≮:_; witness; never)
open import Properties.Subtyping using (<:-trans; <:-refl; <:-unknown; <:-never; <:-∪-left; <:-∪-right; <:-∪-lub; <:-∩-left; <:-∩-right; <:-∩-glb; <:-∩-symm; <:-function; <:-function-∪-∩; <:-function-∩-∪; <:-function-∪; <:-everything; <:-union; <:-∪-assocl; <:-∪-assocr; <:-∪-symm; <:-intersect; ∪-distl-∩-<:; ∪-distr-∩-<:; <:-∪-distr-∩; <:-∪-distl-∩; ∩-distl-∪-<:; <:-∩-distl-∪; <:-∩-distr-∪; scalar-∩-function-<:-never; scalar-≢-∩-<:-never)
-- Normal forms for types
-- FunType: intersections of function types.  Normal: normal-form types —
-- a union of an (optional) function part with a set of distinct scalars,
-- or the extreme types never/unknown.  Mutually defined.
data FunType : Type → Set
data Normal : Type → Set
data FunType where
_⇒_ : ∀ {S T} → Normal S → Normal T → FunType (S ⇒ T)
_∩_ : ∀ {F G} → FunType F → FunType G → FunType (F ∩ G)
data Normal where
_⇒_ : ∀ {S T} → Normal S → Normal T → Normal (S ⇒ T)
_∩_ : ∀ {F G} → FunType F → FunType G → Normal (F ∩ G)
_∪_ : ∀ {S T} → Normal S → Scalar T → Normal (S ∪ T)
never : Normal never
unknown : Normal unknown
-- OptScalar: either never or one of the scalar types
data OptScalar : Type → Set where
never : OptScalar never
number : OptScalar number
boolean : OptScalar boolean
string : OptScalar string
nil : OptScalar nil
-- Top function type
-- every function type is a subtype of never ⇒ unknown
fun-top : ∀ {F} → (FunType F) → (F <: (never ⇒ unknown))
fun-top (S ⇒ T) = <:-function <:-never <:-unknown
fun-top (F ∩ G) = <:-trans <:-∩-left (fun-top F)
-- function types are inhabited
-- the tree `function` is a member of every function type
fun-function : ∀ {F} → FunType F → Language F function
fun-function (S ⇒ T) = function
fun-function (F ∩ G) = (fun-function F , fun-function G)
fun-≮:-never : ∀ {F} → FunType F → (F ≮: never)
fun-≮:-never F = witness function (fun-function F) never
-- function types aren't scalars
-- any tree in a function type is excluded from every scalar type
fun-¬scalar : ∀ {F S t} → (s : Scalar S) → FunType F → Language F t → ¬Language S t
fun-¬scalar s (S ⇒ T) function = scalar-function s
fun-¬scalar s (S ⇒ T) (function-ok₁ p) = scalar-function-ok s
fun-¬scalar s (S ⇒ T) (function-ok₂ p) = scalar-function-ok s
fun-¬scalar s (S ⇒ T) (function-err p) = scalar-function-err s
fun-¬scalar s (S ⇒ T) (function-tgt p) = scalar-function-tgt s
fun-¬scalar s (F ∩ G) (p₁ , p₂) = fun-¬scalar s G p₂
-- dually, no scalar tree inhabits a function type
¬scalar-fun : ∀ {F S} → FunType F → (s : Scalar S) → ¬Language F (scalar s)
¬scalar-fun (S ⇒ T) s = function-scalar s
¬scalar-fun (F ∩ G) s = left (¬scalar-fun F s)
scalar-≮:-fun : ∀ {F S} → FunType F → Scalar S → S ≮: F
scalar-≮:-fun F s = witness (scalar s) (scalar s) (¬scalar-fun F s)
-- unknown contains scalar trees, so it cannot be below a function type
unknown-≮:-fun : ∀ {F} → FunType F → unknown ≮: F
unknown-≮:-fun F = witness (scalar nil) unknown (¬scalar-fun F nil)
-- Normalization produces normal types
-- mutually recursive: each clause set below mirrors the defining clauses of
-- the corresponding operation in Luau.TypeNormalization
normal : ∀ T → Normal (normalize T)
normalᶠ : ∀ {F} → FunType F → Normal F
normal-∪ⁿ : ∀ {S T} → Normal S → Normal T → Normal (S ∪ⁿ T)
normal-∩ⁿ : ∀ {S T} → Normal S → Normal T → Normal (S ∩ⁿ T)
normal-∪ⁿˢ : ∀ {S T} → Normal S → OptScalar T → Normal (S ∪ⁿˢ T)
normal-∩ⁿˢ : ∀ {S T} → Normal S → Scalar T → OptScalar (S ∩ⁿˢ T)
normal-∪ᶠ : ∀ {F G} → FunType F → FunType G → FunType (F ∪ᶠ G)
normal nil = never ∪ nil
normal (S ⇒ T) = (normal S) ⇒ (normal T)
normal never = never
normal unknown = unknown
normal boolean = never ∪ boolean
normal number = never ∪ number
normal string = never ∪ string
normal (S ∪ T) = normal-∪ⁿ (normal S) (normal T)
normal (S ∩ T) = normal-∩ⁿ (normal S) (normal T)
normalᶠ (S ⇒ T) = S ⇒ T
normalᶠ (F ∩ G) = F ∩ G
normal-∪ⁿ S (T₁ ∪ T₂) = (normal-∪ⁿ S T₁) ∪ T₂
normal-∪ⁿ S never = S
normal-∪ⁿ S unknown = unknown
normal-∪ⁿ never (T ⇒ U) = T ⇒ U
normal-∪ⁿ never (G₁ ∩ G₂) = G₁ ∩ G₂
normal-∪ⁿ unknown (T ⇒ U) = unknown
normal-∪ⁿ unknown (G₁ ∩ G₂) = unknown
normal-∪ⁿ (R ⇒ S) (T ⇒ U) = normalᶠ (normal-∪ᶠ (R ⇒ S) (T ⇒ U))
normal-∪ⁿ (R ⇒ S) (G₁ ∩ G₂) = normalᶠ (normal-∪ᶠ (R ⇒ S) (G₁ ∩ G₂))
normal-∪ⁿ (F₁ ∩ F₂) (T ⇒ U) = normalᶠ (normal-∪ᶠ (F₁ ∩ F₂) (T ⇒ U))
normal-∪ⁿ (F₁ ∩ F₂) (G₁ ∩ G₂) = normalᶠ (normal-∪ᶠ (F₁ ∩ F₂) (G₁ ∩ G₂))
normal-∪ⁿ (S₁ ∪ S₂) (T₁ ⇒ T₂) = normal-∪ⁿ S₁ (T₁ ⇒ T₂) ∪ S₂
normal-∪ⁿ (S₁ ∪ S₂) (G₁ ∩ G₂) = normal-∪ⁿ S₁ (G₁ ∩ G₂) ∪ S₂
normal-∩ⁿ S never = never
normal-∩ⁿ S unknown = S
normal-∩ⁿ S (T ∪ U) = normal-∪ⁿˢ (normal-∩ⁿ S T) (normal-∩ⁿˢ S U )
normal-∩ⁿ never (T ⇒ U) = never
normal-∩ⁿ unknown (T ⇒ U) = T ⇒ U
normal-∩ⁿ (R ⇒ S) (T ⇒ U) = (R ⇒ S) ∩ (T ⇒ U)
normal-∩ⁿ (R ∩ S) (T ⇒ U) = (R ∩ S) ∩ (T ⇒ U)
normal-∩ⁿ (R ∪ S) (T ⇒ U) = normal-∩ⁿ R (T ⇒ U)
normal-∩ⁿ never (T ∩ U) = never
normal-∩ⁿ unknown (T ∩ U) = T ∩ U
normal-∩ⁿ (R ⇒ S) (T ∩ U) = (R ⇒ S) ∩ (T ∩ U)
normal-∩ⁿ (R ∩ S) (T ∩ U) = (R ∩ S) ∩ (T ∩ U)
normal-∩ⁿ (R ∪ S) (T ∩ U) = normal-∩ⁿ R (T ∩ U)
normal-∪ⁿˢ S never = S
normal-∪ⁿˢ never number = never ∪ number
normal-∪ⁿˢ unknown number = unknown
normal-∪ⁿˢ (R ⇒ S) number = (R ⇒ S) ∪ number
normal-∪ⁿˢ (R ∩ S) number = (R ∩ S) ∪ number
normal-∪ⁿˢ (R ∪ number) number = R ∪ number
normal-∪ⁿˢ (R ∪ boolean) number = normal-∪ⁿˢ R number ∪ boolean
normal-∪ⁿˢ (R ∪ string) number = normal-∪ⁿˢ R number ∪ string
normal-∪ⁿˢ (R ∪ nil) number = normal-∪ⁿˢ R number ∪ nil
normal-∪ⁿˢ never boolean = never ∪ boolean
normal-∪ⁿˢ unknown boolean = unknown
normal-∪ⁿˢ (R ⇒ S) boolean = (R ⇒ S) ∪ boolean
normal-∪ⁿˢ (R ∩ S) boolean = (R ∩ S) ∪ boolean
normal-∪ⁿˢ (R ∪ number) boolean = normal-∪ⁿˢ R boolean ∪ number
normal-∪ⁿˢ (R ∪ boolean) boolean = R ∪ boolean
normal-∪ⁿˢ (R ∪ string) boolean = normal-∪ⁿˢ R boolean ∪ string
normal-∪ⁿˢ (R ∪ nil) boolean = normal-∪ⁿˢ R boolean ∪ nil
normal-∪ⁿˢ never string = never ∪ string
normal-∪ⁿˢ unknown string = unknown
normal-∪ⁿˢ (R ⇒ S) string = (R ⇒ S) ∪ string
normal-∪ⁿˢ (R ∩ S) string = (R ∩ S) ∪ string
normal-∪ⁿˢ (R ∪ number) string = normal-∪ⁿˢ R string ∪ number
normal-∪ⁿˢ (R ∪ boolean) string = normal-∪ⁿˢ R string ∪ boolean
normal-∪ⁿˢ (R ∪ string) string = R ∪ string
normal-∪ⁿˢ (R ∪ nil) string = normal-∪ⁿˢ R string ∪ nil
normal-∪ⁿˢ never nil = never ∪ nil
normal-∪ⁿˢ unknown nil = unknown
normal-∪ⁿˢ (R ⇒ S) nil = (R ⇒ S) ∪ nil
normal-∪ⁿˢ (R ∩ S) nil = (R ∩ S) ∪ nil
normal-∪ⁿˢ (R ∪ number) nil = normal-∪ⁿˢ R nil ∪ number
normal-∪ⁿˢ (R ∪ boolean) nil = normal-∪ⁿˢ R nil ∪ boolean
normal-∪ⁿˢ (R ∪ string) nil = normal-∪ⁿˢ R nil ∪ string
normal-∪ⁿˢ (R ∪ nil) nil = R ∪ nil
normal-∩ⁿˢ never number = never
normal-∩ⁿˢ never boolean = never
normal-∩ⁿˢ never string = never
normal-∩ⁿˢ never nil = never
normal-∩ⁿˢ unknown number = number
normal-∩ⁿˢ unknown boolean = boolean
normal-∩ⁿˢ unknown string = string
normal-∩ⁿˢ unknown nil = nil
normal-∩ⁿˢ (R ⇒ S) number = never
normal-∩ⁿˢ (R ⇒ S) boolean = never
normal-∩ⁿˢ (R ⇒ S) string = never
normal-∩ⁿˢ (R ⇒ S) nil = never
normal-∩ⁿˢ (R ∩ S) number = never
normal-∩ⁿˢ (R ∩ S) boolean = never
normal-∩ⁿˢ (R ∩ S) string = never
normal-∩ⁿˢ (R ∩ S) nil = never
normal-∩ⁿˢ (R ∪ number) number = number
normal-∩ⁿˢ (R ∪ boolean) number = normal-∩ⁿˢ R number
normal-∩ⁿˢ (R ∪ string) number = normal-∩ⁿˢ R number
normal-∩ⁿˢ (R ∪ nil) number = normal-∩ⁿˢ R number
normal-∩ⁿˢ (R ∪ number) boolean = normal-∩ⁿˢ R boolean
normal-∩ⁿˢ (R ∪ boolean) boolean = boolean
normal-∩ⁿˢ (R ∪ string) boolean = normal-∩ⁿˢ R boolean
normal-∩ⁿˢ (R ∪ nil) boolean = normal-∩ⁿˢ R boolean
normal-∩ⁿˢ (R ∪ number) string = normal-∩ⁿˢ R string
normal-∩ⁿˢ (R ∪ boolean) string = normal-∩ⁿˢ R string
normal-∩ⁿˢ (R ∪ string) string = string
normal-∩ⁿˢ (R ∪ nil) string = normal-∩ⁿˢ R string
normal-∩ⁿˢ (R ∪ number) nil = normal-∩ⁿˢ R nil
normal-∩ⁿˢ (R ∪ boolean) nil = normal-∩ⁿˢ R nil
normal-∩ⁿˢ (R ∪ string) nil = normal-∩ⁿˢ R nil
normal-∩ⁿˢ (R ∪ nil) nil = nil
normal-∪ᶠ (R ⇒ S) (T ⇒ U) = (normal-∩ⁿ R T) ⇒ (normal-∪ⁿ S U)
normal-∪ᶠ (R ⇒ S) (G ∩ H) = normal-∪ᶠ (R ⇒ S) G ∩ normal-∪ᶠ (R ⇒ S) H
normal-∪ᶠ (E ∩ F) G = normal-∪ᶠ E G ∩ normal-∪ᶠ F G
scalar-∩-fun-<:-never : ∀ {F S} → FunType F → Scalar S → (F ∩ S) <: never
scalar-∩-fun-<:-never (T ⇒ U) S = scalar-∩-function-<:-never S
scalar-∩-fun-<:-never (F ∩ G) S = <:-trans (<:-intersect <:-∩-left <:-refl) (scalar-∩-fun-<:-never F S)
flipper : ∀ {S T U} → ((S ∪ T) ∪ U) <: ((S ∪ U) ∪ T)
flipper = <:-trans <:-∪-assocr (<:-trans (<:-union <:-refl <:-∪-symm) <:-∪-assocl)
∩-<:-∩ⁿ : ∀ {S T} → Normal S → Normal T → (S ∩ T) <: (S ∩ⁿ T)
∩ⁿ-<:-∩ : ∀ {S T} → Normal S → Normal T → (S ∩ⁿ T) <: (S ∩ T)
∩-<:-∩ⁿˢ : ∀ {S T} → Normal S → Scalar T → (S ∩ T) <: (S ∩ⁿˢ T)
∩ⁿˢ-<:-∩ : ∀ {S T} → Normal S → Scalar T → (S ∩ⁿˢ T) <: (S ∩ T)
∪ᶠ-<:-∪ : ∀ {F G} → FunType F → FunType G → (F ∪ᶠ G) <: (F ∪ G)
∪ⁿ-<:-∪ : ∀ {S T} → Normal S → Normal T → (S ∪ⁿ T) <: (S ∪ T)
∪-<:-∪ⁿ : ∀ {S T} → Normal S → Normal T → (S ∪ T) <: (S ∪ⁿ T)
∪ⁿˢ-<:-∪ : ∀ {S T} → Normal S → OptScalar T → (S ∪ⁿˢ T) <: (S ∪ T)
∪-<:-∪ⁿˢ : ∀ {S T} → Normal S → OptScalar T → (S ∪ T) <: (S ∪ⁿˢ T)
∩-<:-∩ⁿ S never = <:-∩-right
∩-<:-∩ⁿ S unknown = <:-∩-left
∩-<:-∩ⁿ S (T ∪ U) = <:-trans <:-∩-distl-∪ (<:-trans (<:-union (∩-<:-∩ⁿ S T) (∩-<:-∩ⁿˢ S U)) (∪-<:-∪ⁿˢ (normal-∩ⁿ S T) (normal-∩ⁿˢ S U)) )
∩-<:-∩ⁿ never (T ⇒ U) = <:-∩-left
∩-<:-∩ⁿ unknown (T ⇒ U) = <:-∩-right
∩-<:-∩ⁿ (R ⇒ S) (T ⇒ U) = <:-refl
∩-<:-∩ⁿ (R ∩ S) (T ⇒ U) = <:-refl
∩-<:-∩ⁿ (R ∪ S) (T ⇒ U) = <:-trans <:-∩-distr-∪ (<:-trans (<:-union (∩-<:-∩ⁿ R (T ⇒ U)) (<:-trans <:-∩-symm (∩-<:-∩ⁿˢ (T ⇒ U) S))) (<:-∪-lub <:-refl <:-never))
∩-<:-∩ⁿ never (T ∩ U) = <:-∩-left
∩-<:-∩ⁿ unknown (T ∩ U) = <:-∩-right
∩-<:-∩ⁿ (R ⇒ S) (T ∩ U) = <:-refl
∩-<:-∩ⁿ (R ∩ S) (T ∩ U) = <:-refl
∩-<:-∩ⁿ (R ∪ S) (T ∩ U) = <:-trans <:-∩-distr-∪ (<:-trans (<:-union (∩-<:-∩ⁿ R (T ∩ U)) (<:-trans <:-∩-symm (∩-<:-∩ⁿˢ (T ∩ U) S))) (<:-∪-lub <:-refl <:-never))
∩ⁿ-<:-∩ S never = <:-never
∩ⁿ-<:-∩ S unknown = <:-∩-glb <:-refl <:-unknown
∩ⁿ-<:-∩ S (T ∪ U) = <:-trans (∪ⁿˢ-<:-∪ (normal-∩ⁿ S T) (normal-∩ⁿˢ S U)) (<:-trans (<:-union (∩ⁿ-<:-∩ S T) (∩ⁿˢ-<:-∩ S U)) ∩-distl-∪-<:)
∩ⁿ-<:-∩ never (T ⇒ U) = <:-never
∩ⁿ-<:-∩ unknown (T ⇒ U) = <:-∩-glb <:-unknown <:-refl
∩ⁿ-<:-∩ (R ⇒ S) (T ⇒ U) = <:-refl
∩ⁿ-<:-∩ (R ∩ S) (T ⇒ U) = <:-refl
∩ⁿ-<:-∩ (R ∪ S) (T ⇒ U) = <:-trans (∩ⁿ-<:-∩ R (T ⇒ U)) (<:-∩-glb (<:-trans <:-∩-left <:-∪-left) <:-∩-right)
∩ⁿ-<:-∩ never (T ∩ U) = <:-never
∩ⁿ-<:-∩ unknown (T ∩ U) = <:-∩-glb <:-unknown <:-refl
∩ⁿ-<:-∩ (R ⇒ S) (T ∩ U) = <:-refl
∩ⁿ-<:-∩ (R ∩ S) (T ∩ U) = <:-refl
∩ⁿ-<:-∩ (R ∪ S) (T ∩ U) = <:-trans (∩ⁿ-<:-∩ R (T ∩ U)) (<:-∩-glb (<:-trans <:-∩-left <:-∪-left) <:-∩-right)
∩-<:-∩ⁿˢ never number = <:-∩-left
∩-<:-∩ⁿˢ never boolean = <:-∩-left
∩-<:-∩ⁿˢ never string = <:-∩-left
∩-<:-∩ⁿˢ never nil = <:-∩-left
∩-<:-∩ⁿˢ unknown T = <:-∩-right
∩-<:-∩ⁿˢ (R ⇒ S) T = scalar-∩-fun-<:-never (R ⇒ S) T
∩-<:-∩ⁿˢ (F ∩ G) T = scalar-∩-fun-<:-never (F ∩ G) T
∩-<:-∩ⁿˢ (R ∪ number) number = <:-∩-right
∩-<:-∩ⁿˢ (R ∪ boolean) number = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R number) (scalar-≢-∩-<:-never boolean number (λ ())))
∩-<:-∩ⁿˢ (R ∪ string) number = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R number) (scalar-≢-∩-<:-never string number (λ ())))
∩-<:-∩ⁿˢ (R ∪ nil) number = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R number) (scalar-≢-∩-<:-never nil number (λ ())))
∩-<:-∩ⁿˢ (R ∪ number) boolean = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R boolean) (scalar-≢-∩-<:-never number boolean (λ ())))
∩-<:-∩ⁿˢ (R ∪ boolean) boolean = <:-∩-right
∩-<:-∩ⁿˢ (R ∪ string) boolean = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R boolean) (scalar-≢-∩-<:-never string boolean (λ ())))
∩-<:-∩ⁿˢ (R ∪ nil) boolean = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R boolean) (scalar-≢-∩-<:-never nil boolean (λ ())))
∩-<:-∩ⁿˢ (R ∪ number) string = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R string) (scalar-≢-∩-<:-never number string (λ ())))
∩-<:-∩ⁿˢ (R ∪ boolean) string = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R string) (scalar-≢-∩-<:-never boolean string (λ ())))
∩-<:-∩ⁿˢ (R ∪ string) string = <:-∩-right
∩-<:-∩ⁿˢ (R ∪ nil) string = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R string) (scalar-≢-∩-<:-never nil string (λ ())))
∩-<:-∩ⁿˢ (R ∪ number) nil = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R nil) (scalar-≢-∩-<:-never number nil (λ ())))
∩-<:-∩ⁿˢ (R ∪ boolean) nil = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R nil) (scalar-≢-∩-<:-never boolean nil (λ ())))
∩-<:-∩ⁿˢ (R ∪ string) nil = <:-trans <:-∩-distr-∪ (<:-∪-lub (∩-<:-∩ⁿˢ R nil) (scalar-≢-∩-<:-never string nil (λ ())))
∩-<:-∩ⁿˢ (R ∪ nil) nil = <:-∩-right
∩ⁿˢ-<:-∩ never T = <:-never
∩ⁿˢ-<:-∩ unknown T = <:-∩-glb <:-unknown <:-refl
∩ⁿˢ-<:-∩ (R ⇒ S) T = <:-never
∩ⁿˢ-<:-∩ (F ∩ G) T = <:-never
∩ⁿˢ-<:-∩ (R ∪ number) number = <:-∩-glb <:-∪-right <:-refl
∩ⁿˢ-<:-∩ (R ∪ boolean) number = <:-trans (∩ⁿˢ-<:-∩ R number) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ string) number = <:-trans (∩ⁿˢ-<:-∩ R number) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ nil) number = <:-trans (∩ⁿˢ-<:-∩ R number) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ number) boolean = <:-trans (∩ⁿˢ-<:-∩ R boolean) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ boolean) boolean = <:-∩-glb <:-∪-right <:-refl
∩ⁿˢ-<:-∩ (R ∪ string) boolean = <:-trans (∩ⁿˢ-<:-∩ R boolean) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ nil) boolean = <:-trans (∩ⁿˢ-<:-∩ R boolean) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ number) string = <:-trans (∩ⁿˢ-<:-∩ R string) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ boolean) string = <:-trans (∩ⁿˢ-<:-∩ R string) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ string) string = <:-∩-glb <:-∪-right <:-refl
∩ⁿˢ-<:-∩ (R ∪ nil) string = <:-trans (∩ⁿˢ-<:-∩ R string) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ number) nil = <:-trans (∩ⁿˢ-<:-∩ R nil) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ boolean) nil = <:-trans (∩ⁿˢ-<:-∩ R nil) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ string) nil = <:-trans (∩ⁿˢ-<:-∩ R nil) (<:-intersect <:-∪-left <:-refl)
∩ⁿˢ-<:-∩ (R ∪ nil) nil = <:-∩-glb <:-∪-right <:-refl
∪ᶠ-<:-∪ (R ⇒ S) (T ⇒ U) = <:-trans (<:-function (∩-<:-∩ⁿ R T) (∪ⁿ-<:-∪ S U)) <:-function-∪-∩
∪ᶠ-<:-∪ (R ⇒ S) (G ∩ H) = <:-trans (<:-intersect (∪ᶠ-<:-∪ (R ⇒ S) G) (∪ᶠ-<:-∪ (R ⇒ S) H)) ∪-distl-∩-<:
∪ᶠ-<:-∪ (E ∩ F) G = <:-trans (<:-intersect (∪ᶠ-<:-∪ E G) (∪ᶠ-<:-∪ F G)) ∪-distr-∩-<:
∪-<:-∪ᶠ : ∀ {F G} → FunType F → FunType G → (F ∪ G) <: (F ∪ᶠ G)
∪-<:-∪ᶠ (R ⇒ S) (T ⇒ U) = <:-trans <:-function-∪ (<:-function (∩ⁿ-<:-∩ R T) (∪-<:-∪ⁿ S U))
∪-<:-∪ᶠ (R ⇒ S) (G ∩ H) = <:-trans <:-∪-distl-∩ (<:-intersect (∪-<:-∪ᶠ (R ⇒ S) G) (∪-<:-∪ᶠ (R ⇒ S) H))
∪-<:-∪ᶠ (E ∩ F) G = <:-trans <:-∪-distr-∩ (<:-intersect (∪-<:-∪ᶠ E G) (∪-<:-∪ᶠ F G))
∪ⁿˢ-<:-∪ S never = <:-∪-left
∪ⁿˢ-<:-∪ never number = <:-refl
∪ⁿˢ-<:-∪ never boolean = <:-refl
∪ⁿˢ-<:-∪ never string = <:-refl
∪ⁿˢ-<:-∪ never nil = <:-refl
∪ⁿˢ-<:-∪ unknown number = <:-∪-left
∪ⁿˢ-<:-∪ unknown boolean = <:-∪-left
∪ⁿˢ-<:-∪ unknown string = <:-∪-left
∪ⁿˢ-<:-∪ unknown nil = <:-∪-left
∪ⁿˢ-<:-∪ (R ⇒ S) number = <:-refl
∪ⁿˢ-<:-∪ (R ⇒ S) boolean = <:-refl
∪ⁿˢ-<:-∪ (R ⇒ S) string = <:-refl
∪ⁿˢ-<:-∪ (R ⇒ S) nil = <:-refl
∪ⁿˢ-<:-∪ (R ∩ S) number = <:-refl
∪ⁿˢ-<:-∪ (R ∩ S) boolean = <:-refl
∪ⁿˢ-<:-∪ (R ∩ S) string = <:-refl
∪ⁿˢ-<:-∪ (R ∩ S) nil = <:-refl
∪ⁿˢ-<:-∪ (R ∪ number) number = <:-union <:-∪-left <:-refl
∪ⁿˢ-<:-∪ (R ∪ boolean) number = <:-trans (<:-union (∪ⁿˢ-<:-∪ R number) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ string) number = <:-trans (<:-union (∪ⁿˢ-<:-∪ R number) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ nil) number = <:-trans (<:-union (∪ⁿˢ-<:-∪ R number) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ number) boolean = <:-trans (<:-union (∪ⁿˢ-<:-∪ R boolean) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ boolean) boolean = <:-union <:-∪-left <:-refl
∪ⁿˢ-<:-∪ (R ∪ string) boolean = <:-trans (<:-union (∪ⁿˢ-<:-∪ R boolean) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ nil) boolean = <:-trans (<:-union (∪ⁿˢ-<:-∪ R boolean) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ number) string = <:-trans (<:-union (∪ⁿˢ-<:-∪ R string) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ boolean) string = <:-trans (<:-union (∪ⁿˢ-<:-∪ R string) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ string) string = <:-union <:-∪-left <:-refl
∪ⁿˢ-<:-∪ (R ∪ nil) string = <:-trans (<:-union (∪ⁿˢ-<:-∪ R string) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ number) nil = <:-trans (<:-union (∪ⁿˢ-<:-∪ R nil) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ boolean) nil = <:-trans (<:-union (∪ⁿˢ-<:-∪ R nil) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ string) nil = <:-trans (<:-union (∪ⁿˢ-<:-∪ R nil) <:-refl) flipper
∪ⁿˢ-<:-∪ (R ∪ nil) nil = <:-union <:-∪-left <:-refl
∪-<:-∪ⁿˢ T never = <:-∪-lub <:-refl <:-never
∪-<:-∪ⁿˢ never number = <:-refl
∪-<:-∪ⁿˢ never boolean = <:-refl
∪-<:-∪ⁿˢ never string = <:-refl
∪-<:-∪ⁿˢ never nil = <:-refl
∪-<:-∪ⁿˢ unknown number = <:-unknown
∪-<:-∪ⁿˢ unknown boolean = <:-unknown
∪-<:-∪ⁿˢ unknown string = <:-unknown
∪-<:-∪ⁿˢ unknown nil = <:-unknown
∪-<:-∪ⁿˢ (R ⇒ S) number = <:-refl
∪-<:-∪ⁿˢ (R ⇒ S) boolean = <:-refl
∪-<:-∪ⁿˢ (R ⇒ S) string = <:-refl
∪-<:-∪ⁿˢ (R ⇒ S) nil = <:-refl
∪-<:-∪ⁿˢ (R ∩ S) number = <:-refl
∪-<:-∪ⁿˢ (R ∩ S) boolean = <:-refl
∪-<:-∪ⁿˢ (R ∩ S) string = <:-refl
∪-<:-∪ⁿˢ (R ∩ S) nil = <:-refl
∪-<:-∪ⁿˢ (R ∪ number) number = <:-∪-lub <:-refl <:-∪-right
∪-<:-∪ⁿˢ (R ∪ boolean) number = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R number) <:-refl)
∪-<:-∪ⁿˢ (R ∪ string) number = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R number) <:-refl)
∪-<:-∪ⁿˢ (R ∪ nil) number = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R number) <:-refl)
∪-<:-∪ⁿˢ (R ∪ number) boolean = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R boolean) <:-refl)
∪-<:-∪ⁿˢ (R ∪ boolean) boolean = <:-∪-lub <:-refl <:-∪-right
∪-<:-∪ⁿˢ (R ∪ string) boolean = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R boolean) <:-refl)
∪-<:-∪ⁿˢ (R ∪ nil) boolean = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R boolean) <:-refl)
∪-<:-∪ⁿˢ (R ∪ number) string = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R string) <:-refl)
∪-<:-∪ⁿˢ (R ∪ boolean) string = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R string) <:-refl)
∪-<:-∪ⁿˢ (R ∪ string) string = <:-∪-lub <:-refl <:-∪-right
∪-<:-∪ⁿˢ (R ∪ nil) string = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R string) <:-refl)
∪-<:-∪ⁿˢ (R ∪ number) nil = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R nil) <:-refl)
∪-<:-∪ⁿˢ (R ∪ boolean) nil = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R nil) <:-refl)
∪-<:-∪ⁿˢ (R ∪ string) nil = <:-trans flipper (<:-union (∪-<:-∪ⁿˢ R nil) <:-refl)
∪-<:-∪ⁿˢ (R ∪ nil) nil = <:-∪-lub <:-refl <:-∪-right
∪ⁿ-<:-∪ S never = <:-∪-left
∪ⁿ-<:-∪ S unknown = <:-∪-right
∪ⁿ-<:-∪ never (T ⇒ U) = <:-∪-right
∪ⁿ-<:-∪ unknown (T ⇒ U) = <:-∪-left
∪ⁿ-<:-∪ (R ⇒ S) (T ⇒ U) = ∪ᶠ-<:-∪ (R ⇒ S) (T ⇒ U)
∪ⁿ-<:-∪ (R ∩ S) (T ⇒ U) = ∪ᶠ-<:-∪ (R ∩ S) (T ⇒ U)
∪ⁿ-<:-∪ (R ∪ S) (T ⇒ U) = <:-trans (<:-union (∪ⁿ-<:-∪ R (T ⇒ U)) <:-refl) (<:-∪-lub (<:-∪-lub (<:-trans <:-∪-left <:-∪-left) <:-∪-right) (<:-trans <:-∪-right <:-∪-left))
∪ⁿ-<:-∪ never (T ∩ U) = <:-∪-right
∪ⁿ-<:-∪ unknown (T ∩ U) = <:-∪-left
∪ⁿ-<:-∪ (R ⇒ S) (T ∩ U) = ∪ᶠ-<:-∪ (R ⇒ S) (T ∩ U)
∪ⁿ-<:-∪ (R ∩ S) (T ∩ U) = ∪ᶠ-<:-∪ (R ∩ S) (T ∩ U)
∪ⁿ-<:-∪ (R ∪ S) (T ∩ U) = <:-trans (<:-union (∪ⁿ-<:-∪ R (T ∩ U)) <:-refl) (<:-∪-lub (<:-∪-lub (<:-trans <:-∪-left <:-∪-left) <:-∪-right) (<:-trans <:-∪-right <:-∪-left))
∪ⁿ-<:-∪ S (T ∪ U) = <:-∪-lub (<:-trans (∪ⁿ-<:-∪ S T) (<:-union <:-refl <:-∪-left)) (<:-trans <:-∪-right <:-∪-right)
∪-<:-∪ⁿ S never = <:-∪-lub <:-refl <:-never
∪-<:-∪ⁿ S unknown = <:-unknown
∪-<:-∪ⁿ never (T ⇒ U) = <:-∪-lub <:-never <:-refl
∪-<:-∪ⁿ unknown (T ⇒ U) = <:-unknown
∪-<:-∪ⁿ (R ⇒ S) (T ⇒ U) = ∪-<:-∪ᶠ (R ⇒ S) (T ⇒ U)
∪-<:-∪ⁿ (R ∩ S) (T ⇒ U) = ∪-<:-∪ᶠ (R ∩ S) (T ⇒ U)
∪-<:-∪ⁿ (R ∪ S) (T ⇒ U) = <:-trans <:-∪-assocr (<:-trans (<:-union <:-refl <:-∪-symm) (<:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ R (T ⇒ U)) <:-refl)))
∪-<:-∪ⁿ never (T ∩ U) = <:-∪-lub <:-never <:-refl
∪-<:-∪ⁿ unknown (T ∩ U) = <:-unknown
∪-<:-∪ⁿ (R ⇒ S) (T ∩ U) = ∪-<:-∪ᶠ (R ⇒ S) (T ∩ U)
∪-<:-∪ⁿ (R ∩ S) (T ∩ U) = ∪-<:-∪ᶠ (R ∩ S) (T ∩ U)
∪-<:-∪ⁿ (R ∪ S) (T ∩ U) = <:-trans <:-∪-assocr (<:-trans (<:-union <:-refl <:-∪-symm) (<:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ R (T ∩ U)) <:-refl)))
∪-<:-∪ⁿ never (T ∪ U) = <:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ never T) <:-refl)
∪-<:-∪ⁿ unknown (T ∪ U) = <:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ unknown T) <:-refl)
∪-<:-∪ⁿ (R ⇒ S) (T ∪ U) = <:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ (R ⇒ S) T) <:-refl)
∪-<:-∪ⁿ (R ∩ S) (T ∪ U) = <:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ (R ∩ S) T) <:-refl)
∪-<:-∪ⁿ (R ∪ S) (T ∪ U) = <:-trans <:-∪-assocl (<:-union (∪-<:-∪ⁿ (R ∪ S) T) <:-refl)
-- normalization is sound for subtyping in both directions:
-- normalize T and T are mutual subtypes (mutually recursive proofs)
normalize-<: : ∀ T → normalize T <: T
<:-normalize : ∀ T → T <: normalize T
<:-normalize nil = <:-∪-right
<:-normalize (S ⇒ T) = <:-function (normalize-<: S) (<:-normalize T)
<:-normalize never = <:-refl
<:-normalize unknown = <:-refl
<:-normalize boolean = <:-∪-right
<:-normalize number = <:-∪-right
<:-normalize string = <:-∪-right
<:-normalize (S ∪ T) = <:-trans (<:-union (<:-normalize S) (<:-normalize T)) (∪-<:-∪ⁿ (normal S) (normal T))
<:-normalize (S ∩ T) = <:-trans (<:-intersect (<:-normalize S) (<:-normalize T)) (∩-<:-∩ⁿ (normal S) (normal T))
normalize-<: nil = <:-∪-lub <:-never <:-refl
normalize-<: (S ⇒ T) = <:-function (<:-normalize S) (normalize-<: T)
normalize-<: never = <:-refl
normalize-<: unknown = <:-refl
normalize-<: boolean = <:-∪-lub <:-never <:-refl
normalize-<: number = <:-∪-lub <:-never <:-refl
normalize-<: string = <:-∪-lub <:-never <:-refl
normalize-<: (S ∪ T) = <:-trans (∪ⁿ-<:-∪ (normal S) (normal T)) (<:-union (normalize-<: S) (normalize-<: T))
normalize-<: (S ∩ T) = <:-trans (∩ⁿ-<:-∩ (normal S) (normal T)) (<:-intersect (normalize-<: S) (normalize-<: T))
|
FUNCTION K31R(J,M,K,L,R, JA)
C BY PAULA GAIL GRISELL
C EDITED FROM BASIC BY AX, 1.17.79
C
C RUNNING-MEAN VOTE: S ACCUMULATES J OVER SUCCESSIVE CALLS
C (RESET WHEN M.EQ.1); RETURNS 1 WHEN THE MEAN S/M IS AT LEAST
C ONE HALF, OTHERWISE 0.  ARGUMENTS K, L AND R ARE UNUSED.
C
C BUG FIX: S MUST RETAIN ITS VALUE BETWEEN CALLS, BUT WITHOUT A
C SAVE STATEMENT A LOCAL VARIABLE IS UNDEFINED AFTER RETURN PER
C THE FORTRAN STANDARD (IT ONLY "WORKED" ON COMPILERS THAT GAVE
C LOCALS STATIC STORAGE).
SAVE S
k31r=ja ! Added 7/27/93 to report own old value
IF(M.EQ.1) S=0.
S=S+J
A=S/M
K31R=1
IF (A .LT..5) K31R=0
RETURN
END
|
function rmgismo()
% RMGISMO removes all existing paths containing the phrase 'GISMO'
% from the MATLAB search path.
%
% DEPRECATED: use admin.remove instead (admin.deprecated warns below).
% Author: Michael West, Geophysical Institute, Univ. of Alaska Fairbanks
% $Date$
% $Revision$

admin.deprecated(mfilename,'admin.remove');

% REMOVE EXISTING GISMO PATHS
% Walk the path string one entry at a time, splitting on the platform
% path separator.
pathList = path;
n = 1;
while n <= numel(pathList)
    % BUG FIX: split on pathsep (':' on Unix, ';' on Windows) rather
    % than a hard-coded ':'
    t = strtok(pathList(n:end), pathsep);
    OnePath = sprintf('%s', t);
    if ~isempty(strfind(OnePath,'GISMO'))
        %disp(['removing: ' OnePath])
        rmpath(OnePath);
    end
    % advance past this entry and its trailing separator
    % BUG FIX: the old loop broke out before examining the final path
    % entry; iterating until the string is exhausted checks every entry
    n = n + length(t) + 1;
end
|
(***************************************************************************)
(* This is part of FA_Completeness, it is distributed under the terms *)
(* of the GNU Lesser General Public License version 3 *)
(* (see file LICENSE for more details) *)
(* *)
(* Copyright 2020-2022: Yaoshun Fu and Wensheng Yu. *)
(***************************************************************************)
Require Import t2.
Require Export Seq.
(* a and b form nested intervals: a increasing, b decreasing, a n below
   b n pointwise, and the lengths b n - a n tend to zero *)
Definition NestedIntervals a b := Increase a /\
Decrease b /\ ILT_Seq a b /\ Limit (Minus_Seq b a) O.
(* Existence half of the nested-interval theorem: some ξ lies in every
   interval [a n, b n] and is the common limit of both sequences. *)
Theorem NITex : ∀ a b, NestedIntervals a b ->
∃ ξ, (∀ n, a n ≦ ξ /\ ξ ≦ b n) /\ Limit a ξ /\ Limit b ξ.
Proof.
intros; red in H; destruct H, H0, H1.
(* b 1 is an upper bound of a, so the monotone-convergence theorem
   (MCTup) yields the limit ξ of a *)
assert (Boundup_Seq (b 1) a).
{ red; intros. red in H, H0, H1; destruct (Theorem24 n).
- pose proof H3; apply H in H3. eapply Theorem173; eauto.
- rewrite H3; auto. }
destruct (MCTup _ _ H H3) as [ξ H4].
(* b = a + (b - a) converges to ξ + 0 = ξ *)
assert (Limit b ξ).
{ rewrite (SeqCon1 a b), <- Theorem175''; apply SeqLimPlus; auto. }
exists ξ; repeat split; auto.
- apply Increase_limitP; auto. - apply Decrease_limitP; auto.
Qed.
(* Uniqueness half: any two points with the NITex properties coincide,
   by uniqueness of sequence limits (LimUni). *)
Theorem NITuni : ∀ a b, NestedIntervals a b -> ∀ ξ1 ξ2,
(∀ n, a n ≦ ξ1 /\ ξ1 ≦ b n) /\ Limit a ξ1 /\ Limit b ξ1 ->
(∀ n, a n ≦ ξ2 /\ ξ2 ≦ b n) /\ Limit a ξ2 /\ Limit b ξ2 -> ξ1 = ξ2.
Proof.
intros; destruct H0 as [_ [H0 _]], H1 as [_ [H1 _]].
eapply LimUni; eauto.
Qed.
(* Corollary: the common point ξ is such that, for any ε > 0, all but
   finitely many intervals [a n, b n] lie inside the ε-neighbourhood
   of ξ. *)
Corollary Cor_NIT: ∀ a b, NestedIntervals a b ->
∃ ξ, (∀ N, a N ≦ ξ /\ ξ ≦ b N) /\
(∀ ε, ε > O -> ∃ N, ∀ n, (IGT_N n N) ->
[(a n) | (b n)] ⊂ (ξ|-ε)).
Proof.
intros. apply NITex in H; destruct H as [ξ H], H, H0.
exists ξ; split; intros; auto.
(* pick thresholds N1 for a and N2 for b from the two limits, then
   N1 + N2 dominates both *)
destruct H0 with ε as [N1 H3]; auto.
destruct H1 with ε as [N2 H4]; auto.
exists (Plus_N N1 N2); intros.
pose proof (H3 _ (Theorem15 _ _ _ (Theorem18 N1 N2) H5)).
pose proof (Theorem18 N2 N1); rewrite Theorem6 in H7.
pose proof (H4 _ (Theorem15 _ _ _ H7 H5)).
(* any point of [a n, b n] is within ε of ξ, using |a n - ξ| < ε on the
   left and |b n - ξ| < ε on the right *)
red; intros; destruct H9, H9; constructor; split.
- apply Ab1 in H6; destruct H6.
apply Theorem172 with (Γ:=a n); right; split; auto.
apply Theorem188_1' with (Θ:=ξ) in H6; Simpl_Rin H6.
rewrite Theorem175 in H6; auto.
- apply Ab1 in H8; destruct H8.
apply Theorem172 with (Γ:=b n); left; split; auto.
apply Theorem188_1' with (Θ:=ξ) in H11; Simpl_Rin H11.
rewrite Theorem175; auto.
Qed.
|
! Minimal demonstration that a C-preprocessor "#include" works in
! Fortran source.  cpp5.h is expected to declare the variable x and to
! #define the constant C123 — TODO confirm against cpp5.h.
program cpp5
implicit none
#include "cpp5.h"
! (2+3)*5 = 25; print it alongside the header-defined constant
x = (2+3)*5
print *, x, C123
end program
|
State Before: α : Type u_2
β : Type u_1
γ : Type ?u.75384
ι : Sort ?u.75387
ι' : Sort ?u.75390
f✝ : ι → α
s t : Set α
f : α → β
p : Set β → Prop
⊢ (∃ s x, p s) ↔ ∃ s, p (f '' s) State After: no goals Tactic: simp |
Verbal and Non–verbal Communication: Non–verbal communication (like body language and hand gestures) considered harmless in the U.S. may be offensive to people in Nicaragua and other countries to which you plan to travel. The list of gestures considered rude in other countries can grow beyond the obvious. For more help communicating in Nicaragua, please see the Communication Sheets.
Sexually Transmitted Diseases: Keep yourself free from sexually transmitted diseases by using protection (like condoms or abstinence). Also, remember that "no" may not always be interpreted as "no" in Nicaragua and in other countries. Inform yourself about the types of diseases prevalent in the area in which you are studying and the areas in which you will be traveling.
International Sources of Information: Inform yourself as much as possible about your new environment, making use of as many different sources as possible – online, in the library, on television and radio news programs, and in the paper. Don't limit yourself to U.S. sources. Instead, contrast the U.S. information with that provided by Nicaragua and other countries. Please see "Media" in the Resources section of this Handbook. |
%% The SantaFe example
%
% Simulate a set of pole figures for the SantaFe standard ODF, estimate
% an ODF and compare it to the initial SantaFe ODF.
% NOTE(review): this script relies on the MTEX toolbox being on the path
% (crystalSymmetry, calcPoleFigure, calcODF, ...) — confirm before running.
%% Open in Editor
%
%% Simulate pole figures
CS = crystalSymmetry('m-3m');
% crystal directions
h = [Miller(1,0,0,CS),Miller(1,1,0,CS),Miller(1,1,1,CS),Miller(2,1,1,CS)];
% specimen directions
r = equispacedS2Grid('resolution',5*degree,'antipodal');
% pole figures
pf = calcPoleFigure(SantaFe,h,r);
% add some noise
pf = noisepf(pf,100);
% plot them
plot(pf,'MarkerSize',5)
mtexColorMap LaboTeX
%% ODF Estimation with Ghost Correction
rec = calcODF(pf)
%% ODF Estimation without Ghost Correction
rec2 = calcODF(pf,'NoGhostCorrection')
%% Error analysis
% calculate RP error
calcError(rec,SantaFe)
% difference plot between measured and recalculated pole figures
plotDiff(pf,rec)
%% Plot estimated pole figures
plotPDF(rec,pf.h,'antipodal')
%% Plot estimated ODF (Ghost Corrected)
plot(rec,'sections',18,'resolution',5*degree,...
'contourf','FontSize',10,'silent','figSize','large','minmax')
mtexColorMap white2black
%% Plot odf
% the reference (true) SantaFe ODF, for visual comparison with rec
plot(SantaFe,'sections',18,'contourf','FontSize',10,'silent',...
'figSize','large','minmax')
mtexColorMap white2black
%% Plot Fourier Coefficients
%%
close all;
% true ODF
plotSpektra(SantaFe,'bandwidth',32,'linewidth',2)
% keep plot for adding the next plots
hold all
% With ghost correction:
plotSpektra(rec,'bandwidth',32,'linewidth',2)
% Without ghost correction:
plotSpektra(rec2,'bandwidth',32,'linewidth',2)
legend({'true ODF','with ghost correction','without ghost correction'})
% next plot command overwrites plot
hold off
module Linear.Metric
import Linear.Epsilon
import Linear.Vect
-- A metric (inner-product) structure on an additive container type.
-- Only `dot` is required; the remaining methods have default
-- implementations derived from it.
public export
interface Additive f => Metric f where
||| Compute the inner product of two vectors or (equivalently)
||| convert a vector `f a` into a covector `f a -> a`.
|||
||| ```idris example
||| V2 1 2 `dot` V2 3 4
||| ```
dot : Num a => f a -> f a -> a
||| Compute the squared norm. The name quadrance arises from
||| Norman J. Wildberger's rational trigonometry.
quadrance : Num a => f a -> a
quadrance v = dot v v
||| Compute the quadrance of the difference
qd : Neg a => f a -> f a -> a
qd f g = quadrance (f ^-^ g)
||| Compute the distance between two vectors in a metric space
distance : (Floating a, Neg a) => f a -> f a -> a
distance f g = norm (f ^-^ g)
||| Compute the norm of a vector in a metric space
-- The casts route through the Floating interface's sqrt; the round trip
-- is the identity on the numeric value.
norm : Floating a => f a -> a
norm v = cast (sqrt (cast (quadrance v)))
||| Convert a non-zero vector to unit vector.
-- Dividing by norm 0 for the zero vector is the caller's responsibility.
signorm : Floating a => f a -> f a
signorm v = let m = norm v in map (/m) v
-- Blanket instance: any Additive, Foldable container is a Metric with the
-- standard componentwise dot product (zip with (*), then sum).
public export
implementation (Additive f, Foldable f) => Metric f where
dot x y = sum $ liftI2 (*) x y
||| Scale a 'Metric' functor to unit 'norm'.  The input is returned
||| unchanged when its 'norm' is (approximately) 0 or 1.
export
normalize : (Floating a, Metric f, Epsilon a, Neg a) => f a -> f a
normalize v =
  let q = quadrance v in
  if nearZero q || nearZero (1 - q)
     then v
     else map (/ cast (sqrt (cast q))) v
||| `project u v` computes the projection of `v` onto `u`.
export
project : (Metric v, Fractional a, Neg a) => v a -> v a -> v a
project u v =
  let coeff = (v `dot` u) / quadrance u
  in coeff *^ u
|
news The Federal Department of Human Services today announced a deal with IT services giant Accenture that will see the company help replace the ageing Child Support payments system, using the SAP technology which Accenture developed extensive skills with during the Commonwealth Bank’s core banking replacement project.
The current Child Support platform, known as ‘Cuba’, was first deployed in 2002, according to tender documents released last year — more than a decade ago, or an age in technology terms. It contains all of the Child Support Agency’s electronic customer records and supports transactions for assessments, payments and data exchange with external agencies.
And the scale of the system is not small. It directly supports over 1.2 million Australian children through providing some $3.2 billion per year in payments. It also helps the department administer child support services and collects child support so that separated parents can ensure “the financial and emotional wellbeing of their families”.
It also notes that its aims in future include the ability to support self-sufficiency through online and mobile platforms — implying it has limited capability in that area right now — and that it wants to reduce the number and impact of “system-generated errors” in its current Cuba platform. It also appears that the current system is not able to easily handle “complex cases” or “emergency response situations”.
In May 2013, Human Services Minister Jan McLucas announced that the then-Gillard Government would, as the Senator said at the time, “deliver improvements to information technology systems to ensure millions of Australians receive government payments efficiently and effectively,” upgrading the Child Support system.
In a statement released today, the Department said the new system would be delivered using SAP technology and with the support of both Accenture and SAP. The contract represents part of a $102.2 million investment over five years announced by McLucas to replace the Department’s outdated child support payment system.
New Minister for Human Services, Marise Payne said the contract would see DHS staff working closely with Accenture and SAP teams. “Accenture and SAP will assist in building the replacement system while ensuring that the department is left with a skilled, in-house workforce able to maintain the system into the future, at reduced cost to the taxpayer,” Payne said.
Managing director of Accenture’s health and public sector business in Australia Catherine Garner said the company was very pleased to have been selected to deliver the new payment system.
“This is an extremely important initiative that will improve the Department’s ability to effectively and efficiently deliver services to families,” Garner said. The new Child Support system is expected to be implemented in 2015 with further upgrades to be completed by June 2018.
Accenture has solid experience in recent times both with SAP as well as with the Federal Government. It was responsible for substantial packages of work on the Commonwealth Bank’s mammoth core banking upgrade program, which also used SAP technology, as well as the Australian Taxation Office’s Change Program, which used Oracle software.
subsection \<open>Equivalence of register machine and arithmetizing equations\<close>
theory Machine_Equation_Equivalence imports All_Equations
"../Register_Machine/MachineEquations"
"../Register_Machine/MultipleToSingleSteps"
begin
context register_machine
begin
(* Main equivalence: a register machine started on a valid initial
   configuration terminates iff the arithmetized equation system
   rm_equations has a solution.  The forward direction constructs a
   solution from a terminating run; the converse reconstructs a halting
   run from a solution by reading off base-b digits. *)
lemma conclusion_4_5:
assumes is_val: "is_valid_initial ic p a"
and n_def: "n \<equiv> length (snd ic)"
shows "(\<exists>q. terminates ic p q) = rm_equations a"
proof (rule)
(* ==> : from a terminating run of length q, build the solution. *)
assume "\<exists>q. terminates ic p q"
then obtain q::nat where terminates: "terminates ic p q" by auto
hence "q>0" using terminates_def by auto
have "\<exists>c>1. cells_bounded ic p c"
using terminate_c_exists terminates is_val is_valid_initial_def by blast
then obtain c where c: "cells_bounded ic p c \<and> c > 1" by auto
(* Instantiate the constants b, d, e, f and the encoded register/state/
   zero-indicator sequences r, s, z from the run. *)
define b where "b \<equiv> B c"
define d where "d \<equiv> D q c b"
define e where "e \<equiv> E q b"
define f where "f \<equiv> F q c b"
have "c>1" using c by auto
have "b>1" using c b_def B_def
using nat_neq_iff by fastforce
define r where "r \<equiv> RLe ic p b q"
define s where "s \<equiv> SKe ic p b q"
define z where "z \<equiv> ZLe ic p b q"
interpret equations: rm_eq_fixes p n a b c d e f q r z s by unfold_locales
(* Group 1: masking equations. *)
have "equations.mask_equations"
proof -
have "\<forall>l<n. r l \<preceq> d"
using lm04_15_register_masking[of "ic" "p" "c" _ "q"] r_def n_def d_def b_def c by auto
moreover have "\<forall>l<n. z l \<preceq> e"
using lm04_15_zero_masking z_def n_def e_def b_def c by auto
moreover have "\<forall>l<n. 2 ^ c * z l = r l + d && f"
using lm04_20_zero_definition r_def z_def n_def d_def f_def b_def c by auto
ultimately show ?thesis unfolding equations.mask_equations_def equations.register_mask_def
equations.zero_indicator_mask_def equations.zero_indicator_0_or_1_def by auto
qed
(* Group 2: register recurrence equations and the bound r l < b^q. *)
moreover have "equations.register_equations"
proof -
have "r 0 = a + b * r 0 + b * \<Sum>R+ p 0 s - b * \<Sum>R- p 0 (\<lambda>k. s k && z 0)"
using lm04_23_multiple_register1[of "ic" "p" "a" "c" "0" "q"] is_val c terminates `q>0` r_def
s_def z_def b_def bitAND_commutes by auto
moreover have "\<forall>l>0. l < n \<longrightarrow> r l = b * r l + b * \<Sum>R+ p l s - b * \<Sum>R- p l (\<lambda>k. s k && z l)"
using lm04_22_multiple_register[of "ic" "p" "a" "c" _ "q"]
b_def c terminates r_def s_def z_def is_val bitAND_commutes n_def `q>0` by auto
moreover have "l<n \<Longrightarrow> r l < b^q" for l
proof -
assume "l<n"
hence Rlq: "R ic p l q = 0"
using terminates terminates_def correct_halt_def R_def n_def by auto
have c_ineq: "(2::nat)^c \<le> 2 ^ Suc c - Suc 0" using `c>1` by auto
have "\<forall>t. R ic p l t < 2 ^ c" using c `l<n` n_def by auto
hence R_bound: " \<forall>t. R ic p l t < 2 ^ Suc c - Suc 0" using c_ineq
by (metis dual_order.strict_trans linorder_neqE_nat not_less)
(* Split off the final summand at t = q, which is 0 since the run halts
   with empty registers. *)
have "(\<Sum>t = 0..q. b ^ t * R ic p l t) = (\<Sum>t = 0..(Suc (q-1)). b ^ t * R ic p l t)"
using `q>0` by auto
also have "... = (\<Sum>t = 0..q-1. b ^ t * R ic p l t) + b^q * R ic p l q"
using Set_Interval.comm_monoid_add_class.sum.atLeast0_atMost_Suc[of _ "q-1"] `q>0` by auto
also have "... = (\<Sum>t = 0..q-1. b ^ t * R ic p l t)" using Rlq by auto
also have "... < b ^ q" using b_def R_bound
base_summation_bound[of "R ic p l" "c" "q-1"] `q>0` by (auto simp: mult.commute)
finally show ?thesis using r_def RLe_def by auto
qed
ultimately show ?thesis unfolding equations.register_equations_def equations.register_0_def
equations.register_l_def equations.register_bound_def by auto
qed
(* Group 3: state equations (recursion, uniqueness, partial-sum mask,
   and the halting state s m = b^q). *)
moreover have "equations.state_equations"
proof -
have "equations.state_relations_from_recursion"
proof -
have "\<forall>d>0. d\<le>m \<longrightarrow> s d = b*\<Sum>S+ p d (\<lambda>k. s k) + b*\<Sum>S- p d (\<lambda>k. s k && z (modifies (p!k)))
+ b*\<Sum>S0 p d (\<lambda>k. s k && (e - z (modifies (p!k))))"
apply (auto simp: s_def z_def)
using lm04_24_multiple_step_states[of "ic" "p" "a" "c" _ "q"]
b_def c terminates s_def z_def is_val bitAND_commutes m_def `q>0` e_def E_def by auto
moreover have "s 0 = 1 + b*\<Sum>S+ p 0 (\<lambda>k. s k) + b*\<Sum>S- p 0 (\<lambda>k. s k && z (modifies (p!k)))
+ b*\<Sum>S0 p 0 (\<lambda>k. s k && (e - z (modifies (p!k))))"
using lm04_25_multiple_step_state1[of "ic" "p" "a" "c" _ "q"]
b_def c terminates s_def z_def is_val bitAND_commutes m_def `q>0` e_def E_def by auto
ultimately show ?thesis unfolding equations.state_relations_from_recursion_def
equations.state_0_def equations.state_d_def equations.state_m_def by auto
qed
moreover have "equations.state_unique_equations"
proof -
have "k<m \<longrightarrow> s k < b ^ q" for k
using state_q_bound is_val terminates \<open>q>0\<close> b_def s_def m_def c by auto
moreover have "k\<le>m \<longrightarrow> s k \<preceq> e" for k
using state_mask is_val terminates \<open>q>0\<close> b_def e_def s_def c by auto
ultimately show ?thesis unfolding equations.state_unique_equations_def
equations.state_mask_def equations.state_bound_def by auto
qed
moreover have "\<forall>M\<le>m. sum s {..M} \<preceq> e"
using state_sum_mask is_val terminates \<open>q>0\<close> b_def e_def s_def c `b>1` m_def by auto
moreover have "s m = b^q"
using halting_condition_04_27[of "ic" "p" "a" "q" "c"] m_def b_def is_val `q>0` terminates
s_def by auto
ultimately show ?thesis unfolding equations.state_equations_def
equations.state_partial_sum_mask_def equations.state_m_def by auto
qed
(* Groups 4 and 5: constant definitions and miscellaneous side conditions. *)
moreover have "equations.constants_equations"
unfolding equations.constants_equations_def equations.constant_b_def
equations.constant_d_def equations.constant_e_def equations.constant_f_def
using b_def d_def e_def f_def by auto
moreover have "equations.miscellaneous_equations"
proof -
have tapelength: "length (snd ic) > 0"
using is_val is_valid_initial_def[of "ic" "p" "a"] by auto
have "R ic p 0 0 = a" using is_val is_valid_initial_def[of "ic" "p" "a"]
R_def List.hd_conv_nth[of "snd ic"] by auto
moreover have "R ic p 0 0 < 2^c" using c tapelength by auto
ultimately have "a < 2^c" by auto
thus ?thesis unfolding equations.miscellaneous_equations_def equations.c_gt_0_def
equations.a_bound_def equations.q_gt_0_def
using \<open>q > 0\<close> \<open>c > 1\<close> by auto
qed
ultimately show "rm_equations a" unfolding rm_equations_def all_equations_def by blast
next
(* <== : from a solution of the equation system, recover a halting run. *)
assume "rm_equations a"
then obtain q b c d e f r z s where
reg: "rm_eq_fixes.register_equations p n a b q r z s" and
state: "rm_eq_fixes.state_equations p b e q z s" and
mask: "rm_eq_fixes.mask_equations n c d e f r z" and
const: "rm_eq_fixes.constants_equations b c d e f q" and
misc: "rm_eq_fixes.miscellaneous_equations a c q"
unfolding rm_equations_def all_equations_def by auto
have fx: "rm_eq_fixes p n"
unfolding rm_eq_fixes_def using local.register_machine_axioms by auto
have "q>0" using misc fx rm_eq_fixes.miscellaneous_equations_def
rm_eq_fixes.q_gt_0_def by auto
have "b>1" using B_def const rm_eq_fixes.constants_equations_def
rm_eq_fixes.constant_b_def fx
by (metis One_nat_def Zero_not_Suc less_one n_not_Suc_n nat_neq_iff nat_power_eq_Suc_0_iff
numeral_2_eq_2 of_nat_0 of_nat_power_eq_of_nat_cancel_iff of_nat_zero_less_power_iff pos2)
have "n>0" using is_val is_valid_initial_def[of "ic" "p" "a"] n_def by auto
have "m>0" using m_def is_val is_valid_initial_def[of "ic" "p"] is_valid_def[of "ic" "p"] by auto
(* Seq/Req/Zeq decode digit t (base b) of the solution values, i.e. the
   machine's state/register/zero-indicator contents at time t. *)
define Seq where "Seq \<equiv> (\<lambda>k t. nth_digit (s k) t b)"
define Req where "Req \<equiv> (\<lambda>l t. nth_digit (r l) t b)"
define Zeq where "Zeq \<equiv> (\<lambda>l t. nth_digit (z l) t b)"
(* Quick and dirty: :\<acute>| *)
(* Bridge from the locale-based equation formulations to the older
   tuple-style predicates expected by rzs_eq. *)
have mask_old: "mask_equations n r z c d e f" and
reg_old: "reg_equations p r z s b a (length (snd ic)) q" and
state_old: "state_equations p s z b e q (length p - 1)" and
const_old: "rm_constants q c b d e f a"
subgoal
using mask rm_eq_fixes.mask_equations_def rm_eq_fixes.register_mask_def fx
mask_equations_def rm_eq_fixes.zero_indicator_0_or_1_def rm_eq_fixes.zero_indicator_mask_def
by simp
subgoal
using reg state mask const misc using rm_eq_fixes.register_equations_def
rm_eq_fixes.register_0_def rm_eq_fixes.register_l_def rm_eq_fixes.register_bound_def
reg_equations_def n_def fx by simp
subgoal
using state fx state_equations_def rm_eq_fixes.state_equations_def
rm_eq_fixes.state_relations_from_recursion_def rm_eq_fixes.state_0_def rm_eq_fixes.state_m_def
rm_eq_fixes.state_d_def rm_eq_fixes.state_unique_equations_def rm_eq_fixes.state_mask_def
rm_eq_fixes.state_bound_def rm_eq_fixes.state_partial_sum_mask_def m_def by simp
subgoal unfolding rm_constants_def
using const misc fx rm_eq_fixes.constants_equations_def
rm_eq_fixes.miscellaneous_equations_def rm_eq_fixes.constant_b_def rm_eq_fixes.constant_d_def
rm_eq_fixes.constant_e_def rm_eq_fixes.constant_f_def rm_eq_fixes.c_gt_0_def
rm_eq_fixes.q_gt_0_def rm_eq_fixes.a_bound_def by simp
done
(* The decoded digits agree with the actual run of the machine. *)
hence RZS_eq: "l<n \<Longrightarrow> j\<le>m \<Longrightarrow> t\<le>q \<Longrightarrow>
R ic p l t = Req l t \<and> Z ic p l t = Zeq l t \<and> S ic p j t = Seq j t" for l j t
using rzs_eq[of "m" "p" "n" "ic" "a" "r" "z"] mask_old reg_old state_old const_old
m_def n_def is_val `q>0` Seq_def Req_def Zeq_def by auto
have R_eq: "l<n \<Longrightarrow> t\<le>q \<Longrightarrow> R ic p l t = Req l t" for l t using RZS_eq by auto
have Z_eq: "l<n \<Longrightarrow> t\<le>q \<Longrightarrow> Z ic p l t = Zeq l t" for l t using RZS_eq by auto
have S_eq: "j\<le>m \<Longrightarrow> t\<le>q \<Longrightarrow> S ic p j t = Seq j t" for j t using RZS_eq[of "0"] `n>0` by auto
have "ishalt (p!m)" using m_def is_val
is_valid_initial_def[of "ic" "p" "a"] is_valid_def[of "ic" "p"] by auto
(* At time q the machine is in the halting state m with empty registers. *)
have "Seq m q = 1" using state nth_digit_def Seq_def `b>1`
using fx rm_eq_fixes.state_equations_def
rm_eq_fixes.state_relations_from_recursion_def
rm_eq_fixes.state_m_def by auto
hence "S ic p m q = 1" using S_eq by auto
hence "fst (steps ic p q) = m" using S_def by(cases "fst (steps ic p q) = m"; auto)
hence qhalt: "ishalt (p ! (fst (steps ic p q)))" using S_def `ishalt (p!m)` by auto
hence rempty: "snd (steps ic p q) ! l = 0" if "l < n" for l
unfolding R_def[symmetric]
using R_eq[of l q] \<open>l < n\<close> apply auto
using reg Req_def nth_digit_def
using rm_eq_fixes.register_equations_def
rm_eq_fixes.register_l_def
rm_eq_fixes.register_0_def
rm_eq_fixes.register_bound_def
by auto (simp add: fx)
(* Before time q the machine is never in the halting state, hence the
   run does not halt early. *)
have state_m_0: "t<q \<Longrightarrow> S ic p m t = 0" for t
proof -
assume "t<q"
have "b ^ q div b ^ t = b^(q-t)"
by (metis \<open>1 < b\<close> \<open>t < q\<close> less_imp_le not_one_le_zero power_diff)
also have "... mod b = 0" using \<open>1 < b\<close> \<open>t < q\<close> by simp
finally have mod: "b^q div b^t mod b = 0" by auto
have "s m = b^q" using state fx rm_eq_fixes.state_equations_def
rm_eq_fixes.state_m_def
rm_eq_fixes.state_relations_from_recursion_def by auto
hence "Seq m t = 0" using Seq_def nth_digit_def mod by auto
with S_eq `t < q` show ?thesis by auto
qed
have "\<forall>k<m. \<not> ishalt (p!k)"
using is_val is_valid_initial_def[of "ic" "p" "a"] is_valid_def[of "ic" "p"] m_def by auto
moreover have "t<q \<longrightarrow> fst (steps ic p t) < length p - 1" for t
proof (rule ccontr)
assume asm: "\<not> (t < q \<longrightarrow> fst (steps ic p t) < length p - 1)"
hence "t<q" by auto
with asm have "fst (steps ic p t) \<ge> length p - 1" by auto
moreover have "fst (steps ic p t) \<le> length p - 1"
using p_contains[of "ic" "p" "a" "t"] is_val by auto
ultimately have "fst (steps ic p t) = m" using m_def by auto
hence "S ic p m t = 1" using S_def by auto
thus "False" using state_m_0[of "t"] `t<q` by auto
qed
ultimately have "t<q \<longrightarrow> \<not> ishalt (p ! (fst (steps ic p t)))" for t using m_def by auto
hence no_early_halt: "t<q \<longrightarrow> \<not> ishalt (p ! (fst (steps ic p t)))" for t using state_m_0 by auto
have "correct_halt ic p q" using qhalt rempty correct_halt_def n_def by auto
thus "(\<exists>q. terminates ic p q)" using no_early_halt terminates_def `q>0` by auto
qed
end
end
using PyCall

# Ensure the Python `wandb` package is importable from PyCall's Python.
# If the import fails, attempt a pip install with the same interpreter;
# if that also fails with a PyCall error, print setup instructions.
try
    pyimport("wandb")
    @info "Using Pre-Installed Wandb Version"
catch e
    try
        run(`$(PyCall.pyprogramname) -m pip install wandb`)
    catch ee
        # Anything other than a PyCall error is unexpected — re-raise it.
        if !(ee isa PyCall.PyError)
            rethrow(ee)
        end
        # NB: the user-facing snippet must quote the ENV key — ENV[PYTHON]
        # without quotes is not valid Julia.
        @warn "Python dependencies not installed.\n" *
              "Either\n" *
              "- Rebuild `PyCall` to use Conda by running the following in Julia REPL " *
              "- `ENV[\"PYTHON\"]=\"\"; using Pkg; Pkg.build(\"PyCall\"); Pkg.build(\"Wandb\")\n" *
              "- Or install the dependencies by running `pip` - `pip install wandb>=0.11`"
    end
end
-- Factorial by structural recursion on nat.
def fact: nat -> nat
| 0 := 1
| (n+1) := (n+1) * (fact n)

example: fact 5 = 120 := rfl

-- Catalan numbers via the recurrence C(n+1) = C(n)*2*(2n+1)/(n+2).
-- `/` is truncating nat division; the examples below check exactness
-- for small n (assumes the product stays divisible — TODO confirm).
-- The overlapping patterns are resolved top-down, so `c 1` uses the
-- second equation, not the third.
def c: nat -> nat
| 0 := 1
| 1 := 1
| (n+1) := (c n) * 2*(2*n + 1)/(n+2)

#eval c 1
#eval c 5
example: c 5 = 42 := rfl
#eval c 10

-- Termination helper: the predecessor of a positive nat is smaller.
lemma lt_pred (x: nat) (h: 0 < x): (x-1) < x := nat.sub_lt h (nat.lt.base 0)

-- Factorial again, this time by well-founded recursion on <,
-- using lt_pred to justify the recursive call on x-1.
def fact' := well_founded.fix nat.lt_wf (λ x f, if h : x > 0 then x * (f (x-1) (lt_pred x h) ) else 1)

#eval fact' 8
example: fact' 8 = fact 8 := rfl.

-- Definitional unfolding of fact' at a successor.
lemma fact'_succ (n: nat): fact' (n+1) = (n+1)*(fact' n) := rfl.

-- The structural and well-founded definitions agree everywhere.
theorem commutes (n: nat): fact n = fact' n := begin
induction n, reflexivity,
case nat.succ: n ih {
simp [fact, fact'_succ, ih],
}
end
State Before: J : Type u₁
inst✝³ : Category J
K✝ : Type u₂
inst✝² : Category K✝
C : Type u₃
inst✝¹ : Category C
D : Type u₄
inst✝ : Category D
F K : J ⥤ C
c d : Cocone K
f : c ⟶ d
i : IsIso f.Hom
⊢ f ≫ CoconeMorphism.mk (inv f.Hom) = 𝟙 c ∧ CoconeMorphism.mk (inv f.Hom) ≫ f = 𝟙 d State After: no goals Tactic: aesop_cat |
% inputdlg2() - inputdlg function clone with coloring and help for
% eeglab().
%
% Usage:
% >> Answer = inputdlg2(Prompt,Title,LineNo,DefAns,funcname);
%
% Inputs:
% Same as inputdlg. Using the optional additionnal funcname parameter
% the function will create a help button. The help message will be
% displayed using the pophelp() function.
%
% Output:
% Same as inputdlg
%
% Note: The advantage of this function is that the color of the window
% can be changed and that it displays an help button. Edit
% supergui to change window options. Also the parameter LineNo
% can only be one.
%
% Author: Arnaud Delorme, CNL / Salk Institute, La Jolla, 11 August 2002
%
% See also: supergui(), inputgui()
% Copyright (C) Arnaud Delorme, CNL / Salk Institute, [email protected]
%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
function [result] = inputdlg2(Prompt,Title,LineNo,DefAns,funcname);
% inputdlg2() - build an eeglab-colored input dialog from prompts and
%               default answers; returns the edited answers (cell array),
%               or {} when aborted / called with too few arguments.

result = {};                       % defined even on the early-return paths
if nargin < 4
   help inputdlg2;
   return;
end;
if nargin < 5
	funcname = '';
end;

if length(Prompt) ~= length(DefAns)
	error('inputdlg2: prompt and default answer cell array must have the same size');
end;

geometry = {};
listgui = {};

% determine if vertical or horizontal
% -----------------------------------
geomvert = [];
for index = 1:length(Prompt)
	% one row for the prompt text (its height in lines) and one for the edit box
	geomvert = [geomvert size(Prompt{index},1) 1]; % default is vertical geometry
end;
% scalars compared here, so use the short-circuit operator
if all(geomvert == 1) && length(Prompt) > 1
	geomvert = []; % horizontal
end;

for index = 1:length(Prompt)
	if ~isempty(geomvert) % vertical
		geometry = { geometry{:} [ 1] [1 ]};
	else
		geometry = { geometry{:} [ 1 0.6 ]};
	end;
	listgui = { listgui{:} { 'Style', 'text', 'string', Prompt{index}} ...
			   { 'Style', 'edit', 'string', DefAns{index} } };
end;

% delegate the actual dialog to inputgui(); the help button calls pophelp()
result = inputgui(geometry, listgui, ['pophelp(''' funcname ''');'], Title, [], 'normal', geomvert);
#Image scraper using Julia and Selenium. Current build only scrapes google image
using PyCall
using WebDriver
using Requests
using JSON
using ArgParse
function scrollThroughPage(driver::WebDriver.Driver)
    # Scroll to the bottom of the results page a few times so that the
    # lazily-loaded images are added to the DOM.
    # NB: `const` is not permitted on local variables in Julia, so these
    # are plain locals.
    scrollScript = "window.scrollBy(0, 1000000)"
    waitTime = 0.2
    scrolls = 5
    for _ in 1:scrolls
        execute_script(driver, scrollScript)
        sleep(waitTime)   # give the page time to load new results
    end
end
function clickThroughPage(driver::WebDriver.Driver)
    # Click the "Show more results" button to advance to the next page of
    # the image search.  (`const` is illegal on locals, so plain bindings.)
    nextButtonSelector = "//input[@value='Show more results']"
    waitTime = 0.2
    click(find_element_by_xpath(driver, nextButtonSelector))
    sleep(waitTime)
end
function parseImageElement(img::WebDriver.WebElement, extensions::Tuple)
    # Decode the metadata JSON that Google embeds in each result element
    # and pull out the image url ("ou") and its file type ("ity").
    meta = JSON.parse(get_attribute(img, "innerHTML"))
    url = meta["ou"]
    ext = meta["ity"]
    # Fall back to "jpg" for any type we do not recognise.
    if !(ext in extensions)
        ext = "jpg"
    end
    return url, ext
end
function requestAndSaveImage(url::AbstractString, fname::AbstractString, img_num::Integer, stream::Bool=false)
    # Download the image at `url` into `fname`.  With `stream=true` the
    # body is written chunk-by-chunk instead of buffered in memory.
    # Failures are reported to stdout but never thrown to the caller.
    # NB: `catch Exception e` is not Julia syntax — `catch e` binds the
    # thrown value; we also avoid rebinding the Bool parameter `stream`
    # to the response object as the original did.
    if stream
        try
            resp = Requests.get_streaming(url)
            open(fname, "w") do file
                while !eof(resp)
                    write(file, readavailable(resp))
                end
            end
        catch e
            println("Image stream failed: " * string(e))
        end
    else
        try
            res = Requests.get(url)
            Requests.save(res, fname)
        catch e
            println("Image download failed: " * string(e))
        end
    end
end
function setupDirectories(basepath)
    # Create the output directory unless it is already present.
    if isdir(basepath)
        return
    end
    mkdir(basepath)
end
#the big function so we can see if anything works
function scrape_images_routine(searchTerm::AbstractString, num_images::Integer,
        basepath::AbstractString=searchTerm, streaming::Bool=false,
        parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"),
        verbose::Bool=true;
        driver_path::AbstractString="/home/beren/work/julia/misc/chromedriver")
    # Drive a Chrome instance through a Google image search for `searchTerm`
    # and save up to `num_images` results into `basepath`.
    # `driver_path` is a keyword (backward-compatible) so callers are no
    # longer tied to one machine's chromedriver location.
    # NB: `const` is illegal on local variables, so plain bindings below.
    url = "https://www.google.co.in/search?q=" * searchTerm * "&source=lnms&tbm=isch"
    images_per_page = 400
    # one extra scroll pass beyond the full pages needed
    number_of_scrolls = div(num_images, images_per_page) + 1
    driver = init_chrome(driver_path)
    if verbose
        println("Driver initialized")
    end
    # open the search results page
    get(driver, url)
    if verbose
        println("Searching for " * searchTerm)
    end
    # create the target directory once the search page has loaded
    setupDirectories(basepath)
    img_counter = 0
    for i in 1:number_of_scrolls
        scrollThroughPage(driver)   # scroll through page to load all images
        images = find_elements_by_xpath(driver, "//div[contains(@class, 'rg_meta')]")
        println("Total Images found on this page: " * string(length(images)))
        for img in images
            img_url, img_type = parseImageElement(img, extensions)
            fname = basepath * "/" * searchTerm * "_" * string(img_counter) * "." * img_type
            requestAndSaveImage(img_url, fname, img_counter, streaming)
            img_counter += 1
            if img_counter >= num_images
                if verbose
                    println(string(num_images) * " images found. Image scraper exiting")
                end
                return
            end
        end
    end
end
# okay, commandline functoins
# Build and evaluate the ArgParse specification for the scraper CLI.
# Returns a Dict keyed by "search_term", "number_images", "base_path",
# "-s" (stream), "-p" (parallel) and "-v" (verbose).
function parseCommandLine()
s = ArgParseSettings(prog="Julia Image Scraper with Selenium", description="An image scraper of google images written in Julia", commands_are_required=false, version="0.0.1", add_version=true)
@add_arg_table s begin
"search_term"
help="the search you want to get images from"
required=true
arg_type = String
"number_images"
help="the number of images you want to scrape"
required=true
arg_type = Int
"base_path"
help="the base path of the directory you want to save the images in"
arg_type=String
default=""
"-s"
help="whether to stream images or download them and save all at once"
action = :store_true
"-p"
help="whether to download images in parallel"
action= :store_true
"-v"
help="run the scraper in verbose mode"
action = :store_true
end
return parse_args(s)
end
function run_scrape_from_cmdline()
    # Parse the command-line flags and hand them to the scraping routine.
    args = parseCommandLine()
    search_term = args["search_term"]
    num = args["number_images"]
    base_path = args["base_path"]
    stream = args["-s"]
    parallel = args["-p"]
    verbose = args["-v"]
    # `verbose` is the 7th positional parameter of scrape_images_routine,
    # so the default extensions tuple must be supplied before it — the
    # original call passed `verbose` into the `extensions` slot.
    scrape_images_routine(search_term, num, base_path, stream, parallel,
                          ("jpg", "jpeg", "png", "gif"), verbose)
end
print(isinteractive())
# Auto-run the scraper only when this file is executed as a script from the
# command line.  `isinteractive()` is false in that case, so the original
# `if isinteractive()` guard was inverted and the CLI entry never ran.
if !isinteractive()
    run_scrape_from_cmdline()
end
#run_scrape_from_cmdline()
# and now for our functions allowing a greater specialisation of arguments
#
# Convenience front-ends over `scrape_images_routine`: search terms, image
# counts and output directories may be given either as scalars or as
# equal-length vectors.  The vector signatures use
# `AbstractVector{<:AbstractString}` / `AbstractVector{<:Integer}` because
# the original `Array{AbstractString}` / `Array{Integer}` annotations never
# match concrete `Vector{String}` / `Vector{Int}` arguments (Julia's
# parametric types are invariant), making those methods uncallable.
function scrape_images(searchTerm::AbstractString, num_images::Integer, basepath::AbstractString, streaming::Bool=false, parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"), verbose::Bool=true)
    # Single term, single destination — delegate directly.
    scrape_images_routine(searchTerm, num_images, basepath, streaming, parallel, extensions, verbose)
end

function scrape_images(searchTerm::AbstractVector{<:AbstractString}, num_images::Integer, basepath::AbstractString="", streaming::Bool=false, parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"), verbose::Bool=true)
    # Several terms, same image count and base path for each.
    for term in searchTerm
        scrape_images_routine(term, num_images, basepath, streaming, parallel, extensions, verbose)
    end
end

function scrape_images(searchTerm::AbstractVector{<:AbstractString}, num_images::AbstractVector{<:Integer}, basepath::AbstractString="", streaming::Bool=false, parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"), verbose::Bool=true)
    # Per-term image counts.
    len = length(searchTerm)
    @assert len == length(num_images) "number of terms and number of images for each term must be the same length"
    for i in 1:len
        scrape_images_routine(searchTerm[i], num_images[i], basepath, streaming, parallel, extensions, verbose)
    end
end

function scrape_images(searchTerm::AbstractVector{<:AbstractString}, num_images::AbstractVector{<:Integer}, basepath::AbstractVector{<:AbstractString}, streaming::Bool=false, parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"), verbose::Bool=true)
    # Per-term image counts and per-term destinations.
    len = length(searchTerm)
    @assert len == length(num_images) == length(basepath) "number of terms and number of images for each term must be the same length as must the number of different basepaths for each"
    for i in 1:len
        scrape_images_routine(searchTerm[i], num_images[i], basepath[i], streaming, parallel, extensions, verbose)
    end
end

function scrape_images(searchTerm::AbstractVector{<:AbstractString}, num_images::Integer, basepath::AbstractVector{<:AbstractString}, streaming::Bool=false, parallel::Bool=false, extensions::Tuple=("jpg", "jpeg", "png", "gif"), verbose::Bool=true)
    # Same image count for every term, per-term destinations.
    len = length(searchTerm)
    @assert len == length(basepath) "number of terms and number of images for each term must be the same length as must the number of different basepaths for each"
    for i in 1:len
        scrape_images_routine(searchTerm[i], num_images, basepath[i], streaming, parallel, extensions, verbose)
    end
end
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.