using Unicode

const threshold = 5

function normalize_text(path::String)
    println("Tokenizing...")
    # Run the Stanford PTB tokenizer as an external process and capture its output.
    text = read(`java -cp stanford-ner.jar edu.stanford.nlp.process.PTBTokenizer $path -preserveLines`, String)
    println("Normalizing...")
    words = String[]
    countdict = Dict{String,Int}()
    for w in split(text, " ")
        w = Unicode.normalize(w, :NFKC)
        w = lowercase(w)
        w = replace(w, r"[0-9]" => "0")  # e.g. 123.45 -> 000.00
        push!(words, w)
        countdict[w] = get(countdict, w, 0) + 1
    end
    println("Original vocabulary size: $(length(countdict))")
    # Words occurring fewer than `threshold` times are collected here and later
    # replaced by a single UNKNOWN token.
    unkdict = Dict{String,String}()
    for (k, v) in countdict
        v < threshold && (unkdict[k] = k)
    end
    c = length(countdict) - length(unkdict)
    println("Normalized (>= $threshold) vocabulary size: $c")
    words = map(words) do w
        haskey(unkdict, w) ? "UNKNOWN" : w
    end
    open("$path.clean", "w") do io
        println(io, join(words, " "))
    end
end

normalize_text(ARGS[1])
/-
Copyright (c) 2020 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.linarith.default
import Mathlib.PostPort
namespace Mathlib
/-!
# Bitwise operations on natural numbers
In the first half of this file, we provide theorems for reasoning about natural numbers from their
bitwise properties. In the second half of this file, we show properties of the bitwise operations
`lor`, `land` and `lxor`, which are defined in core.
## Main results
* `eq_of_test_bit_eq`: two natural numbers are equal if they have equal bits at every position.
* `exists_most_significant_bit`: if `n ≠ 0`, then there is some position `i` that contains the most
significant `1`-bit of `n`.
* `lt_of_test_bit`: if `n` and `m` are numbers and `i` is a position such that the `i`-th bit
of `n` is zero, the `i`-th bit of `m` is one, and all more significant bits are equal, then
`n < m`.
## Future work
There is another way to express bitwise properties of natural numbers: `digits 2`. The two ways
should be connected.
## Keywords
bitwise, and, or, xor
-/
namespace nat
@[simp] theorem bit_ff : bit false = bit0 := rfl
@[simp] theorem bit_tt : bit tt = bit1 := rfl
@[simp] theorem bit_eq_zero {n : ℕ} {b : Bool} : bit b n = 0 ↔ n = 0 ∧ b = false := sorry
theorem zero_of_test_bit_eq_ff {n : ℕ} (h : ∀ (i : ℕ), test_bit n i = false) : n = 0 := sorry
@[simp] theorem zero_test_bit (i : ℕ) : test_bit 0 i = false := sorry
/-- Bitwise extensionality: Two numbers agree if they agree at every bit position. -/
theorem eq_of_test_bit_eq {n : ℕ} {m : ℕ} (h : ∀ (i : ℕ), test_bit n i = test_bit m i) : n = m :=
sorry
theorem exists_most_significant_bit {n : ℕ} (h : n ≠ 0) :
∃ (i : ℕ), test_bit n i = tt ∧ ∀ (j : ℕ), i < j → test_bit n j = false :=
sorry
theorem lt_of_test_bit {n : ℕ} {m : ℕ} (i : ℕ) (hn : test_bit n i = false) (hm : test_bit m i = tt)
(hnm : ∀ (j : ℕ), i < j → test_bit n j = test_bit m j) : n < m :=
sorry
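-- For example, with `n = 4 = 0b100` and `m = 6 = 0b110`: bit 1 of `n` is zero, bit 1 of `m` is
-- one, and all higher bits agree, so `lt_of_test_bit` yields `4 < 6`.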
/-- If `f` is a commutative operation on bools such that `f false false = false`, then `bitwise f` is also
commutative. -/
theorem bitwise_comm {f : Bool → Bool → Bool} (hf : ∀ (b b' : Bool), f b b' = f b' b)
(hf' : f false false = false) (n : ℕ) (m : ℕ) : bitwise f n m = bitwise f m n :=
sorry
theorem lor_comm (n : ℕ) (m : ℕ) : lor n m = lor m n := bitwise_comm bool.bor_comm rfl n m
theorem land_comm (n : ℕ) (m : ℕ) : land n m = land m n := bitwise_comm bool.band_comm rfl n m
theorem lxor_comm (n : ℕ) (m : ℕ) : lxor n m = lxor m n := bitwise_comm bool.bxor_comm rfl n m
@[simp] theorem zero_lxor (n : ℕ) : lxor 0 n = n := rfl
@[simp] theorem lxor_zero (n : ℕ) : lxor n 0 = n := lxor_comm 0 n ▸ rfl
@[simp] theorem zero_land (n : ℕ) : land 0 n = 0 := rfl
@[simp] theorem land_zero (n : ℕ) : land n 0 = 0 := land_comm 0 n ▸ rfl
@[simp] theorem zero_lor (n : ℕ) : lor 0 n = n := rfl
@[simp] theorem lor_zero (n : ℕ) : lor n 0 = n := lor_comm 0 n ▸ rfl
/-- Proving associativity of bitwise operations in general essentially boils down to a huge case
distinction, so it is shorter to prove each operation separately by a case-bashing argument instead
of proving it in the general case. -/
theorem lxor_assoc (n : ℕ) (m : ℕ) (k : ℕ) : lxor (lxor n m) k = lxor n (lxor m k) := sorry
theorem land_assoc (n : ℕ) (m : ℕ) (k : ℕ) : land (land n m) k = land n (land m k) := sorry
theorem lor_assoc (n : ℕ) (m : ℕ) (k : ℕ) : lor (lor n m) k = lor n (lor m k) := sorry
@[simp] theorem lxor_self (n : ℕ) : lxor n n = 0 := sorry
theorem lxor_right_inj {n : ℕ} {m : ℕ} {m' : ℕ} (h : lxor n m = lxor n m') : m = m' := sorry
theorem lxor_left_inj {n : ℕ} {n' : ℕ} {m : ℕ} (h : lxor n m = lxor n' m) : n = n' :=
lxor_right_inj
(eq.mp (Eq._oldrec (Eq.refl (lxor m n = lxor n' m)) (lxor_comm n' m))
(eq.mp (Eq._oldrec (Eq.refl (lxor n m = lxor n' m)) (lxor_comm n m)) h))
theorem lxor_eq_zero {n : ℕ} {m : ℕ} : lxor n m = 0 ↔ n = m :=
{ mp :=
eq.mpr (id (Eq._oldrec (Eq.refl (lxor n m = 0 → n = m)) (Eq.symm (lxor_self m))))
lxor_left_inj,
mpr := fun (ᾰ : n = m) => Eq._oldrec (lxor_self n) ᾰ }
theorem lxor_trichotomy {a : ℕ} {b : ℕ} {c : ℕ} (h : lxor a (lxor b c) ≠ 0) :
lxor b c < a ∨ lxor a c < b ∨ lxor a b < c :=
sorry
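-- As a worked instance: for `a = 1`, `b = 2`, `c = 4` we have `lxor a (lxor b c) = 7 ≠ 0`,
-- and indeed `lxor a b = 3 < 4 = c`, matching the third alternative.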
end Mathlib
theory IHT_Binary_Search
imports
"../Functional/BinarySearch"
"../SepLogicTime/SLTC_Main"
"../Asymptotics/Asymptotics_1D"
begin
partial_function (heap_time) binarysearch :: "nat \<Rightarrow> nat \<Rightarrow> 'a::{heap,linorder} \<Rightarrow> 'a array \<Rightarrow> bool Heap" where
"binarysearch l r x a = (
if l \<ge> r then return False
else if l + 1 \<ge> r then do {
v \<leftarrow> Array_Time.nth a l;
return (v = x) }
else let m = avg l r in do {
v \<leftarrow> Array_Time.nth a m;
(if v = x then return True
else if v < x then binarysearch (m + 1) r x a
else binarysearch l m x a)
})" (* apply pat_completeness by auto
termination by (relation "Wellfounded.measure (\<lambda>(l,r,a,f). r-l)") auto
*)
print_theorems
function binarysearch_time :: "nat \<Rightarrow> nat" where
"n < 2 \<Longrightarrow> binarysearch_time n = 2"
| "n \<ge> 2 \<Longrightarrow> binarysearch_time n = 2 + binarysearch_time (n div 2)"
by force simp_all
termination by (relation "Wellfounded.measure (\<lambda>n. n)") auto
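(* Unrolling the recurrence: binarysearch_time n = 2 for n < 2 and
   binarysearch_time n = 2 + binarysearch_time (n div 2) otherwise, which gives the closed
   form binarysearch_time n = 2 * floor (log 2 n) + 2 for n >= 1. This is the source of the
   Theta (ln n) bound proved below. *)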
definition binarysearch_time' :: "nat \<Rightarrow> real" where
"binarysearch_time' n = real (binarysearch_time n)"
lemma div_2_to_rounding:
"n - n div 2 = nat \<lceil>n / 2\<rceil>" "n div 2 = nat \<lfloor>n / 2\<rfloor>" by linarith+
lemma binarysearch_time'_Theta: "(\<lambda>n. binarysearch_time' n) \<in> \<Theta>(\<lambda>n. ln (real n))"
apply (master_theorem2 2.3 recursion: binarysearch_time.simps(2) rew: binarysearch_time'_def div_2_to_rounding)
prefer 2 apply auto2
by (auto simp: binarysearch_time'_def)
setup \<open>fold add_rewrite_rule @{thms binarysearch_time.simps}\<close>
lemma binarysearch_mono [backward]:
"m \<le> n \<Longrightarrow> binarysearch_time m \<le> binarysearch_time n"
proof (induction n arbitrary: m rule: less_induct)
case (less n)
show ?case
proof (cases "m<2")
case True
then show ?thesis apply (cases "n<2") by auto
next
case False
then show ?thesis using less(2) by (auto intro: less(1))
qed
qed
lemma avg_diff1 [resolve]: "(l::nat) \<le> r \<Longrightarrow> r - (avg l r + 1) \<le> (r - l) div 2" by simp
lemma avg_diff2 [resolve]: "(l::nat) \<le> r \<Longrightarrow> avg l r - l \<le> (r - l) div 2" by simp
lemma binarysearch_correct [hoare_triple]:
"r \<le> length xs \<Longrightarrow> l \<le> r \<Longrightarrow>
<a \<mapsto>\<^sub>a xs * $(binarysearch_time (r - l))>
binarysearch l r x a
<\<lambda>res. a \<mapsto>\<^sub>a xs * \<up>(res \<longleftrightarrow> binarysearch_fun l r x xs)>\<^sub>t"
@proof @fun_induct "binarysearch_fun l r x xs"
@unfold "binarysearch_fun l r x xs"
@case "l \<ge> r" @case "l + 1 \<ge> r"
@let "m = avg l r"
@have "r - l \<ge> 2" @with
@have "l + 2 = 2 + l" @have "r \<ge> 2 + l" (* TODO: simplify *)
@end
@case "xs ! m < x" @with
@have "binarysearch_time ((r - l) div 2) \<ge>\<^sub>t binarysearch_time (r - (m + 1))"
@end
@case "xs ! m > x" @with
@have "binarysearch_time ((r - l) div 2) \<ge>\<^sub>t binarysearch_time (m - l)"
@end
@qed
lemma binarysearch_correct' [hoare_triple]:
"sorted xs \<Longrightarrow> r \<le> length xs \<Longrightarrow> l \<le> r \<Longrightarrow>
<a \<mapsto>\<^sub>a xs * $(binarysearch_time (r - l))>
binarysearch l r x a
<\<lambda>res. a \<mapsto>\<^sub>a xs * \<up>(res \<longleftrightarrow> (\<exists>i. l \<le> i \<and> i < r \<and> xs ! i = x))>\<^sub>t"
by auto2
end
\documentclass[9pt,twocolumn,twoside]{../../styles/osajnl}
\usepackage{fancyvrb}
\journal{i524}
\title{Apache Ranger}
\author[1,*, +]{Avadhoot Agasti}
\affil[1]{School of Informatics and Computing, Bloomington, IN 47408, U.S.A.}
\affil[*]{Corresponding authors: [email protected]}
\affil[+]{HID - SL-IO-3000}
\dates{paper2, \today}
\ociscodes{Apache Ranger, LDAP, Active Directory, Apache Knox, Apache Atlas,
Apache Hive, Apache Hadoop, Yarn, Apache HBase, Apache Storm, Apache Kafka,
Data Lake, Apache Sentry, Hive Server2, Java}
% replace this with your url in github/gitlab
\doi{\url{https://github.com/cloudmesh/sp17-i524/raw/master/paper2/S17-IO-3000/report.pdf}}
\begin{abstract}
Apache Hadoop provides various data storage, data access, and data processing
services. Apache Ranger is part of the Hadoop ecosystem. Apache Ranger
provides the capability to perform security administration tasks for the
storage, access, and processing of data in Hadoop. Using Ranger, a Hadoop
administrator can perform security administration tasks using a central user
interface or RESTful web services. The administrator can define policies which
enable users or user groups to perform specific actions using Hadoop
components and tools. Ranger provides role-based access control for datasets
on Hadoop at the column and row level. Ranger also provides centralized
auditing of user access and of security-related administrative
actions.\newline
\end{abstract}
\setboolean{displaycopyright}{true}
\begin{document}
\maketitle
\section{Introduction}
Apache Ranger is an open source software project designed to provide centralized
security services to the various components of Apache Hadoop. Apache Hadoop
provides various mechanisms to store, process, and access data, and each
Apache tool has its own security mechanism. This increases administrative
overhead and is also error-prone. Apache Ranger fills this gap by providing
a central security and auditing mechanism for the various Hadoop components. Using
Ranger, a Hadoop administrator can perform security administration tasks
using a central user interface or RESTful web services. The administrator can
define policies which enable users or user groups to perform specific actions
using Hadoop components and tools. Ranger provides role-based access control
for datasets on Hadoop at the column and row level. Ranger also provides
centralized auditing of user access and of security-related administrative
actions.
\section{ARCHITECTURE OVERVIEW}
\cite{www-ranger-architecture} describes the important components of Ranger as
explained below:
\subsection{Ranger Admin Portal}
The Ranger admin portal is the main interaction point for the user. A user can
define policies using the Ranger admin portal. These policies are stored in
a policy database and are polled by the various plugins. The admin
portal also collects the audit data from the plugins and stores it in HDFS or in a
relational database.
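For illustration, a policy can also be created programmatically against the
admin portal's REST interface. The following Python sketch assumes a Ranger
admin instance at \texttt{ranger.example.com:6080}, admin credentials, and a
service named \texttt{hadoopdev}; the endpoint path and the policy JSON fields
are assumptions that should be verified against the Ranger REST API
documentation for the deployed version.
\begin{verbatim}
import json
import requests  # third-party HTTP client

# Minimal HDFS path policy: allow user "analyst1"
# read access to /data/sales.
policy = {
    "service": "hadoopdev",   # assumed service name
    "name": "sales-read-only",
    "resources": {"path": {"values": ["/data/sales"],
                           "isRecursive": True}},
    "policyItems": [{
        "users": ["analyst1"],
        "accesses": [{"type": "read", "isAllowed": True}],
    }],
}

resp = requests.post(
    "http://ranger.example.com:6080"
    "/service/public/v2/api/policy",
    auth=("admin", "admin"),  # replace with real credentials
    headers={"Content-Type": "application/json"},
    data=json.dumps(policy),
)
print(resp.status_code)
\end{verbatim}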
\subsection{Ranger Plugins}
Plugins are Java programs which are invoked as part of the cluster component.
For example, the ranger-hive plugin is embedded as part of HiveServer2. The
plugins cache the policies, intercept the user requests, and evaluate them
against the policies. Plugins also collect the audit data for that
specific component and send it to the admin portal.
\subsection{User group sync}
While Ranger provides the authorization and access control mechanism, it needs to
know the users and the groups. Ranger integrates with the Unix user management
system, LDAP, or Active Directory to fetch the user and group
information. The user group sync component is responsible for this integration.
\section{HADOOP COMPONENTS SUPPORTED BY RANGER}
Ranger supports auditing and authorization for the following Hadoop components
\cite{www-ranger-faq}.
\subsection{Apache Hadoop and HDFS}
Apache Ranger provides a plugin for Hadoop which helps in enforcing data access
policies. The HDFS plugin works with the NameNode to check whether a user's access
request to a file on HDFS is valid.
\subsection{Apache Hive}
Apache Hive provides an SQL interface on top of the data stored in HDFS. Apache
Hive supports two types of authorization: storage-based authorization and
SQL-standard authorization. Ranger provides a centralized authorization
interface for Hive, which provides granular access control at the table and column
level. Ranger's Hive plugin is part of HiveServer2.
\subsection{Apache HBase}
Apache HBase is a NoSQL database implemented on top of Hadoop and HDFS. Ranger
provides a coprocessor plugin for HBase, which performs authorization checks and
audit log collection.
\subsection{Apache Storm}
Ranger provides a plugin for the Nimbus server which helps in performing
security authorization on Apache Storm.
\subsection{Apache Knox}
Apache Knox provides service-level authorization for users and groups. Ranger
provides a plugin for Knox through which administration of policies can be
supported. Auditing of Knox data enables the user to perform detailed
analysis of who accessed Knox and when.
\subsection{Apache Solr}
Solr provides free-text search capabilities on top of Hadoop. Ranger can be
used to protect Solr collections from unauthorized usage.
\subsection{Apache Kafka}
Ranger can manage access control on Kafka topics. Policies can be
implemented to control which users can write to a Kafka topic and which users
can read from a Kafka topic.
\subsection{Yarn}
Yarn is the resource management layer for Hadoop. Administrators can set up queues
in Yarn and then allocate users and resources on a per-queue basis. Policies can
be defined in Ranger to control who can write to the various Yarn queues.
\section{IMPORTANT FEATURES OF RANGER}
The blog article \cite{www-ranger-key-features} explains two important
features of Apache Ranger.
\subsection{Dynamic Column Masking}
Dynamic data masking at the column level is an important feature of Apache
Ranger. Using this feature, the administrator can set up a data masking policy. The
data masking makes sure that only authorized users can see the actual data,
while other users see masked data. Since the masked data is format
preserving, these users can continue their work without getting access to the
actual sensitive data. For example, application developers can use masked data
to develop the application, whereas when the application is actually
deployed, it shows the actual data to the authorized user. Similarly, a
security administrator may choose to mask credit card numbers when they are
displayed to a service agent.
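To make the format-preserving property concrete, the following Python snippet
sketches a ``show last four'' mask of the kind described above; it is a
conceptual illustration only, not Ranger's actual masking implementation.
\begin{verbatim}
def mask_show_last_4(value: str) -> str:
    """Replace every digit except the last four with 'x',
    keeping separators so the masked value retains its
    original format."""
    digit_positions = [i for i, ch in enumerate(value)
                       if ch.isdigit()]
    visible = set(digit_positions[-4:])
    return "".join(
        "x" if ch.isdigit() and i not in visible else ch
        for i, ch in enumerate(value)
    )

print(mask_show_last_4("4111-2222-3333-4444"))
# -> xxxx-xxxx-xxxx-4444
\end{verbatim}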
\subsection{Row Level Filtering}
Data authorization is typically required at the column level as well as at the
row level. For example, in an organization which is geographically distributed
across many locations, the security administrator may want to give a specific
user access only to data from a specific location. In another example, a
hospital's data security administrator may want to allow doctors to see
only their own patients. Using Ranger, such row-level access control can
be specified and implemented.
\section{HADOOP DISTRIBUTION SUPPORT}
Ranger can be deployed on top of Apache Hadoop.
\cite{www-ranger-on-apache-hadoop} provides detailed steps for building and
deploying Ranger on top of Apache Hadoop.
The Hortonworks Data Platform (HDP) supports Ranger deployment using
Ambari. \cite{www-ranger-on-hdp} provides installation, deployment, and
configuration steps for Ranger as part of an HDP deployment.
The Cloudera Distribution of Hadoop (CDH) does not support Ranger. According to
\cite{www-ranger-on-cdh}, Ranger is not recommended on CDH; instead, Apache
Sentry should be used as the central security and audit tool on top of CDH.
\section{USE CASES}
Apache Ranger provides a centralized security framework which can be useful in
many use cases, as explained below.
\subsection{Data Lake}
\cite{data-lake-whitepaper} explains that storing many types of data in the
same repository is one of the most important features of a data lake. With
multiple datasets, the ownership, security, and access control of the data
become a primary concern. Using Apache Ranger, the security administrator can
define fine-grained control over data access.
\subsection{Multi-tenant Deployment of Hadoop}
Hadoop provides the ability to store and process data from multiple
tenants. The security framework provided by Apache Ranger can be utilized to
protect the data and resources from unauthorized access.
\section{Apache Ranger and Apache Sentry}
According to \cite{www-5security-blog}, Apache Sentry and Apache Ranger have
many features in common. Apache Sentry \cite{www-apache-sentry} provides
role-based authorization to data and metadata stored in Hadoop.
\section{Educational Material}
\cite{www-ranger-tutorial} provides tutorials on topics such as A) security
resources, B) auditing, C) securing HDFS, Hive, and HBase with Knox and Ranger,
and D) using Apache Atlas' tag-based policies with Ranger.
\cite{www-ranger-quickstart} provides step-by-step guidance on getting the
latest code base of Apache Ranger and building and deploying it.
\section{Licensing}
Apache Ranger is available under the Apache 2.0 license.
\section{Conclusion}
Apache Ranger is useful to Hadoop security administrators since it enables
granular authorization and access control. It also provides a central
security framework for different data storage and access mechanisms like Hive,
HBase, and Storm. Apache Ranger also provides an audit mechanism. With Apache
Ranger, security can be enhanced for complex Hadoop use cases like data
lakes.
\section*{Acknowledgements}
The authors thank Prof. Gregor von Laszewski for his technical guidance.
% Bibliography
\bibliography{references}
\end{document}
/-
Copyright (c) 2022 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import analysis.normed_space.exponential
import analysis.matrix
import linear_algebra.matrix.zpow
import linear_algebra.matrix.hermitian
import linear_algebra.matrix.symmetric
import topology.uniform_space.matrix
/-!
# Lemmas about the matrix exponential
In this file, we provide results about `exp` on `matrix`s over a topological or normed algebra.
Note that generic results over all topological spaces such as `exp_zero` can be used on matrices
without issue, so are not repeated here. The topological results specific to matrices are:
* `matrix.exp_transpose`
* `matrix.exp_conj_transpose`
* `matrix.exp_diagonal`
* `matrix.exp_block_diagonal`
* `matrix.exp_block_diagonal'`
Lemmas like `exp_add_of_commute` require a canonical norm on the type; while there are multiple
sensible choices for the norm of a `matrix` (`matrix.normed_add_comm_group`,
`matrix.frobenius_normed_add_comm_group`, `matrix.linfty_op_normed_add_comm_group`), none of them
are canonical. In an application where a particular norm is chosen using
`local attribute [instance]`, then the usual lemmas about `exp` are fine. When choosing a norm is
undesirable, the results in this file can be used.
In this file, we copy across the lemmas about `exp`, but hide the requirement for a norm inside the
proof.
* `matrix.exp_add_of_commute`
* `matrix.exp_sum_of_commute`
* `matrix.exp_nsmul`
* `matrix.is_unit_exp`
* `matrix.exp_units_conj`
* `matrix.exp_units_conj'`
Additionally, we prove some results about `matrix.has_inv` and `matrix.div_inv_monoid`, as the
results for general rings are instead stated about `ring.inverse`:
* `matrix.exp_neg`
* `matrix.exp_zsmul`
* `matrix.exp_conj`
* `matrix.exp_conj'`
## Implementation notes
This file runs into some sharp edges on typeclass search in lean 3, especially regarding pi types.
To work around this, we copy a handful of instances for when lean can't find them by itself.
Hopefully we will be able to remove these in Lean 4.
## TODO
* Show that `matrix.det (exp 𝕂 A) = exp 𝕂 (matrix.trace A)`
## References
* https://en.wikipedia.org/wiki/Matrix_exponential
-/
open_locale matrix big_operators
section hacks_for_pi_instance_search
/-- A special case of `pi.topological_ring` for when `R` is not dependently typed. -/
instance function.topological_ring (I : Type*) (R : Type*)
[non_unital_ring R] [topological_space R] [topological_ring R] :
topological_ring (I → R) :=
pi.topological_ring
/-- A special case of `function.algebra` for when A is a `ring` not a `semiring` -/
instance function.algebra_ring (I : Type*) {R : Type*} (A : Type*) [comm_semiring R]
[ring A] [algebra R A] : algebra R (I → A) :=
pi.algebra _ _
/-- A special case of `pi.algebra` for when `f = λ i, matrix (m i) (m i) A`. -/
instance pi.matrix_algebra (I R A : Type*) (m : I → Type*)
[comm_semiring R] [semiring A] [algebra R A]
[Π i, fintype (m i)] [Π i, decidable_eq (m i)] :
algebra R (Π i, matrix (m i) (m i) A) :=
@pi.algebra I R (λ i, matrix (m i) (m i) A) _ _ (λ i, matrix.algebra)
/-- A special case of `pi.topological_ring` for when `f = λ i, matrix (m i) (m i) A`. -/
instance pi.matrix_topological_ring (I A : Type*) (m : I → Type*)
[ring A] [topological_space A] [topological_ring A]
[Π i, fintype (m i)] :
topological_ring (Π i, matrix (m i) (m i) A) :=
@pi.topological_ring _ (λ i, matrix (m i) (m i) A) _ _ (λ i, matrix.topological_ring)
end hacks_for_pi_instance_search
variables (𝕂 : Type*) {m n p : Type*} {n' : m → Type*} {𝔸 : Type*}
namespace matrix
section topological
section ring
variables [fintype m] [decidable_eq m] [fintype n] [decidable_eq n]
[Π i, fintype (n' i)] [Π i, decidable_eq (n' i)]
[field 𝕂] [ring 𝔸] [topological_space 𝔸] [topological_ring 𝔸] [algebra 𝕂 𝔸] [t2_space 𝔸]
lemma exp_diagonal (v : m → 𝔸) : exp 𝕂 (diagonal v) = diagonal (exp 𝕂 v) :=
by simp_rw [exp_eq_tsum, diagonal_pow, ←diagonal_smul, ←diagonal_tsum]
lemma exp_block_diagonal (v : m → matrix n n 𝔸) :
exp 𝕂 (block_diagonal v) = block_diagonal (exp 𝕂 v) :=
by simp_rw [exp_eq_tsum, ←block_diagonal_pow, ←block_diagonal_smul, ←block_diagonal_tsum]
lemma exp_block_diagonal' (v : Π i, matrix (n' i) (n' i) 𝔸) :
exp 𝕂 (block_diagonal' v) = block_diagonal' (exp 𝕂 v) :=
by simp_rw [exp_eq_tsum, ←block_diagonal'_pow, ←block_diagonal'_smul, ←block_diagonal'_tsum]
lemma exp_conj_transpose [star_ring 𝔸] [has_continuous_star 𝔸] (A : matrix m m 𝔸) :
exp 𝕂 Aᴴ = (exp 𝕂 A)ᴴ :=
(star_exp A).symm
lemma is_hermitian.exp [star_ring 𝔸] [has_continuous_star 𝔸] {A : matrix m m 𝔸}
(h : A.is_hermitian) : (exp 𝕂 A).is_hermitian :=
(exp_conj_transpose _ _).symm.trans $ congr_arg _ h
end ring
section comm_ring
variables [fintype m] [decidable_eq m] [field 𝕂]
[comm_ring 𝔸] [topological_space 𝔸] [topological_ring 𝔸] [algebra 𝕂 𝔸] [t2_space 𝔸]
lemma exp_transpose (A : matrix m m 𝔸) : exp 𝕂 Aᵀ = (exp 𝕂 A)ᵀ :=
by simp_rw [exp_eq_tsum, transpose_tsum, transpose_smul, transpose_pow]
lemma is_symm.exp {A : matrix m m 𝔸} (h : A.is_symm) : (exp 𝕂 A).is_symm :=
(exp_transpose _ _).symm.trans $ congr_arg _ h
end comm_ring
end topological
section normed
variables [is_R_or_C 𝕂]
[fintype m] [decidable_eq m]
[fintype n] [decidable_eq n]
[Π i, fintype (n' i)] [Π i, decidable_eq (n' i)]
[normed_ring 𝔸] [normed_algebra 𝕂 𝔸] [complete_space 𝔸]
lemma exp_add_of_commute (A B : matrix m m 𝔸) (h : commute A B) :
exp 𝕂 (A + B) = exp 𝕂 A ⬝ exp 𝕂 B :=
begin
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact exp_add_of_commute h,
end
lemma exp_sum_of_commute {ι} (s : finset ι) (f : ι → matrix m m 𝔸)
(h : (s : set ι).pairwise $ λ i j, commute (f i) (f j)) :
exp 𝕂 (∑ i in s, f i) = s.noncomm_prod (λ i, exp 𝕂 (f i))
(λ i hi j hj _, (h.of_refl hi hj).exp 𝕂) :=
begin
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact exp_sum_of_commute s f h,
end
lemma exp_nsmul (n : ℕ) (A : matrix m m 𝔸) :
exp 𝕂 (n • A) = exp 𝕂 A ^ n :=
begin
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact exp_nsmul n A,
end
lemma is_unit_exp (A : matrix m m 𝔸) : is_unit (exp 𝕂 A) :=
begin
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact is_unit_exp _ A,
end
lemma exp_units_conj (U : (matrix m m 𝔸)ˣ) (A : matrix m m 𝔸) :
exp 𝕂 (↑U ⬝ A ⬝ ↑(U⁻¹) : matrix m m 𝔸) = ↑U ⬝ exp 𝕂 A ⬝ ↑(U⁻¹) :=
begin
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact exp_units_conj _ U A,
end
lemma exp_units_conj' (U : (matrix m m 𝔸)ˣ) (A : matrix m m 𝔸) :
exp 𝕂 (↑(U⁻¹) ⬝ A ⬝ U : matrix m m 𝔸) = ↑(U⁻¹) ⬝ exp 𝕂 A ⬝ U :=
exp_units_conj 𝕂 U⁻¹ A
end normed
section normed_comm
variables [is_R_or_C 𝕂]
[fintype m] [decidable_eq m]
[fintype n] [decidable_eq n]
[Π i, fintype (n' i)] [Π i, decidable_eq (n' i)]
[normed_comm_ring 𝔸] [normed_algebra 𝕂 𝔸] [complete_space 𝔸]
lemma exp_neg (A : matrix m m 𝔸) : exp 𝕂 (-A) = (exp 𝕂 A)⁻¹ :=
begin
rw nonsing_inv_eq_ring_inverse,
letI : semi_normed_ring (matrix m m 𝔸) := matrix.linfty_op_semi_normed_ring,
letI : normed_ring (matrix m m 𝔸) := matrix.linfty_op_normed_ring,
letI : normed_algebra 𝕂 (matrix m m 𝔸) := matrix.linfty_op_normed_algebra,
exact (ring.inverse_exp _ A).symm,
end
lemma exp_zsmul (z : ℤ) (A : matrix m m 𝔸) : exp 𝕂 (z • A) = exp 𝕂 A ^ z :=
begin
obtain ⟨n, rfl | rfl⟩ := z.eq_coe_or_neg,
{ rw [zpow_coe_nat, coe_nat_zsmul, exp_nsmul] },
{ have : is_unit (exp 𝕂 A).det := (matrix.is_unit_iff_is_unit_det _).mp (is_unit_exp _ _),
rw [matrix.zpow_neg this, zpow_coe_nat, neg_smul,
exp_neg, coe_nat_zsmul, exp_nsmul] },
end
lemma exp_conj (U : matrix m m 𝔸) (A : matrix m m 𝔸) (hy : is_unit U) :
exp 𝕂 (U ⬝ A ⬝ U⁻¹) = U ⬝ exp 𝕂 A ⬝ U⁻¹ :=
let ⟨u, hu⟩ := hy in hu ▸ by simpa only [matrix.coe_units_inv] using exp_units_conj 𝕂 u A
lemma exp_conj' (U : matrix m m 𝔸) (A : matrix m m 𝔸) (hy : is_unit U) :
exp 𝕂 (U⁻¹ ⬝ A ⬝ U) = U⁻¹ ⬝ exp 𝕂 A ⬝ U :=
let ⟨u, hu⟩ := hy in hu ▸ by simpa only [matrix.coe_units_inv] using exp_units_conj' 𝕂 u A
end normed_comm
end matrix
/-
Copyright (c) 2020 Thomas Browning. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Thomas Browning
-/
import algebra.gcd_monoid.multiset
import combinatorics.partition
import group_theory.perm.cycle.basic
import ring_theory.int.basic
import tactic.linarith
/-!
# Cycle Types
In this file we define the cycle type of a permutation.
## Main definitions
- `σ.cycle_type` where `σ` is a permutation of a `fintype`
- `σ.partition` where `σ` is a permutation of a `fintype`
## Main results
- `sum_cycle_type` : The sum of `σ.cycle_type` equals `σ.support.card`
- `lcm_cycle_type` : The lcm of `σ.cycle_type` equals `order_of σ`
- `is_conj_iff_cycle_type_eq` : Two permutations are conjugate if and only if they have the same
cycle type.
- `exists_prime_order_of_dvd_card`: For every prime `p` dividing the order of a finite group `G`
there exists an element of order `p` in `G`. This is known as Cauchy's theorem.
-/
namespace equiv.perm
open equiv list multiset
variables {α : Type*} [fintype α]
section cycle_type
variables [decidable_eq α]
/-- The cycle type of a permutation -/
def cycle_type (σ : perm α) : multiset ℕ :=
σ.cycle_factors_finset.1.map (finset.card ∘ support)
lemma cycle_type_def (σ : perm α) :
σ.cycle_type = σ.cycle_factors_finset.1.map (finset.card ∘ support) := rfl
lemma cycle_type_eq' {σ : perm α} (s : finset (perm α))
(h1 : ∀ f : perm α, f ∈ s → f.is_cycle) (h2 : ∀ (a ∈ s) (b ∈ s), a ≠ b → disjoint a b)
(h0 : s.noncomm_prod id
(λ a ha b hb, (em (a = b)).by_cases (λ h, h ▸ commute.refl a)
(set.pairwise.mono' (λ _ _, disjoint.commute) h2 ha hb)) = σ) :
σ.cycle_type = s.1.map (finset.card ∘ support) :=
begin
rw cycle_type_def,
congr,
rw cycle_factors_finset_eq_finset,
exact ⟨h1, h2, h0⟩
end
lemma cycle_type_eq {σ : perm α} (l : list (perm α)) (h0 : l.prod = σ)
(h1 : ∀ σ : perm α, σ ∈ l → σ.is_cycle) (h2 : l.pairwise disjoint) :
σ.cycle_type = l.map (finset.card ∘ support) :=
begin
have hl : l.nodup := nodup_of_pairwise_disjoint_cycles h1 h2,
rw cycle_type_eq' l.to_finset,
{ simp [list.dedup_eq_self.mpr hl] },
{ simpa using h1 },
{ simpa [hl] using h0 },
{ simpa [list.dedup_eq_self.mpr hl] using h2.forall disjoint.symmetric }
end
lemma cycle_type_one : (1 : perm α).cycle_type = 0 :=
cycle_type_eq [] rfl (λ _, false.elim) pairwise.nil
lemma cycle_type_eq_zero {σ : perm α} : σ.cycle_type = 0 ↔ σ = 1 :=
by simp [cycle_type_def, cycle_factors_finset_eq_empty_iff]
lemma card_cycle_type_eq_zero {σ : perm α} : σ.cycle_type.card = 0 ↔ σ = 1 :=
by rw [card_eq_zero, cycle_type_eq_zero]
lemma two_le_of_mem_cycle_type {σ : perm α} {n : ℕ} (h : n ∈ σ.cycle_type) : 2 ≤ n :=
begin
simp only [cycle_type_def, ←finset.mem_def, function.comp_app, multiset.mem_map,
mem_cycle_factors_finset_iff] at h,
obtain ⟨_, ⟨hc, -⟩, rfl⟩ := h,
exact hc.two_le_card_support
end
lemma one_lt_of_mem_cycle_type {σ : perm α} {n : ℕ} (h : n ∈ σ.cycle_type) : 1 < n :=
two_le_of_mem_cycle_type h
lemma is_cycle.cycle_type {σ : perm α} (hσ : is_cycle σ) : σ.cycle_type = [σ.support.card] :=
cycle_type_eq [σ] (mul_one σ) (λ τ hτ, (congr_arg is_cycle (list.mem_singleton.mp hτ)).mpr hσ)
(pairwise_singleton disjoint σ)
lemma card_cycle_type_eq_one {σ : perm α} : σ.cycle_type.card = 1 ↔ σ.is_cycle :=
begin
rw card_eq_one,
simp_rw [cycle_type_def, multiset.map_eq_singleton, ←finset.singleton_val,
finset.val_inj, cycle_factors_finset_eq_singleton_iff],
split,
{ rintro ⟨_, _, ⟨h, -⟩, -⟩,
exact h },
{ intro h,
use [σ.support.card, σ],
simp [h] }
end
lemma disjoint.cycle_type {σ τ : perm α} (h : disjoint σ τ) :
(σ * τ).cycle_type = σ.cycle_type + τ.cycle_type :=
begin
rw [cycle_type_def, cycle_type_def, cycle_type_def, h.cycle_factors_finset_mul_eq_union,
←multiset.map_add, finset.union_val, multiset.add_eq_union_iff_disjoint.mpr _],
rw [←finset.disjoint_val],
exact h.disjoint_cycle_factors_finset
end
lemma cycle_type_inv (σ : perm α) : σ⁻¹.cycle_type = σ.cycle_type :=
cycle_induction_on (λ τ : perm α, τ⁻¹.cycle_type = τ.cycle_type) σ rfl
(λ σ hσ, by rw [hσ.cycle_type, hσ.inv.cycle_type, support_inv])
(λ σ τ hστ hc hσ hτ, by rw [mul_inv_rev, hστ.cycle_type, ←hσ, ←hτ, add_comm,
disjoint.cycle_type (λ x, or.imp (λ h : τ x = x, inv_eq_iff_eq.mpr h.symm)
(λ h : σ x = x, inv_eq_iff_eq.mpr h.symm) (hστ x).symm)])
lemma cycle_type_conj {σ τ : perm α} : (τ * σ * τ⁻¹).cycle_type = σ.cycle_type :=
begin
revert τ,
apply cycle_induction_on _ σ,
{ intro,
simp },
{ intros σ hσ τ,
rw [hσ.cycle_type, hσ.is_cycle_conj.cycle_type, card_support_conj] },
{ intros σ τ hd hc hσ hτ π,
rw [← conj_mul, hd.cycle_type, disjoint.cycle_type, hσ, hτ],
intro a,
apply (hd (π⁻¹ a)).imp _ _;
{ intro h, rw [perm.mul_apply, perm.mul_apply, h, apply_inv_self] } }
end
lemma sum_cycle_type (σ : perm α) : σ.cycle_type.sum = σ.support.card :=
cycle_induction_on (λ τ : perm α, τ.cycle_type.sum = τ.support.card) σ
(by rw [cycle_type_one, sum_zero, support_one, finset.card_empty])
(λ σ hσ, by rw [hσ.cycle_type, coe_sum, list.sum_singleton])
(λ σ τ hστ hc hσ hτ, by rw [hστ.cycle_type, sum_add, hσ, hτ, hστ.card_support_mul])
lemma sign_of_cycle_type' (σ : perm α) :
sign σ = (σ.cycle_type.map (λ n, -(-1 : ℤˣ) ^ n)).prod :=
cycle_induction_on (λ τ : perm α, sign τ = (τ.cycle_type.map (λ n, -(-1 : ℤˣ) ^ n)).prod) σ
(by rw [sign_one, cycle_type_one, multiset.map_zero, prod_zero])
(λ σ hσ, by rw [hσ.sign, hσ.cycle_type, coe_map, coe_prod,
list.map_singleton, list.prod_singleton])
(λ σ τ hστ hc hσ hτ, by rw [sign_mul, hσ, hτ, hστ.cycle_type, multiset.map_add, prod_add])
lemma sign_of_cycle_type (f : perm α) :
sign f = (-1 : ℤˣ)^(f.cycle_type.sum + f.cycle_type.card) :=
cycle_induction_on
(λ f : perm α, sign f = (-1 : ℤˣ)^(f.cycle_type.sum + f.cycle_type.card))
f
( -- base_one
by rw [equiv.perm.cycle_type_one, sign_one, multiset.sum_zero, multiset.card_zero, pow_zero] )
( -- base_cycles
λ f hf,
by rw [equiv.perm.is_cycle.cycle_type hf, hf.sign,
coe_sum, list.sum_cons, sum_nil, add_zero, coe_card, length_singleton,
pow_add, pow_one, mul_comm, neg_mul, one_mul] )
( -- induction_disjoint
λ f g hfg hf Pf Pg,
by rw [equiv.perm.disjoint.cycle_type hfg,
multiset.sum_add, multiset.card_add,← add_assoc,
add_comm f.cycle_type.sum g.cycle_type.sum,
add_assoc g.cycle_type.sum _ _,
add_comm g.cycle_type.sum _,
add_assoc, pow_add,
← Pf, ← Pg,
equiv.perm.sign_mul])
lemma lcm_cycle_type (σ : perm α) : σ.cycle_type.lcm = order_of σ :=
cycle_induction_on (λ τ : perm α, τ.cycle_type.lcm = order_of τ) σ
(by rw [cycle_type_one, lcm_zero, order_of_one])
(λ σ hσ, by rw [hσ.cycle_type, ←singleton_coe, ←singleton_eq_cons, lcm_singleton,
order_of_is_cycle hσ, normalize_eq])
(λ σ τ hστ hc hσ hτ, by rw [hστ.cycle_type, lcm_add, lcm_eq_nat_lcm, hστ.order_of, hσ, hτ])
lemma dvd_of_mem_cycle_type {σ : perm α} {n : ℕ} (h : n ∈ σ.cycle_type) : n ∣ order_of σ :=
begin
rw ← lcm_cycle_type,
exact dvd_lcm h,
end
lemma order_of_cycle_of_dvd_order_of (f : perm α) (x : α) :
order_of (cycle_of f x) ∣ order_of f :=
begin
by_cases hx : f x = x,
{ rw ←cycle_of_eq_one_iff at hx,
simp [hx] },
{ refine dvd_of_mem_cycle_type _,
rw [cycle_type, multiset.mem_map],
refine ⟨f.cycle_of x, _, _⟩,
{ rwa [←finset.mem_def, cycle_of_mem_cycle_factors_finset_iff, mem_support] },
{ simp [order_of_is_cycle (is_cycle_cycle_of _ hx)] } }
end
lemma two_dvd_card_support {σ : perm α} (hσ : σ ^ 2 = 1) : 2 ∣ σ.support.card :=
(congr_arg (has_dvd.dvd 2) σ.sum_cycle_type).mp
(multiset.dvd_sum (λ n hn, by rw le_antisymm (nat.le_of_dvd zero_lt_two $
(dvd_of_mem_cycle_type hn).trans $ order_of_dvd_of_pow_eq_one hσ) (two_le_of_mem_cycle_type hn)))
lemma cycle_type_prime_order {σ : perm α} (hσ : (order_of σ).prime) :
∃ n : ℕ, σ.cycle_type = repeat (order_of σ) (n + 1) :=
begin
rw eq_repeat_of_mem (λ n hn, or_iff_not_imp_left.mp
(hσ.eq_one_or_self_of_dvd n (dvd_of_mem_cycle_type hn)) (one_lt_of_mem_cycle_type hn).ne'),
use σ.cycle_type.card - 1,
rw tsub_add_cancel_of_le,
rw [nat.succ_le_iff, pos_iff_ne_zero, ne, card_cycle_type_eq_zero],
intro H,
rw [H, order_of_one] at hσ,
exact hσ.ne_one rfl,
end
lemma is_cycle_of_prime_order {σ : perm α} (h1 : (order_of σ).prime)
(h2 : σ.support.card < 2 * (order_of σ)) : σ.is_cycle :=
begin
obtain ⟨n, hn⟩ := cycle_type_prime_order h1,
rw [←σ.sum_cycle_type, hn, multiset.sum_repeat, nsmul_eq_mul, nat.cast_id, mul_lt_mul_right
(order_of_pos σ), nat.succ_lt_succ_iff, nat.lt_succ_iff, nat.le_zero_iff] at h2,
rw [←card_cycle_type_eq_one, hn, card_repeat, h2],
end
lemma cycle_type_le_of_mem_cycle_factors_finset {f g : perm α}
(hf : f ∈ g.cycle_factors_finset) :
f.cycle_type ≤ g.cycle_type :=
begin
rw mem_cycle_factors_finset_iff at hf,
rw [cycle_type_def, cycle_type_def, hf.left.cycle_factors_finset_eq_singleton],
refine map_le_map _,
simpa [←finset.mem_def, mem_cycle_factors_finset_iff] using hf
end
lemma cycle_type_mul_mem_cycle_factors_finset_eq_sub {f g : perm α}
(hf : f ∈ g.cycle_factors_finset) :
(g * f⁻¹).cycle_type = g.cycle_type - f.cycle_type :=
begin
suffices : (g * f⁻¹).cycle_type + f.cycle_type = g.cycle_type - f.cycle_type + f.cycle_type,
{ rw tsub_add_cancel_of_le (cycle_type_le_of_mem_cycle_factors_finset hf) at this,
simp [←this] },
simp [←(disjoint_mul_inv_of_mem_cycle_factors_finset hf).cycle_type,
tsub_add_cancel_of_le (cycle_type_le_of_mem_cycle_factors_finset hf)]
end
theorem is_conj_of_cycle_type_eq {σ τ : perm α} (h : cycle_type σ = cycle_type τ) : is_conj σ τ :=
begin
revert τ,
apply cycle_induction_on _ σ,
{ intros τ h,
rw [cycle_type_one, eq_comm, cycle_type_eq_zero] at h,
rw h },
{ intros σ hσ τ hστ,
have hτ := card_cycle_type_eq_one.2 hσ,
rw [hστ, card_cycle_type_eq_one] at hτ,
apply hσ.is_conj hτ,
rw [hσ.cycle_type, hτ.cycle_type, coe_eq_coe, singleton_perm] at hστ,
simp only [and_true, eq_self_iff_true] at hστ,
exact hστ },
{ intros σ τ hστ hσ h1 h2 π hπ,
rw [hστ.cycle_type] at hπ,
{ have h : σ.support.card ∈ map (finset.card ∘ perm.support) π.cycle_factors_finset.val,
{ simp [←cycle_type_def, ←hπ, hσ.cycle_type] },
obtain ⟨σ', hσ'l, hσ'⟩ := multiset.mem_map.mp h,
have key : is_conj (σ' * (π * σ'⁻¹)) π,
{ rw is_conj_iff,
use σ'⁻¹,
simp [mul_assoc] },
refine is_conj.trans _ key,
have hs : σ.cycle_type = σ'.cycle_type,
{ rw [←finset.mem_def, mem_cycle_factors_finset_iff] at hσ'l,
rw [hσ.cycle_type, ←hσ', hσ'l.left.cycle_type] },
refine hστ.is_conj_mul (h1 hs) (h2 _) _,
{ rw [cycle_type_mul_mem_cycle_factors_finset_eq_sub, ←hπ, add_comm, hs,
add_tsub_cancel_right],
rwa finset.mem_def },
{ exact (disjoint_mul_inv_of_mem_cycle_factors_finset hσ'l).symm } } }
end
theorem is_conj_iff_cycle_type_eq {σ τ : perm α} :
is_conj σ τ ↔ σ.cycle_type = τ.cycle_type :=
⟨λ h, begin
obtain ⟨π, rfl⟩ := is_conj_iff.1 h,
rw cycle_type_conj,
end, is_conj_of_cycle_type_eq⟩
@[simp] lemma cycle_type_extend_domain {β : Type*} [fintype β] [decidable_eq β]
{p : β → Prop} [decidable_pred p] (f : α ≃ subtype p) {g : perm α} :
cycle_type (g.extend_domain f) = cycle_type g :=
begin
apply cycle_induction_on _ g,
{ rw [extend_domain_one, cycle_type_one, cycle_type_one] },
{ intros σ hσ,
rw [(hσ.extend_domain f).cycle_type, hσ.cycle_type, card_support_extend_domain] },
{ intros σ τ hd hc hσ hτ,
rw [hd.cycle_type, ← extend_domain_mul, (hd.extend_domain f).cycle_type, hσ, hτ] }
end
lemma mem_cycle_type_iff {n : ℕ} {σ : perm α} :
n ∈ cycle_type σ ↔ ∃ c τ : perm α, σ = c * τ ∧ disjoint c τ ∧ is_cycle c ∧ c.support.card = n :=
begin
split,
{ intro h,
obtain ⟨l, rfl, hlc, hld⟩ := trunc_cycle_factors σ,
rw cycle_type_eq _ rfl hlc hld at h,
obtain ⟨c, cl, rfl⟩ := list.exists_of_mem_map h,
rw (list.perm_cons_erase cl).pairwise_iff (λ _ _ hd, _) at hld,
swap, { exact hd.symm },
refine ⟨c, (l.erase c).prod, _, _, hlc _ cl, rfl⟩,
{ rw [← list.prod_cons,
(list.perm_cons_erase cl).symm.prod_eq' (hld.imp (λ _ _, disjoint.commute))] },
{ exact disjoint_prod_right _ (λ g, list.rel_of_pairwise_cons hld) } },
{ rintros ⟨c, t, rfl, hd, hc, rfl⟩,
simp [hd.cycle_type, hc.cycle_type] }
end
lemma le_card_support_of_mem_cycle_type {n : ℕ} {σ : perm α} (h : n ∈ cycle_type σ) :
n ≤ σ.support.card :=
(le_sum_of_mem h).trans (le_of_eq σ.sum_cycle_type)
lemma cycle_type_of_card_le_mem_cycle_type_add_two {n : ℕ} {g : perm α}
(hn2 : fintype.card α < n + 2) (hng : n ∈ g.cycle_type) :
g.cycle_type = {n} :=
begin
obtain ⟨c, g', rfl, hd, hc, rfl⟩ := mem_cycle_type_iff.1 hng,
by_cases g'1 : g' = 1,
{ rw [hd.cycle_type, hc.cycle_type, multiset.singleton_eq_cons, multiset.singleton_coe,
g'1, cycle_type_one, add_zero] },
contrapose! hn2,
apply le_trans _ (c * g').support.card_le_univ,
rw [hd.card_support_mul],
exact add_le_add_left (two_le_card_support_of_ne_one g'1) _,
end
end cycle_type
lemma card_compl_support_modeq [decidable_eq α] {p n : ℕ} [hp : fact p.prime] {σ : perm α}
(hσ : σ ^ p ^ n = 1) : σ.supportᶜ.card ≡ fintype.card α [MOD p] :=
begin
rw [nat.modeq_iff_dvd' σ.supportᶜ.card_le_univ, ←finset.card_compl, compl_compl],
refine (congr_arg _ σ.sum_cycle_type).mp (multiset.dvd_sum (λ k hk, _)),
obtain ⟨m, -, hm⟩ := (nat.dvd_prime_pow hp.out).mp (order_of_dvd_of_pow_eq_one hσ),
obtain ⟨l, -, rfl⟩ := (nat.dvd_prime_pow hp.out).mp
((congr_arg _ hm).mp (dvd_of_mem_cycle_type hk)),
exact dvd_pow_self _ (λ h, (one_lt_of_mem_cycle_type hk).ne $ by rw [h, pow_zero]),
end
lemma exists_fixed_point_of_prime {p n : ℕ} [hp : fact p.prime] (hα : ¬ p ∣ fintype.card α)
{σ : perm α} (hσ : σ ^ p ^ n = 1) : ∃ a : α, σ a = a :=
begin
classical,
contrapose! hα,
simp_rw ← mem_support at hα,
exact nat.modeq_zero_iff_dvd.mp ((congr_arg _ (finset.card_eq_zero.mpr (compl_eq_bot.mpr
(finset.eq_univ_iff_forall.mpr hα)))).mp (card_compl_support_modeq hσ).symm),
end
lemma exists_fixed_point_of_prime' {p n : ℕ} [hp : fact p.prime] (hα : p ∣ fintype.card α)
{σ : perm α} (hσ : σ ^ p ^ n = 1) {a : α} (ha : σ a = a) : ∃ b : α, σ b = b ∧ b ≠ a :=
begin
classical,
have h : ∀ b : α, b ∈ σ.supportᶜ ↔ σ b = b :=
λ b, by rw [finset.mem_compl, mem_support, not_not],
obtain ⟨b, hb1, hb2⟩ := finset.exists_ne_of_one_lt_card (lt_of_lt_of_le hp.out.one_lt
(nat.le_of_dvd (finset.card_pos.mpr ⟨a, (h a).mpr ha⟩) (nat.modeq_zero_iff_dvd.mp
((card_compl_support_modeq hσ).trans (nat.modeq_zero_iff_dvd.mpr hα))))) a,
exact ⟨b, (h b).mp hb1, hb2⟩,
end
lemma is_cycle_of_prime_order' {σ : perm α} (h1 : (order_of σ).prime)
(h2 : fintype.card α < 2 * (order_of σ)) : σ.is_cycle :=
begin
classical,
exact is_cycle_of_prime_order h1 (lt_of_le_of_lt σ.support.card_le_univ h2),
end
section cauchy
variables (G : Type*) [group G] (n : ℕ)
/-- The type of vectors with terms from `G`, length `n`, and product equal to `1:G`. -/
def vectors_prod_eq_one : set (vector G n) :=
{v | v.to_list.prod = 1}
namespace vectors_prod_eq_one
lemma mem_iff {n : ℕ} (v : vector G n) :
v ∈ vectors_prod_eq_one G n ↔ v.to_list.prod = 1 := iff.rfl
lemma zero_eq : vectors_prod_eq_one G 0 = {vector.nil} :=
set.eq_singleton_iff_unique_mem.mpr ⟨eq.refl (1 : G), λ v hv, v.eq_nil⟩
lemma one_eq : vectors_prod_eq_one G 1 = {vector.nil.cons 1} :=
begin
simp_rw [set.eq_singleton_iff_unique_mem, mem_iff,
vector.to_list_singleton, list.prod_singleton, vector.head_cons],
exact ⟨rfl, λ v hv, v.cons_head_tail.symm.trans (congr_arg2 vector.cons hv v.tail.eq_nil)⟩,
end
instance zero_unique : unique (vectors_prod_eq_one G 0) :=
by { rw zero_eq, exact set.unique_singleton vector.nil }
instance one_unique : unique (vectors_prod_eq_one G 1) :=
by { rw one_eq, exact set.unique_singleton (vector.nil.cons 1) }
/-- Given a vector `v` of length `n`, make a vector of length `n + 1` whose product is `1`,
by appending the inverse of the product of `v`. -/
@[simps] def vector_equiv : vector G n ≃ vectors_prod_eq_one G (n + 1) :=
{ to_fun := λ v, ⟨v.to_list.prod⁻¹ ::ᵥ v,
by rw [mem_iff, vector.to_list_cons, list.prod_cons, inv_mul_self]⟩,
inv_fun := λ v, v.1.tail,
left_inv := λ v, v.tail_cons v.to_list.prod⁻¹,
right_inv := λ v, subtype.ext ((congr_arg2 vector.cons (eq_inv_of_mul_eq_one_left (by
{ rw [←list.prod_cons, ←vector.to_list_cons, v.1.cons_head_tail],
exact v.2 })).symm rfl).trans v.1.cons_head_tail) }
/-- Given a vector `v` of length `n` whose product is 1, make a vector of length `n - 1`,
by deleting the last entry of `v`. -/
def equiv_vector : vectors_prod_eq_one G n ≃ vector G (n - 1) :=
((vector_equiv G (n - 1)).trans (if hn : n = 0 then (show vectors_prod_eq_one G (n - 1 + 1) ≃
vectors_prod_eq_one G n, by { rw hn, apply equiv_of_unique })
else by rw tsub_add_cancel_of_le (nat.pos_of_ne_zero hn).nat_succ_le)).symm
instance [fintype G] : fintype (vectors_prod_eq_one G n) :=
fintype.of_equiv (vector G (n - 1)) (equiv_vector G n).symm
lemma card [fintype G] :
fintype.card (vectors_prod_eq_one G n) = fintype.card G ^ (n - 1) :=
(fintype.card_congr (equiv_vector G n)).trans (card_vector (n - 1))
variables {G n} {g : G} (v : vectors_prod_eq_one G n) (j k : ℕ)
/-- Rotate a vector whose product is 1. -/
def rotate : vectors_prod_eq_one G n :=
⟨⟨_, (v.1.1.length_rotate k).trans v.1.2⟩, list.prod_rotate_eq_one_of_prod_eq_one v.2 k⟩
lemma rotate_zero : rotate v 0 = v :=
subtype.ext (subtype.ext v.1.1.rotate_zero)
lemma rotate_rotate : rotate (rotate v j) k = rotate v (j + k) :=
subtype.ext (subtype.ext (v.1.1.rotate_rotate j k))
lemma rotate_length : rotate v n = v :=
subtype.ext (subtype.ext ((congr_arg _ v.1.2.symm).trans v.1.1.rotate_length))
end vectors_prod_eq_one
/-- For every prime `p` dividing the order of a finite group `G` there exists an element of order
`p` in `G`. This is known as Cauchy's theorem. -/
lemma _root_.exists_prime_order_of_dvd_card {G : Type*} [group G] [fintype G] (p : ℕ)
[hp : fact p.prime] (hdvd : p ∣ fintype.card G) : ∃ x : G, order_of x = p :=
begin
have hp' : p - 1 ≠ 0 := mt tsub_eq_zero_iff_le.mp (not_le_of_lt hp.out.one_lt),
have Scard := calc p ∣ fintype.card G ^ (p - 1) : hdvd.trans (dvd_pow (dvd_refl _) hp')
... = fintype.card (vectors_prod_eq_one G p) : (vectors_prod_eq_one.card G p).symm,
let f : ℕ → vectors_prod_eq_one G p → vectors_prod_eq_one G p :=
λ k v, vectors_prod_eq_one.rotate v k,
have hf1 : ∀ v, f 0 v = v := vectors_prod_eq_one.rotate_zero,
have hf2 : ∀ j k v, f k (f j v) = f (j + k) v :=
λ j k v, vectors_prod_eq_one.rotate_rotate v j k,
have hf3 : ∀ v, f p v = v := vectors_prod_eq_one.rotate_length,
let σ := equiv.mk (f 1) (f (p - 1))
(λ s, by rw [hf2, add_tsub_cancel_of_le hp.out.one_lt.le, hf3])
(λ s, by rw [hf2, tsub_add_cancel_of_le hp.out.one_lt.le, hf3]),
have hσ : ∀ k v, (σ ^ k) v = f k v :=
λ k v, nat.rec (hf1 v).symm (λ k hk, eq.trans (by exact congr_arg σ hk) (hf2 k 1 v)) k,
replace hσ : σ ^ (p ^ 1) = 1 := perm.ext (λ v, by rw [pow_one, hσ, hf3, one_apply]),
let v₀ : vectors_prod_eq_one G p := ⟨vector.repeat 1 p, (list.prod_repeat 1 p).trans (one_pow p)⟩,
have hv₀ : σ v₀ = v₀ := subtype.ext (subtype.ext (list.rotate_repeat (1 : G) p 1)),
obtain ⟨v, hv1, hv2⟩ := exists_fixed_point_of_prime' Scard hσ hv₀,
refine exists_imp_exists (λ g hg, order_of_eq_prime _ (λ hg', hv2 _))
(list.rotate_one_eq_self_iff_eq_repeat.mp (subtype.ext_iff.mp (subtype.ext_iff.mp hv1))),
{ rw [←list.prod_repeat, ←v.1.2, ←hg, (show v.val.val.prod = 1, from v.2)] },
{ rw [subtype.ext_iff_val, subtype.ext_iff_val, hg, hg', v.1.2],
refl },
end
/-- For every prime `p` dividing the order of a finite additive group `G` there exists an element of
order `p` in `G`. This is the additive version of Cauchy's theorem. -/
lemma _root_.exists_prime_add_order_of_dvd_card {G : Type*} [add_group G] [fintype G] (p : ℕ)
[hp : fact p.prime] (hdvd : p ∣ fintype.card G) : ∃ x : G, add_order_of x = p :=
@exists_prime_order_of_dvd_card (multiplicative G) _ _ _ _ hdvd
attribute [to_additive exists_prime_add_order_of_dvd_card] exists_prime_order_of_dvd_card
end cauchy
lemma subgroup_eq_top_of_swap_mem [decidable_eq α] {H : subgroup (perm α)}
[d : decidable_pred (∈ H)] {τ : perm α} (h0 : (fintype.card α).prime)
(h1 : fintype.card α ∣ fintype.card H) (h2 : τ ∈ H) (h3 : is_swap τ) :
H = ⊤ :=
begin
haveI : fact (fintype.card α).prime := ⟨h0⟩,
obtain ⟨σ, hσ⟩ := exists_prime_order_of_dvd_card (fintype.card α) h1,
have hσ1 : order_of (σ : perm α) = fintype.card α := (order_of_subgroup σ).trans hσ,
have hσ2 : is_cycle ↑σ := is_cycle_of_prime_order'' h0 hσ1,
have hσ3 : (σ : perm α).support = ⊤ :=
finset.eq_univ_of_card (σ : perm α).support ((order_of_is_cycle hσ2).symm.trans hσ1),
have hσ4 : subgroup.closure {↑σ, τ} = ⊤ := closure_prime_cycle_swap h0 hσ2 hσ3 h3,
rw [eq_top_iff, ←hσ4, subgroup.closure_le, set.insert_subset, set.singleton_subset_iff],
exact ⟨subtype.mem σ, h2⟩,
end
section partition
variables [decidable_eq α]
/-- The partition corresponding to a permutation -/
def partition (σ : perm α) : (fintype.card α).partition :=
{ parts := σ.cycle_type + repeat 1 (fintype.card α - σ.support.card),
parts_pos := λ n hn,
begin
cases mem_add.mp hn with hn hn,
{ exact zero_lt_one.trans (one_lt_of_mem_cycle_type hn) },
{ exact lt_of_lt_of_le zero_lt_one (ge_of_eq (multiset.eq_of_mem_repeat hn)) },
end,
parts_sum := by rw [sum_add, sum_cycle_type, multiset.sum_repeat, nsmul_eq_mul,
nat.cast_id, mul_one, add_tsub_cancel_of_le σ.support.card_le_univ] }
lemma parts_partition {σ : perm α} :
σ.partition.parts = σ.cycle_type + repeat 1 (fintype.card α - σ.support.card) := rfl
lemma filter_parts_partition_eq_cycle_type {σ : perm α} :
(partition σ).parts.filter (λ n, 2 ≤ n) = σ.cycle_type :=
begin
rw [parts_partition, filter_add, multiset.filter_eq_self.2 (λ _, two_le_of_mem_cycle_type),
multiset.filter_eq_nil.2 (λ a h, _), add_zero],
rw multiset.eq_of_mem_repeat h,
dec_trivial
end
lemma partition_eq_of_is_conj {σ τ : perm α} :
is_conj σ τ ↔ σ.partition = τ.partition :=
begin
rw [is_conj_iff_cycle_type_eq],
refine ⟨λ h, _, λ h, _⟩,
{ rw [nat.partition.ext_iff, parts_partition, parts_partition,
← sum_cycle_type, ← sum_cycle_type, h] },
{ rw [← filter_parts_partition_eq_cycle_type, ← filter_parts_partition_eq_cycle_type, h] }
end
end partition
/-!
### 3-cycles
-/
/-- A three-cycle is a cycle of length 3. -/
def is_three_cycle [decidable_eq α] (σ : perm α) : Prop := σ.cycle_type = {3}
namespace is_three_cycle
variables [decidable_eq α] {σ : perm α}
lemma cycle_type (h : is_three_cycle σ) : σ.cycle_type = {3} := h
lemma card_support (h : is_three_cycle σ) : σ.support.card = 3 :=
by rw [←sum_cycle_type, h.cycle_type, multiset.sum_singleton]
lemma _root_.card_support_eq_three_iff : σ.support.card = 3 ↔ σ.is_three_cycle :=
begin
refine ⟨λ h, _, is_three_cycle.card_support⟩,
by_cases h0 : σ.cycle_type = 0,
{ rw [←sum_cycle_type, h0, sum_zero] at h,
exact (ne_of_lt zero_lt_three h).elim },
obtain ⟨n, hn⟩ := exists_mem_of_ne_zero h0,
by_cases h1 : σ.cycle_type.erase n = 0,
{ rw [←sum_cycle_type, ←cons_erase hn, h1, ←singleton_eq_cons, multiset.sum_singleton] at h,
rw [is_three_cycle, ←cons_erase hn, h1, h, singleton_eq_cons] },
obtain ⟨m, hm⟩ := exists_mem_of_ne_zero h1,
rw [←sum_cycle_type, ←cons_erase hn, ←cons_erase hm, multiset.sum_cons, multiset.sum_cons] at h,
-- TODO: linarith [...] should solve this directly
have : ∀ {k}, 2 ≤ m → 2 ≤ n → n + (m + k) = 3 → false, { intros, linarith },
cases this (two_le_of_mem_cycle_type (mem_of_mem_erase hm)) (two_le_of_mem_cycle_type hn) h,
end
lemma is_cycle (h : is_three_cycle σ) : is_cycle σ :=
by rw [←card_cycle_type_eq_one, h.cycle_type, card_singleton]
lemma sign (h : is_three_cycle σ) : sign σ = 1 :=
begin
rw [equiv.perm.sign_of_cycle_type, h.cycle_type],
refl,
end
lemma inv {f : perm α} (h : is_three_cycle f) : is_three_cycle (f⁻¹) :=
by rwa [is_three_cycle, cycle_type_inv]
@[simp] lemma inv_iff {f : perm α} : is_three_cycle (f⁻¹) ↔ is_three_cycle f :=
⟨by { rw ← inv_inv f, apply inv }, inv⟩
lemma order_of {g : perm α} (ht : is_three_cycle g) :
order_of g = 3 :=
by rw [←lcm_cycle_type, ht.cycle_type, multiset.lcm_singleton, normalize_eq]
lemma is_three_cycle_sq {g : perm α} (ht : is_three_cycle g) :
is_three_cycle (g * g) :=
begin
rw [←pow_two, ←card_support_eq_three_iff, support_pow_coprime, ht.card_support],
rw [ht.order_of, nat.coprime_iff_gcd_eq_one],
norm_num,
end
end is_three_cycle
section
variable [decidable_eq α]
lemma is_three_cycle_swap_mul_swap_same
{a b c : α} (ab : a ≠ b) (ac : a ≠ c) (bc : b ≠ c) :
is_three_cycle (swap a b * swap a c) :=
begin
suffices h : support (swap a b * swap a c) = {a, b, c},
{ rw [←card_support_eq_three_iff, h],
simp [ab, ac, bc] },
apply le_antisymm ((support_mul_le _ _).trans (λ x, _)) (λ x hx, _),
{ simp [ab, ac, bc] },
{ simp only [finset.mem_insert, finset.mem_singleton] at hx,
rw mem_support,
simp only [perm.coe_mul, function.comp_app, ne.def],
obtain rfl | rfl | rfl := hx,
{ rw [swap_apply_left, swap_apply_of_ne_of_ne ac.symm bc.symm],
exact ac.symm },
{ rw [swap_apply_of_ne_of_ne ab.symm bc, swap_apply_right],
exact ab },
{ rw [swap_apply_right, swap_apply_left],
exact bc } }
end
open subgroup
lemma swap_mul_swap_same_mem_closure_three_cycles
{a b c : α} (ab : a ≠ b) (ac : a ≠ c) :
(swap a b * swap a c) ∈ closure {σ : perm α | is_three_cycle σ } :=
begin
by_cases bc : b = c,
{ subst bc,
simp [one_mem] },
exact subset_closure (is_three_cycle_swap_mul_swap_same ab ac bc)
end
lemma is_swap.mul_mem_closure_three_cycles {σ τ : perm α}
(hσ : is_swap σ) (hτ : is_swap τ) :
σ * τ ∈ closure {σ : perm α | is_three_cycle σ } :=
begin
obtain ⟨a, b, ab, rfl⟩ := hσ,
obtain ⟨c, d, cd, rfl⟩ := hτ,
by_cases ac : a = c,
{ subst ac,
exact swap_mul_swap_same_mem_closure_three_cycles ab cd },
have h' : swap a b * swap c d = swap a b * swap a c * (swap c a * swap c d),
{ simp [swap_comm c a, mul_assoc] },
rw h',
exact mul_mem (swap_mul_swap_same_mem_closure_three_cycles ab ac)
(swap_mul_swap_same_mem_closure_three_cycles (ne.symm ac) cd),
end
end
end equiv.perm
(* Title: HOL/ex/BigO.thy
Authors: Jeremy Avigad and Kevin Donnelly; proofs tidied by LCP
*)
section \<open>Big O notation\<close>
theory BigO
imports
Complex_Main
"HOL-Library.Function_Algebras"
"HOL-Library.Set_Algebras"
begin
text \<open>
This library is designed to support asymptotic ``big O'' calculations,
i.e.~reasoning with expressions of the form \<open>f = O(g)\<close> and \<open>f = g + O(h)\<close>.
An earlier version of this library is described in detail in \<^cite>\<open>"Avigad-Donnelly"\<close>.
The main changes in this version are as follows:
\<^item> We have eliminated the \<open>O\<close> operator on sets. (Most uses of this seem
to be inessential.)
\<^item> We no longer use \<open>+\<close> as output syntax for \<open>+o\<close>
\<^item> Lemmas involving \<open>sumr\<close> have been replaced by more general lemmas
involving \<open>sum\<close>.
\<^item> The library has been expanded, with e.g.~support for expressions of
the form \<open>f < g + O(h)\<close>.
Note also that, since the Big O library includes rules that demonstrate set
inclusion, to use the automated reasoners effectively with the library one
should redeclare the theorem \<open>subsetI\<close> as an intro rule, rather than as an
\<open>intro!\<close> rule, for example using \<^theory_text>\<open>declare subsetI [del, intro]\<close>.
\<close>
subsection \<open>Definitions\<close>
definition bigo :: "('a \<Rightarrow> 'b::linordered_idom) \<Rightarrow> ('a \<Rightarrow> 'b) set" ("(1O'(_'))")
where "O(f:: 'a \<Rightarrow> 'b) = {h. \<exists>c. \<forall>x. \<bar>h x\<bar> \<le> c * \<bar>f x\<bar>}"
lemma bigo_pos_const:
"(\<exists>c::'a::linordered_idom. \<forall>x. \<bar>h x\<bar> \<le> c * \<bar>f x\<bar>) \<longleftrightarrow>
(\<exists>c. 0 < c \<and> (\<forall>x. \<bar>h x\<bar> \<le> c * \<bar>f x\<bar>))"
by (metis (no_types, opaque_lifting) abs_ge_zero abs_not_less_zero abs_of_nonneg dual_order.trans
mult_1 zero_less_abs_iff zero_less_mult_iff zero_less_one)
lemma bigo_alt_def: "O(f) = {h. \<exists>c. 0 < c \<and> (\<forall>x. \<bar>h x\<bar> \<le> c * \<bar>f x\<bar>)}"
by (auto simp add: bigo_def bigo_pos_const)
lemma bigo_elt_subset [intro]: "f \<in> O(g) \<Longrightarrow> O(f) \<le> O(g)"
apply (auto simp add: bigo_alt_def)
by (metis (no_types, opaque_lifting) mult.assoc mult_le_cancel_iff2 order.trans
zero_less_mult_iff)
lemma bigo_refl [intro]: "f \<in> O(f)"
using bigo_def comm_monoid_mult_class.mult_1 dual_order.eq_iff by blast
lemma bigo_zero: "0 \<in> O(g)"
using bigo_def mult_le_cancel_left1 by fastforce
lemma bigo_zero2: "O(\<lambda>x. 0) = {\<lambda>x. 0}"
by (auto simp add: bigo_def)
lemma bigo_plus_self_subset [intro]: "O(f) + O(f) \<subseteq> O(f)"
apply (auto simp add: bigo_alt_def set_plus_def)
apply (rule_tac x = "c + ca" in exI)
by (smt (verit, best) abs_triangle_ineq add_mono add_pos_pos comm_semiring_class.distrib dual_order.trans)
lemma bigo_plus_idemp [simp]: "O(f) + O(f) = O(f)"
by (simp add: antisym bigo_plus_self_subset bigo_zero set_zero_plus2)
lemma bigo_plus_subset [intro]: "O(f + g) \<subseteq> O(f) + O(g)"
apply (rule subsetI)
apply (auto simp add: bigo_def bigo_pos_const func_plus set_plus_def)
apply (subst bigo_pos_const [symmetric])+
apply (rule_tac x = "\<lambda>n. if \<bar>g n\<bar> \<le> \<bar>f n\<bar> then x n else 0" in exI)
apply (rule conjI)
apply (rule_tac x = "c + c" in exI)
apply (clarsimp)
apply (smt (verit, ccfv_threshold) mult.commute abs_triangle_ineq add_le_cancel_left dual_order.trans mult.left_commute mult_2 mult_le_cancel_iff2)
apply (simp add: order_less_le)
apply (rule_tac x = "\<lambda>n. if \<bar>f n\<bar> < \<bar>g n\<bar> then x n else 0" in exI)
apply (rule conjI)
apply (rule_tac x = "c + c" in exI)
apply auto
apply (subgoal_tac "c * \<bar>f xa + g xa\<bar> \<le> (c + c) * \<bar>g xa\<bar>")
apply (metis mult_2 order.trans)
apply simp
done
lemma bigo_plus_subset2 [intro]: "A \<subseteq> O(f) \<Longrightarrow> B \<subseteq> O(f) \<Longrightarrow> A + B \<subseteq> O(f)"
using bigo_plus_idemp set_plus_mono2 by blast
lemma bigo_plus_eq: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. 0 \<le> g x \<Longrightarrow> O(f + g) = O(f) + O(g)"
apply (rule equalityI)
apply (rule bigo_plus_subset)
apply (simp add: bigo_alt_def set_plus_def func_plus)
apply clarify
apply (rule_tac x = "max c ca" in exI)
by (smt (verit, del_insts) add.commute abs_triangle_ineq add_mono_thms_linordered_field(3) distrib_left less_max_iff_disj linorder_not_less max.orderE max_mult_distrib_right order_le_less)
lemma bigo_bounded_alt: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. f x \<le> c * g x \<Longrightarrow> f \<in> O(g)"
by (simp add: bigo_def) (metis abs_mult abs_of_nonneg order_trans)
lemma bigo_bounded: "\<forall>x. 0 \<le> f x \<Longrightarrow> \<forall>x. f x \<le> g x \<Longrightarrow> f \<in> O(g)"
by (metis bigo_bounded_alt mult_1)
lemma bigo_bounded2: "\<forall>x. lb x \<le> f x \<Longrightarrow> \<forall>x. f x \<le> lb x + g x \<Longrightarrow> f \<in> lb +o O(g)"
by (simp add: add.commute bigo_bounded diff_le_eq set_minus_imp_plus)
lemma bigo_abs: "(\<lambda>x. \<bar>f x\<bar>) =o O(f)"
by (smt (verit, del_insts) abs_abs bigo_def bigo_refl mem_Collect_eq)
lemma bigo_abs2: "f =o O(\<lambda>x. \<bar>f x\<bar>)"
by (smt (verit, del_insts) abs_abs bigo_def bigo_refl mem_Collect_eq)
lemma bigo_abs3: "O(f) = O(\<lambda>x. \<bar>f x\<bar>)"
using bigo_abs bigo_abs2 bigo_elt_subset by blast
lemma bigo_abs4: assumes "f =o g +o O(h)" shows "(\<lambda>x. \<bar>f x\<bar>) =o (\<lambda>x. \<bar>g x\<bar>) +o O(h)"
proof -
{ assume *: "f - g \<in> O(h)"
have "(\<lambda>x. \<bar>f x\<bar> - \<bar>g x\<bar>) =o O(\<lambda>x. \<bar>\<bar>f x\<bar> - \<bar>g x\<bar>\<bar>)"
by (rule bigo_abs2)
also have "\<dots> \<subseteq> O(\<lambda>x. \<bar>f x - g x\<bar>)"
by (simp add: abs_triangle_ineq3 bigo_bounded bigo_elt_subset)
also have "\<dots> \<subseteq> O(f - g)"
using bigo_abs3 by fastforce
also from * have "\<dots> \<subseteq> O(h)"
by (rule bigo_elt_subset)
finally have "(\<lambda>x. \<bar>f x\<bar> - \<bar>g x\<bar>) \<in> O(h)" . }
then show ?thesis
by (smt (verit) assms bigo_alt_def fun_diff_def mem_Collect_eq set_minus_imp_plus set_plus_imp_minus)
qed
lemma bigo_abs5: "f =o O(g) \<Longrightarrow> (\<lambda>x. \<bar>f x\<bar>) =o O(g)"
by (auto simp: bigo_def)
lemma bigo_elt_subset2 [intro]:
assumes *: "f \<in> g +o O(h)"
shows "O(f) \<subseteq> O(g) + O(h)"
proof -
note *
also have "g +o O(h) \<subseteq> O(g) + O(h)"
by (auto del: subsetI)
also have "\<dots> = O(\<lambda>x. \<bar>g x\<bar>) + O(\<lambda>x. \<bar>h x\<bar>)"
by (subst bigo_abs3 [symmetric])+ (rule refl)
also have "\<dots> = O((\<lambda>x. \<bar>g x\<bar>) + (\<lambda>x. \<bar>h x\<bar>))"
by (rule bigo_plus_eq [symmetric]) auto
finally have "f \<in> \<dots>" .
then have "O(f) \<subseteq> \<dots>"
by (elim bigo_elt_subset)
also have "\<dots> = O(\<lambda>x. \<bar>g x\<bar>) + O(\<lambda>x. \<bar>h x\<bar>)"
by (rule bigo_plus_eq, auto)
finally show ?thesis
by (simp flip: bigo_abs3)
qed
lemma bigo_mult [intro]: "O(f)*O(g) \<subseteq> O(f * g)"
apply (rule subsetI)
apply (subst bigo_def)
apply (clarsimp simp add: bigo_alt_def set_times_def func_times)
apply (rule_tac x = "c * ca" in exI)
by (smt (verit, ccfv_threshold) mult.commute mult.assoc abs_ge_zero abs_mult dual_order.trans mult_mono)
lemma bigo_mult2 [intro]: "f *o O(g) \<subseteq> O(f * g)"
by (metis bigo_mult bigo_refl dual_order.trans mult.commute set_times_mono4)
lemma bigo_mult3: "f \<in> O(h) \<Longrightarrow> g \<in> O(j) \<Longrightarrow> f * g \<in> O(h * j)"
using bigo_mult mult.commute mult.commute set_times_intro subsetD by blast
lemma bigo_mult4 [intro]: "f \<in> k +o O(h) \<Longrightarrow> g * f \<in> (g * k) +o O(g * h)"
by (metis bigo_mult3 bigo_refl left_diff_distrib' mult.commute set_minus_imp_plus set_plus_imp_minus)
lemma bigo_mult5:
fixes f :: "'a \<Rightarrow> 'b::linordered_field"
assumes "\<forall>x. f x \<noteq> 0"
shows "O(f * g) \<subseteq> f *o O(g)"
proof
fix h
assume "h \<in> O(f * g)"
then have "(\<lambda>x. 1 / (f x)) * h \<in> (\<lambda>x. 1 / f x) *o O(f * g)"
by auto
also have "\<dots> \<subseteq> O((\<lambda>x. 1 / f x) * (f * g))"
by (rule bigo_mult2)
also have "(\<lambda>x. 1 / f x) * (f * g) = g"
using assms by auto
finally have "(\<lambda>x. (1::'b) / f x) * h \<in> O(g)" .
then have "f * ((\<lambda>x. (1::'b) / f x) * h) \<in> f *o O(g)"
by auto
also have "f * ((\<lambda>x. (1::'b) / f x) * h) = h"
by (simp add: assms times_fun_def)
finally show "h \<in> f *o O(g)" .
qed
lemma bigo_mult6: "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) = f *o O(g)"
for f :: "'a \<Rightarrow> 'b::linordered_field"
by (simp add: bigo_mult2 bigo_mult5 subset_antisym)
lemma bigo_mult7: "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) \<subseteq> O(f) * O(g)"
for f :: "'a \<Rightarrow> 'b::linordered_field"
by (metis bigo_mult6 bigo_refl mult.commute set_times_mono4)
lemma bigo_mult8: "\<forall>x. f x \<noteq> 0 \<Longrightarrow> O(f * g) = O(f) * O(g)"
for f :: "'a \<Rightarrow> 'b::linordered_field"
by (simp add: bigo_mult bigo_mult7 subset_antisym)
lemma bigo_minus [intro]: "f \<in> O(g) \<Longrightarrow> - f \<in> O(g)"
by (auto simp add: bigo_def fun_Compl_def)
lemma bigo_minus2:
assumes "f \<in> g +o O(h)" shows "- f \<in> -g +o O(h)"
proof -
have "- f + g \<in> O(h)"
by (metis assms bigo_minus minus_diff_eq set_plus_imp_minus uminus_add_conv_diff)
then show ?thesis
by (simp add: set_minus_imp_plus)
qed
lemma bigo_minus3: "O(- f) = O(f)"
by (auto simp add: bigo_def fun_Compl_def)
lemma bigo_plus_absorb_lemma1:
assumes *: "f \<in> O(g)"
shows "f +o O(g) \<subseteq> O(g)"
using assms bigo_plus_idemp set_plus_mono4 by blast
lemma bigo_plus_absorb_lemma2:
assumes *: "f \<in> O(g)"
shows "O(g) \<subseteq> f +o O(g)"
proof -
from * have "- f \<in> O(g)"
by auto
then have "- f +o O(g) \<subseteq> O(g)"
by (elim bigo_plus_absorb_lemma1)
then have "f +o (- f +o O(g)) \<subseteq> f +o O(g)"
by auto
also have "f +o (- f +o O(g)) = O(g)"
by (simp add: set_plus_rearranges)
finally show ?thesis .
qed
lemma bigo_plus_absorb [simp]: "f \<in> O(g) \<Longrightarrow> f +o O(g) = O(g)"
by (simp add: bigo_plus_absorb_lemma1 bigo_plus_absorb_lemma2 subset_antisym)
lemma bigo_plus_absorb2 [intro]: "f \<in> O(g) \<Longrightarrow> A \<subseteq> O(g) \<Longrightarrow> f +o A \<subseteq> O(g)"
using bigo_plus_absorb set_plus_mono by blast
lemma bigo_add_commute_imp: "f \<in> g +o O(h) \<Longrightarrow> g \<in> f +o O(h)"
by (metis bigo_minus minus_diff_eq set_minus_imp_plus set_plus_imp_minus)
lemma bigo_add_commute: "f \<in> g +o O(h) \<longleftrightarrow> g \<in> f +o O(h)"
using bigo_add_commute_imp by blast
lemma bigo_const1: "(\<lambda>x. c) \<in> O(\<lambda>x. 1)"
by (auto simp add: bigo_def ac_simps)
lemma bigo_const2 [intro]: "O(\<lambda>x. c) \<subseteq> O(\<lambda>x. 1)"
by (metis bigo_elt_subset bigo_const1)
lemma bigo_const3: "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. 1) \<in> O(\<lambda>x. c)"
for c :: "'a::linordered_field"
by (metis bigo_bounded_alt le_numeral_extra(4) nonzero_divide_eq_eq zero_less_one_class.zero_le_one)
lemma bigo_const4: "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. 1) \<subseteq> O(\<lambda>x. c)"
for c :: "'a::linordered_field"
by (metis bigo_elt_subset bigo_const3)
lemma bigo_const [simp]: "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. c) = O(\<lambda>x. 1)"
for c :: "'a::linordered_field"
by (metis equalityI bigo_const2 bigo_const4)
lemma bigo_const_mult1: "(\<lambda>x. c * f x) \<in> O(f)"
by (smt (z3) abs_mult bigo_def bigo_refl mem_Collect_eq mult.left_commute mult_commute_abs)
lemma bigo_const_mult2: "O(\<lambda>x. c * f x) \<subseteq> O(f)"
by (metis bigo_elt_subset bigo_const_mult1)
lemma bigo_const_mult3: "c \<noteq> 0 \<Longrightarrow> f \<in> O(\<lambda>x. c * f x)"
for c :: "'a::linordered_field"
by (simp add: bigo_def) (metis abs_mult field_class.field_divide_inverse mult.commute nonzero_divide_eq_eq order_refl)
lemma bigo_const_mult4: "c \<noteq> 0 \<Longrightarrow> O(f) \<subseteq> O(\<lambda>x. c * f x)"
for c :: "'a::linordered_field"
by (simp add: bigo_const_mult3 bigo_elt_subset)
lemma bigo_const_mult [simp]: "c \<noteq> 0 \<Longrightarrow> O(\<lambda>x. c * f x) = O(f)"
for c :: "'a::linordered_field"
by (simp add: bigo_const_mult2 bigo_const_mult4 subset_antisym)
lemma bigo_const_mult5 [simp]: "(\<lambda>x. c) *o O(f) = O(f)" if "c \<noteq> 0"
for c :: "'a::linordered_field"
proof
show "O(f) \<subseteq> (\<lambda>x. c) *o O(f)"
using that
apply (clarsimp simp add: bigo_def elt_set_times_def func_times)
apply (rule_tac x = "\<lambda>y. inverse c * x y" in exI)
apply (simp add: mult.assoc [symmetric] abs_mult)
apply (rule_tac x = "\<bar>inverse c\<bar> * ca" in exI)
apply auto
done
have "O(\<lambda>x. c * f x) \<subseteq> O(f)"
by (simp add: bigo_const_mult2)
then show "(\<lambda>x. c) *o O(f) \<subseteq> O(f)"
using order_trans[OF bigo_mult2] by (force simp add: times_fun_def)
qed
lemma bigo_const_mult6 [intro]: "(\<lambda>x. c) *o O(f) \<subseteq> O(f)"
apply (auto intro!: simp add: bigo_def elt_set_times_def func_times)
apply (rule_tac x = "ca * \<bar>c\<bar>" in exI)
by (smt (verit, ccfv_SIG) ab_semigroup_mult_class.mult_ac(1) abs_abs abs_le_self_iff abs_mult le_cases3 mult.commute mult_left_mono)
lemma bigo_const_mult7 [intro]:
assumes *: "f =o O(g)"
shows "(\<lambda>x. c * f x) =o O(g)"
proof -
from * have "(\<lambda>x. c) * f =o (\<lambda>x. c) *o O(g)"
by auto
also have "(\<lambda>x. c) * f = (\<lambda>x. c * f x)"
by (simp add: func_times)
also have "(\<lambda>x. c) *o O(g) \<subseteq> O(g)"
by (auto del: subsetI)
finally show ?thesis .
qed
lemma bigo_compose1: "f =o O(g) \<Longrightarrow> (\<lambda>x. f (k x)) =o O(\<lambda>x. g (k x))"
by (auto simp: bigo_def)
lemma bigo_compose2: "f =o g +o O(h) \<Longrightarrow> (\<lambda>x. f (k x)) =o (\<lambda>x. g (k x)) +o O(\<lambda>x. h(k x))"
by (smt (verit, best) set_minus_plus bigo_def fun_diff_def mem_Collect_eq)
subsection \<open>Sum\<close>
lemma bigo_sum_main:
assumes "\<forall>x. \<forall>y \<in> A x. 0 \<le> h x y" and "\<forall>x. \<forall>y \<in> A x. \<bar>f x y\<bar> \<le> c * h x y"
shows "(\<lambda>x. \<Sum>y \<in> A x. f x y) =o O(\<lambda>x. \<Sum>y \<in> A x. h x y)"
proof -
have "(\<Sum>i\<in>A x. \<bar>f x i\<bar>) \<le> \<bar>c\<bar> * sum (h x) (A x)" for x
by (smt (verit, ccfv_threshold) assms abs_mult_pos abs_of_nonneg abs_of_nonpos dual_order.trans le_cases3 neg_0_le_iff_le sum_distrib_left sum_mono)
then show ?thesis
using assms by (fastforce simp add: bigo_def sum_nonneg intro: order_trans [OF sum_abs])
qed
lemma bigo_sum1: "\<forall>x y. 0 \<le> h x y \<Longrightarrow>
\<exists>c. \<forall>x y. \<bar>f x y\<bar> \<le> c * h x y \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. f x y) =o O(\<lambda>x. \<Sum>y \<in> A x. h x y)"
by (metis (no_types) bigo_sum_main)
lemma bigo_sum2: "\<forall>y. 0 \<le> h y \<Longrightarrow>
\<exists>c. \<forall>y. \<bar>f y\<bar> \<le> c * (h y) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. f y) =o O(\<lambda>x. \<Sum>y \<in> A x. h y)"
by (rule bigo_sum1) auto
lemma bigo_sum3: "f =o O(h) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o O(\<lambda>x. \<Sum>y \<in> A x. \<bar>l x y * h (k x y)\<bar>)"
apply (rule bigo_sum1)
using abs_ge_zero apply blast
apply (clarsimp simp: bigo_def)
by (smt (verit, ccfv_threshold) abs_mult abs_not_less_zero mult.left_commute mult_le_cancel_left)
lemma bigo_sum4: "f =o g +o O(h) \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
(\<lambda>x. \<Sum>y \<in> A x. l x y * g (k x y)) +o
O(\<lambda>x. \<Sum>y \<in> A x. \<bar>l x y * h (k x y)\<bar>)"
using bigo_sum3 [of "f-g" h l k A]
apply (simp add: algebra_simps sum_subtractf)
by (smt (verit) bigo_alt_def minus_apply set_minus_imp_plus set_plus_imp_minus mem_Collect_eq)
lemma bigo_sum5: "f =o O(h) \<Longrightarrow> \<forall>x y. 0 \<le> l x y \<Longrightarrow>
\<forall>x. 0 \<le> h x \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
O(\<lambda>x. \<Sum>y \<in> A x. l x y * h (k x y))"
using bigo_sum3 [of f h l k A] by simp
lemma bigo_sum6: "f =o g +o O(h) \<Longrightarrow> \<forall>x y. 0 \<le> l x y \<Longrightarrow>
\<forall>x. 0 \<le> h x \<Longrightarrow>
(\<lambda>x. \<Sum>y \<in> A x. l x y * f (k x y)) =o
(\<lambda>x. \<Sum>y \<in> A x. l x y * g (k x y)) +o
O(\<lambda>x. \<Sum>y \<in> A x. l x y * h (k x y))"
using bigo_sum5 [of "f-g" h l k A]
apply (simp add: algebra_simps sum_subtractf)
by (smt (verit, del_insts) bigo_alt_def set_minus_imp_plus minus_apply set_plus_imp_minus mem_Collect_eq)
subsection \<open>Misc useful stuff\<close>
lemma bigo_useful_add: "f =o O(h) \<Longrightarrow> g =o O(h) \<Longrightarrow> f + g =o O(h)"
using bigo_plus_idemp set_plus_intro by blast
lemma bigo_useful_const_mult: "c \<noteq> 0 \<Longrightarrow> (\<lambda>x. c) * f =o O(h) \<Longrightarrow> f =o O(h)"
for c :: "'a::linordered_field"
using bigo_elt_subset bigo_mult6 by fastforce
lemma bigo_fix: "(\<lambda>x::nat. f (x + 1)) =o O(\<lambda>x. h (x + 1)) \<Longrightarrow> f 0 = 0 \<Longrightarrow> f =o O(h)"
by (simp add: bigo_alt_def) (metis abs_eq_0_iff abs_ge_zero abs_mult abs_of_pos not0_implies_Suc)
lemma bigo_fix2:
"(\<lambda>x. f ((x::nat) + 1)) =o (\<lambda>x. g(x + 1)) +o O(\<lambda>x. h(x + 1)) \<Longrightarrow>
f 0 = g 0 \<Longrightarrow> f =o g +o O(h)"
apply (rule set_minus_imp_plus [OF bigo_fix])
apply (smt (verit, del_insts) bigo_alt_def fun_diff_def set_plus_imp_minus mem_Collect_eq)
apply simp
done
subsection \<open>Less than or equal to\<close>
definition lesso :: "('a \<Rightarrow> 'b::linordered_idom) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b" (infixl "<o" 70)
where "f <o g = (\<lambda>x. max (f x - g x) 0)"
lemma bigo_lesseq1: "f =o O(h) \<Longrightarrow> \<forall>x. \<bar>g x\<bar> \<le> \<bar>f x\<bar> \<Longrightarrow> g =o O(h)"
by (smt (verit, del_insts) bigo_def mem_Collect_eq order_trans)
lemma bigo_lesseq2: "f =o O(h) \<Longrightarrow> \<forall>x. \<bar>g x\<bar> \<le> f x \<Longrightarrow> g =o O(h)"
by (metis (mono_tags, lifting) abs_ge_zero abs_of_nonneg bigo_lesseq1 dual_order.trans)
lemma bigo_lesseq3: "f =o O(h) \<Longrightarrow> \<forall>x. 0 \<le> g x \<Longrightarrow> \<forall>x. g x \<le> f x \<Longrightarrow> g =o O(h)"
by (meson bigo_bounded bigo_elt_subset subsetD)
lemma bigo_lesseq4: "f =o O(h) \<Longrightarrow> \<forall>x. 0 \<le> g x \<Longrightarrow> \<forall>x. g x \<le> \<bar>f x\<bar> \<Longrightarrow> g =o O(h)"
by (metis abs_of_nonneg bigo_lesseq1)
lemma bigo_lesso1: "\<forall>x. f x \<le> g x \<Longrightarrow> f <o g =o O(h)"
by (smt (verit, del_insts) abs_ge_zero add_0 bigo_abs3 bigo_bounded diff_le_eq lesso_def max_def order_refl)
lemma bigo_lesso2: "f =o g +o O(h) \<Longrightarrow> \<forall>x. 0 \<le> k x \<Longrightarrow> \<forall>x. k x \<le> f x \<Longrightarrow> k <o g =o O(h)"
unfolding lesso_def
apply (rule bigo_lesseq4 [of "f-g"])
apply (erule set_plus_imp_minus)
using max.cobounded2 apply blast
by (smt (verit) abs_ge_zero abs_of_nonneg diff_ge_0_iff_ge diff_mono diff_self fun_diff_def order_refl max.coboundedI2 max_def)
lemma bigo_lesso3: "f =o g +o O(h) \<Longrightarrow> \<forall>x. 0 \<le> k x \<Longrightarrow> \<forall>x. g x \<le> k x \<Longrightarrow> f <o k =o O(h)"
unfolding lesso_def
apply (rule bigo_lesseq4 [of "f-g"])
apply (erule set_plus_imp_minus)
using max.cobounded2 apply blast
by (smt (verit) abs_eq_iff abs_ge_zero abs_if abs_minus_le_zero diff_left_mono fun_diff_def le_max_iff_disj order.trans order_eq_refl)
lemma bigo_lesso4:
fixes k :: "'a \<Rightarrow> 'b::linordered_field"
assumes f: "f <o g =o O(k)" and g: "g =o h +o O(k)"
shows "f <o h =o O(k)"
proof -
have "g - h \<in> O(k)"
by (simp add: g set_plus_imp_minus)
then have "(\<lambda>x. \<bar>g x - h x\<bar>) \<in> O(k)"
using bigo_abs5 by force
then have \<section>: "(\<lambda>x. max (f x - g x) 0) + (\<lambda>x. \<bar>g x - h x\<bar>) \<in> O(k)"
by (metis (mono_tags, lifting) bigo_lesseq1 bigo_useful_add dual_order.eq_iff f lesso_def)
have "\<bar>max (f x - h x) 0\<bar> \<le> ((\<lambda>x. max (f x - g x) 0) + (\<lambda>x. \<bar>g x - h x\<bar>)) x" for x
by (auto simp add: func_plus fun_diff_def algebra_simps split: split_max abs_split)
then show ?thesis
by (smt (verit, ccfv_SIG) \<section> bigo_lesseq2 lesso_def)
qed
lemma bigo_lesso5:
assumes "f <o g =o O(h)" shows "\<exists>C. \<forall>x. f x \<le> g x + C * \<bar>h x\<bar>"
proof -
obtain c where "0 < c" and c: "\<And>x. f x - g x \<le> c * \<bar>h x\<bar>"
using assms by (auto simp: lesso_def bigo_alt_def)
have "\<bar>max (f x - g x) 0\<bar> = max (f x - g x) 0" for x
by (auto simp add: algebra_simps)
then show ?thesis
by (metis c add.commute diff_le_eq)
qed
lemma lesso_add: "f <o g =o O(h) \<Longrightarrow> k <o l =o O(h) \<Longrightarrow> (f + k) <o (g + l) =o O(h)"
unfolding lesso_def
using bigo_useful_add by (fastforce split: split_max intro: bigo_lesseq3)
lemma bigo_LIMSEQ1: "f \<longlonglongrightarrow> 0" if f: "f =o O(g)" and g: "g \<longlonglongrightarrow> 0"
for f g :: "nat \<Rightarrow> real"
proof -
{ fix r::real
assume "0 < r"
obtain c::real where "0 < c" and rc: "\<And>x. \<bar>f x\<bar> \<le> c * \<bar>g x\<bar>"
using f by (auto simp: LIMSEQ_iff bigo_alt_def)
with g \<open>0 < r\<close> obtain no where "\<forall>n\<ge>no. \<bar>g n\<bar> < r/c"
by (fastforce simp: LIMSEQ_iff)
then have "\<exists>no. \<forall>n\<ge>no. \<bar>f n\<bar> < r"
by (metis \<open>0 < c\<close> mult.commute order_le_less_trans pos_less_divide_eq rc) }
then show ?thesis
by (auto simp: LIMSEQ_iff)
qed
lemma bigo_LIMSEQ2:
fixes f g :: "nat \<Rightarrow> real"
assumes "f =o g +o O(h)" "h \<longlonglongrightarrow> 0" and f: "f \<longlonglongrightarrow> a"
shows "g \<longlonglongrightarrow> a"
proof -
have "f - g \<longlonglongrightarrow> 0"
using assms bigo_LIMSEQ1 set_plus_imp_minus by blast
then have "(\<lambda>n. f n - g n) \<longlonglongrightarrow> 0"
by (simp add: fun_diff_def)
then show ?thesis
using Lim_transform_eq f by blast
qed
end
|
%flag C "-I/usr/include/libdrm -I/usr/include/libpng16 "
%flag C "-lGLEW -lGLU -lGL -lpng16 -lz "
%flag C "-lGL"
|
lemma normalize_content [simp]: "normalize (content p) = content p" |
[STATEMENT]
lemma iFROM_conv: "[n\<dots>] = UNIV \<oplus> n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. [n\<dots>] = UNIV \<oplus> n
[PROOF STEP]
by (simp add: iFROM_0[symmetric] iFROM_add) |
! { dg-do run }
! { dg-add-options ieee }
! { dg-skip-if "NaN not supported" { spu-*-* } { "*" } { "" } }
!
! List-directed part of PR fortran/43298
! and follow up to PR fortran/34319.
!
! Check handling of "NAN(alphanum)"
!
character(len=200) :: str
real :: r
complex :: z
! read_real:
r = 1.0
str = 'INfinity' ; read(str,*) r
if (r < 0 .or. r /= r*1.1) call abort()
r = 1.0
str = '-INF' ; read(str,*) r
if (r > 0 .or. r /= r*1.1) call abort()
r = 1.0
str = '+INF' ; read(str,*) r
if (r < 0 .or. r /= r*1.1) call abort()
r = 1.0
str = '-inFiniTY' ; read(str,*) r
if (r > 0 .or. r /= r*1.1) call abort()
r = 1.0
str = 'NAN' ; read(str,*) r
if (.not. isnan(r)) call abort()
r = 1.0
str = '-NAN' ; read(str,*) r
if (.not. isnan(r)) call abort()
r = 1.0
str = '+NAN' ; read(str,*) r
if (.not. isnan(r)) call abort()
r = 1.0
str = 'NAN(0x111)' ; read(str,*) r
if (.not. isnan(r)) call abort()
r = 1.0
str = '-NAN(123)' ; read(str,*) r
if (.not. isnan(r)) call abort()
r = 1.0
str = '+NAN(0xFFE)' ; read(str,*) r
if (.not. isnan(r)) call abort()
! parse_real
z = cmplx(-2.0,-4.0)
str = '(0.0,INfinity)' ; read(str,*) z
if (aimag(z) < 0 .or. aimag(z) /= aimag(z)*1.1) call abort()
z = cmplx(-2.0,-4.0)
str = '(-INF,0.0)' ; read(str,*) z
if (real(z) > 0 .or. real(z) /= real(z)*1.1) call abort()
z = cmplx(-2.0,-4.0)
str = '(0.0,+INF)' ; read(str,*) z
if (aimag(z) < 0 .or. aimag(z) /= aimag(z)*1.1) call abort()
z = cmplx(-2.0,-4.0)
str = '(-inFiniTY,0.0)' ; read(str,*) z
if (real(z) > 0 .or. real(z) /= real(z)*1.1) call abort()
z = cmplx(-2.0,-4.0)
str = '(NAN,0.0)' ; read(str,*) z
if (.not. isnan(real(z))) call abort()
z = cmplx(-2.0,-4.0)
str = '(0.0,-NAN)' ; read(str,*) z
if (.not. isnan(aimag(z))) call abort()
z = cmplx(-2.0,-4.0)
str = '(+NAN,0.0)' ; read(str,*) z
if (.not. isnan(real(z))) call abort()
z = cmplx(-2.0,-4.0)
str = '(NAN(0x111),0.0)' ; read(str,*) z
if (.not. isnan(real(z))) call abort()
z = cmplx(-2.0,-4.0)
str = '(0.0,-NaN(123))' ; read(str,*) z
if (.not. isnan(aimag(z))) call abort()
z = cmplx(-2.0,-4.0)
str = '(+nan(0xFFE),0.0)' ; read(str,*) z
if (.not. isnan(real(z))) call abort()
end
|
Require Export GeoCoq.Elements.OriginalProofs.lemma_ray.
Section Euclid.
Context `{Ax:euclidean_neutral_ruler_compass}.
Lemma lemma_ray1 :
forall A B P,
Out A B P ->
(BetS A P B \/ eq B P \/ BetS A B P).
Proof.
intros.
assert (~ ~ (BetS A P B \/ eq B P \/ BetS A B P)).
{
intro.
assert (neq P B) by (conclude lemma_inequalitysymmetric).
assert (BetS A B P) by (conclude lemma_ray).
contradict.
}
close.
Qed.
End Euclid.
|
Formal statement is: lemma degree_of_int [simp]: "degree (of_int k) = 0" Informal statement is: The degree of an integer is zero. |
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Leonardo de Moura
! This file was ported from Lean 3 source module init.meta.decl_cmds
! leanprover-community/mathlib commit b40f3af8018f0cc5811d5f56e4f9888877009b4f
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
prelude
import Leanbin.Init.Meta.Tactic
import Leanbin.Init.Meta.RbMap
open Tactic
open Native
private unsafe def apply_replacement (replacements : name_map Name) (e : expr) : expr :=
e.replace fun e d =>
match e with
| expr.const n ls =>
match replacements.find n with
| some new_n => some (expr.const new_n ls)
| none => none
| _ => none
#align apply_replacement apply_replacement
/--
Given a set of constant renamings `replacements` and a declaration name `src_decl_name`, create a new
declaration called `new_decl_name` s.t. its type is the type of `src_decl_name` after applying the
given constant replacement.
Remark: the new type must be definitionally equal to the type of `src_decl_name`.
Example:
Assume the environment contains
def f : nat -> nat := ...
def g : nat -> nat := f
lemma f_lemma : forall a, f a > 0 := ...
Moreover, assume we have a mapping M containing `f -> `g
Then, the command
run_command copy_decl_updating_type M `f_lemma `g_lemma
creates the declaration
lemma g_lemma : forall a, g a > 0 := ... -/
unsafe def copy_decl_updating_type (replacements : name_map Name) (src_decl_name : Name)
(new_decl_name : Name) : Tactic := do
let env ← get_env
let decl ← env.get src_decl_name
let decl := decl.update_name <| new_decl_name
let decl := decl.update_type <| apply_replacement replacements decl.type
let decl := decl.update_value <| expr.const src_decl_name (decl.univ_params.map level.param)
add_decl decl
#align copy_decl_updating_type copy_decl_updating_type
unsafe def copy_decl_using (replacements : name_map Name) (src_decl_name : Name)
(new_decl_name : Name) : Tactic := do
let env ← get_env
let decl ← env.get src_decl_name
let decl := decl.update_name <| new_decl_name
let decl := decl.update_type <| apply_replacement replacements decl.type
let decl := decl.map_value <| apply_replacement replacements
add_decl decl
#align copy_decl_using copy_decl_using
|
Formal statement is: lemma span_Basis [simp]: "span Basis = UNIV" Informal statement is: The span of the basis of a vector space is the entire vector space. |
-----------------------------------------------------------------------------
-- |
-- Module : Graphics.Rendering.Plot
-- Copyright : (c) A. V. H. McPhail 2010
-- License : BSD3
--
-- Maintainer : haskell.vivian.mcphail <at> gmail <dot> com
-- Stability : provisional
-- Portability : portable
--
-- Graphical plots
--
-----------------------------------------------------------------------------
module Graphics.Rendering.Plot (
-- * Re-exported for convenience
module Graphics.Rendering.Plot.Figure.Simple
, module Graphics.Rendering.Plot.Figure
, module Graphics.Rendering.Plot.Render
-- * Example
-- $example
) where
-----------------------------------------------------------------------------
import Graphics.Rendering.Plot.Figure.Simple
import Graphics.Rendering.Plot.Figure
import Graphics.Rendering.Plot.Render
-----------------------------------------------------------------------------
{- $example
Create some data:
> ln = 25
> ts = linspace ln (0,1)
> rs = randomVector 0 Gaussian ln
>
> ss = sin (15*2*pi*ts)
> ds = 0.25*rs + ss
> es = constant (0.25*(stddev rs)) ln
>
> fs :: Double -> Double
> fs = sin . (15*2*pi*)
Perform actions in 'Figure a' to create a figure
> test_graph = do
> withTextDefaults $ setFontFamily "OpenSymbol"
> withTitle $ setText "Testing plot package:"
> withSubTitle $ do
> setText "with 1 second of a 15Hz sine wave"
> setFontSize 10
> setPlots 1 1
> withPlot (1,1) $ do
> setDataset (ts,[point (ds,es) (Cross,red),line fs blue])
> addAxis XAxis (Side Lower) $ withAxisLabel $ setText "time (s)"
> addAxis YAxis (Side Lower) $ withAxisLabel $ setText "amplitude"
> addAxis XAxis (Value 0) $ return ()
> setRangeFromData XAxis Lower Linear
> setRange YAxis Lower Linear (-1.25) 1.25
Render the graph to a Cairo 'Render ()' action that takes the width
and height of the drawing area
> test_render :: (Double,Double) -> Render ()
> test_render = render test_graph
The same graph using the 'Simple' interface
> test_graph2 = do
> plot (ts,[point (ds,es) (Cross,red),line fs blue])
> title "Testing plot package:"
> subtitle "with 1 second of a 15Hz sine wave"
> xlabel "time (s)"
> ylabel "amplitude"
> yrange Linear (-1.25) 1.25
The 'Render a' action can be used in GTK or with Cairo to write to file in PS, PDF, SVG, or PNG
Display a greyscale matrix
> ms :: Matrix Double
> ms = buildMatrix 64 64 (\(x,y) -> sin (2*2*pi*(fromIntegral x)/64) * cos (5*2*pi*(fromIntegral y)/64))
> mat_fig = do
> setPlots 1 1
> withPlot (1,1) $ do
> setDataset ms
> addAxis XAxis (Side Lower) $ setTickLabelFormat "%.0f"
> addAxis YAxis (Side Lower) $ setTickLabelFormat "%.0f"
> setRangeFromData XAxis Lower Linear
> setRangeFromData YAxis Lower Linear
The ODE example from hmatrix:
> import Numeric.GSL
> import Numeric.LinearAlgebra
> xdot t [x,v] = [v, -0.95*x - 0.1*v]
> ts = linspace 100 (0,20)
> sol = odeSolve xdot [10,0] ts
> ode_fig = plot (Line,ts,[sol])
-}
-----------------------------------------------------------------------------
|
State Before: l r : List Char
it : Iterator
h : ValidFor l r it
⊢ ValidFor (List.reverse r ++ l) [] (Iterator.toEnd it) State After: l r : List Char
it : Iterator
h : ValidFor l r it
⊢ ValidFor (List.reverse r ++ l) [] { s := { data := List.reverseAux l r }, i := { byteIdx := utf8Len l + utf8Len r } } Tactic: simp [Iterator.toEnd, h.toString] State Before: l r : List Char
it : Iterator
h : ValidFor l r it
⊢ ValidFor (List.reverse r ++ l) [] { s := { data := List.reverseAux l r }, i := { byteIdx := utf8Len l + utf8Len r } } State After: no goals Tactic: exact .of_eq _ (by simp [List.reverseAux_eq]) (by simp [Nat.add_comm]) State Before: l r : List Char
it : Iterator
h : ValidFor l r it
⊢ { s := { data := List.reverseAux l r }, i := { byteIdx := utf8Len l + utf8Len r } }.s.data =
List.reverseAux (List.reverse r ++ l) [] State After: no goals Tactic: simp [List.reverseAux_eq] State Before: l r : List Char
it : Iterator
h : ValidFor l r it
⊢ { s := { data := List.reverseAux l r }, i := { byteIdx := utf8Len l + utf8Len r } }.i.byteIdx =
utf8Len (List.reverse r ++ l) State After: no goals Tactic: simp [Nat.add_comm] |
[GOAL]
α β : Type u
f : Type u → Type v
inst✝¹ : Functor f
inst✝ : LawfulFunctor f
h : α ≃ β
x : f α
⊢ ↑h.symm <$> ↑h <$> x = x
[PROOFSTEP]
simp [map_map]
[GOAL]
α β : Type u
f : Type u → Type v
inst✝¹ : Functor f
inst✝ : LawfulFunctor f
h : α ≃ β
x : f β
⊢ ↑h <$> ↑h.symm <$> x = x
[PROOFSTEP]
simp [map_map]
[GOAL]
α β : Type u
f : Type u → Type v
inst✝¹ : Functor f
inst✝ : LawfulFunctor f
⊢ mapEquiv f (Equiv.refl α) = Equiv.refl (f α)
[PROOFSTEP]
ext x
[GOAL]
case H
α β : Type u
f : Type u → Type v
inst✝¹ : Functor f
inst✝ : LawfulFunctor f
x : f α
⊢ ↑(mapEquiv f (Equiv.refl α)) x = ↑(Equiv.refl (f α)) x
[PROOFSTEP]
simp only [mapEquiv_apply, refl_apply]
[GOAL]
case H
α β : Type u
f : Type u → Type v
inst✝¹ : Functor f
inst✝ : LawfulFunctor f
x : f α
⊢ ↑(Equiv.refl α) <$> x = x
[PROOFSTEP]
exact LawfulFunctor.id_map x
[GOAL]
α β : Type u
α' β' : Type v
F : Type u → Type v → Type w
inst✝¹ : Bifunctor F
inst✝ : LawfulBifunctor F
h : α ≃ β
h' : α' ≃ β'
x : F α α'
⊢ bimap (↑h.symm) (↑h'.symm) (bimap (↑h) (↑h') x) = x
[PROOFSTEP]
simp [bimap_bimap, id_bimap]
[GOAL]
α β : Type u
α' β' : Type v
F : Type u → Type v → Type w
inst✝¹ : Bifunctor F
inst✝ : LawfulBifunctor F
h : α ≃ β
h' : α' ≃ β'
x : F β β'
⊢ bimap (↑h) (↑h') (bimap (↑h.symm) (↑h'.symm) x) = x
[PROOFSTEP]
simp [bimap_bimap, id_bimap]
[GOAL]
α β : Type u
α' β' : Type v
F : Type u → Type v → Type w
inst✝¹ : Bifunctor F
inst✝ : LawfulBifunctor F
⊢ mapEquiv F (Equiv.refl α) (Equiv.refl α') = Equiv.refl (F α α')
[PROOFSTEP]
ext x
[GOAL]
case H
α β : Type u
α' β' : Type v
F : Type u → Type v → Type w
inst✝¹ : Bifunctor F
inst✝ : LawfulBifunctor F
x : F α α'
⊢ ↑(mapEquiv F (Equiv.refl α) (Equiv.refl α')) x = ↑(Equiv.refl (F α α')) x
[PROOFSTEP]
simp [id_bimap]
|
[STATEMENT]
lemma S_list_pgallery_decomp1:
assumes ss: "set ss = S" and gal: "Cs\<noteq>[]" "pgallery (C0#Cs)"
shows "\<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f,g)\<in>fundfoldpairs.
s = Abs_induced_automorph f g \<longrightarrow> C \<in> g\<turnstile>\<C>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
proof (cases Cs)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
2. \<And>a list. Cs = a # list \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
case (Cons D Ds)
[PROOF STATE]
proof (state)
this:
Cs = D # Ds
goal (2 subgoals):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
2. \<And>a list. Cs = a # list \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
with gal(2)
[PROOF STATE]
proof (chain)
picking this:
pgallery (C0 # Cs)
Cs = D # Ds
[PROOF STEP]
have "D\<in>fundadjset"
[PROOF STATE]
proof (prove)
using this:
pgallery (C0 # Cs)
Cs = D # Ds
goal (1 subgoal):
1. D \<in> adjacentset C0 - {C0}
[PROOF STEP]
using pgallery_def chamberD_simplex adjacentset_def
[PROOF STATE]
proof (prove)
using this:
pgallery (C0 # Cs)
Cs = D # Ds
pgallery ?xs \<equiv> Ball (set ?xs) chamber \<and> padjacentchain ?xs
chamber ?x \<Longrightarrow> ?x \<in> X
adjacentset ?x = {y \<in> X. ?x \<sim> y}
goal (1 subgoal):
1. D \<in> adjacentset C0 - {C0}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
D \<in> adjacentset C0 - {C0}
goal (2 subgoals):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
2. \<And>a list. Cs = a # list \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
D \<in> adjacentset C0 - {C0}
[PROOF STEP]
obtain s where s: "s\<in>S" "D = s`\<rightarrow>C0"
[PROOF STATE]
proof (prove)
using this:
D \<in> adjacentset C0 - {C0}
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g}); D = s `\<rightarrow> C0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using fundadjset_eq_S_image
[PROOF STATE]
proof (prove)
using this:
D \<in> adjacentset C0 - {C0}
?D \<in> adjacentset C0 - {C0} \<Longrightarrow> \<exists>s\<in>\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g}. ?D = s `\<rightarrow> C0
goal (1 subgoal):
1. (\<And>s. \<lbrakk>s \<in> (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g}); D = s `\<rightarrow> C0\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
s \<in> (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g})
D = s `\<rightarrow> C0
goal (2 subgoals):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
2. \<And>a list. Cs = a # list \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
from s(2)
[PROOF STATE]
proof (chain)
picking this:
D = s `\<rightarrow> C0
[PROOF STEP]
have
"\<forall>(f,g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D\<in>g\<turnstile>\<C>"
[PROOF STATE]
proof (prove)
using this:
D = s `\<rightarrow> C0
goal (1 subgoal):
1. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D \<in> g \<turnstile> \<C>
[PROOF STEP]
using fundfoldpairs_def fundfoldpairs_fundchamber_image
OpposedThinChamberComplexFoldings.basechambers_half_chamber_systems(2)
[PROOF STATE]
proof (prove)
using this:
D = s `\<rightarrow> C0
fundfoldpairs \<equiv> {(f, g). OpposedThinChamberComplexFoldings X f g C0}
(?f, ?g) \<in> fundfoldpairs \<Longrightarrow> Abs_induced_automorph ?f ?g `\<rightarrow> C0 = ?g ` C0
OpposedThinChamberComplexFoldings ?X ?f ?g ?C0.0 \<Longrightarrow> ?g ` ?C0.0 \<in> ?g \<turnstile> ChamberComplex.chamber_system ?X
goal (1 subgoal):
1. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D \<in> g \<turnstile> \<C>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D \<in> g \<turnstile> \<C>
goal (2 subgoals):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
2. \<And>a list. Cs = a # list \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
with s(1) ss Cons
[PROOF STATE]
proof (chain)
picking this:
s \<in> (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g})
set ss = (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g})
Cs = D # Ds
\<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D \<in> g \<turnstile> \<C>
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
s \<in> (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g})
set ss = (\<Union>(f, g)\<in>fundfoldpairs. {Abs_induced_automorph f g})
Cs = D # Ds
\<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> D \<in> g \<turnstile> \<C>
goal (1 subgoal):
1. \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
goal (1 subgoal):
1. Cs = [] \<Longrightarrow> \<exists>s\<in>set ss. \<exists>C\<in>set Cs. \<forall>(f, g)\<in>fundfoldpairs. s = Abs_induced_automorph f g \<longrightarrow> C \<in> g \<turnstile> \<C>
[PROOF STEP]
qed (simp add: gal(1)) |
--import tactic.ring
theorem m (a b c:ℕ) : (a+b) * c = a * c + b * c := begin
induction c with c H,
{
refl
},
{
have H2 := nat.mul_succ,
rw H2, clear H2,
have H2 := nat.mul_succ a c,
rw H2, clear H2,
have H2 := nat.mul_succ b c,
rw H2, rw H, simp
}
end
theorem m2 (a b c:ℕ) : c * (a+b) = a * c + b * c := begin
have H := m a b c,
rw ←H,
apply nat.mul_comm
end
theorem m3 (n:ℕ) : 2*n = n+n :=begin
induction n,{refl},
{
rw nat.mul_succ 2 n_n,
rw n_ih,
have H : nat.succ n_n = n_n + 1, {refl},
rw H,simp
}
end
theorem a (n:ℕ) : n^2 = n*n := begin
unfold pow nat.pow, simp
end
theorem b (a b:ℕ) : (a+b)^2 = a^2 + b^2 + 2*a*b :=
begin
unfold pow nat.pow,
simp,
have H₁ := m a b (a+b), rw H₁, clear H₁,
have H₁ := m2 a b a, rw H₁, clear H₁,
have H₁ := m2 a b b, rw H₁, clear H₁, simp,
have H₂ : a * b = b * a,
{
exact nat.mul_comm a b
},
rw ←H₂,
have H₃ : 2 * a * b = a * b + a * b,
{
have H₄ := m3 (a*b),
have H₅ : 2 * (a * b) = 2 * a * b,
{
rw nat.mul_assoc 2 a b
},
{
rw H₅ at H₄, clear H₅,
rw H₄
}
},
rw H₃,
simp
end
theorem c (a n:ℕ) : n^2 + 2*a*n = (n+a)^2 - a^2 := begin
have H := b n a,
rw H,
simp,
have H₂ := nat.add_sub_cancel (n ^ 2 + 2 * n * a) (a^2),
have H₃ :
a^2 + (n ^ 2 + 2 * n * a) - a^2 = (n ^ 2 + 2 * n * a) + a^2 - a^2,{simp},
rw H₃,
rw H₂,
have H₄ : 2*a*n = 2*n*a,
{
have H₅ := m3 (a*n),
clear H H₂ H₃,
have H₆ : 2*a*n = 2*(a*n),
{
exact nat.mul_assoc 2 a n
},
rw H₆, rw H₅,
have H₅ := m3 (n*a),
have H₆ : 2*n*a = 2*(n*a),
{
exact nat.mul_assoc 2 n a
},
rw H₆, rw H₅,
rw nat.mul_comm a n
},
rw H₄
end
-- ax^2 + bx + c = 0
-- x^2 + bx/a + c/a = 0
-- x^2 + bx/a = - c/a
-- (x + b/2a)^2 - b^2/4a^2 = -c/a
-- (x + b/2a)^2 = b^2/4a^2 - c/a
-- = b^2 - 4ac / 4a^2
-- x + b/2a = S ∨ -S
-- x = -b/2a + S ∨ -b/2a - S
theorem quad (k : Type) [field k] (a b c x S : k)
(HS : S*S = b*b - 4*a*c) (char_not_2 : (2:k) ≠ 0) :
a * x*x + b * x + c = 0 ↔
(x = (-b + S ) / 2*a ∨
x = (-b - S ) / 2*a ) := begin
split,
{
intro H₁,
}
end
|
text\<open> 18 November 2021: Exercise for Homework 11 in CS 511 \<close>
text\<open> Proof of theorem RichGF_5 at the end of this script is a possible solution \<close>
theory HW11_solution
imports Main
begin
text\<open>
If every poor man has a rich father, then there is a rich man who has a rich grandfather \<close>
theorem RichGF_1 :
"\<forall> x. \<not> rich x \<longrightarrow> rich (father x) \<Longrightarrow> \<exists> x. rich x \<and> rich (father (father x))"
by blast (* you can also try 'auto' *)
text \<open> The second proof contains 5 'apply' instructions,
followed by a single invocation of the pre-defined method 'auto' \<close>
theorem RichGF_2 :
"\<forall> x. \<not> rich x \<longrightarrow> rich (father x) \<Longrightarrow> \<exists> x. rich x \<and> rich (father (father x))"
apply (rule exCI)
apply (rule conjI)
apply (rule classical)
apply (rotate_tac -2)
apply (erule notE)
by auto
text \<open> We simplify the notation a little, by replacing unary predicate 'rich' by 'R'
and unary function 'father' by 'f' \<close>
text\<open> The third proof contains 18 'apply' instructions,
followed by a single invocation of the pre-defined method 'blast' \<close>
theorem RichGF_3 :
"\<forall> x. \<not> R x \<longrightarrow> R (f x) \<Longrightarrow> \<exists> x. R (f (f x)) \<and> R x"
apply (rule classical)
apply (rule exI)
apply (rule conjI)
apply (rule classical)
apply (rule allE, assumption)
apply (erule impE, assumption)
apply (erule notE)
apply (rule exI)
apply (rule conjI, assumption)
apply (rule classical)
apply (erule allE, erule notE, erule impE)
apply assumption+
by blast
text \<open> The fourth proof contains 45 'apply' instructions,
and no invocation of any pre-defined method \<close>
theorem RichGF_4 :
"\<forall> x. \<not> R x \<longrightarrow> R (f x) \<Longrightarrow> \<exists> x. R x \<and> R (f (f x))"
apply (rule classical)
apply (rule exI)
apply (rule conjI)
apply (rule classical)
apply (rule allE, assumption)
apply (erule impE, assumption)
apply (erule notE)
apply (rule exCI)
apply (rule conjI, assumption)
apply (rule classical)
apply (erule notE, erule allE, erule mp, rule notI)
apply (erule allE, rotate_tac -1, erule notE, rule conjI, rotate_tac -1)
apply assumption+
apply (rule classical)
apply (rule allE, assumption)
apply (erule impE, assumption)
apply (erule notE)
apply (rule exCI)
apply (rule conjI, assumption)
apply (rule classical)
apply (erule notE, erule allE, erule mp, rule notI)
apply (erule notE, erule allE, erule notE, rule conjI, rotate_tac -1)
apply assumption+
done
text \<open> The fifth proof contains 33 'apply' instructions,
and no invocation of any pre-defined method \<close>
theorem RichGF_5 :
"\<forall> x. \<not> R x \<longrightarrow> R (f x) \<Longrightarrow> \<exists> x. R (f (f x)) \<and> R x"
apply (rule classical)
apply (rule exI)
apply (rule conjI)
apply (rule classical)
apply (rule allE, assumption)
apply (erule impE, assumption)
apply (erule notE)
apply (rule exI)
apply (rule conjI, assumption)
apply (rule classical)
apply (erule allE, erule notE, erule impE)
apply assumption apply assumption
apply (rule classical)
apply (rule allE) apply assumption
apply (erule impE) apply assumption
apply (erule notE)
apply (rule exI)
apply (rule conjI) apply assumption
apply (rule classical)
apply (erule allE)
apply (erule notE)
apply (erule impE) apply assumption
apply assumption
done
end |
[STATEMENT]
lemma add_mult_distrib_mat[algebra_simps]: assumes m: "A \<in> carrier_mat nr n"
"B \<in> carrier_mat nr n" "C \<in> carrier_mat n nc"
shows "(A + B) * C = A * C + B * C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A + B) * C = A * C + B * C
[PROOF STEP]
using m
[PROOF STATE]
proof (prove)
using this:
A \<in> carrier_mat nr n
B \<in> carrier_mat nr n
C \<in> carrier_mat n nc
goal (1 subgoal):
1. (A + B) * C = A * C + B * C
[PROOF STEP]
by (intro eq_matI, auto simp: add_scalar_prod_distrib[of _ n]) |
import linear_algebra.tensor_product
import deprecated.subring
-- Swap these ↑ two imports, and then `foo` will always be happy.
-- This was not the cases on commit `df4500242eb6aa6ee20b315b185b0f97a9b359c5`.
-- You would get a timeout.
import algebra.module.submodule
variables {R M N P Q : Type*} [comm_ring R]
variables [add_comm_group M] [module R M]
variables [add_comm_group N] [module R N]
open function
lemma injective_iff (f : M →ₗ[R] N) : function.injective f ↔ ∀ m, f m = 0 → m = 0 :=
add_monoid_hom.injective_iff f.to_add_monoid_hom
lemma foo (L : submodule R (unit → R))
(H : ∀ (m : tensor_product R ↥L ↥L), (tensor_product.map L.subtype L.subtype) m = 0 → m = 0) :
injective (tensor_product.map L.subtype L.subtype) :=
(injective_iff _).mpr H
|
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-}
#if __GLASGOW_HASKELL__ >= 800
{-# OPTIONS_GHC -Wno-redundant-constraints #-}
#endif
-- |
-- Module : Graphics.Image.ColorSpace.Complex
-- Copyright : (c) Alexey Kuleshevich 2016
-- License : BSD3
-- Maintainer : Alexey Kuleshevich <[email protected]>
-- Stability : experimental
-- Portability : non-portable
--
module Graphics.Image.ColorSpace.Complex (
-- *** Rectangular form
Complex(..), (+:), realPart, imagPart,
-- *** Polar form
mkPolar, cis, polar, magnitude, phase,
-- *** Conjugate
conjugate
) where
import Graphics.Image.Interface (Pixel)
import Control.Applicative
import Data.Complex (Complex(..))
import qualified Data.Complex as C hiding (Complex(..))
infix 6 +:
-- | Construct a complex pixel from two pixels representing real and imaginary parts.
--
-- @ PixelRGB 4 8 6 '+:' PixelRGB 7 1 1 __==__ PixelRGB (4 ':+' 7) (8 ':+' 1) (6 ':+' 1) @
--
(+:) :: Applicative (Pixel cs) => Pixel cs e -> Pixel cs e -> Pixel cs (Complex e)
(+:) = liftA2 (:+)
{-# INLINE (+:) #-}
-- | Extracts the real part of a complex pixel.
realPart :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> Pixel cs e
realPart = liftA C.realPart
{-# INLINE realPart #-}
-- | Extracts the imaginary part of a complex pixel.
imagPart :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> Pixel cs e
imagPart = liftA C.imagPart
{-# INLINE imagPart #-}
-- | Form a complex pixel from polar components of magnitude and phase.
mkPolar :: (Applicative (Pixel cs), RealFloat e) =>
Pixel cs e -> Pixel cs e -> Pixel cs (Complex e)
mkPolar = liftA2 C.mkPolar
{-# INLINE mkPolar #-}
-- | @'cis' t@ is a complex pixel with magnitude 1 and phase t (modulo @2*'pi'@).
cis :: (Applicative (Pixel cs), RealFloat e) => Pixel cs e -> Pixel cs (Complex e)
cis = liftA C.cis
{-# INLINE cis #-}
-- | The function @'polar'@ takes a complex pixel and returns a (magnitude, phase)
-- pair of pixels in canonical form: the magnitude is nonnegative, and the phase
-- in the range @(-'pi', 'pi']@; if the magnitude is zero, then so is the phase.
polar :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> (Pixel cs e, Pixel cs e)
polar !zPx = (magnitude zPx, phase zPx)
{-# INLINE polar #-}
-- | The nonnegative magnitude of a complex pixel.
magnitude :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> Pixel cs e
magnitude = liftA C.magnitude
{-# INLINE magnitude #-}
-- | The phase of a complex pixel, in the range @(-'pi', 'pi']@. If the
-- magnitude is zero, then so is the phase.
phase :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> Pixel cs e
phase = liftA C.phase
{-# INLINE phase #-}
-- | The conjugate of a complex pixel.
conjugate :: (Applicative (Pixel cs), RealFloat e) => Pixel cs (Complex e) -> Pixel cs (Complex e)
conjugate = liftA C.conjugate
{-# INLINE conjugate #-}
|
% Script demo_set_geometry
% Illustrates the different components that define an image geometry.
%
% demo_set_geometry
%
%
% See also definition_of_geometry
%
% Author: Saskia Bollmann & Lars Kasper
% Created: 2018-11-05
% Copyright (C) 2018 Institute for Biomedical Engineering
% University of Zurich and ETH Zurich
%
% This file is part of the TAPAS UniQC Toolbox, which is released
% under the terms of the GNU General Public License (GPL), version 3.
% You can redistribute it and/or modify it under the terms of the GPL
% (either version 3 or, at your option, any later version).
% For further details, see the file COPYING or
% <http://www.gnu.org/licenses/>.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 1. Load data
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% get example data
dataPath = tapas_uniqc_get_path('data');
niftiFile4D = fullfile(dataPath, 'nifti', 'rest', 'fmri_short.nii');
dataLoad = MrImage(niftiFile4D);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 2. Create MrImage object from matrix
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% no geometry information is supplied, defaults are used
dataRaw = MrImage(dataLoad.data); % as reference, remains unchanged
data = MrImage(dataLoad.data); % geometry and dimInfo will be adapted
dataRaw = dataRaw.select('t', 1);
data = data.select('t', 1);
data.parameters.save.fileName = 'orig.nii';
disp_centre_and_origin(data);
data.plot('plotType', 'spmi');
% Note 1: no dimInfo/geometry information is supplied, defaults are used:
% nSamples is derived from the data matrix
% resolutions is assumed to be 1
% the origin (voxel at position [0 0 0] mm) is in the center of the image
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 2. Add resolution
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% plot data using the classical way, but adding sampling points
% (this option is only available with the image processing toolbox)
iptsetpref('ImshowAxesVisible', 'on');
f = data.plot;
a = f.CurrentAxes;
nX = round(a.XLim(2)/data.dimInfo.nSamples(1));
xAxis = repmat(data.dimInfo.samplingPoints{1}, [1,nX]);
a.XTickLabel = xAxis(a.XTick);
nY = round(a.YLim(2)/data.dimInfo.nSamples(2));
yAxis = repmat(data.dimInfo.samplingPoints{2}, [1,nY]);
a.YTickLabel = yAxis(a.YTick);
% add additional resolution information
data.dimInfo.resolutions = dataLoad.dimInfo.resolutions;
f = data.plot;
a = f.CurrentAxes;
nX = round(a.XLim(2)/data.dimInfo.nSamples(1));
xAxis = repmat(data.dimInfo.samplingPoints{1}, [1,nX]);
a.XTickLabel = xAxis(a.XTick);
nY = round(a.YLim(2)/data.dimInfo.nSamples(2));
yAxis = repmat(data.dimInfo.samplingPoints{2}, [1,nY]);
a.YTickLabel = yAxis(a.YTick);
disp_centre_and_origin(data);
data.plot('plotType', 'spmi', 'overlayImages', dataRaw);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 3. Add Shear
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% none of these options will affect matrix plot
data.affineTransformation.shear = [0.5 0 0];
data.plot('plotType', 'spmi', 'overlayImages', dataRaw);
disp_centre_and_origin(data);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 4. Add Rotation
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
data.affineTransformation.shear = [0 0 0];
data.affineTransformation.rotation_deg = [0 30 0];
data.plot('plotType', 'spmi', 'overlayImages', dataRaw);
disp_centre_and_origin(data);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 4a. Add Translation (offcentre_mm) in the affineTrafo
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
data.affineTransformation.offcenter_mm(3) = 10;
data.plot('plotType', 'spmi', 'overlayImages', dataRaw);
% world space origin is changed (but note that, since the transformation is
% applied last, the origin changes in the two dimensions which are affected by the rotation)
disp_centre_and_origin(data);
% but voxel space origin is maintained
disp(data.dimInfo.get_origin());
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% 4b. Change translation in dimInfo
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% now the translation is applied first
data2 = data.select('t', 1).copyobj();
data2.affineTransformation.offcenter_mm(3) = -10;
data2.dimInfo.set_dims(3, 'firstSamplingPoint', data2.dimInfo.samplingPoints{3}(1) + 20);
data2.plot('plotType', 'spmi', 'overlayImages', data.select('t', 1));
disp_centre_and_origin(data2);
|
{-# OPTIONS -v impossible:10 #-}
-- Andreas, 2021-05-12, issue #5379
module ImpossibleVerboseReduceM where
-- Note: keep ReduceM as first word after IMPOSSIBLE!
{-# IMPOSSIBLE ReduceM should also print this debug message. #-}
-- Should print the text after IMPOSSIBLE and then raise an internal error.
|
\documentclass[11pt, letterpaper]{amsart}
\usepackage[
bindingoffset=0.2in,
left=1in,
right=1in,
top=1in,
bottom=1in,
footskip=0.25in]{geometry}
\usepackage{graphicx}
\usepackage{amssymb}
\usepackage{epstopdf}
\usepackage{subcaption}
\usepackage{placeins}
\usepackage{wrapfig}
\usepackage[htt]{hyphenat}
\usepackage{microtype}
\usepackage[backend=bibtex]{biblatex}
\addbibresource{writeup.bib}
\appto{\bibsetup}{\raggedright}
\usepackage{hyperref}
% Force figures to stay with text
\let\Oldsection\section
\renewcommand{\section}{\FloatBarrier\Oldsection}
\let\Oldsubsection\subsection
\renewcommand{\subsection}{\FloatBarrier\Oldsubsection}
\let\Oldsubsubsection\subsubsection
\renewcommand{\subsubsection}{\FloatBarrier\Oldsubsubsection}
\DeclareGraphicsRule{.tif}{png}{.png}{`convert #1 `dirname #1`/`basename #1 .tif`.png}
\graphicspath{{./notebooks/figs/}}
\title{STAT 215A, Final Project\\Classifying ciTBI in Youth}
\author{Florica Constantine, Hyunsuk Kim, Mark Oussoren, Sahil Saxena}
\email{\{florica, hyskim7, mark\_oussoren, sahilsaxena18\}@berkeley.edu}
\date{2021 December 10 Friday}
\begin{document}
\maketitle
\section{Introduction}
% How can we best vet and/or improve the clinical decision rule for your given problem? Most importantly, the clinical decision rule should be highly predictive and minimize the amount of missed diagnoses (i.e. have a very high sensitivity). It should also be easy-to-use, using variables that clinicians can readily have access to when making their decisions. Finally, the interpretability of the rule helps to check whether its predictions will make sense for new patients and makes it easier to apply in new settings.
Clinically-important traumatic brain injuries, hereafter referred to as ciTBIs, are both common in children and require immediate medical attention, as lack of care can lead to death or permanent disability. However, diagnoses often require a CT scan to confirm the presence of a TBI \cite{brenner2002estimating}. In children, this need is problematic, as the radiation from a CT scan can lead to long-term adverse effects\footnote{Per our clinical collaborators, we note that medical practitioners say that children are at greater risk for long-term adverse effects from radiation because they have a longer life expectancy (years left to live) than adults. I.e., their primary rationale is that of patients having longer to live, as opposed to something inherently different about children's reaction to radiation.}; hence, given a child presenting with a potential TBI, it is desirable to find a way to decide whether they actually need a CT scan, balanced against the knowledge that forgoing a CT scan in the presence of an actual TBI is to be avoided. In this report, we revisit the data from \cite{kuppermann2009identification} and derive a new decision rule for identifying which child patients need a CT scan. That is, given details (data) of a patient's injury and condition, we translate these into numerical features that we may feed into a statistical model to predict the need for a CT scan.
In this report, we begin by discussing the data in section \ref{sec:data}. We discuss its collection in section \ref{ssec:datacollect} and some of its details in section \ref{ssec:datamean}. We perform a lengthy exploratory data analysis (EDA) in section \ref{ssec:datamean}. In section \ref{sec:model}, we fit models to the data and interpret the results. We discuss the baseline model from \cite{kuppermann2009identification} in section \ref{ssec:baseline} and results in section \ref{ssec:results}. We perform an analysis with an alternative data split (explained later) in section \ref{ssec:verbal}. Finally, we present conclusions in section \ref{sec:conclusions}.
\section{Data}\label{sec:data}
\subsection{Data collection} \label{ssec:datacollect}
% What are the most relevant data to collect to answer the question in (1)? Ideas from experimental design (a subfield of statistics) and active learning (a subfield of machine learning) are useful here. The above question is good to ask even if the data has already been collected because understanding the ideal data collection process might reveal shortcomings of the actual data collection process and shed light on analysis steps to follow. The questions below are useful to ask: How were the data collected? At what locations? Over what time period? Who collected them? What instruments were used? Have the operators and instruments changed over the period? Try to imagine yourself at the data collection site physically.
The authors in \cite{kuppermann2009identification} collected data in a prospective cohort study from 43,499 patients younger than 18 years of age that visited a hospital within 24 hours of experiencing head trauma. The study was run across 25 pediatric emergency departments over a span of approximately 2 years, where the last few months were used to collect samples for validating the decision rules derived in the original study. Only patients with GCS (Glasgow Coma Scale) scores of 14 or 15 were considered; those with scores 13 or less were enrolled but were not grouped with the others. For each patient, a trained investigator or other medical personnel recorded various prespecified details, e.g., mechanism of injury, medical history, and responses to standardized questions about the presence of several symptoms or signs of head trauma on a standardized data form.
For a small subset of patients (approximately 4\%), a second assessment was performed for quality control purposes--note that we do not use this information, but that its presence is reassuring. Note that there will likely be uncaught entry errors or errors arising from incorrect patient reporting in the data, as well as subjective biases in reporting (e.g., what constitutes a severe injury might differ among physicians and between physicians, parents, and child patients). All of these are sources of randomness in the data. Moreover, there will be natural differences arising from the different hospitals and the different populations that they serve---we are not privy to this information, but it is a source of potential batch effects. Nonetheless, we believe that apart from age groupings (discussed later), all of the data may be analyzed together---we cannot correct for this unavoidable randomness, and this is the best that we can do\footnote{Indeed, we might prefer having some noise in the training data, as it may in fact improve generalization performance of our models \cite{carlini2019evaluating}.}. Moreover, each sample in the data comes from a unique person-event combination, that is, there are no repeated samples or temporal dependencies that we know of. It is certainly possible that a patient is in the data twice, but we believe that this is likely a very rare event, if it occurs at all.
\subsection{Data Feature Meaning}\label{ssec:datamean}
% What does each variable mean in the data? What does it measure? Does it measure what it is supposed to measure? How could things go wrong? What statistical assumptions is one making by assuming things didn’t go wrong? (Knowing the data collection process helps here.) Meaning of each variable -- ask students to imagine being there at the ER and giving a Glasgow coma score, for example, and also a couple of variables -- ask students what could cause different values written down. How were the data cleaned? By whom?
In this section, we discuss some of the important variables and features in the dataset.
The study defined the following as a positive ciTBI outcome: death from a TBI, a hospital admission of over 2 days following a diagnosis of TBI from a CT scan, intubation for over 24 hours due to head trauma, or the need for neurosurgery following a CT scan. Other patients were assigned the negative outcome; to find missed positives, the study coordinators performed telephone surveys to follow up with parents and tracked follow-up visits. If a positive outcome was missed, the patient's label was updated to positive.
An important variable in this data set is the GCS score. The Glasgow Coma Scale (GCS) is a common scoring system used in emergency departments to determine a patient's level of consciousness by rating their ability to pass certain tests of eye and motor movement along with verbal ability \cite{teasdale2014glasgow}. The scores from each of these three categories are summed to form a total GCS score, valued in the range 3-15. Lower scores in each category lead to a lower total GCS score, meaning the patient is in a worse state.
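For concreteness, the following minimal sketch shows how a total GCS score and the associated GCS class could be derived from the three component scores; the column names (e.g., \texttt{GCSEye}) are hypothetical placeholders, and this is illustrative rather than our actual preprocessing code.
\begin{verbatim}
import pandas as pd

def add_gcs_total(df: pd.DataFrame) -> pd.DataFrame:
    """Sum the three GCS components and derive the GCS class used in the study."""
    df = df.copy()
    # Component ranges: eye 1-4, verbal 1-5, motor 1-6, so the total lies in 3-15.
    df["GCSTotal"] = df["GCSEye"] + df["GCSVerbal"] + df["GCSMotor"]
    df["GCSClass"] = df["GCSTotal"].ge(14).map({True: "14-15", False: "3-13"})
    return df
\end{verbatim}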
Note that several variables or descriptors in the study require the ability to converse with the child for assignment, e.g., the presence of a headache or whether or not the child is suffering from amnesia. Similarly, a GCS score for a pre-verbal child is also calculated by slightly different metrics than those for an adult. Judging verbal ability, especially, differs: a condition like `inappropriate words' is instead assessed as `cries of pain' for children who are pre-verbal. Even motor ability has some conditions assessed differently, such as looking for `spontaneous movement' rather than `follow commands' in preverbal children. Hence, as in \cite{kuppermann2009identification}, we chose to separate patients under the age of two (presumed pre-verbal) from those aged two or older (presumed verbal) in our analysis. Moreover, as children under the age of two are considered more at risk for long-term adverse effects from radiation, it is reasonable to consider this group separately \cite{brenner2002estimating}. Note that this is not a perfect grouping, as some children will be verbal by age two and some children are not verbal after age two (we look into this in our stability analysis in section \ref{ssec:verbal}), but, nonetheless, it is a good developmental benchmark \cite{blackwell2007pediatric}.
The variables in our data are all categorical or ordinal, except for age; however, for the reasons stated above, all of our analyses use the categorical version of the age variable, discretized as $< 2$ years old and $\geq 2$ years old. While it would be ideal if the continuous versions of the variables were reported instead of being pre-sorted into sometimes arbitrarily chosen categories (e.g., the length of a seizure is binned as $< 1$ min, $1-5$ min, $5-15$ min, and $> 15$ min), we are restricted to the categorical data.
Several binary indicator variables exist in the dataset, indicating whether a child suffered a loss of consciousness, seizure, headache, vomiting, altered mental state, palpable skull fracture, basilar skull fracture, hematoma, trauma above the clavicles, neurological deficits, or other (non-head) substantial injuries. Each of these variables also has more specific follow-up questions, e.g., the type of basilar skull fracture if a patient is indicated to have one. Other important variables included in our data set are the injury mechanism, injury severity, and whether the child is acting normally, is intubated, is paralyzed, and/or is sedated.
Lastly, we also have several meta variables such as patient number, race, ethnicity, gender, position of medical professional, and certification of medical professional. These variables do not affect whether a patient will be positive for ciTBI. While they should not be used as feature inputs to our models, they may be useful to examine after our analyses are complete, in case they are acting as a proxy for something deeper that is taking place.
\subsection{Exploratory Data Analysis} \label{ssec:eda}
\subsubsection{Outcome}
First, we looked at our outcome variable. Recall that the study defined the following as a positive ciTBI outcome: death from a TBI, a hospital admission of over 2 days following a diagnosis of TBI from a CT scan, intubation for over 24 hours due to head trauma, or the need for neurosurgery following a CT scan. That is, the presence of any of the four sub-outcomes constituted a positive outcome, and the lack of all four sub-outcomes constituted a negative outcome. In the data, we noted that there were 20 patients that had a missing value for the final outcome: this is a discrepancy with \cite{kuppermann2009identification}, wherein 18 rather than 20 patients have a missing value. This difference could not be resolved. Of these 20 patients, 17 are negative for all four of the sub-outcome variables making up our outcome; we thus assign these patients as being negative for a ciTBI. The three remaining patients had missing values for one or more of the four sub-outcomes and, based on clinical guidance, were dropped. The proportion of each of the four sub-outcomes is shown below in Figure \ref{fig:outcome_type}. We can see that the vast majority of ciTBI-positive patients were positive for a prolonged hospital stay.
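A minimal sketch of this outcome-resolution step is shown below; the outcome and sub-outcome column names are hypothetical placeholders, and the snippet only illustrates the rule described above rather than our exact implementation.
\begin{verbatim}
import pandas as pd

# Hypothetical column names; 1 = positive, 0 = negative, NaN = missing.
SUB_OUTCOMES = ["DeathTBI", "HospHeadOver2d", "IntubOver24h", "Neurosurgery"]

def resolve_outcome(df: pd.DataFrame) -> pd.DataFrame:
    """Fill missing ciTBI labels from the sub-outcomes, then drop unresolved rows."""
    df = df.copy()
    missing = df["ciTBI"].isna()
    # Negative for all four sub-outcomes (a NaN sub-outcome does not count as negative).
    all_negative = df[SUB_OUTCOMES].eq(0).all(axis=1)
    df.loc[missing & all_negative, "ciTBI"] = 0
    # Patients whose outcome is still unresolved are dropped.
    return df[df["ciTBI"].notna()]
\end{verbatim}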
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{outcome_type.png}
\caption{Outcome type for ciTBI patients by age group}\label{fig:outcome_type}
\end{figure}
\subsubsection{GCS Scores}
From \cite{kuppermann2009identification}, we learned that it is not controversial to perform a CT scan for patients with a GCS score ranging from 3 to 13, as in this group the risk of finding a TBI on a CT is more than 20\%. For our data set, we looked at the proportion of patients positive for ciTBI with a GCS score in the range 3-13 and also for those in the range 14-15 in Figure \ref{fig:GCSClass}. Looking at this, we can see that 40\% of patients with a GCS score in the range of 3 to 13 were positive for ciTBI versus only 0.8\% of those with a GCS score of 14 or 15---this is quite a dramatic difference. However, we wanted to know if separating the GCS score into classes with a cutoff GCS score of 14, in particular, was the best possible split. We broke up the previous plot further into individual GCS scores (Figure \ref{fig:GCSTotal}). We can see that, in general, the lower the GCS score, the higher the proportion of patients positive for ciTBI---as expected. Even at a GCS of 13, 20\% of patients were positive for ciTBI. Thus, keeping the current cutoff of 3-13 and 14-15 as the two separate GCS classes seems reasonable. Hence, we remove any patients that have a GCS in the range of 3-13 (969 total patients), as the risk of having a positive ciTBI is too high and any reasonable or acceptable (to a practitioner) decision rule would suggest always performing a CT scan for this group.
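As an illustration of this exclusion step, the sketch below computes the ciTBI rate per GCS class and then drops the GCS 3-13 group; the column names are again hypothetical placeholders.
\begin{verbatim}
import pandas as pd

def restrict_to_gcs_14_15(df: pd.DataFrame) -> pd.DataFrame:
    """Report the ciTBI rate per GCS class, then keep only GCS 14-15 patients."""
    high = df["GCSTotal"] >= 14
    print("ciTBI rate, GCS 3-13 :", round(df.loc[~high, "ciTBI"].mean(), 3))
    print("ciTBI rate, GCS 14-15:", round(df.loc[high, "ciTBI"].mean(), 3))
    # Any acceptable decision rule would always scan the GCS 3-13 group, so drop it.
    return df[high]
\end{verbatim}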
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{GCSClass_prop.png}
\subcaption{Proportion of patients with ciTBI by age and GCS Class}\label{fig:GCSClass}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{GCSTotal_prop.png}
\subcaption{Proportion of patients with ciTBI by age and GCS Score}\label{fig:GCSTotal}
\end{minipage}
\caption{GCS Score and ciTBI}\label{fig:GCS}
\end{figure}
\subsubsection{Data Missingness}
Next, we look at the rate of missingness for each feature in Figure \ref{fig:cov_missing}. We note that the features `Dizzy' and `Ethnicity' are missing in more than 35\% of patients. On the data form, ethnicity asks whether the patient is Hispanic or not and may be skipped by a patient if they fill in the race field instead (or if they are too young to fill out a form and the medical personnel do not want to guess). However, we already consider ethnicity to be a meta variable and did not use it in our analyses. After speaking with the clinicians, we learned that noting whether a patient is dizzy is not very relevant in diagnosing TBI; it is also a very subjective variable, highly susceptible to change from patient to patient based on their own personal definition of feeling dizzy. Thus, as there is no objective way to compare or impute this variable, we decided to drop it.
The other variables with missingness were imputed in one of two ways. Where a clear `healthy' response exists, it was imputed, e.g., a `No' value was imputed for missing paralyzed or intubation values---this choice was based on clinical guidance, as in the view of practitioners, serious events like paralysis or intubation are unlikely to be left unnotated. Otherwise, for variables with no clear `healthy' response, e.g., hematoma size, the modal response was used. We note that, per Figure \ref{fig:cov_missing}, all features used in our analysis have a missingness under $10$\%, and hence we believe that imputation likely has a minor effect on our results---even if some imputed values were actually positive (rather than negative or healthy).
Many variables have a parent question, such as `Seiz' for seizure, with follow-up questions, such as the length of the seizure. If a patient has a response of `No' for the parent question, then `Not applicable' is often marked on the form for each follow-up question. For consistency, we convert these `Not applicable' answers to `No' to make the analyses easier to perform.
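The imputation rules above can be sketched as follows; the lists of column names and the imputed values are hypothetical placeholders standing in for the clinically guided choices, not our exact implementation.
\begin{verbatim}
import pandas as pd

HEALTHY_DEFAULTS = {"Paralyzed": "No", "Intubated": "No", "Sedated": "No"}
MODE_IMPUTED = ["HemaSize"]            # variables with no clear "healthy" response
FOLLOWUP_COLS = ["SeizLen", "LOCLen"]  # follow-ups to parent questions

def impute(df: pd.DataFrame) -> pd.DataFrame:
    df = df.copy()
    # Rule 1: impute the "healthy" response where a clear one exists.
    for col, healthy in HEALTHY_DEFAULTS.items():
        df[col] = df[col].fillna(healthy)
    # Rule 2: otherwise impute the most common (modal) response.
    for col in MODE_IMPUTED:
        df[col] = df[col].fillna(df[col].mode().iloc[0])
    # Rule 3: treat "Not applicable" follow-up answers as "No".
    for col in FOLLOWUP_COLS:
        df[col] = df[col].replace("Not applicable", "No")
    return df
\end{verbatim}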
We further note that the majority of patients have only around 1\% of their features missing, with a maximum still under 20\% (Figure \ref{fig:sample_missingness}); thus we do not drop any patients from our analyses.
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{covariate_missingness.png}
\subcaption{Fraction of samples missing a given feature}\label{fig:cov_missing}
\end{minipage}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{sample_missingness.png}
\subcaption{Fraction of entries missing within a sample}\label{fig:sample_missingness}
\end{minipage}
\caption{Missingness in the data}\label{fig:missing_agg}
\end{figure}
\subsubsection{Age Class Cutoff} \label{sssec:ageclass}
Age was a major factor in creating a decision rule in \cite{kuppermann2009identification}. Two rules were created based on age categories of $< 2$ and $\geq 2$ years of age. We can see in Figure \ref{fig:age_dist} that a large portion of the patient population in our data set is young, at or around 2 years of age.
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{age_dist.png}
\caption{Age Distribution}\label{fig:age_dist}
\end{figure}
Besides radiation exposure risks, one reason to demarcate age at $2$ years is verbal ability: below this age, children typically do not talk coherently, if at all. We wanted to check the number of pre-verbal subjects at each age to see if two years old is actually a good cutoff age for being verbal. In Figure \ref{fig:preverbal}, we can see that a large proportion of subjects are still pre-verbal at ages 2, 3, and even 4, when calculated based on the recorded responses to whether a patient had a headache or amnesia. These two features are the closest proxies we have for the number of pre-verbal patients in our data set, as a binary variable for being pre-verbal does not exist. It is reassuring that the proportions from the two questions are extremely similar at each age.
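A short sketch of how these proxy proportions can be computed is shown below; the response code and column names are hypothetical placeholders.
\begin{verbatim}
import pandas as pd

def preverbal_fraction_by_age(df: pd.DataFrame, question: str) -> pd.Series:
    """Fraction of responses coded as pre-verbal at each age in years."""
    is_preverbal = df[question].eq("Pre-verbal/Non-verbal")
    return is_preverbal.groupby(df["AgeInYears"]).mean()
\end{verbatim}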
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{amnesia_preverbal.png}
\subcaption{Preverbal response to Amnesia question}\label{fig:amnesia_preverbal}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{headache_preverbal.png}
\subcaption{Preverbal response to Headache question}\label{fig:headache_preverbal}
\end{minipage}
\caption{Preverbal responses to Amnesia and Headache questions}\label{fig:preverbal}
\end{figure}
We will revisit this division in section \ref{ssec:verbal}, where we explore the results of fitting models for different age splits.
\subsubsection{Distribution of Features by Age} \label{sssec:agefeatures}
Next, we look at the occurrence of ciTBI in each of our two age categories in Figure \ref{fig:age_by_outcome}. We can see that the proportion of ciTBI in each age category is very close to being the same. The proportions of injury severity in Figure \ref{fig:age_by_injury_severity} are likewise similar across age categories.
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{age_comparison_by_group.png}
\subcaption{Proportion of outcomes grouped by age}\label{fig:age_by_outcome}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{age_by_injuryseverity.png}
\subcaption{Proportion of injury severity grouped by age}\label{fig:age_by_injury_severity}
\end{minipage}
\caption{Outcome and Injury Severity}\label{fig:age_distributions}
\end{figure}
However, there may still be other variables with different proportions of positive ciTBI across age categories; we investigate this in Figure \ref{fig:age_covariate}. That is, for each age category and for each outcome, we look at the proportion of patients with the indicated symptom. This exercise might indicate whether such a variable would potentially lead to a different decision rule between the two groups. We can see that the proportion of patients with ciTBI is noticeably different between age $<2$ and age $\geq 2$ for `Vomit' and `OSI' (other non-head injury). Also, we note that the variables measuring amnesia and headache cannot be answered by those who are pre-verbal and thus may be useful in a decision rule for those over age 2 but not under.
\begin{figure}
\centering
\includegraphics[width=\textwidth]{covariate_by_age.png}
\caption{Proportion of positive feature identifiers by outcome (ciTBI) and age}\label{fig:age_covariate}
\end{figure}
\subsubsection{Correlation of Features to Outcome}
Next, we examine whether any of the features are particularly correlated with the outcome by calculating Spearman's $\rho$ between the ordinal variables and the binary outcome. We can see in Figure \ref{fig:spearman_corr_to_outcome} that none of the features are well correlated with the outcome. A maximum correlation coefficient of 0.12 is attained by the altered mental state feature.
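A sketch of this computation is shown below; it assumes the ordinal features are encoded numerically and is illustrative rather than our exact code.
\begin{verbatim}
import pandas as pd
from scipy.stats import spearmanr

def spearman_to_outcome(X: pd.DataFrame, y: pd.Series) -> pd.Series:
    """Spearman's rho between each ordinal feature and the binary outcome."""
    rhos = {}
    for col in X.columns:
        rho, _ = spearmanr(X[col], y, nan_policy="omit")
        rhos[col] = rho
    return pd.Series(rhos).sort_values(key=abs, ascending=False)
\end{verbatim}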
\begin{figure}
\centering
\includegraphics[width=0.5\textwidth]{spearman_corr_to_outcome.png}
\caption{Spearman Correlation of Features to Outcome}\label{fig:spearman_corr_to_outcome}
\end{figure}
\subsubsection{Principal Component Analysis}
We perform Principal Component Analysis (PCA) on the one-hot encoded data \cite{pearson1901liii}. In Figure \ref{fig:pca_cum_var}, we see that nearly all of the variance is explained by the first 100 components, and that the first few components capture most of the variance (the first two components explain 13\%, the first five explain 30\%, and the first twenty explain 50\% of the variation in the data). That is, noting that the PCA eigenvalues (variances) decay rapidly, we might believe that this dataset behaves like a low-rank signal plus noise.
In Figure \ref{fig:pca}, we project the one-hot encoded data to two dimensions to study if the classes (age and outcome) are visually separable. First, we see that the classes do not separate, but that there are two distinct clusters in the data---since the data were taken from 25 hospitals and there was no laboratory or experimental processing of the data, we did not suspect a batch effect (there would only be two potential batches here). Instead, we see that the presence of an OSI (other, non-head-related injury) leads to the two clusters. Note that the prevalence of OSI in the data is low (10\%), but that it is enough to strongly affect the results of PCA. We will keep this in mind when doing our analyses and check whether the majority of misclassified points come from patients who had an OSI injury. If so, we may want to consider forming a separate decision rule for this subgroup of the patient population.
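A minimal sketch of this PCA step is given below; the exact one-hot encoding and preprocessing we used may differ.
\begin{verbatim}
import pandas as pd
from sklearn.decomposition import PCA

def pca_on_onehot(df: pd.DataFrame, n_components: int = 20):
    """PCA on a one-hot encoding of the (categorical) features."""
    X = pd.get_dummies(df).astype(float)  # one-hot encode categorical columns
    pca = PCA(n_components=n_components)
    scores = pca.fit_transform(X)         # rows of `scores` are the projected samples
    print("cumulative variance explained:", pca.explained_variance_ratio_.cumsum())
    return scores, pca
\end{verbatim}
The two-dimensional projections in Figure \ref{fig:pca} correspond to the first two columns of such a score matrix, colored by age/outcome and by OSI.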
\begin{figure}
\centering
\includegraphics[width=\textwidth]{pca_cum_var.png}
\caption{PCA: Cumulative Variance Explained and PCA Eigenvalues; the second row is a zoomed-in version of the top row. }\label{fig:pca_cum_var}
\end{figure}
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{pca_age_outcome.png}
\subcaption{2-dimensional PCA projection colored by Age and Outcome}\label{fig:pca_age_outcome}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{pca_osi.png}
\subcaption{2-dimensional PCA projection colored by OSI}\label{fig:pca_osi}
\end{minipage}
\caption{2-dimensional PCA projection and two natural clusters}\label{fig:pca}
\end{figure}
\section{Modeling: Results and Discussion}\label{sec:model}
In this section, we fit models to predict whether patients have a ciTBI outcome from their covariates. We consider several models: $\ell_1$-penalized logistic regression \cite{simon2013sparse}, group $\ell_1$-penalized logistic regression \cite{simon2013sparse}, a single decision tree \cite{safavian1991survey}, a random forest \cite{ho1995random}, AdaBoost \cite{schapire2013explaining}, LogitBoost \cite{cai2006using}, and a linear SVM \cite{boser1992training}. All of these models were chosen for their relative ease of implementation and interpretability. We focus on the metric of sensitivity as, per our clinical collaborators, clinicians will only accept or trust decision rules with $\sim 0.1-1$\% missed positives (ciTBI cases) in children, and on NPV as we want to minimize unneeded CT scans. Note that minimizing unneeded CT scans minimizes unneeded exposure to long-term adverse effects from radiation, and also increases patient throughput and hospital efficiency. As all of these algorithms have one or more parameters that can be tuned, based on practitioner feedback, we chose to operate at a point wherein the sensitivity was at least $0.99$ (or as close to it as possible) and the negative predictive value (NPV) was as close to $1$ as possible. However, we found that operating at a point corresponding to $0.99$ validation sensitivity led to poor generalization on the test set (to be discussed later), so we present results corresponding to a validation sensitivity of $0.95$. We also present results corresponding to a validation sensitivity of $0.99$ for comparison.
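The operating-point selection can be sketched as follows: given validation labels and predicted probabilities, we take the largest threshold whose validation sensitivity still meets the target. This is an illustration of the procedure described above, not our exact implementation.
\begin{verbatim}
import numpy as np

def pick_threshold(y_val: np.ndarray, p_val: np.ndarray,
                   target_sens: float = 0.95) -> float:
    """Largest probability threshold whose validation sensitivity meets the target."""
    n_pos = np.sum(y_val == 1)   # assumes at least one positive case in validation
    best = p_val.min()           # fallback: classify everyone as positive
    for t in np.unique(p_val):
        sens = np.sum((p_val >= t) & (y_val == 1)) / n_pos
        if sens >= target_sens:
            best = max(best, t)  # prefer the highest qualifying threshold
    return best
\end{verbatim}
A higher threshold trades sensitivity for specificity, so taking the largest qualifying threshold keeps as much specificity (and hence as many avoided scans) as the sensitivity constraint allows.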
We continue with the data split of age $< 2$ and $\geq 2$ and fit models for each group; we also fit models for the entire, unsplit dataset. The unsplit models can be thought of as a stability and reality check for whether the age-based demarcation is significant and necessary, as our exploratory data analysis in sections \ref{sssec:ageclass} and \ref{sssec:agefeatures} indicate that it may not be. In section \ref{ssec:verbal}, we study the effect of a different data split (pre-verbal v. verbal) and compare the results with the age-$2$ split.
\subsection{Baseline Model} \label{ssec:baseline}
Before proceeding, we briefly describe the model from \cite{kuppermann2009identification}, hereafter referred to as the baseline model. The model consists of two decision trees: one for patients with an age under $2$ years, and one for patients aged two years or older. The tree for patients younger than $2$ consists of questions about the presence of: an altered mental status (AMS), scalp hematoma, loss of consciousness for greater than $5$ seconds, a severe cause of injury, a palpable or possible skull fracture, and abnormal behavior per a parent. The questions are asked in the given order, and if all answers are `no', no scan is recommended. For patients older than $2$, the tree consists of questions about the presence of: an altered mental status (AMS), loss of consciousness, a history of vomiting, a severe cause of injury, a palpable or possible skull fracture, signs of a basilar skull fracture, and the presence of a severe headache. Once again, all `no' answers lead to no scan.
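For illustration, a coarse binary reading of the under-2 rule (any positive answer counts as a positive prediction, matching how we score the baseline as a classifier) could be coded as follows; the feature names are hypothetical placeholders.
\begin{verbatim}
# Hypothetical feature names; True means the sign/symptom is present.
UNDER_TWO_PREDICTORS = [
    "altered_mental_status",
    "scalp_hematoma",
    "loss_of_consciousness_over_5s",
    "severe_mechanism_of_injury",
    "palpable_or_possible_skull_fracture",
    "not_acting_normally_per_parent",
]

def baseline_under_two_positive(patient: dict) -> bool:
    """Flag the patient if any predictor in the under-2 tree is present."""
    return any(bool(patient.get(p, False)) for p in UNDER_TWO_PREDICTORS)
\end{verbatim}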
\subsection{Results} \label{ssec:results}
In Table \ref{tab:val_performance}, we present results for all of the algorithms on the validation set. That is, we trained all algorithms on the training set for a wide variety of parameters (if there were any), selected an appropriate operating point/threshold as described above on the validation set, and have summarized the selected operating points for each algorithm. We see that $\ell_1$-penalized Logistic Regression has the highest AUC while having a sensitivity close to $0.95$ and an NPV close to $1$. Moreover, relative to other algorithms with similar characteristics (e.g., the Group $\ell_1$-penalized Logistic Regression and AdaBoost), we see that the specificity is much higher. Hence, we selected $\ell_1$-penalized Logistic Regression as our `best' method. Moreover, we see that this method performs better than the baseline algorithm: we have better validation AUCs as well as better specificities and comparable NPVs at similar sensitivities. In general, we see that the models trained on the unsplit (by age) data perform slightly worse than both of the models trained on the individual halves.
\begin{table}[h]
\resizebox{\textwidth}{!}{%
\begin{tabular}{llllllll}
Algorithm & Age & AUC & Accuracy & Sensitivity & Specificity & NPV & Balanced Accuracy \\
\hline
$\ell_1$-penalized Logistic Regression & young & 0.938 & 0.764 & 1.0 & 0.762 & 1.0 & 0.881 \\
$\ell_1$-penalized Logistic Regression & old & 0.931 & 0.751 & 0.957 & 0.75 & 1.0 & 0.854 \\
$\ell_1$-penalized Logistic Regression & all & 0.917 & 0.75 & 0.952 & 0.748 & 0.999 & 0.85 \\
Group $\ell_1$-penalized Logistic Regression & young & 0.908 & 0.728 & 1.0 & 0.726 & 1.0 & 0.863 \\
Group $\ell_1$-penalized Logistic Regression & old & 0.917 & 0.68 & 0.957 & 0.678 & 1.0 & 0.818 \\
Group $\ell_1$-penalized Logistic Regression & all & 0.917 & 0.745 & 0.952 & 0.743 & 0.999 & 0.848 \\
AdaBoost & young & 0.781 & 0.064 & 1.0 & 0.058 & 1.0 & 0.529 \\
AdaBoost & old & 0.872 & 0.25 & 0.957 & 0.245 & 0.999 & 0.601 \\
AdaBoost & all & 0.899 & 0.59 & 0.952 & 0.587 & 0.999 & 0.77 \\
LogitBoost & young & 0.825 & 0.889 & 0.714 & 0.891 & 0.998 & 0.802 \\
LogitBoost & old & 0.814 & 0.213 & 0.957 & 0.208 & 0.998 & 0.583 \\
LogitBoost & all & 0.746 & 0.198 & 0.952 & 0.191 & 0.998 & 0.572 \\
Decision Tree & young & 0.898 & 0.118 & 0.929 & 0.113 & 0.996 & 0.521 \\
Decision Tree & old & 0.875 & 0.724 & 0.957 & 0.722 & 1.0 & 0.84 \\
Decision Tree & all & 0.809 & 0.01 & 0.988 & 0.0 & 0.75 & 0.494 \\
Random Forest & young & 0.815 & 0.844 & 0.714 & 0.845 & 0.998 & 0.779 \\
Random Forest & old & 0.889 & 0.796 & 0.894 & 0.795 & 0.999 & 0.844 \\
Random Forest & all & 0.845 & 0.816 & 0.798 & 0.816 & 0.998 & 0.807 \\
Linear SVM & young & 0.275 & 0.014 & 1.0 & 0.008 & 1.0 & 0.504 \\
Linear SVM & old & 0.645 & 0.057 & 0.957 & 0.051 & 0.994 & 0.504 \\
Linear SVM & all & 0.644 & 0.063 & 0.952 & 0.054 & 0.991 & 0.503 \\
Baseline & young & 0.903 & 0.545 & 1.0 & 0.542 & 1.0 & 0.771 \\
Baseline & old & 0.869 & 0.615 & 0.957 & 0.613 & 0.999 & 0.785
\end{tabular} }
\caption{Algorithm performance on validation data for each data split}\label{tab:val_performance}
\end{table}
We note that the group $\ell_1$-penalized Logistic Regression performed almost as well as the $\ell_1$-penalized Logistic Regression. However, this method is extremely sensitive to the regularization parameter, and we suspect that slight changes in the data used for training would lead to vastly different results. This result is unfortunate, as in principle, the grouping would allow us to enforce sparsity across a group of covariates (e.g., everything vomit related) and hence improve interpretability. We note that the decision tree also performed well (recall that the baseline model is also a decision tree), but that the logistic regression was better. Also, decision trees can heavily depend on the training data in ways that regression models do not: they are more complex models and create multiple decision boundaries whereas logistic regression creates only one. The random forest and boosted models (AdaBoost and LogitBoost) do not perform as well; we noticed that the performance was not linear in the number of trees, and conjecture that there may be some degree of overfitting on the training data. Either way, an ensemble model is naturally harder to interpret than a linear model. The SVM performed quite poorly, in contrast---it is somewhat surprising that a logistic regression method performs well where a linear SVM does not, but we chose not to investigate further given time and space constraints.
Hence, we summarize results for $\ell_1$-regularized Logistic Regression and the baseline models on the test set in Table \ref{tab:test_performance}. We found that the regularization parameter for both the young and old models was $\approx 0.336$ and that the parameter for the model trained on the unsplit data was $\approx 1.129$. We see that the test sensitivity is close to $0.95$ and that the NPV is still close to $1$, and that the AUC is close to $0.85$; these numbers are a slight drop from the validation results, but are still good---in particular, they are comparable to the baseline model, with a better specificity. Once again, we see that the model trained on the unsplit (by age) data performs slightly worse than both of the models trained on the individual halves. Recalling Figure \ref{fig:pca}, wherein we saw a significant clustering effect based on OSI, we note that OSI and classification accuracy are not well correlated.
\begin{table}[h]
\resizebox{\textwidth}{!}{%
\begin{tabular}{llllllll}
Algorithm & Age & AUC & Accuracy & Sensitivity & Specificity & NPV & Balanced Accuracy \\
\hline
$\ell_1$-penalized Logistic Regression & young & 0.846 & 0.772 & 0.923 & 0.77 & 0.999 & 0.846 \\
$\ell_1$-penalized Logistic Regression & old & 0.848 & 0.762 & 0.937 & 0.76 & 0.999 & 0.848 \\
$\ell_1$-penalized Logistic Regression & all & 0.822 & 0.794 & 0.85 & 0.793 & 0.999 & 0.822 \\
Baseline & young & 0.875 & 0.554 & 1.0 & 0.549 & 1.0 & 0.774 \\
Baseline & old & 0.873 & 0.639 & 0.937 & 0.636 & 0.999 & 0.786
\end{tabular}}
\caption{Algorithm performance on test data for each data split}\label{tab:test_performance}
\end{table}
\subsection{Discussion of the $\ell_1$-regularized Logistic Regression model} \label{ssec:discuss}
In this section, we provide some insights from studying the logistic regression models that we have fit. We note that this form of model is a good choice, as it is naturally interpretable: the $\ell_1$-penalization leads to naturally sparse coefficient vectors, so that only a subset of features are used in prediction. The sparsity combined with the linear nature of the classifier means that the coefficients' magnitudes have meaning (our data is one-hot encoded, so each coefficient is easily interpretable as a contribution of that feature), and the form of the model means that the odds ratio is a linear function of the data---this model is hence easy to use.
In Figures \ref{fig:lr_ft_young}, \ref{fig:lr_ft_old}, and \ref{fig:lr_ft_all}, we provide computed feature importances from the model. That is, we report the magnitude of each coefficient times the standard deviation (on the validation set) of the corresponding feature. We see that across all of the data splits, AMS (altered mental state) is an important variable, as are various features related to vomiting, hematoma location, and loss of consciousness.
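This importance computation can be sketched as follows for a fitted scikit-learn model on one-hot encoded validation data; it assumes the columns of the validation matrix are in the same order as the model's coefficients and is illustrative rather than our exact code.
\begin{verbatim}
import numpy as np
import pandas as pd

def l1_logreg_importances(model, X_val: pd.DataFrame) -> pd.Series:
    """|coefficient| times the validation-set standard deviation of each feature."""
    coefs = np.ravel(model.coef_)  # one coefficient per one-hot column
    importances = np.abs(coefs) * X_val.std(axis=0).to_numpy()
    return pd.Series(importances, index=X_val.columns).sort_values(ascending=False)
\end{verbatim}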
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{lr_feat_importance_young.png}
\caption{Feature importances from the $\ell_1$-regularized Logistic Regression for young patients}\label{fig:lr_ft_young}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{lr_feat_importance_old.png}
\caption{Feature importances from the $\ell_1$-regularized Logistic Regression for older patients}\label{fig:lr_ft_old}
\end{figure}
\begin{figure}
\centering
\includegraphics[width=\textwidth]{lr_feat_importance_all.png}
\caption{Feature importances from the $\ell_1$-regularized Logistic Regression for all patients}\label{fig:lr_ft_all}
\end{figure}
In Figure \ref{fig:lr_roc}, we present ROC curves for the $\ell_1$-regularized Logistic Regression for the validation and the test data. We see that all of the logistic regression models are better than the baseline model, and that the ROC curves are far from the 45-degree line.
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{lr_val_roc.png}
\subcaption{ROC curve on the validation data for the $\ell_1$-regularized Logistic Regression model}\label{fig:lr_val_roc}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{lr_test_roc.png}
\subcaption{ROC curve on the test data for the $\ell_1$-regularized Logistic Regression model}\label{fig:lr_test_roc}
\end{minipage}
\caption{ROC curves for the $\ell_1$-regularized Logistic Regression model}\label{fig:lr_roc}
\end{figure}
Finally, in Figure \ref{fig:lr_lambda_auc}, we study the validation AUC as a function of the regularization strength. We see that in a neighborhood of the chosen value ($[10^{-1}, 10^{1/2}]$), the AUC is relatively stable and high. That is, perturbations to the regularization parameter or not searching on a fine enough grid are not concerns in our analysis.
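A sketch of such a sweep is shown below; note that scikit-learn parameterizes the $\ell_1$ penalty through the inverse regularization strength \texttt{C}, and the grid here is illustrative rather than our exact grid.
\begin{verbatim}
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

def auc_vs_regularization(X_tr, y_tr, X_val, y_val, n_grid: int = 20) -> dict:
    """Validation AUC of an l1-penalized logistic regression over a grid of C values."""
    aucs = {}
    for C in np.logspace(-2, 2, n_grid):
        clf = LogisticRegression(penalty="l1", solver="liblinear", C=C, max_iter=1000)
        clf.fit(X_tr, y_tr)
        aucs[C] = roc_auc_score(y_val, clf.predict_proba(X_val)[:, 1])
    return aucs
\end{verbatim}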
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{lr_auc_lambda.png}
\caption{Validation AUC for the $\ell_1$-regularized Logistic Regression as a function of regularization strength}\label{fig:lr_lambda_auc}
\end{figure}
\subsubsection{Model stability}
In this section, we report the results from bootstrapping the training dataset and evaluating the bootstrapped logistic regression models on the validation set. We measure the validation AUC as well as track which regularization parameter led to the best model. Ideally, we would see a tight concentration close to and around the previously observed values. As this step involves a parameter sweep, we used 20 bootstrap samples of size equal to a quarter of the original dataset and searched over 20 values of the regularization parameter. Our results appear in Figure \ref{fig:lr_bootstrap_val}, where we see that all of the AUCs have relatively tight concentration around the previously observed values. Moreover, the chosen regularization parameters are still close to the values picked for the chosen model. We present the mean and standard deviation of the bootstrapped validation AUCs in Table \ref{tab:bs_auc}: the values are consistent with what we saw earlier on the full dataset.
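The bootstrap procedure can be sketched as follows; the array inputs and the parameter grid are illustrative and need not match our exact implementation.
\begin{verbatim}
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

def bootstrap_best_aucs(X_tr, y_tr, X_val, y_val, n_boot: int = 20, seed: int = 0):
    """For each quarter-sized bootstrap resample, sweep C and keep the best validation AUC."""
    rng = np.random.default_rng(seed)
    X_tr, y_tr = np.asarray(X_tr), np.asarray(y_tr)
    results = []
    for _ in range(n_boot):
        idx = rng.choice(len(y_tr), size=len(y_tr) // 4, replace=True)
        best_auc, best_C = -np.inf, None
        for C in np.logspace(-2, 2, 20):
            clf = LogisticRegression(penalty="l1", solver="liblinear",
                                     C=C, max_iter=1000)
            clf.fit(X_tr[idx], y_tr[idx])
            auc = roc_auc_score(y_val, clf.predict_proba(X_val)[:, 1])
            if auc > best_auc:
                best_auc, best_C = auc, C
        results.append((best_auc, best_C))
    return results
\end{verbatim}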
\begin{table}[h]
\begin{tabular}{llll}
Age & Mean AUC & Standard Deviation of AUC & Model AUC (full data) \\
\hline
Young & 0.909 & 0.0172 & 0.938 \\
Old & 0.905 & 0.0124 & 0.931 \\
All & 0.892 & 0.009933 & 0.917
\end{tabular}
\caption{Bootstrapped mean and standard deviation of validation AUCs for the $\ell_1$-regularized Logistic Regression model; note that the 20 bootstrapped datasets were of smaller size (one quarter) than the original data.}\label{tab:bs_auc}
\end{table}
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{lr_bootstrap_val.png}
\caption{Bootstrapped validation AUC for the $\ell_1$-regularized Logistic Regression and the corresponding regularization parameters. Recall that the young and old models previously had parameters of $0.336$ and the all model had a parameter value of $1.129$.}\label{fig:lr_bootstrap_val}
\end{figure}
For comparison, we present bootstrapped AUCs for the baseline model on the validation dataset. We report results from 50 bootstrapped datasets with sample sizes equal to that of the original dataset. We present our results in Table \ref{tab:bs_auc_baseline} and Figure \ref{fig:baseline_bootstrap_val}: the values are consistent with what we saw earlier on the full dataset.
\begin{table}[h]
\begin{tabular}{llll}
Age & Mean AUC & Standard Deviation of AUC & Model AUC (full data) \\
\hline
Young & 0.870 & 0.0211 & 0.903 \\
Old & 0.902 & 0.0191 & 0.869
\end{tabular}
\caption{Bootstrapped mean and standard deviation of validation AUCs for the baseline model}\label{tab:bs_auc_baseline}
\end{table}
\begin{figure}
\centering
\includegraphics[width=0.7\textwidth]{baseline_bootstrap_val.png}
\caption{Bootstrapped validation AUC for the baseline model.}\label{fig:baseline_bootstrap_val}
\end{figure}
\subsubsection{A different threshold}
In this section, we look at the effect of setting the operating point based on a different threshold. Elsewhere in this report, we choose an operating point such that the validation sensitivity is as close to $0.95$ as possible. Here, we look at choosing an operating point such that the sensitivity is as close to $0.99$ as possible. This choice is motivated by discussions we had with our clinical practitioner contacts.
In Table \ref{tab:val_performance_99}, we present results on the validation data. These results are comparable to those with the previous threshold, though the accuracy on the all model is slightly worse. In Table \ref{tab:test_performance_99}, we present results on the test data. These results are noticeably worse, especially for the old and all models: we conjecture that the high sensitivity constraints set on the validation data lead to poor generalization on the test data, especially given the low prevalence of ciTBI cases in the data (0.8\%).
\begin{table}[h]
\resizebox{\textwidth}{!}{%
\begin{tabular}{llllllll}
Algorithm & Age & AUC & Accuracy & Sensitivity & Specificity & NPV & Balanced Accuracy \\
\hline
$\ell_1$-penalized Logistic Regression & young & 0.938 & 0.763 & 1.000 & 0.761 & 1.000 & 0.881 \\
$\ell_1$-penalized Logistic Regression & old & 0.931 & 0.0683 & 1.000 & 0.0614 & 1.000 & 0.531 \\
$\ell_1$-penalized Logistic Regression & all & 0.917 & 0.0685 & 1.000 & 0.0593 & 1.000 & 0.530
\end{tabular}}
\caption{Algorithm performance on validation data for each data split with a differently set threshold (0.99 sensitivity)}\label{tab:val_performance_99}
\end{table}
\begin{table}[h]
\resizebox{\textwidth}{!}{%
\begin{tabular}{llllllll}
Algorithm & Age & AUC & Accuracy & Sensitivity & Specificity & NPV & Balanced Accuracy \\
\hline
$\ell_1$-penalized Logistic Regression & young & 0.846 & 0.771 & 0.923 & 0.769 & 0.998 & 0.866 \\
$\ell_1$-penalized Logistic Regression & old & 0.530 & 0.0703 & 1.000 & 0.0610 & 1.000 & 0.530 \\
$\ell_1$-penalized Logistic Regression & all & 0.587 & 0.197 & 0.983 & 0.191 & 0.999 & 0.587
\end{tabular}}
\caption{Algorithm performance on test data for each data split with a differently set threshold (0.99 sensitivity)}\label{tab:test_performance_99}
\end{table}
\subsection{A different data split: Pre-verbal v. verbal} \label{ssec:verbal}
In this section, we return to Figure \ref{fig:preverbal}, where we saw that many patients under the age of $5$ were pre-verbal, but the standard practice is to separate subjects before and after the age of $2$. Moreover, there were relatively few patients over the age of $5$ that were not verbal (104 subjects in validation), so we conjecture that a rule split based on pre-verbal v. verbal status might be a better choice. Using the same train/validation/test splits, we re-trained our $\ell_1$-regularized Logistic Regression models.
In Figure \ref{fig:lr_roc_vb}, we present ROC curves for the $\ell_1$-regularized Logistic Regression for the validation and the test data with the new data split. We see that all of the logistic regression models are better than the baseline model, and that the ROC curves are far from the 45-degree line, but that this split leads to slightly worse performance than the original division at the age of $2$. Nonetheless, we believe that this or similar data splits merit further investigation: there are children under the age of two who are verbal and can hence communicate their mental status, but there are also those over the age of two who are not and hence cannot communicate. Per our clinical collaborators, children's verbal abilities fall along a spectrum, especially before the age of 5, and it is hence difficult to separate patients based on age.
\begin{figure}
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{lr_val_roc_verbal.png}
\subcaption{ROC curve on the validation data for the $\ell_1$-regularized Logistic Regression model}\label{fig:lr_val_roc_vb}
\end{minipage}%
\begin{minipage}[b]{0.5\linewidth}
\centering
\includegraphics[width=\textwidth]{lr_test_roc_verbal.png}
\subcaption{ROC curve on the test data for the $\ell_1$-regularized Logistic Regression model}\label{fig:lr_test_roc_vb}
\end{minipage}
\caption{ROC curves for the $\ell_1$-regularized Logistic Regression model with the pre-verbal/verbal split}\label{fig:lr_roc_vb}
\end{figure}
\section{Conclusions} \label{sec:conclusions}
In this report, we have looked at patient data from a prospective cohort study wherein patients between the ages of 0-17 who visited one of a series of hospitals presenting with a potential TBI were enrolled. We were given data from patient questionnaires, which we converted to numerical features, and we fit models to predict the need for a CT scan. We found that an $\ell_1$-penalized logistic regression model performed the best and was an improvement (better AUC, comparable NPV, and higher specificity at similar sensitivities) over the model derived in \cite{kuppermann2009identification}. Interestingly, the model trained on all of the data (unsplit by age) is only marginally worse (in terms of the AUC and other performance statistics) than the individual models trained on each age group: we are not surprised by this, based on our exploratory data analysis. Importantly, our logistic regression model (like the baseline model) can be computed quickly, as it is a simple, linear model. Hence, our model, like the baseline model, can be implemented easily on medical devices to help in practitioners' decision making.
There are many threads left unfinished in this work, mostly because of time and space constraints. First, it would be interesting to systematically investigate whether a better age cutoff (in terms of model performance) could be obtained, or whether an age cutoff combined with some other factor (like pre-verbal v. verbal) would be better. Additionally, it would be good to compare additional models and to investigate the performance and utility of some model explainability techniques, like LIME \cite{ribeiro2016should} or SHAP \cite{vstrumbelj2014explaining}. Finally, it would be important and interesting to understand why performance at an operating point of $0.99$ validation sensitivity generalizes poorly, and if there is a better classifier or method that would lead to better results at this operating point. We believe that a larger dataset with more positive samples (more ciTBI cases) would be helpful in this task.
\subsection{Division of Labor}
Hyunsuk Kim contributed to the exploratory data analysis, implemented the $\ell_1$-penalized logistic regression, grouped $\ell_1$-penalized logistic regression, consolidated everyone's model code into one larger wrapper function, worked on the baseline model and on implementing \texttt{baseline.py}, edited and coded functions to find the statistical metrics saved by each of the models, and offered comments on the final report.
Mark Oussoren contributed to much of the exploratory data analysis, did most of the data processing and implementation of \texttt{dataset.py}, worked on coding the baseline model, summarized his findings for EDA and the baseline model, documented the judgement calls made working with this data set, contributed to the \texttt{data\_dictionary.md} file, and offered comments on the final report.
Sahil Saxena created a slide deck to share with our clinician contact, implemented an SVM model and looked at the effect of different kernels on SVM performance, explored 3D visualizations of SVM and Logistic Regression on the first few principal components of our data, compiled the \texttt{data\_dictionary.md} file, wrote much of the README file, and offered comments on the final report.
Florica Constantine acted as the project lead, overseeing the project and editing others' results across all aspects of the project, and also individually worked on much of the exploratory data analysis, implemented the boosting models, implemented the stability analysis, implemented \texttt{model\_best.py} and some of \texttt{baseline.py}, documented judgement calls, contributed to the README file, and wrote the final report.
\subsection{Acknowledgements}
We would like to thank Dr. Aaron Kornblith and Nathan Velarde for their time and guidance, especially in answering our questions about clinical practices and the data.
\printbibliography
\end{document}
|
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : TopologicalSpace F
hm : m ≤ mΩ
hf : AEStronglyMeasurable f μ
⊢ AEStronglyMeasurable (fun x => f x.snd) (Measure.map (fun ω => (id ω, id ω)) μ)
[PROOFSTEP]
rw [← aestronglyMeasurable_comp_snd_map_prod_mk_iff (measurable_id'' hm)] at hf
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : TopologicalSpace F
hm : m ≤ mΩ
hf : AEStronglyMeasurable (fun x => f x.snd) (Measure.map (fun ω => (id ω, ω)) μ)
⊢ AEStronglyMeasurable (fun x => f x.snd) (Measure.map (fun ω => (id ω, id ω)) μ)
[PROOFSTEP]
simp_rw [id.def] at hf ⊢
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : TopologicalSpace F
hm : m ≤ mΩ
hf : AEStronglyMeasurable (fun x => f x.snd) (Measure.map (fun ω => (ω, ω)) μ)
⊢ AEStronglyMeasurable (fun x => f x.snd) (Measure.map (fun ω => (ω, ω)) μ)
[PROOFSTEP]
exact hf
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : NormedAddCommGroup F
hm : m ≤ mΩ
hf : Integrable f
⊢ Integrable fun x => f x.snd
[PROOFSTEP]
rw [← integrable_comp_snd_map_prod_mk_iff (measurable_id'' hm)] at hf
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : NormedAddCommGroup F
hm : m ≤ mΩ
hf : Integrable fun x => f x.snd
⊢ Integrable fun x => f x.snd
[PROOFSTEP]
simp_rw [id.def] at hf ⊢
[GOAL]
Ω : Type u_1
F : Type u_2
m mΩ : MeasurableSpace Ω
μ : Measure Ω
f : Ω → F
inst✝ : NormedAddCommGroup F
hm : m ≤ mΩ
hf : Integrable fun x => f x.snd
⊢ Integrable fun x => f x.snd
[PROOFSTEP]
exact hf
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁴ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝³ : PolishSpace Ω
inst✝² : BorelSpace Ω
inst✝¹ : Nonempty Ω
μ : Measure Ω
inst✝ : IsFiniteMeasure μ
ω : Ω
⊢ ↑(condexpKernel μ m) ω = ↑(condDistrib id id μ) (id ω)
[PROOFSTEP]
simp_rw [condexpKernel, kernel.comap_apply]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁴ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝³ : PolishSpace Ω
inst✝² : BorelSpace Ω
inst✝¹ : Nonempty Ω
μ : Measure Ω
inst✝ : IsFiniteMeasure μ
⊢ IsMarkovKernel (condexpKernel μ m)
[PROOFSTEP]
simp only [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁴ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝³ : PolishSpace Ω
inst✝² : BorelSpace Ω
inst✝¹ : Nonempty Ω
μ : Measure Ω
inst✝ : IsFiniteMeasure μ
⊢ IsMarkovKernel (kernel.comap (condDistrib id id μ) id (_ : Measurable id))
[PROOFSTEP]
infer_instance
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ Measurable fun ω => ↑↑(↑(condexpKernel μ m) ω) s
[PROOFSTEP]
simp_rw [condexpKernel_apply_eq_condDistrib]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ Measurable fun ω => ↑↑(↑(condDistrib id id μ) (id ω)) s
[PROOFSTEP]
refine Measurable.mono ?_ (inf_le_left : m ⊓ mΩ ≤ m) le_rfl
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ Measurable fun ω => ↑↑(↑(condDistrib id id μ) (id ω)) s
[PROOFSTEP]
convert measurable_condDistrib (μ := μ) hs
[GOAL]
case h.e'_3
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ m ⊓ mΩ = MeasurableSpace.comap id (m ⊓ mΩ)
[PROOFSTEP]
rw [MeasurableSpace.comap_id]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
⊢ AEStronglyMeasurable (fun ω => ∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω) μ
[PROOFSTEP]
simp_rw [condexpKernel_apply_eq_condDistrib]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
⊢ AEStronglyMeasurable (fun ω => ∫ (y : Ω), f y ∂↑(condDistrib id id μ) (id ω)) μ
[PROOFSTEP]
exact
AEStronglyMeasurable.integral_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf.comp_snd_map_prod_id inf_le_right)
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
⊢ AEStronglyMeasurable' m (fun ω => ∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω) μ
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
⊢ AEStronglyMeasurable' m (fun ω => ∫ (y : Ω), f y ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω) μ
[PROOFSTEP]
have h :=
aestronglyMeasurable'_integral_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf.comp_snd_map_prod_id (inf_le_right : m ⊓ mΩ ≤ mΩ))
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
h :
AEStronglyMeasurable' (MeasurableSpace.comap id (m ⊓ mΩ))
(fun a => ∫ (y : Ω), f (id a, y).snd ∂↑(condDistrib id id μ) (id a)) μ
⊢ AEStronglyMeasurable' m (fun ω => ∫ (y : Ω), f y ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω) μ
[PROOFSTEP]
rw [MeasurableSpace.comap_id] at h
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf : AEStronglyMeasurable f μ
h : AEStronglyMeasurable' (m ⊓ mΩ) (fun a => ∫ (y : Ω), f (id a, y).snd ∂↑(condDistrib id id μ) (id a)) μ
⊢ AEStronglyMeasurable' m (fun ω => ∫ (y : Ω), f y ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω) μ
[PROOFSTEP]
exact AEStronglyMeasurable'.mono h inf_le_left
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
hf_int : Integrable f
⊢ ∀ᵐ (ω : Ω) ∂μ, Integrable f
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
hf_int : Integrable f
⊢ ∀ᵐ (ω : Ω) ∂μ, Integrable f
[PROOFSTEP]
exact
Integrable.condDistrib_ae (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf_int.comp_snd_map_prod_id (inf_le_right : m ⊓ mΩ ≤ mΩ))
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
hf_int : Integrable f
⊢ Integrable fun ω => ∫ (y : Ω), ‖f y‖ ∂↑(condexpKernel μ m) ω
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
hf_int : Integrable f
⊢ Integrable fun ω => ∫ (y : Ω), ‖f y‖ ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω
[PROOFSTEP]
exact
Integrable.integral_norm_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf_int.comp_snd_map_prod_id (inf_le_right : m ⊓ mΩ ≤ mΩ))
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
⊢ Integrable fun ω => ‖∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω‖
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
⊢ Integrable fun ω => ‖∫ (y : Ω), f y ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω‖
[PROOFSTEP]
exact
Integrable.norm_integral_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf_int.comp_snd_map_prod_id (inf_le_right : m ⊓ mΩ ≤ mΩ))
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
⊢ Integrable fun ω => ∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
⊢ Integrable fun ω => ∫ (y : Ω), f y ∂↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω
[PROOFSTEP]
exact
Integrable.integral_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) aemeasurable_id
(hf_int.comp_snd_map_prod_id (inf_le_right : m ⊓ mΩ ≤ mΩ))
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ Integrable fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)
[PROOFSTEP]
rw [condexpKernel]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ : IsFiniteMeasure μ
inst✝ : NormedAddCommGroup F
f : Ω → F
s : Set Ω
hs : MeasurableSet s
⊢ Integrable fun ω => ENNReal.toReal (↑↑(↑(kernel.comap (condDistrib id id μ) id (_ : Measurable id)) ω) s)
[PROOFSTEP]
exact integrable_toReal_condDistrib (aemeasurable_id'' μ (inf_le_right : m ⊓ mΩ ≤ mΩ)) hs
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
s : Set Ω
hs : MeasurableSet s
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
[PROOFSTEP]
have h := condDistrib_ae_eq_condexp (μ := μ) (measurable_id'' (inf_le_right : m ⊓ mΩ ≤ mΩ)) measurable_id hs
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
s : Set Ω
hs : MeasurableSet s
h :
(fun a => ENNReal.toReal (↑↑(↑(condDistrib id id μ) (id a)) s)) =ᵐ[μ]
μ[indicator (id ⁻¹' s) fun ω => 1|MeasurableSpace.comap id (m ⊓ mΩ)]
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
[PROOFSTEP]
simp only [id_eq, ge_iff_le, MeasurableSpace.comap_id, preimage_id_eq] at h
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
s : Set Ω
hs : MeasurableSet s
h : (fun a => ENNReal.toReal (↑↑(↑(condDistrib id id μ) a) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
[PROOFSTEP]
simp_rw [condexpKernel_apply_eq_condDistrib]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
s : Set Ω
hs : MeasurableSet s
h : (fun a => ENNReal.toReal (↑↑(↑(condDistrib id id μ) a) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condDistrib id id μ) (id ω)) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m ⊓ mΩ]
[PROOFSTEP]
exact h
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
hm : m ≤ mΩ
s : Set Ω
hs : MeasurableSet s
⊢ μ[indicator s fun ω => 1|m ⊓ mΩ] =ᵐ[μ] μ[indicator s fun ω => 1|m]
[PROOFSTEP]
rw [inf_of_le_left hm]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
hm : m ≤ mΩ
s : Set Ω
hs : MeasurableSet s
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)) =ᵐ[Measure.trim μ hm] μ[indicator s fun ω => 1|m]
[PROOFSTEP]
rw [ae_eq_trim_iff hm _ stronglyMeasurable_condexp]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
hm : m ≤ mΩ
s : Set Ω
hs : MeasurableSet s
⊢ (fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)) =ᵐ[μ] μ[indicator s fun ω => 1|m]
[PROOFSTEP]
exact condexpKernel_ae_eq_condexp hm hs
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
hm : m ≤ mΩ
s : Set Ω
hs : MeasurableSet s
⊢ StronglyMeasurable fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)
[PROOFSTEP]
refine Measurable.stronglyMeasurable ?_
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁵ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁴ : PolishSpace Ω
inst✝³ : BorelSpace Ω
inst✝² : Nonempty Ω
μ : Measure Ω
inst✝¹ inst✝ : IsFiniteMeasure μ
hm : m ≤ mΩ
s : Set Ω
hs : MeasurableSet s
⊢ Measurable fun ω => ENNReal.toReal (↑↑(↑(condexpKernel μ m) ω) s)
[PROOFSTEP]
exact @Measurable.ennreal_toReal _ m _ (measurable_condexpKernel hs)
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
⊢ μ[f|m ⊓ mΩ] =ᵐ[μ] fun ω => ∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω
[PROOFSTEP]
have hX : @Measurable Ω Ω mΩ (m ⊓ mΩ) id := measurable_id.mono le_rfl (inf_le_right : m ⊓ mΩ ≤ mΩ)
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
hX : Measurable id
⊢ μ[f|m ⊓ mΩ] =ᵐ[μ] fun ω => ∫ (y : Ω), f y ∂↑(condexpKernel μ m) ω
[PROOFSTEP]
simp_rw [condexpKernel_apply_eq_condDistrib]
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
hX : Measurable id
⊢ μ[f|m ⊓ mΩ] =ᵐ[μ] fun ω => ∫ (y : Ω), f y ∂↑(condDistrib id id μ) (id ω)
[PROOFSTEP]
have h := condexp_ae_eq_integral_condDistrib_id hX hf_int
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hf_int : Integrable f
hX : Measurable id
h : μ[f|MeasurableSpace.comap id (m ⊓ mΩ)] =ᵐ[μ] fun a => ∫ (y : Ω), f y ∂↑(condDistrib id id μ) (id a)
⊢ μ[f|m ⊓ mΩ] =ᵐ[μ] fun ω => ∫ (y : Ω), f y ∂↑(condDistrib id id μ) (id ω)
[PROOFSTEP]
simpa only [MeasurableSpace.comap_id, id_eq] using h
[GOAL]
Ω : Type u_1
F : Type u_2
inst✝⁷ : TopologicalSpace Ω
m mΩ : MeasurableSpace Ω
inst✝⁶ : PolishSpace Ω
inst✝⁵ : BorelSpace Ω
inst✝⁴ : Nonempty Ω
μ : Measure Ω
inst✝³ : IsFiniteMeasure μ
inst✝² : NormedAddCommGroup F
f : Ω → F
inst✝¹ : NormedSpace ℝ F
inst✝ : CompleteSpace F
hm : m ≤ mΩ
hf_int : Integrable f
⊢ μ[f|m ⊓ mΩ] =ᵐ[μ] μ[f|m]
[PROOFSTEP]
rw [inf_of_le_left hm]
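In informal notation, and writing \(\kappa_\omega := \operatorname{condexpKernel}\,\mu\,m\,\omega\) (the abbreviation \(\kappa\) is introduced here for readability; it is not part of the trace), the goals discharged above assert that for every measurable set \(s\) and every integrable \(f\)
\[
  \omega \mapsto \bigl(\kappa_\omega(s)\bigr).\mathrm{toReal}
    \;=\; \mu\!\left[\mathbf{1}_s \,\middle|\, m \sqcap m_\Omega\right]
    \quad \mu\text{-a.e.},
  \qquad
  \mu\!\left[f \,\middle|\, m \sqcap m_\Omega\right]
    \;=\; \Bigl(\omega \mapsto \int_\Omega f\,\mathrm{d}\kappa_\omega\Bigr)
    \quad \mu\text{-a.e.},
\]
and that, under the additional hypothesis hm : m ≤ mΩ, the infimum m ⊓ mΩ may be replaced by m.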
|
[STATEMENT]
lemma cos_boundaries:
assumes "0 \<le> real_of_float x" and "x \<le> pi / 2"
shows "cos x \<in> {(lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) .. (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
proof (cases "real_of_float x = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
real_of_float x \<noteq> 0
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
hence "real_of_float x \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<noteq> 0
goal (1 subgoal):
1. real_of_float x \<noteq> 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
real_of_float x \<noteq> 0
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
hence "0 < x" and "0 < real_of_float x"
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<noteq> 0
goal (1 subgoal):
1. 0 < x &&& 0 < real_of_float x
[PROOF STEP]
using \<open>0 \<le> real_of_float x\<close>
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<noteq> 0
0 \<le> real_of_float x
goal (1 subgoal):
1. 0 < x &&& 0 < real_of_float x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < x
0 < real_of_float x
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "0 < x * x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < x * x
[PROOF STEP]
using \<open>0 < x\<close>
[PROOF STATE]
proof (prove)
using this:
0 < x
goal (1 subgoal):
1. 0 < x * x
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 < x * x
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have morph_to_if_power: "(\<Sum> i=0..<n. (-1::real) ^ i * (1/(fact (2 * i))) * x ^ (2 * i)) =
(\<Sum> i = 0 ..< 2 * n. (if even(i) then ((- 1) ^ (i div 2))/((fact i)) else 0) * x ^ i)"
(is "?sum = ?ifsum") for x n
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
have "?sum = ?sum + (\<Sum> j = 0 ..< n. 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
have "\<dots> =
(\<Sum> j = 0 ..< n. (- 1) ^ ((2 * j) div 2) / ((fact (2 * j))) * x ^(2 * j)) + (\<Sum> j = 0 ..< n. 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0) = (\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0) = (\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) + (\<Sum>j = 0..<n. 0) = (\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
have "\<dots> = (\<Sum> i = 0 ..< 2 * n. if even i then (- 1) ^ (i div 2) / ((fact i)) * x ^ i else 0)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0) = (\<Sum>i = 0..<2 * n. if even i then (- 1) ^ (i div 2) / fact i * x ^ i else 0)
[PROOF STEP]
unfolding sum_split_even_odd atLeast0LessThan
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>j<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j<n. 0) = (\<Sum>i<n. (- 1) ^ (2 * i div 2) / fact (2 * i) * x ^ (2 * i)) + (\<Sum>i<n. 0)
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0) = (\<Sum>i = 0..<2 * n. if even i then (- 1) ^ (i div 2) / fact i * x ^ i else 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
(\<Sum>j = 0..<n. (- 1) ^ (2 * j div 2) / fact (2 * j) * x ^ (2 * j)) + (\<Sum>j = 0..<n. 0) = (\<Sum>i = 0..<2 * n. if even i then (- 1) ^ (i div 2) / fact i * x ^ i else 0)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
have "\<dots> = (\<Sum> i = 0 ..< 2 * n. (if even i then (- 1) ^ (i div 2) / ((fact i)) else 0) * x ^ i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. if even i then (- 1) ^ (i div 2) / fact i * x ^ i else 0) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
by (rule sum.cong) auto
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<2 * n. if even i then (- 1) ^ (i div 2) / fact i * x ^ i else 0) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * x ^ (2 * i)) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * x ^ i)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * ?x ^ (2 * i)) = (\<Sum>i = 0..<2 * ?n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * ?x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * ?x ^ (2 * i)) = (\<Sum>i = 0..<2 * ?n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * ?x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
fix n :: nat
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
assume "0 < n"
[PROOF STATE]
proof (state)
this:
0 < n
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
hence "0 < 2 * n"
[PROOF STATE]
proof (prove)
using this:
0 < n
goal (1 subgoal):
1. 0 < 2 * n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < 2 * n
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
obtain t where "0 < t" and "t < real_of_float x" and
cos_eq: "cos x = (\<Sum> i = 0 ..< 2 * n. (if even(i) then ((- 1) ^ (i div 2))/((fact i)) else 0) * (real_of_float x) ^ i)
+ (cos (t + 1/2 * (2 * n) * pi) / (fact (2*n))) * (real_of_float x)^(2*n)"
(is "_ = ?SUM + ?rest / ?fact * ?pow")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 < t; t < real_of_float x; cos (real_of_float x) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using Maclaurin_cos_expansion2[OF \<open>0 < real_of_float x\<close> \<open>0 < 2 * n\<close>]
[PROOF STATE]
proof (prove)
using this:
\<exists>t>0. t < real_of_float x \<and> cos (real_of_float x) = (\<Sum>m<2 * n. cos_coeff m * real_of_float x ^ m) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 < t; t < real_of_float x; cos (real_of_float x) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding cos_coeff_def atLeast0LessThan
[PROOF STATE]
proof (prove)
using this:
\<exists>t>0. t < real_of_float x \<and> cos (real_of_float x) = (\<Sum>m<2 * n. (if even m then (- 1) ^ (m div 2) / fact m else 0) * real_of_float x ^ m) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<And>t. \<lbrakk>0 < t; t < real_of_float x; cos (real_of_float x) = (\<Sum>i<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < t
t < real_of_float x
cos (real_of_float x) = (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "cos t * (- 1) ^ n = cos t * cos (n * pi) + sin t * sin (n * pi)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos t * (- 1) ^ n = cos t * cos (real n * pi) + sin t * sin (real n * pi)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cos t * (- 1) ^ n = cos t * cos (real n * pi) + sin t * sin (real n * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cos t * (- 1) ^ n = cos t * cos (real n * pi) + sin t * sin (real n * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "\<dots> = cos (t + n * pi)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos t * cos (real n * pi) + sin t * sin (real n * pi) = cos (t + real n * pi)
[PROOF STEP]
by (simp add: cos_add)
[PROOF STATE]
proof (state)
this:
cos t * cos (real n * pi) + sin t * sin (real n * pi) = cos (t + real n * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cos t * cos (real n * pi) + sin t * sin (real n * pi) = cos (t + real n * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "\<dots> = ?rest"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos (t + real n * pi) = cos (t + 1 / 2 * real (2 * n) * pi)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cos (t + real n * pi) = cos (t + 1 / 2 * real (2 * n) * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
[PROOF STEP]
have "cos t * (- 1) ^ n = ?rest"
[PROOF STATE]
proof (prove)
using this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
goal (1 subgoal):
1. cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "t \<le> pi / 2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. t \<le> pi / 2
[PROOF STEP]
using \<open>t < real_of_float x\<close> and \<open>x \<le> pi / 2\<close>
[PROOF STATE]
proof (prove)
using this:
t < real_of_float x
real_of_float x \<le> pi / 2
goal (1 subgoal):
1. t \<le> pi / 2
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
t \<le> pi / 2
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
hence "0 \<le> cos t"
[PROOF STATE]
proof (prove)
using this:
t \<le> pi / 2
goal (1 subgoal):
1. 0 \<le> cos t
[PROOF STEP]
using \<open>0 < t\<close> and cos_ge_zero
[PROOF STATE]
proof (prove)
using this:
t \<le> pi / 2
0 < t
\<lbrakk>- (pi / 2) \<le> ?x; ?x \<le> pi / 2\<rbrakk> \<Longrightarrow> 0 \<le> cos ?x
goal (1 subgoal):
1. 0 \<le> cos t
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 \<le> cos t
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
0 \<le> cos t
[PROOF STEP]
have even: "even n \<Longrightarrow> 0 \<le> ?rest" and odd: "odd n \<Longrightarrow> 0 \<le> - ?rest "
[PROOF STATE]
proof (prove)
using this:
cos t * (- 1) ^ n = cos (t + 1 / 2 * real (2 * n) * pi)
0 \<le> cos t
goal (1 subgoal):
1. (even n \<Longrightarrow> 0 \<le> cos (t + 1 / 2 * real (2 * n) * pi)) &&& (odd n \<Longrightarrow> 0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
even n \<Longrightarrow> 0 \<le> cos (t + 1 / 2 * real (2 * n) * pi)
odd n \<Longrightarrow> 0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "0 < ?fact"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < fact (2 * n)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < fact (2 * n)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "0 < ?pow"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < real_of_float x ^ (2 * n)
[PROOF STEP]
using \<open>0 < real_of_float x\<close>
[PROOF STATE]
proof (prove)
using this:
0 < real_of_float x
goal (1 subgoal):
1. 0 < real_of_float x ^ (2 * n)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < real_of_float x ^ (2 * n)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
0 < real_of_float x ^ (2 * n)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
assume "even n"
[PROOF STATE]
proof (state)
this:
even n
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "(lb_sin_cos_aux prec n 1 1 (x * x)) \<le> ?SUM"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
unfolding morph_to_if_power[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float x ^ (2 * i))
[PROOF STEP]
using cos_aux
[PROOF STATE]
proof (prove)
using this:
real_of_float (lb_sin_cos_aux ?prec ?n 1 1 (?x * ?x)) \<le> (\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float ?x ^ (2 * i))
(\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float ?x ^ (2 * i)) \<le> real_of_float (ub_sin_cos_aux ?prec ?n 1 1 (?x * ?x))
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float x ^ (2 * i))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "\<dots> \<le> cos x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
[PROOF STEP]
from even[OF \<open>even n\<close>] \<open>0 < ?fact\<close> \<open>0 < ?pow\<close>
[PROOF STATE]
proof (chain)
picking this:
0 \<le> cos (t + 1 / 2 * real (2 * n) * pi)
0 < fact (2 * n)
0 < real_of_float x ^ (2 * n)
[PROOF STEP]
have "0 \<le> (?rest / ?fact) * ?pow"
[PROOF STATE]
proof (prove)
using this:
0 \<le> cos (t + 1 / 2 * real (2 * n) * pi)
0 < fact (2 * n)
0 < real_of_float x ^ (2 * n)
goal (1 subgoal):
1. 0 \<le> cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
0 \<le> cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
0 \<le> cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
[PROOF STEP]
unfolding cos_eq
[PROOF STATE]
proof (prove)
using this:
0 \<le> cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
have "(lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos x"
[PROOF STATE]
proof (prove)
using this:
real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
note lb = this
[PROOF STATE]
proof (state)
this:
even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
assume "odd n"
[PROOF STATE]
proof (state)
this:
odd n
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "cos x \<le> ?SUM"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
from \<open>0 < ?fact\<close> and \<open>0 < ?pow\<close> and odd[OF \<open>odd n\<close>]
[PROOF STATE]
proof (chain)
picking this:
0 < fact (2 * n)
0 < real_of_float x ^ (2 * n)
0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi)
[PROOF STEP]
have "0 \<le> (- ?rest) / ?fact * ?pow"
[PROOF STATE]
proof (prove)
using this:
0 < fact (2 * n)
0 < real_of_float x ^ (2 * n)
0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi)
goal (1 subgoal):
1. 0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
[PROOF STEP]
by (metis mult_nonneg_nonneg divide_nonneg_pos less_imp_le)
[PROOF STATE]
proof (state)
this:
0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
unfolding cos_eq
[PROOF STATE]
proof (prove)
using this:
0 \<le> - cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) + cos (t + 1 / 2 * real (2 * n) * pi) / fact (2 * n) * real_of_float x ^ (2 * n) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "\<dots> \<le> (ub_sin_cos_aux prec n 1 1 (x * x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
[PROOF STEP]
unfolding morph_to_if_power[symmetric]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float x ^ (2 * i)) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
[PROOF STEP]
using cos_aux
[PROOF STATE]
proof (prove)
using this:
real_of_float (lb_sin_cos_aux ?prec ?n 1 1 (?x * ?x)) \<le> (\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float ?x ^ (2 * i))
(\<Sum>i = 0..<?n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float ?x ^ (2 * i)) \<le> real_of_float (ub_sin_cos_aux ?prec ?n 1 1 (?x * ?x))
goal (1 subgoal):
1. (\<Sum>i = 0..<n. (- 1) ^ i * (1 / fact (2 * i)) * real_of_float x ^ (2 * i)) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<Sum>i = 0..<2 * n. (if even i then (- 1) ^ (i div 2) / fact i else 0) * real_of_float x ^ i) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
[PROOF STEP]
have "cos x \<le> (ub_sin_cos_aux prec n 1 1 (x * x))"
[PROOF STATE]
proof (prove)
using this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
goal (1 subgoal):
1. cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
odd n \<Longrightarrow> cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
note ub = this and lb
[PROOF STATE]
proof (state)
this:
odd n \<Longrightarrow> cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec n 1 1 (x * x))
even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec n 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>0 < ?na2; odd ?na2\<rbrakk> \<Longrightarrow> cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec ?na2 1 1 (x * x))
\<lbrakk>0 < ?na2; even ?na2\<rbrakk> \<Longrightarrow> real_of_float (lb_sin_cos_aux prec ?na2 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
note ub = this(1) and lb = this(2)
[PROOF STATE]
proof (state)
this:
\<lbrakk>0 < ?na2; odd ?na2\<rbrakk> \<Longrightarrow> cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec ?na2 1 1 (x * x))
\<lbrakk>0 < ?na2; even ?na2\<rbrakk> \<Longrightarrow> real_of_float (lb_sin_cos_aux prec ?na2 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "cos x \<le> (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
[PROOF STEP]
using ub[OF odd_pos[OF get_odd] get_odd]
[PROOF STATE]
proof (prove)
using this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd ?n3) 1 1 (x * x))
goal (1 subgoal):
1. cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
have "(lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
proof (cases "0 < get_even n")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
2. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
0 < get_even n
goal (2 subgoals):
1. 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
2. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
using lb[OF True get_even]
[PROOF STATE]
proof (prove)
using this:
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
goal (1 subgoal):
1. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> 0 < get_even n
goal (1 subgoal):
1. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
hence "get_even n = 0"
[PROOF STATE]
proof (prove)
using this:
\<not> 0 < get_even n
goal (1 subgoal):
1. get_even n = 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
get_even n = 0
goal (1 subgoal):
1. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
have "- (pi / 2) \<le> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. - (pi / 2) \<le> real_of_float x
[PROOF STEP]
by (rule order_trans[OF _ \<open>0 < real_of_float x\<close>[THEN less_imp_le]]) auto
[PROOF STATE]
proof (state)
this:
- (pi / 2) \<le> real_of_float x
goal (1 subgoal):
1. \<not> 0 < get_even n \<Longrightarrow> real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
with \<open>x \<le> pi / 2\<close>
[PROOF STATE]
proof (chain)
picking this:
real_of_float x \<le> pi / 2
- (pi / 2) \<le> real_of_float x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<le> pi / 2
- (pi / 2) \<le> real_of_float x
goal (1 subgoal):
1. real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
unfolding \<open>get_even n = 0\<close> lb_sin_cos_aux.simps minus_float.rep_eq zero_float.rep_eq
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<le> pi / 2
- (pi / 2) \<le> real_of_float x
goal (1 subgoal):
1. 0 \<le> cos (real_of_float x)
[PROOF STEP]
using cos_ge_zero
[PROOF STATE]
proof (prove)
using this:
real_of_float x \<le> pi / 2
- (pi / 2) \<le> real_of_float x
\<lbrakk>- (pi / 2) \<le> ?x; ?x \<le> pi / 2\<rbrakk> \<Longrightarrow> 0 \<le> cos ?x
goal (1 subgoal):
1. 0 \<le> cos (real_of_float x)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
goal (2 subgoals):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
2. real_of_float x \<noteq> 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
cos (real_of_float x) \<le> real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))
real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x)) \<le> cos (real_of_float x)
goal (1 subgoal):
1. cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
goal (1 subgoal):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
real_of_float x = 0
goal (1 subgoal):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
hence "x = 0"
[PROOF STATE]
proof (prove)
using this:
real_of_float x = 0
goal (1 subgoal):
1. x = 0
[PROOF STEP]
by (simp add: real_of_float_eq)
[PROOF STATE]
proof (state)
this:
x = 0
goal (1 subgoal):
1. real_of_float x = 0 \<Longrightarrow> cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
x = 0
goal (1 subgoal):
1. cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
using lb_sin_cos_aux_zero_le_one one_le_ub_sin_cos_aux
[PROOF STATE]
proof (prove)
using this:
x = 0
lb_sin_cos_aux ?prec ?n ?i ?j 0 \<le> 1
odd ?n \<Longrightarrow> 1 \<le> ub_sin_cos_aux ?prec ?n ?i (Suc 0) 0
goal (1 subgoal):
1. cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
cos (real_of_float x) \<in> {real_of_float (lb_sin_cos_aux prec (get_even n) 1 1 (x * x))..real_of_float (ub_sin_cos_aux prec (get_odd n) 1 1 (x * x))}
goal:
No subgoals!
[PROOF STEP]
qed
|
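In summary, the proof above instantiates the Taylor expansion of the cosine with Lagrange remainder (Maclaurin_cos_expansion2): for 0 < x and 0 < n there is a t with 0 < t < x such that
\[
  \cos x \;=\; \sum_{i=0}^{2n-1} c_i\,x^{i} \;+\; \frac{\cos(t + n\pi)}{(2n)!}\,x^{2n},
  \qquad
  c_i = \begin{cases} (-1)^{i/2}/i! & \text{\(i\) even}\\ 0 & \text{\(i\) odd.} \end{cases}
\]
Since \(\cos(t + n\pi) = (-1)^n \cos t\) and \(\cos t \ge 0\) for \(0 < t \le \pi/2\), the remainder is nonnegative when n is even and nonpositive when n is odd. Hence for even n the truncated series, and therefore lb_sin_cos_aux, is a lower bound for cos x, while for odd n it is an upper bound, matched by ub_sin_cos_aux; evaluating at get_even n and get_odd n brackets cos x from both sides. (This is a restatement of the proof states shown above, not additional Isabelle text.)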
\documentclass[12pt]{article}
\input{physics1}
\begin{document}
\noindent
Name: \rule[-1ex]{0.55\textwidth}{0.1pt}
NetID: \rule[-1ex]{0.2\textwidth}{0.1pt}
\section*{NYU Physics I---Term Exam 1}
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From Problem Set 1, Problem~3)
Combine a mass $M$ and a length $h$ and an acceleration $g$ into
something that has units of time. You don't have to use all three
quantities if you don't need to.
\vfill
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From Problem Set 2, Problem~3)
Make a plot of the velocity $v_y$ against time $t$ for a stone thrown upwards
with initial velocity $v_y = +5\,\mps$. Take the acceleration due to gravity to be $a_y = -10\,\mpss$.
Plot for the time interval $0<t<1\,\s$.
Label your axes with sufficient precision that I can check your numbers.
\vfill
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From Lecture, 2018-09-11)
We spent time talking about two vectors, $\vec{v}_3$ and $\vec{v}_4$,
which were the velocities of the rock on a no-air-resistance trajectory.
What was wrong with this picture that we drew?
\marginpar{\includegraphics[width=1in]{../jpg/wrong_vectors.png}}
\vfill
\clearpage
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From Lecture, 2018-09-13)
A car is moving at constant speed $v$ along a horizontal, circular path
of radius $R$. Is there a non-zero net force on the car? Why?
\vfill
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From Lecture, 2018-09-18)
We gave three arguments that $g\,\sin\theta$ was a good guess for the
acceleration of a block down an inclined plane. The first argument was that it
has the right units! What were the other two arguments? \emph{Hint:} They were limiting
cases!
\vfill
\paragraph{\problemname~\theproblem:}\refstepcounter{problem}%
(From recitation, week of 2018-09-10)
You made a table of times, accelerations, positions, and velocities.
If in the third row you had
\begin{equation}
t_3 = 0.3\,\s
\quad
a_3 = -10.0\,\mpss
\quad
v_3 = -2.0\,\mps
\quad
x_3 = 9.7\,\m
\quad ,
\end{equation}
then what would you write for $v_4$ and $x_4$ in the fourth row, which looks like
\begin{equation}
t_4 = 0.4\,\s
\quad
a_4 = -10.0\,\mpss
\quad
v_4 = \rule[-1ex]{20pt}{0.1pt}\,\mps
\quad
x_4 = \rule[-1ex]{20pt}{0.1pt}\,\m
\quad ?
\end{equation}
\vfill
~
\end{document}
|
/-
Copyright (c) 2015 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Robert Y. Lewis
-/
import data.int.cast
/-!
# Lemmas about power operations on monoids and groups
This file contains lemmas about `monoid.pow`, `group.pow`, `nsmul`, `zsmul`
which require additional imports besides those available in `algebra.group_power.basic`.
-/
universes u v w x y z u₁ u₂
variables {M : Type u} {N : Type v} {G : Type w} {H : Type x} {A : Type y} {B : Type z}
{R : Type u₁} {S : Type u₂}
/-!
### (Additive) monoid
-/
section monoid
variables [monoid M] [monoid N] [add_monoid A] [add_monoid B]
@[simp] theorem nsmul_one [has_one A] : ∀ n : ℕ, n • (1 : A) = n :=
add_monoid_hom.eq_nat_cast
⟨λ n, n • (1 : A), zero_nsmul _, λ _ _, add_nsmul _ _ _⟩
(one_nsmul _)
@[simp, norm_cast, to_additive]
lemma units.coe_pow (u : units M) (n : ℕ) : ((u ^ n : units M) : M) = u ^ n :=
(units.coe_hom M).map_pow u n
instance invertible_pow (m : M) [invertible m] (n : ℕ) : invertible (m ^ n) :=
{ inv_of := ⅟ m ^ n,
inv_of_mul_self := by rw [← (commute_inv_of m).symm.mul_pow, inv_of_mul_self, one_pow],
mul_inv_of_self := by rw [← (commute_inv_of m).mul_pow, mul_inv_of_self, one_pow] }
lemma inv_of_pow (m : M) [invertible m] (n : ℕ) [invertible (m ^ n)] :
⅟(m ^ n) = ⅟m ^ n :=
@invertible_unique M _ (m ^ n) (m ^ n) rfl ‹_› (invertible_pow m n)
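-- Illustrative use (not part of the original file): with the `invertible_pow`
-- instance above in scope, instance search supplies `invertible (m ^ 2)`,
-- so `inv_of_pow` rewrites the inverse of a square.
example (m : M) [invertible m] : ⅟(m ^ 2) = ⅟m ^ 2 := inv_of_pow m 2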
lemma is_unit.pow {m : M} (n : ℕ) : is_unit m → is_unit (m ^ n) :=
λ ⟨u, hu⟩, ⟨u ^ n, by simp *⟩
@[simp] lemma is_unit_pow_succ_iff {m : M} {n : ℕ} :
is_unit (m ^ (n + 1)) ↔ is_unit m :=
begin
refine ⟨_, λ h, h.pow _⟩,
rw [pow_succ, ((commute.refl _).pow_right _).is_unit_mul_iff],
exact and.left
end
lemma is_unit_pos_pow_iff {m : M} :
∀ {n : ℕ} (h : 0 < n), is_unit (m ^ n) ↔ is_unit m
| (n + 1) _ := is_unit_pow_succ_iff
/-- If `x ^ n.succ = 1` then `x` has an inverse, `x^n`. -/
def invertible_of_pow_succ_eq_one (x : M) (n : ℕ) (hx : x ^ n.succ = 1) :
invertible x :=
⟨x ^ n, (pow_succ' x n).symm.trans hx, (pow_succ x n).symm.trans hx⟩
/-- If `x ^ n = 1` then `x` has an inverse, `x^(n - 1)`. -/
def invertible_of_pow_eq_one (x : M) (n : ℕ) (hx : x ^ n = 1) (hn : 0 < n) :
invertible x :=
begin
apply invertible_of_pow_succ_eq_one x (n - 1),
convert hx,
exact tsub_add_cancel_of_le (nat.succ_le_of_lt hn),
end
lemma is_unit_of_pow_eq_one (x : M) (n : ℕ) (hx : x ^ n = 1) (hn : 0 < n) :
is_unit x :=
begin
haveI := invertible_of_pow_eq_one x n hx hn,
exact is_unit_of_invertible x
end
lemma smul_pow [mul_action M N] [is_scalar_tower M N N] [smul_comm_class M N N]
(k : M) (x : N) (p : ℕ) :
(k • x) ^ p = k ^ p • x ^ p :=
begin
induction p with p IH,
{ simp },
{ rw [pow_succ', IH, smul_mul_smul, ←pow_succ', ←pow_succ'] }
end
@[simp] lemma smul_pow' [mul_distrib_mul_action M N] (x : M) (m : N) (n : ℕ) :
x • m ^ n = (x • m) ^ n :=
begin
induction n with n ih,
{ rw [pow_zero, pow_zero], exact smul_one x },
{ rw [pow_succ, pow_succ], exact (smul_mul' x m (m ^ n)).trans (congr_arg _ ih) }
end
end monoid
section group
variables [group G] [group H] [add_group A] [add_group B]
open int
local attribute [ematch] le_of_lt
open nat
theorem zsmul_one [has_one A] (n : ℤ) : n • (1 : A) = n :=
by cases n; simp
@[to_additive add_one_zsmul]
lemma zpow_add_one (a : G) : ∀ n : ℤ, a ^ (n + 1) = a ^ n * a
| (of_nat n) := by simp [← int.coe_nat_succ, pow_succ']
| -[1+0] := by simp [int.neg_succ_of_nat_eq]
| -[1+(n+1)] := by rw [int.neg_succ_of_nat_eq, zpow_neg, neg_add, neg_add_cancel_right, zpow_neg,
← int.coe_nat_succ, zpow_coe_nat, zpow_coe_nat, pow_succ _ (n + 1), mul_inv_rev,
inv_mul_cancel_right]
@[to_additive zsmul_sub_one]
lemma zpow_sub_one (a : G) (n : ℤ) : a ^ (n - 1) = a ^ n * a⁻¹ :=
calc a ^ (n - 1) = a ^ (n - 1) * a * a⁻¹ : (mul_inv_cancel_right _ _).symm
... = a^n * a⁻¹ : by rw [← zpow_add_one, sub_add_cancel]
@[to_additive add_zsmul]
lemma zpow_add (a : G) (m n : ℤ) : a ^ (m + n) = a ^ m * a ^ n :=
begin
induction n using int.induction_on with n ihn n ihn,
case hz : { simp },
{ simp only [← add_assoc, zpow_add_one, ihn, mul_assoc] },
{ rw [zpow_sub_one, ← mul_assoc, ← ihn, ← zpow_sub_one, add_sub_assoc] }
end
@[to_additive add_zsmul_self]
lemma mul_self_zpow (b : G) (m : ℤ) : b*b^m = b^(m+1) :=
by { conv_lhs {congr, rw ← zpow_one b }, rw [← zpow_add, add_comm] }
@[to_additive add_self_zsmul]
lemma mul_zpow_self (b : G) (m : ℤ) : b^m*b = b^(m+1) :=
by { conv_lhs {congr, skip, rw ← zpow_one b }, rw [← zpow_add, add_comm] }
@[to_additive sub_zsmul]
lemma zpow_sub (a : G) (m n : ℤ) : a ^ (m - n) = a ^ m * (a ^ n)⁻¹ :=
by rw [sub_eq_add_neg, zpow_add, zpow_neg]
@[to_additive one_add_zsmul]
theorem zpow_one_add (a : G) (i : ℤ) : a ^ (1 + i) = a * a ^ i :=
by rw [zpow_add, zpow_one]
@[to_additive]
theorem zpow_mul_comm (a : G) (i j : ℤ) : a ^ i * a ^ j = a ^ j * a ^ i :=
by rw [← zpow_add, ← zpow_add, add_comm]
-- note that `mul_zsmul` and `zpow_mul` have the primes swapped since their argument order
-- and therefore the more "natural" choice of lemma is reversed.
@[to_additive mul_zsmul']
theorem zpow_mul (a : G) (m n : ℤ) : a ^ (m * n) = (a ^ m) ^ n :=
int.induction_on n (by simp) (λ n ihn, by simp [mul_add, zpow_add, ihn])
(λ n ihn, by simp only [mul_sub, zpow_sub, ihn, mul_one, zpow_one])
@[to_additive mul_zsmul]
theorem zpow_mul' (a : G) (m n : ℤ) : a ^ (m * n) = (a ^ n) ^ m :=
by rw [mul_comm, zpow_mul]
@[to_additive bit0_zsmul]
theorem zpow_bit0 (a : G) (n : ℤ) : a ^ bit0 n = a ^ n * a ^ n := zpow_add _ _ _
@[to_additive bit1_zsmul]
theorem zpow_bit1 (a : G) (n : ℤ) : a ^ bit1 n = a ^ n * a ^ n * a :=
by rw [bit1, zpow_add, zpow_bit0, zpow_one]
@[simp, norm_cast, to_additive]
lemma units.coe_zpow (u : units G) (n : ℤ) : ((u ^ n : units G) : G) = u ^ n :=
(units.coe_hom G).map_zpow u n
end group
section ordered_add_comm_group
variables [ordered_add_comm_group A]
/-! Lemmas about `zsmul` under ordering, placed here (rather than in `algebra.group_power.order`
with their friends) because they require facts from `data.int.basic`-/
open int
lemma zsmul_pos {a : A} (ha : 0 < a) {k : ℤ} (hk : (0:ℤ) < k) : 0 < k • a :=
begin
lift k to ℕ using int.le_of_lt hk,
rw coe_nat_zsmul,
apply nsmul_pos ha,
exact (coe_nat_pos.mp hk).ne',
end
theorem zsmul_strict_mono_left {a : A} (ha : 0 < a) : strict_mono (λ n : ℤ, n • a) :=
λ n m h,
calc n • a = n • a + 0 : (add_zero _).symm
... < n • a + (m - n) • a : add_lt_add_left (zsmul_pos ha (sub_pos.mpr h)) _
... = m • a : by { rw [← add_zsmul], simp }
theorem zsmul_mono_left {a : A} (ha : 0 ≤ a) : monotone (λ n : ℤ, n • a) :=
λ n m h,
calc n • a = n • a + 0 : (add_zero _).symm
... ≤ n • a + (m - n) • a : add_le_add_left (zsmul_nonneg ha (sub_nonneg.mpr h)) _
... = m • a : by { rw [← add_zsmul], simp }
theorem zsmul_le_zsmul {a : A} {n m : ℤ} (ha : 0 ≤ a) (h : n ≤ m) : n • a ≤ m • a :=
zsmul_mono_left ha h
theorem zsmul_lt_zsmul {a : A} {n m : ℤ} (ha : 0 < a) (h : n < m) : n • a < m • a :=
zsmul_strict_mono_left ha h
theorem zsmul_le_zsmul_iff {a : A} {n m : ℤ} (ha : 0 < a) : n • a ≤ m • a ↔ n ≤ m :=
(zsmul_strict_mono_left ha).le_iff_le
theorem zsmul_lt_zsmul_iff {a : A} {n m : ℤ} (ha : 0 < a) : n • a < m • a ↔ n < m :=
(zsmul_strict_mono_left ha).lt_iff_lt
variables (A)
lemma zsmul_strict_mono_right {n : ℤ} (hn : 0 < n) :
strict_mono ((•) n : A → A) :=
λ a b hab, begin
rw ← sub_pos at hab,
rw [← sub_pos, ← zsmul_sub],
exact zsmul_pos hab hn,
end
lemma zsmul_mono_right {n : ℤ} (hn : 0 ≤ n) :
monotone ((•) n : A → A) :=
λ a b hab, begin
rw ← sub_nonneg at hab,
rw [← sub_nonneg, ← zsmul_sub],
exact zsmul_nonneg hab hn,
end
variables {A}
theorem zsmul_le_zsmul' {n : ℤ} (hn : 0 ≤ n) {a₁ a₂ : A} (h : a₁ ≤ a₂) : n • a₁ ≤ n • a₂ :=
zsmul_mono_right A hn h
theorem zsmul_lt_zsmul' {n : ℤ} (hn : 0 < n) {a₁ a₂ : A} (h : a₁ < a₂) : n • a₁ < n • a₂ :=
zsmul_strict_mono_right A hn h
lemma abs_nsmul {α : Type*} [linear_ordered_add_comm_group α] (n : ℕ) (a : α) :
|n • a| = n • |a| :=
begin
cases le_total a 0 with hneg hpos,
{ rw [abs_of_nonpos hneg, ← abs_neg, ← neg_nsmul, abs_of_nonneg],
exact nsmul_nonneg (neg_nonneg.mpr hneg) n },
{ rw [abs_of_nonneg hpos, abs_of_nonneg],
exact nsmul_nonneg hpos n }
end
lemma abs_zsmul {α : Type*} [linear_ordered_add_comm_group α] (n : ℤ) (a : α) :
|n • a| = |n| • |a| :=
begin
by_cases n0 : 0 ≤ n,
{ lift n to ℕ using n0,
simp only [abs_nsmul, coe_nat_abs, coe_nat_zsmul] },
{ lift (- n) to ℕ using int.le_of_lt (neg_pos.mpr (not_le.mp n0)) with m h,
rw [← abs_neg (n • a), ← neg_zsmul, ← abs_neg n, ← h, coe_nat_zsmul, coe_nat_abs,
coe_nat_zsmul],
exact abs_nsmul m _ },
end
lemma abs_add_eq_add_abs_le {α : Type*} [linear_ordered_add_comm_group α] {a b : α} (hle : a ≤ b) :
|a + b| = |a| + |b| ↔ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) :=
begin
by_cases a0 : 0 ≤ a; by_cases b0 : 0 ≤ b,
{ simp [a0, b0, abs_of_nonneg, add_nonneg a0 b0] },
{ exact (lt_irrefl (0 : α) (a0.trans_lt (hle.trans_lt (not_le.mp b0)))).elim },
any_goals { simp [(not_le.mp a0).le, (not_le.mp b0).le, abs_of_nonpos, add_nonpos, add_comm] },
obtain F := (not_le.mp a0),
have : (|a + b| = -a + b ↔ b ≤ 0) ↔ (|a + b| =
|a| + |b| ↔ 0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0),
{ simp [a0, b0, abs_of_neg, abs_of_nonneg, F, F.le] },
refine this.mp ⟨λ h, _, λ h, by simp only [le_antisymm h b0, abs_of_neg F, add_zero]⟩,
by_cases ba : a + b ≤ 0,
{ refine le_of_eq (eq_zero_of_neg_eq _),
rwa [abs_of_nonpos ba, neg_add_rev, add_comm, add_right_inj] at h },
{ refine (lt_irrefl (0 : α) _).elim,
rw [abs_of_pos (not_le.mp ba), add_left_inj] at h,
rwa eq_zero_of_neg_eq h.symm at F }
end
lemma abs_add_eq_add_abs_iff {α : Type*} [linear_ordered_add_comm_group α] (a b : α) :
|a + b| = |a| + |b| ↔ (0 ≤ a ∧ 0 ≤ b ∨ a ≤ 0 ∧ b ≤ 0) :=
begin
by_cases ab : a ≤ b,
{ exact abs_add_eq_add_abs_le ab },
{ rw [add_comm a, add_comm (abs _), abs_add_eq_add_abs_le ((not_le.mp ab).le), and.comm,
@and.comm (b ≤ 0 ) _] }
end
end ordered_add_comm_group
section linear_ordered_add_comm_group
variable [linear_ordered_add_comm_group A]
theorem zsmul_le_zsmul_iff' {n : ℤ} (hn : 0 < n) {a₁ a₂ : A} : n • a₁ ≤ n • a₂ ↔ a₁ ≤ a₂ :=
(zsmul_strict_mono_right A hn).le_iff_le
theorem zsmul_lt_zsmul_iff' {n : ℤ} (hn : 0 < n) {a₁ a₂ : A} : n • a₁ < n • a₂ ↔ a₁ < a₂ :=
(zsmul_strict_mono_right A hn).lt_iff_lt
theorem nsmul_le_nsmul_iff {a : A} {n m : ℕ} (ha : 0 < a) : n • a ≤ m • a ↔ n ≤ m :=
begin
refine ⟨λ h, _, nsmul_le_nsmul $ le_of_lt ha⟩,
by_contra H,
exact lt_irrefl _ (lt_of_lt_of_le (nsmul_lt_nsmul ha (not_le.mp H)) h)
end
theorem nsmul_lt_nsmul_iff {a : A} {n m : ℕ} (ha : 0 < a) : n • a < m • a ↔ n < m :=
begin
refine ⟨λ h, _, nsmul_lt_nsmul ha⟩,
by_contra H,
exact lt_irrefl _ (lt_of_le_of_lt (nsmul_le_nsmul (le_of_lt ha) $ not_lt.mp H) h)
end
/-- See also `smul_right_injective`. TODO: provide a `no_zero_smul_divisors` instance. We can't
do that here because importing that definition would create import cycles. -/
lemma zsmul_right_injective {m : ℤ} (hm : m ≠ 0) : function.injective ((•) m : A → A) :=
begin
cases hm.symm.lt_or_lt,
{ exact (zsmul_strict_mono_right A h).injective, },
{ intros a b hab,
refine (zsmul_strict_mono_right A (neg_pos.mpr h)).injective _,
rw [neg_zsmul, neg_zsmul, hab], },
end
lemma zsmul_right_inj {a b : A} {m : ℤ} (hm : m ≠ 0) : m • a = m • b ↔ a = b :=
(zsmul_right_injective hm).eq_iff
/-- Alias of `zsmul_right_inj`, for ease of discovery alongside `zsmul_le_zsmul_iff'` and
`zsmul_lt_zsmul_iff'`. -/
lemma zsmul_eq_zsmul_iff' {a b : A} {m : ℤ} (hm : m ≠ 0) : m • a = m • b ↔ a = b :=
zsmul_right_inj hm
end linear_ordered_add_comm_group
@[simp] lemma with_bot.coe_nsmul [add_monoid A] (a : A) (n : ℕ) :
((n • a : A) : with_bot A) = n • a :=
add_monoid_hom.map_nsmul ⟨(coe : A → with_bot A), with_bot.coe_zero, with_bot.coe_add⟩ a n
theorem nsmul_eq_mul' [semiring R] (a : R) (n : ℕ) : n • a = a * n :=
by induction n with n ih; [rw [zero_nsmul, nat.cast_zero, mul_zero],
rw [succ_nsmul', ih, nat.cast_succ, mul_add, mul_one]]
@[simp] theorem nsmul_eq_mul [semiring R] (n : ℕ) (a : R) : n • a = n * a :=
by rw [nsmul_eq_mul', (n.cast_commute a).eq]
theorem mul_nsmul_left [semiring R] (a b : R) (n : ℕ) : n • (a * b) = a * (n • b) :=
by rw [nsmul_eq_mul', nsmul_eq_mul', mul_assoc]
theorem mul_nsmul_assoc [semiring R] (a b : R) (n : ℕ) : n • (a * b) = n • a * b :=
by rw [nsmul_eq_mul, nsmul_eq_mul, mul_assoc]
@[simp, norm_cast] theorem nat.cast_pow [semiring R] (n m : ℕ) : (↑(n ^ m) : R) = ↑n ^ m :=
begin
induction m with m ih,
{ rw [pow_zero, pow_zero], exact nat.cast_one },
{ rw [pow_succ', pow_succ', nat.cast_mul, ih] }
end
@[simp, norm_cast] theorem int.coe_nat_pow (n m : ℕ) : ((n ^ m : ℕ) : ℤ) = n ^ m :=
by induction m with m ih; [exact int.coe_nat_one, rw [pow_succ', pow_succ', int.coe_nat_mul, ih]]
theorem int.nat_abs_pow (n : ℤ) (k : ℕ) : int.nat_abs (n ^ k) = (int.nat_abs n) ^ k :=
by induction k with k ih; [refl, rw [pow_succ', int.nat_abs_mul, pow_succ', ih]]
-- The next four lemmas allow us to replace multiplication by a numeral with a `zsmul` expression.
-- They are used by the `noncomm_ring` tactic, to normalise expressions before passing to `abel`.
lemma bit0_mul [ring R] {n r : R} : bit0 n * r = (2 : ℤ) • (n * r) :=
by { dsimp [bit0], rw [add_mul, add_zsmul, one_zsmul], }
lemma mul_bit0 [ring R] {n r : R} : r * bit0 n = (2 : ℤ) • (r * n) :=
by { dsimp [bit0], rw [mul_add, add_zsmul, one_zsmul], }
lemma bit1_mul [ring R] {n r : R} : bit1 n * r = (2 : ℤ) • (n * r) + r :=
by { dsimp [bit1], rw [add_mul, bit0_mul, one_mul], }
lemma mul_bit1 [ring R] {n r : R} : r * bit1 n = (2 : ℤ) • (r * n) + r :=
by { dsimp [bit1], rw [mul_add, mul_bit0, mul_one], }
@[simp] theorem zsmul_eq_mul [ring R] (a : R) : ∀ (n : ℤ), n • a = n * a
| (n : ℕ) := by { rw [coe_nat_zsmul, nsmul_eq_mul], refl }
| -[1+ n] := by simp [nat.cast_succ, neg_add_rev, int.cast_neg_succ_of_nat, add_mul]
theorem zsmul_eq_mul' [ring R] (a : R) (n : ℤ) : n • a = a * n :=
by rw [zsmul_eq_mul, (n.cast_commute a).eq]
theorem mul_zsmul_left [ring R] (a b : R) (n : ℤ) : n • (a * b) = a * (n • b) :=
by rw [zsmul_eq_mul', zsmul_eq_mul', mul_assoc]
theorem mul_zsmul_assoc [ring R] (a b : R) (n : ℤ) : n • (a * b) = n • a * b :=
by rw [zsmul_eq_mul, zsmul_eq_mul, mul_assoc]
lemma zsmul_int_int (a b : ℤ) : a • b = a * b := by simp
lemma zsmul_int_one (n : ℤ) : n • 1 = n := by simp
@[simp, norm_cast] theorem int.cast_pow [ring R] (n : ℤ) (m : ℕ) : (↑(n ^ m) : R) = ↑n ^ m :=
begin
induction m with m ih,
{ rw [pow_zero, pow_zero, int.cast_one] },
{ rw [pow_succ, pow_succ, int.cast_mul, ih] }
end
lemma neg_one_pow_eq_pow_mod_two [ring R] {n : ℕ} : (-1 : R) ^ n = (-1) ^ (n % 2) :=
by rw [← nat.mod_add_div n 2, pow_add, pow_mul]; simp [sq]
section ordered_semiring
variables [ordered_semiring R] {a : R}
/-- Bernoulli's inequality. This version works for semirings but requires
additional hypotheses `0 ≤ a * a` and `0 ≤ (1 + a) * (1 + a)`. -/
theorem one_add_mul_le_pow' (Hsq : 0 ≤ a * a) (Hsq' : 0 ≤ (1 + a) * (1 + a))
(H : 0 ≤ 2 + a) :
∀ (n : ℕ), 1 + (n : R) * a ≤ (1 + a) ^ n
| 0 := by simp
| 1 := by simp
| (n+2) :=
have 0 ≤ (n : R) * (a * a * (2 + a)) + a * a,
from add_nonneg (mul_nonneg n.cast_nonneg (mul_nonneg Hsq H)) Hsq,
calc 1 + (↑(n + 2) : R) * a ≤ 1 + ↑(n + 2) * a + (n * (a * a * (2 + a)) + a * a) :
(le_add_iff_nonneg_right _).2 this
... = (1 + a) * (1 + a) * (1 + n * a) :
by { simp [add_mul, mul_add, bit0, mul_assoc, (n.cast_commute (_ : R)).left_comm],
ac_refl }
... ≤ (1 + a) * (1 + a) * (1 + a)^n :
mul_le_mul_of_nonneg_left (one_add_mul_le_pow' n) Hsq'
... = (1 + a)^(n + 2) : by simp only [pow_succ, mul_assoc]
private lemma pow_le_pow_of_le_one_aux (h : 0 ≤ a) (ha : a ≤ 1) (i : ℕ) :
∀ k : ℕ, a ^ (i + k) ≤ a ^ i
| 0 := by simp
| (k+1) := by { rw [←add_assoc, ←one_mul (a^i), pow_succ],
exact mul_le_mul ha (pow_le_pow_of_le_one_aux _) (pow_nonneg h _) zero_le_one }
lemma pow_le_pow_of_le_one (h : 0 ≤ a) (ha : a ≤ 1) {i j : ℕ} (hij : i ≤ j) :
a ^ j ≤ a ^ i :=
let ⟨k, hk⟩ := nat.exists_eq_add_of_le hij in
by rw hk; exact pow_le_pow_of_le_one_aux h ha _ _
lemma pow_le_of_le_one (h₀ : 0 ≤ a) (h₁ : a ≤ 1) {n : ℕ} (hn : n ≠ 0) : a ^ n ≤ a :=
(pow_one a).subst (pow_le_pow_of_le_one h₀ h₁ (nat.pos_of_ne_zero hn))
lemma sq_le (h₀ : 0 ≤ a) (h₁ : a ≤ 1) : a ^ 2 ≤ a := pow_le_of_le_one h₀ h₁ two_ne_zero
end ordered_semiring
section linear_ordered_semiring
variables [linear_ordered_semiring R]
lemma sign_cases_of_C_mul_pow_nonneg {C r : R} (h : ∀ n : ℕ, 0 ≤ C * r ^ n) :
C = 0 ∨ (0 < C ∧ 0 ≤ r) :=
begin
have : 0 ≤ C, by simpa only [pow_zero, mul_one] using h 0,
refine this.eq_or_lt.elim (λ h, or.inl h.symm) (λ hC, or.inr ⟨hC, _⟩),
refine nonneg_of_mul_nonneg_left _ hC,
simpa only [pow_one] using h 1
end
end linear_ordered_semiring
section linear_ordered_ring
variables [linear_ordered_ring R] {a : R} {n : ℕ}
@[simp] lemma abs_pow (a : R) (n : ℕ) : |a ^ n| = |a| ^ n :=
(pow_abs a n).symm
@[simp] theorem pow_bit1_neg_iff : a ^ bit1 n < 0 ↔ a < 0 :=
⟨λ h, not_le.1 $ λ h', not_le.2 h $ pow_nonneg h' _, λ ha, pow_bit1_neg ha n⟩
@[simp] theorem pow_bit1_nonneg_iff : 0 ≤ a ^ bit1 n ↔ 0 ≤ a :=
le_iff_le_iff_lt_iff_lt.2 pow_bit1_neg_iff
@[simp] theorem pow_bit1_nonpos_iff : a ^ bit1 n ≤ 0 ↔ a ≤ 0 :=
by simp only [le_iff_lt_or_eq, pow_bit1_neg_iff, pow_eq_zero_iff (bit1_pos (zero_le n))]
@[simp] theorem pow_bit1_pos_iff : 0 < a ^ bit1 n ↔ 0 < a :=
lt_iff_lt_of_le_iff_le pow_bit1_nonpos_iff
lemma even.pow_nonneg (hn : even n) (a : R) : 0 ≤ a ^ n :=
by cases hn with k hk; simpa only [hk, two_mul] using pow_bit0_nonneg a k
lemma even.pow_pos (hn : even n) (ha : a ≠ 0) : 0 < a ^ n :=
by cases hn with k hk; simpa only [hk, two_mul] using pow_bit0_pos ha k
lemma odd.pow_nonpos (hn : odd n) (ha : a ≤ 0) : a ^ n ≤ 0:=
by cases hn with k hk; simpa only [hk, two_mul] using pow_bit1_nonpos_iff.mpr ha
lemma odd.pow_neg (hn : odd n) (ha : a < 0) : a ^ n < 0:=
by cases hn with k hk; simpa only [hk, two_mul] using pow_bit1_neg_iff.mpr ha
lemma odd.pow_nonneg_iff (hn : odd n) : 0 ≤ a ^ n ↔ 0 ≤ a :=
⟨λ h, le_of_not_lt (λ ha, h.not_lt $ hn.pow_neg ha), λ ha, pow_nonneg ha n⟩
lemma odd.pow_nonpos_iff (hn : odd n) : a ^ n ≤ 0 ↔ a ≤ 0 :=
⟨λ h, le_of_not_lt (λ ha, h.not_lt $ pow_pos ha _), hn.pow_nonpos⟩
lemma odd.pow_pos_iff (hn : odd n) : 0 < a ^ n ↔ 0 < a :=
⟨λ h, lt_of_not_ge' (λ ha, h.not_le $ hn.pow_nonpos ha), λ ha, pow_pos ha n⟩
lemma odd.pow_neg_iff (hn : odd n) : a ^ n < 0 ↔ a < 0 :=
⟨λ h, lt_of_not_ge' (λ ha, h.not_le $ pow_nonneg ha _), hn.pow_neg⟩
lemma even.pow_pos_iff (hn : even n) (h₀ : 0 < n) : 0 < a ^ n ↔ a ≠ 0 :=
⟨λ h ha, by { rw [ha, zero_pow h₀] at h, exact lt_irrefl 0 h }, hn.pow_pos⟩
lemma even.pow_abs {p : ℕ} (hp : even p) (a : R) : |a| ^ p = a ^ p :=
begin
rw [←abs_pow, abs_eq_self],
exact hp.pow_nonneg _
end
@[simp] lemma pow_bit0_abs (a : R) (p : ℕ) : |a| ^ bit0 p = a ^ bit0 p := (even_bit0 _).pow_abs _
lemma strict_mono_pow_bit1 (n : ℕ) : strict_mono (λ a : R, a ^ bit1 n) :=
begin
intros a b hab,
cases le_total a 0 with ha ha,
{ cases le_or_lt b 0 with hb hb,
{ rw [← neg_lt_neg_iff, ← neg_pow_bit1, ← neg_pow_bit1],
exact pow_lt_pow_of_lt_left (neg_lt_neg hab) (neg_nonneg.2 hb) (bit1_pos (zero_le n)) },
{ exact (pow_bit1_nonpos_iff.2 ha).trans_lt (pow_bit1_pos_iff.2 hb) } },
{ exact pow_lt_pow_of_lt_left hab ha (bit1_pos (zero_le n)) }
end
lemma odd.strict_mono_pow (hn : odd n) : strict_mono (λ a : R, a ^ n) :=
by cases hn with k hk; simpa only [hk, two_mul] using strict_mono_pow_bit1 _
/-- Bernoulli's inequality for `n : ℕ`, `-2 ≤ a`. -/
theorem one_add_mul_le_pow (H : -2 ≤ a) (n : ℕ) : 1 + (n : R) * a ≤ (1 + a) ^ n :=
one_add_mul_le_pow' (mul_self_nonneg _) (mul_self_nonneg _) (neg_le_iff_add_nonneg'.1 H) _
/-- Bernoulli's inequality reformulated to estimate `a^n`. -/
theorem one_add_mul_sub_le_pow (H : -1 ≤ a) (n : ℕ) : 1 + (n : R) * (a - 1) ≤ a ^ n :=
have -2 ≤ a - 1, by rwa [bit0, neg_add, ← sub_eq_add_neg, sub_le_sub_iff_right],
by simpa only [add_sub_cancel'_right] using one_add_mul_le_pow this n
end linear_ordered_ring
/-- Bernoulli's inequality reformulated to estimate `(n : K)`. -/
theorem nat.cast_le_pow_sub_div_sub {K : Type*} [linear_ordered_field K] {a : K} (H : 1 < a)
(n : ℕ) :
(n : K) ≤ (a ^ n - 1) / (a - 1) :=
(le_div_iff (sub_pos.2 H)).2 $ le_sub_left_of_add_le $
one_add_mul_sub_le_pow ((neg_le_self $ @zero_le_one K _).trans H.le) _
/-- For any `a > 1` and a natural `n` we have `n ≤ a ^ n / (a - 1)`. See also
`nat.cast_le_pow_sub_div_sub` for a stronger inequality with `a ^ n - 1` in the numerator. -/
theorem nat.cast_le_pow_div_sub {K : Type*} [linear_ordered_field K] {a : K} (H : 1 < a) (n : ℕ) :
(n : K) ≤ a ^ n / (a - 1) :=
(n.cast_le_pow_sub_div_sub H).trans $ div_le_div_of_le (sub_nonneg.2 H.le)
(sub_le_self _ zero_le_one)
namespace int
lemma units_sq (u : units ℤ) : u ^ 2 = 1 :=
(sq u).symm ▸ units_mul_self u
alias int.units_sq ← int.units_pow_two
lemma units_pow_eq_pow_mod_two (u : units ℤ) (n : ℕ) : u ^ n = u ^ (n % 2) :=
by conv {to_lhs, rw ← nat.mod_add_div n 2}; rw [pow_add, pow_mul, units_sq, one_pow, mul_one]
@[simp] lemma nat_abs_sq (x : ℤ) : (x.nat_abs ^ 2 : ℤ) = x ^ 2 :=
by rw [sq, int.nat_abs_mul_self', sq]
alias int.nat_abs_sq ← int.nat_abs_pow_two
lemma abs_le_self_sq (a : ℤ) : (int.nat_abs a : ℤ) ≤ a ^ 2 :=
by { rw [← int.nat_abs_sq a, sq], norm_cast, apply nat.le_mul_self }
alias int.abs_le_self_sq ← int.abs_le_self_pow_two
lemma le_self_sq (b : ℤ) : b ≤ b ^ 2 := le_trans (le_nat_abs) (abs_le_self_sq _)
alias int.le_self_sq ← int.le_self_pow_two
lemma pow_right_injective {x : ℤ} (h : 1 < x.nat_abs) : function.injective ((^) x : ℕ → ℤ) :=
begin
suffices : function.injective (nat_abs ∘ ((^) x : ℕ → ℤ)),
{ exact function.injective.of_comp this },
convert nat.pow_right_injective h,
ext n,
rw [function.comp_app, nat_abs_pow]
end
end int
variables (M G A)
/-- Monoid homomorphisms from `multiplicative ℕ` are defined by the image
of `multiplicative.of_add 1`. -/
def powers_hom [monoid M] : M ≃ (multiplicative ℕ →* M) :=
{ to_fun := λ x, ⟨λ n, x ^ n.to_add, by { convert pow_zero x, exact to_add_one },
λ m n, pow_add x m n⟩,
inv_fun := λ f, f (multiplicative.of_add 1),
left_inv := pow_one,
right_inv := λ f, monoid_hom.ext $ λ n, by { simp [← f.map_pow, ← of_add_nsmul] } }
/-- Monoid homomorphisms from `multiplicative ℤ` are defined by the image
of `multiplicative.of_add 1`. -/
def zpowers_hom [group G] : G ≃ (multiplicative ℤ →* G) :=
{ to_fun := λ x, ⟨λ n, x ^ n.to_add, zpow_zero x, λ m n, zpow_add x m n⟩,
inv_fun := λ f, f (multiplicative.of_add 1),
left_inv := zpow_one,
right_inv := λ f, monoid_hom.ext $ λ n, by { simp [← f.map_zpow, ← of_add_zsmul ] } }
/-- Additive homomorphisms from `ℕ` are defined by the image of `1`. -/
def multiples_hom [add_monoid A] : A ≃ (ℕ →+ A) :=
{ to_fun := λ x, ⟨λ n, n • x, zero_nsmul x, λ m n, add_nsmul _ _ _⟩,
inv_fun := λ f, f 1,
left_inv := one_nsmul,
right_inv := λ f, add_monoid_hom.ext_nat $ one_nsmul (f 1) }
/-- Additive homomorphisms from `ℤ` are defined by the image of `1`. -/
def zmultiples_hom [add_group A] : A ≃ (ℤ →+ A) :=
{ to_fun := λ x, ⟨λ n, n • x, zero_zsmul x, λ m n, add_zsmul _ _ _⟩,
inv_fun := λ f, f 1,
left_inv := one_zsmul,
right_inv := λ f, add_monoid_hom.ext_int $ one_zsmul (f 1) }
attribute [to_additive multiples_hom] powers_hom
attribute [to_additive zmultiples_hom] zpowers_hom
variables {M G A}
@[simp] lemma powers_hom_apply [monoid M] (x : M) (n : multiplicative ℕ) :
powers_hom M x n = x ^ n.to_add := rfl
@[simp] lemma powers_hom_symm_apply [monoid M] (f : multiplicative ℕ →* M) :
(powers_hom M).symm f = f (multiplicative.of_add 1) := rfl
@[simp] lemma zpowers_hom_apply [group G] (x : G) (n : multiplicative ℤ) :
zpowers_hom G x n = x ^ n.to_add := rfl
@[simp] lemma zpowers_hom_symm_apply [group G] (f : multiplicative ℤ →* G) :
(zpowers_hom G).symm f = f (multiplicative.of_add 1) := rfl
@[simp] lemma multiples_hom_apply [add_monoid A] (x : A) (n : ℕ) :
multiples_hom A x n = n • x := rfl
attribute [to_additive multiples_hom_apply] powers_hom_apply
@[simp] lemma multiples_hom_symm_apply [add_monoid A] (f : ℕ →+ A) :
(multiples_hom A).symm f = f 1 := rfl
attribute [to_additive multiples_hom_symm_apply] powers_hom_symm_apply
@[simp] lemma zmultiples_hom_apply [add_group A] (x : A) (n : ℤ) :
zmultiples_hom A x n = n • x := rfl
attribute [to_additive zmultiples_hom_apply] zpowers_hom_apply
@[simp] lemma zmultiples_hom_symm_apply [add_group A] (f : ℤ →+ A) :
(zmultiples_hom A).symm f = f 1 := rfl
attribute [to_additive zmultiples_hom_symm_apply] zpowers_hom_symm_apply
-- TODO use to_additive in the rest of this file
lemma monoid_hom.apply_mnat [monoid M] (f : multiplicative ℕ →* M) (n : multiplicative ℕ) :
f n = (f (multiplicative.of_add 1)) ^ n.to_add :=
by rw [← powers_hom_symm_apply, ← powers_hom_apply, equiv.apply_symm_apply]
@[ext] lemma monoid_hom.ext_mnat [monoid M] ⦃f g : multiplicative ℕ →* M⦄
(h : f (multiplicative.of_add 1) = g (multiplicative.of_add 1)) : f = g :=
monoid_hom.ext $ λ n, by rw [f.apply_mnat, g.apply_mnat, h]
lemma monoid_hom.apply_mint [group M] (f : multiplicative ℤ →* M) (n : multiplicative ℤ) :
f n = (f (multiplicative.of_add 1)) ^ n.to_add :=
by rw [← zpowers_hom_symm_apply, ← zpowers_hom_apply, equiv.apply_symm_apply]
/-! `monoid_hom.ext_mint` is defined in `data.int.cast` -/
lemma add_monoid_hom.apply_nat [add_monoid M] (f : ℕ →+ M) (n : ℕ) :
f n = n • (f 1) :=
by rw [← multiples_hom_symm_apply, ← multiples_hom_apply, equiv.apply_symm_apply]
/-! `add_monoid_hom.ext_nat` is defined in `data.nat.cast` -/
lemma add_monoid_hom.apply_int [add_group M] (f : ℤ →+ M) (n : ℤ) :
f n = n • (f 1) :=
by rw [← zmultiples_hom_symm_apply, ← zmultiples_hom_apply, equiv.apply_symm_apply]
/-! `add_monoid_hom.ext_int` is defined in `data.int.cast` -/
variables (M G A)
/-- If `M` is commutative, `powers_hom` is a multiplicative equivalence. -/
def powers_mul_hom [comm_monoid M] : M ≃* (multiplicative ℕ →* M) :=
{ map_mul' := λ a b, monoid_hom.ext $ by simp [mul_pow],
..powers_hom M}
/-- If `M` is commutative, `zpowers_hom` is a multiplicative equivalence. -/
def zpowers_mul_hom [comm_group G] : G ≃* (multiplicative ℤ →* G) :=
{ map_mul' := λ a b, monoid_hom.ext $ by simp [mul_zpow],
..zpowers_hom G}
/-- If `M` is commutative, `multiples_hom` is an additive equivalence. -/
def multiples_add_hom [add_comm_monoid A] : A ≃+ (ℕ →+ A) :=
{ map_add' := λ a b, add_monoid_hom.ext $ by simp [nsmul_add],
..multiples_hom A}
/-- If `M` is commutative, `zmultiples_hom` is an additive equivalence. -/
def zmultiples_add_hom [add_comm_group A] : A ≃+ (ℤ →+ A) :=
{ map_add' := λ a b, add_monoid_hom.ext $ by simp [zsmul_add],
..zmultiples_hom A}
variables {M G A}
@[simp] lemma powers_mul_hom_apply [comm_monoid M] (x : M) (n : multiplicative ℕ) :
powers_mul_hom M x n = x ^ n.to_add := rfl
@[simp] lemma powers_mul_hom_symm_apply [comm_monoid M] (f : multiplicative ℕ →* M) :
(powers_mul_hom M).symm f = f (multiplicative.of_add 1) := rfl
@[simp] lemma zpowers_mul_hom_apply [comm_group G] (x : G) (n : multiplicative ℤ) :
zpowers_mul_hom G x n = x ^ n.to_add := rfl
@[simp] lemma zpowers_mul_hom_symm_apply [comm_group G] (f : multiplicative ℤ →* G) :
(zpowers_mul_hom G).symm f = f (multiplicative.of_add 1) := rfl
@[simp] lemma multiples_add_hom_apply [add_comm_monoid A] (x : A) (n : ℕ) :
multiples_add_hom A x n = n • x := rfl
@[simp] lemma multiples_add_hom_symm_apply [add_comm_monoid A] (f : ℕ →+ A) :
(multiples_add_hom A).symm f = f 1 := rfl
@[simp] lemma zmultiples_add_hom_apply [add_comm_group A] (x : A) (n : ℤ) :
zmultiples_add_hom A x n = n • x := rfl
@[simp] lemma zmultiples_add_hom_symm_apply [add_comm_group A] (f : ℤ →+ A) :
(zmultiples_add_hom A).symm f = f 1 := rfl
/-!
### Commutativity (again)
Facts about `semiconj_by` and `commute` that require `zpow` or `zsmul`, or the fact that integer
multiplication equals semiring multiplication.
-/
namespace semiconj_by
section
variables [semiring R] {a x y : R}
@[simp] lemma cast_nat_mul_right (h : semiconj_by a x y) (n : ℕ) :
semiconj_by a ((n : R) * x) (n * y) :=
semiconj_by.mul_right (nat.commute_cast _ _) h
@[simp] lemma cast_nat_mul_left (h : semiconj_by a x y) (n : ℕ) : semiconj_by ((n : R) * a) x y :=
semiconj_by.mul_left (nat.cast_commute _ _) h
@[simp] lemma cast_nat_mul_cast_nat_mul (h : semiconj_by a x y) (m n : ℕ) :
semiconj_by ((m : R) * a) (n * x) (n * y) :=
(h.cast_nat_mul_left m).cast_nat_mul_right n
end
variables [monoid M] [group G] [ring R]
@[simp, to_additive] lemma units_zpow_right {a : M} {x y : units M} (h : semiconj_by a x y) :
∀ m : ℤ, semiconj_by a (↑(x^m)) (↑(y^m))
| (n : ℕ) := by simp only [zpow_coe_nat, units.coe_pow, h, pow_right]
| -[1+n] := by simp only [zpow_neg_succ_of_nat, units.coe_pow, units_inv_right, h, pow_right]
variables {a b x y x' y' : R}
@[simp] lemma cast_int_mul_right (h : semiconj_by a x y) (m : ℤ) :
semiconj_by a ((m : ℤ) * x) (m * y) :=
semiconj_by.mul_right (int.commute_cast _ _) h
@[simp] lemma cast_int_mul_left (h : semiconj_by a x y) (m : ℤ) : semiconj_by ((m : R) * a) x y :=
semiconj_by.mul_left (int.cast_commute _ _) h
@[simp] lemma cast_int_mul_cast_int_mul (h : semiconj_by a x y) (m n : ℤ) :
semiconj_by ((m : R) * a) (n * x) (n * y) :=
(h.cast_int_mul_left m).cast_int_mul_right n
end semiconj_by
namespace commute
section
variables [semiring R] {a b : R}
@[simp] theorem cast_nat_mul_right (h : commute a b) (n : ℕ) : commute a ((n : R) * b) :=
h.cast_nat_mul_right n
@[simp] theorem cast_nat_mul_left (h : commute a b) (n : ℕ) : commute ((n : R) * a) b :=
h.cast_nat_mul_left n
@[simp] theorem cast_nat_mul_cast_nat_mul (h : commute a b) (m n : ℕ) :
commute ((m : R) * a) (n * b) :=
h.cast_nat_mul_cast_nat_mul m n
@[simp] theorem self_cast_nat_mul (n : ℕ) : commute a (n * a) :=
(commute.refl a).cast_nat_mul_right n
@[simp] theorem cast_nat_mul_self (n : ℕ) : commute ((n : R) * a) a :=
(commute.refl a).cast_nat_mul_left n
@[simp] theorem self_cast_nat_mul_cast_nat_mul (m n : ℕ) : commute ((m : R) * a) (n * a) :=
(commute.refl a).cast_nat_mul_cast_nat_mul m n
end
variables [monoid M] [group G] [ring R]
@[simp, to_additive] lemma units_zpow_right {a : M} {u : units M} (h : commute a u) (m : ℤ) :
commute a (↑(u^m)) :=
h.units_zpow_right m
@[simp, to_additive] lemma units_zpow_left {u : units M} {a : M} (h : commute ↑u a) (m : ℤ) :
commute (↑(u^m)) a :=
(h.symm.units_zpow_right m).symm
variables {a b : R}
@[simp] lemma cast_int_mul_right (h : commute a b) (m : ℤ) : commute a (m * b) :=
h.cast_int_mul_right m
@[simp] lemma cast_int_mul_left (h : commute a b) (m : ℤ) : commute ((m : R) * a) b :=
h.cast_int_mul_left m
lemma cast_int_mul_cast_int_mul (h : commute a b) (m n : ℤ) : commute ((m : R) * a) (n * b) :=
h.cast_int_mul_cast_int_mul m n
variables (a) (m n : ℤ)
@[simp] lemma cast_int_left : commute (m : R) a :=
by { rw [← mul_one (m : R)], exact (one_left a).cast_int_mul_left m }
@[simp] lemma cast_int_right : commute a m :=
by { rw [← mul_one (m : R)], exact (one_right a).cast_int_mul_right m }
@[simp] theorem self_cast_int_mul : commute a (n * a) := (commute.refl a).cast_int_mul_right n
@[simp] theorem cast_int_mul_self : commute ((n : R) * a) a := (commute.refl a).cast_int_mul_left n
theorem self_cast_int_mul_cast_int_mul : commute ((m : R) * a) (n * a) :=
(commute.refl a).cast_int_mul_cast_int_mul m n
end commute
section multiplicative
open multiplicative
@[simp] lemma nat.to_add_pow (a : multiplicative ℕ) (b : ℕ) : to_add (a ^ b) = to_add a * b :=
begin
induction b with b ih,
{ erw [pow_zero, to_add_one, mul_zero] },
{ simp [*, pow_succ, add_comm, nat.mul_succ] }
end
@[simp] lemma nat.of_add_mul (a b : ℕ) : of_add (a * b) = of_add a ^ b :=
(nat.to_add_pow _ _).symm
@[simp] lemma int.to_add_pow (a : multiplicative ℤ) (b : ℕ) : to_add (a ^ b) = to_add a * b :=
by induction b; simp [*, mul_add, pow_succ, add_comm]
@[simp] lemma int.to_add_zpow (a : multiplicative ℤ) (b : ℤ) : to_add (a ^ b) = to_add a * b :=
int.induction_on b (by simp)
(by simp [zpow_add, mul_add] {contextual := tt})
(by simp [zpow_add, mul_add, sub_eq_add_neg, -int.add_neg_one] {contextual := tt})
@[simp] lemma int.of_add_mul (a b : ℤ) : of_add (a * b) = of_add a ^ b :=
(int.to_add_zpow _ _).symm
end multiplicative
namespace units
variables [monoid M]
lemma conj_pow (u : units M) (x : M) (n : ℕ) : (↑u * x * ↑(u⁻¹))^n = u * x^n * ↑(u⁻¹) :=
(divp_eq_iff_mul_eq.2 ((u.mk_semiconj_by x).pow_right n).eq.symm).symm
lemma conj_pow' (u : units M) (x : M) (n : ℕ) : (↑(u⁻¹) * x * u)^n = ↑(u⁻¹) * x^n * u:=
(u⁻¹).conj_pow x n
end units
namespace mul_opposite
/-- Moving to the opposite monoid commutes with taking powers. -/
@[simp] lemma op_pow [monoid M] (x : M) (n : ℕ) : op (x ^ n) = (op x) ^ n := rfl
@[simp] lemma unop_pow [monoid M] (x : Mᵐᵒᵖ) (n : ℕ) : unop (x ^ n) = (unop x) ^ n := rfl
/-- Moving to the opposite group or group_with_zero commutes with taking powers. -/
@[simp] lemma op_zpow [div_inv_monoid M] (x : M) (z : ℤ) : op (x ^ z) = (op x) ^ z := rfl
@[simp] lemma unop_zpow [div_inv_monoid M] (x : Mᵐᵒᵖ) (z : ℤ) : unop (x ^ z) = (unop x) ^ z := rfl
end mul_opposite
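-- Hedged sanity checks appended for illustration (not part of the original file); they use
-- only lemmas proved above in this file.
example (a b : ℤ) : a • b = a * b := zsmul_int_int a b
example (u : units ℤ) : u ^ 2 = 1 := int.units_sq u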
|
lemma one_pow (m : mynat) : (1 : mynat) ^ m = 1 :=
begin
induction m with n hn,
rwa [pow_zero],
rw pow_succ,
rw hn,
simp,
end
|
Formal statement is: lemma coeffs_monom [code abstract]: "coeffs (monom a n) = (if a = 0 then [] else replicate n 0 @ [a])" Informal statement is: The coefficient list of the monomial $a x^n$ is empty if $a = 0$; otherwise it consists of $n$ zeros followed by the single coefficient $a$ (for example, `coeffs (monom a 2) = [0, 0, a]` when $a \neq 0$). |
[STATEMENT]
lemma mset_ordering_addition:
assumes "A = B + C"
shows "B \<subseteq># A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. B \<subseteq># A
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
A = B + C
goal (1 subgoal):
1. B \<subseteq># A
[PROOF STEP]
by simp |
import Sexp
def translateTerm : Sexp → String
| sexp!{(= {e₁} {e₂})} => s!"({translateTerm e₁}) = ({translateTerm e₂})"
| sexp!{(distinct {e₁} {e₂})}
| sexp!{(not (= {e₁} {e₂}))} => s!"({translateTerm e₁}) ≠ ({translateTerm e₂})"
| sexp!{(store {a} {i} {v})} => s!"({translateTerm a}).write {i} ({translateTerm v})"
| sexp!{(select {a} {i})} => s!"({translateTerm a}).read {i}"
| sexp!{(and {p} {q})} => s!"{translateTerm p} → {translateTerm q}"
| .atom s => s
| _ => ""
def translateCommand : Sexp → String
| sexp!{(declare-const {a} A)} => s!"{a}"
| sexp!{(declare-fun {a} () A)} => "{" ++ s!"{a} : A I E" ++ "}"
| sexp!{(assert {e})} => translateTerm e
| _ => ""
def translateQuery (q : String) : String := Id.run do
let .ok cmds := Sexp.parseMany q | panic! "translation failed."
let (asserts, decls) := cmds.partition (· matches sexp!{(assert {_})})
let decls := (decls.map translateCommand).filter (· ≠ "")
let asserts := asserts.map translateCommand
let lq := s!"import LMT
variable \{I} [Nonempty I] \{E} [Nonempty E] [Nonempty (A I E)]
example \{{String.intercalate " " decls} : A I E} :
{String.intercalate " → " asserts} → False := by
arr"
return lq
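-- Hedged note (not part of the original file): `translateQuery` splits the parsed commands
-- into declarations and assertions, renders the declarations as binders over the array sort
-- `A I E`, joins the translated assertions with `→`, and states a goal ending in `False`
-- that is handed to the `arr` tactic.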
open System in
def main (args : List String) : IO Unit := do
let path := args[0]!
let query ← IO.FS.readFile ⟨path⟩
IO.println (translateQuery query)
|
MODULE mo_salsa_types
USE classSection, ONLY : Section
IMPLICIT NONE
! This module is a container for SALSA datatypes.
! Previously most of the stuff was placed in mo_submctl.
! Placing them here helps avoid cyclic dependencies and
! improves the code structure.
! All particle properties for SALSA. All the setup and pointer associations will be done in mo_aero_init
TYPE(Section), ALLOCATABLE, TARGET :: allSALSA(:,:,:) ! Parent array holding all particle and hydrometeor types consecutively
! Particle type specific pointers to "allSALSA" master array defined in mo_salsa_driver.
! Pointer association is done in mo_salsa_init. These should be accessed by importing mo_submctl,
! not by dummy arguments.
TYPE(Section), POINTER :: aero(:,:,:) => NULL(), &
cloud(:,:,:) => NULL(), &
precp(:,:,:) => NULL(), &
ice(:,:,:) => NULL(), &
snow(:,:,:) => NULL(), &
liquid(:,:,:) => NULL(), &
frozen(:,:,:) => NULL()
! Start and end indices for the different particle types in the allSALSA array
INTEGER :: iaero, faero, icloud, fcloud, iprecp, fprecp, iice, fice, isnow, fsnow
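! Hedged usage sketch (hypothetical caller, not part of this module): downstream SALSA code is
! expected to reach these arrays via a USE statement rather than dummy arguments, e.g.
! USE mo_salsa_types, ONLY : allSALSA, aero, cloud, iaero, faero
! (or indirectly via mo_submctl, as noted above).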
END MODULE mo_salsa_types
|
> module BoundedNat.Properties
> import Data.Fin
> import Control.Isomorphism
> import Syntax.PreorderReasoning
> import BoundedNat.BoundedNat
> import BoundedNat.Operations
> import Basic.Operations
> import Nat.LTProperties
> import Sigma.Sigma
> import Pairs.Operations
> import Sigma.Properties
> import Fin.Properties
> import Finite.Predicates
> import Unique.Predicates
> %default total
> %access public export
> %auto_implicits on
No natural number is smaller than zero
> implementation [UninhabitedLTBZ] Uninhabited (LTB Z) where
> uninhabited (MkSigma n prf) = absurd prf
Basic properties
> |||
> toFinLemma0 : (n : Nat) -> (b : Nat) -> (prf : LT n b) ->
> finToNat (toFin (MkSigma n prf)) = n
> toFinLemma0 n Z prf = absurd prf
> toFinLemma0 Z (S a) (LTESucc prf) = Refl
> toFinLemma0 (S m) (S a) (LTESucc prf) = let ih = toFinLemma0 m a prf
> in rewrite ih in Refl
> %freeze toFinLemma0 -- frozen
> |||
> toFinLemma1 : (n : Nat) -> (b : Nat) -> (prf : LT n b) ->
> finToNat (FS (toFin (MkSigma n prf))) = S n
> toFinLemma1 n b prf =
> ( finToNat (FS (toFin (MkSigma n prf))) )
> ={ Refl }= -- definition of |finToNat|
> ( S (finToNat (toFin (MkSigma n prf))) )
> ={ cong (toFinLemma0 n b prf) }= -- |toFinLemma0|, functionality of |S|
> ( S n )
> QED
> %freeze toFinLemma1 -- frozen
> |||
> toFinLemma2 : (n : Nat) -> (b : Nat) -> (prf : LT n b) ->
> finToNatLemma (toFin (MkSigma n prf)) = prf
> toFinLemma2 n Z prf = absurd prf
> toFinLemma2 Z (S b) (LTESucc LTEZero) = Refl
> {-
> toFinLemma2 (S n) (S b) (LTESucc prf) =
> ( finToNatLemma (toFin (S n ** LTESucc prf)) )
> ={ Refl }= -- definition of |toFin|
> ( finToNatLemma (FS (toFin (n ** prf))) )
> ={ Refl }= -- definition of |finToNatLemma|
> ( LTESucc (finToNatLemma (toFin (n ** prf))) )
> ={ depCong2' {alpha = Nat}
> {P = \ n => LT n b}
> {Q = \ n, prf => LT (S n) (S b)}
> {a1 = finToNat (toFin (n ** prf))}
> {a2 = n}
> {Pa1 = finToNatLemma (toFin (n ** prf))}
> {Pa2 = prf}
> (\ n, prf => LTESucc prf)
> (toFinLemma0 n b prf)
> (toFinLemma2 n b prf) }=
> ( LTESucc prf )
> QED
> -}
> --{-
> toFinLemma2 (S n) (S b) (LTESucc prf) = trans s1 (trans s2 s3) where
> s1 : finToNatLemma (toFin (MkSigma (S n) (LTESucc prf)))
> =
> finToNatLemma (FS (toFin (MkSigma n prf)))
> s1 = Refl
> s2 : finToNatLemma (FS (toFin (MkSigma n prf)))
> =
> LTESucc (finToNatLemma (toFin (MkSigma n prf)))
> s2 = Refl
> {-
> sx : finToNatLemma (toFin (n ** prf))
> =
> prf
> sx = toFinLemma2 n b prf
> sy : LTESucc (finToNatLemma (toFin (n ** prf)))
> =
> LTESucc prf
> sy = cong {a = finToNatLemma (toFin (n ** prf))} {b = prf} {f = LTESucc} sx
> -}
> s3 : LTESucc (finToNatLemma (toFin (MkSigma n prf)))
> =
> LTESucc prf
> s3 = depCong2' {alpha = Nat}
> {P = \ n => LT n b}
> {Q = \ n, prf => LT (S n) (S b)}
> {a1 = finToNat (toFin (MkSigma n prf))}
> {a2 = n}
> {Pa1 = finToNatLemma (toFin (MkSigma n prf))}
> {Pa2 = prf}
> {f = \ n, prf => LTESucc prf}
> (toFinLemma0 n b prf)
> (toFinLemma2 n b prf)
> ---}
> %freeze toFinLemma2 -- frozen
> |||
> toFinLemma3 : (n : Nat) -> (b : Nat) -> (prf : LT n b) ->
> finToNatLemma (FS (toFin (MkSigma n prf))) = LTESucc prf
> {-
> toFinLemma3 n b prf =
> ( finToNatLemma (FS (toFin (n ** prf))) )
> ={ replace {a = Fin (S b)}
> {x = FS (toFin (n ** prf))}
> {y = toFin (S n ** LTESucc prf)}
> {P = \ x => finToNatLemma (FS (toFin (n ** prf))) = finToNatLemma x}
> Refl Refl }=
> ( finToNatLemma (toFin (S n ** LTESucc prf)) )
> ={ toFinLemma2 (S n) (S b) (LTESucc prf) }=
> ( LTESucc prf )
> -}
> toFinLemma3 n b prf = trans s1 s2 where
> s0 : FS (toFin (MkSigma n prf)) = toFin (MkSigma (S n) (LTESucc prf))
> s0 = Refl
> s1 : finToNatLemma (FS (toFin (MkSigma n prf)))
> =
> finToNatLemma (toFin (MkSigma (S n) (LTESucc prf)))
> s1 = replace {a = Fin (S b)}
> {x = FS (toFin (MkSigma n prf))}
> {y = toFin (MkSigma (S n) (LTESucc prf))}
> {P = \ x => finToNatLemma (FS (toFin (MkSigma n prf))) = finToNatLemma x}
> s0 Refl
> s2 : finToNatLemma (toFin (MkSigma (S n) (LTESucc prf)))
> =
> LTESucc prf
> s2 = toFinLemma2 (S n) (S b) (LTESucc prf)
> %freeze toFinLemma3 -- frozen
> {-
> |||
> toFinLemma6 : (n : Nat) -> (b : Nat) -> (prf : LT n b) ->
> toFin (S n ** LTESucc prf) = FS (toFin (n ** prf))
> -}
> ||| |toFin| is the left-inverse of |fromFin|
> toFinFromFinLemma : (k : Fin b) -> toFin (fromFin k) = k
> toFinFromFinLemma {b = Z} k = absurd k
> toFinFromFinLemma {b = S m} FZ = Refl
> toFinFromFinLemma {b = S m} (FS k) =
> let ih = toFinFromFinLemma k in
> rewrite ih in
> Refl
> %freeze toFinFromFinLemma -- frozen
> using implementation UninhabitedLTBZ
> ||| |fromFin| is the left-inverse of |toFin|
> fromFinToFinLemma : (n : LTB b) -> fromFin (toFin n) = n
> fromFinToFinLemma {b = Z} k = absurd k
> fromFinToFinLemma {b = S m} (MkSigma Z (LTESucc LTEZero)) = Refl
> fromFinToFinLemma {b = S m} (MkSigma (S n) (LTESucc prf)) = s6 where
> s1 : fromFin (toFin (MkSigma (S n) (LTESucc prf)))
> =
> fromFin (FS (toFin (MkSigma n prf)))
> s1 = Refl
> s2 : fromFin (FS (toFin (MkSigma n prf)))
> =
> MkSigma (finToNat (FS (toFin (MkSigma n prf)))) (finToNatLemma (FS (toFin (MkSigma n prf))))
> s2 = Refl
> s3 : finToNat (FS (toFin (MkSigma n prf))) = S n
> s3 = toFinLemma1 n m prf
> s4 : finToNatLemma (FS (toFin (MkSigma n prf))) = LTESucc prf
> s4 = toFinLemma3 n m prf
> s5 : MkSigma {A = Nat} {P = \ i => LT i (S m)}
> (finToNat (FS (toFin (MkSigma n prf))))
> (finToNatLemma (FS (toFin (MkSigma n prf))))
> =
> MkSigma {A = Nat} {P = \ i => LT i (S m)} (S n) (LTESucc prf)
> s5 = depCong2 {f = MkSigma {A = Nat} {P = \ i => LT i (S m)}} s3 s4
> s6 : fromFin (toFin (MkSigma (S n) (LTESucc prf))) = MkSigma (S n) (LTESucc prf)
> s6 = trans s1 (trans s2 s5)
> %freeze fromFinToFinLemma
Finiteness properties
> ||| Bounded |Nat|s are finite:
> finiteLTB : (b : Nat) -> Finite (LTB b)
> finiteLTB b = MkSigma b iso where
> iso : Iso (LTB b) (Fin b)
> iso = MkIso toFin fromFin toFinFromFinLemma fromFinToFinLemma
> %freeze finiteLTB -- frozen
> {-
> ||| Subtypes of bounded |Nat|s are finite:
> finiteSubLTB : (b : Nat) -> (P : LTB b -> Type) -> Dec1 P -> (uP : Unique1 P) -> Finite (SubType (LTB b) P uP)
> finiteSubLTB b P dP uP = finiteSubTypeLemma0 {A = LTB b} {P} (finiteLTB b) dP uP
> -}
Decidability properties
> ||| Equality of bounded |Nat|s is decidable
> decEqLTB : {b : Nat} -> (i : LTB b) -> (j : LTB b) -> Dec (i = j)
> decEqLTB {b} (MkSigma m p) (MkSigma n q) with (decEq m n)
> | (Yes prf) = Yes (sigmaEqLemma1 (MkSigma m p) (MkSigma n q) prf (uniqueLT))
> | (No contra) = No (\ prf => contra (getWitnessPreservesEq prf))
> %freeze decEqLTB -- frozen
> implementation [DecEqLTB] DecEq (LTB b) where
> decEq {b} i j = decEqLTB {b} i j
Show
> using (b : Nat)
> implementation [ShowLTB] Show (LTB b) where
> show (MkSigma i _) = show i
|
module Numeric.Floating
public export
interface Fractional ty => Floating ty where
constructor MkFloating
pi : ty
euler : ty
exp : ty -> ty
log : ty -> ty
pow : ty -> ty -> ty -- (**)
sin : ty -> ty
cos : ty -> ty
tan : ty -> ty
asin : ty -> ty
acos : ty -> ty
atan : ty -> ty
sinh : ty -> ty
cosh : ty -> ty
tanh : ty -> ty
sqrt : ty -> ty
export
Floating Double where
pi = Prelude.Types.pi
euler = Prelude.Types.euler
exp = Prelude.Types.exp
log = Prelude.Types.log
pow = Prelude.Types.pow
sin = Prelude.Types.sin
cos = Prelude.Types.cos
tan = Prelude.Types.tan
asin = Prelude.Types.asin
acos = Prelude.Types.acos
atan = Prelude.Types.atan
sinh = Prelude.Types.sinh
cosh = Prelude.Types.cosh
tanh = Prelude.Types.tanh
sqrt = Prelude.Types.sqrt
-- export
-- Floating Double where
-- pi = 3.14159265358979323846
-- euler = 2.7182818284590452354
-- exp x = prim__doubleExp x
-- log x = prim__doubleLog x
-- pow x y = exp (y * log x) -- prim__doublePow x y
-- sin x = prim__doubleSin x
-- cos x = prim__doubleCos x
-- tan x = prim__doubleTan x
-- asin x = prim__doubleASin x
-- acos x = prim__doubleACos x
-- atan x = prim__doubleATan x
-- sinh x = (exp x - exp (-x)) / 2
-- cosh x = (exp x + exp (-x)) / 2
-- tanh x = sinh x / cosh x -- can this NaN via cosh = 0? can cosh even be 0?
-- sqrt x = prim__doubleSqrt x
-- floor x = prim__doubleFloor x
-- ceiling x = prim__doubleCeiling x
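||| A hedged usage sketch (not part of the original module): the Euclidean norm written only
||| against the `Floating` interface above; `hypot` is a name introduced here for illustration.
export
hypot : Floating ty => ty -> ty -> ty
hypot x y = sqrt (x * x + y * y)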
|
lemma bounded_linear_sum: fixes f :: "'i \<Rightarrow> 'a::real_normed_vector \<Rightarrow> 'b::real_normed_vector" shows "(\<And>i. i \<in> I \<Longrightarrow> bounded_linear (f i)) \<Longrightarrow> bounded_linear (\<lambda>x. \<Sum>i\<in>I. f i x)" |
\section{Vision of the Solution}
% Now that we know what the customer needs,
% what will the solution look like?
\subsection{Vision Statement}
% This is the formal vision statement.
%
% For: user class
% Who: statement of need
% The: title of product
% Is: statement of solution
% Unlike: closest alternative solution
% Our Product: differentiation statement
For on-the-go researchers and academics who wish to increase their available paper-reading time, \TeX 2Speech will provide on-demand speech synthesis for papers and a wide variety of other scientific and technical documentation written in the \LaTeX\ format. Unlike standard TTS systems, \TeX 2Speech will be able to effectively parse a \LaTeX\ document containing mathematical equations and convert it to comprehensible spoken word.
\subsection{Major Features}
% Describe the major features that solve the customer's needs.
Major features include the ability to take an arbitrary \LaTeX\ document and convert it into an audio representation of the text. The most common commands and environments will be supported in this conversion to benefit the majority of users. The primary feature enabling this would be a component that converts \LaTeX\ files into Speech Synthesis Markup Language (SSML) or a similar markup. The marked-up file would then be fed to a TTS synthesis program to produce the audio output. Features visible to the user include the option to upload a \LaTeX\ document to our service and to download the resulting audio.
\subsection{Assumptions and Dependencies}
% What does this product depend on?
% The goal here is to describe, with as much detail as possible, what the envisioned
% solution needs to operate.
It must be assumed that users have internet access in order to use this program. Our service will depend on both a web hosting provider and a cloud-based TTS service; if either one fails, our service will be inoperable.
|
%******APPENDIX******
\chapter*{Appendix A}
\addcontentsline{toc}{chapter}{Appendix A}
(Code/Etc. here)
\chapter*{Appendix B}
\addcontentsline{toc}{chapter}{Appendix B}
(Code/Etc. here)
%******END OF APPENDIX******
\newpage
|
/-
Copyright (c) 2022 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import algebra.big_operators.basic
import order.locally_finite
import order.atoms
import order.sup_indep
/-!
# Finite partitions
In this file, we define finite partitions. A finpartition of `a : α` is a finite set of pairwise
disjoint parts `parts : finset α` which does not contain `⊥` and whose supremum is `a`.
## Constructions
We provide many ways to build finpartitions:
* `finpartition.of_erase`: Builds a finpartition by erasing `⊥` for you.
* `finpartition.of_subset`: Builds a finpartition from a subset of the parts of a previous
finpartition.
* `finpartition.empty`: The empty finpartition of `⊥`.
* `finpartition.indiscrete`: The indiscrete, aka trivial, aka pure, finpartition made of a single
part.
* `finpartition.discrete`: The discrete finpartition of `s : finset α` made of singletons.
* `finpartition.bind`: Puts together the finpartitions of the parts of a finpartition into a new
finpartition.
* `finpartition.atomise`: Makes a finpartition of `s : finset α` by breaking `s` along all finsets
in `F : finset (finset α)`. Two elements of `s` belong to the same part iff they belong to the
same elements of `F`.
`finpartition.indiscrete` and `finpartition.bind` together form the monadic structure of
`finpartition`.
## Implementation notes
Forbidding `⊥` as a part follows mathematical tradition and is a pragmatic choice concerning
operations on `finpartition`. Not caring about `⊥` being a part or not breaks extensionality (it's
not because the parts of `P` and the parts of `Q` have the same elements that `P = Q`). Enforcing
`⊥` to be a part makes `finpartition.bind` uglier and doesn't rid us of the need of
`finpartition.of_erase`.
## TODO
Link `finpartition` and `setoid.is_partition`.
The order is the wrong way around to make `finpartition a` a graded order. Is it bad to depart from
the literature and turn the order around?
-/
lemma set.pairwise_disjoint.eq_of_le {α ι : Type*} [semilattice_inf α] [order_bot α] {s : set ι}
{f : ι → α} (hs : s.pairwise_disjoint f) {i j : ι} (hi : i ∈ s) (hj : j ∈ s)
(h : f i ≠ ⊥) (hf : f i ≤ f j) :
i = j :=
begin
classical,
by_contra hij,
exact h ((hs hi hj hij).eq_bot_of_le hf),
end
instance {α ι : Type*} [lattice α] [order_bot α] {s t : finset ι} {f : ι → α} {i : ι}
[decidable_eq ι] [decidable_eq α] : decidable (s.sup_indep f) :=
begin
apply @finset.decidable_forall_of_decidable_subsets _ _ _ _,
intros t ht,
apply @finset.decidable_dforall_finset _ _ _ _,
exact λ i hi, @implies.decidable _ _ _ (decidable_of_iff' (_ = ⊥) disjoint_iff),
end
open finset function
open_locale big_operators
variables {α : Type*}
/-- A finite partition of `a : α` is a pairwise disjoint finite set of elements whose supremum is
`a`. We forbid `⊥` as a part. -/
@[ext, derive decidable_eq] structure finpartition [lattice α] [order_bot α] (a : α) :=
(parts : finset α)
(sup_indep : parts.sup_indep id)
(sup_parts : parts.sup id = a)
(not_bot_mem : ⊥ ∉ parts)
attribute [protected] finpartition.sup_indep
namespace finpartition
section lattice
variables [lattice α] [order_bot α]
/-- A `finpartition` constructor which does not insist on `⊥` not being a part. -/
@[simps] def of_erase [decidable_eq α] {a : α} (parts : finset α)
(sup_indep : parts.sup_indep id) (sup_parts : parts.sup id = a) :
finpartition a :=
{ parts := parts.erase ⊥,
sup_indep := sup_indep.subset (erase_subset _ _),
sup_parts := (sup_erase_bot _).trans sup_parts,
not_bot_mem := not_mem_erase _ _ }
/-- A `finpartition` constructor from a bigger existing finpartition. -/
@[simps] def of_subset {a b : α} (P : finpartition a) {parts : finset α}
(subset : parts ⊆ P.parts) (sup_parts : parts.sup id = b) :
finpartition b :=
{ parts := parts,
sup_indep := P.sup_indep.subset subset,
sup_parts := sup_parts,
not_bot_mem := λ h, P.not_bot_mem (subset h) }
/-- Changes the type of a finpartition to an equal one. -/
@[simps] def copy {a b : α} (P : finpartition a) (h : a = b) : finpartition b :=
{ parts := P.parts,
sup_indep := P.sup_indep,
sup_parts := h ▸ P.sup_parts,
not_bot_mem := P.not_bot_mem }
variables (α)
/-- The empty finpartition. -/
@[simps] protected def empty : finpartition (⊥ : α) :=
{ parts := ∅,
sup_indep := sup_indep_empty _,
sup_parts := finset.sup_empty,
not_bot_mem := not_mem_empty ⊥ }
instance : inhabited (finpartition (⊥ : α)) := ⟨finpartition.empty α⟩
@[simp] lemma default_eq_empty : (default : finpartition (⊥ : α)) = finpartition.empty α := rfl
variables {α} {a : α}
/-- The finpartition in one part, aka indiscrete finpartition. -/
@[simps] def indiscrete (ha : a ≠ ⊥) : finpartition a :=
{ parts := {a},
sup_indep := sup_indep_singleton _ _,
sup_parts := finset.sup_singleton,
not_bot_mem := λ h, ha (mem_singleton.1 h).symm }
variables (P : finpartition a)
protected lemma le {b : α} (hb : b ∈ P.parts) : b ≤ a := (le_sup hb).trans P.sup_parts.le
lemma ne_bot {b : α} (hb : b ∈ P.parts) : b ≠ ⊥ := λ h, P.not_bot_mem $ h.subst hb
protected lemma disjoint : (P.parts : set α).pairwise_disjoint id := P.sup_indep.pairwise_disjoint
variables {P}
lemma parts_eq_empty_iff : P.parts = ∅ ↔ a = ⊥ :=
begin
simp_rw ←P.sup_parts,
refine ⟨λ h, _, λ h, eq_empty_iff_forall_not_mem.2 (λ b hb, P.not_bot_mem _)⟩,
{ rw h,
exact finset.sup_empty },
{ rwa ←le_bot_iff.1 ((le_sup hb).trans h.le) }
end
lemma parts_nonempty_iff : P.parts.nonempty ↔ a ≠ ⊥ :=
by rw [nonempty_iff_ne_empty, not_iff_not, parts_eq_empty_iff]
lemma parts_nonempty (P : finpartition a) (ha : a ≠ ⊥) : P.parts.nonempty := parts_nonempty_iff.2 ha
instance : unique (finpartition (⊥ : α)) :=
{ uniq := λ P,
by { ext a, exact iff_of_false (λ h, P.ne_bot h $ le_bot_iff.1 $ P.le h) (not_mem_empty a) },
..finpartition.inhabited α }
/-- There's a unique partition of an atom. -/
@[reducible] -- See note [reducible non instances]
def _root_.is_atom.unique_finpartition (ha : is_atom a) : unique (finpartition a) :=
{ default := indiscrete ha.1,
uniq := λ P, begin
have h : ∀ b ∈ P.parts, b = a,
{ exact λ b hb, (eq_bot_or_eq_of_le_atom ha $ P.le hb).resolve_left (P.ne_bot hb) },
ext b,
refine iff.trans ⟨h b, _⟩ mem_singleton.symm,
rintro rfl,
obtain ⟨c, hc⟩ := P.parts_nonempty ha.1,
simp_rw ←h c hc,
exact hc,
end }
/-! ### Refinement order -/
section order
/-- We say that `P ≤ Q` if `P` refines `Q`: each part of `P` is less than some part of `Q`. -/
instance : has_le (finpartition a) := ⟨λ P Q, ∀ ⦃b⦄, b ∈ P.parts → ∃ c ∈ Q.parts, b ≤ c⟩
instance : partial_order (finpartition a) :=
{ le_refl := λ P b hb, ⟨b, hb, le_rfl⟩,
le_trans := λ P Q R hPQ hQR b hb, begin
obtain ⟨c, hc, hbc⟩ := hPQ hb,
obtain ⟨d, hd, hcd⟩ := hQR hc,
exact ⟨d, hd, hbc.trans hcd⟩,
end,
le_antisymm := λ P Q hPQ hQP, begin
ext b,
refine ⟨λ hb, _, λ hb, _⟩,
{ obtain ⟨c, hc, hbc⟩ := hPQ hb,
obtain ⟨d, hd, hcd⟩ := hQP hc,
rwa hbc.antisymm,
rwa P.disjoint.eq_of_le hb hd (P.ne_bot hb) (hbc.trans hcd) },
{ obtain ⟨c, hc, hbc⟩ := hQP hb,
obtain ⟨d, hd, hcd⟩ := hPQ hc,
rwa hbc.antisymm,
rwa Q.disjoint.eq_of_le hb hd (Q.ne_bot hb) (hbc.trans hcd) }
end,
..finpartition.has_le }
instance [decidable (a = ⊥)] : order_top (finpartition a) :=
{ top := if ha : a = ⊥ then (finpartition.empty α).copy ha.symm else indiscrete ha,
le_top := λ P,
begin
split_ifs,
{ intros x hx,
simpa [h, P.ne_bot hx] using P.le hx },
{ exact λ b hb, ⟨a, mem_singleton_self _, P.le hb⟩ }
end }
end order
end lattice
section distrib_lattice
variables [distrib_lattice α] [order_bot α] [decidable_eq α] {a b c : α}
instance : has_inf (finpartition a) :=
⟨λ P Q, of_erase ((P.parts.product Q.parts).image $ λ bc, bc.1 ⊓ bc.2)
begin
rw sup_indep_iff_disjoint_erase,
simp only [mem_image, and_imp, exists_prop, forall_exists_index, id.def, prod.exists,
mem_product, finset.disjoint_sup_right, mem_erase, ne.def],
rintro _ x₁ y₁ hx₁ hy₁ rfl _ h x₂ y₂ hx₂ hy₂ rfl,
rcases eq_or_ne x₁ x₂ with rfl | xdiff,
{ refine disjoint.mono inf_le_right inf_le_right (Q.disjoint hy₁ hy₂ _),
intro t,
simpa [t] using h },
exact disjoint.mono inf_le_left inf_le_left (P.disjoint hx₁ hx₂ xdiff),
end
begin
rw [sup_image, comp.left_id, sup_product_left],
transitivity P.parts.sup id ⊓ Q.parts.sup id,
{ simp_rw [finset.sup_inf_distrib_right, finset.sup_inf_distrib_left],
refl },
{ rw [P.sup_parts, Q.sup_parts, inf_idem] }
end⟩
@[simp] lemma parts_inf (P Q : finpartition a) :
(P ⊓ Q).parts = ((P.parts.product Q.parts).image $ λ bc : α × α, bc.1 ⊓ bc.2).erase ⊥ := rfl
instance : semilattice_inf (finpartition a) :=
{ inf_le_left := λ P Q b hb, begin
obtain ⟨c, hc, rfl⟩ := mem_image.1 (mem_of_mem_erase hb),
rw mem_product at hc,
exact ⟨c.1, hc.1, inf_le_left⟩,
end,
inf_le_right := λ P Q b hb, begin
obtain ⟨c, hc, rfl⟩ := mem_image.1 (mem_of_mem_erase hb),
rw mem_product at hc,
exact ⟨c.2, hc.2, inf_le_right⟩,
end,
le_inf := λ P Q R hPQ hPR b hb, begin
obtain ⟨c, hc, hbc⟩ := hPQ hb,
obtain ⟨d, hd, hbd⟩ := hPR hb,
have h := _root_.le_inf hbc hbd,
refine ⟨c ⊓ d, mem_erase_of_ne_of_mem (ne_bot_of_le_ne_bot (P.ne_bot hb) h)
(mem_image.2 ⟨(c, d), mem_product.2 ⟨hc, hd⟩, rfl⟩), h⟩,
end,
..finpartition.partial_order, ..finpartition.has_inf }
lemma exists_le_of_le {P Q : finpartition a} (h : P ≤ Q) (hb : b ∈ Q.parts) :
∃ c ∈ P.parts, c ≤ b :=
begin
by_contra' H,
refine Q.ne_bot hb (disjoint_self.1 $ disjoint.mono_right (Q.le hb) _),
rw [←P.sup_parts, finset.disjoint_sup_right],
rintro c hc,
obtain ⟨d, hd, hcd⟩ := h hc,
refine (Q.disjoint hb hd _).mono_right hcd,
rintro rfl,
exact H _ hc hcd,
end
lemma card_mono {P Q : finpartition a} (h : P ≤ Q) : Q.parts.card ≤ P.parts.card :=
begin
classical,
have : ∀ b ∈ Q.parts, ∃ c ∈ P.parts, c ≤ b := λ b, exists_le_of_le h,
choose f hP hf using this,
rw ←card_attach,
refine card_le_card_of_inj_on (λ b, f _ b.2) (λ b _, hP _ b.2) (λ b hb c hc h, _),
exact subtype.coe_injective (Q.disjoint.elim b.2 c.2 $ λ H, P.ne_bot (hP _ b.2) $
disjoint_self.1 $ H.mono (hf _ b.2) $ h.le.trans $ hf _ c.2),
end
section bind
variables {a} {P : finpartition a} {Q : Π i ∈ P.parts, finpartition i}
/-- Given a finpartition `P` of `a` and finpartitions of each part of `P`, this yields the
finpartition of `a` obtained by juxtaposing all the subpartitions. -/
@[simps] def bind (P : finpartition a) (Q : Π i ∈ P.parts, finpartition i) : finpartition a :=
{ parts := P.parts.attach.bUnion (λ i, (Q i.1 i.2).parts),
sup_indep := begin
rw sup_indep_iff_pairwise_disjoint,
rintro a ha b hb h,
rw [finset.mem_coe, finset.mem_bUnion] at ha hb,
obtain ⟨⟨A, hA⟩, -, ha⟩ := ha,
obtain ⟨⟨B, hB⟩, -, hb⟩ := hb,
obtain rfl | hAB := eq_or_ne A B,
{ exact (Q A hA).disjoint ha hb h },
{ exact (P.disjoint hA hB hAB).mono ((Q A hA).le ha) ((Q B hB).le hb) }
end,
sup_parts := begin
simp_rw [sup_bUnion, ←P.sup_parts],
rw [eq_comm, ←finset.sup_attach],
exact sup_congr rfl (λ b hb, (Q b.1 b.2).sup_parts.symm),
end,
not_bot_mem := λ h, begin
rw finset.mem_bUnion at h,
obtain ⟨⟨A, hA⟩, -, h⟩ := h,
exact (Q A hA).not_bot_mem h,
end }
lemma mem_bind : b ∈ (P.bind Q).parts ↔ ∃ A hA, b ∈ (Q A hA).parts :=
begin
rw [bind, mem_bUnion],
split,
{ rintro ⟨⟨A, hA⟩, -, h⟩,
exact ⟨A, hA, h⟩ },
{ rintro ⟨A, hA, h⟩,
exact ⟨⟨A, hA⟩, mem_attach _ ⟨A, hA⟩, h⟩ }
end
lemma card_bind (Q : Π i ∈ P.parts, finpartition i) :
(P.bind Q).parts.card = ∑ A in P.parts.attach, (Q _ A.2).parts.card :=
begin
apply card_bUnion,
rintro ⟨b, hb⟩ - ⟨c, hc⟩ - hbc d,
rw [inf_eq_inter, mem_inter],
rintro ⟨hdb, hdc⟩,
rw [ne.def, subtype.mk_eq_mk] at hbc,
exact (Q b hb).ne_bot hdb (eq_bot_iff.2 $
(le_inf ((Q b hb).le hdb) $ (Q c hc).le hdc).trans $ P.disjoint hb hc hbc),
end
end bind
/-- Adds `b` to a finpartition of `a` to make a finpartition of `a ⊔ b`. -/
@[simps] def extend (P : finpartition a) (hb : b ≠ ⊥) (hab : disjoint a b) (hc : a ⊔ b = c) :
finpartition c :=
{ parts := insert b P.parts,
sup_indep :=
begin
rw [sup_indep_iff_pairwise_disjoint, coe_insert],
exact P.disjoint.insert (λ d hd hbd, hab.symm.mono_right $ P.le hd),
end,
sup_parts := by rwa [sup_insert, P.sup_parts, id, _root_.sup_comm],
not_bot_mem := λ h, (mem_insert.1 h).elim hb.symm P.not_bot_mem }
lemma card_extend (P : finpartition a) (b c : α) {hb : b ≠ ⊥} {hab : disjoint a b}
{hc : a ⊔ b = c} :
(P.extend hb hab hc).parts.card = P.parts.card + 1 :=
card_insert_of_not_mem $ λ h, hb $ hab.symm.eq_bot_of_le $ P.le h
end distrib_lattice
section generalized_boolean_algebra
variables [generalized_boolean_algebra α] [decidable_eq α] {a : α} (P : finpartition a)
/-- Restricts a finpartition to avoid a given element. -/
@[simps] def avoid (b : α) : finpartition (a \ b) :=
of_erase
(P.parts.image (\ b))
(P.disjoint.image_finset_of_le $ λ a, sdiff_le).sup_indep
(begin
rw [sup_image, comp.left_id, finset.sup_sdiff_right],
congr,
exact P.sup_parts,
end)
end generalized_boolean_algebra
end finpartition
/-! ### Finite partitions of finsets -/
namespace finpartition
variables [decidable_eq α] {s t : finset α} (P : finpartition s)
lemma nonempty_of_mem_parts {a : finset α} (ha : a ∈ P.parts) : a.nonempty :=
nonempty_iff_ne_empty.2 $ P.ne_bot ha
lemma exists_mem {a : α} (ha : a ∈ s) : ∃ t ∈ P.parts, a ∈ t :=
by { simp_rw ←P.sup_parts at ha, exact mem_sup.1 ha }
lemma bUnion_parts : P.parts.bUnion id = s := (sup_eq_bUnion _ _).symm.trans P.sup_parts
lemma sum_card_parts : ∑ i in P.parts, i.card = s.card :=
begin
convert congr_arg finset.card P.bUnion_parts,
rw card_bUnion P.sup_indep.pairwise_disjoint,
refl,
end
/-- `⊥` is the partition in singletons, aka discrete partition. -/
instance (s : finset α) : has_bot (finpartition s) :=
⟨{ parts := s.map ⟨singleton, singleton_injective⟩,
sup_indep := set.pairwise_disjoint.sup_indep begin
rw finset.coe_map,
exact finset.pairwise_disjoint_range_singleton.subset (set.image_subset_range _ _),
end,
sup_parts := by rw [sup_map, comp.left_id, embedding.coe_fn_mk, finset.sup_singleton'],
not_bot_mem := by simp }⟩
@[simp] lemma parts_bot (s : finset α) :
(⊥ : finpartition s).parts = s.map ⟨singleton, singleton_injective⟩ := rfl
lemma card_bot (s : finset α) : (⊥ : finpartition s).parts.card = s.card := finset.card_map _
lemma mem_bot_iff : t ∈ (⊥ : finpartition s).parts ↔ ∃ a ∈ s, {a} = t := mem_map
instance (s : finset α) : order_bot (finpartition s) :=
{ bot_le := λ P t ht, begin
rw mem_bot_iff at ht,
obtain ⟨a, ha, rfl⟩ := ht,
obtain ⟨t, ht, hat⟩ := P.exists_mem ha,
exact ⟨t, ht, singleton_subset_iff.2 hat⟩,
end,
..finpartition.has_bot s }
lemma card_parts_le_card (P : finpartition s) : P.parts.card ≤ s.card :=
by { rw ←card_bot s, exact card_mono bot_le }
section atomise
/-- Cuts `s` along the finsets in `F`: Two elements of `s` will be in the same part if they are
in the same finsets of `F`. -/
def atomise (s : finset α) (F : finset (finset α)) : finpartition s :=
of_erase
(F.powerset.image $ λ Q, s.filter (λ i, ∀ t ∈ F, t ∈ Q ↔ i ∈ t))
(set.pairwise_disjoint.sup_indep $ λ x hx y hy h z hz, h begin
rw [mem_coe, mem_image] at hx hy,
obtain ⟨Q, hQ, rfl⟩ := hx,
obtain ⟨R, hR, rfl⟩ := hy,
suffices h : Q = R,
{ subst h },
rw [id, id, inf_eq_inter, mem_inter, mem_filter, mem_filter] at hz,
rw mem_powerset at hQ hR,
ext i,
refine ⟨λ hi, _, λ hi, _⟩,
{ rwa [hz.2.2 _ (hQ hi), ←hz.1.2 _ (hQ hi)] },
{ rwa [hz.1.2 _ (hR hi), ←hz.2.2 _ (hR hi)] }
end)
(begin
refine (finset.sup_le $ λ t ht, _).antisymm (λ a ha, _),
{ rw mem_image at ht,
obtain ⟨A, hA, rfl⟩ := ht,
exact s.filter_subset _ },
{ rw [mem_sup],
refine ⟨s.filter (λ i, ∀ t, t ∈ F → (t ∈ F.filter (λ u, a ∈ u) ↔ i ∈ t)),
mem_image_of_mem _ (mem_powerset.2 $ filter_subset _ _), mem_filter.2 ⟨ha, λ t ht, _⟩⟩,
rw mem_filter,
exact and_iff_right ht }
end)
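/- Illustrative example (a sketch, not part of the original source): with `s = {1, 2, 3, 4}`
in `finset ℕ` and `F = {{1, 2}, {2, 3}}`, the element `1` lies only in `{1, 2}`, `2` lies in
both cutting sets, `3` lies only in `{2, 3}` and `4` in neither, so
`(atomise s F).parts = {{1}, {2}, {3}, {4}}`. With `F = {{1, 2}}` one instead obtains the
coarser partition with parts `{{1, 2}, {3, 4}}`. -/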
variables {F : finset (finset α)}
lemma mem_atomise {t : finset α} :
t ∈ (atomise s F).parts ↔ t.nonempty ∧ ∃ (Q ⊆ F), s.filter (λ i, ∀ u ∈ F, u ∈ Q ↔ i ∈ u) = t :=
by simp only [atomise, of_erase, bot_eq_empty, mem_erase, mem_image, nonempty_iff_ne_empty,
mem_singleton, and_comm, mem_powerset, exists_prop]
lemma atomise_empty (hs : s.nonempty) : (atomise s ∅).parts = {s} :=
begin
simp only [atomise, powerset_empty, image_singleton, not_mem_empty, forall_false_left,
implies_true_iff, filter_true],
exact erase_eq_of_not_mem (not_mem_singleton.2 hs.ne_empty.symm),
end
lemma card_atomise_le : (atomise s F).parts.card ≤ 2^F.card :=
(card_le_of_subset $ erase_subset _ _).trans $ finset.card_image_le.trans (card_powerset _).le
lemma bUnion_filter_atomise (t : finset α) (ht : t ∈ F) (hts : t ⊆ s) :
((atomise s F).parts.filter $ λ u, u ⊆ t).bUnion id = t :=
begin
ext a,
rw mem_bUnion,
refine ⟨λ ⟨u, hu, ha⟩, (mem_filter.1 hu).2 ha, λ ha, _⟩,
obtain ⟨u, hu, hau⟩ := (atomise s F).exists_mem (hts ha),
refine ⟨u, mem_filter.2 ⟨hu, λ b hb, _⟩, hau⟩,
obtain ⟨Q, hQ, rfl⟩ := (mem_atomise.1 hu).2,
rw mem_filter at hau hb,
rwa [←hb.2 _ ht, hau.2 _ ht]
end
end atomise
end finpartition
|
(* Author: Tobias Nipkow, Florian Haftmann, TU Muenchen *)
section \<open>Character and string types\<close>
theory String
imports Enum Bit_Operations Code_Numeral
begin
subsection \<open>Strings as list of bytes\<close>
text \<open>
When modelling strings, we follow the approach given
in \<^url>\<open>https://utf8everywhere.org/\<close>:
\<^item> Strings are a list of bytes (8 bit).
\<^item> Byte values from 0 to 127 are US-ASCII.
\<^item> Byte values from 128 to 255 are uninterpreted blobs.
\<close>
subsubsection \<open>Bytes as datatype\<close>
datatype char =
Char (digit0: bool) (digit1: bool) (digit2: bool) (digit3: bool)
(digit4: bool) (digit5: bool) (digit6: bool) (digit7: bool)
context comm_semiring_1
begin
definition of_char :: \<open>char \<Rightarrow> 'a\<close>
where \<open>of_char c = horner_sum of_bool 2 [digit0 c, digit1 c, digit2 c, digit3 c, digit4 c, digit5 c, digit6 c, digit7 c]\<close>
lemma of_char_Char [simp]:
\<open>of_char (Char b0 b1 b2 b3 b4 b5 b6 b7) =
horner_sum of_bool 2 [b0, b1, b2, b3, b4, b5, b6, b7]\<close>
by (simp add: of_char_def)
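(* Informal worked example, not part of the original theory: the ASCII letter A has code
   65 = 1 + 64, i.e. only bits 0 and 6 are set, so it is represented as
   Char True False False False False False True False, and the horner sum gives
   of_char (Char True False False False False False True False) = 1 + 64 = 65. *)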
end
context unique_euclidean_semiring_with_bit_operations
begin
definition char_of :: \<open>'a \<Rightarrow> char\<close>
where \<open>char_of n = Char (odd n) (bit n 1) (bit n 2) (bit n 3) (bit n 4) (bit n 5) (bit n 6) (bit n 7)\<close>
lemma char_of_take_bit_eq:
\<open>char_of (take_bit n m) = char_of m\<close> if \<open>n \<ge> 8\<close>
using that by (simp add: char_of_def bit_take_bit_iff)
lemma char_of_char [simp]:
\<open>char_of (of_char c) = c\<close>
by (simp only: of_char_def char_of_def bit_horner_sum_bit_iff) simp
lemma char_of_comp_of_char [simp]:
"char_of \<circ> of_char = id"
by (simp add: fun_eq_iff)
lemma inj_of_char:
\<open>inj of_char\<close>
proof (rule injI)
fix c d
assume "of_char c = of_char d"
then have "char_of (of_char c) = char_of (of_char d)"
by simp
then show "c = d"
by simp
qed
lemma of_char_eqI:
\<open>c = d\<close> if \<open>of_char c = of_char d\<close>
using that inj_of_char by (simp add: inj_eq)
lemma of_char_eq_iff [simp]:
\<open>of_char c = of_char d \<longleftrightarrow> c = d\<close>
by (auto intro: of_char_eqI)
lemma of_char_of [simp]:
\<open>of_char (char_of a) = a mod 256\<close>
proof -
have \<open>[0..<8] = [0, Suc 0, 2, 3, 4, 5, 6, 7 :: nat]\<close>
by (simp add: upt_eq_Cons_conv)
then have \<open>[odd a, bit a 1, bit a 2, bit a 3, bit a 4, bit a 5, bit a 6, bit a 7] = map (bit a) [0..<8]\<close>
by simp
then have \<open>of_char (char_of a) = take_bit 8 a\<close>
by (simp only: char_of_def of_char_def char.sel horner_sum_bit_eq_take_bit)
then show ?thesis
by (simp add: take_bit_eq_mod)
qed
lemma char_of_mod_256 [simp]:
\<open>char_of (n mod 256) = char_of n\<close>
by (rule of_char_eqI) simp
lemma of_char_mod_256 [simp]:
\<open>of_char c mod 256 = of_char c\<close>
proof -
have \<open>of_char (char_of (of_char c)) mod 256 = of_char (char_of (of_char c))\<close>
by (simp only: of_char_of) simp
then show ?thesis
by simp
qed
lemma char_of_quasi_inj [simp]:
\<open>char_of m = char_of n \<longleftrightarrow> m mod 256 = n mod 256\<close> (is \<open>?P \<longleftrightarrow> ?Q\<close>)
proof
assume ?Q
then show ?P
by (auto intro: of_char_eqI)
next
assume ?P
then have \<open>of_char (char_of m) = of_char (char_of n)\<close>
by simp
then show ?Q
by simp
qed
lemma char_of_eq_iff:
\<open>char_of n = c \<longleftrightarrow> take_bit 8 n = of_char c\<close>
by (auto intro: of_char_eqI simp add: take_bit_eq_mod)
lemma char_of_nat [simp]:
\<open>char_of (of_nat n) = char_of n\<close>
by (simp add: char_of_def String.char_of_def drop_bit_of_nat bit_simps possible_bit_def)
end
lemma inj_on_char_of_nat [simp]:
"inj_on char_of {0::nat..<256}"
by (rule inj_onI) simp
lemma nat_of_char_less_256 [simp]:
"of_char c < (256 :: nat)"
proof -
have "of_char c mod (256 :: nat) < 256"
by arith
then show ?thesis by simp
qed
lemma range_nat_of_char:
"range of_char = {0::nat..<256}"
proof (rule; rule)
fix n :: nat
assume "n \<in> range of_char"
then show "n \<in> {0..<256}"
by auto
next
fix n :: nat
assume "n \<in> {0..<256}"
then have "n = of_char (char_of n)"
by simp
then show "n \<in> range of_char"
by (rule range_eqI)
qed
lemma UNIV_char_of_nat:
"UNIV = char_of ` {0::nat..<256}"
proof -
have "range (of_char :: char \<Rightarrow> nat) = of_char ` char_of ` {0::nat..<256}"
by (auto simp add: range_nat_of_char intro!: image_eqI)
with inj_of_char [where ?'a = nat] show ?thesis
by (simp add: inj_image_eq_iff)
qed
lemma card_UNIV_char:
"card (UNIV :: char set) = 256"
by (auto simp add: UNIV_char_of_nat card_image)
context
includes lifting_syntax integer.lifting natural.lifting
begin
lemma [transfer_rule]:
\<open>((=) ===> pcr_integer) of_char of_char\<close>
by (unfold of_char_def) transfer_prover
lemma [transfer_rule]:
\<open>(pcr_natural ===> (=)) char_of char_of\<close>
by (unfold char_of_def) transfer_prover
lemma [transfer_rule]:
\<open>((=) ===> pcr_natural) of_char of_char\<close>
by (unfold of_char_def) transfer_prover
end
lifting_update integer.lifting
lifting_forget integer.lifting
lifting_update natural.lifting
lifting_forget natural.lifting
syntax
"_Char" :: "str_position \<Rightarrow> char" ("CHR _")
"_Char_ord" :: "num_const \<Rightarrow> char" ("CHR _")
type_synonym string = "char list"
syntax
"_String" :: "str_position \<Rightarrow> string" ("_")
ML_file \<open>Tools/string_syntax.ML\<close>
instantiation char :: enum
begin
definition
"Enum.enum = [
CHR 0x00, CHR 0x01, CHR 0x02, CHR 0x03,
CHR 0x04, CHR 0x05, CHR 0x06, CHR 0x07,
CHR 0x08, CHR 0x09, CHR ''\<newline>'', CHR 0x0B,
CHR 0x0C, CHR 0x0D, CHR 0x0E, CHR 0x0F,
CHR 0x10, CHR 0x11, CHR 0x12, CHR 0x13,
CHR 0x14, CHR 0x15, CHR 0x16, CHR 0x17,
CHR 0x18, CHR 0x19, CHR 0x1A, CHR 0x1B,
CHR 0x1C, CHR 0x1D, CHR 0x1E, CHR 0x1F,
CHR '' '', CHR ''!'', CHR 0x22, CHR ''#'',
CHR ''$'', CHR ''%'', CHR ''&'', CHR 0x27,
CHR ''('', CHR '')'', CHR ''*'', CHR ''+'',
CHR '','', CHR ''-'', CHR ''.'', CHR ''/'',
CHR ''0'', CHR ''1'', CHR ''2'', CHR ''3'',
CHR ''4'', CHR ''5'', CHR ''6'', CHR ''7'',
CHR ''8'', CHR ''9'', CHR '':'', CHR '';'',
CHR ''<'', CHR ''='', CHR ''>'', CHR ''?'',
CHR ''@'', CHR ''A'', CHR ''B'', CHR ''C'',
CHR ''D'', CHR ''E'', CHR ''F'', CHR ''G'',
CHR ''H'', CHR ''I'', CHR ''J'', CHR ''K'',
CHR ''L'', CHR ''M'', CHR ''N'', CHR ''O'',
CHR ''P'', CHR ''Q'', CHR ''R'', CHR ''S'',
CHR ''T'', CHR ''U'', CHR ''V'', CHR ''W'',
CHR ''X'', CHR ''Y'', CHR ''Z'', CHR ''['',
CHR 0x5C, CHR '']'', CHR ''^'', CHR ''_'',
CHR 0x60, CHR ''a'', CHR ''b'', CHR ''c'',
CHR ''d'', CHR ''e'', CHR ''f'', CHR ''g'',
CHR ''h'', CHR ''i'', CHR ''j'', CHR ''k'',
CHR ''l'', CHR ''m'', CHR ''n'', CHR ''o'',
CHR ''p'', CHR ''q'', CHR ''r'', CHR ''s'',
CHR ''t'', CHR ''u'', CHR ''v'', CHR ''w'',
CHR ''x'', CHR ''y'', CHR ''z'', CHR ''{'',
CHR ''|'', CHR ''}'', CHR ''~'', CHR 0x7F,
CHR 0x80, CHR 0x81, CHR 0x82, CHR 0x83,
CHR 0x84, CHR 0x85, CHR 0x86, CHR 0x87,
CHR 0x88, CHR 0x89, CHR 0x8A, CHR 0x8B,
CHR 0x8C, CHR 0x8D, CHR 0x8E, CHR 0x8F,
CHR 0x90, CHR 0x91, CHR 0x92, CHR 0x93,
CHR 0x94, CHR 0x95, CHR 0x96, CHR 0x97,
CHR 0x98, CHR 0x99, CHR 0x9A, CHR 0x9B,
CHR 0x9C, CHR 0x9D, CHR 0x9E, CHR 0x9F,
CHR 0xA0, CHR 0xA1, CHR 0xA2, CHR 0xA3,
CHR 0xA4, CHR 0xA5, CHR 0xA6, CHR 0xA7,
CHR 0xA8, CHR 0xA9, CHR 0xAA, CHR 0xAB,
CHR 0xAC, CHR 0xAD, CHR 0xAE, CHR 0xAF,
CHR 0xB0, CHR 0xB1, CHR 0xB2, CHR 0xB3,
CHR 0xB4, CHR 0xB5, CHR 0xB6, CHR 0xB7,
CHR 0xB8, CHR 0xB9, CHR 0xBA, CHR 0xBB,
CHR 0xBC, CHR 0xBD, CHR 0xBE, CHR 0xBF,
CHR 0xC0, CHR 0xC1, CHR 0xC2, CHR 0xC3,
CHR 0xC4, CHR 0xC5, CHR 0xC6, CHR 0xC7,
CHR 0xC8, CHR 0xC9, CHR 0xCA, CHR 0xCB,
CHR 0xCC, CHR 0xCD, CHR 0xCE, CHR 0xCF,
CHR 0xD0, CHR 0xD1, CHR 0xD2, CHR 0xD3,
CHR 0xD4, CHR 0xD5, CHR 0xD6, CHR 0xD7,
CHR 0xD8, CHR 0xD9, CHR 0xDA, CHR 0xDB,
CHR 0xDC, CHR 0xDD, CHR 0xDE, CHR 0xDF,
CHR 0xE0, CHR 0xE1, CHR 0xE2, CHR 0xE3,
CHR 0xE4, CHR 0xE5, CHR 0xE6, CHR 0xE7,
CHR 0xE8, CHR 0xE9, CHR 0xEA, CHR 0xEB,
CHR 0xEC, CHR 0xED, CHR 0xEE, CHR 0xEF,
CHR 0xF0, CHR 0xF1, CHR 0xF2, CHR 0xF3,
CHR 0xF4, CHR 0xF5, CHR 0xF6, CHR 0xF7,
CHR 0xF8, CHR 0xF9, CHR 0xFA, CHR 0xFB,
CHR 0xFC, CHR 0xFD, CHR 0xFE, CHR 0xFF]"
definition
"Enum.enum_all P \<longleftrightarrow> list_all P (Enum.enum :: char list)"
definition
"Enum.enum_ex P \<longleftrightarrow> list_ex P (Enum.enum :: char list)"
lemma enum_char_unfold:
"Enum.enum = map char_of [0..<256]"
proof -
have "map (of_char :: char \<Rightarrow> nat) Enum.enum = [0..<256]"
by (simp add: enum_char_def of_char_def upt_conv_Cons_Cons numeral_2_eq_2 [symmetric])
then have "map char_of (map (of_char :: char \<Rightarrow> nat) Enum.enum) =
map char_of [0..<256]"
by simp
then show ?thesis
by simp
qed
instance proof
show UNIV: "UNIV = set (Enum.enum :: char list)"
by (simp add: enum_char_unfold UNIV_char_of_nat atLeast0LessThan)
show "distinct (Enum.enum :: char list)"
by (auto simp add: enum_char_unfold distinct_map intro: inj_onI)
show "\<And>P. Enum.enum_all P \<longleftrightarrow> Ball (UNIV :: char set) P"
by (simp add: UNIV enum_all_char_def list_all_iff)
show "\<And>P. Enum.enum_ex P \<longleftrightarrow> Bex (UNIV :: char set) P"
by (simp add: UNIV enum_ex_char_def list_ex_iff)
qed
end
lemma linorder_char:
"class.linorder (\<lambda>c d. of_char c \<le> (of_char d :: nat)) (\<lambda>c d. of_char c < (of_char d :: nat))"
by standard auto
text \<open>Optimized version for execution\<close>
definition char_of_integer :: "integer \<Rightarrow> char"
where [code_abbrev]: "char_of_integer = char_of"
definition integer_of_char :: "char \<Rightarrow> integer"
where [code_abbrev]: "integer_of_char = of_char"
lemma char_of_integer_code [code]:
"char_of_integer k = (let
(q0, b0) = bit_cut_integer k;
(q1, b1) = bit_cut_integer q0;
(q2, b2) = bit_cut_integer q1;
(q3, b3) = bit_cut_integer q2;
(q4, b4) = bit_cut_integer q3;
(q5, b5) = bit_cut_integer q4;
(q6, b6) = bit_cut_integer q5;
(_, b7) = bit_cut_integer q6
in Char b0 b1 b2 b3 b4 b5 b6 b7)"
by (simp add: bit_cut_integer_def char_of_integer_def char_of_def div_mult2_numeral_eq bit_iff_odd_drop_bit drop_bit_eq_div)
lemma integer_of_char_code [code]:
"integer_of_char (Char b0 b1 b2 b3 b4 b5 b6 b7) =
((((((of_bool b7 * 2 + of_bool b6) * 2 +
of_bool b5) * 2 + of_bool b4) * 2 +
of_bool b3) * 2 + of_bool b2) * 2 +
of_bool b1) * 2 + of_bool b0"
by (simp add: integer_of_char_def of_char_def)
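(* Informal example, not part of the original theory: with these code equations,
   integer_of_char (CHR ''0'') evaluates to 48, and char_of_integer 48 evaluates back
   to CHR ''0''. *)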
subsection \<open>Strings as dedicated type for target language code generation\<close>
subsubsection \<open>Logical specification\<close>
context
begin
qualified definition ascii_of :: "char \<Rightarrow> char"
where "ascii_of c = Char (digit0 c) (digit1 c) (digit2 c) (digit3 c) (digit4 c) (digit5 c) (digit6 c) False"
qualified lemma ascii_of_Char [simp]:
"ascii_of (Char b0 b1 b2 b3 b4 b5 b6 b7) = Char b0 b1 b2 b3 b4 b5 b6 False"
by (simp add: ascii_of_def)
qualified lemma not_digit7_ascii_of [simp]:
"\<not> digit7 (ascii_of c)"
by (simp add: ascii_of_def)
qualified lemma ascii_of_idem:
"ascii_of c = c" if "\<not> digit7 c"
using that by (cases c) simp
qualified lemma char_of_ascii_of [simp]:
"of_char (ascii_of c) = take_bit 7 (of_char c :: nat)"
by (cases c) (simp only: ascii_of_Char of_char_Char take_bit_horner_sum_bit_eq, simp)
qualified typedef literal = "{cs. \<forall>c\<in>set cs. \<not> digit7 c}"
morphisms explode Abs_literal
proof
show "[] \<in> {cs. \<forall>c\<in>set cs. \<not> digit7 c}"
by simp
qed
qualified setup_lifting type_definition_literal
qualified lift_definition implode :: "string \<Rightarrow> literal"
is "map ascii_of"
by auto
qualified lemma implode_explode_eq [simp]:
"String.implode (String.explode s) = s"
proof transfer
fix cs
show "map ascii_of cs = cs" if "\<forall>c\<in>set cs. \<not> digit7 c"
using that
by (induction cs) (simp_all add: ascii_of_idem)
qed
qualified lemma explode_implode_eq [simp]:
"String.explode (String.implode cs) = map ascii_of cs"
by transfer rule
end
subsubsection \<open>Syntactic representation\<close>
text \<open>
Logical ground representations for literals are:
\<^enum> \<open>0\<close> for the empty literal;
\<^enum> \<open>Literal b0 \<dots> b6 s\<close> for a literal starting with one
character and continued by another literal.
Syntactic representations for literals are:
\<^enum> Printable text as string prefixed with \<open>STR\<close>;
\<^enum> A single ascii value as numerical hexadecimal value prefixed with \<open>STR\<close>.
\<close>
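(* Informal example, not part of the original theory: the literal STR ''a'' consists of the
   single character with code 97 = 1 + 32 + 64, so its ground representation is
   String.Literal True False False False False True True 0, where the trailing 0 is the
   empty literal. *)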
instantiation String.literal :: zero
begin
context
begin
qualified lift_definition zero_literal :: String.literal
is Nil
by simp
instance ..
end
end
context
begin
qualified abbreviation (output) empty_literal :: String.literal
where "empty_literal \<equiv> 0"
qualified lift_definition Literal :: "bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> String.literal \<Rightarrow> String.literal"
is "\<lambda>b0 b1 b2 b3 b4 b5 b6 cs. Char b0 b1 b2 b3 b4 b5 b6 False # cs"
by auto
qualified lemma Literal_eq_iff [simp]:
"Literal b0 b1 b2 b3 b4 b5 b6 s = Literal c0 c1 c2 c3 c4 c5 c6 t
\<longleftrightarrow> (b0 \<longleftrightarrow> c0) \<and> (b1 \<longleftrightarrow> c1) \<and> (b2 \<longleftrightarrow> c2) \<and> (b3 \<longleftrightarrow> c3)
\<and> (b4 \<longleftrightarrow> c4) \<and> (b5 \<longleftrightarrow> c5) \<and> (b6 \<longleftrightarrow> c6) \<and> s = t"
by transfer simp
qualified lemma empty_neq_Literal [simp]:
"empty_literal \<noteq> Literal b0 b1 b2 b3 b4 b5 b6 s"
by transfer simp
qualified lemma Literal_neq_empty [simp]:
"Literal b0 b1 b2 b3 b4 b5 b6 s \<noteq> empty_literal"
by transfer simp
end
code_datatype "0 :: String.literal" String.Literal
syntax
"_Literal" :: "str_position \<Rightarrow> String.literal" ("STR _")
"_Ascii" :: "num_const \<Rightarrow> String.literal" ("STR _")
ML_file \<open>Tools/literal.ML\<close>
subsubsection \<open>Operations\<close>
instantiation String.literal :: plus
begin
context
begin
qualified lift_definition plus_literal :: "String.literal \<Rightarrow> String.literal \<Rightarrow> String.literal"
is "(@)"
by auto
instance ..
end
end
instance String.literal :: monoid_add
by (standard; transfer) simp_all
instantiation String.literal :: size
begin
context
includes literal.lifting
begin
lift_definition size_literal :: "String.literal \<Rightarrow> nat"
is length .
end
instance ..
end
instantiation String.literal :: equal
begin
context
begin
qualified lift_definition equal_literal :: "String.literal \<Rightarrow> String.literal \<Rightarrow> bool"
is HOL.equal .
instance
by (standard; transfer) (simp add: equal)
end
end
instantiation String.literal :: linorder
begin
context
begin
qualified lift_definition less_eq_literal :: "String.literal \<Rightarrow> String.literal \<Rightarrow> bool"
is "ord.lexordp_eq (\<lambda>c d. of_char c < (of_char d :: nat))"
.
qualified lift_definition less_literal :: "String.literal \<Rightarrow> String.literal \<Rightarrow> bool"
is "ord.lexordp (\<lambda>c d. of_char c < (of_char d :: nat))"
.
instance proof -
from linorder_char interpret linorder "ord.lexordp_eq (\<lambda>c d. of_char c < (of_char d :: nat))"
"ord.lexordp (\<lambda>c d. of_char c < (of_char d :: nat)) :: string \<Rightarrow> string \<Rightarrow> bool"
by (rule linorder.lexordp_linorder)
show "PROP ?thesis"
by (standard; transfer) (simp_all add: less_le_not_le linear)
qed
end
end
lemma infinite_literal:
"infinite (UNIV :: String.literal set)"
proof -
define S where "S = range (\<lambda>n. replicate n CHR ''A'')"
have "inj_on String.implode S"
proof (rule inj_onI)
fix cs ds
assume "String.implode cs = String.implode ds"
then have "String.explode (String.implode cs) = String.explode (String.implode ds)"
by simp
moreover assume "cs \<in> S" and "ds \<in> S"
ultimately show "cs = ds"
by (auto simp add: S_def)
qed
moreover have "infinite S"
by (auto simp add: S_def dest: finite_range_imageI [of _ length])
ultimately have "infinite (String.implode ` S)"
by (simp add: finite_image_iff)
then show ?thesis
by (auto intro: finite_subset)
qed
subsubsection \<open>Executable conversions\<close>
context
begin
qualified lift_definition asciis_of_literal :: "String.literal \<Rightarrow> integer list"
is "map of_char"
.
qualified lemma asciis_of_zero [simp, code]:
"asciis_of_literal 0 = []"
by transfer simp
qualified lemma asciis_of_Literal [simp, code]:
"asciis_of_literal (String.Literal b0 b1 b2 b3 b4 b5 b6 s) =
of_char (Char b0 b1 b2 b3 b4 b5 b6 False) # asciis_of_literal s "
by transfer simp
qualified lift_definition literal_of_asciis :: "integer list \<Rightarrow> String.literal"
is "map (String.ascii_of \<circ> char_of)"
by auto
qualified lemma literal_of_asciis_Nil [simp, code]:
"literal_of_asciis [] = 0"
by transfer simp
qualified lemma literal_of_asciis_Cons [simp, code]:
"literal_of_asciis (k # ks) = (case char_of k
of Char b0 b1 b2 b3 b4 b5 b6 b7 \<Rightarrow> String.Literal b0 b1 b2 b3 b4 b5 b6 (literal_of_asciis ks))"
by (simp add: char_of_def) (transfer, simp add: char_of_def)
qualified lemma literal_of_asciis_of_literal [simp]:
"literal_of_asciis (asciis_of_literal s) = s"
proof transfer
fix cs
assume "\<forall>c\<in>set cs. \<not> digit7 c"
then show "map (String.ascii_of \<circ> char_of) (map of_char cs) = cs"
by (induction cs) (simp_all add: String.ascii_of_idem)
qed
qualified lemma explode_code [code]:
"String.explode s = map char_of (asciis_of_literal s)"
by transfer simp
qualified lemma implode_code [code]:
"String.implode cs = literal_of_asciis (map of_char cs)"
by transfer simp
qualified lemma equal_literal [code]:
"HOL.equal (String.Literal b0 b1 b2 b3 b4 b5 b6 s)
(String.Literal a0 a1 a2 a3 a4 a5 a6 r)
\<longleftrightarrow> (b0 \<longleftrightarrow> a0) \<and> (b1 \<longleftrightarrow> a1) \<and> (b2 \<longleftrightarrow> a2) \<and> (b3 \<longleftrightarrow> a3)
\<and> (b4 \<longleftrightarrow> a4) \<and> (b5 \<longleftrightarrow> a5) \<and> (b6 \<longleftrightarrow> a6) \<and> (s = r)"
by (simp add: equal)
end
subsubsection \<open>Technical code generation setup\<close>
text \<open>Alternative constructor for generated computations\<close>
context
begin
qualified definition Literal' :: "bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> bool \<Rightarrow> String.literal \<Rightarrow> String.literal"
where [simp]: "Literal' = String.Literal"
lemma [code_computation_unfold]:
"String.Literal = Literal'"
by simp
end
code_reserved SML string String Char List
code_reserved OCaml string String Char List
code_reserved Haskell Prelude
code_reserved Scala string
code_printing
type_constructor String.literal \<rightharpoonup>
(SML) "string"
and (OCaml) "string"
and (Haskell) "String"
and (Scala) "String"
| constant "STR ''''" \<rightharpoonup>
(SML) "\"\""
and (OCaml) "\"\""
and (Haskell) "\"\""
and (Scala) "\"\""
setup \<open>
fold Literal.add_code ["SML", "OCaml", "Haskell", "Scala"]
\<close>
code_printing
constant "(+) :: String.literal \<Rightarrow> String.literal \<Rightarrow> String.literal" \<rightharpoonup>
(SML) infixl 18 "^"
and (OCaml) infixr 6 "^"
and (Haskell) infixr 5 "++"
and (Scala) infixl 7 "+"
| constant String.literal_of_asciis \<rightharpoonup>
(SML) "!(String.implode/ o List.map (fn k => if 0 <= k andalso k < 128 then (Char.chr o IntInf.toInt) k else raise Fail \"Non-ASCII character in literal\"))"
and (OCaml) "!(let xs = _
and chr k =
let l = Z.to'_int k
in if 0 <= l && l < 128
then Char.chr l
else failwith \"Non-ASCII character in literal\"
in String.init (List.length xs) (List.nth (List.map chr xs)))"
and (Haskell) "map/ (let chr k | (0 <= k && k < 128) = Prelude.toEnum k :: Prelude.Char in chr . Prelude.fromInteger)"
and (Scala) "\"\"/ ++/ _.map((k: BigInt) => if (BigInt(0) <= k && k < BigInt(128)) k.charValue else sys.error(\"Non-ASCII character in literal\"))"
| constant String.asciis_of_literal \<rightharpoonup>
(SML) "!(List.map (fn c => let val k = Char.ord c in if k < 128 then IntInf.fromInt k else raise Fail \"Non-ASCII character in literal\" end) /o String.explode)"
and (OCaml) "!(let s = _ in let rec exp i l = if i < 0 then l else exp (i - 1) (let k = Char.code (String.get s i) in
if k < 128 then Z.of'_int k :: l else failwith \"Non-ASCII character in literal\") in exp (String.length s - 1) [])"
and (Haskell) "map/ (let ord k | (k < 128) = Prelude.toInteger k in ord . (Prelude.fromEnum :: Prelude.Char -> Prelude.Int))"
and (Scala) "!(_.toList.map(c => { val k: Int = c.toInt; if (k < 128) BigInt(k) else sys.error(\"Non-ASCII character in literal\") }))"
| class_instance String.literal :: equal \<rightharpoonup>
(Haskell) -
| constant "HOL.equal :: String.literal \<Rightarrow> String.literal \<Rightarrow> bool" \<rightharpoonup>
(SML) "!((_ : string) = _)"
and (OCaml) "!((_ : string) = _)"
and (Haskell) infix 4 "=="
and (Scala) infixl 5 "=="
| constant "(\<le>) :: String.literal \<Rightarrow> String.literal \<Rightarrow> bool" \<rightharpoonup>
(SML) "!((_ : string) <= _)"
and (OCaml) "!((_ : string) <= _)"
and (Haskell) infix 4 "<="
\<comment> \<open>Order operations for \<^typ>\<open>String.literal\<close> work in Haskell only
if no type class instance needs to be generated, because String = [Char] in Haskell
and \<^typ>\<open>char list\<close> need not have the same order as \<^typ>\<open>String.literal\<close>.\<close>
and (Scala) infixl 4 "<="
and (Eval) infixl 6 "<="
| constant "(<) :: String.literal \<Rightarrow> String.literal \<Rightarrow> bool" \<rightharpoonup>
(SML) "!((_ : string) < _)"
and (OCaml) "!((_ : string) < _)"
and (Haskell) infix 4 "<"
and (Scala) infixl 4 "<"
and (Eval) infixl 6 "<"
subsubsection \<open>Code generation utility\<close>
setup \<open>Sign.map_naming (Name_Space.mandatory_path "Code")\<close>
definition abort :: "String.literal \<Rightarrow> (unit \<Rightarrow> 'a) \<Rightarrow> 'a"
where [simp]: "abort _ f = f ()"
declare [[code drop: Code.abort]]
lemma abort_cong:
"msg = msg' \<Longrightarrow> Code.abort msg f = Code.abort msg' f"
by simp
setup \<open>Sign.map_naming Name_Space.parent_path\<close>
setup \<open>Code_Simp.map_ss (Simplifier.add_cong @{thm Code.abort_cong})\<close>
code_printing
constant Code.abort \<rightharpoonup>
(SML) "!(raise/ Fail/ _)"
and (OCaml) "failwith"
and (Haskell) "!(error/ ::/ forall a./ String -> (() -> a) -> a)"
and (Scala) "!{/ sys.error((_));/ ((_)).apply(())/ }"
subsubsection \<open>Finally\<close>
lifting_update literal.lifting
lifting_forget literal.lifting
end
|
[STATEMENT]
lemma (in valid_graph) path_split_set:
assumes "is_path v p v'" and "v\<in>W" and "v'\<notin>W"
obtains p1 p2 u w u' where
"p=p1@(u,w,u')#p2" and
"int_vertices p1 \<subseteq> W" and "u\<in>W" and "u'\<notin>W"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
is_path v p v'
v \<in> W
v' \<notin> W
goal (1 subgoal):
1. (\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof (induct p arbitrary: v thesis)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>[] = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v [] v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
2. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case Nil
[PROOF STATE]
proof (state)
this:
\<lbrakk>[] = ?p1.0 @ (?u, ?w, ?u') # ?p2.0; int_vertices ?p1.0 \<subseteq> W; ?u \<in> W; ?u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
is_path v [] v'
v \<in> W
v' \<notin> W
goal (2 subgoals):
1. \<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>[] = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v [] v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
2. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>[] = ?p1.0 @ (?u, ?w, ?u') # ?p2.0; int_vertices ?p1.0 \<subseteq> W; ?u \<in> W; ?u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
is_path v [] v'
v \<in> W
v' \<notin> W
goal (1 subgoal):
1. thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
case (Cons vv p)
[PROOF STATE]
proof (state)
this:
\<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis; is_path ?v p v'; ?v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis
\<lbrakk>vv # p = ?p1.0 @ (?u, ?w, ?u') # ?p2.0; int_vertices ?p1.0 \<subseteq> W; ?u \<in> W; ?u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
is_path v (vv # p) v'
v \<in> W
v' \<notin> W
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
note [simp, intro!] = \<open>v\<in>W\<close> \<open>v'\<notin>W\<close>
[PROOF STATE]
proof (state)
this:
v \<in> W
v' \<notin> W
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
from Cons.prems
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>vv # p = ?p1.0 @ (?u, ?w, ?u') # ?p2.0; int_vertices ?p1.0 \<subseteq> W; ?u \<in> W; ?u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
is_path v (vv # p) v'
v \<in> W
v' \<notin> W
[PROOF STEP]
obtain w u' where
[simp]: "vv=(v,w,u')" and
REST: "is_path u' p v'"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>vv # p = ?p1.0 @ (?u, ?w, ?u') # ?p2.0; int_vertices ?p1.0 \<subseteq> W; ?u \<in> W; ?u' \<notin> W\<rbrakk> \<Longrightarrow> thesisa__
is_path v (vv # p) v'
v \<in> W
v' \<notin> W
goal (1 subgoal):
1. (\<And>w u'. \<lbrakk>vv = (v, w, u'); is_path u' p v'\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases vv) auto
[PROOF STATE]
proof (state)
this:
vv = (v, w, u')
is_path u' p v'
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
txt \<open>Distinguish whether the second node \<open>u'\<close> of the path is
in \<open>W\<close>. If yes, the proposition follows by the
induction hypothesis, otherwise it is straightforward, as
the split takes place at the first edge of the path.\<close>
[PROOF STATE]
proof (state)
this:
vv = (v, w, u')
is_path u' p v'
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
vv = (v, w, u')
is_path u' p v'
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
assume A [simp, intro!]: "u'\<in>W"
[PROOF STATE]
proof (state)
this:
u' \<in> W
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
from Cons.hyps[OF _ REST]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis; u' \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis
[PROOF STEP]
obtain p1 uu ww uu' p2 where
"p=p1@(uu,ww,uu')#p2" "int_vertices p1 \<subseteq> W" "uu \<in> W" "uu' \<notin> W"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis; u' \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>p1 uu ww uu' p2. \<lbrakk>p = p1 @ (uu, ww, uu') # p2; int_vertices p1 \<subseteq> W; uu \<in> W; uu' \<notin> W\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
p = p1 @ (uu, ww, uu') # p2
int_vertices p1 \<subseteq> W
uu \<in> W
uu' \<notin> W
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
with Cons.prems(1)[of "vv#p1" uu ww uu' p2]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>vv # p = (vv # p1) @ (uu, ww, uu') # p2; int_vertices (vv # p1) \<subseteq> W; uu \<in> W; uu' \<notin> W\<rbrakk> \<Longrightarrow> thesis
p = p1 @ (uu, ww, uu') # p2
int_vertices p1 \<subseteq> W
uu \<in> W
uu' \<notin> W
[PROOF STEP]
have thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>vv # p = (vv # p1) @ (uu, ww, uu') # p2; int_vertices (vv # p1) \<subseteq> W; uu \<in> W; uu' \<notin> W\<rbrakk> \<Longrightarrow> thesis
p = p1 @ (uu, ww, uu') # p2
int_vertices p1 \<subseteq> W
uu \<in> W
uu' \<notin> W
goal (1 subgoal):
1. thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
u' \<in> W \<Longrightarrow> thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
u' \<in> W \<Longrightarrow> thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
u' \<in> W \<Longrightarrow> thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
assume "u'\<notin>W"
[PROOF STATE]
proof (state)
this:
u' \<notin> W
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
with Cons.prems(1)[of "[]" v w u' p]
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>vv # p = [] @ (v, w, u') # p; int_vertices [] \<subseteq> W; v \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
u' \<notin> W
[PROOF STEP]
have thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>vv # p = [] @ (v, w, u') # p; int_vertices [] \<subseteq> W; v \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis
u' \<notin> W
goal (1 subgoal):
1. thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
u' \<notin> W \<Longrightarrow> thesis
goal (1 subgoal):
1. \<And>a p v thesis. \<lbrakk>\<And>v thesis. \<lbrakk>\<And>p1 u w u' p2. \<lbrakk>p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v p v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis; \<And>p1 u w u' p2. \<lbrakk>a # p = p1 @ (u, w, u') # p2; int_vertices p1 \<subseteq> W; u \<in> W; u' \<notin> W\<rbrakk> \<Longrightarrow> thesis; is_path v (a # p) v'; v \<in> W; v' \<notin> W\<rbrakk> \<Longrightarrow> thesis
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
u' \<in> W \<Longrightarrow> thesis
u' \<notin> W \<Longrightarrow> thesis
[PROOF STEP]
show thesis
[PROOF STATE]
proof (prove)
using this:
u' \<in> W \<Longrightarrow> thesis
u' \<notin> W \<Longrightarrow> thesis
goal (1 subgoal):
1. thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
thesis
goal:
No subgoals!
[PROOF STEP]
qed
|
! ###############################################################################################################################
! Begin MIT license text.
! _______________________________________________________________________________________________________
! Copyright 2019 Dr William R Case, Jr ([email protected])
! Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
! associated documentation files (the "Software"), to deal in the Software without restriction, including
! without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
! copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to
! the following conditions:
! The above copyright notice and this permission notice shall be included in all copies or substantial
! portions of the Software and documentation.
! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
! OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
! OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
! THE SOFTWARE.
! _______________________________________________________________________________________________________
! End MIT license text.
MODULE WRITE_USETSTR_Interface
INTERFACE
SUBROUTINE WRITE_USETSTR
USE PENTIUM_II_KIND, ONLY : BYTE, LONG
USE IOUNT1, ONLY : ERR, F04, F06, WRT_LOG
USE SCONTR, ONLY : BLNK_SUB_NAM, FATAL_ERR, MTDOF, NDOFA, NDOFF, NDOFG, NDOFL, NDOFM, NDOFN, NDOFO, NDOFR, &
NDOFS, NDOFSA, NDOFSB, NDOFSE, NDOFSG, NDOFSZ, NUM_USET_U1, NUM_USET_U2, TSET_CHR_LEN
USE TIMDAT, ONLY : TSEC
USE SUBR_BEGEND_LEVELS, ONLY : WRITE_USETSTR_BEGEND
USE DOF_TABLES, ONLY : TDOFI, USETSTR_TABLE
IMPLICIT NONE
CHARACTER(LEN=LEN(BLNK_SUB_NAM)):: SUBR_NAME = 'WRITE_USETSTR'
CHARACTER( 1*BYTE) :: USETSTR_OUTPUT ! If 'Y' then output of USET tables is requested
INTEGER(LONG) :: GRID_NUM(NDOFG) ! Array of grid numbers for members of a DOF set requested in USETSTR
INTEGER(LONG) :: COMP_NUM(NDOFG) ! Array of comp numbers for members of a DOF set requested in USETSTR
INTEGER(LONG) :: NUM_LEFT ! Used when printing a line of 10 values in the set
INTEGER(LONG), PARAMETER :: SUBR_BEGEND = WRITE_USETSTR_BEGEND
END SUBROUTINE WRITE_USETSTR
END INTERFACE
END MODULE WRITE_USETSTR_Interface
|
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
Tactic: let h x := (lgb - lga) * f x - (lfb - lfa) * g x
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
Tactic: have hha : Tendsto h (𝓝[>] a) (𝓝 <| lgb * lfa - lfb * lga) := by
have : Tendsto h (𝓝[>] a) (𝓝 <| (lgb - lga) * lfa - (lfb - lfa) * lga) :=
(tendsto_const_nhds.mul hfa).sub (tendsto_const_nhds.mul hga)
convert this using 2
ring
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
Tactic: have hhb : Tendsto h (𝓝[<] b) (𝓝 <| lgb * lfa - lfb * lga) := by
have : Tendsto h (𝓝[<] b) (𝓝 <| (lgb - lga) * lfb - (lfb - lfa) * lgb) :=
(tendsto_const_nhds.mul hfb).sub (tendsto_const_nhds.mul hgb)
convert this using 2
ring
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
Tactic: let h' x := (lgb - lga) * f' x - (lfb - lfa) * g' x
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
hhh' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt h (h' x) x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c
Tactic: have hhh' : ∀ x ∈ Ioo a b, HasDerivAt h (h' x) x := by
intro x hx
exact ((hff' x hx).const_mul _).sub ((hgg' x hx).const_mul _)
State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
hhh' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt h (h' x) x
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c State After: case intro.intro
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
hhh' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt h (h' x) x
c : ℝ
cmem : c ∈ Ioo a b
hc : h' c = 0
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c Tactic: rcases exists_hasDerivAt_eq_zero' hab hha hhb hhh' with ⟨c, cmem, hc⟩ State Before: case intro.intro
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
hhh' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt h (h' x) x
c : ℝ
cmem : c ∈ Ioo a b
hc : h' c = 0
⊢ ∃ c, c ∈ Ioo a b ∧ (lgb - lga) * f' c = (lfb - lfa) * g' c State After: no goals Tactic: exact ⟨c, cmem, sub_eq_zero.1 hc⟩ State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
⊢ Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga)) State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
this : Tendsto h (𝓝[Ioi a] a) (𝓝 ((lgb - lga) * lfa - (lfb - lfa) * lga))
⊢ Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga)) Tactic: have : Tendsto h (𝓝[>] a) (𝓝 <| (lgb - lga) * lfa - (lfb - lfa) * lga) :=
(tendsto_const_nhds.mul hfa).sub (tendsto_const_nhds.mul hga) State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
this : Tendsto h (𝓝[Ioi a] a) (𝓝 ((lgb - lga) * lfa - (lfb - lfa) * lga))
⊢ Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga)) State After: case h.e'_5.h.e'_3
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
this : Tendsto h (𝓝[Ioi a] a) (𝓝 ((lgb - lga) * lfa - (lfb - lfa) * lga))
⊢ lgb * lfa - lfb * lga = (lgb - lga) * lfa - (lfb - lfa) * lga Tactic: convert this using 2 State Before: case h.e'_5.h.e'_3
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
this : Tendsto h (𝓝[Ioi a] a) (𝓝 ((lgb - lga) * lfa - (lfb - lfa) * lga))
⊢ lgb * lfa - lfb * lga = (lgb - lga) * lfa - (lfb - lfa) * lga State After: no goals Tactic: ring State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
⊢ Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga)) State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
this : Tendsto h (𝓝[Iio b] b) (𝓝 ((lgb - lga) * lfb - (lfb - lfa) * lgb))
⊢ Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga)) Tactic: have : Tendsto h (𝓝[<] b) (𝓝 <| (lgb - lga) * lfb - (lfb - lfa) * lgb) :=
(tendsto_const_nhds.mul hfb).sub (tendsto_const_nhds.mul hgb) State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
this : Tendsto h (𝓝[Iio b] b) (𝓝 ((lgb - lga) * lfb - (lfb - lfa) * lgb))
⊢ Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga)) State After: case h.e'_5.h.e'_3
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
this : Tendsto h (𝓝[Iio b] b) (𝓝 ((lgb - lga) * lfb - (lfb - lfa) * lgb))
⊢ lgb * lfa - lfb * lga = (lgb - lga) * lfb - (lfb - lfa) * lgb Tactic: convert this using 2 State Before: case h.e'_5.h.e'_3
E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
this : Tendsto h (𝓝[Iio b] b) (𝓝 ((lgb - lga) * lfb - (lfb - lfa) * lgb))
⊢ lgb * lfa - lfb * lga = (lgb - lga) * lfb - (lfb - lfa) * lgb State After: no goals Tactic: ring State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
⊢ ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt h (h' x) x State After: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
x : ℝ
hx : x ∈ Ioo a b
⊢ HasDerivAt h (h' x) x Tactic: intro x hx State Before: E : Type ?u.311271
inst✝³ : NormedAddCommGroup E
inst✝² : NormedSpace ℝ E
F : Type ?u.311367
inst✝¹ : NormedAddCommGroup F
inst✝ : NormedSpace ℝ F
f f' : ℝ → ℝ
a b : ℝ
hab : a < b
hfc : ContinuousOn f (Icc a b)
hff'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hfd : DifferentiableOn ℝ f (Ioo a b)
g g' : ℝ → ℝ
hgc : ContinuousOn g (Icc a b)
hgg'✝ : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hgd : DifferentiableOn ℝ g (Ioo a b)
lfa lga lfb lgb : ℝ
hff' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt f (f' x) x
hgg' : ∀ (x : ℝ), x ∈ Ioo a b → HasDerivAt g (g' x) x
hfa : Tendsto f (𝓝[Ioi a] a) (𝓝 lfa)
hga : Tendsto g (𝓝[Ioi a] a) (𝓝 lga)
hfb : Tendsto f (𝓝[Iio b] b) (𝓝 lfb)
hgb : Tendsto g (𝓝[Iio b] b) (𝓝 lgb)
h : ℝ → ℝ := fun x => (lgb - lga) * f x - (lfb - lfa) * g x
hha : Tendsto h (𝓝[Ioi a] a) (𝓝 (lgb * lfa - lfb * lga))
hhb : Tendsto h (𝓝[Iio b] b) (𝓝 (lgb * lfa - lfb * lga))
h' : ℝ → ℝ := fun x => (lgb - lga) * f' x - (lfb - lfa) * g' x
x : ℝ
hx : x ∈ Ioo a b
⊢ HasDerivAt h (h' x) x State After: no goals Tactic: exact ((hff' x hx).const_mul _).sub ((hgg' x hx).const_mul _) |
import Smt
theorem resolution (p q r : Bool) : p || q → !p || r → q || r := by
smt
intro hpq
intro hnpr
cases p <;> cases r <;> simp_all
|
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
β : Type u_2
inst✝ : UniformSpace β
f : β → (i : ι) → α i
⊢ UniformContinuous f ↔ ∀ (i : ι), UniformContinuous fun x => f x i
[PROOFSTEP]
simp only [UniformContinuous, Pi.uniformity, tendsto_iInf, tendsto_comap_iff, Function.comp]
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
⊢ ∀ {f : Filter ((i : ι) → α i)}, Cauchy f → ∃ x, f ≤ 𝓝 x
[PROOFSTEP]
intro f hf
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
⊢ ∃ x, f ≤ 𝓝 x
[PROOFSTEP]
haveI := hf.1
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
⊢ ∃ x, f ≤ 𝓝 x
[PROOFSTEP]
have : ∀ i, ∃ x : α i, Filter.map (fun a : ∀ i, α i => a i) f ≤ 𝓝 x :=
by
intro i
have key : Cauchy (map (fun a : ∀ i : ι, α i => a i) f) := hf.map (Pi.uniformContinuous_proj α i)
exact cauchy_iff_exists_le_nhds.1 key
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
⊢ ∀ (i : ι), ∃ x, map (fun a => a i) f ≤ 𝓝 x
[PROOFSTEP]
intro i
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
i : ι
⊢ ∃ x, map (fun a => a i) f ≤ 𝓝 x
[PROOFSTEP]
have key : Cauchy (map (fun a : ∀ i : ι, α i => a i) f) := hf.map (Pi.uniformContinuous_proj α i)
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
i : ι
key : Cauchy (map (fun a => a i) f)
⊢ ∃ x, map (fun a => a i) f ≤ 𝓝 x
[PROOFSTEP]
exact cauchy_iff_exists_le_nhds.1 key
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this✝ : NeBot f
this : ∀ (i : ι), ∃ x, map (fun a => a i) f ≤ 𝓝 x
⊢ ∃ x, f ≤ 𝓝 x
[PROOFSTEP]
choose x hx using this
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
x : (i : ι) → α i
hx : ∀ (i : ι), map (fun a => a i) f ≤ 𝓝 (x i)
⊢ ∃ x, f ≤ 𝓝 x
[PROOFSTEP]
use x
[GOAL]
case h
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), CompleteSpace (α i)
f : Filter ((i : ι) → α i)
hf : Cauchy f
this : NeBot f
x : (i : ι) → α i
hx : ∀ (i : ι), map (fun a => a i) f ≤ 𝓝 (x i)
⊢ f ≤ 𝓝 x
[PROOFSTEP]
rwa [nhds_pi, le_pi]
[GOAL]
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), SeparatedSpace (α i)
x y : (i : ι) → α i
H : ∀ (r : Set (((i : ι) → α i) × ((i : ι) → α i))), r ∈ 𝓤 ((i : ι) → α i) → (x, y) ∈ r
⊢ x = y
[PROOFSTEP]
ext i
[GOAL]
case h
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), SeparatedSpace (α i)
x y : (i : ι) → α i
H : ∀ (r : Set (((i : ι) → α i) × ((i : ι) → α i))), r ∈ 𝓤 ((i : ι) → α i) → (x, y) ∈ r
i : ι
⊢ x i = y i
[PROOFSTEP]
apply eq_of_separated_of_uniformContinuous (Pi.uniformContinuous_proj α i)
[GOAL]
case h
ι : Type u_1
α : ι → Type u
U : (i : ι) → UniformSpace (α i)
inst✝ : ∀ (i : ι), SeparatedSpace (α i)
x y : (i : ι) → α i
H : ∀ (r : Set (((i : ι) → α i) × ((i : ι) → α i))), r ∈ 𝓤 ((i : ι) → α i) → (x, y) ∈ r
i : ι
⊢ (fun i => x i) ≈ fun i => y i
[PROOFSTEP]
apply H
|
lemma collinear: fixes S :: "'a::{perfect_space,real_vector} set" shows "collinear S \<longleftrightarrow> (\<exists>u. u \<noteq> 0 \<and> (\<forall>x \<in> S. \<forall> y \<in> S. \<exists>c. x - y = c *\<^sub>R u))" |
//
// Boost.Process
// ~~~~~~~~~~~~~
//
// Copyright (c) 2006, 2007 Julio M. Merino Vidal
// Copyright (c) 2008 Boris Schaeling
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#include <boost/process/config.hpp>
#if defined(BOOST_POSIX_API)
# include <boost/process/posix_context.hpp>
namespace bp = ::boost::process;
bp::posix_context *test_it()
{
return new bp::posix_context();
}
#endif
|
# 18 PDEs: Waves – Students
(See *Computational Physics* Ch 21 and *Computational Modeling* Ch 6.5.)
## Background: waves on a string
Assume a 1D string of length $L$ with mass density per unit length $\rho$ along the $x$ direction. It is held under constant tension $T$ (a force). Frictional forces are ignored, and the tension is so high that sagging due to gravity can be neglected.
### 1D wave equation
The string is displaced in the $y$ direction from its rest position, i.e., the displacement $y(x, t)$ is a function of space $x$ and time $t$.
For small relative displacements $y(x, t)/L \ll 1$ and therefore small slopes $\partial y/\partial x$ we can describe $y(x, t)$ with a *linear* equation of motion:
Newton's second law applied to a short element of the string with length $\Delta x$ and mass $\Delta m = \rho \Delta x$: the left hand side contains the *restoring force* that opposes the displacement, the right hand side is the mass times the acceleration of the string element:
\begin{align}
\sum F_{y}(x) &= \Delta m\, a(x, t)\\
T \sin\theta(x+\Delta x) - T \sin\theta(x) &= \rho \Delta x \frac{\partial^2 y(x, t)}{\partial t^2}
\end{align}
The angle $\theta$ measures by how much the string is bent away from the resting configuration.
Because we assume small relative displacements, the angles are small ($\theta \ll 1$) and we can make the small angle approximation
$$
\sin\theta \approx \tan\theta = \frac{\partial y}{\partial x}
$$
and hence
\begin{align}
T \left.\frac{\partial y}{\partial x}\right|_{x+\Delta x} - T \left.\frac{\partial y}{\partial x}\right|_{x} &= \rho \Delta x \frac{\partial^2 y(x, t)}{\partial t^2}\\
\frac{T \left.\frac{\partial y}{\partial x}\right|_{x+\Delta x} - T \left.\frac{\partial y}{\partial x}\right|_{x}}{\Delta x} &= \rho \frac{\partial^2 y}{\partial t^2}
\end{align}
or in the limit $\Delta x \rightarrow 0$ a linear hyperbolic PDE results:
\begin{gather}
\frac{\partial^2 y(x, t)}{\partial x^2} = \frac{1}{c^2} \frac{\partial^2 y(x, t)}{\partial t^2}, \quad c = \sqrt{\frac{T}{\rho}}
\end{gather}
where $c$ has the dimension of a velocity. This is the (linear) **wave equation**.
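As a quick dimensional check, with the tension $T$ measured in newtons and $\rho$ in $\mathrm{kg\,m^{-1}}$,
$$
[c] = \sqrt{\frac{\mathrm{N}}{\mathrm{kg\,m^{-1}}}}
    = \sqrt{\frac{\mathrm{kg\,m\,s^{-2}}}{\mathrm{kg\,m^{-1}}}}
    = \mathrm{m\,s^{-1}},
$$
confirming that $c$ is indeed a velocity.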
### General solution: waves
General solutions are propagating waves:
If $f(x)$ is a solution at $t=0$ then
$$
y_{\mp}(x, t) = f(x \mp ct)
$$
are also solutions at later $t > 0$.
Because of linearity, any linear combination is also a solution, so the most general solution contains both right and left propagating waves
$$
y(x, t) = A f(x - ct) + B g(x + ct)
$$
(Whether $f$ and/or $g$ are present depends on the initial conditions.)
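That $y_\mp(x, t) = f(x \mp ct)$ indeed solves the wave equation follows from the chain rule (with $u = x \mp ct$):
$$
\frac{\partial^2 y_\mp}{\partial x^2} = f''(u), \qquad
\frac{\partial^2 y_\mp}{\partial t^2} = (\mp c)^2\, f''(u) = c^2 f''(u),
$$
so $\frac{\partial^2 y_\mp}{\partial x^2} = \frac{1}{c^2}\frac{\partial^2 y_\mp}{\partial t^2}$ for any twice-differentiable $f$.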
In three dimensions the wave equation is
$$
\boldsymbol{\nabla}^2 y(\mathbf{x}, t) - \frac{1}{c^2} \frac{\partial^2 y(\mathbf{x}, t)}{\partial t^2} = 0\
$$
### Boundary and initial conditions
* The boundary conditions could be that the ends are fixed
$$y(0, t) = y(L, t) = 0$$
* The *initial condition* is a shape for the string, e.g., a Gaussian at the center
$$
y(x, t=0) = g(x) = y_0 \frac{1}{\sqrt{2\pi\sigma}} \exp\left[-\frac{(x - x_0)^2}{2\sigma^2}\right]
$$
at time 0.
* Because the wave equation is *second order in time* we need a second initial condition, for instance, the string is released from rest:
$$
\frac{\partial y(x, t=0)}{\partial t} = 0
$$
(The derivative, i.e., the initial displacement velocity is provided.)
### Analytical solution
Solve (as always) with *separation of variables*.
$$
y(x, t) = X(x) T(t)
$$
and this yields the general solution (with boundary conditions of fixed string ends and initial condition of zero velocity) as a superposition of normal modes
$$
y(x, t) = \sum_{n=0}^{+\infty} B_n \sin k_n x\, \cos\omega_n t,
\quad \omega_n = ck_n,\ k_n = n \frac{\pi}{L} = n k_0.
$$
(The angular frequency $\omega$ and the wave vector $k$ are determined from the boundary conditions.)
The coefficients $B_n$ are obtained from the initial shape:
$$
y(x, t=0) = \sum_{n=0}^{+\infty} B_n \sin n k_0 x = g(x)
$$
In principle one can use the fact that $\int_0^L dx \sin m k_0 x \, \sin n k_0 x = \frac{L}{2} \delta_{mn}$ for $m, n \geq 1$ (orthogonality) to calculate the coefficients:
\begin{align}
\int_0^L dx \sin m k_0 x \sum_{n=0}^{+\infty} B_n \sin n k_0 x &= \int_0^L dx \sin(m k_0 x) \, g(x)\\
\frac{L}{2} \sum_{n=0}^{+\infty} B_n \delta_{mn} &= \dots \\
B_m &= \frac{2}{L} \dots
\end{align}
(but the analytical solution is ugly and I cannot be bothered to put it down here.)
## Numerical solution
1. discretize wave equation
2. time stepping: leap frog algorithm (iterate)
Use the central difference approximation for the second order derivatives:
\begin{align}
\frac{\partial^2 y}{\partial t^2} &\approx \frac{y(x, t+\Delta t) + y(x, t-\Delta t) - 2y(x, t)}{\Delta t ^2} = \frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\Delta t^2}\\
\frac{\partial^2 y}{\partial x^2} &\approx \frac{y(x+\Delta x, t) + y(x-\Delta x, t) - 2y(x, t)}{\Delta x ^2} = \frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\Delta x^2}
\end{align}
and substitute into the wave equation to yield the *discretized* wave equation:
$$
\frac{y_{i+1, j} + y_{i-1, j} - 2y_{i,j}}{\Delta x^2} = \frac{1}{c^2} \frac{y_{i, j+1} + y_{i, j-1} - 2y_{i,j}}{\Delta t^2}
$$
#### Student activity: derive the finite difference version of the 1D wave equation
Re-arrange so that the future terms $j+1$ can be calculated from the present $j$ and past $j-1$ terms:
$$
? = ?
$$
Use $\beta := \frac{c}{\Delta x/\Delta t}$ to write your solution.
This is the time stepping algorithm for the wave equation.
## Numerical implementation
```python
# if you have plotting problems, try
# %matplotlib inline
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('ggplot')
```
Implement the time stepping algorithm in the code below. Look for sections `# TODO`.
```python
L = 0.5 # m
Nx = 50
Nt = 100
Dx = L/Nx
# TODO: choose Dt
Dt = # s
rho = 1.5e-2 # kg/m
tension = 150 # N
c = np.sqrt(tension/rho)
# TODO: calculate beta
beta =
beta2 =
print("c = {0} m/s".format(c))
print("Dx = {0} m, Dt = {1} s, Dx/Dt = {2} m/s".format(Dx, Dt, Dx/Dt))
print("beta = {}".format(beta))
X = np.linspace(0, L, Nx+1) # need N+1!
def gaussian(x, y0=0.05, x0=L/2, sigma=0.1*L):
    return y0/np.sqrt(2*np.pi*sigma) * np.exp(-(x-x0)**2/(2*sigma**2))
# displacements at j-1, j, j+1
y0 = np.zeros_like(X)
y1 = np.zeros_like(y0)
y2 = np.zeros_like(y0)
# save array
y_t = np.zeros((Nt+1, Nx+1))
# boundary conditions
# TODO: set boundary conditions
y2[:] = y0
# initial conditions: velocity 0, i.e. no difference between y0 and y1
y0[1:-1] = y1[1:-1] = gaussian(X)[1:-1]
# save initial
t_index = 0
y_t[t_index, :] = y0
t_index += 1
y_t[t_index, :] = y1
for jt in range(2, Nt):
    # TODO: time stepping algorithm
    t_index += 1
    y_t[t_index, :] = y2
    print("Iteration {0:5d}".format(jt), end="\r")
else:
    print("Completed {0:5d} iterations: t={1} s".format(jt, jt*Dt))
```
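For reference, one possible way to fill in the `# TODO` sections is sketched below (compare it against your own implementation; it is not the only correct choice). It uses the time-stepping formula that is written out explicitly in the von Neumann stability section further down, and a time step right at the Courant limit $\beta = 1$.

```python
# A possible completion of the TODO lines above (a sketch, not the official solution).
# It assumes the variables defined in that cell (Dx, c, y0, y1, y2, y_t, t_index, Nt).

Dt = 1e-4                  # s; equals Dx/c here, so beta = 1 (Courant limit, see below)

beta = c*Dt/Dx
beta2 = beta**2

# boundary conditions: fixed ends, y(0, t) = y(L, t) = 0
y0[0] = y0[-1] = y1[0] = y1[-1] = 0.0

# time stepping (leap frog) for the interior points:
#   y[i, j+1] = 2*(1 - beta**2)*y[i, j] - y[i, j-1] + beta**2*(y[i+1, j] + y[i-1, j])
for jt in range(2, Nt):
    y2[1:-1] = (2*(1 - beta2)*y1[1:-1] - y0[1:-1]
                + beta2*(y1[2:] + y1[:-2]))
    y0[:] = y1             # shift time levels: j-1 <- j
    y1[:] = y2             # j <- j+1
    t_index += 1
    y_t[t_index, :] = y2
```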
### 1D plot
Plot the output stored in the array `y_t`. Vary the time steps that you look at with `y_t[start:end]`.
Time is indicated by the changing color.
```python
ax = plt.subplot(111)
ax.set_prop_cycle("color", [plt.cm.viridis_r(i) for i in np.linspace(0, 1, len(y_t))])
ax.plot(X, y_t.T);
```
### 1D Animation
For 1D animation to work in a Jupyter notebook, use
```python
%matplotlib notebook
```
If no animations are visible, restart kernel and execute the `%matplotlib notebook` cell as the very first one in the notebook.
We use `matplotlib.animation` to look at movies of our solution:
```python
import matplotlib.animation as animation
```
The `update_wave()` function simply re-draws our image for every `frame`.
```python
y_limits = 1.05*y_t.min(), 1.05*y_t.max()
fig1 = plt.figure(figsize=(5,5))
ax = fig1.add_subplot(111)
ax.set_aspect(1)
def update_wave(frame, data):
    global ax, Dt, y_limits
    ax.clear()
    ax.set_xlabel("x (m)")
    ax.set_ylabel("y (m)")
    ax.plot(X, data[frame])
    ax.set_ylim(y_limits)
    ax.text(0.1, 0.9, "t = {0:3.1f} ms".format(frame*Dt*1e3), transform=ax.transAxes)

wave_anim = animation.FuncAnimation(fig1, update_wave, frames=len(y_t), fargs=(y_t,),
                                    interval=30, blit=False, repeat_delay=100)
```
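If you want to keep the movie, the animation object can also be written to disk. A minimal example (this assumes the Pillow writer is available, as it is in standard matplotlib installations):

```python
# optional: save the animation as an animated GIF (uses the "pillow" writer)
wave_anim.save("wave1d.gif", writer="pillow", fps=30)
```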
### 3D plot
(Uses functions from previous lessons.)
```python
def plot_y(y_t, Dt, Dx, step=1):
    T, X = np.meshgrid(range(y_t.shape[0]), range(y_t.shape[1]))
    Y = y_t.T[X, T]  # interpret index 0 as "t" and index 1 as "x", but plot x along axis 1 and t along axis 2
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.plot_wireframe(X*Dx, T*Dt*step, Y)
    ax.set_ylabel(r"time $t$ (s)")
    ax.set_xlabel(r"position $x$ (m)")
    ax.set_zlabel(r"displacement $y$ (m)")
    fig.tight_layout()
    return ax
def plot_surf(y_t, Dt, Dx, step=1, filename=None, offset=-1, zlabel=r'displacement',
              elevation=40, azimuth=-20, cmap=plt.cm.coolwarm):
    """Plot y_t as a 3D plot with contour plot underneath.

    Arguments
    ---------
    y_t : 2D array
        displacement y(t, x)
    filename : string or None, optional (default: None)
        If `None` then show the figure and return the axes object.
        If a string is given (like "contour.png") it will only plot
        to the filename and close the figure but return the filename.
    offset : float, optional (default: -1)
        position the 2D contour plot by offset along the Z direction
        under the minimum Z value
    zlabel : string, optional
        label for the Z axis and color scale bar
    elevation : float, optional
        choose elevation for initial viewpoint
    azimuth : float, optional
        choose azimuth angle for initial viewpoint
    """
    t = np.arange(y_t.shape[0], dtype=int)
    x = np.arange(y_t.shape[1], dtype=int)
    T, X = np.meshgrid(t, x)
    Y = y_t.T[X, T]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    surf = ax.plot_surface(X*Dx, T*Dt*step, Y, cmap=cmap, rstride=1, cstride=1, alpha=1)
    cset = ax.contourf(X*Dx, T*Dt*step, Y, 20, zdir='z', offset=offset+Y.min(), cmap=cmap)
    ax.set_xlabel('x')
    ax.set_ylabel('t')
    ax.set_zlabel(zlabel)
    ax.set_zlim(offset + Y.min(), Y.max())
    ax.view_init(elev=elevation, azim=azimuth)
    cb = fig.colorbar(surf, shrink=0.5, aspect=5)
    cb.set_label(zlabel)
    if filename:
        fig.savefig(filename)
        plt.close(fig)
        return filename
    else:
        return ax
```
```python
plot_y(y_t, Dt, Dx)
```
```python
plot_surf(y_t, Dt, Dx, offset=0, cmap=plt.cm.coolwarm)
```
## von Neumann stability analysis: Courant condition
Assume that the solutions of the discretized equation can be written as normal modes
$$
y_{m,j} = \xi(k)^j e^{ikm\Delta x}, \quad t=j\Delta t,\ x=m\Delta x
$$
The time stepping algorithm is stable if
$$
|\xi(k)| < 1
$$
Insert normal modes into the discretized equation
$$
y_{i,j+1} = 2(1 - \beta^2)y_{i,j} - y_{i, j-1} + \beta^2 (y_{i+1,j} + y_{i-1,j}), \quad
\beta := \frac{c}{\Delta x/\Delta t}
$$
and simplify (use $1-\cos x = 2\sin^2\frac{x}{2}$):
$$
\xi^2 - 2(1-2\beta^2 s^2)\xi + 1 = 0, \quad s=\sin(k\Delta x/2)
$$
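(In detail: substituting $y_{m,j} = \xi^j e^{ikm\Delta x}$ into the update formula and dividing by $\xi^{j-1} e^{ikm\Delta x}$ gives
$$
\xi^2 = 2(1-\beta^2)\,\xi - 1 + \beta^2 \xi\left(e^{ik\Delta x} + e^{-ik\Delta x}\right)
      = 2\left[1 - \beta^2\left(1 - \cos k\Delta x\right)\right]\xi - 1,
$$
which becomes the quadratic above after using $1-\cos k\Delta x = 2\sin^2(k\Delta x/2) = 2s^2$.)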
The characteristic equation has roots
$$
\xi_{\pm} = 1 - 2\beta^2 s^2 \pm \sqrt{(1-2\beta^2 s^2)^2 - 1}.
$$
It has one root for
$$
\left|1-2\beta^2 s^2\right| = 1,
$$
i.e., for
$$
\beta s = 1
$$
We have two distinct real roots for
$$
\left|1-2\beta^2 s^2\right| > 1 \\
\beta s > 1
$$
but one of the roots is always $|\xi| > 1$ and hence these solutions will diverge and not be stable.
For
$$
\left|1-2\beta^2 s^2\right| ≤ 1 \\
\beta s ≤ 1
$$
the roots will be *complex conjugates of each other*
$$
\xi_\pm = 1 - 2\beta^2s^2 \pm i\sqrt{1-(1-2\beta^2s^2)^2}
$$
and the *magnitude*
$$
|\xi_{\pm}|^2 = (1 - 2\beta^2s^2)^2 + \left(1-(1-2\beta^2s^2)^2\right) = 1
$$
is unity: Thus the solutions will not grow and will be *stable* for
$$
\beta s ≤ 1\\
\frac{c}{\frac{\Delta x}{\Delta t}} \sin\frac{k \Delta x}{2} ≤ 1
$$
Assuming the "worst case" for the $\sin$ factor (namely, 1), the **condition for stability** is
$$
c ≤ \frac{\Delta x}{\Delta t}
$$
or
$$
\beta ≤ 1.
$$
This is also known as the **Courant condition**. When written as
$$
\Delta t ≤ \frac{\Delta x}{c}
$$
it means that the time step $\Delta t$ (for a given $\Delta x$) must be *smaller than the time that the wave takes to travel one grid step*.
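For the parameters used in the simulation cell above, the Courant condition gives a concrete bound on the time step (a quick sanity check, reusing the numbers set earlier in this notebook):

```python
import numpy as np

# parameters from the simulation cell above
L, Nx = 0.5, 50            # length (m), number of grid intervals
rho = 1.5e-2               # kg/m
tension = 150              # N

c = np.sqrt(tension/rho)   # wave speed: 100 m/s
Dx = L/Nx                  # grid spacing: 0.01 m
Dt_max = Dx/c              # largest stable time step

print("c = {0:.0f} m/s, Dx = {1:.3f} m, stability requires Dt <= {2:.1e} s".format(c, Dx, Dt_max))
# c = 100 m/s, Dx = 0.010 m, stability requires Dt <= 1.0e-04 s
```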
```python
```
|
\documentclass[a4paper, 12pt]{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{longtable}
\usepackage{pdflscape}
\usepackage{algorithm}
\usepackage{graphicx}
\usepackage{float}
\usepackage[noend]{algpseudocode}
\usepackage{url}
\usepackage{tikz}
\usetikzlibrary{arrows}
\usepackage{float}
\newlength\tindent
\setlength{\tindent}{\parindent}
\setlength{\parindent}{0pt}
\renewcommand{\indent}{\hspace*{\tindent}}
\newtheorem{thm}{Theorem}
\newtheorem{cor}{Corollary}[thm]
\newtheorem{lemma}{Lemma}[thm]
\title{COMP 304 Assignment 4}
\author{Daniel Braithwaite}
\begin{document}
\pagenumbering{gobble}
\maketitle
\newpage
\pagenumbering{arabic}
\section{Example Queries}
\begin{enumerate}
\item \textbf{answer([i, feel, bad], R).} will unify R with [what, makes, you, feel, bad, qm].
\item The previous query also works in reverse: \textbf{answer(Q, [what, makes, you, feel, bad, qm]).} will unify Q with [i, feel, bad].
\end{enumerate}
The same behaviour holds for all other types of queries that the program can handle.
\section{Working Backwards}
In my solution, given a reply string, the program is able to find a matching question string. A few tweaks had to be made for this to work.\\
Firstly, the match predicate is used to take a question string to a reply string. Originally it matched only on the first words of the query string. The problem was that, when working backwards, Prolog did not know which definition of match to use and would pick the first option. It would then get stuck, because part of creating the reply involves appending two lists together, and it was unable to find the correct original list since the words do not line up. To solve this, the match predicate now matches on the first words of both the question and the reply.\\
There is still an issue with working backwards: it will successfully find a solution, but if you then ask for another it enters an infinite loop and eventually runs out of memory. It seems to create an infinitely large list while trying to run the append predicate in reverse; I am not sure why this happens.
\end{document} |
# This file was generated, do not modify it. # hide
using MLJ
using PrettyPrinting
MLJ.color_off() # hide
X, y = @load_iris
@load DecisionTreeClassifier |
using Revise
using ADCME
using ADSeismic
using PyPlot
using DelimitedFiles
matplotlib.use("agg")
param = AcousticPropagatorParams(NX=150, NY=150, NSTEP=1000, DELTAT=1e-4, DELTAX=1.0, DELTAY=1.0,
vp_ref = 3000.0, Rcoef=0.001,
USE_PML_XMIN=true, USE_PML_XMAX=true, USE_PML_YMIN=true, USE_PML_YMAX=true)
rc = Ricker(param, 15.0, 100.0, 1e10)
srci = [param.NX ÷ 2]
srcj = [param.NY ÷ 2]
srcv = reshape(rc, :, 1)
src = AcousticSource(srci, srcj, srcv)
c = 3000.0*ones(param.NX+2, param.NY+2)
model = AcousticPropagatorSolver(param, src, c)
sess = Session(); init(sess)
u = run(sess, model.u)
p = visualize_wavefield(u, param)
saveanim(p, "acoustic-wavefield.gif")
|
-----------------------------------------------------------------------------
-- |
-- Module : Berp.Base.StdTypes.Complex
-- Copyright : (c) 2010 Bernie Pope
-- License : BSD-style
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : ghc
--
-- The standard complex number type.
--
-----------------------------------------------------------------------------
module Berp.Base.StdTypes.Complex (complex, complexClass) where
import Data.Complex (Complex (..), realPart, imagPart)
import Berp.Base.Monad (constantIO)
import Berp.Base.Prims (primitive, raise)
import Berp.Base.SemanticTypes (Object (..), Eval)
import Berp.Base.Identity (newIdentity)
import Berp.Base.Attributes (mkAttributesList)
import Berp.Base.StdNames
import Berp.Base.Builtins (notImplementedError)
import Berp.Base.Operators
( addComplexComplexComplex
, addComplexIntComplex
, addComplexFloatComplex
, subComplexComplexComplex
, subComplexIntComplex
, subComplexFloatComplex
, mulComplexComplexComplex
, mulComplexIntComplex
, mulComplexFloatComplex
, divComplexComplexComplex
, divComplexIntComplex
, divComplexFloatComplex
, eqComplexComplexBool
, eqComplexIntBool
, eqComplexFloatBool )
import {-# SOURCE #-} Berp.Base.StdTypes.Type (newType)
import Berp.Base.StdTypes.ObjectBase (objectBase)
import Berp.Base.StdTypes.String (string)
{-# NOINLINE complex #-}
complex :: Complex Double -> Object
complex c = constantIO $ do
identity <- newIdentity
return $ Complex { object_identity = identity, object_complex = c }
{-# NOINLINE complexClass #-}
complexClass :: Object
complexClass = constantIO $ do
dict <- attributes
newType [string "complex", objectBase, dict]
attributes :: IO Object
attributes = mkAttributesList
[ (specialAddName, add)
, (specialSubName, sub)
, (specialMulName, mul)
, (specialDivName, divide)
, (specialEqName, eq)
, (specialStrName, str)
]
mkOp :: (Object -> Object -> Eval Object) ->
(Object -> Object -> Eval Object) ->
(Object -> Object -> Eval Object) ->
Object
mkOp opComplex opFloat opInt = primitive 2 fun
where
fun (x:y:_) =
case y of
Complex {} -> opComplex x y
Float {} -> opFloat x y
Integer {} -> opInt x y
_other -> raise notImplementedError
fun _other = error "operator on Complex applied to wrong number of arguments"
add :: Object
add = mkOp addComplexComplexComplex addComplexFloatComplex addComplexIntComplex
sub :: Object
sub = mkOp subComplexComplexComplex subComplexFloatComplex subComplexIntComplex
mul :: Object
mul = mkOp mulComplexComplexComplex mulComplexFloatComplex mulComplexIntComplex
divide :: Object
divide = mkOp divComplexComplexComplex divComplexFloatComplex divComplexIntComplex
eq :: Object
eq = mkOp eqComplexComplexBool eqComplexFloatBool eqComplexIntBool
str :: Object
str = primitive 1 fun
where
fun (x:_) = return $ string $ showComplex x
fun _other = error "str method on Complex applied to wrong number of arguments"
showComplex :: Object -> String
showComplex obj
| r == 0 = if i < 0 then "-" ++ showImg else showImg
| i < 0 = "(" ++ showR ++ "-" ++ showImg ++ ")"
| otherwise = "(" ++ showR ++ "+" ++ showImg ++ ")"
where
showImg = showI ++ "j"
showI = showNum $ abs i
showR = showNum r
c = object_complex obj
i = imagPart c
r = realPart c
showNum :: Double -> String
showNum n
| fracPart == 0 = show intPart
| otherwise = show n
where
(intPart, fracPart) = properFraction n :: (Integer, Double)
|
subsection \<open>Stuttering\<close>
theory Stuttering
imports
Semantics.IRStepThms
begin
inductive stutter:: "IRGraph \<Rightarrow> MapState \<Rightarrow> Params \<Rightarrow> FieldRefHeap \<Rightarrow> ID \<Rightarrow> ID \<Rightarrow> bool" ("_ _ _ _ \<turnstile> _ \<leadsto> _" 55)
for g m p h where
StutterStep:
"\<lbrakk>g, p \<turnstile> (nid,m,h) \<rightarrow> (nid',m,h)\<rbrakk>
\<Longrightarrow> g m p h \<turnstile> nid \<leadsto> nid'" |
Transitive:
"\<lbrakk>g, p \<turnstile> (nid,m,h) \<rightarrow> (nid'',m,h);
g m p h \<turnstile> nid'' \<leadsto> nid'\<rbrakk>
\<Longrightarrow> g m p h \<turnstile> nid \<leadsto> nid'"
lemma stuttering_successor:
assumes "(g, p \<turnstile> (nid, m, h) \<rightarrow> (nid', m, h))"
shows "{P'. (g m p h \<turnstile> nid \<leadsto> P')} = {nid'} \<union> {nid''. (g m p h \<turnstile> nid' \<leadsto> nid'')}"
proof -
have nextin: "nid' \<in> {P'. (g m p h \<turnstile> nid \<leadsto> P')}"
using assms StutterStep by blast
have nextsubset: "{nid''. (g m p h \<turnstile> nid' \<leadsto> nid'')} \<subseteq> {P'. (g m p h \<turnstile> nid \<leadsto> P')}"
by (metis Collect_mono assms stutter.Transitive)
have "\<forall>n \<in> {P'. (g m p h \<turnstile> nid \<leadsto> P')} . n = nid' \<or> n \<in> {nid''. (g m p h \<turnstile> nid' \<leadsto> nid'')}"
using stepDet
by (metis (no_types, lifting) Pair_inject assms mem_Collect_eq stutter.simps)
then show ?thesis
using insert_absorb mk_disjoint_insert nextin nextsubset by auto
qed
end |
#[global]
Set Primitive Projections.
#[global]
Unset Printing Primitive Projection Parameters.
#[global]
Set Universe Polymorphism.
#[global]
Set Default Goal Selector "!".
#[global]
Unset Universe Minimization ToSet.
Require Import Coq.Unicode.Utf8.
Require Import Coq.Setoids.Setoid.
Require Import Coq.Classes.SetoidClass.
Require Import Coq.Strings.String.
(* Prototype an approach based around "structured cospans" *)
#[universes(cumulative)]
Class Term := {
T: Type ;
T_Setoid: Setoid T ;
mt: T ;
trv: T ;
sum: T → T → T ;
prod: T → T → T ;
exp: T → T → T ;
sum_Proper: Proper (equiv ==> equiv ==> equiv) sum ;
prod_Proper: Proper (equiv ==> equiv ==> equiv) prod ;
exp_Proper: Proper (equiv ==> equiv ==> equiv) exp ;
}.
Coercion T: Term >-> Sortclass.
Existing Instance T_Setoid.
Existing Instance sum_Proper.
Existing Instance prod_Proper.
Existing Instance exp_Proper.
Notation "∅" := mt.
Notation "·" := trv.
Infix "+" := sum.
Infix "*" := prod.
Infix "^" := exp.
#[universes(cumulative)]
Class Homomorphism {A B: Term} (f: A → B): Prop := {
map_Proper: Proper (equiv ==> equiv) f ;
map_mt: f ∅ == ∅ ;
map_trv: f · == · ;
map_sum x y: f (x + y) == f x + f y ;
map_prod x y: f (x * y) == f x * f y ;
map_exp x y: f (exp x y) == exp (f x) (f y) ;
}.
Existing Instance map_Proper.
Module Import Hom.
Definition Hom (A B: Term) := { f: A → B | Homomorphism f}.
#[program]
Definition id A : Hom A A := λ x, x.
Next Obligation.
Proof.
Admitted.
End Hom.
Module Terminal.
#[refine]
Instance terminal_Setoid: Setoid unit := {
equiv _ _ := True ;
}.
Proof.
exists.
all: exists.
Defined.
#[refine]
Instance Terminal: Term := {
T := unit ;
mt := tt ;
trv := tt ;
sum _ _ := tt ;
prod _ _ := tt ;
exp _ _ := tt ;
}.
Proof.
all: intros ? ? ? ? ? ?.
all: reflexivity.
Defined.
End Terminal.
Module Free.
#[universes(cumulative)]
Inductive free {U: Type} :=
| η (u: U)
| mt | trv
| sum (A B: free)
| prod (A B: free)
| exp (B A: free)
.
Arguments free: clear implicits.
Instance free_Setoid U: Setoid (free U) := {
equiv := eq ;
}.
Instance Free U: Term := {
T := free U ;
mt := mt ;
trv := trv ;
sum := sum ;
prod := prod ;
exp := exp ;
}.
#[program]
Definition ε (A: Term): Hom (Free A) A :=
fix loop e :=
match e with
| η T => T
| mt => ∅
| trv => ·
| sum A B => loop A + loop B
| prod A B => loop A * loop B
| exp A B => loop A ^ loop B
end.
Next Obligation.
Proof.
exists.
all: cbn.
all: try reflexivity.
intros ? ? p.
rewrite p.
reflexivity.
Qed.
#[program]
Definition map {A B} (f: A → B): Hom (Free A) (Free B) :=
fix loop x :=
match x with
| η u => η (f u)
| mt => mt
| trv => trv
| sum A B => sum (loop A) (loop B)
| prod A B => prod (loop A) (loop B)
| exp A B => exp (loop A) (loop B)
end.
Next Obligation.
Proof.
exists.
all: cbn.
all: try reflexivity.
intros x y p.
induction x.
all: inversion p.
all: subst.
all: cbn.
all: try reflexivity.
Qed.
#[program]
Definition join (A: Type): Hom (Free (Free A)) (Free A) :=
fix loop e :=
match e with
| η T => T
| mt => ∅
| trv => ·
| sum A B => loop A + loop B
| prod A B => loop A * loop B
| exp A B => loop A ^ loop B
end.
Next Obligation.
Proof.
exists.
all: cbn.
all: try reflexivity.
intros ? ? p.
rewrite p.
reflexivity.
Qed.
#[program]
Definition bind {A B} (f: A → Free B): Hom (Free A) (Free B) := λ x, proj1_sig (join _) (proj1_sig (map f) x).
Next Obligation.
Proof.
Admitted.
End Free.
Module Pullback.
Definition pullback {A B C} (f: Hom A C) (g: Hom B C) :=
{ '(x, y) | proj1_sig f x == proj1_sig g y }.
#[program]
Instance pullback_Setoid {A B C} (f: Hom A C) (g: Hom B C): Setoid (pullback f g) := {
equiv x y := fst (proj1_sig x) == fst (proj1_sig y) ∧ snd (proj1_sig x) == snd (proj1_sig y) ;
}.
Next Obligation.
Proof.
exists.
- intros ?.
split.
all: reflexivity.
- intros ? ? p.
destruct p.
split.
all: symmetry.
all: auto.
- intros ? ? ? p q.
destruct p as [p1 p2], q as [q1 q2].
rewrite p1,p2,q1,q2.
split.
all: reflexivity.
Defined.
#[program]
Instance Pullback {A B C} (f: Hom A C) (g: Hom B C): Term := {
T := pullback f g ;
mt := (∅, ∅) ;
trv := (·, ·) ;
sum A B := (fst A + fst B, snd A + snd B) ;
prod A B := (fst A * fst B, snd A * snd B) ;
exp A B := (fst A ^ fst B, snd A ^ snd B) ;
}.
Next Obligation.
Proof.
destruct f, g.
cbn.
repeat rewrite map_mt.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct f, g.
cbn.
repeat rewrite map_trv.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct f as [f fp], g as [g p],
A as [[A1 A2] Ap], B as [[B1 B2] Bp].
cbn in *.
repeat rewrite map_sum.
rewrite Ap, Bp.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct f as [f fp], g as [g p],
A as [[A1 A2] Ap], B as [[B1 B2] Bp].
cbn in *.
repeat rewrite map_prod.
rewrite Ap, Bp.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct f as [f fp], g as [g p],
A as [[A1 A2] Ap], B as [[B1 B2] Bp].
cbn in *.
repeat rewrite map_exp.
rewrite Ap, Bp.
reflexivity.
Qed.
Next Obligation.
Proof.
intros x x' p y y' q.
cbn in *.
destruct x as [[x1 x2] xp],
x' as [[x1' x2'] xp'],
y as [[y1 y2] yp],
y' as [[y1' y2'] yp'].
cbn in *.
destruct p as [p1 p2], q as [q1 q2].
cbn in *.
rewrite p1, p2, q1, q2.
split.
all: reflexivity.
Qed.
Next Obligation.
Proof.
intros x x' p y y' q.
cbn in *.
destruct x as [[x1 x2] xp],
x' as [[x1' x2'] xp'],
y as [[y1 y2] yp],
y' as [[y1' y2'] yp'].
cbn in *.
destruct p as [p1 p2], q as [q1 q2].
cbn in *.
rewrite p1, p2, q1, q2.
split.
all: reflexivity.
Qed.
Next Obligation.
Proof.
intros x x' p y y' q.
cbn in *.
destruct x as [[x1 x2] xp],
x' as [[x1' x2'] xp'],
y as [[y1 y2] yp],
y' as [[y1' y2'] yp'].
cbn in *.
destruct p as [p1 p2], q as [q1 q2].
cbn in *.
rewrite p1, p2, q1, q2.
split.
all: reflexivity.
Qed.
End Pullback.
Module Slice.
(* Term/S *)
#[universes(cumulative)]
Record bundle A := {
s: Term ;
π: Hom s A ;
}.
Arguments s {A}.
Arguments π {A}.
Definition slice {S} (A B: bundle S) := { f : s A → s B | Homomorphism f ∧ ∀ x, proj1_sig (π B) (f x) == proj1_sig (π A) x }.
#[program]
Definition id {S} (A: bundle S): slice A A := λ x, x.
Next Obligation.
Proof.
split.
- exists.
all: try reflexivity.
intros ? ? ?.
auto.
- reflexivity.
Qed.
#[program]
Definition compose {S} {A B C: bundle S} (f: slice B C) (g: slice A B): slice A C :=
λ x, f (g x).
Next Obligation.
Proof.
destruct f as [f [fH fp]], g as [g [gH gp]].
cbn in *.
split.
- exists.
+ intros ? ? p.
rewrite p.
reflexivity.
+ repeat rewrite map_mt.
reflexivity.
+ repeat rewrite map_trv.
reflexivity.
+ intros.
repeat rewrite map_sum.
reflexivity.
+ intros.
repeat rewrite map_prod.
reflexivity.
+ intros.
repeat rewrite map_exp.
reflexivity.
- intros.
rewrite (fp _).
rewrite (gp _).
reflexivity.
Qed.
Import Terminal.
#[program]
Definition No (S: Term): bundle Terminal := {| π (_: S) := tt |}.
Next Obligation.
Proof.
exists.
all: try reflexivity.
intros ? ? p.
reflexivity.
Qed.
#[program]
Definition map {A B} (f: Hom A B): slice (No A) (No B) := f.
Next Obligation.
Proof.
destruct f as [f fh].
cbn.
split.
- exists.
+ intros ? ? p.
rewrite p.
reflexivity.
+ rewrite map_mt.
reflexivity.
+ rewrite map_trv.
reflexivity.
+ intros.
rewrite map_sum.
reflexivity.
+ intros.
rewrite map_prod.
reflexivity.
+ intros.
rewrite map_exp.
reflexivity.
- intros.
exists.
Qed.
Infix "∘" := compose (at level 30).
#[program]
Definition basechange {A B: Term} (f: Hom A B) (x: bundle B): bundle A :=
{| s := Pullback.Pullback f (π x) ;
π x := fst x |}.
Next Obligation.
Proof.
exists.
all: cbn.
all: try reflexivity.
intros ? ? p.
destruct p as [p q].
rewrite p.
reflexivity.
Qed.
#[program]
Definition Σ {A B: Term} (f: Hom A B) (g: bundle A): bundle B :=
{| π x := f (π g x) |}.
Next Obligation.
Proof.
destruct f as [f fp], g as [s [g gp]].
cbn.
exists.
all: cbn.
- intros ? ? p.
rewrite p.
reflexivity.
- repeat rewrite map_mt.
reflexivity.
- repeat rewrite map_trv.
reflexivity.
- intros.
repeat rewrite map_sum.
reflexivity.
- intros.
repeat rewrite map_prod.
reflexivity.
- intros.
repeat rewrite map_exp.
reflexivity.
Qed.
#[program]
Definition Π {A B: Term} (f: Hom A B) (g: bundle A): bundle B :=
{| s := Pullback.Pullback (π g) (Hom.id _) ;
π x := f (snd (proj1_sig x)) ; |}.
Next Obligation.
Proof.
destruct f as [f fp].
destruct g as [s [g gp]].
cbn.
exists.
all: cbn.
- intros ? ? p.
destruct p as [p q].
rewrite q.
reflexivity.
- rewrite map_mt.
reflexivity.
- rewrite map_trv.
reflexivity.
- intros.
rewrite map_sum.
reflexivity.
- intros.
rewrite map_prod.
reflexivity.
- intros.
rewrite map_exp.
reflexivity.
Qed.
(* Reader monad IIRC *)
#[program]
Definition pure {S T} {A: bundle S} (f: Hom S T): slice A (basechange f (Π f A)) :=
λ x, (proj1_sig (π A) x, (x, proj1_sig (π A) x)).
Next Obligation.
Proof.
reflexivity.
Qed.
Next Obligation.
Proof.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct A as [sA [A Ap]],
f as [f fp].
cbn in *.
split.
- exists.
all: cbn.
+ intros ? ? p.
cbn.
repeat rewrite p.
all: repeat split.
all: reflexivity.
+ repeat rewrite map_mt.
all: repeat split.
all: reflexivity.
+ repeat rewrite map_trv.
all: repeat split.
all: reflexivity.
+ intros.
repeat rewrite map_sum.
all: repeat split.
all: reflexivity.
+ intros.
repeat rewrite map_prod.
all: repeat split.
all: reflexivity.
+ intros.
repeat rewrite map_exp.
all: repeat split.
all: reflexivity.
- intros.
reflexivity.
Qed.
End Slice.
Module StructuredSlice.
Import Slice.
Import Free.
Definition Obj S := bundle (Free S).
Definition Struct {S}: Obj S → Obj S → Type := @slice (Free S).
Definition id {S} (A: Obj S) := Slice.id A.
Definition compose {S} {A B C: Obj S} := @Slice.compose _ A B C.
Infix "∘" := compose (at level 30).
Definition Any S: Obj S := {| π := Hom.id _ |}.
Definition basechange {A B: Type} (f: A → Free B): Obj B → Obj A :=
basechange (Free.bind f).
Definition Σ {A B: Type} (f: A → Free B): Obj A → Obj B :=
Σ (Free.bind f).
Definition Π {A B: Type} (f: A → Free B): Obj A → Obj B :=
Π (Free.bind f).
Definition pure {S T} {A: Obj S} (f: S → Free T): Struct A (basechange f (Π f A)) :=
pure (Free.bind f).
Infix "::" := basechange.
End StructuredSlice.
Module Span.
#[universes(cumulative)]
Record span A B := {
s: Term ;
π1: Hom s A ;
π2: Hom s B ;
}.
Arguments s {A B}.
Arguments π1 {A B}.
Arguments π2 {A B}.
#[program]
Definition id A: span A A := {| π1 x := x ; π2 x := x |}.
Next Obligation.
Proof.
exists.
all: try reflexivity.
intros ? ? ?.
auto.
Qed.
Next Obligation.
Proof.
exists.
all: try reflexivity.
intros ? ? ?.
auto.
Qed.
#[program]
Definition compose {A B C} (f: span B C) (g: span A B): span A C :=
{|
s := Pullback.Pullback (π1 f) (π2 g) ;
π1 x := proj1_sig (π1 g) (snd (proj1_sig x)) ;
π2 x := proj1_sig (π2 f) (fst (proj1_sig x)) ;
|}.
Next Obligation.
Proof.
destruct f as [fs [f1 f1p] [f2 f2p]].
destruct g as [gs [g1 g1p] [g2 g2p]].
cbn.
exists.
- intros ? ? p.
destruct p as [p q].
rewrite q.
reflexivity.
- cbn.
rewrite map_mt.
reflexivity.
- cbn.
rewrite map_trv.
reflexivity.
- intros.
cbn.
rewrite map_sum.
reflexivity.
- intros.
cbn.
rewrite map_prod.
reflexivity.
- intros.
cbn.
rewrite map_exp.
reflexivity.
Qed.
Next Obligation.
Proof.
destruct f as [fs [f1 f1p] [f2 f2p]].
destruct g as [gs [g1 g1p] [g2 g2p]].
cbn.
exists.
- intros ? ? p.
destruct p as [p q].
rewrite p.
reflexivity.
- cbn.
rewrite map_mt.
reflexivity.
- cbn.
rewrite map_trv.
reflexivity.
- intros.
cbn.
rewrite map_sum.
reflexivity.
- intros.
cbn.
rewrite map_prod.
reflexivity.
- intros.
cbn.
rewrite map_exp.
reflexivity.
Qed.
#[program]
Definition transpose {A B} (f: span A B): span B A :=
{| π1 := π2 f ; π2 := π1 f |}.
#[program]
Definition map {A B} (f: Hom A B): span A B := {| π1 x := x ; π2 := f |}.
Next Obligation.
Proof.
exists.
all: try reflexivity.
intros ? ? ?.
auto.
Qed.
Infix "∘" := compose (at level 30).
Notation "f 'ᵀ'" := (transpose f) (at level 1).
End Span.
Module Structured.
Import Span.
Import Free.
Definition Struct A B := span (Free A) (Free B).
Definition id A := Span.id (Free A).
Definition compose {A B C} := @Span.compose (Free A) (Free B) (Free C).
Definition transpose {A B} := @Span.transpose (Free A) (Free B).
Definition map {A B}: Hom (Free A) (Free B) → Struct A B := Span.map.
Infix "∘" := compose (at level 30).
Notation "f 'ᵀ'" := (transpose f) (at level 1).
End Structured.
Import Free.
Import Slice.
Import StructuredSlice.
Open Scope string_scope.
Definition subst x (e: Free string) y: Free string :=
if string_dec x y then e else η y.
Infix "::=" := subst (at level 30).
(* FIXME define generic adjunction first then specialize to substitution *)
Definition eval {A: Term}: Obj A → bundle A := Slice.Π (ε _).
#[program]
Definition close (S: Term) {A B: Obj S} (f: Struct A B): slice (eval A) (eval B) :=
λ x, (proj1_sig f (fst (proj1_sig x)), snd (proj1_sig x)).
Next Obligation.
Proof.
destruct x as [[x y] xp].
destruct f as [f [fH fp]].
cbn in *.
rewrite <- (fp x) in xp.
auto.
Qed.
Next Obligation.
Proof.
destruct f as [f [p q]].
cbn in *.
split.
- exists.
all: cbn.
+ intros ? ? r.
destruct r as [r r'].
cbn.
rewrite r, r'.
split.
all: reflexivity.
+ split.
2: reflexivity.
rewrite map_mt.
reflexivity.
+ split.
2: reflexivity.
rewrite map_trv.
reflexivity.
+ split.
2: reflexivity.
rewrite map_sum.
reflexivity.
+ split.
2: reflexivity.
rewrite map_prod.
reflexivity.
+ split.
2: reflexivity.
rewrite map_exp.
reflexivity.
- intros x.
reflexivity.
Qed.
Instance Type_Setoid: Setoid Type := {
equiv := eq ;
}.
#[program]
Instance Set_Term: Term := {
T := Set ;
mt := Empty_set ;
trv := unit ;
sum := Datatypes.sum ;
prod := Datatypes.prod ;
exp A B := A → B ;
}.
Definition close_Term {A B} := @close Set_Term A B.
Definition Forsome (x: Obj nat): Obj nat := basechange (λ x, η (S x)) (Π (λ x, η (S x)) x).
Definition Forall (x: Obj nat): Obj nat := basechange (λ x, η (S x)) (Π (λ x, η (S x)) x).
(* weird *)
Definition foo: Struct (Any nat) (Forall (Any nat)) := pure (λ x, η (S x)).
|
module ComplexIMPORT where
{-# IMPORT Prelude as P #-}
|
/-
M1F 2017-18 Sheet 1 Question 2 to 4 solutions.
Author : Kevin Buzzard
This file should work with any version of lean -- whether you installed it yourself
or are running the version on https://leanprover.github.io/live/latest/
-/
-- We probably need the "law of the excluded middle" for this question -- every
-- proposition is either true or false! Don't even ask me to explain what the
-- other options are, but Lean does not come with this axiom by default (blame
-- the computer scientists) and mathematicians have to add it themselves.
-- It's easy to add though. "em" for excluded middle.
axiom em (X : Prop) : X ∨ ¬ X
variables P Q R S : Prop -- A "Prop" is a proposition, that is, a true/false statement.
-- Sheet 1 Q2 is true.
theorem m1f_sheet01_q02_is_T (HQP : Q → P) (HnQnR : ¬ Q → ¬ R) : R → P :=
begin
intro HR, -- hypothesis R
cases em Q with HQ HnQ, -- Q is either true or false.
-- Q is true in this branch.
exact HQP HQ, -- HQP HQ is a proof of P.
-- Q is false in this branch
-- HnQ is the hypothesis "not Q"
-- HnQnR is "not Q implies not R"
-- so HnQnR HnQ is a proof of "not R"
-- i.e. a proof of "R implies false"
-- but HR is a proof of R
-- and that's enough for a contradiction.
have HnR : ¬ R,
exact HnQnR HnQ,
contradiction,
end
|
/-
Copyright (c) 2020 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Anatole Dedecker, Sébastien Gouëzel, Yury G. Kudryashov, Dylan MacKenzie, Patrick Massot
-/
import algebra.order.field.basic
import analysis.asymptotics.asymptotics
import analysis.specific_limits.basic
/-!
# A collection of specific limit computations
This file contains important specific limit computations in (semi-)normed groups/rings/spaces,
as well as such computations in `ℝ` when the natural proof passes through a fact about normed
spaces.
-/
noncomputable theory
open classical set function filter finset metric asymptotics
open_locale classical topology nat big_operators uniformity nnreal ennreal
variables {α : Type*} {β : Type*} {ι : Type*}
lemma tendsto_norm_at_top_at_top : tendsto (norm : ℝ → ℝ) at_top at_top :=
tendsto_abs_at_top_at_top
lemma summable_of_absolute_convergence_real {f : ℕ → ℝ} :
(∃r, tendsto (λn, (∑ i in range n, |f i|)) at_top (𝓝 r)) → summable f
| ⟨r, hr⟩ :=
begin
refine summable_of_summable_norm ⟨r, (has_sum_iff_tendsto_nat_of_nonneg _ _).2 _⟩,
exact assume i, norm_nonneg _,
simpa only using hr
end
/-! ### Powers -/
lemma tendsto_norm_zero' {𝕜 : Type*} [normed_add_comm_group 𝕜] :
tendsto (norm : 𝕜 → ℝ) (𝓝[≠] 0) (𝓝[>] 0) :=
tendsto_norm_zero.inf $ tendsto_principal_principal.2 $ λ x hx, norm_pos_iff.2 hx
namespace normed_field
lemma tendsto_norm_inverse_nhds_within_0_at_top {𝕜 : Type*} [normed_field 𝕜] :
tendsto (λ x:𝕜, ‖x⁻¹‖) (𝓝[≠] 0) at_top :=
(tendsto_inv_zero_at_top.comp tendsto_norm_zero').congr $ λ x, (norm_inv x).symm
lemma tendsto_norm_zpow_nhds_within_0_at_top {𝕜 : Type*} [normed_field 𝕜] {m : ℤ}
(hm : m < 0) :
tendsto (λ x : 𝕜, ‖x ^ m‖) (𝓝[≠] 0) at_top :=
begin
rcases neg_surjective m with ⟨m, rfl⟩,
rw neg_lt_zero at hm, lift m to ℕ using hm.le, rw int.coe_nat_pos at hm,
simp only [norm_pow, zpow_neg, zpow_coe_nat, ← inv_pow],
exact (tendsto_pow_at_top hm.ne').comp normed_field.tendsto_norm_inverse_nhds_within_0_at_top
end
/-- The (scalar) product of a sequence that tends to zero with a bounded one also tends to zero. -/
lemma tendsto_zero_smul_of_tendsto_zero_of_bounded {ι 𝕜 𝔸 : Type*} [normed_field 𝕜]
[normed_add_comm_group 𝔸] [normed_space 𝕜 𝔸] {l : filter ι} {ε : ι → 𝕜} {f : ι → 𝔸}
(hε : tendsto ε l (𝓝 0)) (hf : filter.is_bounded_under (≤) l (norm ∘ f)) :
tendsto (ε • f) l (𝓝 0) :=
begin
rw ← is_o_one_iff 𝕜 at hε ⊢,
simpa using is_o.smul_is_O hε (hf.is_O_const (one_ne_zero : (1 : 𝕜) ≠ 0))
end
@[simp] lemma continuous_at_zpow {𝕜 : Type*} [nontrivially_normed_field 𝕜] {m : ℤ} {x : 𝕜} :
continuous_at (λ x, x ^ m) x ↔ x ≠ 0 ∨ 0 ≤ m :=
begin
refine ⟨_, continuous_at_zpow₀ _ _⟩,
contrapose!, rintro ⟨rfl, hm⟩ hc,
exact not_tendsto_at_top_of_tendsto_nhds (hc.tendsto.mono_left nhds_within_le_nhds).norm
(tendsto_norm_zpow_nhds_within_0_at_top hm)
end
@[simp] lemma continuous_at_inv {𝕜 : Type*} [nontrivially_normed_field 𝕜] {x : 𝕜} :
continuous_at has_inv.inv x ↔ x ≠ 0 :=
by simpa [(zero_lt_one' ℤ).not_le] using @continuous_at_zpow _ _ (-1) x
end normed_field
lemma is_o_pow_pow_of_lt_left {r₁ r₂ : ℝ} (h₁ : 0 ≤ r₁) (h₂ : r₁ < r₂) :
(λ n : ℕ, r₁ ^ n) =o[at_top] (λ n, r₂ ^ n) :=
have H : 0 < r₂ := h₁.trans_lt h₂,
is_o_of_tendsto (λ n hn, false.elim $ H.ne' $ pow_eq_zero hn) $
(tendsto_pow_at_top_nhds_0_of_lt_1 (div_nonneg h₁ (h₁.trans h₂.le)) ((div_lt_one H).2 h₂)).congr
(λ n, div_pow _ _ _)
lemma is_O_pow_pow_of_le_left {r₁ r₂ : ℝ} (h₁ : 0 ≤ r₁) (h₂ : r₁ ≤ r₂) :
(λ n : ℕ, r₁ ^ n) =O[at_top] (λ n, r₂ ^ n) :=
h₂.eq_or_lt.elim (λ h, h ▸ is_O_refl _ _) (λ h, (is_o_pow_pow_of_lt_left h₁ h).is_O)
lemma is_o_pow_pow_of_abs_lt_left {r₁ r₂ : ℝ} (h : |r₁| < |r₂|) :
(λ n : ℕ, r₁ ^ n) =o[at_top] (λ n, r₂ ^ n) :=
begin
refine (is_o.of_norm_left _).of_norm_right,
exact (is_o_pow_pow_of_lt_left (abs_nonneg r₁) h).congr (pow_abs r₁) (pow_abs r₂)
end
/-- Various statements equivalent to the fact that `f n` grows exponentially slower than `R ^ n`.
* 1: $f n = o(a ^ n)$ for some $-R < a < R$;
* 2: $f n = o(a ^ n)$ for some $0 < a < R$;
* 3: $f n = O(a ^ n)$ for some $-R < a < R$;
* 4: $f n = O(a ^ n)$ for some $0 < a < R$;
* 5: there exist `a < R` and `C` such that one of `C` and `R` is positive and $|f n| ≤ Ca^n$
  for all `n`;
* 6: there exist `0 < a < R` and a positive `C` such that $|f n| ≤ Ca^n$ for all `n`;
* 7: there exists `a < R` such that $|f n| ≤ a ^ n$ for sufficiently large `n`;
* 8: there exists `0 < a < R` such that $|f n| ≤ a ^ n$ for sufficiently large `n`.
NB: For backwards compatibility, if you add more items to the list, please append them at the end of
the list. -/
lemma tfae_exists_lt_is_o_pow (f : ℕ → ℝ) (R : ℝ) :
tfae [∃ a ∈ Ioo (-R) R, f =o[at_top] pow a,
∃ a ∈ Ioo 0 R, f =o[at_top] (pow a),
∃ a ∈ Ioo (-R) R, f =O[at_top] pow a,
∃ a ∈ Ioo 0 R, f =O[at_top] pow a,
∃ (a < R) C (h₀ : 0 < C ∨ 0 < R), ∀ n, |f n| ≤ C * a ^ n,
∃ (a ∈ Ioo 0 R) (C > 0), ∀ n, |f n| ≤ C * a ^ n,
∃ a < R, ∀ᶠ n in at_top, |f n| ≤ a ^ n,
∃ a ∈ Ioo 0 R, ∀ᶠ n in at_top, |f n| ≤ a ^ n] :=
begin
have A : Ico 0 R ⊆ Ioo (-R) R,
from λ x hx, ⟨(neg_lt_zero.2 (hx.1.trans_lt hx.2)).trans_le hx.1, hx.2⟩,
have B : Ioo 0 R ⊆ Ioo (-R) R := subset.trans Ioo_subset_Ico_self A,
-- First we prove that 1-4 are equivalent using 2 → 3 → 4, 1 → 3, and 2 → 1
tfae_have : 1 → 3, from λ ⟨a, ha, H⟩, ⟨a, ha, H.is_O⟩,
tfae_have : 2 → 1, from λ ⟨a, ha, H⟩, ⟨a, B ha, H⟩,
tfae_have : 3 → 2,
{ rintro ⟨a, ha, H⟩,
rcases exists_between (abs_lt.2 ha) with ⟨b, hab, hbR⟩,
exact ⟨b, ⟨(abs_nonneg a).trans_lt hab, hbR⟩,
H.trans_is_o (is_o_pow_pow_of_abs_lt_left (hab.trans_le (le_abs_self b)))⟩ },
tfae_have : 2 → 4, from λ ⟨a, ha, H⟩, ⟨a, ha, H.is_O⟩,
tfae_have : 4 → 3, from λ ⟨a, ha, H⟩, ⟨a, B ha, H⟩,
-- Add 5 and 6 using 4 → 6 → 5 → 3
tfae_have : 4 → 6,
{ rintro ⟨a, ha, H⟩,
rcases bound_of_is_O_nat_at_top H with ⟨C, hC₀, hC⟩,
refine ⟨a, ha, C, hC₀, λ n, _⟩,
simpa only [real.norm_eq_abs, abs_pow, abs_of_nonneg ha.1.le]
using hC (pow_ne_zero n ha.1.ne') },
tfae_have : 6 → 5, from λ ⟨a, ha, C, H₀, H⟩, ⟨a, ha.2, C, or.inl H₀, H⟩,
tfae_have : 5 → 3,
{ rintro ⟨a, ha, C, h₀, H⟩,
rcases sign_cases_of_C_mul_pow_nonneg (λ n, (abs_nonneg _).trans (H n)) with rfl | ⟨hC₀, ha₀⟩,
{ obtain rfl : f = 0, by { ext n, simpa using H n },
simp only [lt_irrefl, false_or] at h₀,
exact ⟨0, ⟨neg_lt_zero.2 h₀, h₀⟩, is_O_zero _ _⟩ },
exact ⟨a, A ⟨ha₀, ha⟩,
is_O_of_le' _ (λ n, (H n).trans $ mul_le_mul_of_nonneg_left (le_abs_self _) hC₀.le)⟩ },
-- Add 7 and 8 using 2 → 8 → 7 → 3
tfae_have : 2 → 8,
{ rintro ⟨a, ha, H⟩,
refine ⟨a, ha, (H.def zero_lt_one).mono (λ n hn, _)⟩,
rwa [real.norm_eq_abs, real.norm_eq_abs, one_mul, abs_pow, abs_of_pos ha.1] at hn },
tfae_have : 8 → 7, from λ ⟨a, ha, H⟩, ⟨a, ha.2, H⟩,
tfae_have : 7 → 3,
{ rintro ⟨a, ha, H⟩,
have : 0 ≤ a, from nonneg_of_eventually_pow_nonneg (H.mono $ λ n, (abs_nonneg _).trans),
refine ⟨a, A ⟨this, ha⟩, is_O.of_bound 1 _⟩,
simpa only [real.norm_eq_abs, one_mul, abs_pow, abs_of_nonneg this] },
tfae_finish
end
/-- For any natural `k` and a real `r > 1` we have `n ^ k = o(r ^ n)` as `n → ∞`. -/
lemma is_o_pow_const_const_pow_of_one_lt {R : Type*} [normed_ring R] (k : ℕ) {r : ℝ} (hr : 1 < r) :
(λ n, n ^ k : ℕ → R) =o[at_top] (λ n, r ^ n) :=
begin
have : tendsto (λ x : ℝ, x ^ k) (𝓝[>] 1) (𝓝 1),
from ((continuous_id.pow k).tendsto' (1 : ℝ) 1 (one_pow _)).mono_left inf_le_left,
obtain ⟨r' : ℝ, hr' : r' ^ k < r, h1 : 1 < r'⟩ :=
((this.eventually (gt_mem_nhds hr)).and self_mem_nhds_within).exists,
have h0 : 0 ≤ r' := zero_le_one.trans h1.le,
suffices : (λ n, n ^ k : ℕ → R) =O[at_top] (λ n : ℕ, (r' ^ k) ^ n),
from this.trans_is_o (is_o_pow_pow_of_lt_left (pow_nonneg h0 _) hr'),
conv in ((r' ^ _) ^ _) { rw [← pow_mul, mul_comm, pow_mul] },
suffices : ∀ n : ℕ, ‖(n : R)‖ ≤ (r' - 1)⁻¹ * ‖(1 : R)‖ * ‖r' ^ n‖,
from (is_O_of_le' _ this).pow _,
intro n, rw mul_right_comm,
refine n.norm_cast_le.trans (mul_le_mul_of_nonneg_right _ (norm_nonneg _)),
simpa [div_eq_inv_mul, real.norm_eq_abs, abs_of_nonneg h0] using n.cast_le_pow_div_sub h1
end
/-- For a real `r > 1` we have `n = o(r ^ n)` as `n → ∞`. -/
lemma is_o_coe_const_pow_of_one_lt {R : Type*} [normed_ring R] {r : ℝ} (hr : 1 < r) :
(coe : ℕ → R) =o[at_top] (λ n, r ^ n) :=
by simpa only [pow_one] using @is_o_pow_const_const_pow_of_one_lt R _ 1 _ hr
/-- If `‖r₁‖ < r₂`, then for any natural `k` we have `n ^ k r₁ ^ n = o (r₂ ^ n)` as `n → ∞`. -/
lemma is_o_pow_const_mul_const_pow_const_pow_of_norm_lt {R : Type*} [normed_ring R] (k : ℕ)
{r₁ : R} {r₂ : ℝ} (h : ‖r₁‖ < r₂) :
(λ n, n ^ k * r₁ ^ n : ℕ → R) =o[at_top] (λ n, r₂ ^ n) :=
begin
by_cases h0 : r₁ = 0,
{ refine (is_o_zero _ _).congr' (mem_at_top_sets.2 $ ⟨1, λ n hn, _⟩) eventually_eq.rfl,
simp [zero_pow (zero_lt_one.trans_le hn), h0] },
rw [← ne.def, ← norm_pos_iff] at h0,
have A : (λ n, n ^ k : ℕ → R) =o[at_top] (λ n, (r₂ / ‖r₁‖) ^ n),
from is_o_pow_const_const_pow_of_one_lt k ((one_lt_div h0).2 h),
suffices : (λ n, r₁ ^ n) =O[at_top] (λ n, ‖r₁‖ ^ n),
by simpa [div_mul_cancel _ (pow_pos h0 _).ne'] using A.mul_is_O this,
exact is_O.of_bound 1 (by simpa using eventually_norm_pow_le r₁)
end
lemma tendsto_pow_const_div_const_pow_of_one_lt (k : ℕ) {r : ℝ} (hr : 1 < r) :
tendsto (λ n, n ^ k / r ^ n : ℕ → ℝ) at_top (𝓝 0) :=
(is_o_pow_const_const_pow_of_one_lt k hr).tendsto_div_nhds_zero
/-- If `|r| < 1`, then `n ^ k r ^ n` tends to zero for any natural `k`. -/
lemma tendsto_pow_const_mul_const_pow_of_abs_lt_one (k : ℕ) {r : ℝ} (hr : |r| < 1) :
tendsto (λ n, n ^ k * r ^ n : ℕ → ℝ) at_top (𝓝 0) :=
begin
by_cases h0 : r = 0,
{ exact tendsto_const_nhds.congr'
(mem_at_top_sets.2 ⟨1, λ n hn, by simp [zero_lt_one.trans_le hn, h0]⟩) },
have hr' : 1 < (|r|)⁻¹, from one_lt_inv (abs_pos.2 h0) hr,
rw tendsto_zero_iff_norm_tendsto_zero,
simpa [div_eq_mul_inv] using tendsto_pow_const_div_const_pow_of_one_lt k hr'
end
/-- If `0 ≤ r < 1`, then `n ^ k r ^ n` tends to zero for any natural `k`.
This is a specialized version of `tendsto_pow_const_mul_const_pow_of_abs_lt_one`, singled out
for ease of application. -/
lemma tendsto_pow_const_mul_const_pow_of_lt_one (k : ℕ) {r : ℝ} (hr : 0 ≤ r) (h'r : r < 1) :
tendsto (λ n, n ^ k * r ^ n : ℕ → ℝ) at_top (𝓝 0) :=
tendsto_pow_const_mul_const_pow_of_abs_lt_one k (abs_lt.2 ⟨neg_one_lt_zero.trans_le hr, h'r⟩)
/-- If `|r| < 1`, then `n * r ^ n` tends to zero. -/
lemma tendsto_self_mul_const_pow_of_abs_lt_one {r : ℝ} (hr : |r| < 1) :
tendsto (λ n, n * r ^ n : ℕ → ℝ) at_top (𝓝 0) :=
by simpa only [pow_one] using tendsto_pow_const_mul_const_pow_of_abs_lt_one 1 hr
/-- If `0 ≤ r < 1`, then `n * r ^ n` tends to zero. This is a specialized version of
`tendsto_self_mul_const_pow_of_abs_lt_one`, singled out for ease of application. -/
lemma tendsto_self_mul_const_pow_of_lt_one {r : ℝ} (hr : 0 ≤ r) (h'r : r < 1) :
tendsto (λ n, n * r ^ n : ℕ → ℝ) at_top (𝓝 0) :=
by simpa only [pow_one] using tendsto_pow_const_mul_const_pow_of_lt_one 1 hr h'r
/-- In a normed ring, the powers of an element x with `‖x‖ < 1` tend to zero. -/
lemma tendsto_pow_at_top_nhds_0_of_norm_lt_1 {R : Type*} [normed_ring R] {x : R}
(h : ‖x‖ < 1) : tendsto (λ (n : ℕ), x ^ n) at_top (𝓝 0) :=
begin
apply squeeze_zero_norm' (eventually_norm_pow_le x),
exact tendsto_pow_at_top_nhds_0_of_lt_1 (norm_nonneg _) h,
end
lemma tendsto_pow_at_top_nhds_0_of_abs_lt_1 {r : ℝ} (h : |r| < 1) :
tendsto (λn:ℕ, r^n) at_top (𝓝 0) :=
tendsto_pow_at_top_nhds_0_of_norm_lt_1 h
/-! ### Geometric series -/
section geometric
variables {K : Type*} [normed_field K] {ξ : K}
lemma has_sum_geometric_of_norm_lt_1 (h : ‖ξ‖ < 1) : has_sum (λn:ℕ, ξ ^ n) (1 - ξ)⁻¹ :=
begin
have xi_ne_one : ξ ≠ 1, by { contrapose! h, simp [h] },
have A : tendsto (λn, (ξ ^ n - 1) * (ξ - 1)⁻¹) at_top (𝓝 ((0 - 1) * (ξ - 1)⁻¹)),
from ((tendsto_pow_at_top_nhds_0_of_norm_lt_1 h).sub tendsto_const_nhds).mul tendsto_const_nhds,
rw [has_sum_iff_tendsto_nat_of_summable_norm],
{ simpa [geom_sum_eq, xi_ne_one, neg_inv, div_eq_mul_inv] using A },
{ simp [norm_pow, summable_geometric_of_lt_1 (norm_nonneg _) h] }
end
lemma summable_geometric_of_norm_lt_1 (h : ‖ξ‖ < 1) : summable (λn:ℕ, ξ ^ n) :=
⟨_, has_sum_geometric_of_norm_lt_1 h⟩
lemma tsum_geometric_of_norm_lt_1 (h : ‖ξ‖ < 1) : ∑'n:ℕ, ξ ^ n = (1 - ξ)⁻¹ :=
(has_sum_geometric_of_norm_lt_1 h).tsum_eq
lemma has_sum_geometric_of_abs_lt_1 {r : ℝ} (h : |r| < 1) : has_sum (λn:ℕ, r ^ n) (1 - r)⁻¹ :=
has_sum_geometric_of_norm_lt_1 h
lemma summable_geometric_of_abs_lt_1 {r : ℝ} (h : |r| < 1) : summable (λn:ℕ, r ^ n) :=
summable_geometric_of_norm_lt_1 h
lemma tsum_geometric_of_abs_lt_1 {r : ℝ} (h : |r| < 1) : ∑'n:ℕ, r ^ n = (1 - r)⁻¹ :=
tsum_geometric_of_norm_lt_1 h
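-- Added worked instance (editorial illustration, not from the original file): for `r = 1/2 : ℝ`
-- the hypothesis `|r| < 1` holds, and `tsum_geometric_of_abs_lt_1` specialises to
-- `∑' n : ℕ, ((1:ℝ)/2) ^ n = (1 - 1/2)⁻¹ = 2`.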
/-- A geometric series in a normed field is summable iff the norm of the common ratio is less than
one. -/
@[simp] lemma summable_geometric_iff_norm_lt_1 : summable (λ n : ℕ, ξ ^ n) ↔ ‖ξ‖ < 1 :=
begin
refine ⟨λ h, _, summable_geometric_of_norm_lt_1⟩,
obtain ⟨k : ℕ, hk : dist (ξ ^ k) 0 < 1⟩ :=
(h.tendsto_cofinite_zero.eventually (ball_mem_nhds _ zero_lt_one)).exists,
simp only [norm_pow, dist_zero_right] at hk,
rw [← one_pow k] at hk,
exact lt_of_pow_lt_pow _ zero_le_one hk
end
end geometric
section mul_geometric
lemma summable_norm_pow_mul_geometric_of_norm_lt_1 {R : Type*} [normed_ring R]
(k : ℕ) {r : R} (hr : ‖r‖ < 1) : summable (λ n : ℕ, ‖(n ^ k * r ^ n : R)‖) :=
begin
rcases exists_between hr with ⟨r', hrr', h⟩,
exact summable_of_is_O_nat (summable_geometric_of_lt_1 ((norm_nonneg _).trans hrr'.le) h)
(is_o_pow_const_mul_const_pow_const_pow_of_norm_lt _ hrr').is_O.norm_left
end
lemma summable_pow_mul_geometric_of_norm_lt_1 {R : Type*} [normed_ring R] [complete_space R]
(k : ℕ) {r : R} (hr : ‖r‖ < 1) : summable (λ n, n ^ k * r ^ n : ℕ → R) :=
summable_of_summable_norm $ summable_norm_pow_mul_geometric_of_norm_lt_1 _ hr
/-- If `‖r‖ < 1`, then `∑' n : ℕ, n * r ^ n = r / (1 - r) ^ 2`, `has_sum` version. -/
lemma has_sum_coe_mul_geometric_of_norm_lt_1 {𝕜 : Type*} [normed_field 𝕜] [complete_space 𝕜]
{r : 𝕜} (hr : ‖r‖ < 1) : has_sum (λ n, n * r ^ n : ℕ → 𝕜) (r / (1 - r) ^ 2) :=
begin
have A : summable (λ n, n * r ^ n : ℕ → 𝕜),
by simpa using summable_pow_mul_geometric_of_norm_lt_1 1 hr,
have B : has_sum (pow r : ℕ → 𝕜) (1 - r)⁻¹, from has_sum_geometric_of_norm_lt_1 hr,
refine A.has_sum_iff.2 _,
have hr' : r ≠ 1, by { rintro rfl, simpa [lt_irrefl] using hr },
set s : 𝕜 := ∑' n : ℕ, n * r ^ n,
calc s = (1 - r) * s / (1 - r) : (mul_div_cancel_left _ (sub_ne_zero.2 hr'.symm)).symm
... = (s - r * s) / (1 - r) : by rw [sub_mul, one_mul]
... = ((0 : ℕ) * r ^ 0 + (∑' n : ℕ, (n + 1 : ℕ) * r ^ (n + 1)) - r * s) / (1 - r) :
by rw ← tsum_eq_zero_add A
... = (r * (∑' n : ℕ, (n + 1) * r ^ n) - r * s) / (1 - r) :
by simp [pow_succ, mul_left_comm _ r, tsum_mul_left]
... = r / (1 - r) ^ 2 :
by simp [add_mul, tsum_add A B.summable, mul_add, B.tsum_eq, ← div_eq_mul_inv, sq,
div_div]
end
/-- If `‖r‖ < 1`, then `∑' n : ℕ, n * r ^ n = r / (1 - r) ^ 2`. -/
lemma tsum_coe_mul_geometric_of_norm_lt_1 {𝕜 : Type*} [normed_field 𝕜] [complete_space 𝕜]
{r : 𝕜} (hr : ‖r‖ < 1) :
(∑' n : ℕ, n * r ^ n : 𝕜) = (r / (1 - r) ^ 2) :=
(has_sum_coe_mul_geometric_of_norm_lt_1 hr).tsum_eq
end mul_geometric
section summable_le_geometric
variables [seminormed_add_comm_group α] {r C : ℝ} {f : ℕ → α}
lemma seminormed_add_comm_group.cauchy_seq_of_le_geometric {C : ℝ} {r : ℝ} (hr : r < 1)
{u : ℕ → α} (h : ∀ n, ‖u n - u (n + 1)‖ ≤ C*r^n) : cauchy_seq u :=
cauchy_seq_of_le_geometric r C hr (by simpa [dist_eq_norm] using h)
lemma dist_partial_sum_le_of_le_geometric (hf : ∀n, ‖f n‖ ≤ C * r^n) (n : ℕ) :
dist (∑ i in range n, f i) (∑ i in range (n+1), f i) ≤ C * r ^ n :=
begin
rw [sum_range_succ, dist_eq_norm, ← norm_neg, neg_sub, add_sub_cancel'],
exact hf n,
end
/-- If `‖f n‖ ≤ C * r ^ n` for all `n : ℕ` and some `r < 1`, then the partial sums of `f` form a
Cauchy sequence. This lemma does not assume `0 ≤ r` or `0 ≤ C`. -/
lemma cauchy_seq_finset_of_geometric_bound (hr : r < 1) (hf : ∀n, ‖f n‖ ≤ C * r^n) :
cauchy_seq (λ s : finset (ℕ), ∑ x in s, f x) :=
cauchy_seq_finset_of_norm_bounded _
(aux_has_sum_of_le_geometric hr (dist_partial_sum_le_of_le_geometric hf)).summable hf
/-- If `‖f n‖ ≤ C * r ^ n` for all `n : ℕ` and some `r < 1`, then the partial sums of `f` are within
distance `C * r ^ n / (1 - r)` of the sum of the series. This lemma does not assume `0 ≤ r` or
`0 ≤ C`. -/
lemma norm_sub_le_of_geometric_bound_of_has_sum (hr : r < 1) (hf : ∀n, ‖f n‖ ≤ C * r^n)
{a : α} (ha : has_sum f a) (n : ℕ) :
‖(∑ x in finset.range n, f x) - a‖ ≤ (C * r ^ n) / (1 - r) :=
begin
rw ← dist_eq_norm,
apply dist_le_of_le_geometric_of_tendsto r C hr (dist_partial_sum_le_of_le_geometric hf),
exact ha.tendsto_sum_nat
end
@[simp] lemma dist_partial_sum (u : ℕ → α) (n : ℕ) :
dist (∑ k in range (n + 1), u k) (∑ k in range n, u k) = ‖u n‖ :=
by simp [dist_eq_norm, sum_range_succ]
@[simp] lemma dist_partial_sum' (u : ℕ → α) (n : ℕ) :
dist (∑ k in range n, u k) (∑ k in range (n+1), u k) = ‖u n‖ :=
by simp [dist_eq_norm', sum_range_succ]
lemma cauchy_series_of_le_geometric {C : ℝ} {u : ℕ → α}
{r : ℝ} (hr : r < 1) (h : ∀ n, ‖u n‖ ≤ C*r^n) : cauchy_seq (λ n, ∑ k in range n, u k) :=
cauchy_seq_of_le_geometric r C hr (by simp [h])
lemma normed_add_comm_group.cauchy_series_of_le_geometric' {C : ℝ} {u : ℕ → α} {r : ℝ} (hr : r < 1)
(h : ∀ n, ‖u n‖ ≤ C*r^n) : cauchy_seq (λ n, ∑ k in range (n + 1), u k) :=
(cauchy_series_of_le_geometric hr h).comp_tendsto $ tendsto_add_at_top_nat 1
lemma normed_add_comm_group.cauchy_series_of_le_geometric'' {C : ℝ} {u : ℕ → α} {N : ℕ} {r : ℝ}
(hr₀ : 0 < r) (hr₁ : r < 1)
(h : ∀ n ≥ N, ‖u n‖ ≤ C*r^n) : cauchy_seq (λ n, ∑ k in range (n + 1), u k) :=
begin
set v : ℕ → α := λ n, if n < N then 0 else u n,
have hC : 0 ≤ C,
from (zero_le_mul_right $ pow_pos hr₀ N).mp ((norm_nonneg _).trans $ h N $ le_refl N),
have : ∀ n ≥ N, u n = v n,
{ intros n hn,
simp [v, hn, if_neg (not_lt.mpr hn)] },
refine cauchy_seq_sum_of_eventually_eq this (normed_add_comm_group.cauchy_series_of_le_geometric'
hr₁ _),
{ exact C },
intro n,
dsimp [v],
split_ifs with H H,
{ rw norm_zero,
exact mul_nonneg hC (pow_nonneg hr₀.le _) },
{ push_neg at H,
exact h _ H }
end
end summable_le_geometric
section normed_ring_geometric
variables {R : Type*} [normed_ring R] [complete_space R]
open normed_space
/-- A geometric series in a complete normed ring is summable.
Proved above (same name, different namespace) for not-necessarily-complete normed fields. -/
lemma normed_ring.summable_geometric_of_norm_lt_1
(x : R) (h : ‖x‖ < 1) : summable (λ (n:ℕ), x ^ n) :=
begin
have h1 : summable (λ (n:ℕ), ‖x‖ ^ n) := summable_geometric_of_lt_1 (norm_nonneg _) h,
refine summable_of_norm_bounded_eventually _ h1 _,
rw nat.cofinite_eq_at_top,
exact eventually_norm_pow_le x,
end
/-- Bound for the sum of a geometric series in a normed ring. This formula does not assume that the
normed ring satisfies the axiom `‖1‖ = 1`. -/
lemma normed_ring.tsum_geometric_of_norm_lt_1
(x : R) (h : ‖x‖ < 1) : ‖∑' n:ℕ, x ^ n‖ ≤ ‖(1:R)‖ - 1 + (1 - ‖x‖)⁻¹ :=
begin
rw tsum_eq_zero_add (normed_ring.summable_geometric_of_norm_lt_1 x h),
simp only [pow_zero],
refine le_trans (norm_add_le _ _) _,
have : ‖∑' b : ℕ, (λ n, x ^ (n + 1)) b‖ ≤ (1 - ‖x‖)⁻¹ - 1,
{ refine tsum_of_norm_bounded _ (λ b, norm_pow_le' _ (nat.succ_pos b)),
convert (has_sum_nat_add_iff' 1).mpr (has_sum_geometric_of_lt_1 (norm_nonneg x) h),
simp },
linarith
end
lemma geom_series_mul_neg (x : R) (h : ‖x‖ < 1) :
(∑' i:ℕ, x ^ i) * (1 - x) = 1 :=
begin
have := ((normed_ring.summable_geometric_of_norm_lt_1 x h).has_sum.mul_right (1 - x)),
refine tendsto_nhds_unique this.tendsto_sum_nat _,
have : tendsto (λ (n : ℕ), 1 - x ^ n) at_top (𝓝 1),
{ simpa using tendsto_const_nhds.sub (tendsto_pow_at_top_nhds_0_of_norm_lt_1 h) },
convert ← this,
ext n,
rw [←geom_sum_mul_neg, finset.sum_mul],
end
lemma mul_neg_geom_series (x : R) (h : ‖x‖ < 1) :
(1 - x) * ∑' i:ℕ, x ^ i = 1 :=
begin
have := (normed_ring.summable_geometric_of_norm_lt_1 x h).has_sum.mul_left (1 - x),
refine tendsto_nhds_unique this.tendsto_sum_nat _,
have : tendsto (λ (n : ℕ), 1 - x ^ n) at_top (nhds 1),
{ simpa using tendsto_const_nhds.sub
(tendsto_pow_at_top_nhds_0_of_norm_lt_1 h) },
convert ← this,
ext n,
rw [←mul_neg_geom_sum, finset.mul_sum]
end
end normed_ring_geometric
/-! ### Summability tests based on comparison with geometric series -/
lemma summable_of_ratio_norm_eventually_le {α : Type*} [seminormed_add_comm_group α]
[complete_space α] {f : ℕ → α} {r : ℝ} (hr₁ : r < 1)
(h : ∀ᶠ n in at_top, ‖f (n+1)‖ ≤ r * ‖f n‖) : summable f :=
begin
by_cases hr₀ : 0 ≤ r,
{ rw eventually_at_top at h,
rcases h with ⟨N, hN⟩,
rw ← @summable_nat_add_iff α _ _ _ _ N,
refine summable_of_norm_bounded (λ n, ‖f N‖ * r^n)
(summable.mul_left _ $ summable_geometric_of_lt_1 hr₀ hr₁) (λ n, _),
conv_rhs {rw [mul_comm, ← zero_add N]},
refine le_geom hr₀ n (λ i _, _),
convert hN (i + N) (N.le_add_left i) using 3,
ac_refl },
{ push_neg at hr₀,
refine summable_of_norm_bounded_eventually 0 summable_zero _,
rw nat.cofinite_eq_at_top,
filter_upwards [h] with _ hn,
by_contra' h,
exact not_lt.mpr (norm_nonneg _) (lt_of_le_of_lt hn $ mul_neg_of_neg_of_pos hr₀ h), },
end
lemma summable_of_ratio_test_tendsto_lt_one {α : Type*} [normed_add_comm_group α] [complete_space α]
{f : ℕ → α} {l : ℝ} (hl₁ : l < 1) (hf : ∀ᶠ n in at_top, f n ≠ 0)
(h : tendsto (λ n, ‖f (n+1)‖/‖f n‖) at_top (𝓝 l)) : summable f :=
begin
rcases exists_between hl₁ with ⟨r, hr₀, hr₁⟩,
refine summable_of_ratio_norm_eventually_le hr₁ _,
filter_upwards [eventually_le_of_tendsto_lt hr₀ h, hf] with _ _ h₁,
rwa ← div_le_iff (norm_pos_iff.mpr h₁),
end
lemma not_summable_of_ratio_norm_eventually_ge {α : Type*} [seminormed_add_comm_group α]
{f : ℕ → α} {r : ℝ} (hr : 1 < r) (hf : ∃ᶠ n in at_top, ‖f n‖ ≠ 0)
(h : ∀ᶠ n in at_top, r * ‖f n‖ ≤ ‖f (n+1)‖) : ¬ summable f :=
begin
rw eventually_at_top at h,
rcases h with ⟨N₀, hN₀⟩,
rw frequently_at_top at hf,
rcases hf N₀ with ⟨N, hNN₀ : N₀ ≤ N, hN⟩,
rw ← @summable_nat_add_iff α _ _ _ _ N,
refine mt summable.tendsto_at_top_zero
(λ h', not_tendsto_at_top_of_tendsto_nhds (tendsto_norm_zero.comp h') _),
convert tendsto_at_top_of_geom_le _ hr _,
{ refine lt_of_le_of_ne (norm_nonneg _) _,
intro h'',
specialize hN₀ N hNN₀,
simp only [comp_app, zero_add] at h'',
exact hN h''.symm },
{ intro i,
dsimp only [comp_app],
convert (hN₀ (i + N) (hNN₀.trans (N.le_add_left i))) using 3,
ac_refl }
end
lemma not_summable_of_ratio_test_tendsto_gt_one {α : Type*} [seminormed_add_comm_group α]
{f : ℕ → α} {l : ℝ} (hl : 1 < l)
(h : tendsto (λ n, ‖f (n+1)‖/‖f n‖) at_top (𝓝 l)) : ¬ summable f :=
begin
have key : ∀ᶠ n in at_top, ‖f n‖ ≠ 0,
{ filter_upwards [eventually_ge_of_tendsto_gt hl h] with _ hn hc,
rw [hc, div_zero] at hn,
linarith },
rcases exists_between hl with ⟨r, hr₀, hr₁⟩,
refine not_summable_of_ratio_norm_eventually_ge hr₀ key.frequently _,
filter_upwards [eventually_ge_of_tendsto_gt hr₁ h, key] with _ _ h₁,
rwa ← le_div_iff (lt_of_le_of_ne (norm_nonneg _) h₁.symm)
end
section
/-! ### Dirichlet and alternating series tests -/
variables {E : Type*} [normed_add_comm_group E] [normed_space ℝ E]
variables {b : ℝ} {f : ℕ → ℝ} {z : ℕ → E}
/-- **Dirichlet's Test** for monotone sequences. -/
theorem monotone.cauchy_seq_series_mul_of_tendsto_zero_of_bounded
(hfa : monotone f) (hf0 : tendsto f at_top (𝓝 0)) (hgb : ∀ n, ‖∑ i in range n, z i‖ ≤ b) :
cauchy_seq (λ n, ∑ i in range (n + 1), (f i) • z i) :=
begin
simp_rw [finset.sum_range_by_parts _ _ (nat.succ _), sub_eq_add_neg,
nat.succ_sub_succ_eq_sub, tsub_zero],
apply (normed_field.tendsto_zero_smul_of_tendsto_zero_of_bounded hf0
⟨b, eventually_map.mpr $ eventually_of_forall $ λ n, hgb $ n+1⟩).cauchy_seq.add,
refine (cauchy_seq_range_of_norm_bounded _ _ (λ n, _ : ∀ n, _ ≤ b * |f(n+1) - f(n)|)).neg,
{ simp_rw [abs_of_nonneg (sub_nonneg_of_le (hfa (nat.le_succ _))), ← mul_sum],
apply real.uniform_continuous_const_mul.comp_cauchy_seq,
simp_rw [sum_range_sub, sub_eq_add_neg],
exact (tendsto.cauchy_seq hf0).add_const },
{ rw [norm_smul, mul_comm],
exact mul_le_mul_of_nonneg_right (hgb _) (abs_nonneg _) },
end
/-- **Dirichlet's test** for antitone sequences. -/
theorem antitone.cauchy_seq_series_mul_of_tendsto_zero_of_bounded
(hfa : antitone f) (hf0 : tendsto f at_top (𝓝 0)) (hzb : ∀ n, ‖∑ i in range n, z i‖ ≤ b) :
cauchy_seq (λ n, ∑ i in range (n+1), (f i) • z i) :=
begin
have hfa': monotone (λ n, -f n) := λ _ _ hab, neg_le_neg $ hfa hab,
have hf0': tendsto (λ n, -f n) at_top (𝓝 0) := by { convert hf0.neg, norm_num },
convert (hfa'.cauchy_seq_series_mul_of_tendsto_zero_of_bounded hf0' hzb).neg,
funext,
simp
end
lemma norm_sum_neg_one_pow_le (n : ℕ) : ‖∑ i in range n, (-1 : ℝ) ^ i‖ ≤ 1 :=
by { rw [neg_one_geom_sum], split_ifs; norm_num }
/-- The **alternating series test** for monotone sequences.
See also `tendsto_alternating_series_of_monotone_tendsto_zero`. -/
theorem monotone.cauchy_seq_alternating_series_of_tendsto_zero
(hfa : monotone f) (hf0 : tendsto f at_top (𝓝 0)) :
cauchy_seq (λ n, ∑ i in range (n+1), (-1) ^ i * f i) :=
begin
simp_rw [mul_comm],
exact hfa.cauchy_seq_series_mul_of_tendsto_zero_of_bounded hf0 norm_sum_neg_one_pow_le
end
/-- The **alternating series test** for monotone sequences. -/
theorem monotone.tendsto_alternating_series_of_tendsto_zero
(hfa : monotone f) (hf0 : tendsto f at_top (𝓝 0)) :
∃ l, tendsto (λ n, ∑ i in range (n+1), (-1) ^ i * f i) at_top (𝓝 l) :=
cauchy_seq_tendsto_of_complete $ hfa.cauchy_seq_alternating_series_of_tendsto_zero hf0
/-- The **alternating series test** for antitone sequences.
See also `tendsto_alternating_series_of_antitone_tendsto_zero`. -/
theorem antitone.cauchy_seq_alternating_series_of_tendsto_zero
(hfa : antitone f) (hf0 : tendsto f at_top (𝓝 0)) :
cauchy_seq (λ n, ∑ i in range (n+1), (-1) ^ i * f i) :=
begin
simp_rw [mul_comm],
exact
hfa.cauchy_seq_series_mul_of_tendsto_zero_of_bounded hf0 norm_sum_neg_one_pow_le
end
/-- The **alternating series test** for antitone sequences. -/
theorem antitone.tendsto_alternating_series_of_tendsto_zero
(hfa : antitone f) (hf0 : tendsto f at_top (𝓝 0)) :
∃ l, tendsto (λ n, ∑ i in range (n+1), (-1) ^ i * f i) at_top (𝓝 l) :=
cauchy_seq_tendsto_of_complete $ hfa.cauchy_seq_alternating_series_of_tendsto_zero hf0
end
/-!
### Factorial
-/
/-- The series `∑' n, x ^ n / n!` is summable for any `x : ℝ`. See also `exp_series_div_summable`
for a version that also works in `ℂ`, and `exp_series_summable'` for a version that works in
any normed algebra over `ℝ` or `ℂ`. -/
lemma real.summable_pow_div_factorial (x : ℝ) :
summable (λ n, x ^ n / n! : ℕ → ℝ) :=
begin
-- We start with trivial estimates
have A : (0 : ℝ) < ⌊‖x‖⌋₊ + 1, from zero_lt_one.trans_le (by simp),
have B : ‖x‖ / (⌊‖x‖⌋₊ + 1) < 1, from (div_lt_one A).2 (nat.lt_floor_add_one _),
-- Then we apply the ratio test. The estimate works for `n ≥ ⌊‖x‖⌋₊`.
suffices : ∀ n ≥ ⌊‖x‖⌋₊, ‖x ^ (n + 1) / (n + 1)!‖ ≤ ‖x‖ / (⌊‖x‖⌋₊ + 1) * ‖x ^ n / ↑n!‖,
from summable_of_ratio_norm_eventually_le B (eventually_at_top.2 ⟨⌊‖x‖⌋₊, this⟩),
-- Finally, we prove the upper estimate
intros n hn,
calc ‖x ^ (n + 1) / (n + 1)!‖ = (‖x‖ / (n + 1)) * ‖x ^ n / n!‖ :
by rw [pow_succ, nat.factorial_succ, nat.cast_mul, ← div_mul_div_comm,
norm_mul, norm_div, real.norm_coe_nat, nat.cast_succ]
... ≤ (‖x‖ / (⌊‖x‖⌋₊ + 1)) * ‖x ^ n / n!‖ :
by mono* with [0 ≤ ‖x ^ n / n!‖, 0 ≤ ‖x‖]; apply norm_nonneg
end
lemma real.tendsto_pow_div_factorial_at_top (x : ℝ) :
tendsto (λ n, x ^ n / n! : ℕ → ℝ) at_top (𝓝 0) :=
(real.summable_pow_div_factorial x).tendsto_at_top_zero
|
import data.pfun
import logic.relation
import logic.function.iterate
import tactic.apply_fun
import tactic.linear_combination
namespace option
@[simp] lemma map_eq_some'_symm {α β : Type*} (f : α → β) (x : option α) (y : β) :
some y = x.map f ↔ ∃ a, x = some a ∧ f a = y := by { cases x; simp, exact comm, }
@[simp] lemma map_eq_none'_symm {α β : Type*} (f : α → β) (x : option α) :
none = x.map f ↔ none = x := by cases x; simp
end option
namespace part
@[simp] lemma restrict_dom {α : Type*} (x : part α) {p : Prop} (hp : p → x.dom) :
(x.restrict p hp).dom ↔ p := by refl
end part
namespace pfun
@[simp] lemma res_dom {α β : Type*} (f : α →. β) {p : set α} (hp : p ⊆ f.dom) :
(f.restrict hp).dom = p := by simp [pfun.dom, pfun.restrict]
/-- Restrict with the intersection of a set -/
def res_inter {α β : Type*} (f : α →. β) (p : set α) : α →. β :=
f.restrict (set.inter_subset_right p f.dom)
@[simp] lemma mem_res_inter {α β : Type*} {f : α →. β} {p : set α} {x y} :
y ∈ f.res_inter p x ↔ x ∈ p ∧ y ∈ f x :=
by { simp [res_inter], tauto, }
@[simp] lemma res_inter_res_inter {α β : Type*} {f : α →. β} {p₁ p₂ : set α} :
(f.res_inter p₁).res_inter p₂ = f.res_inter (p₁ ∩ p₂) :=
by { ext, simp, tauto, }
@[simp] lemma res_inter_dom {α β : Type*} (f : α →. β) (p : set α) :
(f.res_inter p).dom = p ∩ f.dom := by simp [res_inter]
@[simp] lemma res_inter_dom' {α β : Type*} {f : α →. β} {p : set α} :
∀ {x}, (f.res_inter p x).dom ↔ x ∈ p ∧ (f x).dom :=
set.ext_iff.mp (res_inter_dom f p)
@[simp] lemma coe_res_inter {α β : Type*} (f : α → β) (p : set α) :
(f : α →. β).res_inter p = pfun.res f p :=
by { ext x, simp [mem_res], tauto, }
end pfun
open relation
open nat (iterate)
open function (update iterate_succ iterate_succ_apply iterate_succ'
iterate_succ_apply' iterate_zero_apply)
namespace part_eval
/-- Run a state transition function `σ → option σ` "to completion". The return value is the last
state returned before a `none` result. If the state transition function always returns `some` or any step of the
transition function diverges, then the computation diverges, returning `part.none`. -/
def eval {σ} (f : σ →. option σ) : σ → part σ :=
pfun.fix (λ s, (f s).map (λ x, x.elim (sum.inl s) sum.inr))
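/- Added illustration (editorial, not from the original file): with the countdown function
`f : ℕ →. option ℕ := λ n, part.some (if n = 0 then none else some (n - 1))`,
`eval f 3` steps through `3 → 2 → 1 → 0` and stops because `f 0 = part.some none`,
so `eval f 3 = part.some 0`: the result is the last state reached before the `none` step. -/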
/-- The reflexive transitive closure of a state transition function. `reaches f a b` means
there is a finite sequence of steps `f a = some a₁`, `f a₁ = some a₂`, ... such that `aₙ = b`.
This relation permits zero steps of the state transition function. -/
def reaches {σ} (f : σ →. option σ) : σ → σ → Prop :=
refl_trans_gen (λ a b, some b ∈ f a)
@[refl] lemma reaches.refl {σ} (f : σ →. option σ) (x : σ) :
reaches f x x := refl_trans_gen.refl
@[trans] lemma reaches.trans {σ} {f : σ →. option σ} {x y z : σ} :
reaches f x y → reaches f y z → reaches f x z := refl_trans_gen.trans
lemma reaches_fwd {σ} {f : σ →. option σ} {x y : σ} :
some y ∈ f x → reaches f x y := @refl_trans_gen.single _ _ x y
theorem reaches_mono {σ} {f : σ →. option σ} (S : set σ) (hS : S ⊆ f.dom) {x y} (hf : reaches (f.restrict hS) x y) :
reaches f x y :=
by { apply refl_trans_gen.mono _ hf, simp, }
theorem reaches_mono' {σ} {f g : σ →. option σ} (hfg : ∀ ⦃x y⦄, y ∈ f x → y ∈ g x) {x y} (hf : reaches f x y) :
reaches g x y :=
by { apply refl_trans_gen.mono _ hf, intros _ _, apply hfg, }
theorem invariant_of_reaches {σ} {f : σ →. option σ} (S : set σ) (hS : ∀ ⦃x y⦄, x ∈ S → some y ∈ f x → y ∈ S)
{x y} (hx : x ∈ S) (hf : reaches f x y) : y ∈ S :=
by { induction hf with x' y' hfx' hfy' ih, { exact hx, }, exact hS ih hfy', }
theorem reaches_of_invariant {σ} {f : σ →. option σ} (S : set σ) (hS : ∀ ⦃x y⦄, x ∈ S → some y ∈ f x → y ∈ S)
{x y} (hx : x ∈ S) (hf : reaches f x y) : reaches (f.res_inter S) x y :=
begin
induction hf using relation.refl_trans_gen.head_induction_on with x' y' hx' hy' ih, { refl, },
apply reaches.trans (reaches_fwd _) (ih _),
{ simp only [pfun.mem_res_inter], exact ⟨hx, hx'⟩, }, { exact hS hx hx', }
end
/-- The transitive closure of a state transition function. `reaches₁ f a b` means there is a
nonempty finite sequence of steps `f a = some a₁`, `f a₁ = some a₂`, ... such that `aₙ = b`.
This relation does not permit zero steps of the state transition function. -/
def reaches₁ {σ} (f : σ →. option σ) : σ → σ → Prop :=
trans_gen (λ a b, some b ∈ f a)
theorem reaches_iff_eq_or_reaches₁ {σ} {f : σ →. option σ} {a b} :
reaches f a b ↔ b = a ∨ reaches₁ f a b := refl_trans_gen_iff_eq_or_trans_gen
theorem reaches₁_head'_iff {σ} {f : σ →. option σ} {a b} :
reaches₁ f a b ↔ ∃ c : σ, some c ∈ f a ∧ reaches f c b := trans_gen.head'_iff
theorem reaches₁_eq {σ} {f : σ →. option σ} {a b c}
(h : f a = f b) : reaches₁ f a c ↔ reaches₁ f b c :=
trans_gen.head'_iff.trans (trans_gen.head'_iff.trans $ by rw h).symm
theorem reaches_total {σ} {f : σ →. option σ}
{a b c} (hab : reaches f a b) (hac : reaches f a c) :
reaches f b c ∨ reaches f c b :=
refl_trans_gen.total_of_right_unique (λ x y z hx hy, option.some_injective _ (part.mem_unique hx hy)) hab hac
theorem reaches₁_fwd {σ} {f : σ →. option σ}
{a b c} (h₁ : reaches₁ f a c) (h₂ : some b ∈ f a) : reaches f b c :=
begin
rw reaches₁_head'_iff at h₁, rcases h₁ with ⟨b', ⟨h₂', H⟩⟩,
cases part.mem_unique h₂ h₂', exact H,
end
theorem reaches₁_single {σ} {f : σ →. option σ}
{a b} : some b ∈ f a → reaches₁ f a b :=
@trans_gen.single σ _ a b
/-- A variation on `reaches`. `reaches₀ f a b` holds if whenever `reaches₁ f b c` then
`reaches₁ f a c`. This is a weaker property than `reaches` and is useful for replacing states with
equivalent states without taking a step. -/
def reaches₀ {σ} (f : σ →. option σ) (a b : σ) : Prop :=
∀ c, reaches₁ f b c → reaches₁ f a c
theorem reaches₀.trans {σ} {f : σ →. option σ} {a b c : σ}
(h₁ : reaches₀ f a b) (h₂ : reaches₀ f b c) : reaches₀ f a c
| d h₃ := h₁ _ (h₂ _ h₃)
@[refl] theorem reaches₀.refl {σ} {f : σ →. option σ} (a : σ) : reaches₀ f a a
| b h := h
theorem reaches₀.single {σ} {f : σ →. option σ} {a b : σ}
(h : some b ∈ f a) : reaches₀ f a b
| c h₂ := h₂.head h
theorem reaches₀.head {σ} {f : σ →. option σ} {a b c : σ}
(h : some b ∈ f a) (h₂ : reaches₀ f b c) : reaches₀ f a c :=
(reaches₀.single h).trans h₂
theorem reaches₀.tail {σ} {f : σ →. option σ} {a b c : σ}
(h₁ : reaches₀ f a b) (h : some c ∈ f b) : reaches₀ f a c :=
h₁.trans (reaches₀.single h)
theorem reaches₀_eq {σ} {f : σ →. option σ} {a b}
(e : f a = f b) : reaches₀ f a b
| d h := (reaches₁_eq e).2 h
theorem reaches₁.to₀ {σ} {f : σ →. option σ} {a b : σ}
(h : reaches₁ f a b) : reaches₀ f a b
| c h₂ := h.trans h₂
theorem reaches.to₀ {σ} {f : σ →. option σ} {a b : σ}
(h : reaches f a b) : reaches₀ f a b
| c h₂ := h₂.trans_right h
theorem reaches₀.tail' {σ} {f : σ →. option σ} {a b c : σ}
(h : reaches₀ f a b) (h₂ : some c ∈ f b) : reaches₁ f a c :=
h _ (trans_gen.single h₂)
/-- (co-)Induction principle for `eval`. If a property `C` holds of any point `a` evaluating to `b`
which is either terminal (meaning `a = b`) or where the next point also satisfies `C`, then it
holds of any point where `eval f a` evaluates to `b`. This formalizes the notion that if
`eval f a` evaluates to `b` then it reaches terminal state `b` in finitely many steps. -/
@[elab_as_eliminator] def eval_induction {σ}
{f : σ →. option σ} {b : σ} {C : σ → Sort*} {a : σ} (h : b ∈ eval f a)
(H : ∀ a, b ∈ eval f a →
(∀ a', f a = part.some (some a') → C a') → C a) : C a :=
by { dsimp only [eval] at *, exact pfun.fix_induction h (λ _ b ih, H _ b (λ _ ha, ih _ (by simp [ha]))) }
theorem mem_eval {σ} {f : σ →. option σ} {a b} :
b ∈ eval f a ↔ reaches f a b ∧ f b = part.some none :=
begin
split,
{ intro h,
apply eval_induction h, clear h a, intros a hb ih,
have : (f a).dom := by simpa using pfun.dom_of_mem_fix hb,
rw part.dom_iff_mem at this,
rcases this with ⟨a'|a', ha'⟩, rw ← part.eq_some_iff at ha',
{ rw [eval] at hb, cases (part.mem_unique hb (pfun.fix_stop a _) : b = a),
{ exact ⟨by refl, ha'⟩, }, { simp [ha'], } },
specialize ih a' (by rwa part.eq_some_iff),
exact ⟨(reaches_fwd ha').trans ih.1, ih.2⟩, },
{ rintro ⟨h₁, h₂⟩,
induction h₁ using relation.refl_trans_gen.head_induction_on with a' b' ha' hb ih,
{ apply pfun.fix_stop, simp [h₂], },
rw [eval, pfun.fix_fwd _ b'], { exact ih, },
rw ← part.eq_some_iff at ha', simp [ha'], }
end
lemma eval_mono {σ} {f g : σ →. option σ} (hfg : ∀ ⦃x y⦄, y ∈ f x → y ∈ g x) {x y} (h : y ∈ eval f x) :
y ∈ eval g x :=
by { rw [mem_eval, part.eq_some_iff] at *, exact ⟨reaches_mono' hfg h.1, hfg h.2⟩, }
lemma eval_eq_of_invariant {σ} (f : σ →. option σ) (S : set σ) (hS : ∀ ⦃x y⦄, x ∈ S → some y ∈ f x → y ∈ S) {x} (hx : x ∈ S) :
eval f x = eval (f.res_inter S) x :=
by { ext y, split, swap, { intro h, apply eval_mono _ h, simp, }, simp [mem_eval, part.eq_some_iff],
intros H₁ H₂, exact ⟨reaches_of_invariant S hS hx H₁, invariant_of_reaches _ hS hx H₁, H₂⟩, }
@[simp] lemma eval_next_iter_eq_none {σ} (f : σ →. option σ) (a : σ) (h : (eval f a).dom) :
f ((eval f a).get h) = part.some none :=
by { have := part.get_mem h, rw mem_eval at this, exact this.2, }
theorem eval_maximal₁ {σ} {f : σ →. option σ} {a b : σ}
(h : b ∈ eval f a) (c) : ¬ reaches₁ f b c | bc :=
let ⟨ab, b0⟩ := mem_eval.1 h, ⟨b', h', _⟩ := trans_gen.head'_iff.1 bc in
by { rw b0 at h', simpa using h', }
theorem eval_maximal {σ} {f : σ →. option σ} {a b}
(h : b ∈ eval f a) {c} : reaches f b c ↔ c = b :=
let ⟨ab, b0⟩ := mem_eval.1 h in
refl_trans_gen_iff_eq $ λ b' h',
by { rw b0 at h', simpa using h', }
theorem reaches_eval {σ} {f : σ →. option σ} {a b}
(ab : reaches f a b) : eval f a = eval f b :=
part.ext $ λ c,
⟨λ h, let ⟨ac, c0⟩ := mem_eval.1 h in
mem_eval.2 ⟨(or_iff_left_of_imp $ by exact
λ cb, (eval_maximal h).1 cb ▸ refl_trans_gen.refl).1
(reaches_total ab ac), c0⟩,
λ h, let ⟨bc, c0⟩ := mem_eval.1 h in mem_eval.2 ⟨ab.trans bc, c0⟩,⟩
/-- Given a relation `tr : σ₁ → σ₂ → Prop` between state spaces, and state transition functions
`f₁ : σ₁ →. option σ₁` and `f₂ : σ₂ →. option σ₂`, `respects f₁ f₂ tr` means that if `tr a₁ a₂` holds
initially and `f₁` takes a step from `a₁` to `b₁`, then `f₂` will take one or more steps from `a₂`
before reaching a state `b₂` satisfying `tr b₁ b₂`, and if `f₁ a₁` terminates then `f₂ a₂` also
terminates. Such a relation `tr` is also known as a refinement. -/
-- def respects {σ₁ σ₂}
-- (f₁ : σ₁ →. option σ₁) (f₂ : σ₂ →. option σ₂) (tr : σ₁ → σ₂ → Prop) :=
-- ∀ ⦃a₁ a₂⦄, tr a₁ a₂ → (match f₁ a₁ with
-- | part.none := f₂ a₂ = part.none
-- | part.some (some b₁) := ∃ b₂, tr b₁ b₂ ∧ reaches₁ f₂ a₂ b₂
-- | part.some none := f₂ a₂ = none
-- end : Prop)
structure respects {σ₁ σ₂} (f₁ : σ₁ →. option σ₁) (f₂ : σ₂ →. option σ₂) (tr : σ₁ → σ₂ → Prop) : Prop :=
(dom_of_dom : ∀ ⦃a₁ a₂⦄, tr a₁ a₂ → (f₂ a₂).dom → (f₁ a₁).dom)
(some_of_some : ∀ ⦃a₁ a₂ b₁⦄, tr a₁ a₂ → some b₁ ∈ (f₁ a₁) → ∃ b₂, tr b₁ b₂ ∧ reaches₁ f₂ a₂ b₂)
(none_of_none : ∀ ⦃a₁ a₂⦄, tr a₁ a₂ → none ∈ (f₁ a₁) → none ∈ (f₂ a₂))
variables {σ₁ σ₂ : Type*} {f₁ : σ₁ →. option σ₁} {f₂ : σ₂ →. option σ₂} {tr : σ₁ → σ₂ → Prop}
lemma respects.exists_some {a₁ a₂ b₁} (H : respects f₁ f₂ tr) (aa : tr a₁ a₂) (hb₁ : some b₁ ∈ f₁ a₁) :
∃ b₂, some b₂ ∈ f₂ a₂ :=
by { obtain ⟨b₂, ⟨_, hb₂⟩⟩ := H.some_of_some aa hb₁, rw reaches₁_head'_iff at hb₂, tauto, }
lemma respects.dom_iff_domm {a₁ a₂} (H : respects f₁ f₂ tr) (aa : tr a₁ a₂) :
(f₁ a₁).dom ↔ (f₂ a₂).dom :=
begin
refine ⟨λ h, _, H.dom_of_dom aa⟩,
rw [part.dom_iff_mem] at h ⊢, cases h with b₁ hb,
cases b₁,
{ use none, exact H.none_of_none aa hb, },
{ obtain ⟨b₂, hb₂⟩ := H.exists_some aa hb, exact ⟨_, hb₂⟩, }
end
lemma respects.none_iff_none {a₁ a₂} (H : respects f₁ f₂ tr) (aa : tr a₁ a₂) :
none ∈ f₁ a₁ ↔ none ∈ f₂ a₂ :=
begin
refine ⟨H.none_of_none aa, λ h, _⟩,
obtain ⟨x, hx⟩ : ∃ x, x ∈ f₁ a₁, { rw [← part.dom_iff_mem, H.dom_iff_domm aa, part.dom_iff_mem], exact ⟨_, h⟩, },
cases x, { exact hx, },
obtain ⟨_, hb⟩ := H.exists_some aa hx, cases part.mem_unique h hb,
end
lemma respects.some_iff_some {a₁ a₂} (H : respects f₁ f₂ tr) (aa : tr a₁ a₂) :
(∃ b₁, some b₁ ∈ f₁ a₁) ↔ (∃ b₂, some b₂ ∈ f₂ a₂) :=
begin
refine ⟨λ ⟨b₁, hb₁⟩, H.exists_some aa hb₁, _⟩,
rintro ⟨b₂, hb₂⟩,
obtain ⟨x, hx⟩ : ∃ x, x ∈ f₁ a₁, { rw [← part.dom_iff_mem, H.dom_iff_domm aa, part.dom_iff_mem], exact ⟨_, hb₂⟩, },
cases x, { rw H.none_iff_none aa at hx, cases part.mem_unique hb₂ hx, },
exact ⟨_, hx⟩,
end
theorem tr_reaches₁
(H : respects f₁ f₂ tr) {a₁ a₂} (aa : tr a₁ a₂) {b₁} (ab : reaches₁ f₁ a₁ b₁) :
∃ b₂, tr b₁ b₂ ∧ reaches₁ f₂ a₂ b₂ :=
begin
induction ab with c₁ ac c₁ d₁ ac cd IH,
{ exact H.some_of_some aa ac, },
{ rcases IH with ⟨c₂, cc, ac₂⟩,
obtain ⟨b₂, ⟨h₁, h₂⟩⟩ := H.some_of_some cc cd,
exact ⟨b₂, ⟨h₁, ac₂.trans h₂⟩⟩, }
end
theorem tr_reaches {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop}
(H : respects f₁ f₂ tr) {a₁ a₂} (aa : tr a₁ a₂) {b₁} (ab : reaches f₁ a₁ b₁) :
∃ b₂, tr b₁ b₂ ∧ reaches f₂ a₂ b₂ :=
begin
rcases refl_trans_gen_iff_eq_or_trans_gen.1 ab with rfl | ab,
{ exact ⟨_, aa, refl_trans_gen.refl⟩ },
{ exact let ⟨b₂, bb, h⟩ := tr_reaches₁ H aa ab in
⟨b₂, bb, h.to_refl⟩ }
end
theorem tr_reaches_rev {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop}
(H : respects f₁ f₂ tr) {a₁ a₂} (aa : tr a₁ a₂) {b₂} (ab : reaches f₂ a₂ b₂) :
∃ c₁ c₂, reaches f₂ b₂ c₂ ∧ tr c₁ c₂ ∧ reaches f₁ a₁ c₁ :=
begin
induction ab with a₂' a₂'' ha₂ ha₂' ih,
{ refine ⟨a₁, a₂, _, aa, _⟩; refl, },
rcases ih with ⟨c₁, c₂, c₂h, trh, c₁h⟩,
by_cases H : c₂ = a₂',
{ subst H, clear c₂h,
obtain ⟨c₁', hc₁'⟩ := (H.some_iff_some trh).mpr ⟨_, ha₂'⟩,
obtain ⟨c₂', hc₂, hc₂'⟩ := H.some_of_some trh hc₁',
exact ⟨c₁', c₂', reaches₁_fwd hc₂' ha₂', hc₂, c₁h.trans (reaches_fwd hc₁')⟩, },
refine ⟨c₁, c₂, _, trh, c₁h⟩,
simp_rw [reaches_iff_eq_or_reaches₁, H, false_or] at c₂h,
apply reaches₁_fwd c₂h ha₂',
end
theorem tr_eval {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop}
(H : respects f₁ f₂ tr) {a₁ b₁ a₂} (aa : tr a₁ a₂)
(ab : b₁ ∈ eval f₁ a₁) : ∃ b₂, tr b₁ b₂ ∧ b₂ ∈ eval f₂ a₂ :=
begin
cases mem_eval.1 ab with ab b0,
rcases tr_reaches H aa ab with ⟨b₂, bb, ab⟩,
refine ⟨_, bb, mem_eval.2 ⟨ab, _⟩⟩,
rw part.eq_some_iff at ⊢ b0, rwa ← H.none_iff_none bb,
end
theorem tr_eval_rev {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop}
(H : respects f₁ f₂ tr) {a₁ b₂ a₂} (aa : tr a₁ a₂)
(ab : b₂ ∈ eval f₂ a₂) : ∃ b₁, tr b₁ b₂ ∧ b₁ ∈ eval f₁ a₁ :=
begin
cases mem_eval.1 ab with ab b0,
rcases tr_reaches_rev H aa ab with ⟨c₁, c₂, bc, cc, ac⟩,
cases (refl_trans_gen_iff_eq _).1 bc,
swap, { intros _ h, rw b0 at h, simpa using h, },
refine ⟨_, cc, mem_eval.2 ⟨ac, _⟩⟩,
rw part.eq_some_iff at b0 ⊢, rwa H.none_iff_none cc,
end
theorem tr_eval_dom {σ₁ σ₂ f₁ f₂} {tr : σ₁ → σ₂ → Prop}
(H : respects f₁ f₂ tr) {a₁ a₂} (aa : tr a₁ a₂) :
(eval f₂ a₂).dom ↔ (eval f₁ a₁).dom :=
⟨λ h, let ⟨b₂, tr, h, _⟩ := tr_eval_rev H aa ⟨h, rfl⟩ in h,
λ h, let ⟨b₂, tr, h, _⟩ := tr_eval H aa ⟨h, rfl⟩ in h⟩
/-- A simpler version of `respects` when the state transition relation `tr` is a function. -/
structure frespects {σ₁ σ₂} (f₁ : σ₁ →. option σ₁) (f₂ : σ₂ →. option σ₂) (tr : σ₁ → σ₂) : Prop :=
(dom_of_dom : ∀ ⦃a : σ₁⦄, (f₂ (tr a)).dom → (f₁ a).dom)
(some_of_some : ∀ ⦃a b : σ₁⦄, some b ∈ f₁ a → reaches₁ f₂ (tr a) (tr b))
(none_of_none : ∀ ⦃a⦄, none ∈ f₁ a → none ∈ f₂ (tr a))
/-- An even simpler version where both take only one step each time -/
structure fcommutes {σ₁ σ₂} (f₁ : σ₁ →. option σ₁) (f₂ : σ₂ →. option σ₂) (tr : σ₁ → σ₂) : Prop :=
(dom_of_dom : ∀ ⦃a : σ₁⦄, (f₂ (tr a)).dom → (f₁ a).dom)
(some_of_some : ∀ ⦃a b : σ₁⦄, some b ∈ f₁ a → some (tr b) ∈ f₂ (tr a))
(none_of_none : ∀ ⦃a⦄, none ∈ f₁ a → none ∈ f₂ (tr a))
variable {ftr : σ₁ → σ₂}
theorem fcommutes.to_frespects (H : fcommutes f₁ f₂ ftr) : frespects f₁ f₂ ftr :=
{ dom_of_dom := H.dom_of_dom,
some_of_some := λ a b h, by { apply reaches₁_single, exact H.some_of_some h, },
none_of_none := H.none_of_none }
lemma fcommutes.some_of_some' (H : fcommutes f₁ f₂ ftr) {a b : σ₁}
(h : some (ftr b) ∈ f₂ (ftr a)) :
∃ y, ftr y = ftr b ∧ some y ∈ f₁ a :=
begin
obtain ⟨y, hy⟩ := part.dom_iff_mem.mp (H.dom_of_dom (part.dom_iff_mem.mpr ⟨_, h⟩)),
cases y, { cases part.mem_unique h (H.none_of_none hy), },
refine ⟨_, _, hy⟩, exact (option.some.inj (part.mem_unique h (H.some_of_some hy))).symm,
end
theorem fun_respects : respects f₁ f₂ (λ a b, ftr a = b) ↔ frespects f₁ f₂ ftr :=
begin
split,
{ intro H,
refine ⟨λ a, H.dom_of_dom rfl, λ a b hab, _, λ a ha, H.none_of_none rfl ha⟩,
simpa using H.some_of_some rfl hab, },
{ intro H,
refine ⟨_, _, _⟩, { rintro a₁ a₂ rfl h, exact H.dom_of_dom h, },
{ rintro a₁ a₂ b₁ rfl h, exact ⟨_, rfl, H.some_of_some h⟩, },
rintro a₁ a₂ rfl h, exact H.none_of_none h, }
end
lemma frespects.dom_iff_dom (H : frespects f₁ f₂ ftr) ⦃x : σ₁⦄ :
(f₁ x).dom ↔ (f₂ (ftr x)).dom :=
respects.dom_iff_domm (fun_respects.mpr H) rfl
-- f(g(x)) = x
-- S(g(a)) --> a'
-- g(a) --> a
-- theorem fcommutes.symm (H : fcommutes f₁ f₂ ftr) {ftr_inv : σ₂ → σ₁} (hinv : function.right_inverse ftr_inv ftr) :
-- fcommutes f₂ f₁ ftr_inv :=
-- { dom_of_dom := λ a, by simp [(fun_respects.mpr H.to_frespects).dom_iff_domm (hinv a)],
-- some_of_some := λ a b h,
-- begin
-- rw [← hinv b, ← hinv a] at h, have := H.some_of_some' h,
-- end,
-- none_of_none := _ }
theorem frespects.eval_eq (H : frespects f₁ f₂ ftr)
(a₁ : σ₁) : eval f₂ (ftr a₁) = (eval f₁ a₁).map ftr :=
begin
rw ← fun_respects at H,
apply part.ext', { exact tr_eval_dom H rfl, },
intros h₂ h₁, simp at h₁,
have := tr_eval H rfl (part.get_mem h₁),
simp at this ⊢, rwa part.get_eq_iff_mem,
end
theorem frespects.of_eval (H : frespects f₁ f₂ ftr)
{a b : σ₁} (h : b ∈ eval f₁ a) : (ftr b) ∈ eval f₂ (ftr a) :=
by { rw H.eval_eq, exact part.mem_map ftr h, }
theorem frespects.none_iff_none (H : frespects f₁ f₂ ftr) (a : σ₁) :
none ∈ f₁ a ↔ none ∈ f₂ (ftr a) :=
by { rw ← fun_respects at H, rw H.none_iff_none rfl, }
theorem frespects.eval_dom (H : frespects f₁ f₂ ftr) (x : σ₁) :
(eval f₂ (ftr x)).dom ↔ (eval f₁ x).dom := by simp [H.eval_eq]
theorem frespects.eval_get_eq (H : frespects f₁ f₂ ftr) (a : σ₁) :
∀ h, ftr ((eval f₁ a).get h) = (eval f₂ (ftr a)).get (by rwa H.eval_dom) :=
by { intros, simp [H.eval_eq], refl, }
section track_with
variables {σ α : Type*} (f : σ →. option σ) (t : σ →. ℕ)
def with_time : ℕ × σ →. option (ℕ × σ) :=
λ tx, (f tx.2).bind (λ r₁, (t tx.2).bind (λ r₂ : ℕ, part.some (r₁.map $ λ r₁', (tx.1 + r₂, r₁'))))
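-- Added note (editorial interpretation of the definition above, not from the original source):
-- `with_time f t` pairs each state with an accumulated cost. A step from `(n, x)` is defined
-- only when both `f x` and `t x` are defined; when `f x` steps to some `x'`, the cost becomes
-- `n + c` for the `c ∈ t x`, and when `f x` halts (`none`), `with_time f t` halts as well.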
theorem with_time_respects {f : σ →. option σ} {t : σ →. ℕ} (ht : ∀ x, (t x).dom ↔ (f x).dom) : frespects (with_time f t) f prod.snd :=
{ dom_of_dom := λ a, by simp [with_time, ht],
some_of_some := λ ⟨a₁, x₁⟩ ⟨a₂, x₂⟩ h, by { apply reaches₁_single, simp [with_time] at h, rcases h with ⟨_, h, _, _, rfl, rfl⟩, exact h, },
none_of_none := λ ⟨a, x⟩, by { simp [with_time], exact λ h _ _, h, } }
theorem with_time_respects_self (n : ℕ) : frespects (with_time f t) (with_time f t) (prod.map (+n) id) :=
{ dom_of_dom := λ a, by { simp [with_time], exact and.intro, },
some_of_some := λ ⟨a₁, x₁⟩ ⟨a₂, x₂⟩ h,
begin
apply reaches₁_single,
simp [with_time] at h ⊢,
rcases h with ⟨a, ha, t, ht₁, rfl, rfl⟩,
exact ⟨_, ha, t, ht₁, rfl, by ac_refl⟩,
end,
none_of_none := by { simp [with_time], tauto, } }
def time_iter : σ →. ℕ :=
λ s, (eval (with_time f t) (0, s)).bind (λ r, (t r.2).map (+r.1))
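-- Added note (editorial interpretation, not from the original source): `time_iter f t s` runs
-- `f` to completion from `s` via `with_time` and then adds the cost of the terminal state, so
-- any `n ∈ time_iter f t s` is the sum of `t` over every state visited, terminal state included.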
variables {f t}
lemma with_time_restrict (S : set σ) :
with_time (f.res_inter S) t = (with_time f t).res_inter (prod.snd⁻¹' S) :=
by { ext, simp [with_time], tauto, }
theorem time_iter_dom_iff (ht : ∀ x, (t x).dom ↔ (f x).dom) {x} :
(time_iter f t x).dom ↔ (eval f x).dom :=
begin
simp [time_iter],
have := with_time_respects ht,
simp_rw [← this.eval_dom (0, x), this.eval_get_eq (0, x), ht, eval_next_iter_eq_none f x], simp,
end
lemma with_time_mono {g : σ →. option σ} (hfg : ∀ ⦃x y⦄, y ∈ f x → y ∈ g x) :
∀ ⦃x y⦄, y ∈ with_time f t x → y ∈ with_time g t x := by { simp [with_time], tauto, }
lemma time_iter_mono {g : σ →. option σ} (hfg : ∀ ⦃x y⦄, y ∈ f x → y ∈ g x) {x y} (hx : y ∈ time_iter f t x) :
y ∈ time_iter g t x :=
begin
simp [time_iter] at hx ⊢, rcases hx with ⟨a, b, h₁, ⟨a', h₂, rfl⟩⟩,
refine ⟨a, b, _, ⟨a', h₂, rfl⟩⟩, apply eval_mono (with_time_mono hfg) h₁,
end
theorem time_iter_eq_iff (ht : ∀ x, (t x).dom ↔ (f x).dom) (x : σ) (n : ℕ) :
n ∈ time_iter f t x ↔ ∃ t' b, reaches (with_time f t) (0, x) (t', b) ∧ none ∈ f b ∧ n ∈ (+t') <$> (t b) :=
begin
simp [time_iter, mem_eval],
apply exists₂_congr, intros a b,
conv_lhs { rw and_assoc, }, apply and_congr, { refl, },
apply and_congr, { rw ← (with_time_respects ht).none_iff_none (a, b), exact part.eq_some_iff, }, { refl, },
end
lemma time_iter_invariant {g : σ →. option σ} (S : set σ) (hS : ∀ ⦃x y⦄, x ∈ S → some y ∈ g x → y ∈ S) {x} (hx : x ∈ S) :
time_iter g t x = time_iter (g.res_inter S) t x :=
begin
simp only [time_iter], rw eval_eq_of_invariant (with_time g t) (prod.snd⁻¹' S), { simp [with_time_restrict], },
{ rintros ⟨x₁, x₂⟩ ⟨y₁, y₂⟩, simp [with_time], rintros hx₂ x' hx' t' ht' rfl rfl, exact hS hx₂ hx', },
simpa,
end
theorem time_iter_eq_iff_of_eval (ht : ∀ x, (t x).dom ↔ (f x).dom) {x n b} (hb : b ∈ eval f x) :
n ∈ time_iter f t x ↔ ∃ t', reaches (with_time f t) (0, x) (t', b) ∧ none ∈ f b ∧ n ∈ (+t') <$> (t b) :=
begin
suffices : ∀ {t' b'}, reaches (with_time f t) (0, x) (t', b') → none ∈ f b' → b = b',
{ rw time_iter_eq_iff ht, apply exists_congr, intro n, split, { rintro ⟨b, h₁, h₂, h₃⟩, cases this h₁ h₂, tauto, }, intro, use b, tauto, },
intros n b' h₁ h₂, rw [← (with_time_respects ht).none_iff_none (n, b'), ← part.eq_some_iff] at h₂,
exact part.mem_unique hb ((with_time_respects ht).of_eval (mem_eval.mpr ⟨h₁, h₂⟩)),
end
@[simp] lemma one_def : (1 : part ℕ) = part.some 1 := rfl
lemma time_eval_const_respects (ht : ∀ ⦃x⦄, (f x).dom → (t x).dom) (J : ℕ) :
respects (with_time (f.res_inter {s | ∀ k ∈ t s, k ≤ J}) (λ _, 1))
(with_time (f.res_inter {s | ∀ k ∈ t s, k ≤ J}) t)
(λ s₁ s₂, s₁.2 = s₂.2 ∧ s₂.1 ≤ J * s₁.1) :=
{ dom_of_dom := by { rintro ⟨t₁, s⟩ ⟨t₂, s⟩, dsimp only, rintro ⟨rfl, _⟩, simp [with_time], tauto, },
some_of_some :=
begin
rintro ⟨t₁, s₁⟩ ⟨t₂, s₁⟩ ⟨t₃, s₂⟩, dsimp only, rintro ⟨rfl, hb⟩,
simp [with_time], rintros s₂' hs hn rfl rfl,
rcases part.dom_iff_mem.mp (ht (part.dom_iff_mem.mpr ⟨_, hn⟩)) with ⟨tn, htn⟩,
use [t₂ + tn, s₂, rfl], { mono, }, apply reaches₁_single, simp, refine ⟨⟨_, _⟩, _⟩; assumption,
end,
none_of_none :=
begin
rintro ⟨t₁, s₁⟩ ⟨t₂, s₁⟩, dsimp only, rintro ⟨rfl, _⟩,
simp [with_time, ← part.dom_iff_mem],
refine λ h₁ h₂, ⟨⟨h₁, h₂⟩, ht _⟩, rw part.dom_iff_mem, exact ⟨_, h₂⟩,
end }
lemma with_time_le_of_iters_le {x : σ} {n J : ℕ} (ht : ∀ x, (f x).dom → (t x).dom)
(h : n ∈ time_iter (f.res_inter {s | ∀ k ∈ t s, k ≤ J}) (pfun.pure 1) x) :
∃ k ∈ time_iter f t x, k ≤ n * J :=
begin
simp [time_iter, pfun.pure] at h, rcases h with ⟨n, ⟨⟨s, hs⟩, rfl⟩⟩,
obtain ⟨⟨tf, sf⟩, h₁, h₂⟩ := tr_eval (time_eval_const_respects ht J) _ hs, swap, { use (0, x), }, swap, { split; refl, },
dsimp only at h₁, rcases h₁ with ⟨rfl, h₁⟩,
simp [time_iter],
obtain ⟨tl, htl, tl_le⟩ : ∃ tl ∈ t s, tl ≤ J,
{ rw mem_eval at h₂, rcases h₂ with ⟨_, h₂⟩, simp [part.eq_some_iff, with_time] at h₂,
rcases h₂ with ⟨⟨H, _⟩, ⟨tl, htl⟩⟩, use [tl, htl, H _ htl], },
refine ⟨tf + tl, ⟨⟨tf, s, _, ⟨tl, htl, by ac_refl⟩⟩, _⟩⟩,
{ apply eval_mono (with_time_mono _) h₂, simp, },
conv_rhs { rw [add_mul, add_comm], }, mono, { rw mul_comm, exact h₁, }, simpa using tl_le,
end
theorem fcommutes.to_time_frespects (H : fcommutes f₁ f₂ ftr) :
fcommutes (with_time f₁ (pfun.pure 1)) (with_time f₂ (pfun.pure 1)) (prod.map id ftr) :=
{ dom_of_dom := by { simpa [with_time, pfun.pure] using H.dom_of_dom, },
some_of_some :=
begin
simp [with_time, pfun.pure], rintro a₁ b₁ ⟨a₂, b₂⟩ x hx x rfl,
simp, rintro rfl rfl, refine ⟨some (ftr x), _, rfl, rfl⟩,
exact H.some_of_some hx,
end,
none_of_none := by simpa [with_time, pfun.pure] using H.none_of_none }
theorem eq_time_of_fcommutes (H : fcommutes f₁ f₂ ftr) (x : σ₁) :
time_iter f₁ (pfun.pure 1) x = time_iter f₂ (pfun.pure 1) (ftr x) :=
begin
have := H.to_time_frespects.to_frespects.eval_eq, simp [pfun.pure] at this,
simp [time_iter, this, pfun.pure],
end
theorem fcommutes.restrict (H : fcommutes f₁ f₂ ftr) (S : set σ₂) :
fcommutes (f₁.res_inter (ftr⁻¹' S)) (f₂.res_inter S) ftr :=
{ dom_of_dom := λ x, by { simp, rw ← H.to_frespects.dom_iff_dom, tauto, },
some_of_some := λ a b, by { simp, intros h₁ h₂, exact ⟨h₁, H.some_of_some h₂⟩, },
none_of_none := λ a, by { simp, intros h₁ h₂, exact ⟨h₁, H.none_of_none h₂⟩, } }
end track_with
end part_eval
|
//impulse: /give @p spawn_egg 1 0 {display:{Name:"Laser Spawner"},EntityTag:{id:"minecraft:chicken",CustomName:"MMH_LaserSpawner",Silent:1,NoGravity:1}}
//MMH_loadChunks()
///clone ~1 ~1 ~1 ~2 ~8 ~2 24 1 16
#MMH
repeat process MMH_laserSpawner {
if: /testfor @e[type=Chicken,name=MMH_LaserSpawner]
then {
MMH_loadChunks()
/execute @e[type=Chicken,name=MMH_LaserSpawner] ~ ~ ~ scoreboard players operation Rotation MMH_Rotation = @p MMH_Rotation
/scoreboard players test Rotation MMH_Rotation 0 0
conditional: /execute @e[type=Chicken,name=MMH_LaserSpawner] ~ ~ ~ clone 24 1 16 25 2 17 ~-3 ~-1 ~-1 masked
/scoreboard players test Rotation MMH_Rotation 1 1
conditional: /execute @e[type=Chicken,name=MMH_LaserSpawner] ~ ~ ~ clone 24 3 16 25 4 17 ~ ~-1 ~-3 masked
/scoreboard players test Rotation MMH_Rotation 2 2
conditional: /execute @e[type=Chicken,name=MMH_LaserSpawner] ~ ~ ~ clone 24 5 16 25 6 17 ~2 ~-1 ~ masked
/scoreboard players test Rotation MMH_Rotation 3 3
conditional: /execute @e[type=Chicken,name=MMH_LaserSpawner] ~ ~ ~ clone 24 7 16 25 8 17 ~-1 ~-1 ~2 masked
/tp @e[type=Chicken,name=MMH_LaserSpawner] ~ -100 ~
}
}
//impulse: /summon area_effect_cloud ~3 ~1 ~1 {CustomName:"ACV_Laser",Rotation:[-90.0f,0.0f],Duration:2147483647}
//impulse: /summon area_effect_cloud ~-1 ~1 ~3 {CustomName:"ACV_Laser",Rotation:[0.0f,0.0f],Duration:2147483647}
//impulse: /summon area_effect_cloud ~-3 ~1 ~-1 {CustomName:"ACV_Laser",Rotation:[90.0f,0.0f],Duration:2147483647}
//impulse: /summon area_effect_cloud ~1 ~1 ~-3 {CustomName:"ACV_Laser",Rotation:[180.0f,0.0f],Duration:2147483647}
|
lemmas prime_imp_power_coprime_int = prime_imp_power_coprime[where ?'a = int]
|
# Virtual Offset
# ==============
primitive type VirtualOffset 64 end
function Base.convert(::Type{VirtualOffset}, x::UInt64)
return reinterpret(VirtualOffset, x)
end
function Base.convert(::Type{UInt64}, x::VirtualOffset)
return reinterpret(UInt64, x)
end
"""
VirtualOffset(block_offset::Integer, inblock_offset::Integer)
Create a virtual file offset from `block_offset` and `inblock_offset`.
`block_offset` is an offset pointing to the beginning position of a BGZF block
in a BGZF file and `inblock_offset` is an offset pointing to the beginning
position of binary data within an uncompressed BGZF block. These values are
zero-based and their valid ranges are [0, 1 << 48) and [0, 1 << 16),
respectively.
"""
function VirtualOffset(block_offset::Integer, inblock_offset::Integer)
if !(0 ≤ block_offset < (1 << 48))
throw(ArgumentError("block file offset must be in [0, $(1 << 48))"))
elseif !(0 ≤ inblock_offset < (1 << 16))
throw(ArgumentError("in-block offset must be in [0, $(1 << 16))"))
end
return convert(VirtualOffset, (UInt64(block_offset) << 16) | UInt64(inblock_offset))
end
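# Illustrative usage (added example; the concrete numbers are hypothetical): the block offset
# occupies the upper 48 bits and the in-block offset the lower 16 bits, so
#     vo = VirtualOffset(1024, 4)
#     convert(UInt64, vo) == (UInt64(1024) << 16) | 0x0004    # true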
function Base.isless(x::VirtualOffset, y::VirtualOffset)
return isless(convert(UInt64, x), convert(UInt64, y))
end
# NOTE: This doesn't check the valid range of virtual offset.
function Base.:+(voffset::VirtualOffset, x::Integer)
return convert(VirtualOffset, convert(UInt64, voffset) + UInt64(x))
end
function Base.getindex(voffset::VirtualOffset, i::Integer)
return offsets(voffset)[i]
end
function offsets(voffset::VirtualOffset)
x = convert(UInt64, voffset)
return x >> 16, x & 0xffff
end
function Base.show(io::IO, voffset::VirtualOffset)
block_offset, inblock_offset = offsets(voffset)
print(io, summary(voffset), "(", block_offset, ", ", inblock_offset, ")")
end
function Base.read(io::IO, ::Type{VirtualOffset})
return convert(VirtualOffset, read(io, UInt64))
end
function Base.write(io::IO, voffset::VirtualOffset)
return write(io, convert(UInt64, voffset))
end
|
section \<open>More on Polynomials\<close>
text \<open>This theory contains several results on content, gcd, primitive part, etc.
Moreover, there is a slightly improved code-equation for computing the gcd.\<close>
theory Missing_Polynomial_Factorial
imports "HOL-Computational_Algebra.Polynomial_Factorial"
Polynomial_Interpolation.Missing_Polynomial
begin
text \<open>Improved code equation for @{const gcd_poly_code}
which avoids computing the content twice.\<close>
lemma gcd_poly_code_code[code]: "gcd_poly_code p q =
(if p = 0 then normalize q else if q = 0 then normalize p else let
c1 = content p;
c2 = content q;
p' = map_poly (\<lambda> x. x div c1) p;
q' = map_poly (\<lambda> x. x div c2) q
in smult (gcd c1 c2) (gcd_poly_code_aux p' q'))"
unfolding gcd_poly_code_def Let_def primitive_part_def by simp
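(* Added remark (editorial, not part of the original theory): in contrast to unfolding
   gcd_poly_code_def directly, the equation above binds "content p" and "content q" once
   each and reuses them both for the primitive parts and for the final gcd factor, which
   is the improvement announced at the start of this theory. *)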
lemma gcd_smult: fixes f g :: "'a :: {factorial_ring_gcd,semiring_gcd_mult_normalize} poly"
defines cf: "cf \<equiv> content f"
and cg: "cg \<equiv> content g"
shows "gcd (smult a f) g = (if a = 0 \<or> f = 0 then normalize g else
smult (gcd a (cg div (gcd cf cg))) (gcd f g))"
proof (cases "a = 0 \<or> f = 0")
case False
let ?c = "content"
let ?pp = primitive_part
let ?ua = "unit_factor a"
let ?na = "normalize a"
define H where "H = gcd (?c f) (?c g)"
have "H dvd ?c f" unfolding H_def by auto
then obtain F where fh: "?c f = H * F" unfolding dvd_def by blast
from False have cf0: "?c f \<noteq> 0" by auto
hence H: "H \<noteq> 0" unfolding H_def by auto
from arg_cong[OF fh, of "\<lambda> f. f div H"] H have F: "F = ?c f div H" by auto
have "H dvd ?c g" unfolding H_def by auto
then obtain G where gh: "?c g = H * G" unfolding dvd_def by blast
from arg_cong[OF gh, of "\<lambda> f. f div H"] H have G: "G = ?c g div H" by auto
have "coprime F G" using H unfolding F G H_def
using cf0 div_gcd_coprime by blast
have "is_unit ?ua" using False by simp
then have ua: "is_unit [: ?ua :]"
by (simp add: is_unit_const_poly_iff)
have "gcd (smult a f) g = smult (gcd (?na * ?c f) (?c g))
(gcd (smult ?ua (?pp f)) (?pp g))"
unfolding gcd_poly_decompose[of "smult a f"]
content_smult primitive_part_smult by simp
also have "smult ?ua (?pp f) = ?pp f * [: ?ua :]" by simp
also have "gcd \<dots> (?pp g) = gcd (?pp f) (?pp g)"
unfolding gcd_mult_unit1[OF ua] ..
also have "gcd (?na * ?c f) (?c g) = gcd ((?na * F) * H) (G * H)"
unfolding fh gh by (simp add: ac_simps)
also have "\<dots> = gcd (?na * F) G * normalize H" unfolding gcd_mult_right gcd.commute[of G]
by (simp add: normalize_mult)
also have "normalize H = H" by (metis H_def normalize_gcd)
finally
have "gcd (smult a f) g = smult (gcd (?na * F) G) (smult H (gcd (?pp f) (?pp g)))" by simp
also have "smult H (gcd (?pp f) (?pp g)) = gcd f g" unfolding H_def
by (rule gcd_poly_decompose[symmetric])
also have "gcd (?na * F) G = gcd (F * ?na) G" by (simp add: ac_simps)
also have "\<dots> = gcd ?na G"
using \<open>coprime F G\<close> by (simp add: gcd_mult_right_left_cancel ac_simps)
finally show ?thesis unfolding G H_def cg cf using False by simp
next
case True
hence "gcd (smult a f) g = normalize g" by (cases "a = 0", auto)
thus ?thesis using True by simp
qed
lemma gcd_smult_ex: assumes "a \<noteq> 0"
shows "\<exists> b. gcd (smult a f) g = smult b (gcd f g) \<and> b \<noteq> 0"
proof (cases "f = 0")
case True
thus ?thesis by (intro exI[of _ 1], auto)
next
case False
hence id: "(a = 0 \<or> f = 0) = False" using assms by auto
show ?thesis unfolding gcd_smult id if_False
by (intro exI conjI, rule refl, insert assms, auto)
qed
lemma primitive_part_idemp[simp]:
fixes f :: "'a :: {semiring_gcd,normalization_semidom_multiplicative} poly"
shows "primitive_part (primitive_part f) = primitive_part f"
by (metis content_primitive_part[of f] primitive_part_eq_0_iff primitive_part_prim)
lemma content_gcd_primitive:
"f \<noteq> 0 \<Longrightarrow> content (gcd (primitive_part f) g) = 1"
"f \<noteq> 0 \<Longrightarrow> content (gcd (primitive_part f) (primitive_part g)) = 1"
by (metis (no_types, lifting) content_dvd_contentI content_primitive_part gcd_dvd1 is_unit_content_iff)+
lemma content_gcd_content: "content (gcd f g) = gcd (content f) (content g)"
(is "?l = ?r")
proof -
let ?c = "content"
have "?l = normalize (gcd (?c f) (?c g)) *
?c (gcd (primitive_part f) (primitive_part g))"
unfolding gcd_poly_decompose[of f g] content_smult ..
also have "\<dots> = gcd (?c f) (?c g) *
?c (gcd (primitive_part f) (primitive_part g))" by simp
also have "\<dots> = ?r" using content_gcd_primitive[of f g]
by (metis (no_types, lifting) content_dvd_contentI content_eq_zero_iff
content_primitive_part gcd_dvd2 gcd_eq_0_iff is_unit_content_iff mult_cancel_left1)
finally show ?thesis .
qed
lemma gcd_primitive_part:
"gcd (primitive_part f) (primitive_part g) = normalize (primitive_part (gcd f g))"
proof(cases "f = 0")
case True
show ?thesis unfolding gcd_poly_decompose[of f g] gcd_0_left primitive_part_0 True
by (simp add: associatedI primitive_part_dvd_primitive_partI)
next
case False
have "normalize 1 = normalize (unit_factor (gcd (content f) (content g)))"
by (simp add: False)
then show ?thesis unfolding gcd_poly_decompose[of f g]
by (metis (no_types) Polynomial.normalize_smult content_gcd_primitive(1)[OF False] content_times_primitive_part normalize_gcd primitive_part_smult)
qed
lemma primitive_part_gcd: "primitive_part (gcd f g)
= unit_factor (gcd f g) * gcd (primitive_part f) (primitive_part g)"
unfolding gcd_primitive_part
by (metis (no_types, lifting)
content_times_primitive_part gcd.normalize_idem mult_cancel_left2 mult_smult_left
normalize_eq_0_iff normalize_mult_unit_factor primitive_part_eq_0_iff
smult_content_normalize_primitive_part unit_factor_mult_normalize)
lemma primitive_part_normalize:
fixes f :: "'a :: {semiring_gcd,idom_divide,normalization_semidom_multiplicative} poly"
shows "primitive_part (normalize f) = normalize (primitive_part f)"
proof (cases "f = 0")
case True
thus ?thesis by simp
next
case False
have "normalize (content (normalize (primitive_part f))) = 1"
using content_primitive_part[OF False] content_dvd content_const
content_dvd_contentI dvd_normalize_iff is_unit_content_iff by (metis (no_types))
then have "content (normalize (primitive_part f)) = 1" by fastforce
then have "content (normalize f) = 1 * content f"
by (metis (no_types) content_smult mult.commute normalize_content
smult_content_normalize_primitive_part)
then have "content f = content (normalize f)"
by simp
then show ?thesis unfolding smult_content_normalize_primitive_part[of f,symmetric]
by (metis (no_types) False content_times_primitive_part mult.commute mult_cancel_left
mult_smult_right smult_content_normalize_primitive_part)
qed
lemma length_coeffs_primitive_part[simp]: "length (coeffs (primitive_part f)) = length (coeffs f)"
proof (cases "f = 0")
case False
hence "length (coeffs f) \<noteq> 0" "length (coeffs (primitive_part f)) \<noteq> 0" by auto
thus ?thesis using degree_primitive_part[of f, unfolded degree_eq_length_coeffs] by linarith
qed simp
lemma degree_unit_factor[simp]: "degree (unit_factor f) = 0"
by (simp add: monom_0 unit_factor_poly_def)
lemma degree_normalize[simp]: "degree (normalize f) = degree f"
proof (cases "f = 0")
case False
have "degree f = degree (unit_factor f * normalize f)" by simp
also have "\<dots> = degree (unit_factor f) + degree (normalize f)"
by (rule degree_mult_eq, insert False, auto)
finally show ?thesis by simp
qed simp
lemma content_iff: "x dvd content p \<longleftrightarrow> (\<forall> c \<in> set (coeffs p). x dvd c)"
by (simp add: content_def dvd_gcd_list_iff)
definition primitive where
"primitive f \<longleftrightarrow> (\<forall>x. (\<forall>y \<in> set (coeffs f). x dvd y) \<longrightarrow> x dvd 1)"
lemma primitiveI:
assumes "(\<And>x. (\<And>y. y \<in> set (coeffs f) \<Longrightarrow> x dvd y) \<Longrightarrow> x dvd 1)"
shows "primitive f" by (insert assms, auto simp: primitive_def)
lemma primitiveD:
assumes "primitive f"
shows "(\<And>y. y \<in> set (coeffs f) \<Longrightarrow> x dvd y) \<Longrightarrow> x dvd 1"
by (insert assms, auto simp: primitive_def)
lemma not_primitiveE:
assumes "\<not> primitive f"
and "\<And>x. (\<And>y. y \<in> set (coeffs f) \<Longrightarrow> x dvd y) \<Longrightarrow> \<not> x dvd 1 \<Longrightarrow> thesis"
shows thesis by (insert assms, auto simp: primitive_def)
lemma primitive_iff_content_eq_1[simp]:
fixes f :: "'a :: semiring_gcd poly"
shows "primitive f \<longleftrightarrow> content f = 1"
proof(intro iffI primitiveI)
fix x
assume "(\<And>y. y \<in> set (coeffs f) \<Longrightarrow> x dvd y)"
from gcd_list_greatest[of "coeffs f", OF this]
have "x dvd content f" by (simp add: content_def)
also assume "content f = 1"
finally show "x dvd 1".
next
assume "primitive f"
from primitiveD[OF this list_gcd[of _ "coeffs f"], folded content_def]
show "content f = 1" by simp
qed
lemma primitive_prod_list:
fixes fs :: "'a :: {factorial_semiring,semiring_Gcd,normalization_semidom_multiplicative} poly list"
assumes "primitive (prod_list fs)" and "f \<in> set fs" shows "primitive f"
proof (insert assms, induct fs arbitrary: f)
case (Cons f' fs)
from Cons.prems
have "is_unit (content f' * content (prod_list fs))" by (auto simp: content_mult)
from this[unfolded is_unit_mult_iff]
have "content f' = 1" and "content (prod_list fs) = 1" by auto
moreover from Cons.prems have "f = f' \<or> f \<in> set fs" by auto
ultimately show ?case using Cons.hyps[of f] by auto
qed auto
lemma irreducible_imp_primitive:
fixes f :: "'a :: {idom,semiring_gcd} poly"
assumes irr: "irreducible f" and deg: "degree f \<noteq> 0" shows "primitive f"
proof (rule ccontr)
assume not: "\<not> ?thesis"
then have "\<not> [:content f:] dvd 1" by simp
moreover have "f = [:content f:] * primitive_part f" by simp
note Factorial_Ring.irreducibleD[OF irr this]
ultimately
have "primitive_part f dvd 1" by auto
from this[unfolded poly_dvd_1] have "degree f = 0" by auto
with deg show False by auto
qed
lemma irreducible_primitive_connect:
fixes f :: "'a :: {idom,semiring_gcd} poly"
assumes cf: "primitive f" shows "irreducible\<^sub>d f \<longleftrightarrow> irreducible f" (is "?l \<longleftrightarrow> ?r")
proof
assume l: ?l show ?r
proof(rule ccontr, elim not_irreducibleE)
from l have deg: "degree f > 0" by (auto dest: irreducible\<^sub>dD)
from cf have f0: "f \<noteq> 0" by auto
then show "f = 0 \<Longrightarrow> False" by auto
show "f dvd 1 \<Longrightarrow> False" using deg by (auto simp:poly_dvd_1)
fix a b assume fab: "f = a * b" and a1: "\<not> a dvd 1" and b1: "\<not> b dvd 1"
then have af: "a dvd f" and bf: "b dvd f" by auto
with f0 have a0: "a \<noteq> 0" and b0: "b \<noteq> 0" by auto
from irreducible\<^sub>dD(2)[OF l, of a] af dvd_imp_degree_le[OF af f0]
have "degree a = 0 \<or> degree a = degree f"
by (metis degree_smult_le irreducible\<^sub>d_dvd_smult l le_antisym Nat.neq0_conv)
then show False
proof(elim disjE)
assume "degree a = 0"
then obtain c where ac: "a = [:c:]" by (auto dest: degree0_coeffs)
from fab[unfolded ac] have "c dvd content f" by (simp add: content_iff coeffs_smult)
with cf have "c dvd 1" by simp
then have "a dvd 1" by (auto simp: ac)
with a1 show False by auto
next
assume dega: "degree a = degree f"
with f0 degree_mult_eq[OF a0 b0] fab have "degree b = 0" by (auto simp: ac_simps)
then obtain c where bc: "b = [:c:]" by (auto dest: degree0_coeffs)
from fab[unfolded bc] have "c dvd content f" by (simp add: content_iff coeffs_smult)
with cf have "c dvd 1" by simp
then have "b dvd 1" by (auto simp: bc)
with b1 show False by auto
qed
qed
next
assume r: ?r
show ?l
proof(intro irreducible\<^sub>dI)
show "degree f > 0"
proof (rule ccontr)
assume "\<not>degree f > 0"
then obtain f0 where f: "f = [:f0:]" by (auto dest: degree0_coeffs)
from cf[unfolded this] have "normalize f0 = 1" by auto
then have "f0 dvd 1" by (unfold normalize_1_iff)
with r[unfolded f irreducible_const_poly_iff] show False by auto
qed
next
fix g h assume deg_g: "degree g > 0" and deg_gf: "degree g < degree f" and fgh: "f = g * h"
with r have "g dvd 1 \<or> h dvd 1" by auto
with deg_g have "degree h = 0" by (auto simp: poly_dvd_1)
with deg_gf[unfolded fgh] degree_mult_eq[of g h] show False by (cases "g = 0 \<or> h = 0", auto)
qed
qed
lemma deg_not_zero_imp_not_unit:
fixes f:: "'a::{idom_divide,semidom_divide_unit_factor} poly"
assumes deg_f: "degree f > 0"
shows "\<not> is_unit f"
proof -
have "degree (normalize f) > 0"
using deg_f degree_normalize by auto
hence "normalize f \<noteq> 1"
by fastforce
thus "\<not> is_unit f" using normalize_1_iff by auto
qed
lemma content_pCons[simp]: "content (pCons a p) = gcd a (content p)"
proof(induct p arbitrary: a)
case 0 show ?case by simp
next
case (pCons c p)
then show ?case by (cases "p = 0", auto simp: content_def cCons_def)
qed
lemma content_field_poly:
fixes f :: "'a :: {field,semiring_gcd} poly"
shows "content f = (if f = 0 then 0 else 1)"
by(induct f, auto simp: dvd_field_iff is_unit_normalize)
end
|
#' @param datatypeid Accepts a valid data type id or a vector or list of data
#' type ids. (optional)
#' @param locationid Accepts a valid location id or a vector or list of
#'   location ids. (optional)
#' @param sortfield The field to sort results by. Supports id, name, mindate,
#' maxdate, and datacoverage fields (optional)
#' @param sortorder Which order to sort by, asc or desc. Defaults to
#' asc (optional)
#' @param limit Defaults to 25, limits the number of results in the response.
#' Maximum is 1000 (optional)
#' @param offset Defaults to 0, used to offset the result list (optional)
#' @param ... Curl options passed on to \code{\link[crul]{HttpClient}}
#' (optional)
|
import category_theory.category.default
universes v u -- The order in this declaration matters: v often needs to be explicitly specified while u often can be omitted
namespace category_theory
variables (C : Type u) [category.{v} C]
--rewrite this
/-
# Category world
## Level 7: Composition of epimorphisms
Now we show that the composition of two epimorphisms produces another epimorphism.-/
/- Lemma
If $$f : X ⟶ Y$$ and $$g : Y ⟶ Z$$ are epimorphisms, then $$f ≫ g : X ⟶ Z$$ is an epimorphism.
-/
lemma epi_comp' {X Y Z : C} (f : X ⟶ Y) [epi f] (g : Y ⟶ Z) [epi g] : epi (f ≫ g) :=
begin
split,
intros Z h l hyp,
rw ← cancel_epi g,
rw ← cancel_epi f,
rw ← category.assoc,
rw ← category.assoc,
exact hyp,
end
end category_theory |
(*
Title : VDM mathematical toolkit, Sep 2021
Author: Leo Freitas
*)
theory VDMToolkit
imports
(* Include real fields, list and option type ordering *)
Complex_Main
"HOL-Library.List_Lexorder"
"HOL-Library.Option_ord"
"HOL-Library.LaTeXsugar"
begin
type_notation bool ("\<bool>")
type_notation nat ("\<nat>")
type_notation int ("\<int>")
type_notation rat ("\<rat>")
type_notation real ("\<real>")
(*****************************************************************)
section \<open> Basic types \<close>
type_synonym VDMNat = \<int>
type_synonym VDMNat1 = \<int>
type_synonym VDMInt = \<int>
type_synonym VDMRat = \<rat>
type_synonym VDMReal = \<real>
type_synonym VDMChar = char
definition
inv_VDMNat :: "\<int> \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_VDMNat n \<equiv> n \<ge> 0"
definition
inv_VDMNat1 :: "\<int> \<Rightarrow> \<bool>"
where
(*<*)[intro!]: (*>*)
"inv_VDMNat1 n \<equiv> n > 0"
definition
inv_True :: "'a \<Rightarrow> \<bool>"
where
[intro!]: "inv_True \<equiv> \<lambda> x . True"
definition
"inv_bool" :: "\<bool> \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_bool i \<equiv> inv_True i"
definition
inv_VDMChar :: "VDMChar \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_VDMChar c \<equiv> inv_True c"
definition
inv_VDMInt :: "\<int> \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_VDMInt i \<equiv> inv_True i"
definition
inv_VDMReal :: "\<real> \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_VDMReal r \<equiv> inv_True r"
definition
inv_VDMRat :: "\<rat> \<Rightarrow> \<bool>"
where
(*<*) [intro!]: (*>*)
"inv_VDMRat r \<equiv> inv_True r"
lemma l_inv_True_True[simp]: "inv_True r"
by (simp add: inv_True_def)
text \<open>VDM has div and mod but also rem for remainder. These are treated
differently depending on whether the values involved have different signs.
For now, we add these equivalences below, but might have to pay a price in proofs
later (i.e. TODO: add lemmas linking vdmdiv/rem to Isabelle's div/mod). \<close>
value " 7 div ( 3::\<int>) = 2"
value "-7 div (-3::\<int>) = 2"
value "-7 div ( 3::\<int>) = -3" (* in VDM this -2!*)
value " 7 div (-3::\<int>) = -3" (* in VDM this -2!*)
value "1 div (-2::\<int>) = -1" (* in VDM this is 0! *)
value "-1 div (2::\<int>) = -1" (* in VDM this is 0! *)
value " 7 mod ( 3::\<int>) = 1"
value "-7 mod (-3::\<int>) = -1"
value "-7 mod ( 3::\<int>) = 2"
value " 7 mod (-3::\<int>) = -2"
value "7 * (3::int)"
value "0 div (-3::\<int>) = 0"
lemma "\<lfloor>10.01323\<rfloor> = 10" apply (simp only: floor_eq_iff)
by (simp add: floor_eq_iff)
text \<open>VDM narrow expressions are tricky; but at least narrowing reals/rationals (the floor_ceiling class) to VDMInt is fine.\<close>
definition
vdm_narrow_real :: "('a::floor_ceiling) \<Rightarrow> VDMInt"
where
"vdm_narrow_real r \<equiv> \<lfloor>r\<rfloor>"
value "vdm_narrow_real (4.5::VDMRat)"
value "vdm_narrow_real (4.5::VDMReal)"
definition
vdm_div :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt" (infixl "vdmdiv" 70)
where
[intro!] :
"x vdmdiv y \<equiv>
(if ((x / y) < 0) then
-\<lfloor>\<bar>-x / y\<bar>\<rfloor>
else
\<lfloor>\<bar>x / y\<bar>\<rfloor>)"
lemma vdmdiv_div_ge0[simp] :
"x \<ge> 0 \<Longrightarrow> y \<ge> 0 \<Longrightarrow> x vdmdiv y = x div y"
unfolding vdm_div_def
apply (induct y) apply simp_all
by (metis divide_less_0_iff floor_divide_of_int_eq floor_less_zero floor_of_int floor_of_nat le_less_trans less_irrefl of_int_of_nat_eq of_nat_less_0_iff)
(*
apply (induct x)
apply simp_all
apply (induct y)
apply safe
apply (simp add: divide_less_0_iff)
apply (metis abs_of_nat floor_divide_of_int_eq of_int_of_nat_eq)
defer
using divide_pos_neg apply force
using [[show_types]]
nitpick
Nitpicking goal:
\<And>n na.
real (na::nat) / real_of_int (- int (Suc (n::nat))) < 0 \<Longrightarrow>
- \<lfloor>real na / \<bar>real_of_int (- int (Suc n))\<bar>\<rfloor> = int na div - int (Suc n)
Nitpick found a counterexample:
Skolem constants:
n = 1
na = 1
1 / -2 < 0 \<Longrightarrow>
0 = 1 div -2
value "(1::int) div -2 = -1"
value "\<lfloor>1 / (2::real)\<rfloor> = 0"
*)
definition
pre_vdm_div :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"pre_vdm_div x y \<equiv> y \<noteq> 0"
definition
post_vdm_div :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"post_vdm_div x y RESULT \<equiv>
(x \<ge> 0 \<and> y \<ge> 0 \<longrightarrow> RESULT \<ge> 0) \<and>
(x < 0 \<and> y < 0 \<longrightarrow> RESULT \<ge> 0) \<and>
((x < 0 \<or> y < 0) \<and> \<not>(x < 0 \<and> y < 0) \<longrightarrow> RESULT < 0)"
definition
vdm_mod :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt" (infixl "vdmmod" 70)
where
[intro!] :
"x vdmmod y \<equiv> x - y * \<lfloor>x / y\<rfloor>"
lemma vdmmod_mod_ge0[simp] :
"y \<ge> 0 \<Longrightarrow> x vdmmod y = x mod y"
unfolding vdm_mod_def
apply (induct y) apply simp_all
by (metis floor_divide_of_int_eq minus_div_mult_eq_mod mult.commute of_int_of_nat_eq)
definition
pre_vdm_mod :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"pre_vdm_mod x y \<equiv> y \<noteq> 0"
definition
post_vdm_mod :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"post_vdm_mod x y RESULT \<equiv>
(y \<ge> 0 \<longrightarrow> RESULT \<ge> 0) \<and>
(y < 0 \<longrightarrow> RESULT < 0)"
definition
vdm_rem :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt" (infixl "vdmrem" 70)
where
[intro!] :
"x vdmrem y \<equiv> x - y * (x vdmdiv y)"
definition
pre_vdm_rem :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"pre_vdm_rem x y \<equiv> y \<noteq> 0"
value "((1::nat),(2::nat),(3::nat))"
definition
post_vdm_rem :: "VDMInt \<Rightarrow> VDMInt \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"post_vdm_rem x y RESULT \<equiv>
(x \<ge> 0 \<longrightarrow> RESULT \<ge> 0) \<and>
(x < 0 \<longrightarrow> RESULT < 0)"
value " 7 vdmdiv ( 3::\<int>) = 2"
value "-7 vdmdiv (-3::\<int>) = 2"
value "-7 vdmdiv ( 3::\<int>) = -2" (* in VDM this -2!*)
value " 7 vdmdiv (-3::\<int>) = -2"
value " 7 vdmmod ( 3::\<int>) = 1"
value "-7 vdmmod (-3::\<int>) = -1"
value "-7 vdmmod ( 3::\<int>) = 2"
value " 7 vdmmod (-3::\<int>) = -2"
value " 7 vdmrem ( 3::\<int>) = 1"
value "-7 vdmrem (-3::\<int>) = -1"
value "-7 vdmrem ( 3::\<int>) = -1"
value " 7 vdmrem (-3::\<int>) = 1"
text \<open>VDM has the ** operator for numbers, which is Math.pow, and accepts
non-integer exponents. For Isabelle, we have ^ for nat, and powr for a subset
of the reals (i.e. real_normed_algebra_1+banach; natural logarithm exponentiation).
This assumes that the parameters involved will be of similar nature. \<close>
find_theorems "_ _ (_::real)" name:powr
lemma "4 powr (1/(2::int)) = 2" by simp
definition
vdm_pow :: "'a::ln \<Rightarrow> 'a::ln \<Rightarrow> 'a::ln" (infixl "vdmpow" 80)
where
[intro!]: "x vdmpow y \<equiv> x powr y"
definition
pre_vdm_pow :: "'a::ln \<Rightarrow> 'a::ln \<Rightarrow> \<bool>"
where
"pre_vdm_pow x y \<equiv> True"
definition
post_vdm_pow_post :: "'a::ln \<Rightarrow> 'a::ln \<Rightarrow> 'a::ln \<Rightarrow> \<bool>"
where
"post_vdm_pow_post x y RESULT \<equiv> True"
text \<open>For floor and abs, we just use Isabelle's. Note that in VDM abs of int
will return int, so this will entail more complicated translations. \<close>
find_theorems "_ (_::'a list list)" name:concat
definition
vdm_floor :: "VDMReal \<Rightarrow> VDMInt"
where
[intro!]: "vdm_floor x \<equiv> \<lfloor>x\<rfloor>"
definition
post_vdm_floor :: "VDMReal \<Rightarrow> VDMInt \<Rightarrow> \<bool>"
where
"post_vdm_floor x RESULT \<equiv>
of_int RESULT \<le> x \<and> x < of_int (RESULT + 1)"
(* same as the floor_correct axiom of Archimedian_Field*)
definition
vdm_abs :: "('a::{zero,abs,ord}) \<Rightarrow> ('a::{zero,abs,ord})"
where
[intro!]: "vdm_abs x \<equiv> \<bar>x\<bar>"
definition
post_vdm_abs :: "('a::{zero,abs,ord}) \<Rightarrow> ('a::{zero,abs,ord}) \<Rightarrow> \<bool>"
where
"post_vdm_abs x RESULT \<equiv> RESULT \<ge> 0" (*inv_VDMNat RESULT"*)
subsection \<open>VDM tokens\<close>
text
\<open>VDM tokens are like a record of parametric type (i.e. you can
have anything inside a mk_token(x) expression, akin to a VDM record
like Token :: token : ?). Isabelle does not allow parametric records, so
we chose to use datatypes instead. \<close>
datatype 'a VDMToken = Token 'a
definition
inv_VDMToken :: "'a VDMToken \<Rightarrow> \<bool>"
where
"inv_VDMToken t \<equiv> inv_True t"
definition
inv_VDMToken' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMToken \<Rightarrow> \<bool>"
where
"inv_VDMToken' inv_T t \<equiv> case t of Token a \<Rightarrow> inv_T a"
lemmas inv_VDMToken_defs = inv_VDMToken_def inv_True_def
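text \<open>For example (a small sketch): the wrapped invariant checks the token's payload.\<close>
value "inv_VDMToken' inv_VDMNat (Token (3::VDMNat))"
value "inv_VDMToken' inv_VDMNat (Token (-3::VDMNat))"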
(*****************************************************************)
section \<open> Sets \<close>
type_synonym 'a VDMSet = "'a set"
type_synonym 'a VDMSet1 = "'a set"
definition
inv_VDMSet :: "'a VDMSet \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSet s \<equiv> finite s"
lemma l_invVDMSet_finite_f: "inv_VDMSet s \<Longrightarrow> finite s"
using inv_VDMSet_def by auto
definition
inv_VDMSet1 :: "'a VDMSet1 \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSet1 s \<equiv> inv_VDMSet s \<and> s \<noteq> {}"
lemmas inv_VDMSet_defs = inv_VDMSet1_def
lemmas inv_VDMSet1_defs = inv_VDMSet1_def inv_VDMSet_def
definition
inv_SetElems :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSet \<Rightarrow> \<bool>"
where
"inv_SetElems einv s \<equiv> \<forall> e \<in> s . einv e"
lemma l_inv_SetElems_Cons[simp]: "(inv_SetElems f (insert a s)) = (f a \<and> (inv_SetElems f s))"
unfolding inv_SetElems_def
by auto
lemma l_inv_SetElems_Un[simp]: "(inv_SetElems f (S \<union> T)) = (inv_SetElems f S \<and> inv_SetElems f T)"
unfolding inv_SetElems_def
by auto
lemma l_inv_SetElems_Int[simp]: "(inv_SetElems f (S \<inter> T)) = (inv_SetElems f (S \<inter> T))"
unfolding inv_SetElems_def
by auto
lemma l_inv_SetElems_empty[simp]: "inv_SetElems f {}"
unfolding inv_SetElems_def by simp
lemma l_invSetElems_inv_True_True[simp]: "inv_SetElems inv_True r"
by (simp add: inv_SetElems_def)
text \<open> Added wrapped version of the definition so that we can translate
complex structured types (e.g. seq of seq of T, etc.). Parameter order matters
for partial instantiation (e.g. inv_VDMSet' (inv_VDMSet' inv_VDMNat) s).\<close>
definition
inv_VDMSet' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSet \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSet' einv s \<equiv> inv_VDMSet s \<and> inv_SetElems einv s"
definition
inv_VDMSet1' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSet1 \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSet1' einv s \<equiv> inv_VDMSet1 s \<and> inv_SetElems einv s"
lemmas inv_VDMSet'_defs = inv_VDMSet'_def inv_VDMSet_def inv_SetElems_def
lemmas inv_VDMSet1'_defs = inv_VDMSet1'_def inv_VDMSet1_defs inv_SetElems_def
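text \<open>For example (a sketch of how the wrapped invariants compose for nested types,
  assuming the default code setup for finite sets):\<close>
value "inv_VDMSet' inv_VDMNat {0, 1, (2::VDMNat)}"
value "inv_VDMSet' (inv_VDMSet' inv_VDMNat) {{0}, {1, (2::VDMNat)}}"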
definition
vdm_card :: "'a VDMSet \<Rightarrow> VDMNat"
where
"vdm_card s \<equiv> (if inv_VDMSet s then int (card s) else undefined)"
definition
pre_vdm_card :: "'a VDMSet \<Rightarrow> \<bool>"
where
[intro!]: "pre_vdm_card s \<equiv> inv_VDMSet s"
definition
post_vdm_card :: "'a VDMSet \<Rightarrow> VDMNat \<Rightarrow> \<bool>"
where
[intro!]: "post_vdm_card s RESULT \<equiv> pre_vdm_card s \<longrightarrow> inv_VDMNat RESULT"
lemmas vdm_card_defs = vdm_card_def inv_VDMSet_def
lemma "vdm_card {0,1,(2::int)} = 3"
unfolding vdm_card_def inv_VDMSet_def by simp
lemma l_vdm_card_finite[simp]: "finite s \<Longrightarrow> vdm_card s = int (card s)"
unfolding vdm_card_defs by simp
lemma l_vdm_card_range[simp]: "x \<le> y \<Longrightarrow> vdm_card {x .. y} = y - x + 1"
unfolding vdm_card_defs by simp
lemma l_vdm_card_positive[simp]:
"finite s \<Longrightarrow> 0 \<le> vdm_card s"
by simp
lemma l_vdm_card_VDMNat[simp]:
"finite s \<Longrightarrow> inv_VDMNat (vdm_card s)"
by (simp add: inv_VDMSet_def inv_VDMNat_def)
lemma l_vdm_card_non_negative[simp]:
"finite s \<Longrightarrow> s \<noteq> {} \<Longrightarrow> 0 < vdm_card s"
by (simp add: card_gt_0_iff)
theorem PO_feas_vdm_card:
"pre_vdm_card s \<Longrightarrow> post_vdm_card s (vdm_card s)"
by (simp add: inv_VDMNat_def inv_VDMSet_def post_vdm_card_def pre_vdm_card_def)
lemma l_vdm_card_isa_card[simp]:
"finite s \<Longrightarrow> card s \<le> i \<Longrightarrow> vdm_card s \<le> i"
by simp
lemma l_isa_card_inter_bound:
"finite T \<Longrightarrow> card T \<le> i \<Longrightarrow> card (S \<inter> T) \<le> i"
thm card_mono inf_le2 le_trans card_seteq Int_commute nat_le_linear
by (meson card_mono inf_le2 le_trans)
lemma l_vdm_card_inter_bound:
"finite T \<Longrightarrow> vdm_card T \<le> i \<Longrightarrow> vdm_card (S \<inter> T) \<le> i"
proof -
assume a1: "vdm_card T \<le> i"
assume a2: "finite T"
have f3: "\<forall>A Aa. ((card (A::'a set) \<le> card (Aa::'a set) \<or> \<not> vdm_card A \<le> vdm_card Aa) \<or> infinite A) \<or> infinite Aa"
by (metis (full_types) l_vdm_card_finite of_nat_le_iff)
{ assume "T \<inter> S \<noteq> T"
then have "vdm_card (T \<inter> S) \<noteq> vdm_card T \<and> T \<inter> S \<noteq> T \<or> vdm_card (T \<inter> S) \<le> i"
using a1 by presburger
then have "vdm_card (T \<inter> S) \<le> i"
using f3 a2 a1 by (meson card_seteq dual_order.trans inf_le1 infinite_super verit_la_generic) }
then show ?thesis
using a1 by (metis (no_types) Int_commute)
qed
text \<open> @TODO power set \<close>
(*****************************************************************)
section \<open> Sequences \<close>
type_synonym 'a VDMSeq = "'a list"
type_synonym 'a VDMSeq1 = "'a list"
definition
inv_VDMSeq1 :: "'a VDMSeq1 \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSeq1 s \<equiv> s \<noteq> []"
text\<open> Sequences may have invariants within their inner type. \<close>
definition
inv_SeqElems :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
[intro!]: "inv_SeqElems einv s \<equiv> list_all einv s"
definition
inv_SeqElems0 :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"inv_SeqElems0 einv s \<equiv> \<forall> e \<in> (set s) . einv e"
text \<open> Isabelle's list @{term hd} and @{term tl} functions have the
same names as in VDM. Nevertheless, their result is defined even for empty lists.
We need to rule those out.
\<close>
definition
inv_VDMSeq' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSeq' einv s \<equiv> inv_SeqElems einv s"
definition
inv_VDMSeq1' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a VDMSeq1 \<Rightarrow> \<bool>"
where
[intro!]: "inv_VDMSeq1' einv s \<equiv> inv_VDMSeq' einv s \<and> inv_VDMSeq1 s"
lemmas inv_VDMSeq'_defs = inv_VDMSeq'_def inv_SeqElems_def
lemmas inv_VDMSeq1'_defs = inv_VDMSeq1'_def inv_VDMSeq'_defs inv_VDMSeq1_def
(*****************************************************************)
subsection \<open> Sequence operators specification \<close>
definition
len :: "'a VDMSeq \<Rightarrow> VDMNat"
where
[intro!]: "len l \<equiv> int (length l)"
definition
post_len :: "'a VDMSeq \<Rightarrow> VDMNat \<Rightarrow> \<bool>"
where
"post_len s R \<equiv> inv_VDMNat(R)"
definition
elems :: "'a VDMSeq \<Rightarrow> 'a VDMSet"
where
[intro!]: "elems l \<equiv> set l"
text \<open> Be careful with representation differences:
VDM lists are 1-based, whereas Isabelle lists
are 0-based. This function returns {0,1,2}
for the sequence [A, B, C] instead of {1,2,3}. \<close>
definition
inds0 :: "'a VDMSeq \<Rightarrow> VDMNat set"
where
"inds0 l \<equiv> {0 ..< len l}"
value "inds0 [A, B, C]"
(* indexes are 0, 1, 2; VDM would give 1, 2, 3 *)
definition
inds :: "'a VDMSeq \<Rightarrow> VDMNat1 set"
where
[intro!]: "inds l \<equiv> {1 .. len l}"
definition
post_inds :: "'a VDMSeq \<Rightarrow> VDMNat1 set \<Rightarrow> \<bool>"
where
"post_inds l R \<equiv> (length l) = (card R)"
definition
inds_as_nat :: "'a VDMSeq \<Rightarrow> \<nat> set"
where
"inds_as_nat l \<equiv> {1 .. nat (len l)}"
text \<open> @{term applyList} plays with @{typ "'a option"} type instead of @{term undefined}. \<close>
definition
applyList :: "'a VDMSeq \<Rightarrow> \<nat> \<Rightarrow> 'a option"
where
"applyList l n \<equiv> (if (n > 0 \<and> int n \<le> len l) then
Some(l ! (n - (1::nat)))
else
None)"
text \<open> @{term applyVDMSeq} sticks with @{term undefined}. \<close>
definition
applyVDMSeq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> 'a" (infixl "$" 100)
where
"applyVDMSeq l n \<equiv> (if (inv_VDMNat1 n \<and> n \<le> len l) then
(l ! nat (n - 1))
else
undefined)"
(* TODO: fold these three into proper one *)
definition
applyVDMSubseq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> VDMNat1 \<Rightarrow> 'a VDMSeq" ("(1_ {_$$_})")
where
"applyVDMSubseq l i j \<equiv> (if (inv_VDMNat1 i \<and> inv_VDMNat1 j) then
nths l {nat i.. nat j}
else
undefined
)"
value "nths [1,2,(3::nat)] {2..3}"
value "[1,2,3::nat] {2$$3}"
definition
applyVDMSubseq' :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> VDMNat1 \<Rightarrow> 'a VDMSeq" ("_ $$ (1{_.._})") where
"s$${l..u} \<equiv> if inv_VDMNat1 l \<and> inv_VDMNat1 u \<and> (l \<le> u) then
nths s {(nat l)-1..(nat u)-1}
else
[]"
\<comment> \<open>Thanks to Tom Hayle for this generalised version\<close>
definition
extendedSubSeq :: "'a VDMSeq \<Rightarrow> VDMNat1 VDMSet \<Rightarrow> 'a VDMSeq"
where
"extendedSubSeq xs s \<equiv> nths xs {x::nat | x . x+1 \<in> s }"
(*lemma "s$${l..u} = subSeq s {l..u}" *)
(* negatives give funny result, as nat -4 = 0 and nat -1 = 0! *)
value "nths [A,B,C,D] {(nat (-1))..(nat (-4))}"
value "nths [A,B,C,D] {(nat (-4))..(nat (-1))}"
value "[A,B,C,D]$${-4..-1}"
value "[A,B,C,D]$${-1..-4}"
value "[A,B,C,D,E]$${4..1}"
value "[A,B,C,D,E]$${1..5}" (* 5-1+1*)
value "[A,B,C,D,E]$${2..5}" (* 5-2+1*)
value "[A,B,C,D,E]$${1..3}"
value "[A,B,C,D,E]$${0..2}"
value "[A,B,C,D,E]$${-1..2}"
value "[A,B,C,D,E]$${-10..20}"
value "[A,B,C,D,E]$${2..-1}"
value "[A,B,C,D,E]$${2..2}"
value "[A,B,C,D,E]$${0..1}"
value "len ([A,B,C,D,E]$${2..2})"
value "len ([A]$${2..2})"
value "card {(2::int)..2}"
value "[A,B,C,D,E]$${0..0}"
find_theorems "card {_.._}"
lemma l_vdmsubseq_empty[simp]:
"[] $$ {l..u} = []" unfolding applyVDMSubseq'_def by simp
lemma l_vdmsubseq_beyond[simp]:
"l > u \<Longrightarrow> s $$ {l..u} = []" unfolding applyVDMSubseq'_def by simp
\<comment> \<open> The nat conversion makes int a nat, but the subtraction of Suc 0 makes it int again! \<close>
lemma l_vdmsubseq_len_l0: "{i. i < length s \<and> nat l - Suc 0 \<le> i \<and> i \<le> nat u - Suc 0} =
{nat l - Suc 0..nat u - Suc 0} \<inter> {0..<(length s)}"
by (safe;simp)
lemma l_vdmsubseq_len_l1: "
{nat l - Suc (0::\<nat>)..nat u - Suc (0::\<nat>)} \<inter> {0::\<nat>..<length s} =
{int (nat l - Suc (0::\<nat>))..int (nat u - Suc (0::\<nat>))} \<inter> {0::\<int>..<int (length s)}"
by simp
lemma l_vdmsubseq_len[simp]:
"len (s $$ {(l::int)..u}) = (if inv_VDMNat1 l \<and> inv_VDMNat1 u \<and> (l \<le> u) then card ({(nat l) - Suc 0..(nat u) - Suc 0} \<inter> {0..<(len s)}) else 0)"
unfolding applyVDMSubseq'_def len_def min_def inv_VDMNat1_def
apply (simp add: length_nths l_vdmsubseq_len_l0)
apply (safe;simp) using[[show_types]]
apply (insert l_vdmsubseq_len_l1[of l u s])
apply (simp add: l_isa_card_inter_bound)
find_theorems "card (_ \<inter> _)"
oops
lemma "l \<le> u \<Longrightarrow> s \<noteq> [] \<Longrightarrow> len (s $$ {l..u}) = (if l \<le> 1 then min 0 (min u (len s)) else u - l + 1)"
unfolding applyVDMSubseq_def inv_VDMNat1_def len_def min_def
apply (simp add: length_nths;safe) nitpick
apply (subgoal_tac "{i. i < length s \<and> i \<le> nat u - Suc 0} = {0..u-1}")
apply (subgoal_tac "card {0..u - 1} = u", simp)
oops
lemma l_vdmsubseq_len[simp]:
"len (s $$ {l..u}) = (if (s = [] \<or> l > u \<or> l \<le> 0 \<or> u \<le> 0) then 0 else u - l + 1)"
unfolding applyVDMSubseq'_def inv_VDMNat1_def len_def
apply (simp)
apply (subgoal_tac "{i. i < length s \<and> nat l - Suc 0 \<le> i \<and> i \<le> nat u - Suc 0} = {nat l - Suc 0..nat u - Suc 0+1}")
apply simp_all
nitpick
oops
(*
s(i,...,j) =
1.........(len s)
i.......j = j - i + 1
i....j = j - i + 1
-1..0..1
i............j = j - 1 + 1
i.......j = j - 1 + 1
i...............j = len s - 1 + 1
i..j = j - i + 1 = 0
= (min j (len s) - max 1 i) + 1
"len (s $$ {l..u}) = (min 0 (max 1 ((min u (len s)) - (min l 1) + 1)))"
*)
lemma l1: "{i. i < length s \<and> nat l - Suc 0 \<le> i \<and> i \<le> nat u - Suc 0} =
(if l < u then {(nat l) - 1..(nat u) - 1} else {})"
apply (safe) nitpick oops
lemma l_vdmsubseq_len[simp]:
"len (s $$ {l..u}) = max 0 ((min (nat u) (len s) - max 1 (nat l)) + 1)"
unfolding applyVDMSubseq'_def inv_VDMNat1_def len_def
apply (simp add: length_nths;safe)
defer
apply simp
apply (simp ) (*add: l1) *)
nitpick oops
lemma l_vdmsubseq_ext_eq:
"inv_VDMNat1 l \<Longrightarrow> inv_VDMNat1 u \<Longrightarrow> s $$ {l..u} = (extendedSubSeq s {l..u})"
unfolding extendedSubSeq_def applyVDMSubseq'_def inv_VDMNat1_def
apply (simp;safe)
apply (subgoal_tac "{nat l - Suc 0..nat u - Suc 0} = {x. l \<le> int x + 1 \<and> int x + 1 \<le> u}")
apply (erule subst; simp)
apply (safe;simp)
apply linarith+
apply (subgoal_tac "{x. l \<le> int x + 1 \<and> int x + 1 \<le> u} = {}")
apply (erule ssubst,simp)
by auto
text \<open> VDM sequence concatenation \verb'l1 ^ l2' is just @{term "l1 @ l2"} \<close>
thm append_def
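text \<open>A trivial sanity check of that correspondence (a sketch):\<close>
value "[(1::nat), 2] @ [3]"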
lemmas applyVDMSeq_defs = applyVDMSeq_def inv_VDMNat1_def len_def
definition
pre_applyVDMSeq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> \<bool>"
where
"pre_applyVDMSeq xs i \<equiv> inv_VDMNat1 i \<and> i \<le> len xs" (*\<and> i \<in> inds xs?*)
definition
post_applyVDMSeq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> 'a \<Rightarrow> \<bool>"
where
"post_applyVDMSeq xs i R \<equiv> pre_applyVDMSeq xs i \<longrightarrow> R = xs $ i"
theorem PO_applyVDMSeq_fsb:
"\<forall> xs i . pre_applyVDMSeq xs i \<longrightarrow> post_applyVDMSeq xs i (xs$i)"
unfolding post_applyVDMSeq_def pre_applyVDMSeq_def by simp
definition
pre_applyVDMSubseq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> VDMNat1 \<Rightarrow> \<bool>"
where
"pre_applyVDMSubseq xs l u \<equiv> inv_VDMNat1 l \<and> inv_VDMNat1 u \<and> l \<le> u"
definition
post_applyVDMSubseq :: "'a VDMSeq \<Rightarrow> VDMNat1 \<Rightarrow> VDMNat1 \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"post_applyVDMSubseq xs l u R \<equiv> R = (if pre_applyVDMSubseq xs l u then (xs$${l..u}) else [])"
theorem PO_applyVDMSubseq_fsb:
"\<forall> xs i . pre_applyVDMSubseq xs l u \<longrightarrow> post_applyVDMSubseq xs l u (xs$${l..u})"
unfolding post_applyVDMSubseq_def pre_applyVDMSubseq_def by simp
definition
post_append :: "'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"post_append s t r \<equiv> r = s @ t"
lemmas VDMSeq_defs = elems_def inds_def applyVDMSeq_defs
lemma l_applyVDMSeq_inds[simp]:
"pre_applyVDMSeq xs i = (i \<in> inds xs)"
unfolding pre_applyVDMSeq_def inv_VDMNat1_def len_def inds_def
by auto
text \<open> Isabelle @{term hd} and @{term tl} are the same as in VDM \<close>
definition
pre_hd :: "'a VDMSeq \<Rightarrow> \<bool>"
where
"pre_hd s \<equiv> s \<noteq> []"
definition
post_hd :: "'a VDMSeq \<Rightarrow> 'a \<Rightarrow> \<bool>"
where
"post_hd s RESULT \<equiv> pre_hd s \<longrightarrow> (RESULT \<in> elems s \<or> RESULT = s$1)"
definition
pre_tl :: "'a VDMSeq \<Rightarrow> \<bool>"
where
"pre_tl s \<equiv> s \<noteq> []"
definition
post_tl :: "'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"post_tl s RESULT \<equiv> pre_tl s \<longrightarrow> elems RESULT \<subseteq> elems s"
definition
vdm_reverse :: "'a VDMSeq \<Rightarrow> 'a VDMSeq"
where
[intro!]: "vdm_reverse xs \<equiv> rev xs"
definition
post_vdm_reverse :: "'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"post_vdm_reverse xs R \<equiv> elems xs = elems R"
definition
conc :: "'a VDMSeq VDMSeq \<Rightarrow> 'a VDMSeq"
where
[intro!]: "conc xs \<equiv> concat xs"
definition
vdmtake :: "VDMNat \<Rightarrow> 'a VDMSeq \<Rightarrow> 'a VDMSeq"
where
"vdmtake n s \<equiv> (if inv_VDMNat n then take (nat n) s else [])"
definition
post_vdmtake :: "VDMNat \<Rightarrow> 'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>"
where
"post_vdmtake n s RESULT \<equiv>
len RESULT = min n (len s)
\<and> elems RESULT \<subseteq> elems s"
definition
seq_prefix :: "'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool>" ("(_/ \<sqsubseteq> _)" [51, 51] 50)
where
"s \<sqsubseteq> t \<equiv> (s = t) \<or> (s = []) \<or> (len s \<le> len t \<and> (\<exists> i \<in> inds t . s = vdmtake i t))"
definition
post_seq_prefix :: "'a VDMSeq \<Rightarrow> 'a VDMSeq \<Rightarrow> \<bool> \<Rightarrow> \<bool>"
where
"post_seq_prefix s t RESULT \<equiv>
RESULT \<longrightarrow> (elems s \<subseteq> elems t \<and> len s \<le> len t)"
(*****************************************************************)
subsection \<open> Sequence operators lemmas \<close>
lemma l_inv_VDMSet_finite[simp]:
"finite xs \<Longrightarrow> inv_VDMSet xs"
unfolding inv_VDMSet_def by simp
lemma l_inv_SeqElems_alt: "inv_SeqElems einv s = inv_SeqElems0 einv s"
by (simp add: elems_def inv_SeqElems0_def inv_SeqElems_def list_all_iff)
lemma l_inv_SeqElems_empty[simp]: "inv_SeqElems f []"
by (simp add: inv_SeqElems_def)
lemma l_inv_SeqElems_Cons: "(inv_SeqElems f (a#s)) = (f a \<and> (inv_SeqElems f s))"
unfolding inv_SeqElems_def elems_def by auto
lemma l_inv_SeqElems_Cons': "f a \<Longrightarrow> inv_SeqElems f s \<Longrightarrow> inv_SeqElems f (a#s)"
by (simp add: l_inv_SeqElems_Cons)
lemma l_inv_SeqElems_append: "(inv_SeqElems f (xs @ [x])) = (f x \<and> (inv_SeqElems f xs))"
unfolding inv_SeqElems_def elems_def by auto
lemma l_inv_SeqElems_append': "f x \<Longrightarrow> inv_SeqElems f xs \<Longrightarrow> inv_SeqElems f (xs @ [x])"
by (simp add: l_inv_SeqElems_append)
lemma l_invSeqElems_inv_True_True[simp]: "inv_SeqElems inv_True r"
by (simp add: inv_SeqElems_def rev_induct)
lemma l_len_nat1[simp]: "s \<noteq> [] \<Longrightarrow> 0 < len s"
unfolding len_def by simp
lemma l_len_append_single[simp]: "len(xs @ [x]) = 1 + len xs"
apply (induct xs)
apply simp_all
unfolding len_def by simp_all
lemma l_len_empty[simp]: "len [] = 0" unfolding len_def by simp
lemma l_len_cons[simp]: "len(x # xs) = 1 + len xs"
apply (induct xs)
unfolding len_def by simp_all
lemma l_elems_append[simp]: "elems (xs @ [x]) = insert x (elems xs)"
unfolding elems_def by simp
lemma l_elems_cons[simp]: "elems (x # xs) = insert x (elems xs)"
unfolding elems_def by simp
lemma l_elems_empty[simp]: "elems [] = {}" unfolding elems_def by simp
lemma l_inj_seq: "distinct s \<Longrightarrow> nat (len s) = card (elems s)"
by (induct s) (simp_all add: elems_def len_def) (* add: l_elems_cons *)
lemma l_elems_finite[simp]:
"finite (elems l)"
by (simp add: elems_def)
lemma l_inds_append[simp]: "inds (xs @ [x]) = insert (len (xs @ [x])) (inds xs)"
unfolding inds_def
by (simp add: atLeastAtMostPlus1_int_conv len_def)
lemma l_inds_cons[simp]: "inds (x # xs) = {1 .. (len xs + 1)}"
unfolding inds_def len_def
by simp
lemma l_len_within_inds[simp]: "s \<noteq> [] \<Longrightarrow> len s \<in> inds s"
unfolding len_def inds_def
apply (induct s)
by simp_all
lemma l_inds_empty[simp]: "inds [] = {}"
unfolding inds_def len_def by simp
lemma l_inds_as_nat_append: "inds_as_nat (xs @ [x]) = insert (length (xs @ [x])) (inds_as_nat xs)"
unfolding inds_as_nat_def len_def by auto
lemma l_applyVDM_len1: "s $ (len s + 1) = undefined"
unfolding applyVDMSeq_def len_def by simp
lemma l_applyVDM_zero[simp]: "s $ 0 = undefined"
unfolding applyVDMSeq_defs by simp
(* these goals are too specific; they are useful in specific situations *)
lemma l_applyVDM1: "(x # xs) $ 1 = x"
by (simp add: applyVDMSeq_defs)
lemma l_applyVDM2: "(x # xs) $ 2 = xs $ 1"
by (simp add: applyVDMSeq_defs)
(* generalise previous failure for a better matching goal: trade $ for ! *)
lemma l_applyVDM1_gen[simp]: "s \<noteq> [] \<Longrightarrow> s $ 1 = s ! 0"
by (induct s, simp_all add: applyVDMSeq_defs)
lemma l_applyVDMSeq_i[simp]: "i \<in> inds s \<Longrightarrow> s $ i = s ! nat(i - 1)"
unfolding applyVDMSeq_defs inds_def by simp
lemma l_applyVDM_cons_gt1empty: "i > 1 \<Longrightarrow> (x # []) $ i = undefined"
by (simp add: applyVDMSeq_defs)
lemma l_applyVDM_cons_gt1: "len xs > 0 \<Longrightarrow> i > 1 \<Longrightarrow> (x # xs) $ i = xs $ (i - 1)"
apply (simp add: applyVDMSeq_defs) (* again too complex; try avoiding the trade $ for ! again *)
apply (intro impI)
apply (induct xs rule: length_induct)
apply simp_all
by (smt nat_1 nat_diff_distrib)
lemma l_applyVDMSeq_defined: "s \<noteq> [] \<Longrightarrow> inv_SeqElems (\<lambda> x . x \<noteq> undefined) s \<Longrightarrow> s $ (len s) \<noteq> undefined"
unfolding applyVDMSeq_defs
apply (simp) (* add: l_len_nat1)*)
apply (cases "nat (int (length s) - 1)")
apply simp_all
apply (cases s)
apply simp_all
unfolding inv_SeqElems_def
apply simp
by (simp add: list_all_length)
(*thm ssubst[OF l_inv_SeqElems_alt]
apply (subst ssubst[OF l_inv_SeqElems_alt])*)
lemma l_applyVDMSeq_append_last:
"(ms @ [m]) $ (len (ms @ [m])) = m"
unfolding applyVDMSeq_defs
by (simp)
lemma l_applyVDMSeq_cons_last:
"(m # ms) $ (len (m # ms)) = (if ms = [] then m else ms $ (len ms))"
apply (simp)
unfolding applyVDMSeq_defs
by (simp add: nat_diff_distrib')
lemma l_inds_in_set:
"i \<in> inds s \<Longrightarrow> s$i \<in> set s"
unfolding inds_def applyVDMSeq_def inv_VDMNat1_def len_def
apply (simp,safe)
by (simp)
lemma l_inv_SeqElems_inds_inv_T:
"inv_SeqElems inv_T s \<Longrightarrow> i \<in> inds s \<Longrightarrow> inv_T (s$i)"
apply (simp add: l_inv_SeqElems_alt)
unfolding inv_SeqElems0_def
apply (erule_tac x="s$i" in ballE)
apply simp
using l_inds_in_set by blast
lemma l_inv_SeqElems_all:
"inv_SeqElems inv_T s = (\<forall> i \<in> inds s . inv_T (s$i))"
unfolding inv_SeqElems_def
apply (simp add: list_all_length)
unfolding inds_def len_def
apply (safe,simp, safe)
apply (erule_tac x="nat(i-1)" in allE)
apply simp
apply (erule_tac x="int n + 1" in ballE)
by simp+
lemma l_inds_upto: "(i \<in> inds s) = (i \<in> {1..len s})"
by (simp add: inds_def)
lemma l_vdmtake_take[simp]: "vdmtake n s = take n s"
unfolding vdmtake_def inv_VDMNat_def
by simp
lemma l_seq_prefix_append_empty[simp]: "s \<sqsubseteq> s @ []"
unfolding seq_prefix_def
by simp
lemma l_seq_prefix_id[simp]: "s \<sqsubseteq> s"
unfolding seq_prefix_def
by simp
lemma l_len_append[simp]: "len s \<le> len (s @ t)"
apply (induct t)
by (simp_all add: len_def)
lemma l_vdmtake_len[simp]: "vdmtake (len s) s = s"
unfolding vdmtake_def len_def inv_VDMNat_def by simp
lemma l_vdmtake_len_append[simp]: "vdmtake (len s) (s @ t) = s"
unfolding vdmtake_def len_def inv_VDMNat_def by simp
lemma l_vdmtake_append[simp]: "vdmtake (len s + len t) (s @ t) = (s @ t)"
apply (induct t)
apply simp_all
unfolding vdmtake_def len_def inv_VDMNat_def
by simp
value "vdmtake (1 + len [a,b,c]) ([a,b,c] @ [a])"
lemma l_seq_prefix_append[simp]: "s \<sqsubseteq> s @ t"
unfolding seq_prefix_def
apply (induct t)
apply simp+
apply (elim disjE)
apply (simp_all)
apply (cases s, simp)
apply (rule disjI2, rule disjI2)
apply (rule_tac x="len s" in bexI)
apply (metis l_vdmtake_len_append)
using l_len_within_inds apply blast
by (metis (full_types) atLeastAtMost_iff inds_def l_len_append l_len_within_inds l_vdmtake_len_append)
lemma l_elems_of_inds_of_nth:
"1 < j \<Longrightarrow> j < int (length s) \<Longrightarrow> s ! nat (j - 1) \<in> set s"
by simp
lemma l_elems_inds_found:
"x \<in> set s \<Longrightarrow> (\<exists> i . i < length s \<and> s ! i = x)"
(*apply (simp only: ListMem_iff[symmetric])*)
apply (induct s)
apply simp_all
apply safe
by auto
lemma l_elems_of_inds:
"(x \<in> elems s) = (\<exists> j . j \<in> inds s \<and> (s$j) = x)"
unfolding elems_def inds_def
apply (rule iffI)
unfolding applyVDMSeq_def len_def
apply (frule l_elems_inds_found)
apply safe
apply (rule_tac x="int(i)+1" in exI)
apply (simp add: inv_VDMNat1_def)
using inv_VDMNat1_def by fastforce
(*****************************************************************)
section \<open> Optional inner type invariant check \<close>
definition
inv_Option :: "('a \<Rightarrow> \<bool>) \<Rightarrow> 'a option \<Rightarrow> \<bool>"
where
[intro!]: "inv_Option inv_type v \<equiv> v \<noteq> None \<longrightarrow> inv_type (the v)"
lemma l_inv_option_Some[simp]:
"inv_Option inv_type (Some x) = inv_type x"
unfolding inv_Option_def
by simp
lemma l_inv_option_None[simp]:
"inv_Option inv_type None"
unfolding inv_Option_def
by simp
(*****************************************************************)
section \<open> Maps \<close>
(*type_synonym ('a, 'b) "VDMMap" = "'a \<rightharpoonup> 'b" (infixr "\<rightharpoonup>" 0)*)
text \<open>
In Isabelle, VDM maps can be declared by the @{text "\<rightharpoonup>"} operator (not @{text "\<Rightarrow>"})
(i.e. type 'right' and you will see the arrow on dropdown menu).
It represents a function to an optional result as follows:
VDM : map X to Y
Isabelle: @{text "X \<rightharpoonup> Y"}
which is the same as
Isabelle: @{text "X \<Rightarrow> Y option"}
where an optional type is like using nil in VDM (map X to [Y]).
That is, Isabelle makes the map total by mapping everything outside
the domain to None (or nil). In Isabelle
@{text "datatype 'a option = None | Some 'a"}
Below are some VDM functions for map domain/range restriction and filtering, used like <: and :> in VDM.
The use of some of these functions is one reason why maps are a bit more demanding to work with,
but it works fine. Given these are new definitions, "apply auto" won't finish proofs, as Isabelle
needs to know more (lemmas) about the new operators. \<close>
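text \<open>For example (a sketch): applying a map literal inside its domain yields Some,
  and outside it yields None.\<close>
value "[(1::int) \<mapsto> (10::int)] 1"
value "[(1::int) \<mapsto> (10::int)] 2"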
definition
inv_Map :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow>\<bool>"
where
[intro!]:
"inv_Map inv_Dom inv_Rng m \<equiv>
inv_VDMSet' inv_Dom (dom m) \<and>
inv_VDMSet' inv_Rng (ran m)"
definition
inv_Map1 :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> \<bool>"
where
[intro!]: "inv_Map1 inv_Dom inv_Ran m \<equiv>
inv_Map inv_Dom inv_Ran m \<and> m \<noteq> Map.empty"
(*vdm_card (dom m) > 0 \<and> is worse: more complicated for nothing*)
definition
inv_Inmap :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> \<bool>"
where
[intro!]: "inv_Inmap inv_Dom inv_Ran m \<equiv>
inv_Map inv_Dom inv_Ran m \<and> inj m"
lemmas inv_Map_defs = inv_Map_def inv_VDMSet'_defs
lemmas inv_Map1_defs = inv_Map1_def inv_Map_defs
lemmas inv_Inmap_defs = inv_Inmap_def inv_Map_defs inj_def
(* dom exists already *)
thm dom_def
lemma "inj m" unfolding inj_on_def apply simp oops
definition
rng :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'b VDMSet"
where
[intro!]: "rng m \<equiv> ran m"
lemmas rng_defs = rng_def ran_def
definition
dagger :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixl "\<dagger>" 100)
where
[intro!]: "f \<dagger> g \<equiv> f ++ g"
definition
munion :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixl "\<union>m" 90)
where
[intro!]: "f \<union>m g \<equiv> (if dom f \<inter> dom g = {} then f \<dagger> g else undefined)"
definition
dom_restr :: "'a set \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixr "\<triangleleft>" 110)
where
[intro!]: "s \<triangleleft> m \<equiv> m |` s"
(* same as VDM s <: m *)
definition
dom_antirestr :: "'a set \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixr "-\<triangleleft>" 110)
where
[intro!]: "s -\<triangleleft> m \<equiv> (\<lambda>x. if x : s then None else m x)"
(* same as VDM s <-: m *)
definition
rng_restr :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'b set \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixl "\<triangleright>" 105)
where
[intro!]: "m \<triangleright> s \<equiv> (\<lambda>x . if (\<exists> y. m x = Some y \<and> y \<in> s) then m x else None)"
(* same as VDM m :> s *)
definition
rng_antirestr :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'b set \<Rightarrow> ('a \<rightharpoonup> 'b)" (infixl "\<triangleright>-" 105)
where
[intro!]: "m \<triangleright>- s \<equiv> (\<lambda>x . if (\<exists> y. m x = Some y \<and> y \<in> s) then None else m x)"
(* same as VDM m :-> s *)
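text \<open>For example (a sketch; only the operators whose definitions are directly
  executable are evaluated here, since munion and the range restrictions rely on
  dom or unbounded quantification):\<close>
value "([(1::int) \<mapsto> (2::int), 2 \<mapsto> 3] \<dagger> [2 \<mapsto> 4]) 2"
value "({1} \<triangleleft> [(1::int) \<mapsto> (2::int), 2 \<mapsto> 3]) 2"
value "({1} -\<triangleleft> [(1::int) \<mapsto> (2::int), 2 \<mapsto> 3]) 2"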
definition
vdm_merge :: "('a \<rightharpoonup> 'b) VDMSet \<Rightarrow> ('a \<rightharpoonup> 'b)"
where
"vdm_merge mm \<equiv> undefined" (*TODO: (\<lambda> x . x \<in> \<Union> { dom mmi | mmi \<in> mm )} )"*)
definition
vdm_inverse :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('b \<rightharpoonup> 'a)"
where
"vdm_inverse m \<equiv> undefined" (*(\<lambda> x . if (x \<in> rng m) then (m x) else None)"*)
definition
map_subset :: "('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('b \<Rightarrow> 'b \<Rightarrow> \<bool>) \<Rightarrow> \<bool>" ("((_)/ \<subseteq>\<^sub>s (_)/, (_))" [0, 0, 50] 50)
where
"(m\<^sub>1 \<subseteq>\<^sub>s m\<^sub>2, subset_of) \<longleftrightarrow> (dom m\<^sub>1 \<subseteq> dom m\<^sub>2 \<and> (\<forall>a \<in> dom m\<^sub>1. subset_of (the(m\<^sub>1 a)) (the(m\<^sub>2 a))))"
text \<open> Map application is just function application, but the result is an optional type,
so it is up to the user to unpick the optional type with the @{term the} operator.
This means we should not actually reach undefined;
rather, we are handling undefinedness explicitly. That is because the value
remains comparable (see next lemma). In effect, if we ever reach undefined,
it means we have some partial function application outside its domain
somewhere within the rewriting chain. As one cannot reason about this
value, it can be seen as a flag for an error to be avoided.\<close>
definition
map_comp :: "('b \<rightharpoonup> 'c) \<Rightarrow> ('a \<rightharpoonup> 'b) \<Rightarrow> ('a \<rightharpoonup> 'c)" (infixl "\<circ>m" 55)
where
"f \<circ>m g \<equiv> (\<lambda> x . if x \<in> dom g then f (the (g x)) else None)"
(*****************************************************************)
subsection \<open>Map comprehension\<close>
text \<open>Isabelle maps are similar to VDM's, but with some significant differences worth observing.
If the filtering is not unique (i.e. the result is not a function), then the @{term "THE x . P x"} expression
might lead to (undefined) unexpected results. In Isabelle maps, repetition is equivalent to overriding,
so that @{lemma "[1::nat \<mapsto> 2, 1 \<mapsto> 3] 1 = Some 3" by simp}.
In various VDMToolkit definitions, we default to @{term undefined} in cases where the situation is out of hand;
hence, proofs will fail, and users will know that @{term undefined} being reached means some earlier problem has
occurred.
\<close>
text \<open>Type bound map comprehension cannot filter for type invariants, hence won't have @{term undefined} results.
This corresponds to the VDMSL expression
%
\begin{vdmsl}
{ domexpr(d) |-> rngexpr(d, r) | d:S, r: T & P(d, r) }
\end{vdmsl}
%
where the maplet expression can be just variables or functions over the domain/range input(s).
VDM also issues a proof obligation for type bound maps (i.e. avoid it please!) to ensure the resulting map is finite.
Concretely, the example below generates the corresponding proof obligation:
%
\begin{vdmsl}
ex: () -> map nat to nat
ex() == { x+y |-> 10 | x: nat, y in set {4,5,6} & x < 10 };
exists finmap1: map nat to (map (nat1) to (nat1)) &
forall x:nat, y in set {4, 5, 6} & (x < 10) =>
exists findex2 in set dom finmap1 &
finmap1(findex2) = {(x + y) |-> 10}
\end{vdmsl}
\<close>
definition
mapCompTypeBound :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> 'a) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<rightharpoonup> 'b)"
where
"mapCompTypeBound inv_S inv_T domexpr rngexpr pred \<equiv>
(\<lambda> dummy::'a .
if (\<exists> d r . inv_S d \<and> inv_T r \<and> dummy = domexpr d r \<and> r = rngexpr d r \<and> pred d r) then
Some (THE r . inv_T r \<and> (\<exists> d . dummy = domexpr d r \<and> r = rngexpr d r \<and> pred d r))
else
None
)"
value "[1::nat \<mapsto> 2::nat, 3 \<mapsto> 3] 10"
text \<open>Set bound map comprehension can filter the bound sets for their elements' invariants.
This corresponds to the VDMSL expression
%
\begin{vdmsl}
{ domexpr(d, r) |-> rngexpr(d, r) | d in set S, r in set T & pred(d, r) }
domexpr: S * T -> S
rngexpr: S * T -> T
pred : S * T -> bool
\end{vdmsl}
%
If the types of domexpr or rngexpr are different from S or T then this will not work!
%
If the filtering is not unique (i.e. the result is not a function), then the @{term "THE x . P x"} expression
might lead to (undefined) unexpected results. In Isabelle maps, repetition is equivalent to overriding,
so that @{lemma "[1::nat \<mapsto> 2::nat, 1 \<mapsto> 3] 1 = Some 3" by simp}.
\<close>
definition
mapCompSetBound :: "'a set \<Rightarrow> 'b set \<Rightarrow> ('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> 'a) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<rightharpoonup> 'b)"
where
"mapCompSetBound S T inv_S inv_T domexpr rngexpr pred \<equiv>
(\<lambda> dummy::'a .
\<comment> \<open>In fact you have to check the inv_Type of domexpr and rngexpr!!!\<close>
if inv_VDMSet' inv_S S \<and> inv_VDMSet' inv_T T then
if (\<exists> r \<in> T . \<exists> d \<in> S . dummy = domexpr d r \<and> r = rngexpr d r \<and> pred d r) then
Some (THE r . r \<in> T \<and> inv_T r \<and> (\<exists> d \<in> S . dummy = domexpr d r \<and> r = rngexpr d r \<and> pred d r))
else
\<comment> \<open>This is for map application outside its domain error, VDMJ 4061 \<close>
None
else
\<comment> \<open>This is for type invariant violation errors, VDMJ ???? @NB?\<close>
undefined
)"
text \<open>Identity functions to be used for the dom/rng expression functions for the case where they are variables.\<close>
definition
domid :: "'a \<Rightarrow> 'b \<Rightarrow> 'a"
where
"domid \<equiv> (\<lambda> d . (\<lambda> r . d))"
definition
rngid :: "'a \<Rightarrow> 'b \<Rightarrow> 'b"
where
"rngid \<equiv> (\<lambda> d . id)"
text \<open>Constant function to be used for the dom expression function for the case where it is a constant.\<close>
definition
domcnst :: "'a \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'a"
where
"domcnst v \<equiv> (\<lambda> d . (\<lambda> r . v))"
text \<open>Constant function to be used for the rng expression function for the case where it is a constant.\<close>
definition
rngcnst :: "'b \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> 'b"
where
"rngcnst v \<equiv> (\<lambda> d . (\<lambda> r . v))"
definition
truecnst :: "'a \<Rightarrow> 'b \<Rightarrow> \<bool>"
where
"truecnst \<equiv> (\<lambda> d . inv_True)"
definition
predcnst :: "\<bool> \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> \<bool>"
where
"predcnst p \<equiv> (\<lambda> d . (\<lambda> r . p))"
lemma domidI[simp]: "domid d r = d"
by (simp add: domid_def)
lemma rngidI[simp]: "rngid d r = r"
by (simp add: rngid_def)
lemma domcnstI[simp]: "domcnst v d r = v"
by (simp add: domcnst_def)
lemma rngcnstI[simp]: "rngcnst v d r = v"
by (simp add: rngcnst_def)
lemma predcnstI[simp]: "predcnst v d r = v"
by (simp add: predcnst_def)
lemma truecnstI[simp]: "truecnst d r"
by (simp add: truecnst_def)
lemmas maplet_defs = domid_def rngid_def rngcnst_def id_def truecnst_def inv_True_def
lemmas mapCompSetBound_defs = mapCompSetBound_def inv_VDMSet'_def inv_VDMSet_def maplet_defs rng_defs
lemmas mapCompTypeBound_defs = mapCompTypeBound_def maplet_defs rng_defs
(*========================================================================*)
section \<open> Lambda types \<close>
(*========================================================================*)
text \<open>Lambda definitions entail an implicit satisfiability proof obligation check
as part of their type invariant checks.
Because Isabelle lambdas are always curried, we need to also take this into
account. For example, "lambda x: nat, y: nat1 & x+y" will effectively become
@{term "(\<lambda> x . \<lambda> y . x + y)"}. Thus callers of this invariant check must
account for such currying when using more than one parameter in lambdas
(i.e. call this as @{term "inv_Lambda inv_Dom (inv_Lambda inv_Dom' inv_Ran) l"},
assuming the right invariant checks for the types of x and y and the result
are used).
\<close>
definition
inv_Lambda :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> \<bool>"
where
"inv_Lambda inv_Dom inv_Ran l \<equiv> (\<forall> d . inv_Dom d \<longrightarrow> inv_Ran (l d))"
definition
inv_Lambda' :: "('a \<Rightarrow> \<bool>) \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> \<bool>"
where
"inv_Lambda' inv_Dom inv_Ran l d \<equiv> inv_Dom d \<longrightarrow> inv_Ran (l d)"
(*========================================================================*)
section \<open> Is test and type coercions \<close>
(*========================================================================*)
subsection \<open> Basic type coercions \<close>
definition
is_VDMRealWhole :: "VDMReal \<Rightarrow> \<bool>"
where
"is_VDMRealWhole r \<equiv> r \<ge> 1 \<and> (r - real_of_int (vdm_narrow_real r)) = 0"
definition
vdmint_of_real :: "VDMReal \<rightharpoonup> VDMInt"
where
"vdmint_of_real r \<equiv> if is_VDMRealWhole r then Some (vdm_narrow_real r) else None"
definition
is_VDMRatWhole :: "VDMRat \<Rightarrow> \<bool>"
where
"is_VDMRatWhole r \<equiv> r \<ge> 1 \<and> (r - rat_of_int (vdm_narrow_real r)) = 0"
definition
vdmint_of_rat :: "VDMRat \<rightharpoonup> VDMInt"
where
"vdmint_of_rat r \<equiv> if is_VDMRatWhole r then Some (vdm_narrow_real r) else None"
subsection \<open> Structured type coercions \<close>
type_synonym ('a, 'b) VDMTypeCoercion = "'a \<rightharpoonup> 'b"
text \<open>A total VDM type coercion is one where every element in the type space of
interest is convertible under the given type coercion
(e.g., set of real = {1,2,3} into set of nat is total; whereas
set of real = {0.5,2,3} into set of nat is not total, given that 0.5 is not a nat).
\<close>
definition
total_coercion :: "'a VDMSet \<Rightarrow> ('a, 'b) VDMTypeCoercion \<Rightarrow> \<bool>"
where
"total_coercion space conv \<equiv> (\<forall> i \<in> space . conv i \<noteq> None)"
text \<open>To convert a VDM set s of type 'a into type 'b (e.g., set of real into set of nat),
it must be possible to convert every element of s under the given type coercion.
\<close>
definition
vdmset_of_t :: "('a, 'b) VDMTypeCoercion \<Rightarrow> ('a VDMSet, 'b VDMSet) VDMTypeCoercion"
where
"vdmset_of_t conv \<equiv>
(\<lambda> x . if total_coercion x conv then
Some { the(conv i) | i . i \<in> x \<and> conv i \<noteq> None }
else
None)"
text \<open>To convert a VDM seq s of type 'a into type 'b (e.g., seq of real into seq of nat),
it must be possible to convert every element of s under the given type coercion.
\<close>
definition
vdmseq_of_t :: "('a, 'b) VDMTypeCoercion \<Rightarrow> ('a VDMSeq, 'b VDMSeq) VDMTypeCoercion"
where
"vdmseq_of_t conv \<equiv>
(\<lambda> x . if total_coercion (elems x) conv then
Some [ the(conv i) . i \<leftarrow> x, conv i \<noteq> None ]
else
None)"
(* map coercion will be tricky because result d2 depends on dconv call, which needs x's d1!
definition
vdmmap_of_dr :: "('d1, 'd2) VDMTypeCoercion \<Rightarrow> ('r1, 'r2) VDMTypeCoercion \<Rightarrow> ('d1 \<rightharpoonup> 'r1, 'd2 \<rightharpoonup> 'r2) VDMTypeCoercion"
where
"vdmmap_of_dr dconv rconv \<equiv>
\<comment> \<open>x is a 'd1 \<rightharpoonup> 'r1, d is a 'd2 \<Rightarrow> 'r2; where dconv/rconv is applied throughout and succeeded\<close>
(\<lambda> x . if total_coercion (dom x) dconv \<and> total_coercion (rng x) rconv then
Some (\<lambda> d . if (\<exists> xd . d = dconv xd) then None else None)
else
None)"
*)
subsection \<open> Is tests \<close>
text \<open>"Successful" is expr test is simply a call to the test expression invariant\<close>
definition
isTest :: "'a \<Rightarrow> ('a \<Rightarrow> \<bool>) \<Rightarrow> \<bool>"
where
[intro!]: "isTest x inv_X \<equiv> inv_X x"
lemma l_isTestI[simp]: "isTest x inv_X = inv_X x"
by (simp add: isTest_def)
text \<open>A possibly failing is-expression test, up to a given type coercion.\<close>
definition
isTest' :: "'a \<Rightarrow> ('a, 'b) VDMTypeCoercion \<Rightarrow> ('b \<Rightarrow> \<bool>) \<Rightarrow> \<bool>"
where
[intro!]: "isTest' x conv inv_X \<equiv>
(case conv x of
None \<Rightarrow> False
| Some x \<Rightarrow> inv_X x)"
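text \<open>Editorial sketches (added) mirroring the is-test examples at the end of this theory:
      a direct test against @{term inv_VDMNat} and one through the trivial coercion
      @{term Some}.\<close>
lemma "isTest (3::VDMNat) inv_VDMNat"
  by (simp add: inv_VDMNat_def)
lemma "isTest' (3::VDMNat) Some inv_VDMNat"
  by (simp add: isTest'_def inv_VDMNat_def)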
(*========================================================================*)
section \<open> Set operators lemmas \<close>
(*========================================================================*)
lemma l_psubset_insert: "x \<notin> S \<Longrightarrow> S \<subset> insert x S"
by blast
lemma l_right_diff_left_dist: "S - (T - U) = (S - T) \<union> (S \<inter> U)"
by (metis Diff_Compl Diff_Int diff_eq)
thm Diff_Compl
Diff_Int
diff_eq
lemma l_diff_un_not_equal: "R \<subset> T \<Longrightarrow> T \<subseteq> S \<Longrightarrow> S - T \<union> R \<noteq> S"
by auto
(*========================================================================*)
section \<open> Map operators lemmas \<close>
(*========================================================================*)
lemma l_map_non_empty_has_elem_conv:
"g \<noteq> Map.empty \<longleftrightarrow> (\<exists> x . x \<in> dom g)"
by (metis domIff)
lemma l_map_non_empty_dom_conv:
"g \<noteq> Map.empty \<longleftrightarrow> dom g \<noteq> {}"
by (metis dom_eq_empty_conv)
lemma l_map_non_empty_ran_conv:
"g \<noteq> Map.empty \<longleftrightarrow> ran g \<noteq> {}"
by (metis empty_iff equals0I
fun_upd_triv option.exhaust
ranI ran_restrictD restrict_complement_singleton_eq)
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Domain restriction weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
(* Lemma: dom restriction set inter equiv [ZEVES-LEMMA] *)
lemma l_dom_r_iff: "dom(S \<triangleleft> g) = S \<inter> dom g"
by (metis Int_commute dom_restr_def dom_restrict)
(* Lemma: dom restriction set inter equiv [ZEVES-LEMMA] *)
lemma l_dom_r_subset: "(S \<triangleleft> g) \<subseteq>\<^sub>m g"
by (metis Int_iff dom_restr_def l_dom_r_iff map_le_def restrict_in)
(* Lemma: dom restriction set inter equiv [ZEVES-LEMMA] *)
lemma l_dom_r_accum: "S \<triangleleft> (T \<triangleleft> g) = (S \<inter> T) \<triangleleft> g"
by (metis Int_commute dom_restr_def restrict_restrict)
(* Lemma: dom restriction set inter equiv [ZEVES-LEMMA] *)
lemma l_dom_r_nothing: "{} \<triangleleft> f = Map.empty"
by (metis dom_restr_def restrict_map_to_empty)
(* Lemma: dom restriction set inter equiv [ZEVES-LEMMA] *)
lemma l_dom_r_empty: "S \<triangleleft> Map.empty = Map.empty"
by (metis dom_restr_def restrict_map_empty)
lemma l_dres_absorb: "UNIV \<triangleleft> m = m"
by (simp add: dom_restr_def map_le_antisym map_le_def)
lemma l_dom_r_nothing_empty: "S = {} \<Longrightarrow> S \<triangleleft> f = Map.empty"
by (metis l_dom_r_nothing)
(* FD: in specific dom subsumes application (over Some+None) [ZEVES-LEMMA] *)
(*
lemmX f_in_dom_r_apply_elem:
"l \<in> dom f \<and> l \<in> S \<Longrightarrow> ((S \<triangleleft> f) l) = (f l)"
unfolding dom_restr_def
by (cases "l\<in>S", auto)
*)
(* IJW: Simplified as doesn't need the l:dom f case *)
lemma f_in_dom_r_apply_elem: " x \<in> S \<Longrightarrow> ((S \<triangleleft> f) x) = (f x)"
by (metis dom_restr_def restrict_in)
lemma f_in_dom_r_apply_the_elem: "x \<in> dom f \<Longrightarrow> x \<in> S \<Longrightarrow> ((S \<triangleleft> f) x) = Some(the(f x))"
by (metis domIff f_in_dom_r_apply_elem option.collapse)
(* IJW: TODO: classify; rename. *)
lemma l_dom_r_disjoint_weakening: "A \<inter> B = {} \<Longrightarrow> dom(A \<triangleleft> f) \<inter> dom(B \<triangleleft> f) = {}"
by (metis dom_restr_def dom_restrict inf_bot_right inf_left_commute restrict_restrict)
(* IJW: TODO: classify; rename - refactor out for l_dom_r_iff? *)
lemma l_dom_r_subseteq: "S \<subseteq> dom f \<Longrightarrow> dom (S \<triangleleft> f) = S" unfolding dom_restr_def
by (metis Int_absorb1 dom_restrict)
(* IJW: TODO: classify; rename - refactor out for l_dom_r_subset? *)
lemma l_dom_r_dom_subseteq: "(dom ( S \<triangleleft> f)) \<subseteq> dom f"
unfolding dom_restr_def by auto
lemma l_the_dom_r: "x \<in> dom f \<Longrightarrow> x \<in> S \<Longrightarrow> the (( S \<triangleleft> f) x) = the (f x)"
by (metis f_in_dom_r_apply_elem)
lemma l_in_dom_dom_r: "x \<in> dom (S \<triangleleft> f) \<Longrightarrow> x \<in> S"
by (metis Int_iff l_dom_r_iff)
lemma l_dom_r_singleton: "x \<in> dom f \<Longrightarrow> ({x} \<triangleleft> f) = [x \<mapsto> the (f x)]"
unfolding dom_restr_def
by auto
lemma singleton_map_dom:
assumes "dom f = {x}" shows "f = [x \<mapsto> the (f x)]"
proof -
from assms obtain y where "f = [x \<mapsto> y]"
by (metis dom_eq_singleton_conv)
then have "y = the (f x)"
by (metis fun_upd_same option.sel)
thus ?thesis by (metis `f = [x \<mapsto> y]`)
qed
lemma l_relimg_ran_subset:
"ran (S \<triangleleft> m) \<subseteq> ran m"
by (metis (full_types) dom_restr_def ranI ran_restrictD subsetI)
lemma f_in_relimg_ran:
"y \<in> ran (S \<triangleleft> m) \<Longrightarrow> y \<in> ran m"
by (meson l_relimg_ran_subset subsetCE)
(* IJW: An experiment - not sure which are the best rules to choose! *)
lemmas restr_simps = l_dom_r_iff l_dom_r_accum l_dom_r_nothing l_dom_r_empty
f_in_dom_r_apply_elem l_dom_r_disjoint_weakening l_dom_r_subseteq
l_dom_r_dom_subseteq
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Domain anti restriction weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
(* FD: dom elem subsume dom ar *)
lemma f_in_dom_ar_subsume: "l \<in> dom (S -\<triangleleft> f) \<Longrightarrow> l \<in> dom f"
unfolding dom_antirestr_def
by (cases "l\<in>S", auto)
(* FD: in specific dom_ar cannot be what's filtered *)
lemma f_in_dom_ar_notelem: "l \<in> dom ({r} -\<triangleleft> f) \<Longrightarrow> l \<noteq> r"
unfolding dom_antirestr_def
by auto
(* FD: in specific dom_ar subsumes application (over Some) *)
lemma f_in_dom_ar_the_subsume:
"l \<in> dom (S -\<triangleleft> f) \<Longrightarrow> the ((S -\<triangleleft> f) l) = the (f l)"
unfolding dom_antirestr_def
by (cases "l\<in>S", auto)
(* FD: in specific dom_ar subsumes application (over Some+None) *)
lemma f_in_dom_ar_apply_subsume:
"l \<in> dom (S -\<triangleleft> f) \<Longrightarrow> ((S -\<triangleleft> f) l) = (f l)"
unfolding dom_antirestr_def
by (cases "l\<in>S", auto)
(* FD: in specific dom subsumes application (over Some+None) [ZEVES-LEMMA] *)
(*
lemmX f_in_dom_ar_apply_not_elem:
"l \<in> dom f \<and> l \<notin> S \<Longrightarrow> ((S -\<triangleleft> f) l) = (f l)"
unfolding dom_antirestr_def
by (cases "l\<in>S", auto)
*)
(* IJW: TODO: I had a more general lemma: *)
lemma f_in_dom_ar_apply_not_elem: "l \<notin> S \<Longrightarrow> (S -\<triangleleft> f) l = f l"
by (metis dom_antirestr_def)
(* FD: dom_ar subset dom [ZEVES-LEMMA] *)
lemma f_dom_ar_subset_dom:
"dom(S -\<triangleleft> f) \<subseteq> dom f"
unfolding dom_antirestr_def dom_def
by auto
(* Lemma: dom_ar as set different [ZEVES-LEMMA] *)
lemma l_dom_dom_ar:
"dom(S -\<triangleleft> f) = dom f - S"
unfolding dom_antirestr_def
by (smt Collect_cong domIff dom_def set_diff_eq)
(* Lemma: dom_ar accumulates to left [ZEVES-LEMMA] *)
lemma l_dom_ar_accum:
"S -\<triangleleft> (T -\<triangleleft> f) = (S \<union> T) -\<triangleleft> f"
unfolding dom_antirestr_def
by auto
(* Lemma: dom_ar subsumption [ZEVES-LEMMA] *)
lemma l_dom_ar_nothing:
"S \<inter> dom f = {} \<Longrightarrow> S -\<triangleleft> f = f"
unfolding dom_antirestr_def
apply (simp add: fun_eq_iff)
by (metis disjoint_iff_not_equal domIff)
(* NOTE: After finding fun_eq_iff, there is also map_le_antisym for maps!*)
(* Lemma: dom_ar nothing LHS [ZEVES-LEMMA] *)
lemma l_dom_ar_empty_lhs:
"{} -\<triangleleft> f = f"
by (metis Int_empty_left l_dom_ar_nothing)
(* Lemma: dom_ar nothing RHS [ZEVES-LEMMA] *)
lemma l_dom_ar_empty_rhs:
"S -\<triangleleft> Map.empty = Map.empty"
by (metis Int_empty_right dom_empty l_dom_ar_nothing)
(* Lemma: dom_ar all RHS is empty [ZEVES-LEMMA] *)
lemma l_dom_ar_everything:
"dom f \<subseteq> S \<Longrightarrow> S -\<triangleleft> f = Map.empty"
by (metis domIff dom_antirestr_def in_mono)
(* Lemma: dom_ar submap [ZEVES-LEMMA] *)
lemma l_map_dom_ar_subset: "S -\<triangleleft> f \<subseteq>\<^sub>m f"
by (metis domIff dom_antirestr_def map_le_def)
(* Lemma: dom_ar nothing RHS is f [ZEVES-LEMMA] *)
lemma l_dom_ar_none: "{} -\<triangleleft> f = f"
unfolding dom_antirestr_def
by (simp add: fun_eq_iff)
(* Lemma: dom_ar something RHS isn't f [ZEVES-LEMMA] *)
lemma l_map_dom_ar_neq: "S \<subseteq> dom f \<Longrightarrow> S \<noteq> {} \<Longrightarrow> S -\<triangleleft> f \<noteq> f"
apply (subst fun_eq_iff)
apply (insert ex_in_conv[of S])
apply simp
apply (erule exE)
unfolding dom_antirestr_def
apply (rule exI)
apply simp
apply (intro impI conjI)
apply simp_all
by (metis domIff set_mp)
lemma l_dom_rres_same_map_weaken:
"S = T \<Longrightarrow> (S -\<triangleleft> f) = (T -\<triangleleft> f)" by simp
(* IJW: TODO classify; rename *)
lemma l_dom_ar_not_in_dom:
assumes *: "x \<notin> dom f"
shows "x \<notin> dom (s -\<triangleleft> f)"
by (metis * domIff dom_antirestr_def)
(* IJW: TODO: classify; rename *)
lemma l_dom_ar_not_in_dom2: "x \<in> F \<Longrightarrow> x \<notin> dom (F -\<triangleleft> f)"
by (metis domIff dom_antirestr_def)
lemma l_dom_ar_notin_dom_or: "x \<notin> dom f \<or> x \<in> S \<Longrightarrow> x \<notin> dom (S -\<triangleleft> f)"
by (metis Diff_iff l_dom_dom_ar)
(* IJW: TODO: classify - shows conditions for being in antri restr dom *)
lemma l_in_dom_ar: "x \<notin> F \<Longrightarrow> x \<in> dom f \<Longrightarrow> x \<in> dom (F -\<triangleleft> f)"
by (metis f_in_dom_ar_apply_not_elem domIff)
lemma l_Some_in_dom:
"f x = Some y \<Longrightarrow> x \<in> dom f" by auto
(* IJW: TODO: classify; fix proof; rename; decide whether needed?! *)
lemma l_dom_ar_insert: "((insert x F) -\<triangleleft> f) = {x} -\<triangleleft> (F-\<triangleleft> f)"
proof
fix xa
show "(insert x F -\<triangleleft> f) xa = ({x} -\<triangleleft> F -\<triangleleft> f) xa"
apply (cases "x= xa")
apply (simp add: dom_antirestr_def)
apply (cases "xa\<in>F")
apply (simp add: dom_antirestr_def)
apply (subst f_in_dom_ar_apply_not_elem)
apply simp
apply (subst f_in_dom_ar_apply_not_elem)
apply simp
apply (subst f_in_dom_ar_apply_not_elem)
apply simp
apply simp
done
qed
(* IJW: TODO: classify; rename?; subsume by l_dom_ar_accum? *)
(* IJW: Think it may also be unused? *)
lemma l_dom_ar_absorb_singleton: "x \<in> F \<Longrightarrow> ({x} -\<triangleleft> F -\<triangleleft> f) =(F -\<triangleleft> f)"
by (metis l_dom_ar_insert insert_absorb)
(* IJW: TODO: rename; classify; generalise? *)
lemma l_dom_ar_disjoint_weakening:
"dom f \<inter> Y = {} \<Longrightarrow> dom (X -\<triangleleft> f) \<inter> Y = {}"
by (metis Diff_Int_distrib2 empty_Diff l_dom_dom_ar)
(* IJW: TODO: not used? *)
lemma l_dom_ar_singletons_comm: "{x}-\<triangleleft> {y} -\<triangleleft> f = {y}-\<triangleleft> {x} -\<triangleleft> f"
by (metis l_dom_ar_insert insert_commute)
lemma l_dom_r_ar_set_minus:
"S \<triangleleft> (T -\<triangleleft> m) = (S - T) \<triangleleft> m"
find_theorems "_ = _" name:HOL name:"fun"
apply (rule ext)
unfolding dom_restr_def dom_antirestr_def restrict_map_def
by simp
lemmas antirestr_simps = f_in_dom_ar_subsume f_in_dom_ar_notelem f_in_dom_ar_the_subsume
f_in_dom_ar_apply_subsume f_in_dom_ar_apply_not_elem f_dom_ar_subset_dom
l_dom_dom_ar l_dom_ar_accum l_dom_ar_nothing l_dom_ar_empty_lhs l_dom_ar_empty_rhs
l_dom_ar_everything l_dom_ar_none l_dom_ar_not_in_dom l_dom_ar_not_in_dom2
l_dom_ar_notin_dom_or l_in_dom_ar l_dom_ar_disjoint_weakening
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Map override weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
(* Lemma: dagger associates [ZEVES-LEMMA] *)
lemma l_dagger_assoc:
"f \<dagger> (g \<dagger> h) = (f \<dagger> g) \<dagger> h"
by (metis dagger_def map_add_assoc)
thm ext option.split fun_eq_iff (* EXT! Just found function extensionality! *)
(* Lemma: dagger application [ZEVES-LEMMA] *)
lemma l_dagger_apply:
"(f \<dagger> g) x = (if x \<in> dom g then (g x) else (f x))"
unfolding dagger_def
by (metis (full_types) map_add_dom_app_simps(1) map_add_dom_app_simps(3))
(* Lemma: dagger domain [ZEVES-LEMMA] *)
lemma l_dagger_dom:
"dom(f \<dagger> g) = dom f \<union> dom g"
unfolding dagger_def
by (metis dom_map_add sup_commute)
(* Lemma: dagger absorption LHS *)
lemma l_dagger_lhs_absorb:
"dom f \<subseteq> dom g \<Longrightarrow> f \<dagger> g = g"
apply (rule ext)
by(metis dagger_def l_dagger_apply map_add_dom_app_simps(2) set_rev_mp)
lemma l_dagger_lhs_absorb_ALT_PROOF:
"dom f \<subseteq> dom g \<Longrightarrow> f \<dagger> g = g"
apply (rule ext)
apply (simp add: l_dagger_apply)
apply (rule impI)
find_theorems "_ \<notin> _ \<Longrightarrow> _" name:Set
apply (drule contra_subsetD)
unfolding dom_def
by (simp_all) (* NOTE: found nice lemmas to be used: contra_subsetD *)
(* Lemma: dagger empty absorption lhs [ZEVES-LEMMA] *)
lemma l_dagger_empty_lhs:
"Map.empty \<dagger> f = f"
by (metis dagger_def empty_map_add)
(* Lemma: dagger empty absorption rhs [ZEVES-LEMMA] *)
lemma l_dagger_empty_rhs:
"f \<dagger> Map.empty = f"
by (metis dagger_def map_add_empty)
(* Interesting observation here:
A few times I have spotted this. I then tend to get these
lemmas and use them in Isar; whereas Leo, you don't seem
to use this variety. Probably because the automation takes
care of the reasoning?...
*)
(* IJW: TODO: Rename; classify *)
lemma dagger_notemptyL:
"f \<noteq> Map.empty \<Longrightarrow> f \<dagger> g \<noteq> Map.empty" by (metis dagger_def map_add_None)
lemma dagger_notemptyR:
"g \<noteq> Map.empty \<Longrightarrow> f \<dagger> g \<noteq> Map.empty" by (metis dagger_def map_add_None)
(* Lemma: dagger associates with dom_ar [ZEVES-LEMMA] *)
(* IJW: It's not really an assoc prop? Well, kinda, but also kinda distrib *)
lemma l_dagger_dom_ar_assoc:
"S \<inter> dom g = {} \<Longrightarrow> (S -\<triangleleft> f) \<dagger> g = S -\<triangleleft> (f \<dagger> g)"
apply (simp add: fun_eq_iff)
apply (simp add: l_dagger_apply)
apply (intro allI impI conjI)
unfolding dom_antirestr_def
apply (simp_all add: l_dagger_apply)
by (metis dom_antirestr_def l_dom_ar_nothing)
thm map_add_comm
(* NOTE: This should be provable, if only I knew how to do map extensionality :-(. Now I do! fun_eq_iff!
   Thm map_add_comm is quite a nice lemma too, and could be used here, yet l_dagger_apply seems nicer.
*)
lemma l_dagger_not_empty:
"g \<noteq> Map.empty \<Longrightarrow> f \<dagger> g \<noteq> Map.empty"
by (metis dagger_def map_add_None)
(* IJW TODO: the following 6 need to be renamed and classified? LEO: how do you make such choices? *)
lemma in_dagger_domL:
"x \<in> dom f \<Longrightarrow> x \<in> dom(f \<dagger> g)"
by (metis dagger_def domIff map_add_None)
lemma in_dagger_domR:
"x \<in> dom g \<Longrightarrow> x \<in> dom(f \<dagger> g)"
by (metis dagger_def domIff map_add_None)
lemma the_dagger_dom_right:
assumes "x \<in> dom g"
shows "the ((f \<dagger> g) x) = the (g x)"
by (metis assms dagger_def map_add_dom_app_simps(1))
lemma the_dagger_dom_left:
assumes "x \<notin> dom g"
shows "the ((f \<dagger> g) x) = the (f x)"
by (metis assms dagger_def map_add_dom_app_simps(3))
lemma the_dagger_mapupd_dom: "x\<noteq>y \<Longrightarrow> (f \<dagger> [y \<mapsto> z]) x = f x "
by (metis dagger_def fun_upd_other map_add_empty map_add_upd)
lemma dagger_upd_dist: "f \<dagger> fa(e \<mapsto> r) = (f \<dagger> fa)(e \<mapsto> r)" by (metis dagger_def map_add_upd)
(* IJW TODO: rename *)
lemma antirestr_then_dagger_notin: "x \<notin> dom f \<Longrightarrow> {x} -\<triangleleft> (f \<dagger> [x \<mapsto> y]) = f"
proof
fix z
assume "x \<notin> dom f"
show "({x} -\<triangleleft> (f \<dagger> [x \<mapsto> y])) z = f z"
by (metis `x \<notin> dom f` domIff dom_antirestr_def fun_upd_other insertI1 l_dagger_apply singleton_iff)
qed
lemma antirestr_then_dagger: "r\<in> dom f \<Longrightarrow> {r} -\<triangleleft> f \<dagger> [r \<mapsto> the (f r)] = f"
proof
fix x
assume *: "r\<in>dom f"
show "({r} -\<triangleleft> f \<dagger> [r \<mapsto> the (f r)]) x = f x"
proof (subst l_dagger_apply,simp,intro conjI impI)
assume "x=r" then show "Some (the (f r)) = f r" using * by auto
next
assume "x \<noteq>r" then show " ({r} -\<triangleleft> f) x = f x" by (metis f_in_dom_ar_apply_not_elem singleton_iff)
qed
qed
(* IJW: TODO: rename; classify *)
lemma dagger_notin_right: "x \<notin> dom g \<Longrightarrow> (f \<dagger> g) x = f x"
by (metis l_dagger_apply)
(* IJW: TODO: rename; classify *)
lemma dagger_notin_left: "x \<notin> dom f \<Longrightarrow> (f \<dagger> g) x = g x"
by (metis dagger_def map_add_dom_app_simps(2))
lemma l_dagger_commute: "dom f \<inter> dom g = {} \<Longrightarrow>f \<dagger> g = g \<dagger> f"
unfolding dagger_def
apply (rule map_add_comm)
by simp
lemmas dagger_simps = l_dagger_assoc l_dagger_apply l_dagger_dom l_dagger_lhs_absorb
l_dagger_empty_lhs l_dagger_empty_rhs dagger_notemptyL dagger_notemptyR l_dagger_not_empty
in_dagger_domL in_dagger_domR the_dagger_dom_right the_dagger_dom_left the_dagger_mapupd_dom
dagger_upd_dist antirestr_then_dagger_notin antirestr_then_dagger dagger_notin_right
dagger_notin_left
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Map update weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
text \<open>Without the condition, nitpick finds a counterexample (the unconditional variant is sketched below and left unproved).\<close>
lemma l_inmapupd_dom_iff:
"l \<noteq> x \<Longrightarrow> (l \<in> dom (f(x \<mapsto> y))) = (l \<in> dom f)"
by (metis (full_types) domIff fun_upd_apply)
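text \<open>Editorial sketch (added): the unconditional variant mentioned above, left unproved
      since it is refuted (e.g. by nitpick) when @{term "l = x"} and @{term "x \<notin> dom f"}.\<close>
lemma "(l \<in> dom (f(x \<mapsto> y))) = (l \<in> dom f)"
  oops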
lemma l_inmapupd_dom:
"l \<in> dom f \<Longrightarrow> l \<in> dom (f(x \<mapsto> y))"
by (metis dom_fun_upd insert_iff option.distinct(1))
lemma l_dom_extend:
"x \<notin> dom f \<Longrightarrow> dom (f1(x \<mapsto> y)) = dom f1 \<union> {x}"
by simp
lemma l_updatedom_eq:
"x=l \<Longrightarrow> the ((f(x \<mapsto> the (f x) - s)) l) = the (f l) - s"
by auto
lemma l_updatedom_neq:
"x\<noteq>l \<Longrightarrow> the ((f(x \<mapsto> the (f x) - s)) l) = the (f l)"
by auto
\<comment> \<open>A helper lemma to have map update when domain is updated\<close>
lemma l_insertUpdSpec_aux: "dom f = insert x F \<Longrightarrow> (f0 = (f |` F)) \<Longrightarrow> f = f0 (x \<mapsto> the (f x))"
proof auto
assume insert: "dom f = insert x F"
then have "x \<in> dom f" by simp
then show "f = (f |` F)(x \<mapsto> the (f x))" using insert
unfolding dom_def
apply simp
apply (rule ext)
apply auto
done
qed
lemma l_the_map_union_right: "x \<in> dom g \<Longrightarrow>dom f \<inter> dom g = {} \<Longrightarrow> the ((f \<union>m g) x) = the (g x)"
by (metis l_dagger_apply munion_def)
lemma l_the_map_union_left: "x \<in> dom f \<Longrightarrow>dom f \<inter> dom g = {} \<Longrightarrow> the ((f \<union>m g) x) = the (f x)"
by (metis l_dagger_apply l_dagger_commute munion_def)
lemma l_the_map_union: "dom f \<inter> dom g = {} \<Longrightarrow> the ((f \<union>m g) x) = (if x \<in> dom f then the (f x) else the (g x))"
by (metis l_dagger_apply l_dagger_commute munion_def)
lemmas upd_simps = l_inmapupd_dom_iff l_inmapupd_dom l_dom_extend
l_updatedom_eq l_updatedom_neq
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Map union (VDM-specific) weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
(* Weaken: munion point-wise update well-definedness condition *)
lemma k_munion_map_upd_wd:
"x \<notin> dom f \<Longrightarrow> dom f \<inter> dom [x\<mapsto> y] = {}"
by (metis Int_empty_left Int_insert_left dom_eq_singleton_conv inf_commute)
(* NOTE: munion updates are often over singleton sets. This weakening rule
         states that it is enough to show x is not in dom f to enable the application
         of f \<union>m [x \<mapsto> y]; a usage sketch follows l_munion_apply below.
 *)
(* Lemma: munion application *)
lemma l_munion_apply:
"dom f \<inter> dom g = {} \<Longrightarrow> (f \<union>m g) x = (if x \<in> dom g then (g x) else (f x))"
unfolding munion_def
by (simp add: l_dagger_apply)
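text \<open>Editorial usage sketch (added) for the singleton situation in the note above: with a
      fresh key, the weakening rule discharges the disjointness side condition and the
      munion then applies pointwise to the new maplet.\<close>
lemma l_munion_singleton_apply_sketch:
  assumes "x \<notin> dom f"
  shows "(f \<union>m [x \<mapsto> y]) x = Some y"
proof -
  from assms have "dom f \<inter> dom [x \<mapsto> y] = {}"
    by (rule k_munion_map_upd_wd)
  then show ?thesis
    by (simp add: l_munion_apply)
qed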
(* Lemma: munion domain *)
lemma l_munion_dom:
"dom f \<inter> dom g = {} \<Longrightarrow> dom(f \<union>m g) = dom f \<union> dom g"
unfolding munion_def
by (simp add: l_dagger_dom)
lemma l_diff_union: "(A - B) \<union> C = (A \<union> C) - (B - C)"
by (metis Compl_Diff_eq Diff_eq Un_Int_distrib2)
lemma l_munion_ran: "dom f \<inter> dom g = {} \<Longrightarrow> ran(f \<union>m g) = ran f \<union> ran g"
apply (unfold munion_def)
apply simp
find_theorems "(_ \<dagger> _) = _"
(*apply (simp add: b_dagger_munion)*)
apply (intro set_eqI iffI)
unfolding ran_def
thm l_dagger_apply
apply (simp_all add: l_dagger_apply split_ifs)
apply metis
by (metis Int_iff all_not_in_conv domIff option.distinct(1))
(* Bridge: dagger defined through munion [ZEVES-LEMMA] *)
lemma b_dagger_munion_aux:
"dom(dom g -\<triangleleft> f) \<inter> dom g = {}"
apply (simp add: l_dom_dom_ar)
by (metis Diff_disjoint inf_commute)
lemma b_dagger_munion:
"(f \<dagger> g) = (dom g -\<triangleleft> f) \<union>m g"
find_theorems (300) "_ = (_::(_ \<Rightarrow> _))" -name:Predicate -name:Product -name:Quick -name:New -name:Record -name:Quotient
-name:Hilbert -name:Nitpick -name:Random -name:Transitive -name:Sum_Type -name:DSeq -name:Datatype -name:Enum
-name:Big -name:Code -name:Divides
thm fun_eq_iff[of "f \<dagger> g" "(dom g -\<triangleleft> f) \<union>m g"]
apply (simp add: fun_eq_iff)
apply (simp add: l_dagger_apply)
apply (cut_tac b_dagger_munion_aux[of g f]) (* TODO: How to make this more automatic? Iain, help? subgoal_tac! Try that. *)
apply (intro allI impI conjI)
apply (simp_all add: l_munion_apply)
unfolding dom_antirestr_def
by simp
lemma l_munion_assoc:
"dom f \<inter> dom g = {} \<Longrightarrow> dom g \<inter> dom h = {} \<Longrightarrow> (f \<union>m g) \<union>m h = f \<union>m (g \<union>m h)"
unfolding munion_def
apply (simp add: l_dagger_dom)
apply (intro conjI impI)
apply (metis l_dagger_assoc)
apply (simp_all add: disjoint_iff_not_equal)
apply (erule_tac [1-] bexE)
apply blast
apply blast
done
lemma l_munion_commute:
"dom f \<inter> dom g = {} \<Longrightarrow> f \<union>m g = g \<union>m f"
by (metis b_dagger_munion l_dagger_commute l_dom_ar_nothing munion_def)
lemma l_munion_subsume:
"x \<in> dom f \<Longrightarrow> the(f x) = y \<Longrightarrow> f = ({x} -\<triangleleft> f) \<union>m [x \<mapsto> y]"
apply (subst fun_eq_iff)
apply (intro allI)
apply (subgoal_tac "dom({x} -\<triangleleft> f) \<inter> dom [x \<mapsto> y] = {}")
apply (simp add: l_munion_apply)
apply (metis domD dom_antirestr_def singletonE option.sel)
by (metis Diff_disjoint Int_commute dom_eq_singleton_conv l_dom_dom_ar)
text_raw \<open> Perhaps add @{text "g \<subseteq>\<^sub>m f"} instead? \<close>
lemma l_munion_subsumeG:
"dom g \<subseteq> dom f \<Longrightarrow> \<forall>x \<in> dom g . f x = g x \<Longrightarrow> f = (dom g -\<triangleleft> f) \<union>m g"
unfolding munion_def
apply (subgoal_tac "dom (dom g -\<triangleleft> f) \<inter> dom g = {}")
apply simp
apply (subst fun_eq_iff)
apply (rule allI)
apply (simp add: l_dagger_apply)
apply (intro conjI impI)+
unfolding dom_antirestr_def
apply (simp)
apply (fold dom_antirestr_def)
by (metis Diff_disjoint inf_commute l_dom_dom_ar)
lemma l_munion_dom_ar_assoc:
"S \<subseteq> dom f \<Longrightarrow> dom f \<inter> dom g = {} \<Longrightarrow> (S -\<triangleleft> f) \<union>m g = S -\<triangleleft> (f \<union>m g)"
unfolding munion_def
apply (subgoal_tac "dom (S -\<triangleleft> f) \<inter> dom g = {}")
defer 1
apply (metis Diff_Int_distrib2 empty_Diff l_dom_dom_ar)
apply simp
apply (rule l_dagger_dom_ar_assoc)
by (metis equalityE inf_mono subset_empty)
lemma l_munion_empty_rhs:
"(f \<union>m Map.empty) = f"
unfolding munion_def
by (metis dom_empty inf_bot_right l_dagger_empty_rhs)
lemma l_munion_empty_lhs:
"(Map.empty \<union>m f) = f"
unfolding munion_def
by (metis dom_empty inf_bot_left l_dagger_empty_lhs)
lemma k_finite_munion:
"finite (dom f) \<Longrightarrow> finite(dom g) \<Longrightarrow> dom f \<inter> dom g = {} \<Longrightarrow> finite(dom(f \<union>m g))"
by (metis finite_Un l_munion_dom)
lemma l_munion_singleton_not_empty:
"x \<notin> dom f \<Longrightarrow> f \<union>m [x \<mapsto> y] \<noteq> Map.empty"
apply (cases "f = Map.empty")
apply (metis l_munion_empty_lhs map_upd_nonempty)
unfolding munion_def
apply simp
by (metis dagger_def map_add_None)
lemma l_munion_empty_iff:
"dom f \<inter> dom g = {} \<Longrightarrow> (f \<union>m g = Map.empty) \<longleftrightarrow> (f = Map.empty \<and> g = Map.empty)"
apply (rule iffI)
apply (simp only: dom_eq_empty_conv[symmetric] l_munion_dom)
apply (metis Un_empty)
by (simp add: l_munion_empty_lhs l_munion_empty_rhs)
lemma l_munion_dom_ar_singleton_subsume:
"x \<notin> dom f \<Longrightarrow> {x} -\<triangleleft> (f \<union>m [x \<mapsto> y]) = f"
apply (subst fun_eq_iff)
apply (rule allI)
unfolding dom_antirestr_def
by (auto simp: l_munion_apply)
(*
lemmX l_dom_ar_union:
"S -\<triangleleft> (f \<union>m g) = (S -\<triangleleft> f) \<union>m (S -\<triangleleft> g)"
apply (rule ext)
unfolding munion_def
apply (split split_if, intro conjI impI)+
apply (simp_all add: l_dagger_apply)
apply (intro conjI impI)
apply (insert f_dom_ar_subset_dom[of S f])
apply (insert f_dom_ar_subset_dom[of S g])
oops
*)
(* IJW: TODO: rename? *)
lemma l_munion_upd: "dom f \<inter> dom [x \<mapsto> y] = {} \<Longrightarrow> f \<union>m [x \<mapsto> y] = f(x \<mapsto>y)"
unfolding munion_def
apply simp
by (metis dagger_def map_add_empty map_add_upd)
(* IJW: TODO: Do I really need these?! *)
lemma munion_notemp_dagger: "dom f \<inter> dom g = {} \<Longrightarrow> f \<union>m g\<noteq>Map.empty \<Longrightarrow> f \<dagger> g \<noteq> Map.empty"
by (metis munion_def)
lemma dagger_notemp_munion: "dom f \<inter> dom g = {} \<Longrightarrow> f \<dagger> g\<noteq>Map.empty \<Longrightarrow> f \<union>m g \<noteq> Map.empty"
by (metis munion_def)
lemma munion_notempty_left: "dom f \<inter> dom g = {} \<Longrightarrow> f \<noteq> Map.empty \<Longrightarrow> f \<union>m g \<noteq> Map.empty"
by (metis dagger_notemp_munion dagger_notemptyL)
lemma munion_notempty_right: "dom f \<inter> dom g = {} \<Longrightarrow> g \<noteq> Map.empty \<Longrightarrow> f \<union>m g \<noteq> Map.empty"
by (metis dagger_notemp_munion dagger_notemptyR)
lemma unionm_in_dom_left: "x \<in> dom (f \<union>m g) \<Longrightarrow> (dom f \<inter> dom g) = {} \<Longrightarrow> x \<notin> dom g \<Longrightarrow> x \<in> dom f"
by (simp add: l_munion_dom)
lemma unionm_in_dom_right: "x \<in> dom (f \<union>m g) \<Longrightarrow> (dom f \<inter> dom g) = {} \<Longrightarrow> x \<notin> dom f \<Longrightarrow> x \<in> dom g"
by (simp add: l_munion_dom)
lemma unionm_notin_dom: "x \<notin> dom f \<Longrightarrow> x \<notin> dom g \<Longrightarrow> (dom f \<inter> dom g) = {} \<Longrightarrow> x \<notin> dom (f \<union>m g)"
by (metis unionm_in_dom_right)
lemmas munion_simps = k_munion_map_upd_wd l_munion_apply l_munion_dom b_dagger_munion
l_munion_subsume l_munion_subsumeG l_munion_dom_ar_assoc l_munion_empty_rhs
l_munion_empty_lhs k_finite_munion l_munion_upd munion_notemp_dagger
dagger_notemp_munion munion_notempty_left munion_notempty_right
lemmas vdm_simps = restr_simps antirestr_simps dagger_simps upd_simps munion_simps
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
subsubsection \<open> Map finiteness weakening lemmas [EXPERT] \<close>
(* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ *)
\<comment> \<open>Need to have the lemma options, otherwise it fails somehow\<close>
lemma finite_map_upd_induct [case_names empty insert, induct set: finite]:
assumes fin: "finite (dom f)"
and empty: "P Map.empty"
and insert: "\<And>e r f. finite (dom f) \<Longrightarrow> e \<notin> dom f \<Longrightarrow> P f \<Longrightarrow> P (f(e \<mapsto> r))"
shows "P f" using fin
proof (induct "dom f" arbitrary: "f" rule:finite_induct) \<comment> \<open>arbitrary statement is a must in here, otherwise cannot prove it\<close>
case empty then have "dom f = {}" by simp \<comment> \<open>need to reverse to apply rules\<close>
then have "f = Map.empty" by simp
thus ?case by (simp add: assms(2))
next
case (insert x F)
\<comment> \<open>Show that update of the domain means an update of the map\<close>
assume domF: "insert x F = dom f" then have domFr: "dom f = insert x F" by simp
then obtain f0 where f0Def: "f0 = f |` F" by simp
with domF have domF0: "F = dom f0" by auto
with insert have "finite (dom f0)" and "x \<notin> dom f0" and "P f0" by simp_all
then have PFUpd: "P (f0(x \<mapsto> the (f x)))"
by (simp add: assms(3))
from domFr f0Def have "f = f0(x \<mapsto> the (f x))" by (auto intro: l_insertUpdSpec_aux)
with PFUpd show ?case by simp
qed
lemma finiteRan: "finite (dom f) \<Longrightarrow> finite (ran f)"
proof (induct rule:finite_map_upd_induct)
case empty thus ?case by simp
next
case (insert e r f) then have ranIns: "ran (f(e \<mapsto> r)) = insert r (ran f)" by auto
assume "finite (ran f)" then have "finite (insert r (ran f))" by (intro finite.insertI)
thus ?case apply (subst ranIns)
by simp
qed
(* IJW: TODO: classify; rename; relocate? *)
lemma l_dom_r_finite: "finite (dom f) \<Longrightarrow> finite (dom ( S \<triangleleft> f))"
apply (rule_tac B="dom f" in finite_subset)
apply (simp add: l_dom_r_dom_subseteq)
apply assumption
done
lemma dagger_finite: "finite (dom f) \<Longrightarrow> finite (dom g) \<Longrightarrow> finite (dom (f \<dagger> g))"
by (metis dagger_def dom_map_add finite_Un)
lemma finite_singleton: "finite (dom [a \<mapsto> b])"
by (metis dom_eq_singleton_conv finite.emptyI finite_insert)
lemma not_in_dom_ar: "finite (dom f) \<Longrightarrow> s \<inter> dom f = {} \<Longrightarrow> dom (s -\<triangleleft> f) = dom f"
apply (induct rule: finite_map_upd_induct)
apply (unfold dom_antirestr_def) apply simp
by (metis IntI domIff empty_iff)
(* LF: why go for induction ? *)
lemma not_in_dom_ar_2: "finite (dom f) \<Longrightarrow> s \<inter> dom f = {} \<Longrightarrow> dom (s -\<triangleleft> f) = dom f"
apply (subst set_eq_subset)
apply (rule conjI)
apply (rule_tac[!] subsetI)
apply (metis l_dom_ar_not_in_dom)
by (metis l_dom_ar_nothing)
(* ======== *)
lemma l_dom_ar_commute_quickspec:
"S -\<triangleleft> (T -\<triangleleft> f) = T -\<triangleleft> (S -\<triangleleft> f)"
by (metis l_dom_ar_accum sup_commute)
lemma l_dom_ar_same_subsume_quickspec:
"S -\<triangleleft> (S -\<triangleleft> f) = S -\<triangleleft> f"
by (metis l_dom_ar_accum sup_idem)
lemma l_map_with_range_not_dom_empty: "dom m \<noteq> {} \<Longrightarrow> ran m \<noteq> {}"
by (simp add: l_map_non_empty_ran_conv)
lemma l_map_dom_ran: "dom f = A \<Longrightarrow> x \<in> A \<Longrightarrow> f x \<noteq> None"
by blast
(* Sequential composition combinator *)
definition
seqcomp :: "('a \<Rightarrow> 'a) \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a" ("((_)/ ;; (_)/, (_))" [0, 0, 10] 10)
where
[intro!]: "(P ;; Q, bst) \<equiv> let mst = P bst in (Q mst)"
lemma l_seq_comp_simp[simp]: "(P ;; Q, bst) = Q (P bst)" unfolding seqcomp_def by simp
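text \<open>Editorial evaluation sketch (added) of the sequential composition combinator just defined.\<close>
lemma "((\<lambda> n . n + 1) ;; (\<lambda> n . n * 2), (3::nat)) = 8"
  by simp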
lemma l_inv_SetElems_inv_MapTrue[simp]: "inv_SetElems inv_True S"
by (simp add: inv_True_def inv_SetElems_def)
lemma l_ranE_frule:
"e \<in> ran f \<Longrightarrow> \<exists> x . f x = Some e"
unfolding ran_def by safe
lemma l_ranE_frule':
"e \<in> ran f \<Longrightarrow> \<exists> x . e = the(f x)"
by (metis l_ranE_frule option.sel)
lemma l_inv_MapTrue:
"finite (dom m) \<Longrightarrow> inv_Map inv_True inv_True m"
unfolding inv_Map_defs
by (simp add: finite_ran)
lemma l_invMap_domr_absorb:
"inv_Map di ri m \<Longrightarrow> inv_Map di ri (S \<triangleleft> m)"
unfolding inv_Map_def inv_VDMSet'_defs inv_VDMSet_def
by (metis (mono_tags, lifting) domIff f_in_dom_r_apply_elem f_in_relimg_ran finiteRan l_dom_r_finite l_in_dom_dom_r)
lemma l_inv_Map_on_dom: "inv_Map inv_Dom inv_Ran m \<Longrightarrow> inv_SetElems inv_Dom (dom m)"
unfolding inv_Map_defs by auto
lemma l_inv_Map_on_ran: "inv_Map inv_Dom inv_Ran m \<Longrightarrow> inv_SetElems inv_Ran (ran m)"
unfolding inv_Map_defs by auto
lemma l_invMap_di_absorb:
"inv_Map di ri m \<Longrightarrow> inv_Map inv_True ri m"
by (simp add: inv_Map_defs)
section \<open>To tidy up or remove\<close>
subsection \<open> Set translations: enumeration, comprehension, ranges \<close>
(* { expr | var . filter }, { var \<in> type . filter }, { var . filter } *)
value "{ x+x | x . x \<in> {(1::nat),2,3,4,5,6} }"
value "{ x+x | x . x \<in> {(1::nat),2,3} }"
(*value "{ x+x | x . x \<in> {(1::nat)..3} }" --"not always work"*)
value "{0..(2::int)}"
value "{0..<(3::int)}"
value "{0<..<(3::int)}"
subsection \<open> Seq translations: enumeration, comprehension, ranges \<close>
value "{ [A,B,C] ! i | i . i \<in> {0,1,2} }"
value "{ [A,B,C,D,E,F] ! i | i . i \<in> {0,2,4} }"
(* { s(i) | i in set inds s & i mod 2 = 0 } *)
(* List application (i.e. s(x)) is available in Isabelle, but is zero based *)
value "[A, B, C] ! 0"
value "[A, B, C] ! 1"
value "[A, B, C] ! 2"
value "[A, B, C] ! 3"
value "nth [A, B, C] 0"
value "applyList [A, B] 0" \<comment> \<open>out of range\<close>
value "applyList [A, B] 1"
value "applyList [A, B] 2"
value "applyList [A, B] 3" \<comment> \<open>out of range\<close>
value "[A,B,C,D] $ 0"
lemma "[A,B,C] $ 4 = A" unfolding applyVDMSeq_defs apply simp oops
lemma "[A,B,C] $ 1 = A" unfolding applyVDMSeq_defs apply simp done
value "[a] $ (len [(a::nat)])"
value "[A, B] $ 0" \<comment> \<open>out of range\<close>
value "[A,B]$1"
value "[A, B]$ 1"
value "[A, B]$ 2"
value "[A, B]$ 3" \<comment> \<open>out of range\<close>
(* List comprehension *)
value "{ [A,B,C] ! i | i . i \<in> {0,1,2} }"
value "[ x . x \<leftarrow> [0,1,(2::int)] ]" (*avoid if possible... *)
value "[ x . x \<leftarrow> [0 .. 3] ]"
value "len [A, B, C]"
value "elems [A, B, C, A, B]"
value "elems [(0::nat), 1, 2]"
value "inds [A,B,C]"
value "inds_as_nat [A,B,C]"
value "card (elems [10, 20, 30, 1, 2, 3, 4, (5::nat), 10])"
value "len [10, 20, 30, 1, 2, 3, 4, (5::nat), 10]"
(* MySeq = seq of nat1
inv s == len s \<le> 9 and card(elem s) = len s and (forall i in set elems s . i \<le> 9)*)
type_synonym MySeq = "VDMNat1 list"
definition
inv_MySeq :: "MySeq \<Rightarrow> \<bool>"
where
"inv_MySeq s \<equiv> (inv_SeqElems inv_VDMNat1 s) \<and>
len s \<le> 9 \<and> int (card (elems s)) = len s \<and>
(\<forall> i \<in> elems s . i > 0 \<and> i \<le> 9)"
value "inv_MySeq [1, 2, 3]"
(*
type_synonym ('a,'b) "map" = "'a \<Rightarrow> 'b option" (infixr "~=>" 0)
*)
text \<open>
  In Isabelle, VDM maps can be declared with the @{text "\<rightharpoonup>"} operator (not @{text "\<Rightarrow>"})
  (i.e. type 'right' and you will see the arrow on the dropdown menu).
  It represents a function to an optional result as follows:
  VDM : map X to Y
  Isabelle: @{text "X \<rightharpoonup> Y"}
  which is the same as
  Isabelle: @{text "X \<Rightarrow> Y option"}
  where an optional type is like using nil in VDM (map X to [Y]).
  That is, Isabelle makes the map total by mapping everything outside
  the domain to None (or nil). In Isabelle, the option type is defined as
  @{text "datatype 'a option = None | Some 'a"}.
\<close>
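text \<open>Editorial sketch (added) making the encoding explicit: a VDM-style maplet is just an
      @{text "option"}-valued function, so lookups outside the domain yield @{term None}.\<close>
lemma "([ (0::nat) \<mapsto> (7::nat) ] :: nat \<Rightarrow> nat option) 1 = None"
  by simp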
text \<open> VDM maps auxiliary functions \<close>
(* dom exists already *)
thm dom_def
find_theorems "dom _"
subsection \<open> Map translations: enumeration, comprehension \<close>
(* map values are given as *)
value "[ (0::nat) \<mapsto> (7::nat), 1 \<mapsto> 5 ]"
value "[ (0::int) \<mapsto> (1::int), 1 \<mapsto> 5 ] 0"
value "the ([ (0::int) \<mapsto> (1::int), 1 \<mapsto> 5 ] 0)"
value "the (Some b)"
value "Map.empty(A \<mapsto> 0)"
value "Map.empty(A := Some 0)"
value "[A \<mapsto> 0]"
value "[A \<mapsto> 0, B \<mapsto> 1]"
(*
value "the None"
value "Map.empty"
value "the ([ (1::int) \<mapsto> (1::int), 2 \<mapsto> 1, 3 \<mapsto> 2 ] (4::int)) + (3::int)"
value "the ([ (0::nat) \<mapsto> (0::nat), 1 \<mapsto> 5 ] (4::nat))"
*)
lemma "the ([ (1::int) \<mapsto> (1::int), 2 \<mapsto> 1, 3 \<mapsto> 2 ] (4::int)) + (3::int) = A" apply simp oops
lemma "the ([ (1::int) \<mapsto> (1::int), 2 \<mapsto> 1, 3 \<mapsto> 2 ] 2) + 3 = 4" by simp
find_theorems "the _"
text \<open>It is not always possible to see their values, as map encodings are
      more complex. You can use the Isabelle prover as a debugger, e.g.:
\<close>
lemma "dom [ A \<mapsto> 0, B \<mapsto> 1] = LOOK_HERE" apply simp oops
value "Map.empty(A \<mapsto> 0)"
value "Map.empty(A := Some 0)"
value "[A \<mapsto> 0]"
value "[A \<mapsto> 0, B \<mapsto> 1]"
lemma "dom [ A \<mapsto> 0, B \<mapsto> 1] = LOOK_HERE" apply simp oops
lemma "ran [ A \<mapsto> (0::nat), B \<mapsto> 1] = {0,1}" apply simp oops
(* rng also exists as ran *)
thm ran_def
find_theorems "ran _"
lemma "ran [ A \<mapsto> (0::nat), B \<mapsto> 1] = {0,1}" apply simp oops
subsection \<open>Map comprehension examples, pitfalls and proof scenarios\<close>
(*On the explicit (narrower/declared) type, add inv_VDMNat1
v98: map nat to nat1 = { d \<mapsto> r | d in set {1,2,3}, r in set {2,4,6} & r = d*2 } *)
definition
ex1 :: "VDMNat \<rightharpoonup> VDMNat1"
where
"ex1 \<equiv> mapCompSetBound {1,2,3::VDMNat} {2,4,6::VDMNat} inv_VDMNat inv_VDMNat1 domid rngid (\<lambda> d r . r = d*2)"
lemmas ex1_defs = ex1_def mapCompSetBound_defs inv_VDMNat1_def inv_VDMNat_def
lemma ex1_none: "x \<notin> dom ex1 \<Longrightarrow> ex1 x = None"
by (simp add: domIff)
lemma ex1_dom: "dom ex1 = {1,2,3}"
unfolding dom_def ex1_defs
by (simp split:if_splits, safe)
lemma ex1_rng:"rng ex1 = {2,4,6}"
unfolding rng_defs ex1_defs
apply (simp split:if_splits)
apply (intro equalityI subsetI, simp_all)
(* apply (elim exE conjE impE)*) (* this will be fiddly! *)
apply (elim exE conjE disjE, simp_all)
apply (fastforce, fastforce, fastforce)
by (smt (z3) semiring_norm(83) the_equality verit_eq_simplify(14) zero_le_numeral)
(*@TODO add invariant failure to undefined tests! *)
lemma ex1_map: "x \<in> dom ex1 \<Longrightarrow> ex1 x = Some (2*x)"
unfolding ex1_defs
apply (simp split:if_splits, safe, force+)
thm option.discI
by (metis option.discI)
(*On the implicit (wider/presumed) type, add inv_VDMNat; these funny binds are tricky!
v98 = { x+y |-> 10 | x in set {1,2,3}, y in set {4,5,6} } *)
definition
ex2 :: "VDMNat \<rightharpoonup> VDMNat"
where
"ex2 \<equiv> mapCompSetBound {1,2,3::VDMNat} {4,5,6::VDMNat} inv_VDMNat inv_VDMNat (\<lambda> x . (\<lambda> y . x+y)) (rngcnst 10) truecnst"
lemmas ex2_defs = ex2_def mapCompSetBound_defs inv_VDMNat_def
lemma ex2_none: "x \<notin> dom ex2 \<Longrightarrow> ex2 x = None"
by (simp add: domIff)
lemma ex2_dom: "dom ex2 = {5,6,7,8,9}"
unfolding ex2_defs
apply (simp split:if_splits)
oops
definition
ex2' :: "VDMNat \<rightharpoonup> VDMNat"
where
"ex2' \<equiv> mapCompSetBound { x + y | x y . x \<in> {1,2,3::VDMNat} \<and> y \<in> {4,5,6::VDMNat} } {10::VDMNat}
inv_VDMNat inv_VDMNat domid (rngcnst 10) truecnst"
lemmas ex2'_defs = ex2'_def mapCompSetBound_defs inv_VDMNat_def truecnst_def domid_def
lemma ex2'_none: "x \<notin> dom ex2' \<Longrightarrow> ex2' x = None"
unfolding ex2'_defs
by (simp add: domIff)
lemma ex2'_dom: "dom ex2' = {5,6,7,8,9}"
unfolding ex2'_defs
apply (simp split:if_splits, safe) oops
lemma ex2'_dom_finite: "finite { x + y | x y . x \<in> {1,2,3::VDMNat} \<and> y \<in> {4,5,6::VDMNat} }"
by (simp add: finite_image_set2)
lemma ex2'_dom_clearer: "{ x + y | x y . x \<in> {1,2,3::VDMNat} \<and> y \<in> {4,5,6::VDMNat} } = {5..9::VDMNat}"
apply (safe, simp_all)
by presburger
lemma ex2'_dom_inv: "inv_SetElems ((\<le>) (0::VDMNat)) {5..(9::VDMNat)}"
unfolding inv_SetElems_def
by (safe,simp)
lemma ex2'_dom_inv': "inv_SetElems inv_VDMNat {5..9}"
unfolding inv_SetElems_def inv_VDMNat_def
by (safe,simp)
lemma ex2'_dom: "dom ex2' = {5,6,7,8,9}"
unfolding ex2'_def mapCompSetBound_def inv_VDMSet'_def inv_VDMSet_def truecnst_def
apply (simp only: ex2'_dom_finite ex2'_dom_clearer, simp split:if_splits add: ex2'_dom_inv' inv_VDMNat_def)
unfolding domid_def rngcnst_def inv_True_def inv_VDMNat_def
apply (safe, simp_all)
thm option.distinct
by (smt (z3) option.distinct(1))
lemma ex2'_rng:"rng ex2' = {10}"
unfolding rng_defs ex2'_def mapCompSetBound_def inv_VDMSet'_def inv_VDMSet_def truecnst_def
apply (simp only: ex2'_dom_finite ex2'_dom_clearer, simp split:if_splits add: ex2'_dom_inv' inv_VDMNat_def)
by (safe, simp_all, force+)
lemma ex2'_map: "x \<in> dom ex2' \<Longrightarrow> ex2' x = Some 10"
unfolding ex2'_defs (* don't expand inv_VDMNat *)
apply (simp split:if_splits)
(* complex domain patterns lead to loads of cases Jeez! no safe *)
apply (intro conjI impI, force) oops
lemma ex2'_map: "x \<in> dom ex2' \<Longrightarrow> ex2' x = Some 10"
unfolding ex2'_def mapCompSetBound_def domid_def rngcnst_def truecnst_def inv_True_def
apply (simp split:if_splits)
(* complex domain patterns lead to loads of cases Jeez! no safe *)
apply (intro conjI impI)
find_theorems intro name:the
apply (rule the_equality, simp add: inv_VDMNat_def, blast)
apply (simp add: inv_VDMSet'_def inv_VDMSet_def inv_VDMNat_def) (* clearly true by contradiction, but finite is struggling *)
oops
lemma l_finite_setcomp_finite[simp]: "finite S \<Longrightarrow> finite T \<Longrightarrow> finite { P x y | x y . x \<in> S \<and> y \<in> T }"
by (simp add: finite_image_set2)
thm finite_image_set2 finite_subset
lemma ex2'_map: "x \<in> dom ex2' \<Longrightarrow> ex2' x = Some 10"
unfolding ex2'_defs
apply (insert l_finite_setcomp_finite[of _ _ "\<lambda> x y . x+y"]) (* lemma above not quite in right shape *)
oops
lemma ex2'_map: "x \<in> dom ex2' \<Longrightarrow> ex2' x = Some 10"
unfolding ex2'_def mapCompSetBound_def domid_def rngcnst_def truecnst_def inv_True_def
apply (simp split:if_splits)
apply (intro conjI impI)
apply (rule the_equality, simp add: inv_VDMNat_def, blast)
(*apply (smt (z3) Collect_cong atLeastAtMost_iff ex2'_dom_clearer ex2'_dom_finite mem_Collect_eq) *)(* horrible! *)
apply (elim impE)
apply (simp add: inv_VDMSet'_def inv_VDMSet_def inv_VDMNat_def inv_SetElems_def)
apply (rule conjI)
using ex2'_dom_finite apply force
apply fastforce
oops
lemma l_invVDMSet_finite[simp]: "finite S \<Longrightarrow> inv_SetElems inv_T S \<Longrightarrow> inv_VDMSet' inv_T S"
by (simp add: inv_VDMSet'_def)
lemma ex2'_map: "x \<in> dom ex2' \<Longrightarrow> ex2' x = Some 10"
unfolding ex2'_def mapCompSetBound_def domid_def rngcnst_def truecnst_def inv_True_def
apply (simp split:if_splits)
apply (intro conjI impI, simp)
apply (rule the_equality, simp add: inv_VDMNat_def, blast)
apply (erule impE)
using ex2'_dom_clearer ex2'_dom_inv' apply auto[1]
using inv_VDMNat_def apply auto[1]
apply (smt (verit, del_insts) atLeastAtMost_iff ex2'_dom_clearer l_map_dom_ran)
(* by (smt (z3) Collect_cong atLeastAtMost_iff ex2'_dom_clearer ex2'_dom_finite finite.emptyI finite.insertI inv_SetElems_def inv_VDMNat_def l_invVDMSet_finite mem_Collect_eq singletonD) *)
apply (erule impE)
using ex2'_dom_clearer ex2'_dom_inv' apply force
using inv_VDMNat_def by auto
(* more direct binds are fine, even with range expressions; but beware that UNIV isn't finite!
= { x |-> x+5 | x in set {1,2,3,4} & x > 2 } *)
definition
ex3 :: "VDMNat \<rightharpoonup> VDMNat"
where
"ex3 \<equiv> mapCompSetBound {1,2,3,4::VDMNat} UNIV inv_VDMNat inv_VDMNat domid (\<lambda> x . (\<lambda> y . x + 5)) (\<lambda> x . (\<lambda> y . x > 2))"
lemmas ex3_defs = ex3_def mapCompSetBound_defs inv_VDMNat_def
lemma ex3_none: "x \<notin> dom ex3 \<Longrightarrow> ex3 x = None"
by (simp add: domIff)
lemma ex3_dom: "dom ex3 = {5,6,7,8,9}"
unfolding ex3_defs
apply (simp split:if_splits, safe)
(* Nice example of how it goes "wrong" with undefined! *)
oops
lemma ex3_dom: "dom ex3 = {1,2,3,4}"
unfolding ex3_defs
apply (simp split:if_splits, safe) oops
(* Nice example of how it goes "wrong" with undefined! *)
lemma ex3_dom: "dom ex3 = {3,4}"
unfolding ex3_defs
apply (simp split:if_splits, safe) oops
(* Nice example of how it goes "wrong" with undefined! *)
(* more direct binds are fine, even with range expressions.
= { x |-> x+5 | x in set {1,2,3,4} & x > 2 } *)
definition
ex3' :: "VDMNat \<rightharpoonup> VDMNat"
where
"ex3' \<equiv> mapCompSetBound {1,2,3,4::VDMNat} { x + 5 | x . x \<in> {1,2,3,4::VDMNat} } inv_VDMNat inv_VDMNat domid (\<lambda> x . (\<lambda> y . x + 5)) (\<lambda> x . (\<lambda> y . x > 2))"
lemmas ex3'_defs = ex3'_def mapCompSetBound_defs inv_VDMNat_def
lemma ex3'_none: "x \<notin> dom ex3' \<Longrightarrow> ex3' x = None"
by (simp add: domIff)
lemma ex3'_dom: "dom ex3' = {3,4}"
unfolding ex3'_def mapCompSetBound_defs inv_SetElems_def inv_VDMNat_def
apply (simp split:if_splits)
apply (intro equalityI subsetI, simp_all add: dom_def split:if_splits)
by fastforce+
lemma ex3'_dom': "dom ex3' = {3,4}"
unfolding ex3'_defs
apply (simp split:if_splits)
apply (intro equalityI subsetI, simp_all add: dom_def split:if_splits)
using inv_SetElems_def by fastforce+
lemma ex3'_rng: "rng ex3' = {8,9}"
unfolding ex3'_defs inv_SetElems_def
apply (simp split:if_splits)
apply (intro equalityI subsetI)
apply (simp_all, safe, simp_all)
apply (rule+, force, force, force, force, force, force, force)
apply (rule_tac x=3 in exI, force)
by (rule_tac x=4 in exI, force)
lemma ex3'_map: "x \<in> dom ex3' \<Longrightarrow> ex3' x = Some (x+5)"
unfolding ex3'_defs
apply (simp split:if_splits, safe, force)
apply (linarith, force)
using inv_SetElems_def apply fastforce
using inv_SetElems_def apply fastforce
using inv_SetElems_def apply fastforce
using inv_SetElems_def apply fastforce
using inv_SetElems_def apply fastforce
using inv_SetElems_def apply fastforce
using inv_SetElems_def by fastforce
(* okay: dead simple ones
= { x |-> 5 | x in set {1,2,3,4} } *)
definition
ex4 :: "VDMNat \<rightharpoonup> VDMNat"
where
"ex4 \<equiv> mapCompSetBound {1,2,3,4::VDMNat} { 5::VDMNat } inv_VDMNat inv_VDMNat domid (rngcnst 5) truecnst"
lemmas ex4_defs = ex4_def mapCompSetBound_defs inv_VDMNat_def
lemma ex4_none: "x \<notin> dom ex4 \<Longrightarrow> ex4 x = None"
by (simp add: domIff)
lemma ex4_dom: "dom ex4 = {1,2,3,4}"
unfolding ex4_def mapCompSetBound_defs inv_SetElems_def inv_VDMNat_def
apply (simp split:if_splits)
by (intro equalityI subsetI, simp_all add: dom_def split:if_splits)
lemma ex4_rng: "rng ex4 = {5}"
unfolding ex4_defs inv_SetElems_def
apply (simp split:if_splits)
apply (intro equalityI subsetI, force, simp)
by (rule_tac x=1 in exI, fastforce)
lemma ex4_map: "x \<in> dom ex4 \<Longrightarrow> ex4 x = Some 5"
unfolding ex4_defs
apply (simp split:if_splits, safe, force+)
by (meson option.distinct(1))
(* for simple domain binds, you get simple enough proofs *)
(* s1: set of real = {1,2,3}, is_(s1, set of int)? *)
lemma "isTest' {1,2,3::VDMReal}
(vdmset_of_t vdmint_of_real)
(inv_VDMSet' inv_VDMInt)"
unfolding isTest'_def vdmset_of_t_def total_coercion_def
apply simp
unfolding inv_VDMSet'_def inv_VDMSet_def inv_VDMInt_def inv_SetElems_def
apply (simp, safe)
unfolding vdmint_of_real_def is_VDMRealWhole_def
by (simp_all add: vdm_narrow_real_def)
(* s1: set of real = {1,2,3}, is_(s1, set of nat)? *)
lemma "isTest' {1,2,3::VDMReal}
(vdmset_of_t vdmint_of_real)
(inv_VDMSet' inv_VDMNat)"
unfolding isTest'_def vdmset_of_t_def total_coercion_def
apply simp
unfolding inv_VDMSet'_def inv_VDMSet_def inv_VDMNat_def inv_SetElems_def
apply (simp, safe)
unfolding vdmint_of_real_def is_VDMRealWhole_def
by (simp_all add: vdm_narrow_real_def)
lemma "\<not> isTest' (-10)
(\<lambda> x . if inv_VDMNat x then Some x else None)
inv_VDMNat"
unfolding isTest'_def inv_VDMNat_def by simp
lemma "isTest' [1,2,3::VDMNat]
(\<lambda> x . Some { x$i | i . i \<in> inds x })
(inv_VDMSet' inv_VDMNat)"
unfolding isTest'_def
apply simp
unfolding inv_VDMSet'_def inv_VDMSet_def inv_VDMNat_def inv_SetElems_def
apply (simp, safe)
unfolding applyVDMSeq_def inv_VDMNat1_def len_def
apply simp
by (simp add: nth_Cons')
(*<*)end(*>*) |
-- Based on the code questioned at https://discord.com/channels/827106007712661524/834611018775789568/918166736153501696
-- Import and %language pragma added so the snippet stands alone when run via %runElab.
import Language.Reflection

%language ElabReflection

-- Declares, at elaboration time, a %foreign binding: `fd` is the foreign specifier and `fn` the Idris-level name, at type Int -> Int.
a : String -> String -> Elab ()
a fn fd = declare
    `[ %foreign ~(IPrimVal EmptyFC $ Constant $ Str fd)
       ~(IVar EmptyFC $ UN $ Basic fn) : Int -> Int
     ]
|
The Student Accounting department handles accounting issues for students at UC Davis, and is where you can pick up paychecks or financial aid from the Financial Aid Office. This is where to go if the University owes you money; if you owe them money, try the Cashiers Office.
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro
! This file was ported from Lean 3 source module data.finset.preimage
! leanprover-community/mathlib commit 327c3c0d9232d80e250dc8f65e7835b82b266ea5
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Set.Finite
import Mathbin.Algebra.BigOperators.Basic
/-!
# Preimage of a `finset` under an injective map.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
open Set Function
open BigOperators
universe u v w x
variable {α : Type u} {β : Type v} {ι : Sort w} {γ : Type x}
namespace Finset
section Preimage
#print Finset.preimage /-
/-- Preimage of `s : finset β` under a map `f` injective on `f ⁻¹' s`, as a `finset`. -/
noncomputable def preimage (s : Finset β) (f : α → β) (hf : Set.InjOn f (f ⁻¹' ↑s)) : Finset α :=
(s.finite_toSet.Preimage hf).toFinset
#align finset.preimage Finset.preimage
-/
#print Finset.mem_preimage /-
@[simp]
theorem mem_preimage {f : α → β} {s : Finset β} {hf : Set.InjOn f (f ⁻¹' ↑s)} {x : α} :
x ∈ preimage s f hf ↔ f x ∈ s :=
Set.Finite.mem_toFinset _
#align finset.mem_preimage Finset.mem_preimage
-/
#print Finset.coe_preimage /-
@[simp, norm_cast]
theorem coe_preimage {f : α → β} (s : Finset β) (hf : Set.InjOn f (f ⁻¹' ↑s)) :
(↑(preimage s f hf) : Set α) = f ⁻¹' ↑s :=
Set.Finite.coe_toFinset _
#align finset.coe_preimage Finset.coe_preimage
-/
#print Finset.preimage_empty /-
@[simp]
theorem preimage_empty {f : α → β} : preimage ∅ f (by simp [inj_on]) = ∅ :=
Finset.coe_injective (by simp)
#align finset.preimage_empty Finset.preimage_empty
-/
#print Finset.preimage_univ /-
@[simp]
theorem preimage_univ {f : α → β} [Fintype α] [Fintype β] (hf) : preimage univ f hf = univ :=
Finset.coe_injective (by simp)
#align finset.preimage_univ Finset.preimage_univ
-/
#print Finset.preimage_inter /-
@[simp]
theorem preimage_inter [DecidableEq α] [DecidableEq β] {f : α → β} {s t : Finset β}
(hs : Set.InjOn f (f ⁻¹' ↑s)) (ht : Set.InjOn f (f ⁻¹' ↑t)) :
(preimage (s ∩ t) f fun x₁ hx₁ x₂ hx₂ =>
hs (mem_of_mem_inter_left hx₁) (mem_of_mem_inter_left hx₂)) =
preimage s f hs ∩ preimage t f ht :=
Finset.coe_injective (by simp)
#align finset.preimage_inter Finset.preimage_inter
-/
#print Finset.preimage_union /-
@[simp]
theorem preimage_union [DecidableEq α] [DecidableEq β] {f : α → β} {s t : Finset β} (hst) :
preimage (s ∪ t) f hst =
(preimage s f fun x₁ hx₁ x₂ hx₂ => hst (mem_union_left _ hx₁) (mem_union_left _ hx₂)) ∪
preimage t f fun x₁ hx₁ x₂ hx₂ => hst (mem_union_right _ hx₁) (mem_union_right _ hx₂) :=
Finset.coe_injective (by simp)
#align finset.preimage_union Finset.preimage_union
-/
#print Finset.preimage_compl /-
@[simp]
theorem preimage_compl [DecidableEq α] [DecidableEq β] [Fintype α] [Fintype β] {f : α → β}
(s : Finset β) (hf : Function.Injective f) :
preimage (sᶜ) f (hf.InjOn _) = preimage s f (hf.InjOn _)ᶜ :=
Finset.coe_injective (by simp)
#align finset.preimage_compl Finset.preimage_compl
-/
#print Finset.monotone_preimage /-
theorem monotone_preimage {f : α → β} (h : Injective f) :
Monotone fun s => preimage s f (h.InjOn _) := fun s t hst x hx =>
mem_preimage.2 (hst <| mem_preimage.1 hx)
#align finset.monotone_preimage Finset.monotone_preimage
-/
#print Finset.image_subset_iff_subset_preimage /-
theorem image_subset_iff_subset_preimage [DecidableEq β] {f : α → β} {s : Finset α} {t : Finset β}
(hf : Set.InjOn f (f ⁻¹' ↑t)) : s.image f ⊆ t ↔ s ⊆ t.Preimage f hf :=
image_subset_iff.trans <| by simp only [subset_iff, mem_preimage]
#align finset.image_subset_iff_subset_preimage Finset.image_subset_iff_subset_preimage
-/
#print Finset.map_subset_iff_subset_preimage /-
theorem map_subset_iff_subset_preimage {f : α ↪ β} {s : Finset α} {t : Finset β} :
s.map f ⊆ t ↔ s ⊆ t.Preimage f (f.Injective.InjOn _) := by
classical rw [map_eq_image, image_subset_iff_subset_preimage]
#align finset.map_subset_iff_subset_preimage Finset.map_subset_iff_subset_preimage
-/
#print Finset.image_preimage /-
theorem image_preimage [DecidableEq β] (f : α → β) (s : Finset β) [∀ x, Decidable (x ∈ Set.range f)]
(hf : Set.InjOn f (f ⁻¹' ↑s)) :
image f (preimage s f hf) = s.filterₓ fun x => x ∈ Set.range f :=
Finset.coe_inj.1 <| by
simp only [coe_image, coe_preimage, coe_filter, Set.image_preimage_eq_inter_range,
Set.sep_mem_eq]
#align finset.image_preimage Finset.image_preimage
-/
#print Finset.image_preimage_of_bij /-
theorem image_preimage_of_bij [DecidableEq β] (f : α → β) (s : Finset β)
(hf : Set.BijOn f (f ⁻¹' ↑s) ↑s) : image f (preimage s f hf.InjOn) = s :=
Finset.coe_inj.1 <| by simpa using hf.image_eq
#align finset.image_preimage_of_bij Finset.image_preimage_of_bij
-/
#print Finset.preimage_subset /-
theorem preimage_subset {f : α ↪ β} {s : Finset β} {t : Finset α} (hs : s ⊆ t.map f) :
s.Preimage f (f.Injective.InjOn _) ⊆ t := fun x hx => (mem_map' f).1 (hs (mem_preimage.1 hx))
#align finset.preimage_subset Finset.preimage_subset
-/
/- ./././Mathport/Syntax/Translate/Basic.lean:635:2: warning: expanding binder collection (u «expr ⊆ » t) -/
#print Finset.subset_map_iff /-
theorem subset_map_iff {f : α ↪ β} {s : Finset β} {t : Finset α} :
s ⊆ t.map f ↔ ∃ (u : _)(_ : u ⊆ t), s = u.map f := by
classical
refine' ⟨fun h => ⟨_, preimage_subset h, _⟩, _⟩
· rw [map_eq_image, image_preimage, filter_true_of_mem fun x hx => _]
exact coe_map_subset_range _ _ (h hx)
· rintro ⟨u, hut, rfl⟩
exact map_subset_map.2 hut
#align finset.subset_map_iff Finset.subset_map_iff
-/
/- warning: finset.sigma_preimage_mk -> Finset.sigma_preimage_mk is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : α -> Type.{u2}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (t : Finset.{u1} α), Eq.{succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (i : α) => β i))) (Finset.sigma.{u1, u2} α (fun (a : α) => β a) t (fun (a : α) => Finset.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) s (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (Function.Injective.injOn.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (sigma_mk_injective.{u1, u2} α (fun (a : α) => β a) a) (Set.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) ((fun (a : Type.{max u1 u2}) (b : Type.{max u1 u2}) [self : HasLiftT.{succ (max u1 u2), succ (max u1 u2)} a b] => self.0) (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (HasLiftT.mk.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (CoeTCₓ.coe.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Finset.Set.hasCoeT.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))))) s))))) (Finset.filter.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a)) (fun (a : Sigma.{u1, u2} α (fun (a : α) => β a)) => Membership.Mem.{u1, u1} α (Finset.{u1} α) (Finset.hasMem.{u1} α) (Sigma.fst.{u1, u2} α (fun (a : α) => β a) a) t) (fun (a : Sigma.{u1, u2} α (fun (a : α) => β a)) => Finset.decidableMem.{u1} α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u1, u2} α (fun (a : α) => β a) a) t) s)
but is expected to have type
forall {α : Type.{u2}} {β : α -> Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] (s : Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (a : α) => β a))) (t : Finset.{u2} α), Eq.{max (succ u2) (succ u1)} (Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (i : α) => β i))) (Finset.sigma.{u2, u1} α (fun (a : α) => β a) t (fun (a : α) => Finset.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) s (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Function.Injective.injOn.{max u1 u2, u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (sigma_mk_injective.{u2, u1} α (fun (a : α) => β a) a) (Set.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Finset.toSet.{max u2 u1} (Sigma.{u2, u1} α (fun (a : α) => β a)) s))))) (Finset.filter.{max u2 u1} (Sigma.{u2, u1} α (fun (a : α) => β a)) (fun (a : Sigma.{u2, u1} α (fun (a : α) => β a)) => Membership.mem.{u2, u2} α (Finset.{u2} α) (Finset.instMembershipFinset.{u2} α) (Sigma.fst.{u2, u1} α (fun (a : α) => β a) a) t) (fun (a : Sigma.{u2, u1} α (fun (a : α) => β a)) => Finset.decidableMem.{u2} α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u2, u1} α (fun (a : α) => β a) a) t) s)
Case conversion may be inaccurate. Consider using '#align finset.sigma_preimage_mk Finset.sigma_preimage_mkₓ'. -/
theorem sigma_preimage_mk {β : α → Type _} [DecidableEq α] (s : Finset (Σa, β a)) (t : Finset α) :
(t.Sigma fun a => s.Preimage (Sigma.mk a) <| sigma_mk_injective.InjOn _) =
s.filterₓ fun a => a.1 ∈ t :=
by
ext x
simp [and_comm']
#align finset.sigma_preimage_mk Finset.sigma_preimage_mk
/- warning: finset.sigma_preimage_mk_of_subset -> Finset.sigma_preimage_mk_of_subset is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : α -> Type.{u2}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) {t : Finset.{u1} α}, (HasSubset.Subset.{u1} (Finset.{u1} α) (Finset.hasSubset.{u1} α) (Finset.image.{max u1 u2, u1} (Sigma.{u1, u2} α (fun (a : α) => β a)) α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u1, u2} α (fun (a : α) => β a)) s) t) -> (Eq.{succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (i : α) => β i))) (Finset.sigma.{u1, u2} α (fun (a : α) => β a) t (fun (a : α) => Finset.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) s (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (Function.Injective.injOn.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (sigma_mk_injective.{u1, u2} α (fun (a : α) => β a) a) (Set.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) ((fun (a : Type.{max u1 u2}) (b : Type.{max u1 u2}) [self : HasLiftT.{succ (max u1 u2), succ (max u1 u2)} a b] => self.0) (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (HasLiftT.mk.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (CoeTCₓ.coe.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Finset.Set.hasCoeT.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))))) s))))) s)
but is expected to have type
forall {α : Type.{u2}} {β : α -> Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] (s : Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (a : α) => β a))) {t : Finset.{u2} α}, (HasSubset.Subset.{u2} (Finset.{u2} α) (Finset.instHasSubsetFinset.{u2} α) (Finset.image.{max u1 u2, u2} (Sigma.{u2, u1} α (fun (a : α) => β a)) α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u2, u1} α (fun (a : α) => β a)) s) t) -> (Eq.{max (succ u2) (succ u1)} (Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (i : α) => β i))) (Finset.sigma.{u2, u1} α (fun (a : α) => β a) t (fun (a : α) => Finset.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) s (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Function.Injective.injOn.{max u1 u2, u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (sigma_mk_injective.{u2, u1} α (fun (a : α) => β a) a) (Set.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Finset.toSet.{max u2 u1} (Sigma.{u2, u1} α (fun (a : α) => β a)) s))))) s)
Case conversion may be inaccurate. Consider using '#align finset.sigma_preimage_mk_of_subset Finset.sigma_preimage_mk_of_subsetₓ'. -/
theorem sigma_preimage_mk_of_subset {β : α → Type _} [DecidableEq α] (s : Finset (Σa, β a))
{t : Finset α} (ht : s.image Sigma.fst ⊆ t) :
(t.Sigma fun a => s.Preimage (Sigma.mk a) <| sigma_mk_injective.InjOn _) = s := by
rw [sigma_preimage_mk, filter_true_of_mem <| image_subset_iff.1 ht]
#align finset.sigma_preimage_mk_of_subset Finset.sigma_preimage_mk_of_subset
/- warning: finset.sigma_image_fst_preimage_mk -> Finset.sigma_image_fst_preimage_mk is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : α -> Type.{u2}} [_inst_1 : DecidableEq.{succ u1} α] (s : Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))), Eq.{succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (i : α) => β i))) (Finset.sigma.{u1, u2} α (fun (a : α) => β a) (Finset.image.{max u1 u2, u1} (Sigma.{u1, u2} α (fun (a : α) => β a)) α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u1, u2} α (fun (a : α) => β a)) s) (fun (a : α) => Finset.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) s (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (Function.Injective.injOn.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) (sigma_mk_injective.{u1, u2} α (fun (a : α) => β a) a) (Set.preimage.{u2, max u1 u2} (β a) (Sigma.{u1, u2} α (fun (a : α) => β a)) (Sigma.mk.{u1, u2} α (fun (a : α) => β a) a) ((fun (a : Type.{max u1 u2}) (b : Type.{max u1 u2}) [self : HasLiftT.{succ (max u1 u2), succ (max u1 u2)} a b] => self.0) (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (HasLiftT.mk.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (CoeTCₓ.coe.{succ (max u1 u2), succ (max u1 u2)} (Finset.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Set.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))) (Finset.Set.hasCoeT.{max u1 u2} (Sigma.{u1, u2} α (fun (a : α) => β a))))) s))))) s
but is expected to have type
forall {α : Type.{u2}} {β : α -> Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] (s : Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (a : α) => β a))), Eq.{max (succ u2) (succ u1)} (Finset.{max u1 u2} (Sigma.{u2, u1} α (fun (i : α) => β i))) (Finset.sigma.{u2, u1} α (fun (a : α) => β a) (Finset.image.{max u1 u2, u2} (Sigma.{u2, u1} α (fun (a : α) => β a)) α (fun (a : α) (b : α) => _inst_1 a b) (Sigma.fst.{u2, u1} α (fun (a : α) => β a)) s) (fun (a : α) => Finset.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) s (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Function.Injective.injOn.{max u1 u2, u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (sigma_mk_injective.{u2, u1} α (fun (a : α) => β a) a) (Set.preimage.{u1, max u2 u1} (β a) (Sigma.{u2, u1} α (fun (a : α) => β a)) (Sigma.mk.{u2, u1} α (fun (a : α) => β a) a) (Finset.toSet.{max u2 u1} (Sigma.{u2, u1} α (fun (a : α) => β a)) s))))) s
Case conversion may be inaccurate. Consider using '#align finset.sigma_image_fst_preimage_mk Finset.sigma_image_fst_preimage_mkₓ'. -/
theorem sigma_image_fst_preimage_mk {β : α → Type _} [DecidableEq α] (s : Finset (Σa, β a)) :
((s.image Sigma.fst).Sigma fun a => s.Preimage (Sigma.mk a) <| sigma_mk_injective.InjOn _) =
s :=
s.sigma_preimage_mk_of_subset (Subset.refl _)
#align finset.sigma_image_fst_preimage_mk Finset.sigma_image_fst_preimage_mk
end Preimage
#print Finset.prod_preimage' /-
@[to_additive]
theorem prod_preimage' [CommMonoid β] (f : α → γ) [DecidablePred fun x => x ∈ Set.range f]
(s : Finset γ) (hf : Set.InjOn f (f ⁻¹' ↑s)) (g : γ → β) :
(∏ x in s.Preimage f hf, g (f x)) = ∏ x in s.filterₓ fun x => x ∈ Set.range f, g x := by
haveI := Classical.decEq γ <;>
calc
(∏ x in preimage s f hf, g (f x)) = ∏ x in image f (preimage s f hf), g x :=
Eq.symm <| prod_image <| by simpa only [mem_preimage, inj_on] using hf
_ = ∏ x in s.filter fun x => x ∈ Set.range f, g x := by rw [image_preimage]
#align finset.prod_preimage' Finset.prod_preimage'
#align finset.sum_preimage' Finset.sum_preimage'
-/
/- warning: finset.prod_preimage -> Finset.prod_preimage is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : CommMonoid.{u2} β] (f : α -> γ) (s : Finset.{u3} γ) (hf : Set.InjOn.{u1, u3} α γ f (Set.preimage.{u1, u3} α γ f ((fun (a : Type.{u3}) (b : Type.{u3}) [self : HasLiftT.{succ u3, succ u3} a b] => self.0) (Finset.{u3} γ) (Set.{u3} γ) (HasLiftT.mk.{succ u3, succ u3} (Finset.{u3} γ) (Set.{u3} γ) (CoeTCₓ.coe.{succ u3, succ u3} (Finset.{u3} γ) (Set.{u3} γ) (Finset.Set.hasCoeT.{u3} γ))) s))) (g : γ -> β), (forall (x : γ), (Membership.Mem.{u3, u3} γ (Finset.{u3} γ) (Finset.hasMem.{u3} γ) x s) -> (Not (Membership.Mem.{u3, u3} γ (Set.{u3} γ) (Set.hasMem.{u3} γ) x (Set.range.{u3, succ u1} γ α f))) -> (Eq.{succ u2} β (g x) (OfNat.ofNat.{u2} β 1 (OfNat.mk.{u2} β 1 (One.one.{u2} β (MulOneClass.toHasOne.{u2} β (Monoid.toMulOneClass.{u2} β (CommMonoid.toMonoid.{u2} β _inst_1)))))))) -> (Eq.{succ u2} β (Finset.prod.{u2, u1} β α _inst_1 (Finset.preimage.{u1, u3} α γ s f hf) (fun (x : α) => g (f x))) (Finset.prod.{u2, u3} β γ _inst_1 s (fun (x : γ) => g x)))
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : CommMonoid.{u2} β] (f : α -> γ) (s : Finset.{u3} γ) (hf : Set.InjOn.{u1, u3} α γ f (Set.preimage.{u1, u3} α γ f (Finset.toSet.{u3} γ s))) (g : γ -> β), (forall (x : γ), (Membership.mem.{u3, u3} γ (Finset.{u3} γ) (Finset.instMembershipFinset.{u3} γ) x s) -> (Not (Membership.mem.{u3, u3} γ (Set.{u3} γ) (Set.instMembershipSet.{u3} γ) x (Set.range.{u3, succ u1} γ α f))) -> (Eq.{succ u2} β (g x) (OfNat.ofNat.{u2} β 1 (One.toOfNat1.{u2} β (Monoid.toOne.{u2} β (CommMonoid.toMonoid.{u2} β _inst_1)))))) -> (Eq.{succ u2} β (Finset.prod.{u2, u1} β α _inst_1 (Finset.preimage.{u1, u3} α γ s f hf) (fun (x : α) => g (f x))) (Finset.prod.{u2, u3} β γ _inst_1 s (fun (x : γ) => g x)))
Case conversion may be inaccurate. Consider using '#align finset.prod_preimage Finset.prod_preimageₓ'. -/
@[to_additive]
theorem prod_preimage [CommMonoid β] (f : α → γ) (s : Finset γ) (hf : Set.InjOn f (f ⁻¹' ↑s))
(g : γ → β) (hg : ∀ x ∈ s, x ∉ Set.range f → g x = 1) :
(∏ x in s.Preimage f hf, g (f x)) = ∏ x in s, g x := by
classical
rw [prod_preimage', prod_filter_of_ne]
exact fun x hx => Not.imp_symm (hg x hx)
#align finset.prod_preimage Finset.prod_preimage
#align finset.sum_preimage Finset.sum_preimage
#print Finset.prod_preimage_of_bij /-
@[to_additive]
theorem prod_preimage_of_bij [CommMonoid β] (f : α → γ) (s : Finset γ)
(hf : Set.BijOn f (f ⁻¹' ↑s) ↑s) (g : γ → β) :
(∏ x in s.Preimage f hf.InjOn, g (f x)) = ∏ x in s, g x :=
prod_preimage _ _ hf.InjOn g fun x hxs hxf => (hxf <| hf.subset_range hxs).elim
#align finset.prod_preimage_of_bij Finset.prod_preimage_of_bij
#align finset.sum_preimage_of_bij Finset.sum_preimage_of_bij
-/
end Finset
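/- A small illustration of `Finset.subset_map_iff` and `Finset.preimage_subset`
   (a sketch, assuming an embedding `f : ℕ ↪ ℤ` such as the natural coercion
   bundled as an embedding): for `s = {(2 : ℤ)}` and `t = ({1, 2} : Finset ℕ)`
   we have `s ⊆ t.map f`, so `subset_map_iff` yields some `u ⊆ t` with
   `s = u.map f`; concretely `s.preimage f _ = {2}`, and `preimage_subset`
   gives `{2} ⊆ {1, 2}`. -/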
|
If $x$ and $y$ are polynomials and $(q, r)$ is the euclidean division of $x$ by $y$, then $x \bmod y = r$. For instance, dividing $x = t^2 + 1$ by $y = t$ gives $q = t$ and $r = 1$, so $(t^2 + 1) \bmod t = 1$. |
theory SWCommonPatterns_SWCylByAE_IsCylinder_T
imports "$HETS_ISABELLE_LIB/MainHC"
uses "$HETS_ISABELLE_LIB/prelude"
begin
ML "Header.initialize
[\"ga_subt_reflexive\", \"ga_subt_transitive\",
\"ga_subt_inj_proj\", \"ga_inj_transitive\",
\"ga_subt_Int_XLt_Rat\", \"ga_subt_Nat_XLt_Int\",
\"ga_subt_NonZero_XLt_Real\", \"ga_subt_Pos_XLt_Nat\",
\"ga_subt_Rat_XLt_Real\", \"ga_subt_RealNonNeg_XLt_Real\",
\"ga_subt_RealPos_XLt_RealNonNeg\",
\"ga_subt_SWArc_XLt_SWSketchObject\",
\"ga_subt_SWExtrusion_XLt_SWFeature\",
\"ga_subt_SWFeature_XLt_SWObject\",
\"ga_subt_SWLine_XLt_SWSketchObject\",
\"ga_subt_SWPlane_XLt_SWObject\",
\"ga_subt_SWSketch_XLt_SWObject\",
\"ga_subt_SWSketchObject_XLt_SWObject\",
\"ga_subt_SWSpline_XLt_SWSketchObject\",
\"ga_subt_VectorStar_XLt_Vector\", \"ga_assoc___Xx__\",
\"ga_right_unit___Xx__\", \"ga_left_unit___Xx__\", \"inv_Group\",
\"rinv_Group\", \"ga_comm___Xx__\", \"ga_assoc___Xx___9\",
\"ga_right_unit___Xx___7\", \"ga_left_unit___Xx___8\",
\"distr1_Ring\", \"distr2_Ring\", \"left_zero\", \"right_zero\",
\"ga_comm___Xx___14\", \"noZeroDiv\", \"zeroNeqOne\",
\"NonZero_type\", \"ga_assoc___Xx___22\",
\"ga_right_unit___Xx___18\", \"ga_left_unit___Xx___20\",
\"inv_Group_21\", \"rinv_Group_19\", \"binary_inverse\",
\"binary_field_inverse\", \"refl\", \"trans\", \"antisym\",
\"dichotomy_TotalOrder\", \"FWO_plus_left\", \"FWO_times_left\",
\"FWO_plus_right\", \"FWO_times_right\", \"FWO_plus\", \"inf_def\",
\"Real_completeness\", \"geq_def_ExtPartialOrder\",
\"less_def_ExtPartialOrder\", \"greater_def_ExtPartialOrder\",
\"ga_comm_inf\", \"ga_comm_sup\", \"inf_def_ExtPartialOrder\",
\"sup_def_ExtPartialOrder\", \"ga_comm_min\", \"ga_comm_max\",
\"ga_assoc_min\", \"ga_assoc_max\", \"ga_left_comm_min\",
\"ga_left_comm_max\", \"min_def_ExtTotalOrder\",
\"max_def_ExtTotalOrder\", \"min_inf_relation\",
\"max_sup_relation\", \"RealNonNeg_pred_def\",
\"RealPos_pred_def\", \"RealNonNeg_type\", \"RealPos_type\",
\"abs_def\", \"times_cancel_right_nonneg_leq\",
\"times_leq_nonneg_cond\", \"sqr_def\", \"sqrt_def\", \"Pos_def\",
\"ga_select_C1\", \"ga_select_C2\", \"ga_select_C3\",
\"Zero_Point\", \"Point_choice\", \"ga_select_C1_131\",
\"ga_select_C2_132\", \"ga_select_C3_133\", \"Zero_Vector\",
\"VectorStar_pred_def\", \"VectorStar_type\",
\"def_of_vector_addition\", \"def_of_minus_vector\",
\"binary_inverse_72\", \"scalar_multiplication\",
\"scalar_product\", \"vector_product\", \"ONB1\", \"ONB2\",
\"ONB3\", \"cross_left_homogenity\",
\"cross_product_antisymmetric\", \"ga_assoc___Xx___82\",
\"ga_right_unit___Xx___76\", \"ga_left_unit___Xx___78\",
\"inv_Group_79\", \"rinv_Group_77\", \"ga_comm___Xx___80\",
\"unit\", \"mix_assoc\", \"distr_Field\", \"distr_Space\",
\"zero_by_left_zero\", \"zero_by_right_zero\",
\"inverse_by_XMinus1\", \"no_zero_divisor\", \"distributive\",
\"homogeneous\", \"symmetric\", \"pos_definite\",
\"right_distributive\", \"right_homogeneous\", \"non_degenerate\",
\"lindep_def\", \"lindep_reflexivity\", \"lindep_symmetry\",
\"simple_lindep_condition\", \"lindep_nonlindep_transitivity\",
\"norm_from_inner_prod_def\", \"proj_def\", \"orthcomp_def\",
\"orthogonal_def\", \"homogeneous_93\", \"definite\",
\"pos_definite_94\", \"pos_homogeneous\", \"orth_symmetric\",
\"lindep_orth_transitive\", \"orthogonal_existence_theorem\",
\"orthogonal_on_zero_projection\",
\"orthogonal_projection_theorem\",
\"orthogonal_decomposition_theorem\",
\"unique_orthogonal_decomposition\", \"ga_select_first\",
\"ga_select_rest\", \"Ax4\", \"Ax5\", \"listop_basecase\",
\"listop_reccase\", \"cross_product_orthogonal\",
\"cross_product_zero_iff_lindep\", \"e1e2_nonlindep\",
\"point_vector_map\", \"plus_injective\", \"plus_surjective\",
\"point_vector_plus_associative\", \"vec_def\",
\"transitivity_of_vec_plus\", \"plus_vec_identity\",
\"set_comprehension\", \"abbrev_of_set_comprehension\",
\"function_image\", \"emptySet_empty\", \"allSet_contains_all\",
\"def_of_isIn\", \"def_of_subset\", \"def_of_union\",
\"def_of_bigunion\", \"def_of_intersection\",
\"def_of_difference\", \"def_of_disjoint\", \"def_of_productset\",
\"emptySet_union_right_unit\", \"function_image_structure\",
\"def_of_interval\", \"abbrev_of_interval\",
\"plus_PointSet_Vector\", \"plus_Point_VectorSet\",
\"plus_PointSet_VectorSet\", \"ga_select_SpacePoint\",
\"ga_select_NormalVector\", \"ga_select_InnerCS\",
\"ga_select_Center\", \"ga_select_Start\", \"ga_select_End\",
\"ga_select_From\", \"ga_select_To\", \"ga_select_Points\",
\"ga_select_Objects\", \"ga_select_Plane\",
\"ga_select_Objects_1\", \"ga_select_SkchCtrtStatus\",
\"ga_select_Objects_2\", \"ga_select_Sketch\", \"ga_select_Depth\",
\"E1_def\", \"E2_def\", \"E3_def\", \"VLine_constr\",
\"VWithLength_constr\", \"VPlane_constr\", \"VPlane2_constr\",
\"VConnected_constr\", \"VHalfSpace_constr\",
\"VHalfSpace2_constr\", \"VBall_constr\", \"VCircle_constr\",
\"ActAttach_constr\", \"ActExtrude_constr\", \"vwl_identity\",
\"vwl_length\", \"vwl_lindep\", \"semantics_for_Planes\",
\"semantics_for_SketchObject_listsXMinusBaseCase\",
\"semantics_for_SketchObject_listsXMinusRecursiveCase\",
\"semantics_for_Arcs\", \"semantics_for_Sketches\",
\"semantics_for_ArcExtrusion\", \"def_of_SWCylinder\",
\"affine_cylinder_constructible_in_SW\", \"def_of_Cylinder\"]"
typedecl NonZero
typedecl PointSet
typedecl Pos
typedecl Rat
typedecl Real
typedecl RealNonNeg
typedecl RealPos
typedecl RealSet
typedecl SWFeature
typedecl SWObject
typedecl SWSketchObject
typedecl ('a1) Set
typedecl VectorSet
typedecl VectorStar
typedecl X_Int
typedecl X_Nat
datatype Point = X_P "Real" "Real" "Real" ("P/'(_,/ _,/ _')" [3,3,3] 999)
datatype Vector = X_V "Real" "Real" "Real" ("V/'(_,/ _,/ _')" [3,3,3] 999)
datatype 'a List = XOSqBrXCSqBr ("['']") |
X__XColonXColon__X 'a "'a List" ("(_/ ::''/ _)" [54,54] 52)
datatype SWPlane = X_SWPlane "Point" "VectorStar" "Vector"
datatype SWArc = X_SWArc "Point" "Point" "Point"
datatype SWLine = X_SWLine "Point" "Point"
datatype SWSpline = X_SWSpline "Point List"
datatype SWSketch = X_SWSketch "SWSketchObject List" "SWPlane"
datatype SWSkchCtrtParam = sgANGLE | sgARCANG180 | sgARCANG270 |
sgARCANG90 | sgARCANGBOTTOM | sgARCANGLEFT |
sgARCANGRIGHT | sgARCANGTOP | sgATINTERSECT |
sgATMIDDLE | sgATPIERCE | sgCOINCIDENT | sgCOLINEAR |
sgCONCENTRIC | sgCORADIAL | sgDIAMETER | sgDISTANCE |
sgFIXXED ("sgFIXED") | sgHORIZONTAL | sgHORIZPOINTS |
sgOFFSETEDGE | sgPARALLEL | sgPERPENDICULAR |
sgSAMELENGTH | sgSNAPANGLE | sgSNAPGRID |
sgSNAPLENGTH | sgSYMMETRIC | sgTANGENT | sgUSEEDGE |
sgVERTICAL | sgVERTPOINTS
datatype SWSkchCtrtStatus = X_Autosolve_off ("Autosolve'_off") |
X_Fully_constrained ("Fully'_constrained") |
X_No_solution ("No'_solution") |
X_Over_constrained ("Over'_constrained") |
X_Under_constrained ("Under'_constrained")
datatype SWSkchCtrtObject = X_SWSkchCtrtObject "SWSkchCtrtParam List"
datatype SWSkchCtrts = X_SWSkchCtrts "SWSkchCtrtStatus" "SWSkchCtrtObject List"
datatype SWExtrusion = X_SWExtrusion "SWSketch" "Real"
consts
ActAttach :: "Point * (Vector => bool) => Point => bool"
ActExtrude :: "Vector * (Point => bool) => Point => bool"
C1X1 :: "Point => Real" ("C1''/'(_')" [3] 999)
C1X2 :: "Vector => Real" ("C1''''/'(_')" [3] 999)
C2X1 :: "Point => Real" ("C2''/'(_')" [3] 999)
C2X2 :: "Vector => Real" ("C2''''/'(_')" [3] 999)
C3X1 :: "Point => Real" ("C3''/'(_')" [3] 999)
C3X2 :: "Vector => Real" ("C3''''/'(_')" [3] 999)
Cylinder :: "(Point * RealPos) * VectorStar => Point => bool"
E1 :: "SWPlane"
E2 :: "SWPlane"
E3 :: "SWPlane"
ObjectsX1 :: "SWSkchCtrtObject => SWSkchCtrtParam List" ("Objects''/'(_')" [3] 999)
ObjectsX2 :: "SWSkchCtrts => SWSkchCtrtObject List" ("Objects''''/'(_')" [3] 999)
ObjectsX3 :: "SWSketch => SWSketchObject List" ("Objects'_3/'(_')" [3] 999)
Pi :: "RealPos"
VBall :: "Real => Vector => bool"
VCircle :: "Real * Vector => Vector => bool"
VConnected :: "(Vector => bool) * Vector => Vector => bool"
VHalfSpace :: "Vector => Vector => bool"
VHalfSpace2 :: "Vector => Vector => bool"
VLine :: "Vector * Vector => Vector => bool"
VPlane :: "Vector => Vector => bool"
VPlane2 :: "Vector * Vector => Vector => bool"
X0X1 :: "X_Int" ("0''")
X0X2 :: "X_Nat" ("0''''")
X0X3 :: "Point" ("0'_3")
X0X4 :: "Rat" ("0'_4")
X0X5 :: "Real" ("0'_5")
X0X6 :: "Vector" ("0'_6")
X1X1 :: "X_Int" ("1''")
X1X2 :: "X_Nat" ("1''''")
X1X3 :: "NonZero" ("1'_3")
X1X4 :: "Pos" ("1'_4")
X1X5 :: "Rat" ("1'_5")
X1X6 :: "Real" ("1'_6")
X2X1 :: "X_Int" ("2''")
X2X2 :: "X_Nat" ("2''''")
X2X3 :: "Rat" ("2'_3")
X3X1 :: "X_Int" ("3''")
X3X2 :: "X_Nat" ("3''''")
X3X3 :: "Rat" ("3'_3")
X4X1 :: "X_Int" ("4''")
X4X2 :: "X_Nat" ("4''''")
X4X3 :: "Rat" ("4'_3")
X5X1 :: "X_Int" ("5''")
X5X2 :: "X_Nat" ("5''''")
X5X3 :: "Rat" ("5'_3")
X6X1 :: "X_Int" ("6''")
X6X2 :: "X_Nat" ("6''''")
X6X3 :: "Rat" ("6'_3")
X7X1 :: "X_Int" ("7''")
X7X2 :: "X_Nat" ("7''''")
X7X3 :: "Rat" ("7'_3")
X8X1 :: "X_Int" ("8''")
X8X2 :: "X_Nat" ("8''''")
X8X3 :: "Rat" ("8'_3")
X9X1 :: "X_Int" ("9''")
X9X2 :: "X_Nat" ("9''''")
X9X3 :: "Rat" ("9'_3")
XLBrace__XRBrace :: "('S => bool) => 'S => bool"
XMinus__XX1 :: "X_Int => X_Int" ("(-''/ _)" [56] 56)
XMinus__XX2 :: "Rat => Rat" ("(-''''/ _)" [56] 56)
XMinus__XX3 :: "Real => Real" ("(-'_3/ _)" [56] 56)
XMinus__XX4 :: "Vector => Vector" ("(-'_4/ _)" [56] 56)
XOSqBr__XPeriodXPeriodXPeriod__XCSqBr :: "Real * Real => Real => bool"
XVBarXVBar__XVBarXVBar :: "Vector => Real" ("(||/ _/ ||)" [10] 999)
X_Center :: "SWArc => Point" ("Center/'(_')" [3] 999)
X_Depth :: "SWExtrusion => Real" ("Depth/'(_')" [3] 999)
X_End :: "SWArc => Point" ("End/'(_')" [3] 999)
X_From :: "SWLine => Point" ("From/'(_')" [3] 999)
X_InnerCS :: "SWPlane => Vector" ("InnerCS/'(_')" [3] 999)
X_NormalVector :: "SWPlane => VectorStar" ("NormalVector/'(_')" [3] 999)
X_Plane :: "SWSketch => SWPlane" ("Plane/'(_')" [3] 999)
X_Points :: "SWSpline => Point List" ("Points/'(_')" [3] 999)
X_Pos :: "Real => bool"
X_RealNonNeg_pred :: "Real => bool" ("RealNonNeg'_pred/'(_')" [3] 999)
X_RealPos_pred :: "Real => bool" ("RealPos'_pred/'(_')" [3] 999)
X_SWCylinder :: "Point => Point => VectorStar => SWFeature" ("SWCylinder/'(_,/ _,/ _')" [3,3,3] 999)
X_SkchCtrtStatus :: "SWSkchCtrts => SWSkchCtrtStatus" ("SkchCtrtStatus/'(_')" [3] 999)
X_Sketch :: "SWExtrusion => SWSketch" ("Sketch/'(_')" [3] 999)
X_SpacePoint :: "SWPlane => Point" ("SpacePoint/'(_')" [3] 999)
X_Start :: "SWArc => Point" ("Start/'(_')" [3] 999)
X_To :: "SWLine => Point" ("To/'(_')" [3] 999)
X_VWithLength :: "Vector => Real => Vector" ("VWithLength/'(_,/ _')" [3,3] 999)
X_VectorStar_pred :: "Vector => bool" ("VectorStar'_pred/'(_')" [3] 999)
X__E__X :: "Rat => X_Int => Rat" ("(_/ E/ _)" [54,54] 52)
X__XAtXAt__X :: "X_Nat => X_Nat => X_Nat" ("(_/ @@/ _)" [54,54] 52)
X__XBslashXBslash__X :: "('S => bool) * ('S => bool) => 'S => bool"
X__XCaret__XX1 :: "X_Int => X_Nat => X_Int" ("(_/ ^''/ _)" [54,54] 52)
X__XCaret__XX2 :: "X_Nat => X_Nat => X_Nat" ("(_/ ^''''/ _)" [54,54] 52)
X__XCaret__XX3 :: "Rat => X_Int => Rat partial" ("(_/ ^'_3/ _)" [54,54] 52)
X__XColonXColonXColon__X :: "X_Nat => X_Nat => Rat" ("(_/ :::/ _)" [54,54] 52)
X__XExclam :: "X_Nat => X_Nat" ("(_/ !'')" [58] 58)
X__XGtXEq__XX1 :: "X_Int => X_Int => bool" ("(_/ >=''/ _)" [44,44] 42)
X__XGtXEq__XX2 :: "X_Nat => X_Nat => bool" ("(_/ >=''''/ _)" [44,44] 42)
X__XGtXEq__XX3 :: "Rat => Rat => bool" ("(_/ >='_3/ _)" [44,44] 42)
X__XGtXEq__XX4 :: "Real => Real => bool" ("(_/ >='_4/ _)" [44,44] 42)
X__XGt__XX1 :: "X_Int => X_Int => bool" ("(_/ >''/ _)" [44,44] 42)
X__XGt__XX2 :: "X_Nat => X_Nat => bool" ("(_/ >''''/ _)" [44,44] 42)
X__XGt__XX3 :: "Rat => Rat => bool" ("(_/ >'_3/ _)" [44,44] 42)
X__XGt__XX4 :: "Real => Real => bool" ("(_/ >'_4/ _)" [44,44] 42)
X__XHash__X :: "Vector => Vector => Vector" ("(_/ #''/ _)" [54,54] 52)
X__XLtXEq__XX1 :: "X_Int => X_Int => bool" ("(_/ <=''/ _)" [44,44] 42)
X__XLtXEq__XX2 :: "X_Nat => X_Nat => bool" ("(_/ <=''''/ _)" [44,44] 42)
X__XLtXEq__XX3 :: "Rat => Rat => bool" ("(_/ <='_3/ _)" [44,44] 42)
X__XLtXEq__XX4 :: "Real => Real => bool" ("(_/ <='_4/ _)" [44,44] 42)
X__XLt__XX1 :: "X_Int => X_Int => bool" ("(_/ <''/ _)" [44,44] 42)
X__XLt__XX2 :: "X_Nat => X_Nat => bool" ("(_/ <''''/ _)" [44,44] 42)
X__XLt__XX3 :: "Rat => Rat => bool" ("(_/ <'_3/ _)" [44,44] 42)
X__XLt__XX4 :: "Real => Real => bool" ("(_/ <'_4/ _)" [44,44] 42)
X__XMinusXExclam__X :: "X_Nat => X_Nat => X_Nat" ("(_/ -!/ _)" [54,54] 52)
X__XMinusXQuest__X :: "X_Nat => X_Nat => X_Nat partial" ("(_/ -?/ _)" [54,54] 52)
X__XMinus__XX1 :: "X_Int => X_Int => X_Int" ("(_/ -''/ _)" [54,54] 52)
X__XMinus__XX2 :: "X_Nat => X_Nat => X_Int" ("(_/ -''''/ _)" [54,54] 52)
X__XMinus__XX3 :: "Rat => Rat => Rat" ("(_/ -'_3/ _)" [54,54] 52)
X__XMinus__XX4 :: "Real => Real => Real" ("(_/ -'_4/ _)" [54,54] 52)
X__XMinus__XX5 :: "Vector => Vector => Vector" ("(_/ -'_5/ _)" [54,54] 52)
X__XPlus__XX1 :: "X_Int => X_Int => X_Int" ("(_/ +''/ _)" [54,54] 52)
X__XPlus__XX10 :: "(Point => bool) * Vector => Point => bool"
X__XPlus__XX11 :: "(Point => bool) * (Vector => bool) => Point => bool"
X__XPlus__XX2 :: "X_Nat => X_Nat => X_Nat" ("(_/ +''''/ _)" [54,54] 52)
X__XPlus__XX3 :: "X_Nat => Pos => Pos" ("(_/ +'_3/ _)" [54,54] 52)
X__XPlus__XX4 :: "Point => Vector => Point" ("(_/ +'_4/ _)" [54,54] 52)
X__XPlus__XX5 :: "Point * (Vector => bool) => Point => bool"
X__XPlus__XX6 :: "Pos => X_Nat => Pos" ("(_/ +'_6/ _)" [54,54] 52)
X__XPlus__XX7 :: "Rat => Rat => Rat" ("(_/ +'_7/ _)" [54,54] 52)
X__XPlus__XX8 :: "Real => Real => Real" ("(_/ +'_8/ _)" [54,54] 52)
X__XPlus__XX9 :: "Vector => Vector => Vector" ("(_/ +'_9/ _)" [54,54] 52)
X__XSlashXQuest__XX1 :: "X_Int => X_Int => X_Int partial" ("(_/ '/?''/ _)" [54,54] 52)
X__XSlashXQuest__XX2 :: "X_Nat => X_Nat => X_Nat partial" ("(_/ '/?''''/ _)" [54,54] 52)
X__XSlash__XX1 :: "X_Int => Pos => Rat" ("(_/ '/''/ _)" [54,54] 52)
X__XSlash__XX2 :: "Real => NonZero => Real" ("(_/ '/''''/ _)" [54,54] 52)
X__XSlash__XX3 :: "Rat => Rat => Rat partial" ("(_/ '/'_3/ _)" [54,54] 52)
X__Xx__XX1 :: "X_Int => X_Int => X_Int" ("(_/ *''/ _)" [54,54] 52)
X__Xx__XX2 :: "X_Nat => X_Nat => X_Nat" ("(_/ *''''/ _)" [54,54] 52)
X__Xx__XX3 :: "NonZero => NonZero => NonZero" ("(_/ *'_3/ _)" [54,54] 52)
X__Xx__XX4 :: "Pos => Pos => Pos" ("(_/ *'_4/ _)" [54,54] 52)
X__Xx__XX5 :: "Rat => Rat => Rat" ("(_/ *'_5/ _)" [54,54] 52)
X__Xx__XX6 :: "Real => Real => Real" ("(_/ *'_6/ _)" [54,54] 52)
X__Xx__XX7 :: "Real => Vector => Vector" ("(_/ *'_7/ _)" [54,54] 52)
X__Xx__XX8 :: "Vector => Vector => Real" ("(_/ *'_8/ _)" [54,54] 52)
X__Xx__XX9 :: "('S => bool) * ('T => bool) => 'S * 'T => bool"
X__disjoint__X :: "('S => bool) => ('S => bool) => bool" ("(_/ disjoint/ _)" [44,44] 42)
X__div__XX1 :: "X_Int => X_Int => X_Int partial" ("(_/ div''/ _)" [54,54] 52)
X__div__XX2 :: "X_Nat => X_Nat => X_Nat partial" ("(_/ div''''/ _)" [54,54] 52)
X__dvd__X :: "X_Nat => X_Nat => bool" ("(_/ dvd''/ _)" [44,44] 42)
X__intersection__X :: "('S => bool) * ('S => bool) => 'S => bool"
X__isIn__X :: "'S => ('S => bool) => bool" ("(_/ isIn/ _)" [44,44] 42)
X__mod__XX1 :: "X_Int => X_Int => X_Nat partial" ("(_/ mod''/ _)" [54,54] 52)
X__mod__XX2 :: "X_Nat => X_Nat => X_Nat partial" ("(_/ mod''''/ _)" [54,54] 52)
X__quot__X :: "X_Int => X_Int => X_Int partial" ("(_/ quot/ _)" [54,54] 52)
X__rem__X :: "X_Int => X_Int => X_Int partial" ("(_/ rem/ _)" [54,54] 52)
X__subset__X :: "('S => bool) => ('S => bool) => bool" ("(_/ subset/ _)" [44,44] 42)
X__union__X :: "('S => bool) * ('S => bool) => 'S => bool"
X_absX1 :: "X_Int => X_Nat" ("abs''/'(_')" [3] 999)
X_absX2 :: "Rat => Rat" ("abs''''/'(_')" [3] 999)
X_absX3 :: "Real => RealNonNeg" ("abs'_3/'(_')" [3] 999)
X_allSet :: "'S => bool" ("allSet/'(_')" [3] 999)
X_choose :: "(Point => bool) => Point" ("choose''/'(_')" [3] 999)
X_emptySet :: "'S => bool" ("emptySet/'(_')" [3] 999)
X_evenX1 :: "X_Int => bool" ("even''/'(_')" [3] 999)
X_evenX2 :: "X_Nat => bool" ("even''''/'(_')" [3] 999)
X_first :: "'a List => 'a partial" ("first/'(_')" [3] 999)
X_gn_inj :: "'a => 'b" ("gn'_inj/'(_')" [3] 999)
X_gn_proj :: "'a => 'b partial" ("gn'_proj/'(_')" [3] 999)
X_gn_subt :: "'a => 'b => bool" ("gn'_subt/'(_,/ _')" [3,3] 999)
X_image :: "('S => 'T) * ('S => bool) => 'T => bool"
X_inv :: "NonZero => NonZero" ("inv''/'(_')" [3] 999)
X_isX1 :: "SWSketchObject * SWPlane => Point => bool"
X_isX2 :: "SWSketchObject List * SWPlane => Point => bool"
X_lindep :: "Vector => Vector => bool" ("lindep/'(_,/ _')" [3,3] 999)
X_map :: "('a => 'b) => 'a List => 'b List"
X_maxX1 :: "X_Int => X_Int => X_Int" ("max''/'(_,/ _')" [3,3] 999)
X_maxX2 :: "X_Nat => X_Nat => X_Nat" ("max''''/'(_,/ _')" [3,3] 999)
X_maxX3 :: "Rat => Rat => Rat" ("max'_3/'(_,/ _')" [3,3] 999)
X_maxX4 :: "Real => Real => Real" ("max'_4/'(_,/ _')" [3,3] 999)
X_minX1 :: "X_Int => X_Int => X_Int" ("min''/'(_,/ _')" [3,3] 999)
X_minX2 :: "X_Nat => X_Nat => X_Nat" ("min''''/'(_,/ _')" [3,3] 999)
X_minX3 :: "Rat => Rat => Rat" ("min'_3/'(_,/ _')" [3,3] 999)
X_minX4 :: "Real => Real => Real" ("min'_4/'(_,/ _')" [3,3] 999)
X_oddX1 :: "X_Int => bool" ("odd''/'(_')" [3] 999)
X_oddX2 :: "X_Nat => bool" ("odd''''/'(_')" [3] 999)
X_orth :: "Vector => Vector => bool" ("orth/'(_,/ _')" [3,3] 999)
X_orthcomp :: "Vector => Vector => Vector" ("orthcomp/'(_,/ _')" [3,3] 999)
X_pre :: "X_Nat => X_Nat partial" ("pre/'(_')" [3] 999)
X_proj :: "Vector => Vector => Vector" ("proj/'(_,/ _')" [3,3] 999)
X_rest :: "'a List => 'a List partial" ("rest/'(_')" [3] 999)
X_sign :: "X_Int => X_Int" ("sign/'(_')" [3] 999)
X_sqr :: "Real => RealNonNeg" ("sqr/'(_')" [3] 999)
X_sqrt :: "RealNonNeg => RealNonNeg" ("sqrt/'(_')" [3] 999)
X_sum :: "Vector List => Vector" ("sum/'(_')" [3] 999)
X_sup :: "Real => Real => Real partial" ("sup/'(_,/ _')" [3,3] 999)
X_vec :: "Point => Point => Vector" ("vec/'(_,/ _')" [3,3] 999)
bigunion :: "(('S => bool) => bool) => 'S => bool"
closedinterval :: "Real * Real => Real => bool"
e1 :: "Vector"
e2 :: "Vector"
e3 :: "Vector"
iX1 :: "SWFeature => Point => bool"
iX2 :: "SWPlane => Point => bool"
iX3 :: "SWSketch => Point => bool"
infX1 :: "Real => Real => Real partial" ("inf''/'(_,/ _')" [3,3] 999)
infX2 :: "(Real => bool) => Real partial" ("inf''''/'(_')" [3] 999)
setFromProperty :: "('S => bool) => 'S => bool"
sucX1 :: "X_Nat => X_Nat" ("suc''/'(_')" [3] 999)
sucX2 :: "X_Nat => Pos" ("suc''''/'(_')" [3] 999)
axioms
ga_subt_reflexive [rule_format] :
"ALL (x :: 'a). ALL (y :: 'a). gn_subt(x, y)"
ga_subt_transitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'b).
ALL (z :: 'c). gn_subt(x, y) & gn_subt(y, z) --> gn_subt(x, z)"
ga_subt_inj_proj [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'b).
gn_subt(x, y) -->
y = (X_gn_inj :: 'a => 'b) x =
(makePartial x = (X_gn_proj :: 'b => 'a partial) y)"
ga_inj_transitive [rule_format] :
"ALL (x :: 'a).
ALL (y :: 'b).
ALL (z :: 'c).
gn_subt(x, y) & gn_subt(y, z) & y = (X_gn_inj :: 'a => 'b) x -->
z = (X_gn_inj :: 'a => 'c) x = (z = (X_gn_inj :: 'b => 'c) y)"
ga_subt_Int_XLt_Rat [rule_format] :
"ALL (x :: X_Int). ALL (y :: Rat). gn_subt(x, y)"
ga_subt_Nat_XLt_Int [rule_format] :
"ALL (x :: X_Nat). ALL (y :: X_Int). gn_subt(x, y)"
ga_subt_NonZero_XLt_Real [rule_format] :
"ALL (x :: NonZero). ALL (y :: Real). gn_subt(x, y)"
ga_subt_Pos_XLt_Nat [rule_format] :
"ALL (x :: Pos). ALL (y :: X_Nat). gn_subt(x, y)"
ga_subt_Rat_XLt_Real [rule_format] :
"ALL (x :: Rat). ALL (y :: Real). gn_subt(x, y)"
ga_subt_RealNonNeg_XLt_Real [rule_format] :
"ALL (x :: RealNonNeg). ALL (y :: Real). gn_subt(x, y)"
ga_subt_RealPos_XLt_RealNonNeg [rule_format] :
"ALL (x :: RealPos). ALL (y :: RealNonNeg). gn_subt(x, y)"
ga_subt_SWArc_XLt_SWSketchObject [rule_format] :
"ALL (x :: SWArc). ALL (y :: SWSketchObject). gn_subt(x, y)"
ga_subt_SWExtrusion_XLt_SWFeature [rule_format] :
"ALL (x :: SWExtrusion). ALL (y :: SWFeature). gn_subt(x, y)"
ga_subt_SWFeature_XLt_SWObject [rule_format] :
"ALL (x :: SWFeature). ALL (y :: SWObject). gn_subt(x, y)"
ga_subt_SWLine_XLt_SWSketchObject [rule_format] :
"ALL (x :: SWLine). ALL (y :: SWSketchObject). gn_subt(x, y)"
ga_subt_SWPlane_XLt_SWObject [rule_format] :
"ALL (x :: SWPlane). ALL (y :: SWObject). gn_subt(x, y)"
ga_subt_SWSketch_XLt_SWObject [rule_format] :
"ALL (x :: SWSketch). ALL (y :: SWObject). gn_subt(x, y)"
ga_subt_SWSketchObject_XLt_SWObject [rule_format] :
"ALL (x :: SWSketchObject). ALL (y :: SWObject). gn_subt(x, y)"
ga_subt_SWSpline_XLt_SWSketchObject [rule_format] :
"ALL (x :: SWSpline). ALL (y :: SWSketchObject). gn_subt(x, y)"
ga_subt_VectorStar_XLt_Vector [rule_format] :
"ALL (x :: VectorStar). ALL (y :: Vector). gn_subt(x, y)"
ga_assoc___Xx__ [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). (x +_8 y) +_8 z = x +_8 (y +_8 z)"
ga_right_unit___Xx__ [rule_format] :
"ALL (x :: Real). x +_8 0_5 = x"
ga_left_unit___Xx__ [rule_format] :
"ALL (x :: Real). 0_5 +_8 x = x"
inv_Group [rule_format] : "ALL (x :: Real). -_3 x +_8 x = 0_5"
rinv_Group [rule_format] : "ALL (x :: Real). x +_8 -_3 x = 0_5"
ga_comm___Xx__ [rule_format] :
"ALL (x :: Real). ALL (y :: Real). x +_8 y = y +_8 x"
ga_assoc___Xx___9 [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). (x *_6 y) *_6 z = x *_6 (y *_6 z)"
ga_right_unit___Xx___7 [rule_format] :
"ALL (x :: Real). x *_6 1_6 = x"
ga_left_unit___Xx___8 [rule_format] :
"ALL (x :: Real). 1_6 *_6 x = x"
distr1_Ring [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). (x +_8 y) *_6 z = (x *_6 z) +_8 (y *_6 z)"
distr2_Ring [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). z *_6 (x +_8 y) = (z *_6 x) +_8 (z *_6 y)"
left_zero [rule_format] : "ALL (x :: Real). 0_5 *_6 x = 0_5"
right_zero [rule_format] : "ALL (x :: Real). x *_6 0_5 = 0_5"
ga_comm___Xx___14 [rule_format] :
"ALL (x :: Real). ALL (y :: Real). x *_6 y = y *_6 x"
noZeroDiv [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). x *_6 y = 0_5 --> x = 0_5 | y = 0_5"
zeroNeqOne [rule_format] : "~ 1_6 = 0_5"
NonZero_type [rule_format] :
"ALL (x :: Real).
defOp ((X_gn_proj :: Real => NonZero partial) x) = (~ x = 0_5)"
ga_assoc___Xx___22 [rule_format] :
"ALL (x :: NonZero).
ALL (y :: NonZero).
ALL (z :: NonZero). (x *_3 y) *_3 z = x *_3 (y *_3 z)"
ga_right_unit___Xx___18 [rule_format] :
"ALL (x :: NonZero). x *_3 1_3 = x"
ga_left_unit___Xx___20 [rule_format] :
"ALL (x :: NonZero). 1_3 *_3 x = x"
inv_Group_21 [rule_format] :
"ALL (x :: NonZero). inv'(x) *_3 x = 1_3"
rinv_Group_19 [rule_format] :
"ALL (x :: NonZero). x *_3 inv'(x) = 1_3"
binary_inverse [rule_format] :
"ALL (x :: Real). ALL (y :: Real). x -_4 y = x +_8 -_3 y"
binary_field_inverse [rule_format] :
"ALL (x :: Real).
ALL (y :: NonZero).
x /'' y = x *_6 (X_gn_inj :: NonZero => Real) (inv'(y))"
refl [rule_format] : "ALL (x :: Real). x <=_4 x"
trans [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). ALL (z :: Real). x <=_4 y & y <=_4 z --> x <=_4 z"
antisym [rule_format] :
"ALL (x :: Real). ALL (y :: Real). x <=_4 y & y <=_4 x --> x = y"
dichotomy_TotalOrder [rule_format] :
"ALL (x :: Real). ALL (y :: Real). x <=_4 y | y <=_4 x"
FWO_plus_left [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real). a <=_4 b --> a +_8 c <=_4 b +_8 c"
FWO_times_left [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real). a <=_4 b & 0_5 <=_4 c --> a *_6 c <=_4 b *_6 c"
FWO_plus_right [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real). b <=_4 c --> a +_8 b <=_4 a +_8 c"
FWO_times_right [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real). b <=_4 c & 0_5 <=_4 a --> a *_6 b <=_4 a *_6 c"
FWO_plus [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real).
ALL (d :: Real). a <=_4 c & b <=_4 d --> a +_8 b <=_4 c +_8 d"
inf_def [rule_format] :
"ALL (S :: Real => bool).
ALL (m :: Real).
inf''(S) = makePartial m =
(ALL (m2 :: Real).
(ALL (x :: Real). S x --> x <=_4 m2) --> m <=_4 m2)"
Real_completeness [rule_format] :
"ALL (S :: Real => bool).
(EX (x :: Real). S x) &
(EX (B :: Real). ALL (x :: Real). S x --> x <=_4 B) -->
(EX (m :: Real). makePartial m = inf''(S))"
geq_def_ExtPartialOrder [rule_format] :
"ALL (x :: Real). ALL (y :: Real). (x >=_4 y) = (y <=_4 x)"
less_def_ExtPartialOrder [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). (x <_4 y) = (x <=_4 y & ~ x = y)"
greater_def_ExtPartialOrder [rule_format] :
"ALL (x :: Real). ALL (y :: Real). (x >_4 y) = (y <_4 x)"
ga_comm_inf [rule_format] :
"ALL (x :: Real). ALL (y :: Real). inf'(x, y) = inf'(y, x)"
ga_comm_sup [rule_format] :
"ALL (x :: Real). ALL (y :: Real). sup(x, y) = sup(y, x)"
inf_def_ExtPartialOrder [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real).
inf'(x, y) = makePartial z =
(z <=_4 x &
z <=_4 y & (ALL (t :: Real). t <=_4 x & t <=_4 y --> t <=_4 z))"
sup_def_ExtPartialOrder [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real).
sup(x, y) = makePartial z =
(x <=_4 z &
y <=_4 z & (ALL (t :: Real). x <=_4 t & y <=_4 t --> z <=_4 t))"
ga_comm_min [rule_format] :
"ALL (x :: Real). ALL (y :: Real). min_4(x, y) = min_4(y, x)"
ga_comm_max [rule_format] :
"ALL (x :: Real). ALL (y :: Real). max_4(x, y) = max_4(y, x)"
ga_assoc_min [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). min_4(min_4(x, y), z) = min_4(x, min_4(y, z))"
ga_assoc_max [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). max_4(max_4(x, y), z) = max_4(x, max_4(y, z))"
ga_left_comm_min [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). min_4(x, min_4(y, z)) = min_4(y, min_4(x, z))"
ga_left_comm_max [rule_format] :
"ALL (x :: Real).
ALL (y :: Real).
ALL (z :: Real). max_4(x, max_4(y, z)) = max_4(y, max_4(x, z))"
min_def_ExtTotalOrder [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). min_4(x, y) = (if x <=_4 y then x else y)"
max_def_ExtTotalOrder [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). max_4(x, y) = (if x <=_4 y then y else x)"
min_inf_relation [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). makePartial (min_4(x, y)) = inf'(x, y)"
max_sup_relation [rule_format] :
"ALL (x :: Real).
ALL (y :: Real). makePartial (max_4(x, y)) = sup(x, y)"
RealNonNeg_pred_def [rule_format] :
"ALL (x :: Real). RealNonNeg_pred(x) = (x >=_4 0_5)"
RealPos_pred_def [rule_format] :
"ALL (x :: Real). RealPos_pred(x) = (x >_4 0_5)"
RealNonNeg_type [rule_format] :
"ALL (x :: Real).
defOp ((X_gn_proj :: Real => RealNonNeg partial) x) =
RealNonNeg_pred(x)"
RealPos_type [rule_format] :
"ALL (x :: Real).
defOp ((X_gn_proj :: Real => RealPos partial) x) = RealPos_pred(x)"
abs_def [rule_format] :
"ALL (x :: Real).
makePartial (abs_3(x)) =
(if 0_5 <=_4 x then (X_gn_proj :: Real => RealNonNeg partial) x
else (X_gn_proj :: Real => RealNonNeg partial) (-_3 x))"
times_cancel_right_nonneg_leq [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
ALL (c :: Real). a *_6 b <=_4 c *_6 b & b >=_4 0_5 --> a <=_4 c"
times_leq_nonneg_cond [rule_format] :
"ALL (a :: Real).
ALL (b :: Real). 0_5 <=_4 a *_6 b & b >=_4 0_5 --> 0_5 <=_4 a"
sqr_def [rule_format] :
"ALL (r :: Real).
(X_gn_inj :: RealNonNeg => Real) (sqr(r)) = r *_6 r"
sqrt_def [rule_format] :
"ALL (q :: RealNonNeg).
sqr((X_gn_inj :: RealNonNeg => Real) (sqrt(q))) = q"
Pos_def [rule_format] : "ALL (x :: Real). X_Pos x = (0_5 <=_4 x)"
ga_select_C1 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C1'(P(x_1, x_2, x_3)) = x_1"
ga_select_C2 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C2'(P(x_1, x_2, x_3)) = x_2"
ga_select_C3 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C3'(P(x_1, x_2, x_3)) = x_3"
Zero_Point [rule_format] :
"0_3 =
P((X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'')"
Point_choice [rule_format] :
"ALL (X_P :: Point => bool).
(EX (y :: Point). X_P y) --> X_P (choose'(X_P))"
ga_select_C1_131 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C1''(V(x_1, x_2, x_3)) = x_1"
ga_select_C2_132 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C2''(V(x_1, x_2, x_3)) = x_2"
ga_select_C3_133 [rule_format] :
"ALL (x_1 :: Real).
ALL (x_2 :: Real). ALL (x_3 :: Real). C3''(V(x_1, x_2, x_3)) = x_3"
Zero_Vector [rule_format] :
"0_6 =
V((X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'')"
VectorStar_pred_def [rule_format] :
"ALL (x :: Vector). VectorStar_pred(x) = (~ x = 0_6)"
VectorStar_type [rule_format] :
"ALL (x :: Vector).
defOp ((X_gn_proj :: Vector => VectorStar partial) x) =
VectorStar_pred(x)"
def_of_vector_addition [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
x +_9 y =
V(C1''(x) +_8 C1''(y), C2''(x) +_8 C2''(y), C3''(x) +_8 C3''(y))"
def_of_minus_vector [rule_format] :
"ALL (x :: Vector).
-_4 x = V(-_3 C1''(x), -_3 C2''(x), -_3 C3''(x))"
binary_inverse_72 [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). x -_5 y = x +_9 -_4 y"
scalar_multiplication [rule_format] :
"ALL (x :: Real).
ALL (y :: Vector).
x *_7 y = V(x *_6 C1''(y), x *_6 C2''(y), x *_6 C3''(y))"
scalar_product [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
x *_8 y =
((C1''(x) *_6 C1''(y)) +_8 (C2''(x) *_6 C2''(y))) +_8
(C3''(x) *_6 C3''(y))"
vector_product [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
x #' y =
V((C2''(x) *_6 C3''(y)) -_4 (C2''(y) *_6 C3''(x)),
(C3''(x) *_6 C1''(y)) -_4 (C3''(y) *_6 C1''(x)),
(C1''(x) *_6 C2''(y)) -_4 (C1''(y) *_6 C2''(x)))"
ONB1 [rule_format] :
"e1 =
V((X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0'')"
ONB2 [rule_format] :
"e2 =
V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'')"
ONB3 [rule_format] :
"e3 =
V((X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3)"
cross_left_homogenity [rule_format] :
"ALL (r :: Real).
ALL (x :: Vector).
ALL (y :: Vector). r *_7 (x #' y) = (r *_7 x) #' y"
cross_product_antisymmetric [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). x #' y = -_4 (y #' x)"
ga_assoc___Xx___82 [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
ALL (z :: Vector). (x +_9 y) +_9 z = x +_9 (y +_9 z)"
ga_right_unit___Xx___76 [rule_format] :
"ALL (x :: Vector). x +_9 0_6 = x"
ga_left_unit___Xx___78 [rule_format] :
"ALL (x :: Vector). 0_6 +_9 x = x"
inv_Group_79 [rule_format] : "ALL (x :: Vector). -_4 x +_9 x = 0_6"
rinv_Group_77 [rule_format] :
"ALL (x :: Vector). x +_9 -_4 x = 0_6"
ga_comm___Xx___80 [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). x +_9 y = y +_9 x"
unit [rule_format] :
"ALL (x :: Vector). (X_gn_inj :: NonZero => Real) 1_3 *_7 x = x"
mix_assoc [rule_format] :
"ALL (r :: Real).
ALL (s :: Real).
ALL (x :: Vector). (r *_6 s) *_7 x = r *_7 (s *_7 x)"
distr_Field [rule_format] :
"ALL (r :: Real).
ALL (s :: Real).
ALL (x :: Vector). (r +_8 s) *_7 x = (r *_7 x) +_9 (s *_7 x)"
distr_Space [rule_format] :
"ALL (r :: Real).
ALL (x :: Vector).
ALL (y :: Vector). r *_7 (x +_9 y) = (r *_7 x) +_9 (r *_7 y)"
zero_by_left_zero [rule_format] :
"ALL (x :: Vector). 0_5 *_7 x = 0_6"
zero_by_right_zero [rule_format] :
"ALL (r :: Real). r *_7 0_6 = 0_6"
inverse_by_XMinus1 [rule_format] :
"ALL (x :: Vector).
-_3 (X_gn_inj :: NonZero => Real) 1_3 *_7 x = -_4 x"
no_zero_divisor [rule_format] :
"ALL (r :: Real).
ALL (x :: Vector). ~ r = 0_5 & ~ x = 0_6 --> ~ r *_7 x = 0_6"
distributive [rule_format] :
"ALL (v :: Vector).
ALL (v' :: Vector).
ALL (w :: Vector). (v +_9 v') *_8 w = (v *_8 w) +_8 (v' *_8 w)"
homogeneous [rule_format] :
"ALL (a :: Real).
ALL (v :: Vector).
ALL (w :: Vector). (a *_7 v) *_8 w = a *_6 (v *_8 w)"
symmetric [rule_format] :
"ALL (v :: Vector). ALL (w :: Vector). v *_8 w = w *_8 v"
pos_definite [rule_format] :
"ALL (v :: Vector). ~ v = 0_6 --> v *_8 v >_4 0_5"
right_distributive [rule_format] :
"ALL (v :: Vector).
ALL (v' :: Vector).
ALL (w :: Vector). w *_8 (v +_9 v') = (w *_8 v) +_8 (w *_8 v')"
right_homogeneous [rule_format] :
"ALL (a :: Real).
ALL (v :: Vector).
ALL (w :: Vector). v *_8 (a *_7 w) = a *_6 (v *_8 w)"
non_degenerate [rule_format] :
"ALL (v :: Vector). v *_8 v = 0_5 --> v = 0_6"
lindep_def [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
lindep(x, y) = (y = 0_6 | (EX (r :: Real). x = r *_7 y))"
lindep_reflexivity [rule_format] :
"ALL (x :: Vector). lindep(x, x)"
lindep_symmetry [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector). lindep(x, y) --> lindep(y, x)"
simple_lindep_condition [rule_format] :
"ALL (r :: Real).
ALL (x :: Vector). ALL (y :: Vector). x = r *_7 y --> lindep(x, y)"
lindep_nonlindep_transitivity [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
ALL (z :: Vector).
(~ x = 0_6 & lindep(x, y)) & ~ lindep(y, z) --> ~ lindep(x, z)"
norm_from_inner_prod_def [rule_format] :
"ALL (x :: Vector).
makePartial ( || x || ) =
restrictOp
(makePartial
((X_gn_inj :: RealNonNeg => Real)
(sqrt(makeTotal
((X_gn_proj :: Real => RealNonNeg partial) (x *_8 x))))))
(defOp ((X_gn_proj :: Real => RealNonNeg partial) (x *_8 x)))"
proj_def [rule_format] :
"ALL (v :: Vector).
ALL (w :: Vector).
makePartial (proj(v, w)) =
(if w = 0_6 then makePartial 0_6
else restrictOp
(makePartial
(((v *_8 w) /''
makeTotal ((X_gn_proj :: Real => NonZero partial) (w *_8 w)))
*_7 w))
(defOp ((X_gn_proj :: Real => NonZero partial) (w *_8 w))))"
orthcomp_def [rule_format] :
"ALL (v :: Vector).
ALL (w :: Vector). orthcomp(v, w) = v -_5 proj(v, w)"
orthogonal_def [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector). orth(x, y) = (x *_8 y = 0_5)"
homogeneous_93 [rule_format] :
"ALL (r :: Real).
ALL (v :: Vector).
|| r *_7 v || =
(X_gn_inj :: RealNonNeg => Real) (abs_3(r)) *_6 || v ||"
definite [rule_format] :
"ALL (v :: Vector). || v || = 0_5 = (v = 0_6)"
pos_definite_94 [rule_format] :
"ALL (v :: Vector). 0_5 <=_4 || v ||"
pos_homogeneous [rule_format] :
"ALL (r :: Real).
ALL (v :: Vector). r >=_4 0_5 --> || r *_7 v || = r *_6 || v ||"
orth_symmetric [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). orth(x, y) --> orth(y, x)"
lindep_orth_transitive [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector).
ALL (z :: Vector). lindep(x, y) & orth(y, z) --> orth(x, z)"
orthogonal_existence_theorem [rule_format] :
"ALL (x :: Vector).
(EX (a :: Vector). EX (b :: Vector). ~ lindep(a, b)) -->
(EX (c :: Vector). ~ c = 0_6 & orth(c, x))"
orthogonal_on_zero_projection [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector). proj(x, y) = 0_6 --> orth(x, y)"
orthogonal_projection_theorem [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). orth(orthcomp(x, y), y)"
orthogonal_decomposition_theorem [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector). proj(x, y) +_9 orthcomp(x, y) = x"
unique_orthogonal_decomposition [rule_format] :
"ALL (v :: Vector).
ALL (w :: Vector).
ALL (x :: Vector).
ALL (y :: Vector).
ALL (z :: Vector).
((((~ z = 0_6 & x +_9 y = v +_9 w) & lindep(x, z)) &
lindep(v, z)) &
orth(z, y)) &
orth(z, w) -->
x = v & y = w"
ga_select_first [rule_format] :
"ALL (x_1 :: 'a).
ALL (x_2 :: 'a List). first(x_1 ::' x_2) = makePartial x_1"
ga_select_rest [rule_format] :
"ALL (x_1 :: 'a).
ALL (x_2 :: 'a List). rest(x_1 ::' x_2) = makePartial x_2"
Ax4 [rule_format] : "ALL (f :: 'a => 'b). X_map f ['] = [']"
Ax5 [rule_format] :
"ALL (f :: 'a => 'b).
ALL (l :: 'a List).
ALL (x :: 'a). X_map f (x ::' l) = f x ::' X_map f l"
listop_basecase [rule_format] : "sum([']) = 0_6"
listop_reccase [rule_format] :
"ALL (l :: Vector List).
ALL (x :: Vector). sum(x ::' l) = x +_9 sum(l)"
cross_product_orthogonal [rule_format] :
"ALL (x :: Vector). ALL (y :: Vector). orth(x, x #' y)"
cross_product_zero_iff_lindep [rule_format] :
"ALL (x :: Vector).
ALL (y :: Vector). lindep(x, y) = (x #' y = 0_6)"
e1e2_nonlindep [rule_format] : "~ lindep(e1, e2)"
point_vector_map [rule_format] :
"ALL (p :: Point).
ALL (v :: Vector).
p +_4 v =
P(C1'(p) +_8 C1''(v), C2'(p) +_8 C2''(v), C3'(p) +_8 C3''(v))"
plus_injective [rule_format] :
"ALL (p :: Point).
ALL (v :: Vector). ALL (w :: Vector). p +_4 v = p +_4 w --> v = w"
plus_surjective [rule_format] :
"ALL (p :: Point). ALL (q :: Point). EX (y :: Vector). p +_4 y = q"
point_vector_plus_associative [rule_format] :
"ALL (p :: Point).
ALL (v :: Vector).
ALL (w :: Vector). p +_4 (v +_9 w) = (p +_4 v) +_4 w"
vec_def [rule_format] :
"ALL (p :: Point). ALL (q :: Point). p +_4 vec(p, q) = q"
transitivity_of_vec_plus [rule_format] :
"ALL (p :: Point).
ALL (q :: Point).
ALL (r :: Point). vec(p, q) +_9 vec(q, r) = vec(p, r)"
plus_vec_identity [rule_format] :
"ALL (p :: Point).
ALL (q :: Point). ALL (v :: Vector). p +_4 v = q --> v = vec(p, q)"
set_comprehension [rule_format] :
"ALL (s :: 'S => bool). XLBrace__XRBrace s = s"
abbrev_of_set_comprehension [rule_format] :
"setFromProperty = XLBrace__XRBrace"
function_image [rule_format] :
"ALL (XX :: 'S => bool).
ALL (f :: 'S => 'T).
X_image (f, XX) = (% x. EX (y :: 'S). y isIn XX & f y = x)"
emptySet_empty [rule_format] : "ALL (x :: 'S). ~ x isIn X_emptySet"
allSet_contains_all [rule_format] :
"ALL (x :: 'S). x isIn X_allSet"
def_of_isIn [rule_format] :
"ALL (s :: 'S => bool). ALL (x :: 'S). (x isIn s) = s x"
def_of_subset [rule_format] :
"ALL (s :: 'S => bool).
ALL (s' :: 'S => bool).
(s subset s') = (ALL (x :: 'S). x isIn s --> x isIn s')"
def_of_union [rule_format] :
"ALL (s :: 'S => bool).
ALL (s' :: 'S => bool).
ALL (x :: 'S).
(x isIn X__union__X (s, s')) = (x isIn s | x isIn s')"
def_of_bigunion [rule_format] :
"ALL (XXXX :: ('S => bool) => bool).
ALL (x :: 'S).
(x isIn bigunion XXXX) =
(EX (XX :: 'S => bool). XX isIn XXXX & x isIn XX)"
def_of_intersection [rule_format] :
"ALL (s :: 'S => bool).
ALL (s' :: 'S => bool).
ALL (x :: 'S).
(x isIn X__intersection__X (s, s')) = (x isIn s & x isIn s')"
def_of_difference [rule_format] :
"ALL (s :: 'S => bool).
ALL (s' :: 'S => bool).
ALL (x :: 'S).
(x isIn X__XBslashXBslash__X (s, s')) = (x isIn s & ~ x isIn s')"
def_of_disjoint [rule_format] :
"ALL (s :: 'S => bool).
ALL (s' :: 'S => bool).
(s disjoint s') = (X__intersection__X (s, s') = X_emptySet)"
def_of_productset [rule_format] :
"ALL (s :: 'S => bool).
ALL (t :: 'T => bool).
ALL (x :: 'S).
ALL (y :: 'T).
((x, y) isIn X__Xx__XX9 (s, t)) = (x isIn s & y isIn t)"
emptySet_union_right_unit [rule_format] :
"ALL (a :: 'S => bool). X__union__X (a, X_emptySet) = a"
function_image_structure [rule_format] :
"ALL (a :: 'S => bool).
ALL (f :: 'S => 'T).
ALL (x :: 'T).
(x isIn X_image (f, a)) = (EX (y :: 'S). y isIn a & f y = x)"
def_of_interval [rule_format] :
"ALL (a :: Real).
ALL (b :: Real).
XOSqBr__XPeriodXPeriodXPeriod__XCSqBr (a, b) =
(% r. r >=_4 a & r <=_4 b)"
abbrev_of_interval [rule_format] :
"closedinterval = XOSqBr__XPeriodXPeriodXPeriod__XCSqBr"
plus_PointSet_Vector [rule_format] :
"ALL (X_P :: Point => bool).
ALL (v :: Vector).
X__XPlus__XX10 (X_P, v) = X_image (% x. x +_4 v, X_P)"
plus_Point_VectorSet [rule_format] :
"ALL (X_V :: Vector => bool).
ALL (p :: Point).
X__XPlus__XX5 (p, X_V) = X_image (% x. p +_4 x, X_V)"
plus_PointSet_VectorSet [rule_format] :
"ALL (X_P :: Point => bool).
ALL (X_V :: Vector => bool).
X__XPlus__XX11 (X_P, X_V) =
bigunion (X_image (% x. X__XPlus__XX10 (X_P, x), X_V))"
ga_select_SpacePoint [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: VectorStar).
ALL (x_3 :: Vector). SpacePoint(X_SWPlane x_1 x_2 x_3) = x_1"
ga_select_NormalVector [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: VectorStar).
ALL (x_3 :: Vector). NormalVector(X_SWPlane x_1 x_2 x_3) = x_2"
ga_select_InnerCS [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: VectorStar).
ALL (x_3 :: Vector). InnerCS(X_SWPlane x_1 x_2 x_3) = x_3"
ga_select_Center [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: Point).
ALL (x_3 :: Point). Center(X_SWArc x_1 x_2 x_3) = x_1"
ga_select_Start [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: Point).
ALL (x_3 :: Point). Start(X_SWArc x_1 x_2 x_3) = x_2"
ga_select_End [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: Point).
ALL (x_3 :: Point). End(X_SWArc x_1 x_2 x_3) = x_3"
ga_select_From [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: Point). From(X_SWLine x_1 x_2) = x_1"
ga_select_To [rule_format] :
"ALL (x_1 :: Point).
ALL (x_2 :: Point). To(X_SWLine x_1 x_2) = x_2"
ga_select_Points [rule_format] :
"ALL (x_1 :: Point List). Points(X_SWSpline x_1) = x_1"
ga_select_Objects [rule_format] :
"ALL (x_1 :: SWSketchObject List).
ALL (x_2 :: SWPlane). Objects_3(X_SWSketch x_1 x_2) = x_1"
ga_select_Plane [rule_format] :
"ALL (x_1 :: SWSketchObject List).
ALL (x_2 :: SWPlane). Plane(X_SWSketch x_1 x_2) = x_2"
ga_select_Objects_1 [rule_format] :
"ALL (x_1 :: SWSkchCtrtParam List).
Objects'(X_SWSkchCtrtObject x_1) = x_1"
ga_select_SkchCtrtStatus [rule_format] :
"ALL (x_1 :: SWSkchCtrtStatus).
ALL (x_2 :: SWSkchCtrtObject List).
SkchCtrtStatus(X_SWSkchCtrts x_1 x_2) = x_1"
ga_select_Objects_2 [rule_format] :
"ALL (x_1 :: SWSkchCtrtStatus).
ALL (x_2 :: SWSkchCtrtObject List).
Objects''(X_SWSkchCtrts x_1 x_2) = x_2"
ga_select_Sketch [rule_format] :
"ALL (x_1 :: SWSketch).
ALL (x_2 :: Real). Sketch(X_SWExtrusion x_1 x_2) = x_1"
ga_select_Depth [rule_format] :
"ALL (x_1 :: SWSketch).
ALL (x_2 :: Real). Depth(X_SWExtrusion x_1 x_2) = x_2"
E1_def [rule_format] :
"makePartial E1 =
restrictOp
(makePartial
(X_SWPlane
(P((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0''))
(makeTotal
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3))))
(V((X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0''))))
(defOp
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3))))"
E2_def [rule_format] :
"makePartial E2 =
restrictOp
(makePartial
(X_SWPlane
(P((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0''))
(makeTotal
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0''))))
(V((X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0''))))
(defOp
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0''))))"
E3_def [rule_format] :
"makePartial E3 =
restrictOp
(makePartial
(X_SWPlane
(P((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0'', (X_gn_inj :: X_Nat => Real) 0''))
(makeTotal
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0''))))
(V((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0''))))
(defOp
((X_gn_proj :: Vector => VectorStar partial)
(V((X_gn_inj :: NonZero => Real) 1_3,
(X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: X_Nat => Real) 0''))))"
VLine_constr [rule_format] :
"ALL (p1 :: Vector).
ALL (p2 :: Vector).
VLine (p1, p2) =
X_image
(% y. p1 +_9 (y *_7 (p2 -_5 p1)),
closedinterval
((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3))"
VWithLength_constr [rule_format] :
"ALL (s :: Real).
ALL (v :: Vector).
makePartial (VWithLength(v, s)) =
(if v = 0_6 then makePartial v
else restrictOp
(makePartial
((s /''
makeTotal ((X_gn_proj :: Real => NonZero partial) ( || v || )))
*_7 v))
(defOp ((X_gn_proj :: Real => NonZero partial) ( || v || ))))"
VPlane_constr [rule_format] :
"ALL (normal :: Vector). VPlane normal = (% y. orth(y, normal))"
VPlane2_constr [rule_format] :
"ALL (axis1 :: Vector).
ALL (axis2 :: Vector).
VPlane2 (axis1, axis2) = VPlane (axis1 #' axis2)"
VConnected_constr [rule_format] :
"ALL (frontier :: Vector => bool).
ALL (point :: Vector).
VConnected (frontier, point) =
(if frontier point then frontier
else % y. X__intersection__X (VLine (point, y), frontier) =
X_emptySet)"
VHalfSpace_constr [rule_format] :
"ALL (normal :: Vector).
VHalfSpace normal = VConnected (VPlane normal, normal)"
VHalfSpace2_constr [rule_format] :
"ALL (normal :: Vector).
VHalfSpace2 normal =
X__union__X (VConnected (VPlane normal, normal), VPlane normal)"
VBall_constr [rule_format] :
"ALL (r :: Real). VBall r = (% y. || y || <=_4 r)"
VCircle_constr [rule_format] :
"ALL (axis :: Vector).
ALL (r :: Real).
VCircle (r, axis) = X__intersection__X (VPlane axis, VBall r)"
ActAttach_constr [rule_format] :
"ALL (point :: Point).
ALL (vectors :: Vector => bool).
ActAttach (point, vectors) = X__XPlus__XX5 (point, vectors)"
ActExtrude_constr [rule_format] :
"ALL (axis :: Vector).
ALL (points :: Point => bool).
ActExtrude (axis, points) =
(% x. EX (l :: Real).
EX (y :: Point).
(l isIn
closedinterval
((X_gn_inj :: X_Nat => Real) 0'',
(X_gn_inj :: NonZero => Real) 1_3) &
y isIn points) &
x = y +_4 (l *_7 axis))"
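(* Informal reading of the constructors above, based only on the axioms
   VPlane_constr, VBall_constr, VCircle_constr, ActAttach_constr and
   ActExtrude_constr (the names p and r below are placeholders chosen for
   illustration): with axis = e3, the set VCircle (r, axis) consists of the
   vectors v with orth(v, axis) and || v || <=_4 r, i.e. a closed disc;
   ActAttach (p, VCircle (r, axis)) places that disc at the point p; and
   ActExtrude (axis, ActAttach (p, VCircle (r, axis))) collects all points
   (p +_4 v) +_4 (l *_7 axis) with l in the unit interval -- an affine
   cylinder of radius r and height || e3 || = 1 over p. *)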
vwl_identity [rule_format] :
"ALL (s :: Real).
ALL (v :: Vector). || v || = s --> VWithLength(v, s) = v"
vwl_length [rule_format] :
"ALL (s :: Real).
ALL (v :: Vector).
~ v = 0_6 -->
|| VWithLength(v, s) || =
(X_gn_inj :: RealNonNeg => Real) (abs_3(s))"
vwl_lindep [rule_format] :
"ALL (s :: Real). ALL (v :: Vector). lindep(v, VWithLength(v, s))"
semantics_for_Planes [rule_format] :
"ALL (ics :: Vector).
ALL (X_n :: VectorStar).
ALL (X_o :: Point).
iX2 (X_SWPlane X_o X_n ics) =
ActAttach (X_o, VPlane ((X_gn_inj :: VectorStar => Vector) X_n))"
semantics_for_SketchObject_listsXMinusBaseCase [rule_format] :
"ALL (plane :: SWPlane). X_isX2 (['], plane) = X_emptySet"
semantics_for_SketchObject_listsXMinusRecursiveCase [rule_format] :
"ALL (plane :: SWPlane).
ALL (sko :: SWSketchObject).
ALL (skos :: SWSketchObject List).
X_isX2 (sko ::' skos, plane) =
X__union__X (X_isX1 (sko, plane), X_isX2 (skos, plane))"
semantics_for_Arcs [rule_format] :
"ALL (plane :: SWPlane).
ALL (x :: Point).
ALL (y :: Point).
ALL (z :: Point).
X_isX1
((X_gn_inj :: SWArc => SWSketchObject) (X_SWArc x y z), plane) =
(let r1 = vec(x, y);
ball = ActAttach (x, VBall ( || r1 || ));
planeI = iX2 plane
in X__intersection__X (ball, planeI))"
semantics_for_Sketches [rule_format] :
"ALL (plane :: SWPlane).
ALL (skos :: SWSketchObject List).
iX3 (X_SWSketch skos plane) = X_isX2 (skos, plane)"
semantics_for_ArcExtrusion [rule_format] :
"ALL (l :: Real).
ALL (sk :: SWSketch).
iX1 ((X_gn_inj :: SWExtrusion => SWFeature) (X_SWExtrusion sk l)) =
ActExtrude
(VWithLength((X_gn_inj :: VectorStar => Vector)
(NormalVector(Plane(sk))),
l),
iX3 sk)"
def_of_SWCylinder [rule_format] :
"ALL (axis :: VectorStar).
ALL (boundarypoint :: Point).
ALL (center :: Point).
SWCylinder(center, boundarypoint, axis) =
(X_gn_inj :: SWExtrusion => SWFeature)
(let plane = X_SWPlane center axis 0_6;
arc = X_SWArc center boundarypoint boundarypoint;
height = || (X_gn_inj :: VectorStar => Vector) axis ||
in X_SWExtrusion
(X_SWSketch ((X_gn_inj :: SWArc => SWSketchObject) arc ::' ['])
plane)
height)"
affine_cylinder_constructible_in_SW [rule_format] :
"ALL (axis :: VectorStar).
ALL (offset :: Point).
ALL (r :: RealPos).
Cylinder ((offset, r), axis) =
(let boundary =
% p. let v = vec(offset, p)
in orth(v, (X_gn_inj :: VectorStar => Vector) axis) &
|| v || = (X_gn_inj :: RealPos => Real) r;
boundarypoint = choose'(boundary)
in iX1 (SWCylinder(offset, boundarypoint, axis)))"
declare ga_subt_reflexive [simp]
declare ga_subt_Int_XLt_Rat [simp]
declare ga_subt_Nat_XLt_Int [simp]
declare ga_subt_NonZero_XLt_Real [simp]
declare ga_subt_Pos_XLt_Nat [simp]
declare ga_subt_Rat_XLt_Real [simp]
declare ga_subt_RealNonNeg_XLt_Real [simp]
declare ga_subt_RealPos_XLt_RealNonNeg [simp]
declare ga_subt_SWArc_XLt_SWSketchObject [simp]
declare ga_subt_SWExtrusion_XLt_SWFeature [simp]
declare ga_subt_SWFeature_XLt_SWObject [simp]
declare ga_subt_SWLine_XLt_SWSketchObject [simp]
declare ga_subt_SWPlane_XLt_SWObject [simp]
declare ga_subt_SWSketch_XLt_SWObject [simp]
declare ga_subt_SWSketchObject_XLt_SWObject [simp]
declare ga_subt_SWSpline_XLt_SWSketchObject [simp]
declare ga_subt_VectorStar_XLt_Vector [simp]
declare ga_assoc___Xx__ [simp]
declare ga_right_unit___Xx__ [simp]
declare ga_left_unit___Xx__ [simp]
declare inv_Group [simp]
declare rinv_Group [simp]
declare ga_comm___Xx__ [simp]
declare ga_assoc___Xx___9 [simp]
declare ga_right_unit___Xx___7 [simp]
declare ga_left_unit___Xx___8 [simp]
declare left_zero [simp]
declare right_zero [simp]
declare ga_comm___Xx___14 [simp]
declare ga_assoc___Xx___22 [simp]
declare ga_right_unit___Xx___18 [simp]
declare ga_left_unit___Xx___20 [simp]
declare inv_Group_21 [simp]
declare rinv_Group_19 [simp]
declare refl [simp]
declare FWO_plus_left [simp]
declare FWO_plus_right [simp]
declare ga_comm_inf [simp]
declare ga_comm_sup [simp]
declare ga_comm_min [simp]
declare ga_comm_max [simp]
declare ga_assoc_min [simp]
declare ga_assoc_max [simp]
declare ga_left_comm_min [simp]
declare ga_left_comm_max [simp]
declare min_inf_relation [simp]
declare max_sup_relation [simp]
declare RealNonNeg_type [simp]
declare RealPos_type [simp]
declare sqrt_def [simp]
declare ga_select_C1 [simp]
declare ga_select_C2 [simp]
declare ga_select_C3 [simp]
declare ga_select_C1_131 [simp]
declare ga_select_C2_132 [simp]
declare ga_select_C3_133 [simp]
declare VectorStar_type [simp]
declare ga_assoc___Xx___82 [simp]
declare ga_right_unit___Xx___76 [simp]
declare ga_left_unit___Xx___78 [simp]
declare inv_Group_79 [simp]
declare rinv_Group_77 [simp]
declare ga_comm___Xx___80 [simp]
declare unit [simp]
declare zero_by_left_zero [simp]
declare zero_by_right_zero [simp]
declare inverse_by_XMinus1 [simp]
declare lindep_reflexivity [simp]
declare lindep_symmetry [simp]
declare pos_definite_94 [simp]
declare orth_symmetric [simp]
declare orthogonal_on_zero_projection [simp]
declare orthogonal_projection_theorem [simp]
declare orthogonal_decomposition_theorem [simp]
declare ga_select_first [simp]
declare ga_select_rest [simp]
declare Ax4 [simp]
declare listop_basecase [simp]
declare cross_product_orthogonal [simp]
declare e1e2_nonlindep [simp]
declare vec_def [simp]
declare transitivity_of_vec_plus [simp]
declare emptySet_empty [simp]
declare allSet_contains_all [simp]
declare def_of_isIn [simp]
declare emptySet_union_right_unit [simp]
declare ga_select_SpacePoint [simp]
declare ga_select_NormalVector [simp]
declare ga_select_InnerCS [simp]
declare ga_select_Center [simp]
declare ga_select_Start [simp]
declare ga_select_End [simp]
declare ga_select_From [simp]
declare ga_select_To [simp]
declare ga_select_Points [simp]
declare ga_select_Objects [simp]
declare ga_select_Plane [simp]
declare ga_select_Objects_1 [simp]
declare ga_select_SkchCtrtStatus [simp]
declare ga_select_Objects_2 [simp]
declare ga_select_Sketch [simp]
declare ga_select_Depth [simp]
declare vwl_identity [simp]
declare vwl_lindep [simp]
declare semantics_for_SketchObject_listsXMinusBaseCase [simp]
declare semantics_for_Sketches [simp]
-- "SUBTYPE RULES"
use_thy "$AWE_HOME/Extensions/AWE_HOL"
use_thy "$HETS_ISABELLE_LIB/Subtypes"
thymorph subtypes_morph : Subtypes --> SWCommonPatterns_SWCylByAE_IsCylinder_T
translate_thm "Subtypes.gn_inj_identity" along subtypes_morph
translate_thm "Subtypes.gn_proj_def" along subtypes_morph
ML {* proofs := 1 *}
lemma zerozero: "0_5 = gn_inj(0'')" sorry
lemma oneone: "1_6 = gn_inj(1_3)" sorry
lemmas PO_simps =
geq_def_ExtPartialOrder
less_def_ExtPartialOrder
greater_def_ExtPartialOrder
theorem def_of_Cylinder :
"ALL (axis :: VectorStar).
ALL (offset :: Point).
ALL (r :: RealPos).
Cylinder ((offset, r), axis) =
(% x. let v = vec(offset, x)
in ( || proj(v, (X_gn_inj :: VectorStar => Vector) axis) || <=_4
|| (X_gn_inj :: VectorStar => Vector) axis || &
|| orthcomp(v, (X_gn_inj :: VectorStar => Vector) axis) || <=_4
(X_gn_inj :: RealPos => Real) r) &
v *_8 (X_gn_inj :: VectorStar => Vector) axis >=_4
(X_gn_inj :: X_Nat => Real) 0'')"
-- "unfolding some initial definitions"
unfolding affine_cylinder_constructible_in_SW
unfolding def_of_SWCylinder
proof (rule allI)+
-- "I. SUBTYPE AND PARTIALITY LEMMAS"
-- "to infer knowledge of the form"
-- "!!x::subtype. ?defining_predicate(gn_inj(x))"
-- "where the ? emphasizes the fact, that we want the predicate to be expanded,"
-- "we would need a tactic which retrieves for a given subtype the 3 rules:"
-- "S_pred_def, Ax4(linking the defining predicate to the defOp) and subt_S_T"
-- "As the subtype can be inferred from the context, no arguments would be"
-- "required for this tactic"
have subt_RealPos_Real:
"ALL (x :: RealPos). ALL (y :: Real). gn_subt(x, y)"
by (blast intro!: ga_subt_transitive ga_subt_RealPos_XLt_RealNonNeg
ga_subt_RealNonNeg_XLt_Real)
have RealPos_subtype: "!!x::RealPos. gn_inj(x) >_4 0_5"
by (simp only: RealPos_pred_def [THEN sym], subst RealPos_type [THEN sym],
simp only: gn_proj_def subt_RealPos_Real)
have VectorStar_subtype: "!!x::VectorStar. (~ gn_inj(x) = 0_6)"
by (simp only: VectorStar_pred_def [THEN sym], subst VectorStar_type [THEN sym],
simp only: gn_proj_def ga_subt_VectorStar_XLt_Vector)
from RealPos_subtype have
realpos_nonneg: "!!x::RealPos. 0_5 <=_4 gn_inj(x)"
by (simp only: PO_simps)
from ga_subt_RealPos_XLt_RealNonNeg ga_subt_RealNonNeg_XLt_Real subt_RealPos_Real
have real_inj:
"!!(x::RealPos). (gn_inj(x)\<Colon>Real) = gn_inj(gn_inj(x)\<Colon>RealNonNeg)"
by (rule_tac gn_inj_diagram, simp)
have realnonneg_identity:
"!!x::RealNonNeg. makeTotal(gn_proj((X_gn_inj x)\<Colon>Real)) = x"
by (simp only: gn_makeTotal_proj_inj ga_subt_RealNonNeg_XLt_Real)
-- "II. GENERAL LEMMAS"
from e1e2_nonlindep have space_at_least_2dim: "EX v w. \<not> lindep(v,w)" by blast
have abs_on_realpos:
"!!x::RealPos. abs_3(gn_inj(x)) = X_gn_inj x"
-- "INTERESTING: can't combine the two simp only's!"
by (subst partial_identity [THEN sym], subst abs_def,
simp only: if_P realpos_nonneg, simp only: real_inj realnonneg_identity)
have vwl_pos_length: "!!(s::RealPos) v. ~ v = 0_6 -->
|| VWithLength(v, gn_inj(s)) || = gn_inj(s)"
proof (rule impI)
fix s::RealPos
fix v::Vector
assume hyp: "v \<noteq> 0_6"
with vwl_length have
"|| VWithLength(v, X_gn_inj s) || = gn_inj(abs_3(X_gn_inj s))" by blast
also have "\<dots> = gn_inj(gn_inj(s)::RealNonNeg)" by (simp only: abs_on_realpos)
also have "\<dots> = gn_inj(s)" by (simp only: real_inj)
finally show "|| VWithLength(v, gn_inj(s)) || = gn_inj(s)" by blast
qed
from space_at_least_2dim orthogonal_existence_theorem
have orth_exists_aux: "!!w. EX x. x \<noteq> 0_6 \<and> orth(x, w)" by blast
have orth_exists:
"!!q w (r::RealPos). EX p. let v = vec(q, p) in orth(v, w) & || v || = gn_inj(r)"
(is "!!q w r. EX p. ?P q w r p")
proof-
fix q w
fix r::RealPos
from orth_exists_aux obtain v where v_props: "v \<noteq> 0_6 \<and> orth(v, w)" ..
def vprime_def: v' == "VWithLength(v, gn_inj(r))"
def p: p == "q +_4 v'"
with plus_vec_identity have vp_rel: "v' = vec(q, p)" by simp
from lindep_symmetry orth_symmetric vwl_lindep lindep_orth_transitive
v_props vprime_def have fact1: "orth(v', w)" by blast
with v_props vwl_pos_length vprime_def
have "orth(v', w) \<and> || v' || = gn_inj(r)" by blast
with vp_rel fact1 have "?P q w r p" by simp
thus "EX p. ?P q w r p" ..
qed
-- "need this fact to use the proj_def"
have subtype_cond: "!A x. (x \<noteq> 0_5) \<longrightarrow> (restrictOp A (defOp(gn_proj(x)\<Colon>NonZero partial))) = A"
proof ((rule allI)+, rule impI)
fix A x
assume hyp: "x \<noteq> 0_5"
show "restrictOp A (defOp (gn_proj(x)\<Colon>NonZero partial)) = A"
by (subst restrictOp_def, subst if_P, subst NonZero_type, simp add: hyp, simp)
qed
-- "END LEMMAS"
fix axis::VectorStar
fix offset::Point
fix r::RealPos
-- "providing vars for the let-constructs"
def boundary: boundary == "\<lambda>p. let v = vec(offset, p) in orth(v, gn_inj(axis)) \<and> || v || = gn_inj(r)"
def boundarypoint: bp == "choose'(boundary)"
def plane: pln == "X_SWPlane offset axis 0_6"
def arc: arc == "X_SWArc offset bp bp"
def height: ht == "|| gn_inj(axis) ||"
def sketch: sketch == "X_SWSketch (gn_inj(arc) ::' XOSqBrXCSqBr) pln"
-- "additional definitions, not stemming from let-vars"
def I01: I01 == "closedinterval (gn_inj(0''), gn_inj(1_3))"
-- "we know that Plane(sketch) = pln"
from plane sketch ga_select_Plane
have sketchplane_identity: "pln = Plane(sketch)" by simp
-- "we know that NormalVector(pln) = axis"
from plane ga_select_NormalVector
have nv_identity: "axis = NormalVector(pln)" by simp
def r1: r1 == "vec(offset, bp)"
def ball: bll == "ActAttach (offset, VBall ( || r1 || ))"
def planeI: plnI == "ActAttach (offset, VPlane (gn_inj(axis)))"
def scaledAxis: axs == "VWithLength(gn_inj(axis), ht)"
-- "we can identify gn_inj(axis) and axs via vwl_identity!"
from scaledAxis vwl_identity height
have axis_identity: "axs = gn_inj(axis)" by simp
have axs_nonzero: "axs \<noteq> 0_6" by (subst axis_identity, rule VectorStar_subtype)
with non_degenerate rev_contrapos
have axs_norm_nonzero: "axs *_8 axs \<noteq> 0_5" by blast
-- "PP = ProofPower remark"
-- "PP: doesn't work in one step!"
from axs_nonzero pos_definite have "axs *_8 axs >_4 0_5" by blast
with PO_simps
have axs_sqr_nonneg: "axs *_8 axs >=_4 0_5" by blast
-- "show facts about bp, r and r1"
have bp_in_boundary: "boundary bp"
proof-
from boundary orth_exists have "Ex boundary" by blast
with Point_choice boundarypoint show ?thesis by blast
qed
hence r1_r_relation: "|| r1 || = gn_inj(r)"
by (simp add: r1 boundary Let_def)
-- "we don't want to manipulate the right hand side, so we replace it by rhs"
def rhs: rhs == "(\<lambda>x. let v = vec(offset, x)
in ( || proj(v, gn_inj(axis)) || <=_4 || gn_inj(axis) || \<and>
|| orthcomp(v, gn_inj(axis)) || <=_4 gn_inj(r)) \<and>
v *_8 gn_inj(axis) >=_4 gn_inj(0''))"
-- "going in apply-mode again"
show "(let boundary = \<lambda>p. let v = vec(offset, p) in orth(v, gn_inj(axis)) \<and> || v || = gn_inj(r);
boundarypoint = choose'(boundary)
in iX1 (gn_inj
(let plane = X_SWPlane offset axis 0_6;
arc = X_SWArc offset boundarypoint boundarypoint;
height = || gn_inj(axis) ||
in X_SWExtrusion (X_SWSketch (gn_inj(arc) ::' [']) plane) height)))
=
(\<lambda>x. let v = vec(offset, x)
in ( || proj(v, gn_inj(axis)) || <=_4 || gn_inj(axis) || \<and>
|| orthcomp(v, gn_inj(axis)) || <=_4 gn_inj(r)) \<and>
v *_8 gn_inj(axis) >=_4 gn_inj(0''))"
apply (subst rhs [symmetric])
apply (subst boundary [symmetric])
-- "get the boundarypoint definition replaced"
apply (subst Let_def)
apply (subst boundarypoint [symmetric])
apply (subst plane [symmetric])
-- "get the boundarypoint definition replaced"
apply (subst Let_def)
apply (subst height [symmetric])
apply (subst arc [symmetric])
unfolding Let_def
apply (subst sketch [symmetric])
-- "second round of let-elimination, but first some definition unfoldings"
unfolding semantics_for_ArcExtrusion ActExtrude_constr
apply (subst sketchplane_identity [symmetric])
apply (subst nv_identity [symmetric])
apply (subst sketch)
unfolding semantics_for_Sketches semantics_for_SketchObject_listsXMinusRecursiveCase
apply (subst arc)
unfolding semantics_for_Arcs
apply (subst I01 [symmetric])
apply (subst r1 [symmetric])
-- "get the r1 definition replaced"
apply (subst Let_def)
apply (subst ball [symmetric])
apply (subst Let_def)
apply (subst plane)
unfolding semantics_for_Planes
apply (subst planeI [symmetric])
apply (subst scaledAxis [symmetric])
unfolding Let_def
unfolding semantics_for_SketchObject_listsXMinusBaseCase
apply (subst emptySet_union_right_unit)
apply (subst def_of_intersection)
apply (subst rhs)
apply (subst axis_identity [symmetric])+
proof (rule ext)
fix x
def v: v == "vec(offset, x)"
def vp: vp == "proj(v, axs)" -- "the axis-parallel component"
def vo: vo == "orthcomp(v, axs)" -- "the axis-orthogonal component"
-- "using the orthogonal projection theorem here!"
hence vo_axs_orth: "orth(axs, vo)" by simp
hence vo_mult_axs_zero: "vo *_8 axs = 0_5"
by (simp only: orth_symmetric orthogonal_def [symmetric])
have vp_structure: "EX k. vp = k *_7 axs"
unfolding vp
apply (subst partial_identity [symmetric], subst proj_def)
apply (subst if_not_P, simp add: axs_nonzero)
apply (subst subtype_cond, simp add: axs_norm_nonzero)
apply (subst partial_identity)
by auto
-- "here we provide already the knowledge that v = vp + vo"
have v_decomp: "v = vp +_9 vo" by (simp only: vo vp orthogonal_decomposition_theorem)
from v_decomp have "v *_8 axs = (vp +_9 vo) *_8 axs" by simp
also have "\<dots> = (vp *_8 axs) +_8 (vo *_8 axs)" by (simp only: distributive)
also have "\<dots> = (vp *_8 axs)" by (simp add: vo_mult_axs_zero)
finally have v_mult_axs_simp: "v *_8 axs = vp *_8 axs" by simp
-- "going in apply-mode again"
show "(\<exists>l y. (l isIn I01 \<and> y isIn bll \<and> y isIn plnI) \<and>
x = y +_4 (l *_7 axs)) =
(let v = vec(offset, x)
in ( || proj(v, axs) || <=_4 || axs || \<and>
|| orthcomp(v, axs) || <=_4 gn_inj(r)) \<and>
v *_8 axs >=_4 gn_inj(0''))"
apply (subst v [symmetric])
apply (subst Let_def)
apply (subst vp [symmetric])
apply (subst vo [symmetric])
-- "having normalized the problem we can now start the main proof!"
proof
assume "\<exists>l y. (l isIn I01 \<and> y isIn bll \<and> y isIn plnI) \<and> x = y +_4 (l *_7 axs)"
(is "\<exists>l y. (?I l \<and> ?B y \<and> ?P y) \<and> ?S l y")
then obtain l y where main_knowledge: "(?I l \<and> ?B y \<and> ?P y) \<and> ?S l y" by blast
def yvec: y' == "vec(offset, y)" -- "the offset-based component of y"
-- "we know that y' is in the given VBall because of its structure"
have yprime_in_ball: "y' isIn VBall ( || r1 || ) \<and> offset +_4 y' = y" (is "?L y'")
proof-
from main_knowledge have "EX y1. ?L y1"
by (subst (asm) ball, subst (asm) ActAttach_constr,
subst (asm) plus_Point_VectorSet, subst (asm) function_image_structure, simp)
then obtain y1 where obtained_from_ball: "?L y1" ..
with yvec plus_injective vec_def have "y' = y1" by blast
with obtained_from_ball show ?thesis by simp
qed
-- "identically we know that y' is in the given VPlane because of its structure"
have yprime_in_plane: "y' isIn VPlane(gn_inj(axis)) \<and> offset +_4 y' = y" (is "?L' y'")
proof-
from main_knowledge have "EX y1. ?L' y1"
by (subst (asm) planeI, subst (asm) ActAttach_constr,
subst (asm) plus_Point_VectorSet, subst (asm) function_image_structure, simp)
then obtain y1 where obtained_from_plane: "?L' y1" ..
with yvec plus_injective vec_def have "y' = y1" by blast
with obtained_from_plane show ?thesis by simp
qed
from v have "x = offset +_4 v" by simp
with yprime_in_ball main_knowledge
have "offset +_4 v = (offset +_4 y') +_4(l *_7 axs)" by simp
hence "v = y' +_9 (l *_7 axs)"
by (simp only: point_vector_plus_associative [symmetric] plus_injective)
hence "vp +_9 vo = y' +_9 (l *_7 axs)"
by (simp only: v_decomp)
hence vyprime_rel: "vp +_9 vo = (l *_7 axs) +_9 y'"
by (simp only: ga_comm___Xx___80)
have main_identity: "vp = l *_7 axs \<and> vo = y'"
proof (rule_tac z="axs" in unique_orthogonal_decomposition)
-- "mi_subgoal1 is equal to axs_nonzero"
from vyprime_rel have mi_subgoal2: "vp +_9 vo = (l *_7 axs) +_9 y'" .
from vp_structure lindep_def have mi_subgoal3: "lindep(vp, axs)"
by blast
from lindep_def have mi_subgoal4: "lindep(l *_7 axs, axs)" by blast
-- "mi_subgoal5 is equal to vo_axs_orth"
from axis_identity yprime_in_plane VPlane_constr
have mi_subgoal6: "orth(axs, y')" by simp
with axs_nonzero mi_subgoal2 mi_subgoal3 mi_subgoal4 vo_axs_orth
show "((((((axs \<noteq> 0_6) \<and> ((vp +_9 vo) = ((l *_7 axs) +_9 y'))) \<and> (lindep(vp, axs))) \<and>
(lindep((l *_7 axs), axs))) \<and> (orth(axs, vo))) \<and> (orth(axs, y')))" by blast
qed
-- "(vp = l * axs) and 0 <= l <= 1 should gives us the result"
-- "0 <= l <= 1"
have l_in_unitinterval: "0_5 <=_4 l \<and> l <=_4 gn_inj(1_3)"
proof-
from main_knowledge I01 zerozero
have "l isIn closedinterval(0_5, gn_inj(1_3))" by simp
with abbrev_of_interval have "XOSqBr__XPeriodXPeriodXPeriod__XCSqBr(0_5, gn_inj(1_3)) l"
by simp
thus ?thesis by (simp add: def_of_interval PO_simps)
qed
have subgoal1: "|| vp || <=_4 || axs ||"
proof-
from main_identity have "|| vp || = || l *_7 axs ||" by simp
also have "\<dots> = l *_6 || axs ||"
by (simp only: PO_simps pos_homogeneous l_in_unitinterval)
also have "\<dots> <=_4 || axs ||"
by (subst ga_left_unit___Xx___8 [symmetric]
, subst oneone
, rule FWO_times_left
, simp add: l_in_unitinterval pos_definite_94)
finally show ?thesis .
qed
from r1_r_relation VBall_constr yprime_in_ball main_identity
have subgoal2: "|| vo || <=_4 gn_inj(r)" by simp
from v_mult_axs_simp have "v *_8 axs = l *_6 (axs *_8 axs)"
by (simp add: main_identity homogeneous)
also from axs_sqr_nonneg l_in_unitinterval geq_def_ExtPartialOrder
FWO_times_right have "\<dots> >=_4 l *_6 0_5" by blast
also from right_zero have "\<dots> = 0_5" by blast
finally have subgoal3: "v *_8 axs >=_4 0_5" by simp
with subgoal1 subgoal2 zerozero
show "( || vp || <=_4 || axs || \<and>
|| vo || <=_4 gn_inj(r)) \<and> v *_8 axs >=_4 gn_inj(0'')" by simp
-- "tackle other direction here"
next
assume main_knowledge:
"( || vp || <=_4 || axs || \<and> || vo || <=_4 gn_inj(r)) \<and> v *_8 axs >=_4 gn_inj(0'')"
-- "We show vp = k * axs, and set l := k, y := offset + vo and"
-- "verify the four conditions for l and y."
from vp_structure obtain l where l_def: "vp = l *_7 axs" ..
def y_def: y == "offset +_4 vo"
have "(l isIn I01 \<and> y isIn bll \<and> y isIn plnI) \<and> x = y +_4 (l *_7 axs)"
(is "?G l y")
proof-
-- "PROOF for first conjunct"
from pos_definite_94 have axs_norm_nonneg: "||axs|| >=_4 0_5"
by (simp only: PO_simps)
from v_mult_axs_simp main_knowledge l_def homogeneous zerozero
have "l *_6 (axs *_8 axs) >=_4 0_5" by simp
with times_leq_nonneg_cond axs_sqr_nonneg PO_simps
have I01_first: "l >=_4 0_5" by blast
with main_knowledge l_def pos_homogeneous
have "l *_6 || axs || <=_4 || axs ||"
by (simp only:)
with axs_norm_nonneg have I01_second: "l <=_4 gn_inj(1_3)"
by (subst (asm) ga_left_unit___Xx___8 [symmetric]
, subst (asm) oneone
, insert times_cancel_right_nonneg_leq [of l "||axs||"]
, simp)
with I01_first I01 def_of_interval abbrev_of_interval
def_of_isIn PO_simps zerozero
have subgoal1: "l isIn I01" by auto
-- "PROOF for second conjunct"
from main_knowledge r1_r_relation VBall_constr
have "vo isIn VBall( || r1 || )" by simp
hence subgoal2: "y isIn bll"
by (subst y_def,subst ball,
subst ActAttach_constr,
subst plus_Point_VectorSet,
subst function_image,
subst def_of_isIn, auto)
-- "the same way we obtain the third subgoal!"
-- "PROOF for third conjunct"
from vo_axs_orth axis_identity VPlane_constr
have "vo isIn VPlane(axs)" by simp
hence subgoal3: "y isIn plnI"
by (subst y_def, subst planeI, subst ActAttach_constr
, subst plus_Point_VectorSet
, subst function_image
, (subst def_of_isIn)+
, subst axis_identity [symmetric]
, auto)
-- "PROOF for fourth conjunct"
have subgoal4: "x = y +_4 (l *_7 axs)"
by (subst y_def, subst l_def [symmetric]
, subst point_vector_plus_associative [symmetric]
, subst ga_comm___Xx___80
, subst v_decomp [symmetric]
, simp only: ga_comm___Xx___80 v vec_def)
with subgoal1 subgoal2 subgoal3 show ?thesis by blast
qed
thus "EX l y. ?G l y" by auto
qed
qed
qed
ML "Header.record \"def_of_Cylinder\""
end
|
True : Prop
True = {P : Prop} → P → P
|
[GOAL]
⊢ range arcsin = Icc (-(π / 2)) (π / 2)
[PROOFSTEP]
rw [arcsin, range_comp Subtype.val]
[GOAL]
⊢ Subtype.val '' range (IccExtend arcsin.proof_2 ↑(OrderIso.symm sinOrderIso)) = Icc (-(π / 2)) (π / 2)
[PROOFSTEP]
simp [Icc]
[GOAL]
x : ℝ
⊢ arcsin ↑(projIcc (-1) 1 (_ : -1 ≤ 1) x) = arcsin x
[PROOFSTEP]
rw [arcsin, Function.comp_apply, IccExtend_val, Function.comp_apply, IccExtend, Function.comp_apply]
[GOAL]
x : ℝ
hx : x ∈ Icc (-1) 1
⊢ sin (arcsin x) = x
[PROOFSTEP]
simpa [arcsin, IccExtend_of_mem _ _ hx, -OrderIso.apply_symm_apply] using
Subtype.ext_iff.1 (sinOrderIso.apply_symm_apply ⟨x, hx⟩)
[GOAL]
x : ℝ
hx : x ∈ Icc (-(π / 2)) (π / 2)
⊢ sin (arcsin (sin x)) = sin x
[PROOFSTEP]
rw [sin_arcsin (neg_one_le_sin _) (sin_le_one _)]
[GOAL]
x y : ℝ
h₁ : sin x = y
h₂ : x ∈ Icc (-(π / 2)) (π / 2)
⊢ arcsin y = x
[PROOFSTEP]
subst y
[GOAL]
x : ℝ
h₂ : x ∈ Icc (-(π / 2)) (π / 2)
⊢ arcsin (sin x) = x
[PROOFSTEP]
exact injOn_sin (arcsin_mem_Icc _) h₂ (sin_arcsin' (sin_mem_Icc x))
[GOAL]
x : ℝ
hx : 1 ≤ x
⊢ arcsin x = π / 2
[PROOFSTEP]
rw [← arcsin_projIcc, projIcc_of_right_le _ hx, Subtype.coe_mk, arcsin_one]
[GOAL]
⊢ sin (-(π / 2)) = -1
[PROOFSTEP]
rw [sin_neg, sin_pi_div_two]
[GOAL]
x : ℝ
hx : x ≤ -1
⊢ arcsin x = -(π / 2)
[PROOFSTEP]
rw [← arcsin_projIcc, projIcc_of_le_left _ hx, Subtype.coe_mk, arcsin_neg_one]
[GOAL]
x : ℝ
⊢ arcsin (-x) = -arcsin x
[PROOFSTEP]
cases' le_total x (-1) with hx₁ hx₁
[GOAL]
case inl
x : ℝ
hx₁ : x ≤ -1
⊢ arcsin (-x) = -arcsin x
[PROOFSTEP]
rw [arcsin_of_le_neg_one hx₁, neg_neg, arcsin_of_one_le (le_neg.2 hx₁)]
[GOAL]
case inr
x : ℝ
hx₁ : -1 ≤ x
⊢ arcsin (-x) = -arcsin x
[PROOFSTEP]
cases' le_total 1 x with hx₂ hx₂
[GOAL]
case inr.inl
x : ℝ
hx₁ : -1 ≤ x
hx₂ : 1 ≤ x
⊢ arcsin (-x) = -arcsin x
[PROOFSTEP]
rw [arcsin_of_one_le hx₂, arcsin_of_le_neg_one (neg_le_neg hx₂)]
[GOAL]
case inr.inr
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ arcsin (-x) = -arcsin x
[PROOFSTEP]
refine' arcsin_eq_of_sin_eq _ _
[GOAL]
case inr.inr.refine'_1
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ sin (-arcsin x) = -x
[PROOFSTEP]
rw [sin_neg, sin_arcsin hx₁ hx₂]
[GOAL]
case inr.inr.refine'_2
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ -arcsin x ∈ Icc (-(π / 2)) (π / 2)
[PROOFSTEP]
exact ⟨neg_le_neg (arcsin_le_pi_div_two _), neg_le.2 (neg_pi_div_two_le_arcsin _)⟩
[GOAL]
x y : ℝ
hx : x ∈ Icc (-1) 1
hy : y ∈ Icc (-(π / 2)) (π / 2)
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
rw [← arcsin_sin' hy, strictMonoOn_arcsin.le_iff_le hx (sin_mem_Icc _), arcsin_sin' hy]
[GOAL]
x y : ℝ
hy : y ∈ Ico (-(π / 2)) (π / 2)
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
cases' le_total x (-1) with hx₁ hx₁
[GOAL]
case inl
x y : ℝ
hy : y ∈ Ico (-(π / 2)) (π / 2)
hx₁ : x ≤ -1
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
simp [arcsin_of_le_neg_one hx₁, hy.1, hx₁.trans (neg_one_le_sin _)]
[GOAL]
case inr
x y : ℝ
hy : y ∈ Ico (-(π / 2)) (π / 2)
hx₁ : -1 ≤ x
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
cases' lt_or_le 1 x with hx₂ hx₂
[GOAL]
case inr.inl
x y : ℝ
hy : y ∈ Ico (-(π / 2)) (π / 2)
hx₁ : -1 ≤ x
hx₂ : 1 < x
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
simp [arcsin_of_one_le hx₂.le, hy.2.not_le, (sin_le_one y).trans_lt hx₂]
[GOAL]
case inr.inr
x y : ℝ
hy : y ∈ Ico (-(π / 2)) (π / 2)
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ arcsin x ≤ y ↔ x ≤ sin y
[PROOFSTEP]
exact arcsin_le_iff_le_sin ⟨hx₁, hx₂⟩ (mem_Icc_of_Ico hy)
[GOAL]
x y : ℝ
hx : x ∈ Icc (-(π / 2)) (π / 2)
hy : y ∈ Icc (-1) 1
⊢ x ≤ arcsin y ↔ sin x ≤ y
[PROOFSTEP]
rw [← neg_le_neg_iff, ← arcsin_neg,
arcsin_le_iff_le_sin ⟨neg_le_neg hy.2, neg_le.2 hy.1⟩ ⟨neg_le_neg hx.2, neg_le.2 hx.1⟩, sin_neg, neg_le_neg_iff]
[GOAL]
x y : ℝ
hx : x ∈ Ioc (-(π / 2)) (π / 2)
⊢ x ≤ arcsin y ↔ sin x ≤ y
[PROOFSTEP]
rw [← neg_le_neg_iff, ← arcsin_neg, arcsin_le_iff_le_sin' ⟨neg_le_neg hx.2, neg_lt.2 hx.1⟩, sin_neg, neg_le_neg_iff]
[GOAL]
x y : ℝ
hy : y ∈ Ioo (-(π / 2)) (π / 2)
⊢ arcsin x = y ↔ x = sin y
[PROOFSTEP]
simp only [le_antisymm_iff, arcsin_le_iff_le_sin' (mem_Ico_of_Ioo hy), le_arcsin_iff_sin_le' (mem_Ioc_of_Ioo hy)]
[GOAL]
x : ℝ
⊢ sin 0 ≤ x ↔ 0 ≤ x
[PROOFSTEP]
rw [sin_zero]
[GOAL]
x : ℝ
⊢ arcsin x = 0 ↔ x = 0
[PROOFSTEP]
simp [le_antisymm_iff]
[GOAL]
x : ℝ
⊢ x < sin (π / 2) ↔ x < 1
[PROOFSTEP]
rw [sin_pi_div_two]
[GOAL]
x : ℝ
⊢ sin (-(π / 2)) < x ↔ -1 < x
[PROOFSTEP]
rw [sin_neg, sin_pi_div_two]
[GOAL]
x : ℝ
⊢ π / 4 ≤ arcsin x ↔ sqrt 2 / 2 ≤ x
[PROOFSTEP]
rw [← sin_pi_div_four, le_arcsin_iff_sin_le']
[GOAL]
x : ℝ
⊢ π / 4 ∈ Ioc (-(π / 2)) (π / 2)
[PROOFSTEP]
have := pi_pos
[GOAL]
x : ℝ
this : 0 < π
⊢ π / 4 ∈ Ioc (-(π / 2)) (π / 2)
[PROOFSTEP]
constructor
[GOAL]
case left
x : ℝ
this : 0 < π
⊢ -(π / 2) < π / 4
[PROOFSTEP]
linarith
[GOAL]
case right
x : ℝ
this : 0 < π
⊢ π / 4 ≤ π / 2
[PROOFSTEP]
linarith
[GOAL]
x : ℝ
h : x ∈ Ioo (-(π / 2)) (π / 2)
⊢ sin x ∈ Ioo (-1) 1
[PROOFSTEP]
rwa [mem_Ioo, ← arcsin_lt_pi_div_two, ← neg_pi_div_two_lt_arcsin, arcsin_sin h.1.le h.2.le]
[GOAL]
x : ℝ
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₁ : -1 ≤ x
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : ¬-1 ≤ x ⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : ¬-1 ≤ x
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [not_le] at hx₁
[GOAL]
case neg
x : ℝ
hx₁ : x < -1
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [arcsin_of_le_neg_one hx₁.le, cos_neg, cos_pi_div_two, sqrt_eq_zero_of_nonpos]
[GOAL]
case neg
x : ℝ
hx₁ : x < -1
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₂ : x ≤ 1
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : -1 ≤ x hx₂ : ¬x ≤ 1 ⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [not_le] at hx₂
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : 1 < x
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [arcsin_of_one_le hx₂.le, cos_pi_div_two, sqrt_eq_zero_of_nonpos]
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : 1 < x
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
have : sin (arcsin x) ^ 2 + cos (arcsin x) ^ 2 = 1 := sin_sq_add_cos_sq (arcsin x)
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
this : sin (arcsin x) ^ 2 + cos (arcsin x) ^ 2 = 1
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [← eq_sub_iff_add_eq', ← sqrt_inj (sq_nonneg _) (sub_nonneg.2 (sin_sq_le_one (arcsin x))), sq,
sqrt_mul_self (cos_arcsin_nonneg _)] at this
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
this : cos (arcsin x) = sqrt (1 - sin (arcsin x) ^ 2)
⊢ cos (arcsin x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [this, sin_arcsin hx₁ hx₂]
[GOAL]
x : ℝ
⊢ tan (arcsin x) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [tan_eq_sin_div_cos, cos_arcsin]
[GOAL]
x : ℝ
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₁ : -1 ≤ x
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : ¬-1 ≤ x ⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : ¬-1 ≤ x
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
have h : sqrt (1 - x ^ 2) = 0 := sqrt_eq_zero_of_nonpos (by nlinarith)
[GOAL]
x : ℝ
hx₁ : ¬-1 ≤ x
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case neg
x : ℝ
hx₁ : ¬-1 ≤ x
h : sqrt (1 - x ^ 2) = 0
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [h]
[GOAL]
case neg
x : ℝ
hx₁ : ¬-1 ≤ x
h : sqrt (1 - x ^ 2) = 0
⊢ sin (arcsin x) / 0 = x / 0
[PROOFSTEP]
simp
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₂ : x ≤ 1
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : -1 ≤ x hx₂ : ¬x ≤ 1 ⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
have h : sqrt (1 - x ^ 2) = 0 := sqrt_eq_zero_of_nonpos (by nlinarith)
[GOAL]
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
h : sqrt (1 - x ^ 2) = 0
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [h]
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
h : sqrt (1 - x ^ 2) = 0
⊢ sin (arcsin x) / 0 = x / 0
[PROOFSTEP]
simp
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ sin (arcsin x) / sqrt (1 - x ^ 2) = x / sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [sin_arcsin hx₁ hx₂]
[GOAL]
x : ℝ
⊢ arcsin x = π / 2 - arccos x
[PROOFSTEP]
simp [arccos]
[GOAL]
x : ℝ
⊢ arccos x ≤ π
[PROOFSTEP]
unfold arccos
[GOAL]
x : ℝ
⊢ π / 2 - arcsin x ≤ π
[PROOFSTEP]
linarith [neg_pi_div_two_le_arcsin x]
[GOAL]
x : ℝ
⊢ 0 ≤ arccos x
[PROOFSTEP]
unfold arccos
[GOAL]
x : ℝ
⊢ 0 ≤ π / 2 - arcsin x
[PROOFSTEP]
linarith [arcsin_le_pi_div_two x]
[GOAL]
x : ℝ
⊢ 0 < arccos x ↔ x < 1
[PROOFSTEP]
simp [arccos]
[GOAL]
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ cos (arccos x) = x
[PROOFSTEP]
rw [arccos, cos_pi_div_two_sub, sin_arcsin hx₁ hx₂]
[GOAL]
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ arccos (cos x) = x
[PROOFSTEP]
rw [arccos, ← sin_pi_div_two_sub, arcsin_sin]
[GOAL]
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ π / 2 - (π / 2 - x) = x
[PROOFSTEP]
simp [sub_eq_add_neg]
[GOAL]
case hx₁
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ -(π / 2) ≤ π / 2 - x
[PROOFSTEP]
simp [sub_eq_add_neg]
[GOAL]
case hx₂
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ π / 2 - x ≤ π / 2
[PROOFSTEP]
simp [sub_eq_add_neg]
[GOAL]
case hx₁
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ x ≤ π
[PROOFSTEP]
linarith
[GOAL]
case hx₂
x : ℝ
hx₁ : 0 ≤ x
hx₂ : x ≤ π
⊢ 0 ≤ x
[PROOFSTEP]
linarith
[GOAL]
⊢ arccos 0 = π / 2
[PROOFSTEP]
simp [arccos]
[GOAL]
⊢ arccos 1 = 0
[PROOFSTEP]
simp [arccos]
[GOAL]
⊢ arccos (-1) = π
[PROOFSTEP]
simp [arccos, add_halves]
[GOAL]
x : ℝ
⊢ arccos x = 0 ↔ 1 ≤ x
[PROOFSTEP]
simp [arccos, sub_eq_zero]
[GOAL]
x : ℝ
⊢ arccos x = π / 2 ↔ x = 0
[PROOFSTEP]
simp [arccos]
[GOAL]
x : ℝ
⊢ arccos x = π ↔ x ≤ -1
[PROOFSTEP]
rw [arccos, sub_eq_iff_eq_add, ← sub_eq_iff_eq_add', div_two_sub_self, neg_pi_div_two_eq_arcsin]
[GOAL]
x : ℝ
⊢ arccos (-x) = π - arccos x
[PROOFSTEP]
rw [← add_halves π, arccos, arcsin_neg, arccos, add_sub_assoc, sub_sub_self, sub_neg_eq_add]
[GOAL]
x : ℝ
hx : 1 ≤ x
⊢ arccos x = 0
[PROOFSTEP]
rw [arccos, arcsin_of_one_le hx, sub_self]
[GOAL]
x : ℝ
hx : x ≤ -1
⊢ arccos x = π
[PROOFSTEP]
rw [arccos, arcsin_of_le_neg_one hx, sub_neg_eq_add, add_halves']
[GOAL]
x : ℝ
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₁ : -1 ≤ x
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : ¬-1 ≤ x ⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : ¬-1 ≤ x
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [not_le] at hx₁
[GOAL]
case neg
x : ℝ
hx₁ : x < -1
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [arccos_of_le_neg_one hx₁.le, sin_pi, sqrt_eq_zero_of_nonpos]
[GOAL]
case neg
x : ℝ
hx₁ : x < -1
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
by_cases hx₂ : x ≤ 1
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
case neg x : ℝ hx₁ : -1 ≤ x hx₂ : ¬x ≤ 1 ⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
swap
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : ¬x ≤ 1
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [not_le] at hx₂
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : 1 < x
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [arccos_of_one_le hx₂.le, sin_zero, sqrt_eq_zero_of_nonpos]
[GOAL]
case neg
x : ℝ
hx₁ : -1 ≤ x
hx₂ : 1 < x
⊢ 1 - x ^ 2 ≤ 0
[PROOFSTEP]
nlinarith
[GOAL]
case pos
x : ℝ
hx₁ : -1 ≤ x
hx₂ : x ≤ 1
⊢ sin (arccos x) = sqrt (1 - x ^ 2)
[PROOFSTEP]
rw [arccos_eq_pi_div_two_sub_arcsin, sin_pi_div_two_sub, cos_arcsin]
[GOAL]
x : ℝ
⊢ arccos x ≤ π / 2 ↔ 0 ≤ x
[PROOFSTEP]
simp [arccos]
[GOAL]
x : ℝ
⊢ arccos x < π / 2 ↔ 0 < x
[PROOFSTEP]
simp [arccos]
[GOAL]
x : ℝ
⊢ arccos x ≤ π / 4 ↔ sqrt 2 / 2 ≤ x
[PROOFSTEP]
rw [arccos, ← pi_div_four_le_arcsin]
[GOAL]
x : ℝ
⊢ π / 2 - arcsin x ≤ π / 4 ↔ π / 4 ≤ arcsin x
[PROOFSTEP]
constructor
[GOAL]
case mp
x : ℝ
⊢ π / 2 - arcsin x ≤ π / 4 → π / 4 ≤ arcsin x
[PROOFSTEP]
intro
[GOAL]
case mp
x : ℝ
a✝ : π / 2 - arcsin x ≤ π / 4
⊢ π / 4 ≤ arcsin x
[PROOFSTEP]
linarith
[GOAL]
case mpr
x : ℝ
⊢ π / 4 ≤ arcsin x → π / 2 - arcsin x ≤ π / 4
[PROOFSTEP]
intro
[GOAL]
case mpr
x : ℝ
a✝ : π / 4 ≤ arcsin x
⊢ π / 2 - arcsin x ≤ π / 4
[PROOFSTEP]
linarith
[GOAL]
x : ℝ
⊢ tan (arccos x) = sqrt (1 - x ^ 2) / x
[PROOFSTEP]
rw [arccos, tan_pi_div_two_sub, tan_arcsin, inv_div]
[GOAL]
x : ℝ
h : 0 ≤ x
⊢ 0 ≤ 2
[PROOFSTEP]
norm_num
[GOAL]
x : ℝ
h : 0 ≤ x
⊢ arcsin x = arccos (sqrt (1 - x ^ 2))
[PROOFSTEP]
rw [eq_comm, ← cos_arcsin]
[GOAL]
x : ℝ
h : 0 ≤ x
⊢ arccos (cos (arcsin x)) = arcsin x
[PROOFSTEP]
exact arccos_cos (arcsin_nonneg.2 h) ((arcsin_le_pi_div_two _).trans (div_le_self pi_pos.le one_le_two))
|
/*
libs/numeric/odeint/examples/stochastic_euler.hpp
Copyright 2012 Karsten Ahnert
Copyright 2012-2013 Mario Mulansky
Copyright 2013 Pascal Germroth
Stochastic euler stepper example and Ornstein-Uhlenbeck process
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE_1_0.txt or
copy at http://www.boost.org/LICENSE_1_0.txt)
*/
#include <boost/array.hpp>
#include <boost/numeric/odeint.hpp>
typedef boost::array< double , 1 > state_type;
using namespace boost::numeric::odeint;
//[ generation_functions_own_steppers
class custom_stepper
{
public:
typedef double value_type;
// ...
};
class custom_controller
{
// ...
};
class custom_dense_output
{
// ...
};
//]
//[ generation_functions_get_controller
namespace boost { namespace numeric { namespace odeint {
template<>
struct get_controller< custom_stepper >
{
typedef custom_controller type;
};
} } }
//]
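// Note (added for illustration, not part of the original example):
// get_controller is the metafunction that make_controlled consults to pick
// the controller type for a given stepper, so with the specialization above
// make_controlled( abs_tol , rel_tol , custom_stepper() ) produces a
// custom_controller, constructed via the controller_factory defined below.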
//[ generation_functions_controller_factory
namespace boost { namespace numeric { namespace odeint {
template<>
struct controller_factory< custom_stepper , custom_controller >
{
custom_controller operator()( double abs_tol , double rel_tol , const custom_stepper & ) const
{
return custom_controller();
}
custom_controller operator()( double abs_tol , double rel_tol , double max_dt ,
const custom_stepper & ) const
{
// version with maximal allowed step size max_dt
return custom_controller();
}
};
} } }
//]
int main( int argc , char **argv )
{
{
typedef runge_kutta_dopri5< state_type > stepper_type;
/*
//[ generation_functions_syntax_auto
auto stepper1 = make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() );
// or with max step size limit:
// auto stepper1 = make_controlled( 1.0e-6 , 1.0e-6 , 0.01, stepper_type() );
auto stepper2 = make_dense_output( 1.0e-6 , 1.0e-6 , stepper_type() );
//]
*/
//[ generation_functions_syntax_result_of
boost::numeric::odeint::result_of::make_controlled< stepper_type >::type stepper3 = make_controlled( 1.0e-6 , 1.0e-6 , stepper_type() );
(void)stepper3;
boost::numeric::odeint::result_of::make_dense_output< stepper_type >::type stepper4 = make_dense_output( 1.0e-6 , 1.0e-6 , stepper_type() );
(void)stepper4;
//]
}
{
/*
//[ generation_functions_example_custom_controller
auto stepper5 = make_controlled( 1.0e-6 , 1.0e-6 , custom_stepper() );
//]
*/
boost::numeric::odeint::result_of::make_controlled< custom_stepper >::type stepper5 = make_controlled( 1.0e-6 , 1.0e-6 , custom_stepper() );
(void)stepper5;
}
return 0;
}
|
/-
Copyright (c) 2016 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad
! This file was ported from Lean 3 source module data.int.dvd.pow
! leanprover-community/mathlib commit c3291da49cfa65f0d43b094750541c0731edc932
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Int.Dvd.Basic
import Mathbin.Data.Nat.Pow
/-!
# Basic lemmas about the divisibility relation in `ℤ` involving powers.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
open Nat
namespace Int
/- warning: int.sign_pow_bit1 -> Int.sign_pow_bit1 is a dubious translation:
lean 3 declaration is
forall (k : Nat) (n : Int), Eq.{1} Int (HPow.hPow.{0, 0, 0} Int Nat Int (instHPow.{0, 0} Int Nat (Monoid.Pow.{0} Int Int.monoid)) (Int.sign n) (bit1.{0} Nat Nat.hasOne Nat.hasAdd k)) (Int.sign n)
but is expected to have type
forall (k : Nat) (n : Int), Eq.{1} Int (HPow.hPow.{0, 0, 0} Int Nat Int Int.instHPowIntNat (Int.sign n) (bit1.{0} Nat (CanonicallyOrderedCommSemiring.toOne.{0} Nat Nat.canonicallyOrderedCommSemiring) instAddNat k)) (Int.sign n)
Case conversion may be inaccurate. Consider using '#align int.sign_pow_bit1 Int.sign_pow_bit1ₓ'. -/
@[simp]
theorem sign_pow_bit1 (k : ℕ) : ∀ n : ℤ, n.sign ^ bit1 k = n.sign
| (n + 1 : ℕ) => one_pow (bit1 k)
| 0 => zero_pow (Nat.zero_lt_bit1 k)
| -[n+1] => (neg_pow_bit1 1 k).trans (congr_arg (fun x => -x) (one_pow (bit1 k)))
#align int.sign_pow_bit1 Int.sign_pow_bit1
/- warning: int.pow_dvd_of_le_of_pow_dvd -> Int.pow_dvd_of_le_of_pow_dvd is a dubious translation:
lean 3 declaration is
forall {p : Nat} {m : Nat} {n : Nat} {k : Int}, (LE.le.{0} Nat Nat.hasLe m n) -> (Dvd.Dvd.{0} Int (semigroupDvd.{0} Int Int.semigroup) ((fun (a : Type) (b : Type) [self : HasLiftT.{1, 1} a b] => self.0) Nat Int (HasLiftT.mk.{1, 1} Nat Int (CoeTCₓ.coe.{1, 1} Nat Int (coeBase.{1, 1} Nat Int Int.hasCoe))) (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat (Monoid.Pow.{0} Nat Nat.monoid)) p n)) k) -> (Dvd.Dvd.{0} Int (semigroupDvd.{0} Int Int.semigroup) ((fun (a : Type) (b : Type) [self : HasLiftT.{1, 1} a b] => self.0) Nat Int (HasLiftT.mk.{1, 1} Nat Int (CoeTCₓ.coe.{1, 1} Nat Int (coeBase.{1, 1} Nat Int Int.hasCoe))) (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat (Monoid.Pow.{0} Nat Nat.monoid)) p m)) k)
but is expected to have type
forall {p : Nat} {m : Nat} {n : Nat} {k : Int}, (LE.le.{0} Nat instLENat m n) -> (Dvd.dvd.{0} Int Int.instDvdInt (Nat.cast.{0} Int instNatCastInt (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat instPowNat) p n)) k) -> (Dvd.dvd.{0} Int Int.instDvdInt (Nat.cast.{0} Int instNatCastInt (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat instPowNat) p m)) k)
Case conversion may be inaccurate. Consider using '#align int.pow_dvd_of_le_of_pow_dvd Int.pow_dvd_of_le_of_pow_dvdₓ'. -/
theorem pow_dvd_of_le_of_pow_dvd {p m n : ℕ} {k : ℤ} (hmn : m ≤ n) (hdiv : ↑(p ^ n) ∣ k) :
↑(p ^ m) ∣ k := by
induction k
· apply Int.coe_nat_dvd.2
apply pow_dvd_of_le_of_pow_dvd hmn
apply Int.coe_nat_dvd.1 hdiv
change -[k+1] with -(↑(k + 1) : ℤ)
apply dvd_neg_of_dvd
apply Int.coe_nat_dvd.2
apply pow_dvd_of_le_of_pow_dvd hmn
apply Int.coe_nat_dvd.1
apply dvd_of_dvd_neg
exact hdiv
#align int.pow_dvd_of_le_of_pow_dvd Int.pow_dvd_of_le_of_pow_dvd
/- warning: int.dvd_of_pow_dvd -> Int.dvd_of_pow_dvd is a dubious translation:
lean 3 declaration is
forall {p : Nat} {k : Nat} {m : Int}, (LE.le.{0} Nat Nat.hasLe (OfNat.ofNat.{0} Nat 1 (OfNat.mk.{0} Nat 1 (One.one.{0} Nat Nat.hasOne))) k) -> (Dvd.Dvd.{0} Int (semigroupDvd.{0} Int Int.semigroup) ((fun (a : Type) (b : Type) [self : HasLiftT.{1, 1} a b] => self.0) Nat Int (HasLiftT.mk.{1, 1} Nat Int (CoeTCₓ.coe.{1, 1} Nat Int (coeBase.{1, 1} Nat Int Int.hasCoe))) (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat (Monoid.Pow.{0} Nat Nat.monoid)) p k)) m) -> (Dvd.Dvd.{0} Int (semigroupDvd.{0} Int Int.semigroup) ((fun (a : Type) (b : Type) [self : HasLiftT.{1, 1} a b] => self.0) Nat Int (HasLiftT.mk.{1, 1} Nat Int (CoeTCₓ.coe.{1, 1} Nat Int (coeBase.{1, 1} Nat Int Int.hasCoe))) p) m)
but is expected to have type
forall {p : Nat} {k : Nat} {m : Int}, (LE.le.{0} Nat instLENat (OfNat.ofNat.{0} Nat 1 (instOfNatNat 1)) k) -> (Dvd.dvd.{0} Int Int.instDvdInt (Nat.cast.{0} Int instNatCastInt (HPow.hPow.{0, 0, 0} Nat Nat Nat (instHPow.{0, 0} Nat Nat instPowNat) p k)) m) -> (Dvd.dvd.{0} Int Int.instDvdInt (Nat.cast.{0} Int instNatCastInt p) m)
Case conversion may be inaccurate. Consider using '#align int.dvd_of_pow_dvd Int.dvd_of_pow_dvdₓ'. -/
theorem dvd_of_pow_dvd {p k : ℕ} {m : ℤ} (hk : 1 ≤ k) (hpk : ↑(p ^ k) ∣ m) : ↑p ∣ m := by
rw [← pow_one p] <;> exact pow_dvd_of_le_of_pow_dvd hk hpk
#align int.dvd_of_pow_dvd Int.dvd_of_pow_dvd
end Int
|
{-# language FlexibleInstances #-}
{-# language KindSignatures #-}
{-# language FunctionalDependencies #-}
{-# language LambdaCase #-}
{-# language MultiParamTypeClasses #-}
{-# language Rank2Types #-}
{-# language TupleSections #-}
module Main where
import Control.Monad.Reader
import Data.Complex
import Data.Functor.Const
import Data.Functor.Contravariant
import Data.Functor.Identity
import Data.Monoid (First(..))
import Data.Profunctor
import Data.Profunctor.Unsafe ((#.), (.#))
import Data.Tagged
import Data.Void (absurd, vacuous)
type Equality s t a b
= forall (p :: * -> * -> *) f.
p a (f b) -> p s (f t)
type Lens s t a b
= forall f. Functor f
=> (a -> f b) -> s -> f t
type Traversal s t a b
= forall f. Applicative f
=> (a -> f b) -> s -> f t
type Iso s t a b
= forall p f. (Functor f, Profunctor p)
=> p a (f b) -> p s (f t)
type Prism s t a b
= forall p f. (Applicative f, Choice p)
=> p a (f b) -> p s (f t)
type Getter s a
= forall f. (Functor f, Contravariant f)
=> (a -> f a) -> s -> f s
type Fold s a
= forall f. (Applicative f, Contravariant f)
=> (a -> f a) -> s -> f s
type Optic p f s t a b
= p a (f b) -> p s (f t)
type Optic' p f t b
= p b (f b) -> p t (f t)
type Review t b
= Tagged b (Identity b) -> Tagged t (Identity t)
-- in other words,
type Review' t b
= Optic' Tagged Identity t b
-- Traversal:
--
-- We strengthen `Functor` to `Applicative`. Why?
--
-- Clue: note that
--
-- traverse :: Applicative f => (a -> f b) -> t a -> f (t b)
-- ~ Traversable f => Traversal (f a) (f b) a b
--
-- So, instead we can ask why does `traverse` require `Applicative`?
--
-- The instance for lists is informative:
--
-- instance Traversable [] where
-- traverse _ [] = pure []
-- traverse f (x:xs) = (:) <$> f x <*> traverse f xs
--
-- Every lens is a valid Traversal. Witness:
lensToTraversal :: Lens s t a b -> Traversal s t a b
lensToTraversal = id
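-- A concrete Traversal, added here for illustration (not part of the original
-- file): it targets both components of a homogeneous pair. Combining the two
-- effects with (<*>) is exactly what Applicative buys us over Functor.
both :: Traversal (a, a) (b, b) a b
both f (x, y) = (,) <$> f x <*> f y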
--
-- Iso:
--
-- We generalize `->` to `(Profunctor p =>) p`. Why?
--
-- One answer is that using `Exchange` (a `Profunctor`) allows us to reverse
-- the `Iso` and extract its parts.
--
-- Prism:
--
-- Here `f` is an `Applicative` and `p` is a `Choice`. Why?
--
-- First of all:
-- * Every `Prism`: Applicative f, Choice p
-- is a valid v v
-- `Traversal`: Applicative f
--
-- witness:
prismToTraversal :: Prism s t a b -> Traversal s t a b
prismToTraversal = id
--
-- Here `Prism` adds `Choice p`, meaning TODO
--
-- * Every `Iso`: Functor f, Profunctor p
-- is a valid v v
-- `Prism`: Applicative f, Choice p
--
-- witness:
isoToPrism :: Iso s t a b -> Prism s t a b
isoToPrism = id
-- Iso's are everything actually:
isoToLens :: Iso s t a b -> Lens s t a b
isoToLens = id
isoToTraversal :: Iso s t a b -> Traversal s t a b
isoToTraversal = id
isoToGetter :: Iso s s a a -> Getter s a
isoToGetter = id
isoToFold :: Iso s s a a -> Fold s a
isoToFold = id
isoToReview :: Iso s s a a -> Review s a
isoToReview = id
-- Except an `Equality`, that is. An `Equality` is an `Iso`:
equalityToIso :: Equality s t a b -> Iso s t a b
equalityToIso = id
-- An Equality is a witness that a ~ s and b ~ t.
--
--
-- Here `Prism` strengthens the constraints:
-- * `Functor` to `Applicative` because it doesn't touch exactly one position
-- * `Profunctor` to `Choice` because TODO
--
-- Getter:
--
-- A getter describes how to retrieve a single value
--
-- > for f to be both Functor and Contravariant implies `f a` doesn't contain
-- > any `a`s at all!
--
-- citation: https://www.reddit.com/r/haskell/comments/5vb6x1/how_do_i_learn_lensinternals/de0uz1v
-- Witness:
coerceGetterF :: (Functor f, Contravariant f) => f a -> f b
coerceGetterF = vacuous . contramap absurd
-- Fold:
--
-- A `Fold` describes how to retrieve multiple values
--
-- Note how conspicuously similar `Fold` and `Getter` are:
--
-- TODO: understand
-- "A `Getter` is a legal `Fold` that just ignores the supplied `Monoid`."
--
-- * Every `Getter`: Functor f, Contravariant f
-- is a valid v v
-- `Fold`: Applicative f, Contravariant f
--
-- Witness:
getterToFold :: Getter s a -> Fold s a
getterToFold = id
-- Like `Getter`, note that the functor used in the `Fold` can't contain any
-- values:
coerceFoldF :: (Applicative f, Contravariant f) => f a -> f b
coerceFoldF = vacuous . contramap absurd
-- A "is a limited form of a `Prism` that can only be used for `re` operations.
-- Witness:
prismToReview :: Prism' t b -> Review t b
prismToReview = id
type Simple f s a = f s s a a
type Equality' s a = Simple Equality s a
type Lens' s a = Simple Lens s a
type Iso' s a = Simple Iso s a
type Prism' s a = Simple Prism s a
type Getting r s a = (a -> Const r a) -> s -> Const r s
iso :: (s -> a) -> (b -> t) -> Iso s t a b
iso sa bt = dimap sa (fmap bt)
-- used to provide access to the two parts of an iso
data Exchange a b s t = Exchange (s -> a) (b -> t)
instance Functor (Exchange a b s) where
fmap g' (Exchange f g) = Exchange f (g' . g)
instance Profunctor (Exchange a b) where
dimap f' g' (Exchange f g) = Exchange (f . f') (g' . g)
withIso :: Iso s t a b -> ((s -> a) -> (b -> t) -> r) -> r
withIso ai k = case ai (Exchange id Identity) of
Exchange sa bt -> k sa (runIdentity #. bt)
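-- Illustrative use of withIso (added; evaluated by hand):
--   withIso (iso fst (\a -> (a, ()))) (\sa bt -> (sa (1, ()), bt 2))
--     == (1, (2, ()))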
-- used to provide access to the two parts of a prism
data Market a b s t = Market (b -> t) (s -> Either t a)
instance Functor (Market a b s) where
fmap h (Market f g) = Market (h . f) (either (Left . h) Right . g)
instance Profunctor (Market a b) where
dimap f' g' (Market f g) = Market (g' . f) (left' g' . g . f')
instance Choice (Market a b) where
left' (Market f g) = Market (Left . f) $ \case
Left x -> case g x of
Left y -> Left (Left y)
Right y -> Right y
Right c -> Left (Right c)
-- TODO: implement right'
withPrism :: Prism s t a b -> ((b -> t) -> (s -> Either t a) -> r) -> r
withPrism p k = case p (Market Identity Right) of
Market bt sa -> k (runIdentity #. bt) (left' runIdentity . sa)
prism :: (b -> t) -> (s -> Either t a) -> Prism s t a b
prism bt seta = dimap seta (either pure (fmap bt)) . right'
matching :: Prism s t a b -> s -> Either t a
matching k = withPrism k $ \_ seta -> seta
_Left :: Prism (Either a c) (Either b c) a b
_Left = prism Left $ either Right (Left . Right)
_Right :: Prism (Either c a) (Either c b) a b
_Right = prism Right $ either (Left . Left) Right
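-- Another Prism built with the smart constructor above, added for
-- illustration: it focuses on the Just constructor of Maybe.
_Just :: Prism (Maybe a) (Maybe b) a b
_Just = prism Just $ maybe (Left Nothing) Right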
-- TODO: rewrite / become more comfortable with this section
re :: Review t b -> Getter b t
re p = to (runIdentity #. unTagged #. p .# Tagged .# Identity)
to :: (Profunctor p, Contravariant f) => (s -> a) -> Optic' p f s a
to k = dimap k (contramap k)
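-- Illustrative use of `to` (added): any plain function becomes a Getter that
-- can be consumed with foldMapOf, e.g.
--   foldMapOf (to fst) id ('a', 'b') == 'a'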
reviewSimple :: Prism' s a -> a -> s
reviewSimple r = runIdentity . unTagged . r . Tagged . Identity
review :: MonadReader b m => Optic' Tagged Identity t b -> m t
review r = asks $ runIdentity . unTagged . r . Tagged . Identity
foldMapOf :: Getting r s a -> (a -> r) -> s -> r
foldMapOf l f = getConst #. l (Const #. f)
preview :: Prism' s a -> s -> Maybe a
preview p s = getFirst #. foldMapOf p (First #. Just) $ s
(^?) :: s -> Prism' s a -> Maybe a
(^?) s p = preview p s
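-- Quick sanity checks for the Prism machinery (added; evaluated by hand,
-- assuming the definitions above):
--   Left 3 ^? _Left      == Just 3
--   Right "x" ^? _Left   == Nothing
--   reviewSimple _Left (3 :: Int) == Left 3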
--
from :: Iso s t a b -> Iso b a t s
from l = withIso l $ \ sa bt -> iso bt sa
view :: Lens s t a b -> s -> a
view l s = getConst (l Const s)
over :: Lens s t a b -> (a -> b) -> s -> t
over l f = runIdentity . l (Identity . f)
set :: Lens s t a b -> b -> s -> t
set l = over l . const
lens :: (s -> a) -> (s -> b -> t) -> Lens s t a b
lens get' set' f s = set' s <$> f (get' s)
realLens, realLens' :: RealFloat a => Lens' (Complex a) a
realLens f (r :+ i) = fmap (:+ i) (f r)
imagLens, imagLens' :: RealFloat a => Lens' (Complex a) a
imagLens f (r :+ i) = fmap (r :+) (f i)
realLens' = lens (\(r :+ _) -> r) (\(_ :+ i) r -> r :+ i)
imagLens' = lens (\(_ :+ i) -> i) (\(r :+ _) i -> r :+ i)
(<&>) :: Functor f => f a -> (a -> b) -> f b
(<&>) = flip fmap
class Field1 s t a b | s -> a, t -> b, s b -> t, t a -> s where
_1 :: Lens s t a b
instance Field1 (a, b) (a', b) a a' where
_1 f ~(a, b) = f a <&> \a' -> (a', b)
instance Field1 (a, b, c) (a', b, c) a a' where
_1 k ~(a, b, c) = k a <&> \a' -> (a', b, c)
class Field2 s t a b | s -> a, t -> b, s b -> t, t a -> s where
_2 :: Lens s t a b
instance Field2 (a, b) (a, b') b b' where
_2 f ~(a, b) = f b <&> \b' -> (a, b')
instance Field2 (a, b, c) (a, b', c) b b' where
_2 k ~(a, b, c) = k b <&> \b' -> (a, b', c)
main :: IO ()
main = do
print $ set realLens (1 :: Double) (0 :+ 1)
print $ view _1 ('x', 'y', 'z')
print $ view _2 $ set _2 False ('x', 'y', 'z')
|
(*
File: Shapiro_Tauberian.thy
Author: Manuel Eberl, TU München
Shapiro's Tauberian theorem
(see Section 4.6 of Apostol's Introduction to Analytic Number Theory)
*)
section \<open>Shapiro's Tauberian Theorem\<close>
theory Shapiro_Tauberian
imports
More_Dirichlet_Misc
Prime_Number_Theorem.Prime_Counting_Functions
Prime_Distribution_Elementary_Library
begin
subsection \<open>Proof\<close>
text \<open>
  Given an arithmetical function $a(n)$, Shapiro's Tauberian theorem relates the sum
$\sum_{n\leq x} a(n)$ to the weighted sums $\sum_{n\leq x} a(n) \lfloor\frac{x}{n}\rfloor$
and $\sum_{n\leq x} a(n)/n$.
More precisely, it shows that if $\sum_{n\leq x} a(n) \lfloor\frac{x}{n}\rfloor = x\ln x + O(x)$,
then:
\<^item> $\sum_{n\leq x} \frac{a(n)}{n} = \ln x + O(1)$
\<^item> $\sum_{n\leq x} a(n) \leq Bx$ for some constant $B\geq 0$ and all $x\geq 0$
\<^item> $\sum_{n\leq x} a(n) \geq Cx$ for some constant $C>0$ and all $x\geq 1/C$
\<close>
locale shapiro_tauberian =
fixes a :: "nat \<Rightarrow> real" and A S T :: "real \<Rightarrow> real"
defines "A \<equiv> sum_upto (\<lambda>n. a n / n)"
defines "S \<equiv> sum_upto a"
defines "T \<equiv> (\<lambda>x. dirichlet_prod' a floor x)"
assumes a_nonneg: "\<And>n. n > 0 \<Longrightarrow> a n \<ge> 0"
assumes a_asymptotics: "(\<lambda>x. T x - x * ln x) \<in> O(\<lambda>x. x)"
begin
lemma fin: "finite X" if "X \<subseteq> {n. real n \<le> x}" for X x
by (rule finite_subset[of _ "{..nat \<lfloor>x\<rfloor>}"]) (use that in \<open>auto simp: le_nat_iff le_floor_iff\<close>)
lemma S_mono: "S x \<le> S y" if "x \<le> y" for x y
unfolding S_def sum_upto_def using that by (intro sum_mono2 fin[of _ y] a_nonneg) auto
lemma split:
fixes f :: "nat \<Rightarrow> real"
assumes "\<alpha> \<in> {0..1}"
shows "sum_upto f x = sum_upto f (\<alpha>*x) + (\<Sum>n | n > 0 \<and> real n \<in> {\<alpha>*x<..x}. f n)"
proof (cases "x > 0")
case False
hence *: "{n. n > 0 \<and> real n \<le> x} = {}" "{n. n > 0 \<and> real n \<in> {\<alpha>*x<..x}} = {}"
using mult_right_mono[of \<alpha> 1 x] assms by auto
have "\<alpha> * x \<le> 0"
using False assms by (intro mult_nonneg_nonpos) auto
hence **: "{n. n > 0 \<and> real n \<le> \<alpha> * x} = {}"
by auto
show ?thesis
unfolding sum_upto_def * ** by auto
next
case True
have "sum_upto f x = (\<Sum>n | n > 0 \<and> real n \<le> x. f n)"
by (simp add: sum_upto_def)
also have "{n. n > 0 \<and> real n \<le> x} =
{n. n > 0 \<and> real n \<le> \<alpha>*x} \<union> {n. n > 0 \<and> real n \<in> {\<alpha>*x<..x}}"
using assms True mult_right_mono[of \<alpha> 1 x] by (force intro: order_trans)
also have "(\<Sum>n\<in>\<dots>. f n) = sum_upto f (\<alpha>*x) + (\<Sum>n | n > 0 \<and> real n \<in> {\<alpha>*x<..x}. f n)"
by (subst sum.union_disjoint) (auto intro: fin simp: sum_upto_def)
finally show ?thesis .
qed
lemma S_diff_T_diff: "S x - S (x / 2) \<le> T x - 2 * T (x / 2)"
proof -
note fin = fin[of _ x]
have T_diff_eq:
"T x - 2 * T (x / 2) = sum_upto (\<lambda>n. a n * (\<lfloor>x / n\<rfloor> - 2 * \<lfloor>x / (2 * n)\<rfloor>)) (x / 2) +
(\<Sum>n | n > 0 \<and> real n \<in> {x/2<..x}. a n * \<lfloor>x / n\<rfloor>)"
unfolding T_def dirichlet_prod'_def
by (subst split[where \<alpha> = "1/2"])
(simp_all add: sum_upto_def sum_subtractf ring_distribs
sum_distrib_left sum_distrib_right mult_ac)
have "S x - S (x / 2) = (\<Sum>n | n > 0 \<and> real n \<in> {x/2<..x}. a n)"
unfolding S_def by (subst split[where \<alpha> = "1 / 2"]) (auto simp: sum_upto_def)
also have "\<dots> = (\<Sum>n | n > 0 \<and> real n \<in> {x/2<..x}. a n * \<lfloor>x / n\<rfloor>)"
proof (intro sum.cong)
fix n assume "n \<in> {n. n > 0 \<and> real n \<in> {x/2<..x}}"
hence "x / n \<ge> 1" "x / n < 2" by (auto simp: field_simps)
hence "\<lfloor>x / n\<rfloor> = 1" by linarith
thus "a n = a n * \<lfloor>x / n\<rfloor>" by simp
qed auto
also have "\<dots> = 0 + \<dots>" by simp
also have "0 \<le> sum_upto (\<lambda>n. a n * (\<lfloor>x / n\<rfloor> - 2 * \<lfloor>x / (2 * n)\<rfloor>)) (x / 2)"
unfolding sum_upto_def
proof (intro sum_nonneg mult_nonneg_nonneg a_nonneg)
fix n assume "n \<in> {n. n > 0 \<and> real n \<le> x / 2}"
hence "x / real n \<ge> 2" by (auto simp: field_simps)
thus "real_of_int (\<lfloor>x / n\<rfloor> - 2 * \<lfloor>x / (2 * n)\<rfloor>) \<ge> 0"
using le_mult_floor[of 2 "x / (2 * n)"] by (simp add: mult_ac)
qed auto
also have "\<dots> + (\<Sum>n | n > 0 \<and> real n \<in> {x/2<..x}. a n * \<lfloor>x / n\<rfloor>) = T x - 2 * T (x / 2)"
using T_diff_eq ..
finally show "S x - S (x / 2) \<le> T x - 2 * T (x / 2)" by simp
qed
lemma
shows diff_bound_strong: "\<exists>c\<ge>0. \<forall>x\<ge>0. x * A x - T x \<in> {0..c*x}"
and asymptotics: "(\<lambda>x. A x - ln x) \<in> O(\<lambda>_. 1)"
and upper: "\<exists>c\<ge>0. \<forall>x\<ge>0. S x \<le> c * x"
and lower: "\<exists>c>0. \<forall>x\<ge>1/c. S x \<ge> c * x"
and bigtheta: "S \<in> \<Theta>(\<lambda>x. x)"
proof -
\<comment> \<open>We first prove the third case, i.\,e.\ the upper bound for \<open>S\<close>.\<close>
have "(\<lambda>x. S x - S (x / 2)) \<in> O(\<lambda>x. T x - 2 * T (x / 2))"
proof (rule le_imp_bigo_real)
show "eventually (\<lambda>x. S x - S (x / 2) \<ge> 0) at_top"
using eventually_ge_at_top[of 0]
proof eventually_elim
case (elim x)
thus ?case using S_mono[of "x / 2" x] by simp
qed
next
show "eventually (\<lambda>x. S x - S (x / 2) \<le> 1 * (T x - 2 * T (x / 2))) at_top"
using S_diff_T_diff by simp
qed auto
also have "(\<lambda>x. T x - 2 * T (x / 2)) \<in> O(\<lambda>x. x)"
proof -
have "(\<lambda>x. T x - 2 * T (x / 2)) =
(\<lambda>x. (T x - x * ln x) - 2 * (T (x / 2) - (x / 2) * ln (x / 2))
+ x * (ln x - ln (x / 2)))" by (simp add: algebra_simps)
also have "\<dots> \<in> O(\<lambda>x. x)"
proof (rule sum_in_bigo, rule sum_in_bigo)
show "(\<lambda>x. T x - x * ln x) \<in> O(\<lambda>x. x)" by (rule a_asymptotics)
next
have "(\<lambda>x. T (x / 2) - (x / 2) * ln (x / 2)) \<in> O(\<lambda>x. x / 2)"
using a_asymptotics by (rule landau_o.big.compose) real_asymp+
thus "(\<lambda>x. 2 * (T (x / 2) - x / 2 * ln (x / 2))) \<in> O(\<lambda>x. x)"
unfolding cmult_in_bigo_iff by (subst (asm) landau_o.big.cdiv) auto
qed real_asymp+
finally show ?thesis .
qed
finally have S_diff_bigo: "(\<lambda>x. S x - S (x / 2)) \<in> O(\<lambda>x. x)" .
obtain c1 where c1: "c1 \<ge> 0" "\<And>x. x \<ge> 0 \<Longrightarrow> S x \<le> c1 * x"
proof -
from S_diff_bigo have "(\<lambda>n. S (real n) - S (real n / 2)) \<in> O(\<lambda>n. real n)"
by (rule landau_o.big.compose) real_asymp
from natfun_bigoE[OF this, of 1] obtain c
where "c > 0" "\<forall>n\<ge>1. \<bar>S (real n) - S (real n / 2)\<bar> \<le> c * real n" by auto
hence c: "S (real n) - S (real n / 2) \<le> c * real n" if "n \<ge> 1" for n
using S_mono[of "real n" "2 * real n"] that by auto
have c_twopow: "S (2 ^ Suc n / 2) - S (2 ^ n / 2) \<le> c * 2 ^ n" for n
using c[of "2 ^ n"] by simp
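  \<comment> \<open>Telescoping this estimate over powers of two and summing the resulting geometric
      series gives a bound for \<open>S\<close> at powers of two; monotonicity of \<open>S\<close> then extends
      the bound to all \<open>x \<ge> 1\<close>.\<close>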
have S_twopow_le: "S (2 ^ k) \<le> 2 * c * 2 ^ k" for k
proof -
have [simp]: "{0<..Suc 0} = {1}" by auto
have "(\<Sum>r<Suc k. S (2 ^ Suc r / 2) - S (2 ^ r / 2)) \<le> (\<Sum>r<Suc k. c * 2 ^ r)"
by (intro sum_mono c_twopow)
also have "(\<Sum>r<Suc k. S (2 ^ Suc r / 2) - S (2 ^ r / 2)) = S (2 ^ k)"
by (subst sum_lessThan_telescope) (auto simp: S_def sum_upto_altdef)
also have "(\<Sum>r<Suc k. c * 2 ^ r) = c * (\<Sum>r<Suc k. 2 ^ r)"
unfolding sum_distrib_left ..
also have "(\<Sum>r<Suc k. 2 ^ r :: real) = 2^Suc k - 1"
by (subst geometric_sum) auto
also have "c * \<dots> \<le> c * 2 ^ Suc k"
using \<open>c > 0\<close> by (intro mult_left_mono) auto
finally show "S (2 ^ k) \<le> 2 * c * 2 ^ k" by simp
qed
have S_le: "S x \<le> 4 * c * x" if "x \<ge> 0" for x
proof (cases "x \<ge> 1")
case False
with that have "x \<in> {0..<1}" by auto
thus ?thesis using \<open>c > 0\<close> by (auto simp: S_def sum_upto_altdef)
next
case True
hence x: "x \<ge> 1" by simp
define n where "n = nat \<lfloor>log 2 x\<rfloor>"
have "2 powr real n \<le> 2 powr (log 2 x)"
unfolding n_def using x by (intro powr_mono) auto
hence ge: "2 ^ n \<le> x" using x by (subst (asm) powr_realpow) auto
have "2 powr real (Suc n) > 2 powr (log 2 x)"
unfolding n_def using x by (intro powr_less_mono) linarith+
hence less: "2 ^ (Suc n) > x" using x by (subst (asm) powr_realpow) auto
have "S x \<le> S (2 ^ Suc n)"
using less by (intro S_mono) auto
also have "\<dots> \<le> 2 * c * 2 ^ Suc n"
by (intro S_twopow_le)
also have "\<dots> = 4 * c * 2 ^ n"
by simp
also have "\<dots> \<le> 4 * c * x"
by (intro mult_left_mono ge) (use x \<open>c > 0\<close> in auto)
finally show "S x \<le> 4 * c * x" .
qed
with that[of "4 * c"] and \<open>c > 0\<close> show ?thesis by auto
qed
thus "\<exists>c\<ge>0. \<forall>x\<ge>0. S x \<le> c * x" by auto
\<comment> \<open>The asymptotics of \<open>A\<close> follows from this immediately:\<close>
have a_strong: "x * A x - T x \<in> {0..c1 * x}" if x: "x \<ge> 0" for x
proof -
have "sum_upto (\<lambda>n. a n * frac (x / n)) x \<le> sum_upto (\<lambda>n. a n * 1) x" unfolding sum_upto_def
by (intro sum_mono mult_left_mono a_nonneg) (auto intro: less_imp_le frac_lt_1)
also have "\<dots> = S x" unfolding S_def by simp
also from x have "\<dots> \<le> c1 * x" by (rule c1)
finally have "sum_upto (\<lambda>n. a n * frac (x / n)) x \<le> c1 * x" .
moreover have "sum_upto (\<lambda>n. a n * frac (x / n)) x \<ge> 0"
unfolding sum_upto_def by (intro sum_nonneg mult_nonneg_nonneg a_nonneg) auto
ultimately have "sum_upto (\<lambda>n. a n * frac (x / n)) x \<in> {0..c1*x}" by auto
also have "sum_upto (\<lambda>n. a n * frac (x / n)) x = x * A x - T x"
by (simp add: T_def A_def sum_upto_def sum_subtractf frac_def algebra_simps
sum_distrib_left sum_distrib_right dirichlet_prod'_def)
finally show ?thesis .
qed
thus "\<exists>c\<ge>0. \<forall>x\<ge>0. x * A x - T x \<in> {0..c*x}"
using \<open>c1 \<ge> 0\<close> by (intro exI[of _ c1]) auto
hence "(\<lambda>x. x * A x - T x) \<in> O(\<lambda>x. x)"
using a_strong \<open>c1 \<ge> 0\<close>
by (intro le_imp_bigo_real[of c1] eventually_mono[OF eventually_ge_at_top[of 1]]) auto
from this and a_asymptotics have "(\<lambda>x. (x * A x - T x) + (T x - x * ln x)) \<in> O(\<lambda>x. x)"
by (rule sum_in_bigo)
hence "(\<lambda>x. x * (A x - ln x)) \<in> O(\<lambda>x. x * 1)"
by (simp add: algebra_simps)
thus bigo: "(\<lambda>x. A x - ln x) \<in> O(\<lambda>x. 1)"
by (subst (asm) landau_o.big.mult_cancel_left) auto
\<comment> \<open>It remains to show the lower bound for \<open>S\<close>.\<close>
define R where "R = (\<lambda>x. A x - ln x)"
obtain M where M: "\<And>x. x \<ge> 1 \<Longrightarrow> \<bar>R x\<bar> \<le> M"
proof -
have "(\<lambda>n. R (real n)) \<in> O(\<lambda>_. 1)"
using bigo unfolding R_def by (rule landau_o.big.compose) real_asymp
from natfun_bigoE[OF this, of 0] obtain M where M: "M > 0" "\<And>n. \<bar>R (real n)\<bar> \<le> M"
by auto
have "\<bar>R x\<bar> \<le> M + ln 2" if x: "x \<ge> 1" for x
proof -
define n where "n = nat \<lfloor>x\<rfloor>"
have "\<bar>R x - R (real n)\<bar> = ln (x / n)"
using x by (simp add: R_def A_def sum_upto_altdef n_def ln_div)
also {
have "x \<le> real n + 1"
unfolding n_def by linarith
also have "1 \<le> real n"
using x unfolding n_def by simp
finally have "ln (x / n) \<le> ln 2"
using x by (simp add: field_simps)
}
finally have "\<bar>R x\<bar> \<le> \<bar>R (real n)\<bar> + ln 2"
by linarith
also have "\<bar>R (real n)\<bar> \<le> M"
by (rule M)
finally show "\<bar>R x\<bar> \<le> M + ln 2" by simp
qed
with that[of "M + ln 2"] show ?thesis by blast
qed
have "M \<ge> 0" using M[of 1] by simp
have A_diff_ge: "A x - A (\<alpha>*x) \<ge> -ln \<alpha> - 2 * M"
if \<alpha>: "\<alpha> \<in> {0<..<1}" and "x \<ge> 1 / \<alpha>" for x \<alpha> :: real
proof -
from that have "1 < inverse \<alpha> * 1" by (simp add: field_simps)
also have "\<dots> \<le> inverse \<alpha> * (\<alpha> * x)"
using \<open>x \<ge> 1 / \<alpha>\<close> and \<alpha> by (intro mult_left_mono) (auto simp: field_simps)
also from \<alpha> have "\<dots> = x" by simp
finally have "x > 1" .
note x = this \<open>x >= 1 / \<alpha>\<close>
have "-ln \<alpha> - M - M \<le> -ln \<alpha> - \<bar>R x\<bar> - \<bar>R (\<alpha>*x)\<bar>"
using x \<alpha> by (intro diff_mono M) (auto simp: field_simps)
also have "\<dots> \<le> -ln \<alpha> + R x - R (\<alpha>*x)"
by linarith
also have "\<dots> = A x - A (\<alpha>*x)"
using \<alpha> x by (simp add: R_def ln_mult)
finally show "A x - A (\<alpha>*x) \<ge> -ln \<alpha> - 2 * M" by simp
qed
define \<alpha> where "\<alpha> = exp (-2*M-1)"
have "\<alpha> \<in> {0<..<1}"
using \<open>M \<ge> 0\<close> by (auto simp: \<alpha>_def)
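  \<comment> \<open>This choice of \<open>\<alpha>\<close> makes \<open>-ln \<alpha> - 2 * M = 1\<close>, which is exactly the positive
      constant needed in the estimate below.\<close>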
have S_ge: "S x \<ge> \<alpha> * x" if x: "x \<ge> 1 / \<alpha>" for x
proof -
have "1 = -ln \<alpha> - 2 * M"
by (simp add: \<alpha>_def)
also have "\<dots> \<le> A x - A (\<alpha>*x)"
by (intro A_diff_ge) fact+
also have "\<dots> = (\<Sum>n | n > 0 \<and> real n \<in> {\<alpha>*x<..x}. a n / n)"
unfolding A_def using \<open>\<alpha> \<in> {0<..<1}\<close> by (subst split[where \<alpha> = \<alpha>]) auto
also have "\<dots> \<le> (\<Sum>n | n > 0 \<and> real n \<in> {\<alpha>*x<..x}. a n / (\<alpha>*x))"
using x \<open>\<alpha> \<in> {0<..<1}\<close> by (intro sum_mono divide_left_mono a_nonneg) auto
also have "\<dots> = (\<Sum>n | n > 0 \<and> real n \<in> {\<alpha>*x<..x}. a n) / (\<alpha>*x)"
by (simp add: sum_divide_distrib)
also have "\<dots> \<le> S x / (\<alpha>*x)"
using x \<open>\<alpha> \<in> {0<..<1}\<close> unfolding S_def sum_upto_def
by (intro divide_right_mono sum_mono2 a_nonneg) (auto simp: field_simps)
finally show "S x \<ge> \<alpha> * x"
using \<open>\<alpha> \<in> {0<..<1}\<close> x by (simp add: field_simps)
qed
thus "\<exists>c>0. \<forall>x\<ge>1/c. S x \<ge> c * x"
using \<open>\<alpha> \<in> {0<..<1}\<close> by (intro exI[of _ \<alpha>]) auto
have S_nonneg: "S x \<ge> 0" for x
unfolding S_def sum_upto_def by (intro sum_nonneg a_nonneg) auto
have "eventually (\<lambda>x. \<bar>S x\<bar> \<ge> \<alpha> * \<bar>x\<bar>) at_top"
using eventually_ge_at_top[of "max 0 (1 / \<alpha>)"]
proof eventually_elim
case (elim x)
with S_ge[of x] elim show ?case by auto
qed
hence "S \<in> \<Omega>(\<lambda>x. x)"
using \<open>\<alpha> \<in> {0<..<1}\<close> by (intro landau_omega.bigI[of \<alpha>]) auto
moreover have "S \<in> O(\<lambda>x. x)"
proof (intro bigoI eventually_mono[OF eventually_ge_at_top[of 0]])
fix x :: real assume "x \<ge> 0"
thus "norm (S x) \<le> c1 * norm x"
using c1(2)[of x] by (auto simp: S_nonneg)
qed
ultimately show "S \<in> \<Theta>(\<lambda>x. x)"
by (intro bigthetaI)
qed
end
subsection \<open>Applications to the Chebyshev functions\<close>
(* 3.16 *)
text \<open>
We can now apply Shapiro's Tauberian theorem to \<^term>\<open>\<psi>\<close> and \<^term>\<open>\<theta>\<close>.
\<close>
lemma dirichlet_prod_mangoldt1_floor_bigo:
includes prime_counting_notation
shows "(\<lambda>x. dirichlet_prod' (\<lambda>n. ind prime n * ln n) floor x - x * ln x) \<in> O(\<lambda>x. x)"
proof -
\<comment> \<open>This is perhaps a somewhat roundabout way of proving this statement. We show it using
    the asymptotics of \<open>\<MM>\<close>, namely $\mathfrak{M}(x) = \ln x + O(1)$,
    which we proved before (a bit of work, but not that much).
    Apostol, on the other hand, shows the following statement first and then deduces the
    asymptotics of \<open>\<MM>\<close> with Shapiro's Tauberian theorem instead. This might save a bit of
    work, but the difference is probably negligible.\<close>
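  \<comment> \<open>The error term \<open>R\<close> defined below collects the fractional parts; it is nonnegative
      and bounded above by \<open>\<theta> x < ln 4 * x\<close>, so it lies in \<open>O(x)\<close>.\<close>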
define R where "R = (\<lambda>x. sum_upto (\<lambda>i. ind prime i * ln i * frac (x / i)) x)"
have *: "R x \<in> {0..ln 4 * x}" if "x \<ge> 1" for x
proof -
have "R x \<le> \<theta> x"
unfolding R_def prime_sum_upto_altdef1 sum_upto_def \<theta>_def
by (intro sum_mono) (auto simp: ind_def less_imp_le[OF frac_lt_1] dest!: prime_gt_1_nat)
also have "\<dots> < ln 4 * x"
by (rule \<theta>_upper_bound) fact+
finally have "R x \<le> ln 4 * x" by auto
moreover have "R x \<ge> 0" unfolding R_def sum_upto_def
by (intro sum_nonneg mult_nonneg_nonneg) (auto simp: ind_def)
ultimately show ?thesis by auto
qed
have "eventually (\<lambda>x. \<bar>R x\<bar> \<le> ln 4 * \<bar>x\<bar>) at_top"
using eventually_ge_at_top[of 1] by eventually_elim (use * in auto)
hence "R \<in> O(\<lambda>x. x)" by (intro landau_o.bigI[of "ln 4"]) auto
have "(\<lambda>x. dirichlet_prod' (\<lambda>n. ind prime n * ln n) floor x - x * ln x) =
(\<lambda>x. x * (\<MM> x - ln x) - R x)"
by (auto simp: primes_M_def dirichlet_prod'_def prime_sum_upto_altdef1 sum_upto_def
frac_def sum_subtractf sum_distrib_left sum_distrib_right algebra_simps R_def)
also have "\<dots> \<in> O(\<lambda>x. x)"
proof (rule sum_in_bigo)
have "(\<lambda>x. x * (\<MM> x - ln x)) \<in> O(\<lambda>x. x * 1)"
by (intro landau_o.big.mult mertens_bounded) auto
thus "(\<lambda>x. x * (\<MM> x - ln x)) \<in> O(\<lambda>x. x)" by simp
qed fact+
finally show ?thesis .
qed
lemma dirichlet_prod'_mangoldt_floor_asymptotics:
"(\<lambda>x. dirichlet_prod' mangoldt floor x - x * ln x + x) \<in> O(ln)"
proof -
have "dirichlet_prod' mangoldt floor = (\<lambda>x. sum_upto ln x)"
unfolding sum_upto_ln_conv_sum_upto_mangoldt dirichlet_prod'_def
by (intro sum_upto_cong' ext) auto
hence "(\<lambda>x. dirichlet_prod' mangoldt floor x - x * ln x + x) = (\<lambda>x. sum_upto ln x - x * ln x + x)"
by simp
also have "\<dots> \<in> O(ln)"
by (rule sum_upto_ln_stirling_weak_bigo)
finally show "(\<lambda>x. dirichlet_prod' mangoldt (\<lambda>x. real_of_int \<lfloor>x\<rfloor>) x - x * ln x + x) \<in> O(ln)" .
qed
(* 4.9 *)
interpretation \<psi>: shapiro_tauberian mangoldt "sum_upto (\<lambda>n. mangoldt n / n)" primes_psi
"dirichlet_prod' mangoldt floor"
proof unfold_locales
have "dirichlet_prod' mangoldt floor = (\<lambda>x. sum_upto ln x)"
unfolding sum_upto_ln_conv_sum_upto_mangoldt dirichlet_prod'_def
by (intro sum_upto_cong' ext) auto
hence "(\<lambda>x. dirichlet_prod' mangoldt floor x - x * ln x + x) = (\<lambda>x. sum_upto ln x - x * ln x + x)"
by simp
also have "\<dots> \<in> O(ln)"
by (rule sum_upto_ln_stirling_weak_bigo)
also have "ln \<in> O(\<lambda>x::real. x)" by real_asymp
finally have "(\<lambda>x. dirichlet_prod' mangoldt (\<lambda>x. real_of_int \<lfloor>x\<rfloor>) x - x * ln x + x - x)
\<in> O(\<lambda>x. x)" by (rule sum_in_bigo) auto
thus "(\<lambda>x. dirichlet_prod' mangoldt (\<lambda>x. real_of_int \<lfloor>x\<rfloor>) x - x * ln x) \<in> O(\<lambda>x. x)" by simp
qed (simp_all add: primes_psi_def mangoldt_nonneg)
thm \<psi>.asymptotics \<psi>.upper \<psi>.lower
(* 4.10 *)
interpretation \<theta>: shapiro_tauberian "\<lambda>n. ind prime n * ln n"
"sum_upto (\<lambda>n. ind prime n * ln n / n)" primes_theta "dirichlet_prod' (\<lambda>n. ind prime n * ln n) floor"
proof unfold_locales
fix n :: nat show "ind prime n * ln n \<ge> 0"
by (auto simp: ind_def dest: prime_gt_1_nat)
next
show "(\<lambda>x. dirichlet_prod' (\<lambda>n. ind prime n * ln n) floor x - x * ln x) \<in> O(\<lambda>x. x)"
by (rule dirichlet_prod_mangoldt1_floor_bigo)
qed (simp_all add: primes_theta_def mangoldt_nonneg prime_sum_upto_altdef1[abs_def])
thm \<theta>.asymptotics \<theta>.upper \<theta>.lower
(* 4.11 *)
lemma sum_upto_\<psi>_x_over_n_asymptotics:
"(\<lambda>x. sum_upto (\<lambda>n. primes_psi (x / n)) x - x * ln x + x) \<in> O(ln)"
and sum_upto_\<theta>_x_over_n_asymptotics:
"(\<lambda>x. sum_upto (\<lambda>n. primes_theta (x / n)) x - x * ln x) \<in> O(\<lambda>x. x)"
using dirichlet_prod_mangoldt1_floor_bigo dirichlet_prod'_mangoldt_floor_asymptotics
by (simp_all add: dirichlet_prod'_floor_conv_sum_upto primes_theta_def
primes_psi_def prime_sum_upto_altdef1)
end |
tup = (1,2,3)
@show typeof(tup)
@show tup[1]
for t in tup
println(t)
end
#=
tup[1]=9
ERROR: LoadError: MethodError: no method matching setindex!(::Tuple{Int64,Int64,Int64}, ::Int64, ::Int64)
=#
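# Tuples are immutable, so "changing" an entry means building a new tuple.
# A minimal sketch (tup2 is not part of the original script):
tup2 = (9, tup[2:end]...)   # splat the remaining elements into a fresh tuple
@show tup2                  # (9, 2, 3)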
namedtup = (integer = 3, realnumber = 3.14, complexnumber = 2 + 3im)
@show namedtup.integer
@show namedtup.realnumber
@show namedtup.complexnumber
@show namedtup[:integer]
@show namedtup[:realnumber]
@show namedtup[:complexnumber]
for k in keys(namedtup)
println(k)
end
for value in values(namedtup)
println(value)
end
for value in namedtup
println(value)
end
for (k,v) in pairs(namedtup)
println(k,v)
end
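# NamedTuples are immutable as well; an "updated" copy can be built with merge.
# A minimal sketch (updated is not part of the original script):
updated = merge(namedtup, (integer = 4,))
@show updated.integer       # 4; the other fields are unchanged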
|
locate port P at "incorrect.fpp"
port P
|
[STATEMENT]
theorem SC_siso_imp_SC_ZObis[intro]: "SC_siso c \<Longrightarrow> SC_ZObis c"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SC_siso c \<Longrightarrow> SC_ZObis c
[PROOF STEP]
by (induct c) auto |
MODULE digit_I
INTERFACE
!...Generated by Pacific-Sierra Research 77to90 4.4G 09:34:53 03/09/06
REAL(KIND(0.0D0)) FUNCTION digit (STRING, ISTART)
CHARACTER (LEN = *), INTENT(IN) :: STRING
INTEGER, INTENT(IN) :: ISTART
END FUNCTION
END INTERFACE
END MODULE
|
[STATEMENT]
lemma varsPB_sappend[simp]:
assumes 1: "\<Phi>1 \<noteq> {}" and 2: "\<Phi>2 \<noteq> {}"
shows "varsPB (\<Phi>1 @@ \<Phi>2) = varsPB \<Phi>1 \<union> varsPB \<Phi>2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. varsPB (\<Phi>1 @@ \<Phi>2) = varsPB \<Phi>1 \<union> varsPB \<Phi>2
[PROOF STEP]
proof safe
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
3. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
fix x
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
3. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
3. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
assume "x \<in> varsPB \<Phi>1"
[PROOF STATE]
proof (state)
this:
x \<in> varsPB \<Phi>1
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
3. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> varsPB \<Phi>1
[PROOF STEP]
obtain c1 c2 where "x \<in> varsC c1" and "c1 \<in> \<Phi>1" and "c2 \<in> \<Phi>2"
[PROOF STATE]
proof (prove)
using this:
x \<in> varsPB \<Phi>1
goal (1 subgoal):
1. (\<And>c1 c2. \<lbrakk>x \<in> varsC c1; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
x \<in> varsPB \<Phi>1
\<Phi>2 \<noteq> {}
goal (1 subgoal):
1. (\<And>c1 c2. \<lbrakk>x \<in> varsC c1; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding varsPB_def
[PROOF STATE]
proof (prove)
using this:
x \<in> \<Union> {varsC c |c. c \<in> \<Phi>1}
\<Phi>2 \<noteq> {}
goal (1 subgoal):
1. (\<And>c1 c2. \<lbrakk>x \<in> varsC c1; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> varsC c1
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (3 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
3. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
thus "x \<in> varsPB (\<Phi>1 @@ \<Phi>2)"
[PROOF STATE]
proof (prove)
using this:
x \<in> varsC c1
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (1 subgoal):
1. x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
unfolding sappend_def varsPB_def
[PROOF STATE]
proof (prove)
using this:
x \<in> varsC c1
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (1 subgoal):
1. x \<in> \<Union> {varsC c |c. c \<in> {al @ bl |al bl. al \<in> \<Phi>1 \<and> bl \<in> \<Phi>2}}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
goal (2 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
goal (2 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
x \<in> varsPB \<Phi>1 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
goal (2 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
assume "x \<in> varsPB \<Phi>2"
[PROOF STATE]
proof (state)
this:
x \<in> varsPB \<Phi>2
goal (2 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<in> varsPB \<Phi>2
[PROOF STEP]
obtain c1 c2 where "x \<in> varsC c2" and "c1 \<in> \<Phi>1" and "c2 \<in> \<Phi>2"
[PROOF STATE]
proof (prove)
using this:
x \<in> varsPB \<Phi>2
goal (1 subgoal):
1. (\<And>c2 c1. \<lbrakk>x \<in> varsC c2; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
x \<in> varsPB \<Phi>2
\<Phi>1 \<noteq> {}
goal (1 subgoal):
1. (\<And>c2 c1. \<lbrakk>x \<in> varsC c2; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding varsPB_def
[PROOF STATE]
proof (prove)
using this:
x \<in> \<Union> {varsC c |c. c \<in> \<Phi>2}
\<Phi>1 \<noteq> {}
goal (1 subgoal):
1. (\<And>c2 c1. \<lbrakk>x \<in> varsC c2; c1 \<in> \<Phi>1; c2 \<in> \<Phi>2\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> varsC c2
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (2 subgoals):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
2. \<And>x. x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
thus "x \<in> varsPB (\<Phi>1 @@ \<Phi>2)"
[PROOF STATE]
proof (prove)
using this:
x \<in> varsC c2
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (1 subgoal):
1. x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
[PROOF STEP]
unfolding sappend_def varsPB_def
[PROOF STATE]
proof (prove)
using this:
x \<in> varsC c2
c1 \<in> \<Phi>1
c2 \<in> \<Phi>2
goal (1 subgoal):
1. x \<in> \<Union> {varsC c |c. c \<in> {al @ bl |al bl. al \<in> \<Phi>1 \<and> bl \<in> \<Phi>2}}
[PROOF STEP]
by fastforce
[PROOF STATE]
proof (state)
this:
x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
x \<in> varsPB \<Phi>2 \<Longrightarrow> x \<in> varsPB (\<Phi>1 @@ \<Phi>2)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> varsPB (\<Phi>1 @@ \<Phi>2); x \<notin> varsPB \<Phi>2\<rbrakk> \<Longrightarrow> x \<in> varsPB \<Phi>1
[PROOF STEP]
qed(unfold varsPB_def sappend_def, fastforce) |
module FractalStream.Models
( Coordinate
, ComplexParametric1d
) where
import Lang.Numbers
type family Coordinate model :: *
data ComplexParametric1d
{-
import Data.Word
import Data.Complex
newtype ViewCoordinate a = ViewCoordinate a
data ParameterType = N | Z | R | C
data Parameter = Parameter
{ parameterType :: !ParameterType
, parameterName :: !String
, parameterDesc :: !String
}
data Group = Group
{ groupTitle :: !String
, groupParams :: [Parameter]
}
epsilon :: Parameter
epsilon = Parameter
{ parameterType = R
, parameterName = "Epsilon"
, parameterDesc = concat
[ "How close together two complex numbers should"
, " be in order for them to be considered equal?" ]
}
infinity :: Parameter
infinity = Parameter
{ parameterType = R
, parameterName = "Infinity"
, parameterDesc = concat
[ "How large a number should be in order to"
, " consider it escaped / near infinity?" ]
}
maxIter :: Parameter
maxIter = Parameter
{ parameterType = N
, parameterName = "Maximum iteration count"
, parameterDesc = concat
[ "How many iterations should we try before giving up?" ]
}
data Int32
data ParameterDesc where
= Param Parameter
|
complexParametric1d
= viewCoordinate @(Complex Double) "C"
<> parameter @Int32 "Maximum iteration count"
<> parameter @Double "Epsilon"
<> parameter @Double "Infinity"
complexParametricDynamics1d
= viewCoordinate @(Complex Double) "Z"
<> parameter @(Complex Double) "C"
<> parameter @Int32 "Maximum iteration count"
<> parameter @Double "Epsilon"
<> parameter @Double "Infinity"
complexDynamics1d
= viewCoordinate @(Complex Double) "Z"
<> parameter @Int32 "Maximum iteration count"
<> parameter @Double "Epsilon"
<> parameter @Double "Infinity"
-}
type instance Coordinate ComplexParametric1d = C --(Double, Double)
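-- A usage sketch (hypothetical; 'render' and 'Colour' are not part of this module):
-- downstream code can stay polymorphic in the model and mention its view coordinate
-- only abstractly, e.g.
--
--   render :: Coordinate model -> Colour
--
-- For 'ComplexParametric1d' the instance above makes this simply a function out of 'C'.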
|
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad
-/
import data.pfunctor.univariate.M
/-!
# Quotients of Polynomial Functors
We assume the following:
`P` : a polynomial functor
`W` : its W-type
`M` : its M-type
`F` : a functor
We define:
`q` : `qpf` data, representing `F` as a quotient of `P`
The main goal is to construct:
`fix` : the initial algebra with structure map `F fix → fix`.
`cofix` : the final coalgebra with structure map `cofix → F cofix`
We also show that the composition of qpfs is a qpf, and that the quotient of a qpf
is a qpf.
The present theory focuses on the univariate case for qpfs.
## References
* [Jeremy Avigad, Mario M. Carneiro and Simon Hudon, *Data Types as Quotients of Polynomial Functors*][avigad-carneiro-hudon2019]
-/
universe u
/--
Quotients of polynomial functors.
Roughly speaking, saying that `F` is a quotient of a polynomial functor means that for each `α`,
elements of `F α` are represented by pairs `⟨a, f⟩`, where `a` is the shape of the object and
`f` indexes the relevant elements of `α`, in a suitably natural manner.
-/
class qpf (F : Type u → Type u) [functor F] :=
(P : pfunctor.{u})
(abs : Π {α}, P.obj α → F α)
(repr : Π {α}, F α → P.obj α)
(abs_repr : ∀ {α} (x : F α), abs (repr x) = x)
(abs_map : ∀ {α β} (f : α → β) (p : P.obj α), abs (f <$> p) = f <$> abs p)
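/- As a rough illustrative sketch, every polynomial functor is trivially a qpf over itself,
with `abs` and `repr` both the identity; such an instance would look roughly like

  instance (P : pfunctor.{u}) : qpf P.obj :=
  { P := P,
    abs := λ α, id,
    repr := λ α, id,
    abs_repr := λ α x, rfl,
    abs_map := λ α β f x, rfl }

(not declared here, to avoid interfering with instance search). The interesting qpfs are
genuine quotients, such as `comp` and `quotient_qpf` below. -/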
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr)
/-
Show that every qpf is a lawful functor.
Note: the `functor` class has a field `map_const`, and `is_lawful_functor` states its defining
characterization (`map_const_eq`). Since a qpf says nothing about `map_const`, we can only
propagate it as an assumption.
-/
theorem id_map {α : Type*} (x : F α) : id <$> x = x :=
by { rw ←abs_repr x, cases repr x with a f, rw [←abs_map], reflexivity }
theorem comp_map {α β γ : Type*} (f : α → β) (g : β → γ) (x : F α) :
(g ∘ f) <$> x = g <$> f <$> x :=
by { rw ←abs_repr x, cases repr x with a f, rw [←abs_map, ←abs_map, ←abs_map], reflexivity }
theorem is_lawful_functor
(h : ∀ α β : Type u, @functor.map_const F _ α _ = functor.map ∘ function.const β) :
is_lawful_functor F :=
{ map_const_eq := h,
id_map := @id_map F _ _,
comp_map := @comp_map F _ _ }
/-
Lifting predicates and relations
-/
section
open functor
theorem liftp_iff {α : Type u} (p : α → Prop) (x : F α) :
liftp p x ↔ ∃ a f, x = abs ⟨a, f⟩ ∧ ∀ i, p (f i) :=
begin
split,
{ rintros ⟨y, hy⟩, cases h : repr y with a f,
use [a, λ i, (f i).val], split,
{ rw [←hy, ←abs_repr y, h, ←abs_map], reflexivity },
intro i, apply (f i).property },
rintros ⟨a, f, h₀, h₁⟩, dsimp at *,
use abs (⟨a, λ i, ⟨f i, h₁ i⟩⟩),
rw [←abs_map, h₀], reflexivity
end
theorem liftp_iff' {α : Type u} (p : α → Prop) (x : F α) :
liftp p x ↔ ∃ u : q.P.obj α, abs u = x ∧ ∀ i, p (u.snd i) :=
begin
split,
{ rintros ⟨y, hy⟩, cases h : repr y with a f,
use ⟨a, λ i, (f i).val⟩, dsimp, split,
{ rw [←hy, ←abs_repr y, h, ←abs_map], reflexivity },
intro i, apply (f i).property },
rintros ⟨⟨a, f⟩, h₀, h₁⟩, dsimp at *,
use abs (⟨a, λ i, ⟨f i, h₁ i⟩⟩),
rw [←abs_map, ←h₀], reflexivity
end
theorem liftr_iff {α : Type u} (r : α → α → Prop) (x y : F α) :
liftr r x y ↔ ∃ a f₀ f₁, x = abs ⟨a, f₀⟩ ∧ y = abs ⟨a, f₁⟩ ∧ ∀ i, r (f₀ i) (f₁ i) :=
begin
split,
{ rintros ⟨u, xeq, yeq⟩, cases h : repr u with a f,
use [a, λ i, (f i).val.fst, λ i, (f i).val.snd],
split, { rw [←xeq, ←abs_repr u, h, ←abs_map], refl },
split, { rw [←yeq, ←abs_repr u, h, ←abs_map], refl },
intro i, exact (f i).property },
rintros ⟨a, f₀, f₁, xeq, yeq, h⟩,
use abs ⟨a, λ i, ⟨(f₀ i, f₁ i), h i⟩⟩,
dsimp, split,
{ rw [xeq, ←abs_map], refl },
rw [yeq, ←abs_map], refl
end
end
/-
Think of trees in the `W` type corresponding to `P` as representatives of elements of the
least fixed point of `F`, and assign a canonical representative to each equivalence class
of trees.
-/
/-- does recursion on `q.P.W` using `g : F α → α` rather than `g : P α → α` -/
def recF {α : Type*} (g : F α → α) : q.P.W → α
| ⟨a, f⟩ := g (abs ⟨a, λ x, recF (f x)⟩)
theorem recF_eq {α : Type*} (g : F α → α) (x : q.P.W) :
recF g x = g (abs (recF g <$> x.dest)) :=
by cases x; reflexivity
theorem recF_eq' {α : Type*} (g : F α → α) (a : q.P.A) (f : q.P.B a → q.P.W) :
recF g ⟨a, f⟩ = g (abs (recF g <$> ⟨a, f⟩)) :=
rfl
/-- two trees are equivalent if their F-abstractions are -/
inductive Wequiv : q.P.W → q.P.W → Prop
| ind (a : q.P.A) (f f' : q.P.B a → q.P.W) :
(∀ x, Wequiv (f x) (f' x)) → Wequiv ⟨a, f⟩ ⟨a, f'⟩
| abs (a : q.P.A) (f : q.P.B a → q.P.W) (a' : q.P.A) (f' : q.P.B a' → q.P.W) :
abs ⟨a, f⟩ = abs ⟨a', f'⟩ → Wequiv ⟨a, f⟩ ⟨a', f'⟩
| trans (u v w : q.P.W) : Wequiv u v → Wequiv v w → Wequiv u w
/-- recF is insensitive to the representation -/
theorem recF_eq_of_Wequiv {α : Type u} (u : F α → α) (x y : q.P.W) :
Wequiv x y → recF u x = recF u y :=
begin
cases x with a f, cases y with b g,
intro h, induction h,
case qpf.Wequiv.ind : a f f' h ih
{ simp only [recF_eq', pfunctor.map_eq, function.comp, ih] },
case qpf.Wequiv.abs : a f a' f' h
{ simp only [recF_eq', abs_map, h] },
case qpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact eq.trans ih₁ ih₂ }
end
theorem Wequiv.abs' (x y : q.P.W) (h : abs x.dest = abs y.dest) :
Wequiv x y :=
by { cases x, cases y, apply Wequiv.abs, apply h }
theorem Wequiv.refl (x : q.P.W) : Wequiv x x :=
by cases x with a f; exact Wequiv.abs a f a f rfl
theorem Wequiv.symm (x y : q.P.W) : Wequiv x y → Wequiv y x :=
begin
cases x with a f, cases y with b g,
intro h, induction h,
case qpf.Wequiv.ind : a f f' h ih
{ exact Wequiv.ind _ _ _ ih },
case qpf.Wequiv.abs : a f a' f' h
{ exact Wequiv.abs _ _ _ _ h.symm },
case qpf.Wequiv.trans : x y z e₁ e₂ ih₁ ih₂
{ exact qpf.Wequiv.trans _ _ _ ih₂ ih₁}
end
/-- maps every element of the W type to a canonical representative -/
def Wrepr : q.P.W → q.P.W := recF (pfunctor.W.mk ∘ repr)
theorem Wrepr_equiv (x : q.P.W) : Wequiv (Wrepr x) x :=
begin
induction x with a f ih,
apply Wequiv.trans,
{ change Wequiv (Wrepr ⟨a, f⟩) (pfunctor.W.mk (Wrepr <$> ⟨a, f⟩)),
apply Wequiv.abs',
have : Wrepr ⟨a, f⟩ = pfunctor.W.mk (repr (abs (Wrepr <$> ⟨a, f⟩))) := rfl,
rw [this, pfunctor.W.dest_mk, abs_repr],
reflexivity },
apply Wequiv.ind, exact ih
end
/--
Define the fixed point as the quotient of trees under the equivalence relation `Wequiv`.
-/
def W_setoid : setoid q.P.W :=
⟨Wequiv, @Wequiv.refl _ _ _, @Wequiv.symm _ _ _, @Wequiv.trans _ _ _⟩
local attribute [instance] W_setoid
/-- inductive type defined as initial algebra of a Quotient of Polynomial Functor -/
@[nolint has_nonempty_instance]
def fix (F : Type u → Type u) [functor F] [q : qpf F] := quotient (W_setoid : setoid q.P.W)
/-- recursor of a type defined by a qpf -/
def fix.rec {α : Type*} (g : F α → α) : fix F → α :=
quot.lift (recF g) (recF_eq_of_Wequiv g)
/-- access the underlying W-type of a fixpoint data type -/
def fix_to_W : fix F → q.P.W :=
quotient.lift Wrepr (recF_eq_of_Wequiv (λ x, @pfunctor.W.mk q.P (repr x)))
/-- constructor of a type defined by a qpf -/
def fix.mk (x : F (fix F)) : fix F := quot.mk _ (pfunctor.W.mk (fix_to_W <$> repr x))
/-- destructor of a type defined by a qpf -/
def fix.dest : fix F → F (fix F) := fix.rec (functor.map fix.mk)
theorem fix.rec_eq {α : Type*} (g : F α → α) (x : F (fix F)) :
fix.rec g (fix.mk x) = g (fix.rec g <$> x) :=
have recF g ∘ fix_to_W = fix.rec g,
by { apply funext, apply quotient.ind, intro x, apply recF_eq_of_Wequiv,
rw fix_to_W, apply Wrepr_equiv },
begin
conv { to_lhs, rw [fix.rec, fix.mk], dsimp },
cases h : repr x with a f,
rw [pfunctor.map_eq, recF_eq, ←pfunctor.map_eq, pfunctor.W.dest_mk, ←pfunctor.comp_map,
abs_map, ←h, abs_repr, this]
end
theorem fix.ind_aux (a : q.P.A) (f : q.P.B a → q.P.W) :
fix.mk (abs ⟨a, λ x, ⟦f x⟧⟩) = ⟦⟨a, f⟩⟧ :=
have fix.mk (abs ⟨a, λ x, ⟦f x⟧⟩) = ⟦Wrepr ⟨a, f⟩⟧,
begin
apply quot.sound, apply Wequiv.abs',
rw [pfunctor.W.dest_mk, abs_map, abs_repr, ←abs_map, pfunctor.map_eq],
conv { to_rhs, simp only [Wrepr, recF_eq, pfunctor.W.dest_mk, abs_repr] },
reflexivity
end,
by { rw this, apply quot.sound, apply Wrepr_equiv }
theorem fix.ind_rec {α : Type u} (g₁ g₂ : fix F → α)
(h : ∀ x : F (fix F), g₁ <$> x = g₂ <$> x → g₁ (fix.mk x) = g₂ (fix.mk x)) :
∀ x, g₁ x = g₂ x :=
begin
apply quot.ind,
intro x,
induction x with a f ih,
change g₁ ⟦⟨a, f⟩⟧ = g₂ ⟦⟨a, f⟩⟧,
rw [←fix.ind_aux a f], apply h,
rw [←abs_map, ←abs_map, pfunctor.map_eq, pfunctor.map_eq],
dsimp [function.comp],
congr' with x, apply ih
end
theorem fix.rec_unique {α : Type u} (g : F α → α) (h : fix F → α)
(hyp : ∀ x, h (fix.mk x) = g (h <$> x)) :
fix.rec g = h :=
begin
ext x,
apply fix.ind_rec,
intros x hyp',
rw [hyp, ←hyp', fix.rec_eq]
end
theorem fix.mk_dest (x : fix F) : fix.mk (fix.dest x) = x :=
begin
change (fix.mk ∘ fix.dest) x = id x,
apply fix.ind_rec,
intro x, dsimp,
rw [fix.dest, fix.rec_eq, id_map, comp_map],
intro h, rw h
end
theorem fix.dest_mk (x : F (fix F)) : fix.dest (fix.mk x) = x :=
begin
unfold fix.dest, rw [fix.rec_eq, ←fix.dest, ←comp_map],
conv { to_rhs, rw ←(id_map x) },
congr' with x, apply fix.mk_dest
end
theorem fix.ind (p : fix F → Prop)
(h : ∀ x : F (fix F), liftp p x → p (fix.mk x)) :
∀ x, p x :=
begin
apply quot.ind,
intro x,
induction x with a f ih,
change p ⟦⟨a, f⟩⟧,
rw [←fix.ind_aux a f],
apply h,
rw liftp_iff,
refine ⟨_, _, rfl, _⟩,
apply ih
end
end qpf
/-
Construct the final coalgebra to a qpf.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr)
/-- does recursion on `q.P.M` using `g : α → F α` rather than `g : α → P α` -/
def corecF {α : Type*} (g : α → F α) : α → q.P.M :=
pfunctor.M.corec (λ x, repr (g x))
theorem corecF_eq {α : Type*} (g : α → F α) (x : α) :
pfunctor.M.dest (corecF g x) = corecF g <$> repr (g x) :=
by rw [corecF, pfunctor.M.dest_corec]
/- Equivalence -/
/-- A pre-congruence on q.P.M *viewed as an F-coalgebra*. Not necessarily symmetric. -/
def is_precongr (r : q.P.M → q.P.M → Prop) : Prop :=
∀ ⦃x y⦄, r x y →
abs (quot.mk r <$> pfunctor.M.dest x) = abs (quot.mk r <$> pfunctor.M.dest y)
/-- The maximal congruence on q.P.M -/
def Mcongr : q.P.M → q.P.M → Prop :=
λ x y, ∃ r, is_precongr r ∧ r x y
/-- coinductive type defined as the final coalgebra of a qpf -/
def cofix (F : Type u → Type u) [functor F] [q : qpf F] := quot (@Mcongr F _ q)
instance [inhabited q.P.A] : inhabited (cofix F) := ⟨ quot.mk _ default ⟩
/-- corecursor for type defined by `cofix` -/
def cofix.corec {α : Type*} (g : α → F α) (x : α) : cofix F :=
quot.mk _ (corecF g x)
/-- destructor for type defined by `cofix` -/
def cofix.dest : cofix F → F (cofix F) :=
quot.lift
(λ x, quot.mk Mcongr <$> (abs (pfunctor.M.dest x)))
begin
rintros x y ⟨r, pr, rxy⟩, dsimp,
have : ∀ x y, r x y → Mcongr x y,
{ intros x y h, exact ⟨r, pr, h⟩ },
rw [←quot.factor_mk_eq _ _ this], dsimp,
conv { to_lhs, rw [comp_map, ←abs_map, pr rxy, abs_map, ←comp_map] }
end
theorem cofix.dest_corec {α : Type u} (g : α → F α) (x : α) :
cofix.dest (cofix.corec g x) = cofix.corec g <$> g x :=
begin
conv { to_lhs, rw [cofix.dest, cofix.corec] }, dsimp,
rw [corecF_eq, abs_map, abs_repr, ←comp_map], reflexivity
end
private theorem cofix.bisim_aux
(r : cofix F → cofix F → Prop)
(h' : ∀ x, r x x)
(h : ∀ x y, r x y → quot.mk r <$> cofix.dest x = quot.mk r <$> cofix.dest y) :
∀ x y, r x y → x = y :=
begin
intro x, apply quot.induction_on x, clear x,
intros x y, apply quot.induction_on y, clear y,
intros y rxy,
apply quot.sound,
let r' := λ x y, r (quot.mk _ x) (quot.mk _ y),
have : is_precongr r',
{ intros a b r'ab,
have h₀: quot.mk r <$> quot.mk Mcongr <$> abs (pfunctor.M.dest a) =
quot.mk r <$> quot.mk Mcongr <$> abs (pfunctor.M.dest b) := h _ _ r'ab,
have h₁ : ∀ u v : q.P.M, Mcongr u v → quot.mk r' u = quot.mk r' v,
{ intros u v cuv, apply quot.sound, dsimp [r'], rw quot.sound cuv, apply h' },
let f : quot r → quot r' := quot.lift (quot.lift (quot.mk r') h₁)
begin
intro c, apply quot.induction_on c, clear c,
intros c d, apply quot.induction_on d, clear d,
intros d rcd, apply quot.sound, apply rcd
end,
have : f ∘ quot.mk r ∘ quot.mk Mcongr = quot.mk r' := rfl,
rw [←this, pfunctor.comp_map _ _ f, pfunctor.comp_map _ _ (quot.mk r),
abs_map, abs_map, abs_map, h₀],
rw [pfunctor.comp_map _ _ f, pfunctor.comp_map _ _ (quot.mk r),
abs_map, abs_map, abs_map] },
refine ⟨r', this, rxy⟩
end
theorem cofix.bisim_rel
(r : cofix F → cofix F → Prop)
(h : ∀ x y, r x y → quot.mk r <$> cofix.dest x = quot.mk r <$> cofix.dest y) :
∀ x y, r x y → x = y :=
let r' x y := x = y ∨ r x y in
begin
intros x y rxy,
apply cofix.bisim_aux r',
{ intro x, left, reflexivity },
{ intros x y r'xy,
cases r'xy, { rw r'xy },
have : ∀ x y, r x y → r' x y := λ x y h, or.inr h,
rw ←quot.factor_mk_eq _ _ this, dsimp,
rw [@comp_map _ _ q _ _ _ (quot.mk r), @comp_map _ _ q _ _ _ (quot.mk r)],
rw h _ _ r'xy },
right, exact rxy
end
theorem cofix.bisim
(r : cofix F → cofix F → Prop)
(h : ∀ x y, r x y → liftr r (cofix.dest x) (cofix.dest y)) :
∀ x y, r x y → x = y :=
begin
apply cofix.bisim_rel,
intros x y rxy,
rcases (liftr_iff r _ _).mp (h x y rxy) with ⟨a, f₀, f₁, dxeq, dyeq, h'⟩,
rw [dxeq, dyeq, ←abs_map, ←abs_map, pfunctor.map_eq, pfunctor.map_eq],
congr' 2 with i,
apply quot.sound,
apply h'
end
theorem cofix.bisim' {α : Type*} (Q : α → Prop) (u v : α → cofix F)
(h : ∀ x, Q x → ∃ a f f',
cofix.dest (u x) = abs ⟨a, f⟩ ∧
cofix.dest (v x) = abs ⟨a, f'⟩ ∧
∀ i, ∃ x', Q x' ∧ f i = u x' ∧ f' i = v x') :
∀ x, Q x → u x = v x :=
λ x Qx,
let R := λ w z : cofix F, ∃ x', Q x' ∧ w = u x' ∧ z = v x' in
cofix.bisim R
(λ x y ⟨x', Qx', xeq, yeq⟩,
begin
rcases h x' Qx' with ⟨a, f, f', ux'eq, vx'eq, h'⟩,
rw liftr_iff,
refine ⟨a, f, f', xeq.symm ▸ ux'eq, yeq.symm ▸ vx'eq, h'⟩,
end)
_ _ ⟨x, Qx, rfl, rfl⟩
end qpf
/-
Composition of qpfs.
-/
namespace qpf
variables {F₂ : Type u → Type u} [functor F₂] [q₂ : qpf F₂]
variables {F₁ : Type u → Type u} [functor F₁] [q₁ : qpf F₁]
include q₂ q₁
/-- composition of qpfs gives another qpf -/
def comp : qpf (functor.comp F₂ F₁) :=
{ P := pfunctor.comp (q₂.P) (q₁.P),
abs := λ α,
begin
dsimp [functor.comp],
intro p,
exact abs ⟨p.1.1, λ x, abs ⟨p.1.2 x, λ y, p.2 ⟨x, y⟩⟩⟩
end,
repr := λ α,
begin
dsimp [functor.comp],
intro y,
refine ⟨⟨(repr y).1, λ u, (repr ((repr y).2 u)).1⟩, _⟩,
dsimp [pfunctor.comp],
intro x,
exact (repr ((repr y).2 x.1)).snd x.2
end,
abs_repr := λ α,
begin
abstract
{ dsimp [functor.comp],
intro x,
conv { to_rhs, rw ←abs_repr x},
cases h : repr x with a f,
dsimp,
congr' with x,
cases h' : repr (f x) with b g,
dsimp, rw [←h', abs_repr] }
end,
abs_map := λ α β f,
begin
abstract
{ dsimp [functor.comp, pfunctor.comp],
intro p,
cases p with a g, dsimp,
cases a with b h, dsimp,
symmetry,
transitivity,
symmetry,
apply abs_map,
congr,
rw pfunctor.map_eq,
dsimp [function.comp],
simp [abs_map],
split,
reflexivity,
ext x,
rw ←abs_map,
reflexivity }
end }
end qpf
/-
Quotients.
We show that if `F` is a qpf and `G` is a suitable quotient of `F`, then `G` is a qpf.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
variables {G : Type u → Type u} [functor G]
variable {FG_abs : Π {α}, F α → G α}
variable {FG_repr : Π {α}, G α → F α}
/-- Given a qpf `F` and a well-behaved surjection `FG_abs` from `F α` to
`G α`, `G` is a qpf. We can consider `G` a quotient of `F` where
elements `x y : F α` are in the same equivalence class if
`FG_abs x = FG_abs y`. -/
def quotient_qpf
(FG_abs_repr : Π {α} (x : G α), FG_abs (FG_repr x) = x)
(FG_abs_map : ∀ {α β} (f : α → β) (x : F α), FG_abs (f <$> x) = f <$> FG_abs x) :
qpf G :=
{ P := q.P,
abs := λ {α} p, FG_abs (abs p),
repr := λ {α} x, repr (FG_repr x),
abs_repr := λ {α} x, by rw [abs_repr, FG_abs_repr],
abs_map := λ {α β} f x, by { rw [abs_map, FG_abs_map] } }
end qpf
/-
Support.
-/
namespace qpf
variables {F : Type u → Type u} [functor F] [q : qpf F]
include q
open functor (liftp liftr supp)
open set
theorem mem_supp {α : Type u} (x : F α) (u : α) :
u ∈ supp x ↔ ∀ a f, abs ⟨a, f⟩ = x → u ∈ f '' univ :=
begin
rw [supp], dsimp, split,
{ intros h a f haf,
have : liftp (λ u, u ∈ f '' univ) x,
{ rw liftp_iff, refine ⟨a, f, haf.symm, λ i, mem_image_of_mem _ (mem_univ _)⟩ },
exact h this },
intros h p, rw liftp_iff,
rintros ⟨a, f, xeq, h'⟩,
rcases h a f xeq.symm with ⟨i, _, hi⟩,
rw ←hi, apply h'
end
theorem supp_eq {α : Type u} (x : F α) : supp x = { u | ∀ a f, abs ⟨a, f⟩ = x → u ∈ f '' univ } :=
by ext; apply mem_supp
theorem has_good_supp_iff {α : Type u} (x : F α) :
(∀ p, liftp p x ↔ ∀ u ∈ supp x, p u) ↔
∃ a f, abs ⟨a, f⟩ = x ∧ ∀ a' f', abs ⟨a', f'⟩ = x → f '' univ ⊆ f' '' univ :=
begin
split,
{ intro h,
have : liftp (supp x) x, by rw h; intro u; exact id,
rw liftp_iff at this, rcases this with ⟨a, f, xeq, h'⟩,
refine ⟨a, f, xeq.symm, _⟩,
intros a' f' h'',
rintros u ⟨i, _, hfi⟩,
have : u ∈ supp x, by rw ←hfi; apply h',
exact (mem_supp x u).mp this _ _ h'' },
rintros ⟨a, f, xeq, h⟩ p, rw liftp_iff, split,
{ rintros ⟨a', f', xeq', h'⟩ u usuppx,
rcases (mem_supp x u).mp usuppx a' f' xeq'.symm with ⟨i, _, f'ieq⟩,
rw ←f'ieq, apply h' },
intro h',
refine ⟨a, f, xeq.symm, _⟩, intro i,
apply h', rw mem_supp,
intros a' f' xeq',
apply h a' f' xeq',
apply mem_image_of_mem _ (mem_univ _)
end
variable (q)
/-- A qpf is said to be uniform if all polynomial functor representations
of a single value have the same range. -/
def is_uniform : Prop := ∀ ⦃α : Type u⦄ (a a' : q.P.A)
(f : q.P.B a → α) (f' : q.P.B a' → α),
abs ⟨a, f⟩ = abs ⟨a', f'⟩ → f '' univ = f' '' univ
/-- does `abs` preserve `liftp`? -/
def liftp_preservation : Prop :=
∀ ⦃α⦄ (p : α → Prop) (x : q.P.obj α), liftp p (abs x) ↔ liftp p x
/-- does `abs` preserve `supp`? -/
def supp_preservation : Prop :=
∀ ⦃α⦄ (x : q.P.obj α), supp (abs x) = supp x
variable [q]
theorem supp_eq_of_is_uniform (h : q.is_uniform) {α : Type u} (a : q.P.A) (f : q.P.B a → α) :
supp (abs ⟨a, f⟩) = f '' univ :=
begin
ext u, rw [mem_supp], split,
{ intro h', apply h' _ _ rfl },
intros h' a' f' e,
rw [←h _ _ _ _ e.symm], apply h'
end
theorem liftp_iff_of_is_uniform (h : q.is_uniform) {α : Type u} (x : F α) (p : α → Prop) :
liftp p x ↔ ∀ u ∈ supp x, p u :=
begin
rw [liftp_iff, ←abs_repr x],
cases repr x with a f, split,
{ rintros ⟨a', f', abseq, hf⟩ u,
rw [supp_eq_of_is_uniform h, h _ _ _ _ abseq],
rintros ⟨i, _, hi⟩, rw ←hi, apply hf },
intro h',
refine ⟨a, f, rfl, λ i, h' _ _⟩,
rw supp_eq_of_is_uniform h,
exact ⟨i, mem_univ i, rfl⟩
end
theorem supp_map (h : q.is_uniform) {α β : Type u} (g : α → β) (x : F α) :
supp (g <$> x) = g '' supp x :=
begin
rw ←abs_repr x, cases repr x with a f, rw [←abs_map, pfunctor.map_eq],
rw [supp_eq_of_is_uniform h, supp_eq_of_is_uniform h, image_comp]
end
theorem supp_preservation_iff_uniform :
q.supp_preservation ↔ q.is_uniform :=
begin
split,
{ intros h α a a' f f' h',
rw [← pfunctor.supp_eq,← pfunctor.supp_eq,← h,h',h] },
{ rintros h α ⟨a,f⟩, rwa [supp_eq_of_is_uniform,pfunctor.supp_eq], }
end
theorem supp_preservation_iff_liftp_preservation :
q.supp_preservation ↔ q.liftp_preservation :=
begin
split; intro h,
{ rintros α p ⟨a,f⟩,
have h' := h, rw supp_preservation_iff_uniform at h',
dsimp only [supp_preservation,supp] at h,
rwa [liftp_iff_of_is_uniform,supp_eq_of_is_uniform,pfunctor.liftp_iff'];
try { assumption },
{ simp only [image_univ, mem_range, exists_imp_distrib],
split; intros; subst_vars; solve_by_elim } },
{ rintros α ⟨a,f⟩,
simp only [liftp_preservation] at h,
simp only [supp,h] }
end
theorem liftp_preservation_iff_uniform :
q.liftp_preservation ↔ q.is_uniform :=
by rw [← supp_preservation_iff_liftp_preservation, supp_preservation_iff_uniform]
end qpf
|
The allure of industrial pieces is their celebration of raw materials and their use of structure as decoration. Bolts become accents and brackets become design forms. This dining table set combines metal and wood in a gorgeous display of industrial style. Slight cracks and the wood grain provide all the detailing necessary and pair beautifully with the metal framework. |
module Vimscript.FFI
%default total
mutual
public export
data VimFn t = MkVimFn t
public export
data VIM_FnTypes : Type -> Type where
VIM_Fn : VIM_Types s -> VIM_FnTypes t -> VIM_FnTypes (s -> t)
VIM_FnIO : VIM_Types t -> VIM_FnTypes (IO' l t)
VIM_FnBase : VIM_Types t -> VIM_FnTypes t
public export
data VIM_Types : Type -> Type where
VIM_Str : VIM_Types String
VIM_Int : VIM_Types Int
VIM_Float : VIM_Types Double
VIM_Unit : VIM_Types ()
VIM_Raw : VIM_Types (Raw a)
VIM_FnT : VIM_FnTypes t -> VIM_Types (VimFn t)
public export
data VIM_Scope
= VIM_Local
| VIM_Global
public export
data VIM_MutableRef
= VIM_Option
| VIM_ScopedOption VIM_Scope
| VIM_Register
public export
VIM_GlobalOption : VIM_MutableRef
VIM_GlobalOption = VIM_ScopedOption VIM_Global
public export
VIM_LocalOption : VIM_MutableRef
VIM_LocalOption = VIM_ScopedOption VIM_Local
public export
data VIM_Foreign
= VIM_Echo
| VIM_ListEmpty
| VIM_ListIndex
| VIM_ListCons
| VIM_ListSnoc
| VIM_ListConcat
| VIM_ListSetAt
| VIM_BuiltIn String
| VIM_Get VIM_MutableRef String
| VIM_Set VIM_MutableRef String
| VIM_Toggle VIM_MutableRef String
%error_reverse
public export
FFI_VIM : FFI
FFI_VIM = MkFFI VIM_Types VIM_Foreign String
%error_reverse
public export
VIM_IO : Type -> Type
VIM_IO = IO' FFI_VIM
IO : Type -> Type
IO a = IO' FFI_VIM a
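{-
  A usage sketch (hypothetical, not declared in this module): a binding for the echo
  operation would presumably be made with Idris's `foreign` primitive, roughly

    echo : String -> VIM_IO ()
    echo s = foreign FFI_VIM VIM_Echo (String -> VIM_IO ()) s

  where `VIM_Echo` selects the foreign operation and the argument/return types are
  checked against `VIM_Types` / `VIM_FnTypes`.
-}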
|
(******************************************************************************
* Orca: A Functional Correctness Verifier for Imperative Programs
* Based on Isabelle/UTP
*
* Copyright (c) 2016-2018 Virginia Tech, USA
* 2016-2018 Technische Universität München, Germany
* 2016-2018 University of York, UK
* 2016-2018 Université Paris-Saclay, Univ. Paris-Sud, France
*
* This software may be distributed and modified according to the terms of
* the GNU Lesser General Public License version 3.0 or any later version.
* Note that NO WARRANTY is provided.
*
* See CONTRIBUTORS, LICENSE and CITATION files for details.
******************************************************************************)
theory algorithms
imports "../Backend/VCG/vcg"
begin
section \<open>setup and makeup!\<close>
sledgehammer_params[stop_on_first,parallel_subgoals, join_subgoals]
no_adhoc_overloading'_all
section \<open>Simple algorithms\<close>
text
\<open>
Through these experiments I want to observe the following problems:
\begin{itemize}
\item I want to deal with the problem of nested existentials (SOLVED).
\item I want to deal with the problem of blow-up due to the semantic machinery coming with lenses (SOLVED).
\item I want to have modularity (NOT SOLVED).
\end{itemize}
\<close>
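(* The loops in the following subsections are annotated with an invariant (INVAR/invr) and a
   variant (VRT/vrt) given as a well-founded measure; the resulting verification conditions
   are discharged by the vcg tactic, run in either strongest-postcondition (sp) or
   weakest-precondition (wp) mode. *)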
subsection \<open>Increment method\<close>
lemma increment_method_sp_H1_H3:
assumes "vwb_lens x"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
x :== 0 ;
INVAR \<guillemotleft>a\<guillemotright> >\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u &x
VRT \<guillemotleft>(measure o Rep_uexpr) (\<guillemotleft>a\<guillemotright> - &x)\<guillemotright>
WHILE &x <\<^sub>u \<guillemotleft>a\<guillemotright> DO x:== (&x + 1) OD
\<lbrace>\<guillemotleft>a\<guillemotright> =\<^sub>u &x\<rbrace>\<^sub>P"
apply (insert assms) (*Make this automatic *)
apply (vcg sp)
done
lemma increment_method_wp_H1_H3:
assumes "vwb_lens x"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
x :== 0 ;
INVAR \<guillemotleft>a\<guillemotright> >\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u &x
VRT \<guillemotleft>(measure o Rep_uexpr) (\<guillemotleft>a\<guillemotright> - &x)\<guillemotright>
WHILE &x <\<^sub>u \<guillemotleft>a\<guillemotright> DO x:== (&x + 1) OD
\<lbrace>\<guillemotleft>a\<guillemotright> =\<^sub>u &x\<rbrace>\<^sub>P"
apply (insert assms) (*Make this automatic *)
apply (vcg wp)
done
lemma increment_method_sp_rel:
assumes "vwb_lens x"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
assign_r x 0 ;;
invr \<guillemotleft>a\<guillemotright> >\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u &x
vrt \<guillemotleft>(measure o Rep_uexpr) (\<guillemotleft>a\<guillemotright> - &x)\<guillemotright>
while\<^sub>\<bottom> &x <\<^sub>u \<guillemotleft>a\<guillemotright> do assign_r x (&x + 1) od
\<lbrace>\<guillemotleft>a\<guillemotright> =\<^sub>u &x\<rbrace>\<^sub>u"
apply (insert assms) (*Make this automatic *)
apply (vcg sp)
done
lemma increment_method_wp_rel:
assumes "vwb_lens x"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
assign_r x 0 ;;
invr \<guillemotleft>a\<guillemotright> >\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u &x
vrt \<guillemotleft>(measure o Rep_uexpr) (\<guillemotleft>a\<guillemotright> - &x)\<guillemotright>
while\<^sub>\<bottom> &x <\<^sub>u \<guillemotleft>a\<guillemotright> do assign_r x (&x + 1) od
\<lbrace>\<guillemotleft>a\<guillemotright> =\<^sub>u &x\<rbrace>\<^sub>u"
apply (insert assms) (*Make this automatic *)
apply (vcg wp)
done
subsection \<open>even count program\<close>
lemma even_count_gen_sp_H1_H3:
assumes "lens_indep_all [i,j]"
assumes "vwb_lens i" "vwb_lens j"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0 \<rbrace>
i :== \<guillemotleft>0::int\<guillemotright>;
j :== 0 ;
INVAR (&j =\<^sub>u (&i + 1) div \<guillemotleft>2\<guillemotright> \<and> &i \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
VRT \<guillemotleft>measure (nat o (Rep_uexpr (\<guillemotleft>a\<guillemotright> - &i)))\<guillemotright>
WHILE &i <\<^sub>u \<guillemotleft>a\<guillemotright>
DO
IF &i mod \<guillemotleft>2\<guillemotright> =\<^sub>u 0
THEN j :== (&j + 1)
ELSE SKIP
FI;
i :== (&i + 1)
OD
\<lbrace>&j =\<^sub>u (\<guillemotleft>a\<guillemotright> + 1)div \<guillemotleft>2\<guillemotright>\<rbrace>\<^sub>P"
apply (insert assms)(*Make this automatic*)
apply (vcg sp)
apply presburger+
done
lemma even_count_gen'_sp_H1_H3:
assumes "lens_indep_all [i,j]"
assumes "vwb_lens i" "vwb_lens j"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
i :== \<guillemotleft>0::int\<guillemotright>;
j :== 0 ;
INVAR (&j =\<^sub>u (&i + 1) div 2 \<and> &i \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
VRT \<guillemotleft>measure (nat o (Rep_uexpr (\<guillemotleft>a\<guillemotright> - &i)))\<guillemotright>
WHILE &i <\<^sub>u \<guillemotleft>a\<guillemotright>
DO
IF &i mod 2 =\<^sub>u 0
THEN j :== (&j + 1)
ELSE SKIP
FI;
i :== (&i + 1)
OD
\<lbrace>&j =\<^sub>u (\<guillemotleft>a\<guillemotright> + 1)div 2\<rbrace>\<^sub>P"
apply (insert assms)(*Make this automatic*)
apply (vcg sp)
apply (simp_all add: zdiv_zadd1_eq)
done
lemma even_count_gen'_wp_H1_H3:
assumes "lens_indep_all [i,j]"
assumes "vwb_lens i" "vwb_lens j"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
i :== \<guillemotleft>0::int\<guillemotright>;
j :== 0 ;
INVAR (&j =\<^sub>u (&i + 1) div 2 \<and> &i \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
VRT \<guillemotleft>measure (nat o (Rep_uexpr (\<guillemotleft>a\<guillemotright> - &i)))\<guillemotright>
WHILE &i <\<^sub>u \<guillemotleft>a\<guillemotright>
DO
IF &i mod 2 =\<^sub>u 0
THEN j :== (&j + 1)
ELSE SKIP
FI;
i :== (&i + 1)
OD
\<lbrace>&j =\<^sub>u (\<guillemotleft>a\<guillemotright> + 1)div 2\<rbrace>\<^sub>P"
apply (insert assms)(*Make this automatic*)
apply (vcg wp)
apply simp_all
using dvd_imp_mod_0 odd_succ_div_two
apply blast
done
lemma even_count_gen'_sp_rel:
assumes "lens_indep_all [i,j]"
assumes "vwb_lens i" "vwb_lens j"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
assign_r i \<guillemotleft>0::int\<guillemotright>;;
assign_r j 0 ;;
invr (&j =\<^sub>u (&i + 1) div 2 \<and> &i \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
vrt \<guillemotleft>measure (nat o (Rep_uexpr (\<guillemotleft>a\<guillemotright> - &i)))\<guillemotright>
while\<^sub>\<bottom> &i <\<^sub>u \<guillemotleft>a\<guillemotright>
do
bif &i mod 2 =\<^sub>u 0
then assign_r j (&j + 1)
else SKIP\<^sub>r
eif;;
assign_r i (&i + 1)
od
\<lbrace>&j =\<^sub>u (\<guillemotleft>a\<guillemotright> + 1)div 2\<rbrace>\<^sub>u"
apply (insert assms)(*Make this automatic*)
apply (vcg sp)
apply (simp_all add: zdiv_zadd1_eq)
done
lemma even_count_gen'_wp_rel:
assumes "lens_indep_all [i,j]"
assumes "vwb_lens i" "vwb_lens j"
shows
"\<lbrace>\<guillemotleft>a\<guillemotright> >\<^sub>u 0\<rbrace>
assign_r i \<guillemotleft>0::int\<guillemotright>;;
assign_r j 0 ;;
invr (&j =\<^sub>u (&i + 1) div 2 \<and> &i \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
vrt \<guillemotleft>measure (nat o (Rep_uexpr (\<guillemotleft>a\<guillemotright> - &i)))\<guillemotright>
while\<^sub>\<bottom> &i <\<^sub>u \<guillemotleft>a\<guillemotright>
do
bif &i mod 2 =\<^sub>u 0
then assign_r j (&j + 1)
else SKIP\<^sub>r
eif;;
assign_r i (&i + 1)
od
\<lbrace>&j =\<^sub>u (\<guillemotleft>a\<guillemotright> + 1)div 2\<rbrace>\<^sub>u"
apply (insert assms)(*Make this automatic*)
apply (vcg wp)
apply simp_all
using dvd_imp_mod_0 odd_succ_div_two
apply blast
done
subsection \<open>sqrt program\<close>
definition Isqrt :: "int \<Rightarrow> int \<Rightarrow> bool"
where "Isqrt n\<^sub>0 r \<equiv> 0\<le>r \<and> (r-1)\<^sup>2 \<le> n\<^sub>0"
lemma Isqrt_aux:
"0 \<le> n\<^sub>0 \<Longrightarrow> Isqrt n\<^sub>0 1"
"\<lbrakk>0 \<le> n\<^sub>0; r * r \<le> n\<^sub>0; Isqrt n\<^sub>0 r\<rbrakk> \<Longrightarrow> Isqrt n\<^sub>0 (r + 1)"
"\<lbrakk>0 \<le> n\<^sub>0; \<not> r * r \<le> n\<^sub>0; Isqrt n\<^sub>0 r\<rbrakk> \<Longrightarrow> (r - 1)\<^sup>2 \<le> n\<^sub>0 \<and> n\<^sub>0 < r\<^sup>2"
"Isqrt n\<^sub>0 r \<Longrightarrow> r * r \<le> n\<^sub>0 \<Longrightarrow> r\<le>n\<^sub>0"
"\<lbrakk>0 \<le> n\<^sub>0; \<not> r * r \<le> n\<^sub>0; Isqrt n\<^sub>0 r\<rbrakk> \<Longrightarrow> 0 < r"
apply (auto simp: Isqrt_def power2_eq_square algebra_simps)
by (smt combine_common_factor mult_right_mono semiring_normalization_rules(3))
lemma sqrt_prog_correct_sp_H1_H3:
assumes "vwb_lens r"
shows
"\<lbrace>0 \<le>\<^sub>u \<guillemotleft>a\<guillemotright>\<rbrace>
r :== 1 ;
INVAR 0\<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> bop Isqrt \<guillemotleft>a\<guillemotright> (&r)
VRT \<guillemotleft>measure (nat o (Rep_uexpr ((\<guillemotleft>a\<guillemotright> + 1) - &r)))\<guillemotright>
WHILE (&r * &r \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
DO
r :== (&r + 1)
OD;
r :== (&r - 1)
\<lbrace>0\<le>\<^sub>u &r \<and> uop power2 (&r) \<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> \<guillemotleft>a\<guillemotright> <\<^sub>u uop power2 (&r + 1)\<rbrace>\<^sub>P"
apply (insert assms)
supply Isqrt_aux [simp]
apply (vcg sp)
done
lemma sqrt_prog_correct_wp_H1_H3:
assumes "vwb_lens r"
shows
"\<lbrace>0 \<le>\<^sub>u \<guillemotleft>a\<guillemotright>\<rbrace>
r :== 1 ;
INVAR 0\<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> bop Isqrt \<guillemotleft>a\<guillemotright> (&r)
VRT \<guillemotleft>measure (nat o (Rep_uexpr ((\<guillemotleft>a\<guillemotright> + 1) - &r)))\<guillemotright>
WHILE (&r * &r \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
DO
r :== (&r + 1)
OD;
r :== (&r - 1)
\<lbrace>0\<le>\<^sub>u &r \<and> uop power2 (&r) \<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> \<guillemotleft>a\<guillemotright> <\<^sub>u uop power2 (&r + 1)\<rbrace>\<^sub>P"
apply (insert assms)
supply Isqrt_aux [simp]
apply (vcg wp)
done
lemma sqrt_prog_correct_sp_rel:
assumes "vwb_lens r"
shows
"\<lbrace>0 \<le>\<^sub>u \<guillemotleft>a\<guillemotright>\<rbrace>
assign_r r 1 ;;
invr 0\<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> bop Isqrt \<guillemotleft>a\<guillemotright> (&r)
vrt \<guillemotleft>measure (nat o (Rep_uexpr ((\<guillemotleft>a\<guillemotright> + 1) - &r)))\<guillemotright>
while\<^sub>\<bottom> (&r * &r \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
do
assign_r r (&r + 1)
od;;
assign_r r (&r - 1)
\<lbrace>0\<le>\<^sub>u &r \<and> uop power2 (&r) \<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> \<guillemotleft>a\<guillemotright> <\<^sub>u uop power2 (&r + 1)\<rbrace>\<^sub>u"
apply (insert assms)
supply Isqrt_aux [simp]
apply (vcg sp)
done
lemma sqrt_prog_correct_wp_rel:
assumes "vwb_lens r"
shows
"\<lbrace>0 \<le>\<^sub>u \<guillemotleft>a\<guillemotright>\<rbrace>
assign_r r 1 ;;
invr 0\<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> bop Isqrt \<guillemotleft>a\<guillemotright> (&r)
vrt \<guillemotleft>measure (nat o (Rep_uexpr ((\<guillemotleft>a\<guillemotright> + 1) - &r)))\<guillemotright>
while\<^sub>\<bottom> (&r * &r \<le>\<^sub>u \<guillemotleft>a\<guillemotright>)
do
assign_r r (&r + 1)
od;;
assign_r r (&r - 1)
\<lbrace>0\<le>\<^sub>u &r \<and> uop power2 (&r) \<le>\<^sub>u \<guillemotleft>a\<guillemotright> \<and> \<guillemotleft>a\<guillemotright> <\<^sub>u uop power2 (&r + 1)\<rbrace>\<^sub>u"
apply (insert assms)
supply Isqrt_aux [simp]
apply (vcg wp)
done
subsection \<open>gcd\<close>
text \<open>In the following we illustrate the effect of a domain-theory-based approach.
      Namely, in the lemma gcd_correct we use the hard-coded max function
      @{term "(trop If (&r >\<^sub>u &x) (&r) (&x))"}. This leads to a long proof.
      However, in gcd_correct' we use the max function from the HOL library.
      This leads to a shorter proof, since the max library already contains the necessary
      lemmas that simplify the reasoning.\<close>
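(* In both formulations the measure strictly decreases: each iteration subtracts the smaller
   of &r and &x from the larger one, so the maximum of the two goes down while both stay
   positive, as recorded in the invariant. *)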
lemma gcd_correct_sp_H1_H3:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright> >\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
INVAR &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
VRT \<guillemotleft>measure (Rep_uexpr (trop If (&r >\<^sub>u &x) (&r) (&x)))\<guillemotright>
WHILE \<not>(&r =\<^sub>u &x)
DO
IF &r >\<^sub>u &x
THEN r :== (&r - &x)
ELSE x :== (&x - &r)
FI
OD
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>P"
apply (insert assms)
apply (vcg sp)
apply (auto simp: gcd_diff1_nat)
apply (metis gcd.commute gcd_diff1_nat not_le)+
done
lemma gcd_correct'_sp_H1_H3:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright>>\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
INVAR &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
VRT \<guillemotleft>measure (Rep_uexpr (bop max (&r) (&x)))\<guillemotright>
WHILE \<not>(&r =\<^sub>u &x)
DO
IF &r >\<^sub>u &x
THEN r :== (&r - &x)
ELSE x :== (&x - &r)
FI
OD
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>P"
apply (insert assms)
apply (vcg sp)
apply (simp add: gcd_diff1_nat)
apply (metis gcd.commute gcd_diff1_nat not_le)
done
lemma gcd_correct'_wp_H1_H3:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright>>\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
INVAR &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
VRT \<guillemotleft>measure (Rep_uexpr (bop max (&r) (&x)))\<guillemotright>
WHILE \<not>(&r =\<^sub>u &x)
DO
IF &r >\<^sub>u &x
THEN r :== (&r - &x)
ELSE x :== (&x - &r)
FI
OD
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>P"
apply (insert assms)
apply (vcg wp)
using gcd_diff1_nat apply auto[1]
apply (metis gcd.commute gcd_diff1_nat not_less)
apply (metis diff_is_0_eq gcd.commute gcd_diff1_nat not_le_minus)
apply (metis gcd.commute gcd_diff1_nat max.strict_order_iff max_def)
apply (simp add: gcd_diff1_nat)
done
lemma gcd_correct'_sp_rel:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright>>\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
invr &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
vrt \<guillemotleft>measure (Rep_uexpr (bop max (&r) (&x)))\<guillemotright>
while\<^sub>\<bottom> \<not>(&r =\<^sub>u &x)
do
bif &r >\<^sub>u &x
then assign_r r ((&r - &x))
else assign_r x (&x - &r)
eif
od
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>u"
apply (insert assms)
apply (vcg sp)
apply (simp add: gcd_diff1_nat)
apply (metis gcd.commute gcd_diff1_nat not_le)
done
lemma gcd_correct'_wp_rel:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright>>\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
invr &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
vrt \<guillemotleft>measure (Rep_uexpr (bop max (&r) (&x)))\<guillemotright>
while\<^sub>\<bottom> \<not>(&r =\<^sub>u &x)
do
bif &r >\<^sub>u &x
then assign_r r ((&r - &x))
else assign_r x (&x - &r)
eif
od
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>u"
apply (insert assms)
apply (vcg wp)
apply (simp_all add: gcd_diff1_nat)
apply (metis gcd.commute gcd_diff1_nat not_le)
apply (metis diff_is_0_eq gcd.commute gcd_diff1_nat not_le_minus)
apply (metis add_diff_inverse_nat gcd_add2 max.strict_coboundedI1)
done
section \<open>Arrays\<close>
subsection \<open>Array Max program: one-variable loop\<close>
lemma max_program_correct_sp_H1_H3:
assumes "r \<bowtie> i"
assumes "vwb_lens i" "vwb_lens r"
shows
"\<lbrace>uop length \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u1 \<and> &i =\<^sub>u 1 \<and> &r =\<^sub>u bop nth \<guillemotleft>a:: int list\<guillemotright> 0\<rbrace>
INVAR 0 <\<^sub>u &i \<and> &i \<le>\<^sub>u uop length \<guillemotleft>a\<guillemotright> \<and> &r =\<^sub>u uop Max (uop set (bop take (&i) \<guillemotleft>a\<guillemotright>))
VRT \<guillemotleft>measure (Rep_uexpr (uop length \<guillemotleft>a\<guillemotright> - (&i)))\<guillemotright>
WHILE \<not>(&i =\<^sub>u uop length \<guillemotleft>a\<guillemotright>)
DO
IF &r <\<^sub>u bop nth \<guillemotleft>a\<guillemotright> (&i)
THEN r :== bop nth \<guillemotleft>a\<guillemotright> (&i)
ELSE SKIP
FI;
i :== (&i + 1)
OD
\<lbrace>&r =\<^sub>uuop Max (uop set \<guillemotleft>a\<guillemotright>)\<rbrace>\<^sub>P"
apply (insert assms)
apply (vcg sp)
subgoal for _
by (cases a; auto)
subgoal for _ i
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (subst Max_insert) by auto
subgoal for _ i
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (subst Max_insert) by auto
done
lemma max_program_correct_wp_H1_H3:
assumes "r \<bowtie> i"
assumes "vwb_lens i" "vwb_lens r"
shows
"\<lbrace>uop length \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u1 \<and> &i =\<^sub>u 1 \<and> &r =\<^sub>u bop nth \<guillemotleft>a:: int list\<guillemotright> 0\<rbrace>
INVAR 0 <\<^sub>u &i \<and> &i \<le>\<^sub>u uop length \<guillemotleft>a\<guillemotright> \<and> &r =\<^sub>u uop Max (uop set (bop take (&i) \<guillemotleft>a\<guillemotright>))
VRT \<guillemotleft>measure (Rep_uexpr (uop length \<guillemotleft>a\<guillemotright> - (&i)))\<guillemotright>
WHILE \<not>(&i =\<^sub>u uop length \<guillemotleft>a\<guillemotright>)
DO
IF &r <\<^sub>u bop nth \<guillemotleft>a\<guillemotright> (&i)
THEN r :== bop nth \<guillemotleft>a\<guillemotright> (&i)
ELSE SKIP
FI;
i :== (&i + 1)
OD
\<lbrace>&r =\<^sub>uuop Max (uop set \<guillemotleft>a\<guillemotright>)\<rbrace>\<^sub>P"
apply (insert assms)
apply (vcg wp)
subgoal for _
by (cases a; auto)
subgoal for _ i'
apply (simp add: take_Suc_conv_app_nth )
apply (subst (asm) Max_insert)
apply auto
done
subgoal for _ i'
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (cases a, auto)
done
subgoal for _ i
by (clarsimp simp: take_Suc_conv_app_nth)
subgoal for _ i
by (clarsimp simp: take_Suc_conv_app_nth)
done
lemma max_program_correct_sp_rel:
assumes "r \<bowtie> i"
assumes "vwb_lens i" "vwb_lens r"
shows
"\<lbrace>uop length \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u1 \<and> &i =\<^sub>u 1 \<and> &r =\<^sub>u bop nth \<guillemotleft>a:: int list\<guillemotright> 0\<rbrace>
invr 0 <\<^sub>u &i \<and> &i \<le>\<^sub>u uop length \<guillemotleft>a\<guillemotright> \<and> &r =\<^sub>u uop Max (uop set (bop take (&i) \<guillemotleft>a\<guillemotright>))
vrt \<guillemotleft>measure (Rep_uexpr (uop length \<guillemotleft>a\<guillemotright> - (&i)))\<guillemotright>
while\<^sub>\<bottom> \<not>(&i =\<^sub>u uop length \<guillemotleft>a\<guillemotright>)
do
bif &r <\<^sub>u bop nth \<guillemotleft>a\<guillemotright> (&i)
then assign_r r (bop nth \<guillemotleft>a\<guillemotright> (&i))
else SKIP\<^sub>r
eif;;
assign_r i (&i + 1)
od
\<lbrace>&r =\<^sub>uuop Max (uop set \<guillemotleft>a\<guillemotright>)\<rbrace>\<^sub>u"
apply (insert assms)
apply (vcg sp)
subgoal for _
by (cases a; auto)
subgoal for _ i
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (subst Max_insert) by auto
subgoal for _ i
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (subst Max_insert) by auto
done
lemma max_program_correct_wp_rel:
assumes "r \<bowtie> i"
assumes "vwb_lens i" "vwb_lens r"
shows
"\<lbrace>uop length \<guillemotleft>a\<guillemotright> \<ge>\<^sub>u1 \<and> &i =\<^sub>u 1 \<and> &r =\<^sub>u bop nth \<guillemotleft>a:: int list\<guillemotright> 0\<rbrace>
invr 0 <\<^sub>u &i \<and> &i \<le>\<^sub>u uop length \<guillemotleft>a\<guillemotright> \<and> &r =\<^sub>u uop Max (uop set (bop take (&i) \<guillemotleft>a\<guillemotright>))
vrt \<guillemotleft>measure (Rep_uexpr (uop length \<guillemotleft>a\<guillemotright> - (&i)))\<guillemotright>
while\<^sub>\<bottom> \<not>(&i =\<^sub>u uop length \<guillemotleft>a\<guillemotright>)
do
bif &r <\<^sub>u bop nth \<guillemotleft>a\<guillemotright> (&i)
then assign_r r (bop nth \<guillemotleft>a\<guillemotright> (&i))
else SKIP\<^sub>r
eif;;
assign_r i (&i + 1)
od
\<lbrace>&r =\<^sub>uuop Max (uop set \<guillemotleft>a\<guillemotright>)\<rbrace>\<^sub>u"
apply (insert assms)
apply (vcg wp)
subgoal for _
by (cases a; auto)
subgoal for _ i'
apply (simp add: take_Suc_conv_app_nth )
apply (subst (asm) Max_insert)
apply auto
done
subgoal for _ i'
apply (clarsimp simp: take_Suc_conv_app_nth)
apply (cases a, auto)
done
subgoal for _ i
by (clarsimp simp: take_Suc_conv_app_nth)
subgoal for _ i
by (clarsimp simp: take_Suc_conv_app_nth)
done
find_theorems name: "rep_eq" "(Rep_uexpr ?e = ?t)" (*This is what pred_simp uses...*)
(*
TODO List for next iteration:
*)
lemma demo_VAR_BIND:
assumes "lens_indep_all [r, x]"
assumes "vwb_lens r" "vwb_lens x"
assumes VAR_BIND: "(get\<^bsub>r\<^esub> ) = r\<^sub>0 \<and> (get\<^bsub>x\<^esub> ) = x\<^sub>0"
shows
"\<lbrace>&r =\<^sub>u \<guillemotleft>a\<guillemotright> \<and> &x =\<^sub>u \<guillemotleft>b\<guillemotright> \<and> \<guillemotleft>b\<guillemotright>>\<^sub>u 0 \<and> \<guillemotleft>a\<guillemotright>>\<^sub>u 0\<rbrace>
INVAR &r >\<^sub>u0 \<and> &x >\<^sub>u 0 \<and> bop gcd (&r) (&x) =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>
VRT \<guillemotleft>measure (Rep_uexpr (bop max (&r) (&x)))\<guillemotright>
WHILE \<not>(&r =\<^sub>u &x)
DO
IF &r >\<^sub>u &x
THEN r :== (&r - &x)
ELSE x :== (&x - &r)
FI
OD
\<lbrace>&r =\<^sub>u &x \<and> &r =\<^sub>u bop gcd \<guillemotleft>a\<guillemotright> \<guillemotleft>b\<guillemotright>\<rbrace>\<^sub>P"
apply (insert assms(1) assms(2))
apply(vcg sp)
apply (auto simp only: VAR_BIND)
apply (simp add: gcd_diff1_nat)
apply (metis gcd.commute gcd_diff1_nat not_le)
done
end
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Kenny Lau, Joey van Langen, Casper Putz
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.fintype.basic
import Mathlib.data.nat.choose.default
import Mathlib.data.int.modeq
import Mathlib.algebra.module.basic
import Mathlib.algebra.iterate_hom
import Mathlib.group_theory.order_of_element
import Mathlib.algebra.group.type_tags
import Mathlib.PostPort
universes u l u_1 u_2 v
namespace Mathlib
/-!
# Characteristic of semirings
-/
/-- The generator of the kernel of the unique homomorphism ℕ → α for a semiring α -/
class char_p (α : Type u) [semiring α] (p : ℕ)
where
cast_eq_zero_iff : ∀ (x : ℕ), ↑x = 0 ↔ p ∣ x
theorem char_p.cast_eq_zero (α : Type u) [semiring α] (p : ℕ) [char_p α p] : ↑p = 0 :=
iff.mpr (char_p.cast_eq_zero_iff α p p) (dvd_refl p)
@[simp] theorem char_p.cast_card_eq_zero (R : Type u_1) [ring R] [fintype R] : ↑(fintype.card R) = 0 := sorry
theorem char_p.int_cast_eq_zero_iff (R : Type u) [ring R] (p : ℕ) [char_p R p] (a : ℤ) : ↑a = 0 ↔ ↑p ∣ a := sorry
theorem char_p.int_coe_eq_int_coe_iff (R : Type u_1) [ring R] (p : ℕ) [char_p R p] (a : ℤ) (b : ℤ) : ↑a = ↑b ↔ int.modeq (↑p) a b := sorry
theorem char_p.eq (α : Type u) [semiring α] {p : ℕ} {q : ℕ} (c1 : char_p α p) (c2 : char_p α q) : p = q :=
nat.dvd_antisymm (iff.mp (char_p.cast_eq_zero_iff α p q) (char_p.cast_eq_zero α q))
(iff.mp (char_p.cast_eq_zero_iff α q p) (char_p.cast_eq_zero α p))
protected instance char_p.of_char_zero (α : Type u) [semiring α] [char_zero α] : char_p α 0 :=
char_p.mk
fun (x : ℕ) =>
eq.mpr (id (Eq._oldrec (Eq.refl (↑x = 0 ↔ 0 ∣ x)) (propext zero_dvd_iff)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑x = 0 ↔ x = 0)) (Eq.symm nat.cast_zero)))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑x = ↑0 ↔ x = 0)) (propext nat.cast_inj))) (iff.refl (x = 0))))
theorem char_p.exists (α : Type u) [semiring α] : ∃ (p : ℕ), char_p α p := sorry
theorem char_p.exists_unique (α : Type u) [semiring α] : exists_unique fun (p : ℕ) => char_p α p := sorry
theorem char_p.congr {R : Type u} [semiring R] {p : ℕ} (q : ℕ) [hq : char_p R q] (h : q = p) : char_p R p :=
h ▸ hq
/-- Noncomputable function that outputs the unique characteristic of a semiring. -/
def ring_char (α : Type u) [semiring α] : ℕ :=
classical.some (char_p.exists_unique α)
namespace ring_char
theorem spec (R : Type u) [semiring R] (x : ℕ) : ↑x = 0 ↔ ring_char R ∣ x := sorry
theorem eq (R : Type u) [semiring R] {p : ℕ} (C : char_p R p) : p = ring_char R :=
and.right (classical.some_spec (char_p.exists_unique R)) p C
protected instance char_p (R : Type u) [semiring R] : char_p R (ring_char R) :=
char_p.mk (spec R)
theorem of_eq {R : Type u} [semiring R] {p : ℕ} (h : ring_char R = p) : char_p R p :=
char_p.congr (ring_char R) h
theorem eq_iff {R : Type u} [semiring R] {p : ℕ} : ring_char R = p ↔ char_p R p :=
{ mp := of_eq, mpr := Eq.symm ∘ eq R }
theorem dvd {R : Type u} [semiring R] {x : ℕ} (hx : ↑x = 0) : ring_char R ∣ x :=
iff.mp (spec R x) hx
end ring_char
theorem add_pow_char_of_commute (R : Type u) [semiring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] (x : R) (y : R) (h : commute x y) : (x + y) ^ p = x ^ p + y ^ p := sorry
theorem add_pow_char_pow_of_commute (R : Type u) [semiring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] {n : ℕ} (x : R) (y : R) (h : commute x y) : (x + y) ^ p ^ n = x ^ p ^ n + y ^ p ^ n := sorry
theorem sub_pow_char_of_commute (R : Type u) [ring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] (x : R) (y : R) (h : commute x y) : (x - y) ^ p = x ^ p - y ^ p := sorry
theorem sub_pow_char_pow_of_commute (R : Type u) [ring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] {n : ℕ} (x : R) (y : R) (h : commute x y) : (x - y) ^ p ^ n = x ^ p ^ n - y ^ p ^ n := sorry
theorem add_pow_char (α : Type u) [comm_semiring α] {p : ℕ} [fact (nat.prime p)] [char_p α p] (x : α) (y : α) : (x + y) ^ p = x ^ p + y ^ p :=
add_pow_char_of_commute α x y (commute.all x y)
theorem add_pow_char_pow (R : Type u) [comm_semiring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] {n : ℕ} (x : R) (y : R) : (x + y) ^ p ^ n = x ^ p ^ n + y ^ p ^ n :=
add_pow_char_pow_of_commute R x y (commute.all x y)
theorem sub_pow_char (α : Type u) [comm_ring α] {p : ℕ} [fact (nat.prime p)] [char_p α p] (x : α) (y : α) : (x - y) ^ p = x ^ p - y ^ p :=
sub_pow_char_of_commute α x y (commute.all x y)
theorem sub_pow_char_pow (R : Type u) [comm_ring R] {p : ℕ} [fact (nat.prime p)] [char_p R p] {n : ℕ} (x : R) (y : R) : (x - y) ^ p ^ n = x ^ p ^ n - y ^ p ^ n :=
sub_pow_char_pow_of_commute R x y (commute.all x y)
theorem eq_iff_modeq_int (R : Type u_1) [ring R] (p : ℕ) [char_p R p] (a : ℤ) (b : ℤ) : ↑a = ↑b ↔ int.modeq (↑p) a b := sorry
theorem char_p.neg_one_ne_one (R : Type u_1) [ring R] (p : ℕ) [char_p R p] [fact (bit0 1 < p)] : -1 ≠ 1 := sorry
theorem ring_hom.char_p_iff_char_p {K : Type u_1} {L : Type u_2} [field K] [field L] (f : K →+* L) (p : ℕ) : char_p K p ↔ char_p L p := sorry
/-- The frobenius map that sends x to x^p -/
def frobenius (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] : R →+* R :=
ring_hom.mk (fun (x : R) => x ^ p) sorry sorry sorry (add_pow_char R)
theorem frobenius_def {R : Type u} [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) : coe_fn (frobenius R p) x = x ^ p :=
rfl
theorem iterate_frobenius {R : Type u} [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) (n : ℕ) : nat.iterate (⇑(frobenius R p)) n x = x ^ p ^ n := sorry
theorem frobenius_mul {R : Type u} [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) (y : R) : coe_fn (frobenius R p) (x * y) = coe_fn (frobenius R p) x * coe_fn (frobenius R p) y :=
ring_hom.map_mul (frobenius R p) x y
theorem frobenius_one {R : Type u} [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] : coe_fn (frobenius R p) 1 = 1 :=
one_pow p
theorem monoid_hom.map_frobenius {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S] (f : R →* S) (p : ℕ) [fact (nat.prime p)] [char_p R p] [char_p S p] (x : R) : coe_fn f (coe_fn (frobenius R p) x) = coe_fn (frobenius S p) (coe_fn f x) :=
monoid_hom.map_pow f x p
theorem ring_hom.map_frobenius {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S] (g : R →+* S) (p : ℕ) [fact (nat.prime p)] [char_p R p] [char_p S p] (x : R) : coe_fn g (coe_fn (frobenius R p) x) = coe_fn (frobenius S p) (coe_fn g x) :=
ring_hom.map_pow g x p
theorem monoid_hom.map_iterate_frobenius {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S] (f : R →* S) (p : ℕ) [fact (nat.prime p)] [char_p R p] [char_p S p] (x : R) (n : ℕ) : coe_fn f (nat.iterate (⇑(frobenius R p)) n x) = nat.iterate (⇑(frobenius S p)) n (coe_fn f x) :=
function.semiconj.iterate_right (monoid_hom.map_frobenius f p) n x
theorem ring_hom.map_iterate_frobenius {R : Type u} [comm_semiring R] {S : Type v} [comm_semiring S] (g : R →+* S) (p : ℕ) [fact (nat.prime p)] [char_p R p] [char_p S p] (x : R) (n : ℕ) : coe_fn g (nat.iterate (⇑(frobenius R p)) n x) = nat.iterate (⇑(frobenius S p)) n (coe_fn g x) :=
monoid_hom.map_iterate_frobenius (ring_hom.to_monoid_hom g) p x n
theorem monoid_hom.iterate_map_frobenius {R : Type u} [comm_semiring R] (x : R) (f : R →* R) (p : ℕ) [fact (nat.prime p)] [char_p R p] (n : ℕ) : nat.iterate (⇑f) n (coe_fn (frobenius R p) x) = coe_fn (frobenius R p) (nat.iterate (⇑f) n x) :=
monoid_hom.iterate_map_pow f x n p
theorem ring_hom.iterate_map_frobenius {R : Type u} [comm_semiring R] (x : R) (f : R →+* R) (p : ℕ) [fact (nat.prime p)] [char_p R p] (n : ℕ) : nat.iterate (⇑f) n (coe_fn (frobenius R p) x) = coe_fn (frobenius R p) (nat.iterate (⇑f) n x) :=
ring_hom.iterate_map_pow f x n p
theorem frobenius_zero (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] : coe_fn (frobenius R p) 0 = 0 :=
ring_hom.map_zero (frobenius R p)
theorem frobenius_add (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) (y : R) : coe_fn (frobenius R p) (x + y) = coe_fn (frobenius R p) x + coe_fn (frobenius R p) y :=
ring_hom.map_add (frobenius R p) x y
theorem frobenius_nat_cast (R : Type u) [comm_semiring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (n : ℕ) : coe_fn (frobenius R p) ↑n = ↑n :=
ring_hom.map_nat_cast (frobenius R p) n
theorem frobenius_neg (R : Type u) [comm_ring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) : coe_fn (frobenius R p) (-x) = -coe_fn (frobenius R p) x :=
ring_hom.map_neg (frobenius R p) x
theorem frobenius_sub (R : Type u) [comm_ring R] (p : ℕ) [fact (nat.prime p)] [char_p R p] (x : R) (y : R) : coe_fn (frobenius R p) (x - y) = coe_fn (frobenius R p) x - coe_fn (frobenius R p) y :=
ring_hom.map_sub (frobenius R p) x y
theorem frobenius_inj (α : Type u) [comm_ring α] [no_zero_divisors α] (p : ℕ) [fact (nat.prime p)] [char_p α p] : function.injective ⇑(frobenius α p) := sorry
namespace char_p
theorem char_p_to_char_zero (α : Type u) [ring α] [char_p α 0] : char_zero α :=
char_zero_of_inj_zero fun (n : ℕ) (h0 : ↑n = 0) => eq_zero_of_zero_dvd (iff.mp (cast_eq_zero_iff α 0 n) h0)
theorem cast_eq_mod (α : Type u) [ring α] (p : ℕ) [char_p α p] (k : ℕ) : ↑k = ↑(k % p) := sorry
theorem char_ne_zero_of_fintype (α : Type u) [ring α] (p : ℕ) [hc : char_p α p] [fintype α] : p ≠ 0 :=
fun (h : p = 0) =>
(fun (this : char_zero α) => absurd nat.cast_injective (not_injective_infinite_fintype coe)) (char_p_to_char_zero α)
theorem char_ne_one (α : Type u) [integral_domain α] (p : ℕ) [hc : char_p α p] : p ≠ 1 := sorry
theorem char_is_prime_of_two_le (α : Type u) [integral_domain α] (p : ℕ) [hc : char_p α p] (hp : bit0 1 ≤ p) : nat.prime p := sorry
theorem char_is_prime_or_zero (α : Type u) [integral_domain α] (p : ℕ) [hc : char_p α p] : nat.prime p ∨ p = 0 := sorry
theorem char_is_prime_of_pos (α : Type u) [integral_domain α] (p : ℕ) [h : fact (0 < p)] [char_p α p] : fact (nat.prime p) :=
or.resolve_right (char_is_prime_or_zero α p) (iff.mp pos_iff_ne_zero h)
theorem char_is_prime (α : Type u) [integral_domain α] [fintype α] (p : ℕ) [char_p α p] : nat.prime p :=
or.resolve_right (char_is_prime_or_zero α p) (char_ne_zero_of_fintype α p)
protected instance subsingleton {R : Type u_1} [semiring R] [char_p R 1] : subsingleton R :=
subsingleton.intro
((fun (this : ∀ (r : R), r = 0) (a b : R) =>
(fun (this : a = b) => this)
(eq.mpr (id (Eq._oldrec (Eq.refl (a = b)) (this a)))
(eq.mpr (id (Eq._oldrec (Eq.refl (0 = b)) (this b))) (Eq.refl 0))))
fun (r : R) =>
Eq.trans
(Eq.trans
(Eq.trans (eq.mpr (id (Eq._oldrec (Eq.refl (r = 1 * r)) (one_mul r))) (Eq.refl r))
(eq.mpr (id (Eq._oldrec (Eq.refl (1 * r = ↑1 * r)) nat.cast_one)) (Eq.refl (1 * r))))
(eq.mpr (id (Eq._oldrec (Eq.refl (↑1 * r = 0 * r)) (cast_eq_zero R 1))) (Eq.refl (0 * r))))
(eq.mpr (id (Eq._oldrec (Eq.refl (0 * r = 0)) (zero_mul r))) (Eq.refl 0)))
theorem false_of_nontrivial_of_char_one {R : Type u_1} [semiring R] [nontrivial R] [char_p R 1] : False :=
false_of_nontrivial_of_subsingleton R
theorem ring_char_ne_one {R : Type u_1} [semiring R] [nontrivial R] : ring_char R ≠ 1 := sorry
theorem nontrivial_of_char_ne_one {v : ℕ} (hv : v ≠ 1) {R : Type u_1} [semiring R] [hr : char_p R v] : nontrivial R := sorry
end char_p
theorem char_p_of_ne_zero (n : ℕ) (R : Type u_1) [comm_ring R] [fintype R] (hn : fintype.card R = n) (hR : ∀ (i : ℕ), i < n → ↑i = 0 → i = 0) : char_p R n := sorry
theorem char_p_of_prime_pow_injective (R : Type u_1) [comm_ring R] [fintype R] (p : ℕ) [hp : fact (nat.prime p)] (n : ℕ) (hn : fintype.card R = p ^ n) (hR : ∀ (i : ℕ), i ≤ n → ↑p ^ i = 0 → i = n) : char_p R (p ^ n) := sorry
|
module IdrisWeb.Session.SessionUtils
import IdrisWeb.CGI.CgiTypes
import IdrisWeb.CGI.Cgi
import IdrisWeb.Session.Session
import IdrisWeb.DB.SQLite.SQLiteNew
import Effects
%access public
-- Key for the session id in the cookie
public
SESSION_VAR : String
SESSION_VAR = "session_id"
getOrCreateSession : EffM IO [CGI (InitialisedCGI TaskRunning), SESSION (SessionRes SessionUninitialised)]
[CGI (InitialisedCGI TaskRunning), SESSION (SessionRes SessionInitialised)]
(Maybe (SessionID, SessionData))
getOrCreateSession = do
-- Firstly grab the session ID from the cookies, if it exists
s_var <- lift' (queryCookieVar SESSION_VAR)
case s_var of
-- If it does exist, then attempt to load the session
Just s_id => do res <- lift' (loadSession s_id)
case res of
Just res' => Effects.pure $ Just (s_id, res') -- (s_id, res')
-- TODO: This should create a new session
Nothing => Effects.pure $ Nothing
-- If it doesn't, create a new, empty session
Nothing => do res <- lift (Drop (Keep (SubNil))) (createSession [])
case res of
Just s_id' => Effects.pure $ Just (s_id', [])
Nothing => Effects.pure $ Nothing
setSessionCookie : Eff IO [CGI (InitialisedCGI TaskRunning), SESSION (SessionRes SessionInitialised)] Bool
setSessionCookie = do s_id <- lift (Drop (Keep (SubNil))) getSessionID
case s_id of
Just s_id => do lift (Keep (Drop (SubNil))) (setCookie SESSION_VAR s_id)
pure True
Nothing => pure False
total
updateVar : String -> SessionDataType -> SessionData -> SessionData
updateVar new_key new_val [] = [(new_key, new_val)]
updateVar new_key new_val ((key, val)::xs) = if (key == new_key) then ((key, new_val):: xs)
else ((key, val) :: (updateVar new_key new_val xs))
-- Takes in two functions: one to execute if there is a valid, authenticated
-- session cookie, and one to execute if there isn't.
withSession : (SessionData ->
EffM IO [CGI (InitialisedCGI TaskRunning),
SESSION (SessionRes SessionInitialised),
SQLITE ()]
[CGI (InitialisedCGI TaskRunning),
SESSION (SessionRes SessionUninitialised),
SQLITE ()] ()) ->
EffM IO [CGI (InitialisedCGI TaskRunning),
SESSION (SessionRes SessionInitialised),
SQLITE ()]
[CGI (InitialisedCGI TaskRunning),
SESSION (SessionRes SessionUninitialised),
SQLITE ()] () ->
Eff IO [CGI (InitialisedCGI TaskRunning),
SESSION (SessionRes SessionUninitialised),
SQLITE ()] ()
withSession is_auth_fn not_auth_fn = do
s_var <- queryCookieVar SESSION_VAR
case s_var of
-- If it does exist, then attempt to load the session
Just s_id => do res <- loadSession s_id--(lift (Drop (Keep (SubNil))) (loadSession s_id))
case res of
-- If we've got a valid session, execute the user-specified
-- function with the gathered session data
Just res' => do is_auth_fn res'
pure ()
-- If not, execute the specified failure function
Nothing => do not_auth_fn
pure ()
-- If there's no session variable, execute the failure function (somehow)
-- HACK: this loadSession won't succeed, yet will transfer into the other state so
-- that the not_auth_fn can be run. There are probably better ways of doing this.
Nothing => do loadSession ""
not_auth_fn
pure ()
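-- A hedged usage sketch (not part of IdrisWeb): renderDashboard and
-- renderLoginPage are hypothetical, application-specific handlers with the
-- EffM types expected by withSession, so the sketch is kept as a comment.
-- It only illustrates wiring an authenticated branch and an unauthenticated
-- branch together through withSession.
--
-- handlePage : Eff IO [CGI (InitialisedCGI TaskRunning),
--                      SESSION (SessionRes SessionUninitialised),
--                      SQLITE ()] ()
-- handlePage = withSession renderDashboard renderLoginPage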
|
lemma measure_lborel_box_eq: "measure lborel (box l u) = (if \<forall>b\<in>Basis. l \<bullet> b \<le> u \<bullet> b then \<Prod>b\<in>Basis. (u - l) \<bullet> b else 0)" |
[STATEMENT]
lemma unity_root_sum_0_left [simp]: "unity_root_sum 0 n = 0" and
unity_root_sum_0_right [simp]: "k > 0 \<Longrightarrow> unity_root_sum k 0 = k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. unity_root_sum 0 n = 0 &&& (0 < k \<Longrightarrow> unity_root_sum k 0 = of_nat k)
[PROOF STEP]
unfolding unity_root_sum_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<Sum>m<0. unity_root 0 (n * int m)) = 0 &&& (0 < k \<Longrightarrow> (\<Sum>m<k. unity_root k (0 * int m)) = of_nat k)
[PROOF STEP]
by simp_all |
[STATEMENT]
lemma SOV_nNot[simp]: "SOV (nNot \<phi>) = SOV (FNot \<phi>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SOV (nNot \<phi>) = SOV (FNot \<phi>)
[PROOF STEP]
by (induct \<phi> rule: nNot.induct) auto |
import tactic
import measure_theory.interval_integral
import measure_theory.lebesgue_measure
import measure_theory.set_integral
import analysis.calculus.deriv
import analysis.special_functions.exp_log
import analysis.special_functions.trigonometric
import data.finset
noncomputable theory
open_locale classical
open_locale big_operators
open measure_theory
open interval_integral
open set
open real
namespace tactic.interactive
meta def show_continuous := `[
all_goals {try {simp}},
apply_rules [
continuous_on.neg,
continuous.continuous_on,
differentiable.continuous,
differentiable_on.continuous_on,
continuous.Icc_extend,
continuous_on.mono,
continuous.neg,
continuous_id,
continuous_sin,
continuous_cos,
continuous_const,
continuous.pow,
continuous.mul,
continuous.smul,
continuous.sub,
continuous.add
] 10,
all_goals {try {norm_num}}
]
meta def show_differentiable := `[
apply_rules [
differentiable.differentiable_on,
differentiable.neg,
differentiable.smul,
differentiable.cos,
differentiable.sin,
differentiable_const,
differentiable_id,
differentiable.mul,
differentiable_fpow
] 10,
all_goals {try {norm_num}}
]
meta def show_nonzero := `[
apply_rules [
mul_ne_zero,
sub_ne_zero_of_ne,
pow_ne_zero,
ne_of_gt,
ne_of_lt
] 10,
all_goals {try {norm_cast}, try {norm_num}}
]
meta def show_pos := `[
apply_rules [
nat.succ_pos,
mul_pos,
div_pos,
inv_pos.mpr,
pow_pos
] 10,
all_goals {try {norm_cast}, try {norm_num}, try {nlinarith}}
]
meta def clear_denoms := `[
try {rw div_eq_div_iff},
try {rw eq_div_iff},
try {symmetry, rw eq_div_iff},
try { ring_exp },
all_goals {show_nonzero}
]
meta def discrete_field := `[
try {ext},
try {field_simp *},
try {clear_denoms},
try {ring_exp},
try {norm_num},
try {linarith}
]
end tactic.interactive
lemma integrable_of_cont {f : ℝ → ℝ} (a b : ℝ) (h : continuous f):
interval_integrable f measure_theory.measure_space.volume a b :=
begin
have hmeas : measurable f := continuous.measurable h,
have hconton : continuous_on f (interval a b) := continuous.continuous_on h,
exact continuous_on.interval_integrable hconton,
end
/- lemma self_mem_ae_restrict
{s : set ℝ} (hs : is_measurable s):
s ∈ (measure.restrict measure_space.volume s).ae :=
begin
rw ae_restrict_eq hs,
simp only [exists_prop, filter.mem_principal_sets, filter.mem_inf_sets],
exact ⟨univ, filter.univ_mem_sets, s, by simp⟩,
end
-/
lemma nonempty_inter_of_nonempty_inter_closure {α : Type*} [topological_space α] {s t : set α}
(hs : is_open s) (h : (s ∩ closure t).nonempty) : (s ∩ t).nonempty :=
let ⟨x, xs, xt⟩ := h in _root_.mem_closure_iff.1 xt s hs xs
lemma real.volume_pos_of_is_open_of_nonempty {s : set ℝ} (h : is_open s) (h' : s.nonempty) :
0 < volume s :=
begin
rcases h' with ⟨x, hx⟩,
have : ∀ᶠ (y : ℝ) in nhds x, y ∈ s := filter.eventually_of_mem (mem_nhds_sets h hx) (λ y H, H),
exact filter.eventually.volume_pos_of_nhds_real this,
end
theorem integral_strictly_pos_of_cont (f : ℝ → ℝ) (a b : ℝ)
(hf : continuous f)
(hab : a < b)
(h : ∀ (x : ℝ), a ≤ x → x ≤ b → 0 ≤ f x)
(hneq: ∃ x, a ≤ x ∧ x ≤ b ∧ 0 < f x) :
0 < ∫ x in a..b, f x :=
begin
rw integral_pos_iff_support_of_nonneg_ae',
{ refine ⟨hab, _⟩,
let s := {b : ℝ | 0 < f b},
have s_open : is_open s := is_open_lt continuous_const hf,
have : (s ∩ closure (Ioo a b)).nonempty,
{ rw closure_Ioo hab,
rcases hneq with ⟨x, ax, xb, fxpos⟩,
have : x ∈ s ∩ Icc a b := ⟨fxpos, ax, xb⟩,
exact nonempty_of_mem this },
have : (s ∩ Ioo a b).nonempty := nonempty_inter_of_nonempty_inter_closure s_open this,
have : 0 < volume (s ∩ Ioo a b) :=
real.volume_pos_of_is_open_of_nonempty (is_open_inter s_open is_open_Ioo) this,
refine this.trans_le (measure_mono (λ x hx, _)),
split,
{ exact ne_of_gt (show 0 < f x, from hx.1) },
{ exact ⟨hx.2.1, hx.2.2.le⟩ } },
{ have : Ioc b a = ∅ := Ioc_eq_empty hab.le,
simp only [this, union_empty],
apply filter.eventually_of_mem _ _,
exact Icc a b,
{
simp,
use univ,
simp,
use Icc a b,
exact ⟨Ioc_subset_Icc_self, rfl.subset⟩,
},
simpa using h },
{ exact integrable_of_cont a b hf }
end
theorem integral_strictly_monotone_of_cont (f g : ℝ → ℝ) (a b : ℝ)
(hf : continuous f) (hg : continuous g) (hab : a < b)
(h : ∀ (x : ℝ), a ≤ x → x ≤ b → f x ≤ g x)
(hneq: ∃ x, a ≤ x ∧ x ≤ b ∧ f x < g x) :
∫ x in a..b, f x < ∫ x in a..b, g x :=
begin
have H : 0 < ∫ x in a..b, (g x - f x),
{
apply integral_strictly_pos_of_cont
(g-f) a b (continuous.sub hg hf) hab,
all_goals {
simp [sub_pos],
assumption,
},
},
rw [←sub_pos, ←interval_integral.integral_sub (integrable_of_cont a b hg) (integrable_of_cont a b hf)],
exact H,
end
lemma int_pos_of_pos {f : ℝ → ℝ} {a b : ℝ} (hab : a < b) (hf : continuous f)
(hnonneg : ∀ x, a ≤ x → x ≤ b → 0 ≤ f x)
(hx : ∃ x, a ≤ x ∧ x ≤ b ∧ 0 < f x) : 0 < ∫ x in a..b, f x :=
begin
rw ← (integral_zero : ∫ x in a..b, (0:ℝ) = 0 ),
exact integral_strictly_monotone_of_cont (λ x, (0:ℝ)) f a b continuous_const hf hab hnonneg hx,
end
lemma int_pos_of_square {f : ℝ → ℝ} {a b} (x : ℝ)
(hab : a < b) (hf : continuous f) (hx : a ≤ x ∧ x ≤ b ∧ f x ≠ 0) :
0 < ∫ x in a..b, (f x)^2 :=
begin
refine int_pos_of_pos hab _
(λ x hx1 hx2, pow_two_nonneg (f x)) ⟨x, ⟨hx.1, hx.2.1, pow_two_pos_of_ne_zero (f x) hx.2.2⟩⟩,
show_continuous,
end
theorem my_integral_smul (f : ℝ → ℝ) (a b c : ℝ) :
∫ x in a..b, c * (f x) = c * ∫ x in a..b, f x :=
begin
suffices : ∫ x in a..b, c • (f x) = c • ∫ x in a..b, f x, by exact this,
rw_mod_cast interval_integral.integral_smul,
end
theorem product_rule {f g : ℝ → ℝ} (hdf : differentiable ℝ f) (hdg : differentiable ℝ g) :
deriv (f * g) = (deriv f) * g + f * deriv g :=
begin
ext,
have hdf0 : differentiable_at ℝ f x := hdf x,
have hdg0 : differentiable_at ℝ g x := hdg x,
apply deriv_mul hdf0 hdg0,
end
theorem differentiable_fpow {f : ℝ → ℝ} {n : ℕ} :
differentiable ℝ f → differentiable ℝ (f^n) :=
begin
induction n with d hd,
{ intro h,
simp only [pow_zero],
exact differentiable_const 1 },
{
intro h,
rw pow_succ,
exact h.mul (hd h),
}
end
theorem power_rule {f : ℝ → ℝ} {n : ℕ} (hfd : differentiable ℝ f):
deriv (f^(n+1)) = ((n : ℝ) + 1) • f^n * (deriv f) :=
begin
induction n with d hd, by norm_num,
have H : f^(d+1) = f^d * f := pow_succ' f d,
calc
deriv (f^(d.succ+1)) = deriv (f^(d.succ) * f) : by {rw pow_succ' f (d.succ),}
... = (deriv (f^(d.succ))) * f + f^(d+1) * (deriv f) :
begin
rw product_rule,
exact differentiable_fpow hfd,
exact hfd,
end
... = ((d:ℝ) + 1) • f^d * deriv f * f + f^d.succ * deriv f : by {rw hd}
... = ((d:ℝ) + 1) • (f^(d.succ)) * deriv f + f^(d.succ) * deriv f :
begin
simp only [add_left_inj, H],
norm_num,
rw mul_assoc,
nth_rewrite_lhs 1 mul_comm,
rw ←mul_assoc,
end
... = ((d.succ:ℝ) + 1) • (f^(d.succ)) * deriv f :
begin
simp only [nat.cast_succ, algebra.smul_mul_assoc],
nth_rewrite 1 add_smul,
rw one_smul,
end
end
lemma pow_fun_def {f : ℝ → ℝ} {n : ℕ} : f^n = λ x, (f x)^n :=
begin
induction n with d hd,
all_goals {
try {rw [pow_succ, hd]},
refl,
}
end
lemma pow_deriv_fun_def {f : ℝ → ℝ} {n : ℕ} : ((n : ℝ) + 1) • f^n * (deriv f) =
λ (x : ℝ), ((n : ℝ) + 1) • ((f x)^n * deriv f x) :=
begin
rw pow_fun_def,
simpa,
end
@[simp] lemma power_rule' {f : ℝ → ℝ} (n : ℕ) (hfd : differentiable ℝ f):
deriv (λ (x : ℝ), (f x)^(n + 1)) = λ (x : ℝ), ((n : ℝ) + 1) • ((f x)^n * deriv f x) :=
begin
rw [←pow_fun_def, ←pow_deriv_fun_def],
exact power_rule hfd,
end
@[simp] lemma power_rule'' (n : ℕ) :
deriv (λ (x : ℝ), x^(n + 1)) = λ (x : ℝ), ((n : ℝ) + 1) • (x^n) :=
begin
have hfd : differentiable ℝ (λ (x:ℝ), (x:ℝ)) := differentiable_id',
have deriv_id_my : deriv (λ x, x) = λ (x : ℝ), (1:ℝ) := deriv_id',
have H := power_rule' n hfd,
rw deriv_id_my at H,
rw H,
simp only [mul_one, algebra.id.smul_eq_mul],
end
theorem int_by_parts (u v : ℝ → ℝ) {a b : ℝ} (hu : differentiable ℝ u)
(hv : differentiable ℝ v) (hcu : continuous(deriv u)) (hcv : continuous(deriv v)) :
∫ x in a..b, u x * deriv v x =
u b * v b - u a * v a - ∫ x in a..b, v x * deriv u x :=
begin
have hu' : ∀ (x : ℝ), differentiable_at ℝ u x := hu,
have hv' : ∀ (x : ℝ), differentiable_at ℝ v x := hv,
have huv : deriv (u * v) = (deriv u) * v + u * deriv v := product_rule hu hv,
have H : ∫ x in a..b, ((deriv u) x) * (v x) + (u x) * ((deriv v) x) = ∫ x in a..b, (deriv (u*v)) x,
{
congr,
solve_by_elim,
},
have duv_cont : continuous (deriv (u * v)),
{
rw product_rule hu hv,
apply continuous.add,
rw mul_comm,
all_goals {
apply continuous.mul,
work_on_goal 0
{
apply @differentiable.continuous ℝ _ _ _ _ _ _ _,
},
repeat {assumption},
},
},
have duv_cont' : continuous_on (deriv (u * v)) (interval a b),
{
intros x hx,
exact continuous.continuous_within_at duv_cont,
},
have H2 : ∫ x in a..b, deriv (u*v) x = u b * v b - u a * v a,
{
apply integral_deriv_eq_sub,
intros x hx,
exact differentiable_at.mul (hu' x) (hv' x),
exact duv_cont',
},
rw [←H2, ←interval_integral.integral_sub],
{
congr,
ext,
rw huv,
simp only [pi.add_apply, pi.mul_apply],
rw mul_comm (v x) (deriv u x),
ring,
},
{
apply integrable_of_cont,
assumption,
},
apply integrable_of_cont,
apply continuous.mul,
apply @differentiable.continuous ℝ _ _ _ _ _ _ _,
repeat {assumption},
end
lemma int_by_parts_zero_ends (u v : ℝ → ℝ) {a b : ℝ} (hu : differentiable ℝ u)
(hv : differentiable ℝ v) (hcu : continuous(deriv u)) (hcv : continuous(deriv v))
(ha : u a * v a = 0) (hb : u b * v b = 0)
:
∫ x in a..b, u x * deriv v x = - ∫ x in a..b, v x * deriv u x :=
begin
rw int_by_parts,
repeat {assumption},
rw [ha, hb],
norm_num,
end
@[simp] lemma pow_ext (f : ℝ → ℝ) (n : ℕ) : f^n = λ x, (f x)^n :=
begin
induction n with d hd,
{
norm_num,
refl,
},
{
change f^(d+1) = λ x, (f x)^(d+1),
rw [pow_add, hd, pow_one],
ext,
norm_num,
ring_nf,
}
end
lemma differentiable_cospow_at (n: ℕ) {x : ℝ} : differentiable_at ℝ (cos^(n+1)) x:=
by show_differentiable
lemma deriv_cospow (n: ℕ) : deriv (λ (x : ℝ), cos x ^ (n+1)) = λ x, -((n : ℝ)+1) * (cos x)^n * sin x :=
begin
suffices : (λ (x : ℝ), -(((n:ℝ) + 1) * (cos x ^ n * sin x))) =
λ (x : ℝ), (-1 + -n) * cos x^n * sin x, by simpa,
ext,
ring,
end
lemma continuous_cospow {n: ℕ} : continuous (λ (x : ℝ), (cos x)^n) :=
begin
exact continuous.pow continuous_cos n,
end
lemma continuous_cospow' {c : ℝ} {m : ℕ} :
continuous
(λ (x : ℝ), c * cos x ^m) := by show_continuous
lemma differentiable_cospow {n: ℕ} : differentiable ℝ (λ (x : ℝ), (cos x)^n) :=
begin
simp only [differentiable_id', differentiable.pow, differentiable.cos],
end
lemma continuous_deriv_cospow (n: ℕ) : continuous (deriv (λ (x : ℝ), cos x ^ (n+1))) :=
begin
rw deriv_cospow,
apply continuous.mul continuous_cospow' continuous_sin,
end
@[simp] lemma deriv_sin_times_cos {x : ℝ} : deriv(sin * cos) x =
2 * cos x ^ 2 - 1 :=
begin
have H : deriv (λ (y : ℝ), sin y * cos y) x =
deriv sin x * cos x + sin x * deriv cos x
:= deriv_mul differentiable_at_sin differentiable_at_cos,
have h0 : sin * cos = λ y, sin y * cos y, by refl,
have hsin : sin x^2 = 1 - cos x^2,
{
rw eq_sub_iff_add_eq,
exact sin_sq_add_cos_sq x,
},
rw [h0, H, real.deriv_sin, real.deriv_cos],
ring_nf,
rw hsin,
ring,
end
@[simp] lemma deriv_sin_cos {m : ℕ} : deriv (λ x, sin x * cos x^(m+1)) =
λ x, (m+2) * cos x^(m+2) - (m+1) * cos x^m :=
begin
ext,
suffices : deriv(sin * cos^(m+1)) x =
(m+2) * (cos x)^(m+2) - (m+1) * (cos x)^m,
{
rw pow_ext at this,
exact this,
},
induction m with d hd,
{
simp only [mul_one, nat.cast_zero, pow_one, zero_add, pow_zero],
exact deriv_sin_times_cos,
},
{
simp,
have H := deriv_mul (@differentiable_at_sin x)
(differentiable_cospow_at d.succ),
repeat {rw pow_succ,},
have h2 : (λ (y : ℝ), sin y * (cos ^ (d.succ + 1)) y) x =
sin x * (cos ^ (d.succ + 1)) x, by tauto,
have hsin : sin * (λ (x : ℝ), cos x ^ (d.succ + 1)) =
(λ x, sin x * cos x ^ (d.succ + 1)), by tauto,
rw hsin,
have hhd : (sin * cos ^ (d + 1) = λ (y : ℝ), sin y * cos y ^ (d + 1)),
{
ext,
simp only [pi.mul_apply, pow_ext],
},
simp [pow_ext],
ring_exp,
have sin_to_cos : sin x^2 = 1 - cos x^2,
{
rw eq_sub_iff_add_eq,
exact sin_sq_add_cos_sq x,
},
rw sin_to_cos,
discrete_field,
},
end |
function [out] = area_1(p1,p2,S,Smin,Smax,varargin)
%area_1
% Copyright (C) 2019, 2021 Wouter J.M. Knoben, Luca Trotter
% This file is part of the Modular Assessment of Rainfall-Runoff Models
% Toolbox (MARRMoT).
% MARRMoT is a free software (GNU GPL v3) and distributed WITHOUT ANY
% WARRANTY. See <https://www.gnu.org/licenses/> for details.
% Flux function
% ------------------
% Description: Auxiliary function that calculates a variable contributing area.
% Constraints: A <= 1
% @(Inputs): p1 - linear scaling parameter [-]
% p2 - exponential scaling parameter [-]
% S - current storage [mm]
% Smin - minimum contributing storage [mm]
% Smax - maximum contributing storage [mm]
% varargin(1) - smoothing variable r (default 0.01)
% varargin(2) - smoothing variable e (default 5.00)
if size(varargin,2) == 0
out = min(1,p1.*(max(0,S-Smin)./(Smax-Smin)).^p2).*...
(1-smoothThreshold_storage_logistic(S,Smin)); % default smoothing
elseif size(varargin,2) == 1
out = min(1,p1.*(max(0,S-Smin)./(Smax-Smin)).^p2).*...
(1-smoothThreshold_storage_logistic(S,Smin,varargin(1))); % user-specified smoothing
elseif size(varargin,2) == 2
out = min(1,p1.*(max(0,S-Smin)./(Smax-Smin)).^p2).*...
(1-smoothThreshold_storage_logistic(S,Smin,varargin(1),varargin(2))); % user-specified smoothing
end
end
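% Hypothetical usage sketch (parameter values are illustrative only and assume
% MARRMoT's smoothThreshold_storage_logistic is on the path): with p1 = 2,
% p2 = 1.5, S = 30, Smin = 10, Smax = 50 the relative storage is
% (30-10)/(50-10) = 0.5, so the unsmoothed contributing area is
% min(1, 2*0.5^1.5) ~= 0.71 before the logistic smoothing factor is applied.
%
% A_default = area_1(2, 1.5, 30, 10, 50);       % default smoothing (r = 0.01, e = 5)
% A_custom  = area_1(2, 1.5, 30, 10, 50, 0.05); % user-specified smoothing variable r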
|
[GOAL]
a b : ℕ
h : 0 < b
⊢ a + 0 < a + b
[PROOFSTEP]
apply Nat.add_lt_add_left
[GOAL]
case h
a b : ℕ
h : 0 < b
⊢ 0 < b
[PROOFSTEP]
assumption
[GOAL]
a : ℕ
this : 0 < a + 1
⊢ 0 < 1 + a
[PROOFSTEP]
simp [Nat.add_comm]
[GOAL]
a : ℕ
this : 0 < a + 1
⊢ 0 < a + 1
[PROOFSTEP]
assumption
|
!
! Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
!
! NVIDIA CORPORATION and its licensors retain all intellectual property
! and proprietary rights in and to this software, related documentation
! and any modifications thereto.
!
!
! These example codes are a portion of the code samples from the companion
! website to the book "CUDA Fortran for Scientists and Engineers":
!
! http://store.elsevier.com/product.jsp?isbn=9780124169708
!
module simpleOps_m
contains
attributes(global) subroutine increment(a, b)
implicit none
integer, intent(inout) :: a(:)
integer, value :: b
integer :: i
i = threadIdx%x
a(i) = a(i)+b
end subroutine increment
end module simpleOps_m
program incrementTest
use cudafor
use simpleOps_m
implicit none
integer, parameter :: n = 256
integer :: a(n), b
integer, device :: a_d(n)
a = 1
b = 3
a_d = a
call increment<<<1,n>>>(a_d, b)
a = a_d
if (any(a /= 4)) then
write(*,*) '**** Program Failed ****'
else
write(*,*) 'Program Passed'
endif
end program incrementTest
|
% Notes for Laplace scheme and dynamic updating of parameters
%==========================================================================
n = 128; % number of samples (time bins)
P = 4; % true parameter
k = n; % precision on fluctuations
pp = 2; % prior on parameter
m = 8; % number of data
iS = eye(m,m)*2; % error precision
s = sqrtm(inv(iS));
p = [0;0]; % initial parameter estimates
for i = 1:n
x = 4 + randn(m,1); % exogenous input
z = s*randn(m,1); % error
y = P*x + z; % response
e = y - p(1)*x; % prediction error
Lp = -e'*iS*x + pp*p(1);
Lpp = x'*iS*x + pp;
f = [p(2); (-Lp -k*p(2))];
dfdx = [0 1; -Lpp -k];
p = p + spm_dx(dfdx,f,1);
X(:,i) = p;
LP(i) = Lp;
end
% results
%--------------------------------------------------------------------------
subplot(2,1,1)
plot(1:n,X)
title('generalised parameter estimates','FontSize',16)
subplot(2,1,2)
plot(1:n,LP)
title('energy gradient','FontSize',16)
return
% Notes
%==========================================================================
M.f = inline('[x(2); (u - K(1)*x(2))]','x','u','K','M');
M.m = 1;
M.n = 2;
M.l = 2;
M.x = [0;0];
M.u = 0;
N = 128;
dt = 1/64;
K = 4;
[K0,K1,K2] = spm_kernels(M,K,N,dt);
subplot(2,1,1)
plot([1:N]*dt,K1);
xlabel('time (s)')
title('Kernels','FontSize',16)
legend('drive','trace')
axis square
|