If $f$ converges to $l$ in $F$, and $f$ and $g$ are eventually equal in $F$, then $g$ converges to $l$ in $F$. |
Sometimes I like to add a little ‘sparkle and shine’ to my life (in a subtle way of course, I’m not talking Mr T-style bling). ‘Floating Dreams Studio’ have sent me two beautiful charms to review all the way from Texas, United States! I know shipping over here to the UK must have been an absolute pain, so I am extremely grateful. They arrived quickly with a lovely handwritten note, which I always think adds a cute personal touch.
These are handmade, high-quality (and obviously made with love) planner charms by 23-year-old ‘Nha’, who originates from Vietnam. She is a senior at the ‘University of Houston’ studying ‘Mechanical Engineering’. Yes, that’s Mechanical Engineering! I found this really interesting, as it’s got absolutely nothing to do with what she does for work; I was expecting her to say something along the lines of ‘Arts and Crafts’. So it seems that Nha is a woman of many talents. ‘Floating Dreams Studio’ is a part-time business which started out as her hobby. I always like to hear that people have started out that way, as it means the business owners get to do what they genuinely love and enjoy.
Now I do have to admit that if these were clipped to my work diary/planner, no one would ever see them, as it’s usually lying around with a mug of coffee resting on it or shoved under my computer keyboard. Personally I think these are far too pretty to be kept hidden, so I have clipped them both onto my handbag for all to see, because I couldn’t choose which one I liked best.
These charms are a really effective and inexpensive way of adding a little something special to your possessions such as bags, purses and key rings. |
lemma LIMSEQ_le_const2: "X \<longlonglongrightarrow> x \<Longrightarrow> \<exists>N. \<forall>n\<ge>N. X n \<le> a \<Longrightarrow> x \<le> a" for a x :: "'a::linorder_topology" |
-- @@stderr --
dtrace: failed to compile script test/unittest/funcs/err.D_PROTO_LEN.panicbadarg.d: [D_PROTO_LEN] line 19: panic( ) prototype mismatch: 1 arg passed, 0 expected
|
module Statistics.MLE (
maxLikelihood,
probit
) where
import Numeric.GSL.Minimization
import Statistics.Distribution
import Statistics.Distribution.Normal
import Statistics.Matrix
import qualified Data.Vector.Generic as G
import qualified Data.Vector.Unboxed as U
-- Stopping parameters for the numerical optimiser.
data MaxControl = MaxControl { maxit :: Int, epsilon :: Double } deriving (Show)

type ObjectiveF = [Double] -> Double
type GradientF = [Double] -> [Double]

-- A likelihood model turns a design matrix and a response vector into a negated
-- log-likelihood and its gradient, both as functions of the coefficient list.
type LikelihoodModel = Matrix -> Vector -> (ObjectiveF, GradientF)

defaultMaxCtrl :: MaxControl
defaultMaxCtrl = MaxControl 100 10e-2

-- Maximise the likelihood (i.e. minimise the negated log-likelihood) with BFGS.
maxLikelihood :: LikelihoodModel -> MaxControl -> Matrix -> Vector -> Vector
maxLikelihood model ctrl x y = U.fromList . fst $ bhat
  where
    bhat = uncurry (minimizeD VectorBFGS2 (epsilon ctrl) (maxit ctrl) 1.0 0.1) (model x y) sval
    sval = replicate (snd (dimension x)) (epsilon ctrl)  -- starting values, one per column

-- Negated log-likelihood and gradient of the probit model.
llprobit :: LikelihoodModel
llprobit x y = (loglik, grad)
  where
    loglik b = negate $ G.sum $ G.map contr (G.zip y (x `multiplyV` U.fromList b))
    grad b = fmap negate $ U.toList $ transpose x `multiplyV` G.map gradcontr (G.zip y (x `multiplyV` U.fromList b))
    contr (y', xb) = y' * log (cdf xb) + (1.0 - y') * log (1.0 - cdf xb)
    gradcontr (y', xb) = y' * pdf xb / cdf xb - (1 - y') * pdf xb / (1 - cdf xb)
    pdf = density (normalDistr 0 1)
    -- clamp away from 0 and 1; BUG in cumulative: range not [0,1]
    cdf z = min (max (cumulative (normalDistr 0 1) z) (epsilon defaultMaxCtrl)) (1 - epsilon defaultMaxCtrl)

-- Probit regression: maximum likelihood estimate of the coefficients.
probit :: Matrix -> Vector -> Vector
probit = maxLikelihood llprobit defaultMaxCtrl
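A quick usage sketch with hypothetical data; it assumes Statistics.Matrix's fromList takes rows, columns and a row-major list, and that this snippet sits inside the module above so that Matrix, Vector, U, fromList and probit are all in scope.

-- Hypothetical example: fit a probit model to a tiny made-up data set.
-- Column one of the design matrix is an intercept; responses are 0/1.
exampleFit :: Vector
exampleFit = probit x y
  where
    x = fromList 4 2 [1, 0.5, 1, 1.0, 1, 1.5, 1, 2.0]  -- 4x2 design matrix, row-major
    y = U.fromList [0, 1, 0, 1]                        -- binary responses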
|
Formal statement is: lemma homeomorphism_compact: fixes f :: "'a::topological_space \<Rightarrow> 'b::t2_space" assumes "compact s" "continuous_on s f" "f ` s = t" "inj_on f s" shows "\<exists>g. homeomorphism s t f g" Informal statement is: If $f$ is a continuous injective map from a compact space $S$ onto a Hausdorff space $T$ (that is, $f(S) = T$), then $f$ is a homeomorphism from $S$ to $T$. |
lemma continuous_polymonial_function: fixes f :: "'a::real_normed_vector \<Rightarrow> 'b::euclidean_space" assumes "polynomial_function f" shows "continuous (at x) f" |
lemma continuous_on_inv: fixes f :: "'a::topological_space \<Rightarrow> 'b::t2_space" assumes "continuous_on s f" and "compact s" and "\<forall>x\<in>s. g (f x) = x" shows "continuous_on (f ` s) g" |
corollary compact_uniformly_continuous: fixes f :: "'a :: metric_space \<Rightarrow> 'b :: metric_space" assumes f: "continuous_on S f" and S: "compact S" shows "uniformly_continuous_on S f" |
module Flexidisc.OrdList.HereOrNot
import Flexidisc.Dec.IsYes
import Flexidisc.OrdList.Fresh
import Flexidisc.OrdList.Label
import Flexidisc.OrdList.Nub
import Flexidisc.OrdList.Row
import Flexidisc.OrdList.Sub
import Flexidisc.OrdList.Type
%default total
||| A proof that labels that are in both lists have the same values
public export
data HereOrNot : (xs, ys : OrdList k v o) -> Type where
||| It holds for two empty `OrdList`
Empty : HereOrNot [] []
||| If an element of the first list is not in the second list, it holds
Skip : DecEq k => {xs : OrdList k v o} -> HereOrNot xs ys -> IsFresh l ys -> HereOrNot ((l, ty) :: xs) ys
||| If an element of the second list is not in the first list, it holds
Extra : DecEq k => {xs : OrdList k v o} -> HereOrNot xs ys -> IsFresh l xs -> HereOrNot xs ((l, ty) :: ys)
||| If an element is in both list, the values should be the same
Keep : HereOrNot xs ys -> HereOrNot ((l, ty) :: xs) ((l, ty) :: ys)
export
toRow : HereOrNot [(k, v)] ys -> Maybe (OrdRow k v ys)
toRow (Skip compat fresh) = Nothing
toRow (Extra compat fresh) = There <$> toRow compat
toRow (Keep compat) = Just Here
%name HereOrNot compat, can, prf
export
toSub : HereOrNot xs ys -> Maybe (Sub xs ys)
toSub Empty = Just Empty
toSub (Skip compat fresh) = Nothing
toSub (Extra compat fresh) = map Skip $ toSub compat
toSub (Keep compat) = map Keep $ toSub compat
|
/-
Copyright (c) 2019 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import tactic.basic
/-!
# Types with a unique term
In this file we define a typeclass `unique`,
which expresses that a type has a unique term.
In other words, a type that is `inhabited` and a `subsingleton`.
## Main declaration
* `unique`: a typeclass that expresses that a type has a unique term.
## Main statements
* `unique.mk'`: an inhabited subsingleton type is `unique`. This can not be an instance because it
would lead to loops in typeclass inference.
* `function.surjective.unique`: if the domain of a surjective function is `unique`, then its
codomain is `unique` as well.
* `function.injective.subsingleton`: if the codomain of an injective function is `subsingleton`,
then its domain is `subsingleton` as well.
* `function.injective.unique`: if the codomain of an injective function is `subsingleton` and its
domain is `inhabited`, then its domain is `unique`.
## Implementation details
The typeclass `unique α` is implemented as a type,
rather than a `Prop`-valued predicate,
for good definitional properties of the default term.
-/
universes u v w
variables {α : Sort u} {β : Sort v} {γ : Sort w}
/-- `unique α` expresses that `α` is a type with a unique term `default α`.
This is implemented as a type, rather than a `Prop`-valued predicate,
for good definitional properties of the default term. -/
@[ext]
structure unique (α : Sort u) extends inhabited α :=
(uniq : ∀ a:α, a = default)
attribute [class] unique
instance punit.unique : unique punit.{u} :=
{ default := punit.star,
uniq := λ x, punit_eq x _ }
lemma fin.eq_zero : ∀ n : fin 1, n = 0
| ⟨n, hn⟩ := fin.eq_of_veq (nat.eq_zero_of_le_zero (nat.le_of_lt_succ hn))
instance {n : ℕ} : inhabited (fin n.succ) := ⟨0⟩
@[simp] lemma fin.default_eq_zero (n : ℕ) : default (fin n.succ) = 0 := rfl
instance fin.unique : unique (fin 1) :=
{ uniq := fin.eq_zero, .. fin.inhabited }
namespace unique
open function
section
variables [unique α]
@[priority 100] -- see Note [lower instance priority]
instance : inhabited α := to_inhabited ‹unique α›
lemma eq_default (a : α) : a = default α := uniq _ a
lemma default_eq (a : α) : default α = a := (uniq _ a).symm
@[priority 100] -- see Note [lower instance priority]
instance : subsingleton α := subsingleton_of_forall_eq _ eq_default
lemma forall_iff {p : α → Prop} : (∀ a, p a) ↔ p (default α) :=
⟨λ h, h _, λ h x, by rwa [unique.eq_default x]⟩
lemma exists_iff {p : α → Prop} : Exists p ↔ p (default α) :=
⟨λ ⟨a, ha⟩, eq_default a ▸ ha, exists.intro (default α)⟩
end
@[ext] protected lemma subsingleton_unique' : ∀ (h₁ h₂ : unique α), h₁ = h₂
| ⟨⟨x⟩, h⟩ ⟨⟨y⟩, _⟩ := by congr; rw [h x, h y]
instance subsingleton_unique : subsingleton (unique α) :=
⟨unique.subsingleton_unique'⟩
/-- Construct `unique` from `inhabited` and `subsingleton`. Making this an instance would create
a loop in the class inheritance graph. -/
def mk' (α : Sort u) [h₁ : inhabited α] [subsingleton α] : unique α :=
{ uniq := λ x, subsingleton.elim _ _, .. h₁ }
end unique
@[simp] lemma pi.default_def {β : Π a : α, Sort v} [Π a, inhabited (β a)] :
default (Π a, β a) = λ a, default (β a) :=
rfl
lemma pi.default_apply {β : Π a : α, Sort v} [Π a, inhabited (β a)] (a : α) :
default (Π a, β a) a = default (β a) :=
rfl
instance pi.unique {β : Π a : α, Sort v} [Π a, unique (β a)] : unique (Π a, β a) :=
{ uniq := λ f, funext $ λ x, unique.eq_default _,
.. pi.inhabited α }
/-- There is a unique function on an empty domain. -/
def pi.unique_of_empty (h : α → false) (β : Π a : α, Sort v) : unique (Π a, β a) :=
{ default := λ a, (h a).elim,
uniq := λ f, funext $ λ a, (h a).elim }
/-- There is a unique function whose domain is `pempty`. -/
instance pi.pempty_unique (β : pempty.{u} → Sort v) : unique (Π a, β a) :=
pi.unique_of_empty pempty.elim β
namespace function
variable {f : α → β}
/-- If the domain of a surjective function is a singleton,
then the codomain is a singleton as well. -/
protected def surjective.unique (hf : surjective f) [unique α] : unique β :=
{ default := f (default _),
uniq := λ b, let ⟨a, ha⟩ := hf b in ha ▸ congr_arg f (unique.eq_default _) }
/-- If the codomain of an injective function is a subsingleton, then the domain
is a subsingleton as well. -/
protected lemma injective.subsingleton (hf : injective f) [subsingleton β] :
subsingleton α :=
⟨λ x y, hf $ subsingleton.elim _ _⟩
/-- If `α` is inhabited and admits an injective map to a subsingleton type, then `α` is `unique`. -/
protected def injective.unique [inhabited α] [subsingleton β] (hf : injective f) : unique α :=
@unique.mk' _ _ hf.subsingleton
end function
|
lemma measurable_const: "c \<in> space M' \<Longrightarrow> (\<lambda>x. c) \<in> measurable M M'" |
lemma linear_bounded: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::real_normed_vector" assumes lf: "linear f" shows "\<exists>B. \<forall>x. norm (f x) \<le> B * norm x" |
[STATEMENT]
lemma hn_ctxt_eq: "A x y = z \<Longrightarrow> hn_ctxt A x y = z"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A x y = z \<Longrightarrow> hn_ctxt A x y = z
[PROOF STEP]
by (simp add: hn_ctxt_def) |
"ESET researchers have been tracking the TDL4 botnet for a long time, and now we have noticed a new phase in its evolution," announced David Harley, the company's director of malware intelligence.
"Based on the analysis of its components we can say that some of those components have been rewritten from scratch (kernel-mode driver, user-mode payload) while some (specifically, some bootkit components) remain the same as in the previous versions," he noted.
TDL, also known as TDSS, is a family of rootkits characterised by complex and innovative detection evasion techniques. Back in July, malware analysts from Kaspersky Lab called TDL version 4 the most sophisticated threat in the world and estimated that the number of computers infected with it exceeded 4.5 million.
However, according to ESET's researchers, changes are now being made to the way TDL4 infects systems and ensures its hold on them. Instead of storing components within the MBR, the new variants create a hidden partition at the end of the hard disk and set it as active.
This ensures that malicious code stored on it, including a special boot loader, gets executed before the actual operating system, and that the MBR code checked by antivirus programs for unauthorised modifications remains untouched.
"The malware is able to detect corruption of the files stored in the hidden file system by calculating its CRC32 checksum and comparing it with the value stored in the file header. In the event that a file is corrupted it is removed from the file system," the ESET researchers explain.
In April, Microsoft released a Windows update that modified systems to disrupt the TDL4 infection cycle. The rootkit's authors responded half a month later with an update of their own that bypassed the patch.
Several antivirus vendors, such as Kaspersky, BitDefender and AVAST, offer free stand-alone tools that can remove TDSS and similar rootkits. However, in order to avoid getting infected in the first place, users should install an antivirus solution that provides advanced layers of protection, such as those analysing software behaviour. |
(* Author: Tobias Nipkow *)
section\<open>Isomorphisms Between Plane Graphs\<close>
theory PlaneGraphIso
imports Main Quasi_Order
begin
(* FIXME globalize *)
lemma image_image_id_if[simp]: "(\<And>x. f(f x) = x) \<Longrightarrow> f ` f ` M = M"
by (auto simp: image_iff)
declare not_None_eq [iff] not_Some_eq [iff]
text\<open>The symbols \<open>\<cong>\<close> and \<open>\<simeq>\<close> are overloaded. They
denote congruence and isomorphism on arbitrary types. On lists
(representing faces of graphs), \<open>\<cong>\<close> means congruence modulo
rotation; \<open>\<simeq>\<close> is currently unused. On graphs, \<open>\<simeq>\<close>
means isomorphism and is a weaker version of \<open>\<cong>\<close> (proper
isomorphism): \<open>\<simeq>\<close> also allows reversing the orientation of
all faces.\<close>
consts
pr_isomorphic :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<cong>" 60)
(* isomorphic :: "'a \<Rightarrow> 'a \<Rightarrow> bool" (infix "\<simeq>" 60)
*)
(*
definition "congs" :: "'a list \<Rightarrow> 'a list \<Rightarrow> bool" (infix "\<cong>" 60) where
"F\<^sub>1 \<cong> (F\<^sub>2::'a list) \<equiv> \<exists>n. F\<^sub>2 = rotate n F\<^sub>1"
*)
definition Iso :: "('a list * 'a list) set" ("{\<cong>}") where
"{\<cong>} \<equiv> {(F\<^sub>1, F\<^sub>2). F\<^sub>1 \<cong> F\<^sub>2}"
text\<open>A plane graph is a set or list (for executability) of faces
(hence \<open>Fgraph\<close> and \<open>fgraph\<close>) and a face is a list of
nodes:\<close>
type_synonym 'a Fgraph = "'a list set"
type_synonym 'a fgraph = "'a list list"
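(* Illustration, not part of the original theory: a single triangle on the nodes 1, 2 and 3,
   viewed as a plane graph, has two faces (the inner one and the unbounded outer one) and
   could be written as the fgraph [[1, 2, 3], [3, 2, 1]], the outer face carrying the
   reverse orientation. *)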
subsection\<open>Equivalence of faces\<close>
text\<open>Two faces are equivalent modulo rotation:\<close>
overloading "congs" \<equiv> "pr_isomorphic :: 'a list \<Rightarrow> 'a list \<Rightarrow> bool"
begin
definition "F\<^sub>1 \<cong> (F\<^sub>2::'a list) \<equiv> \<exists>n. F\<^sub>2 = rotate n F\<^sub>1"
end
lemma congs_refl[iff]: "(xs::'a list) \<cong> xs"
apply(simp add:congs_def)
apply(rule_tac x = 0 in exI)
apply (simp)
done
lemma congs_sym: assumes A: "(xs::'a list) \<cong> ys" shows "ys \<cong> xs"
proof (simp add:congs_def)
let ?l = "length xs"
from A obtain n where ys: "ys = rotate n xs" by(fastforce simp add:congs_def)
have "xs = rotate ?l xs" by simp
also have "\<dots> = rotate (?l - n mod ?l + n mod ?l) xs"
proof (cases)
assume "xs = []" thus ?thesis by simp
next
assume "xs \<noteq> []"
hence "n mod ?l < ?l" by simp
hence "?l = ?l - n mod ?l + n mod ?l" by arith
thus ?thesis by simp
qed
also have "\<dots> = rotate (?l - n mod ?l) (rotate (n mod ?l) xs)"
by(simp add:rotate_rotate)
also have "rotate (n mod ?l) xs = rotate n xs"
by(rule rotate_conv_mod[symmetric])
finally show "\<exists>m. xs = rotate m ys" by(fastforce simp add:ys)
qed
lemma congs_trans: "(xs::'a list) \<cong> ys \<Longrightarrow> ys \<cong> zs \<Longrightarrow> xs \<cong> zs"
apply(clarsimp simp:congs_def rotate_def)
apply(rename_tac m n)
apply(rule_tac x = "n+m" in exI)
apply (simp add:funpow_add)
done
lemma equiv_EqF: "equiv (UNIV::'a list set) {\<cong>}"
apply(unfold equiv_def sym_def trans_def refl_on_def)
apply(rule conjI)
apply simp
apply(rule conjI)
apply(fastforce intro:congs_sym)
apply(fastforce intro:congs_trans)
done
lemma congs_distinct:
"F\<^sub>1 \<cong> F\<^sub>2 \<Longrightarrow> distinct F\<^sub>2 = distinct F\<^sub>1"
by (auto simp: congs_def)
lemma congs_length:
"F\<^sub>1 \<cong> F\<^sub>2 \<Longrightarrow> length F\<^sub>2 = length F\<^sub>1"
by (auto simp: congs_def)
lemma congs_pres_nodes: "F\<^sub>1 \<cong> F\<^sub>2 \<Longrightarrow> set F\<^sub>1 = set F\<^sub>2"
by(clarsimp simp:congs_def)
lemma congs_map:
"F\<^sub>1 \<cong> F\<^sub>2 \<Longrightarrow> map f F\<^sub>1 \<cong> map f F\<^sub>2"
by (auto simp: congs_def rotate_map)
lemma congs_map_eq_iff:
"inj_on f (set xs \<union> set ys) \<Longrightarrow> (map f xs \<cong> map f ys) = (xs \<cong> ys)"
apply(simp add:congs_def)
apply(rule iffI)
apply(clarsimp simp: rotate_map)
apply(drule map_inj_on)
apply(simp add:Un_commute)
apply (fastforce)
apply clarsimp
apply(fastforce simp: rotate_map)
done
lemma list_cong_rev_iff[simp]:
"(rev xs \<cong> rev ys) = (xs \<cong> ys)"
apply(simp add:congs_def rotate_rev)
apply(rule iffI)
apply fast
apply clarify
apply(cases "length xs = 0")
apply simp
apply(case_tac "n mod length xs = 0")
apply(rule_tac x = "n" in exI)
apply simp
apply(subst rotate_conv_mod)
apply(rule_tac x = "length xs - n mod length xs" in exI)
apply simp
done
lemma singleton_list_cong_eq_iff[simp]:
"({xs::'a list} // {\<cong>} = {ys} // {\<cong>}) = (xs \<cong> ys)"
by(simp add: eq_equiv_class_iff2[OF equiv_EqF])
subsection\<open>Homomorphism and isomorphism\<close>
definition is_pr_Hom :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a Fgraph \<Rightarrow> 'b Fgraph \<Rightarrow> bool" where
"is_pr_Hom \<phi> Fs\<^sub>1 Fs\<^sub>2 \<equiv> (map \<phi> ` Fs\<^sub>1)//{\<cong>} = Fs\<^sub>2 //{\<cong>}"
definition is_pr_Iso :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a Fgraph \<Rightarrow> 'b Fgraph \<Rightarrow> bool" where
"is_pr_Iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<equiv> is_pr_Hom \<phi> Fs\<^sub>1 Fs\<^sub>2 \<and> inj_on \<phi> (\<Union>F \<in> Fs\<^sub>1. set F)"
definition is_pr_iso :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<equiv> is_pr_Iso \<phi> (set Fs\<^sub>1) (set Fs\<^sub>2)"
text\<open>Homomorphisms preserve the set of nodes.\<close>
lemma UN_subset_iff: "((\<Union>i\<in>I. f i) \<subseteq> B) = (\<forall>i\<in>I. f i \<subseteq> B)"
by blast
declare Image_Collect_case_prod[simp del]
lemma pr_Hom_pres_face_nodes:
"is_pr_Hom \<phi> Fs\<^sub>1 Fs\<^sub>2 \<Longrightarrow> (\<Union>F\<in>Fs\<^sub>1. {\<phi> ` (set F)}) = (\<Union>F\<in>Fs\<^sub>2. {set F})"
supply image_cong_simp [cong del]
apply(clarsimp simp:is_pr_Hom_def quotient_def)
apply auto
apply(subgoal_tac "\<exists>F' \<in> Fs\<^sub>2. {\<cong>} `` {map \<phi> F} = {\<cong>} `` {F'}")
prefer 2 apply blast
apply (fastforce simp: eq_equiv_class_iff[OF equiv_EqF] dest!:congs_pres_nodes)
apply(subgoal_tac "\<exists>F' \<in> Fs\<^sub>1. {\<cong>} `` {map \<phi> F'} = {\<cong>} `` {F}")
apply (fastforce simp: eq_equiv_class_iff[OF equiv_EqF] dest!:congs_pres_nodes)
apply (erule equalityE)
apply(fastforce simp:UN_subset_iff)
done
lemma pr_Hom_pres_nodes:
assumes "is_pr_Hom \<phi> Fs\<^sub>1 Fs\<^sub>2"
shows "\<phi> ` (\<Union>F\<in>Fs\<^sub>1. set F) = (\<Union>F\<in>Fs\<^sub>2. set F)"
proof
from assms have *: "(\<Union>F\<in>Fs\<^sub>1. {\<phi> ` set F}) = (\<Union>F\<in>Fs\<^sub>2. {set F})"
by (rule pr_Hom_pres_face_nodes)
then show "\<phi> ` (\<Union>F\<in>Fs\<^sub>1. set F) \<subseteq> (\<Union>F\<in>Fs\<^sub>2. set F)"
by blast
show "(\<Union>F\<in>Fs\<^sub>2. set F) \<subseteq> \<phi> ` (\<Union>F\<in>Fs\<^sub>1. set F)"
proof
fix x
assume "x \<in> (\<Union>F\<in>Fs\<^sub>2. set F)"
then obtain F where "F \<in> Fs\<^sub>2" and "x \<in> set F" ..
then have "set F \<in> (\<Union>F\<in>Fs\<^sub>2. {set F})"
by blast
then have "set F \<in> (\<Union>F\<in>Fs\<^sub>1. {\<phi> ` set F})"
using * by simp
then obtain F' where "F' \<in> Fs\<^sub>1" and "set F \<in> {\<phi> ` set F'}" ..
with \<open>x \<in> set F\<close> show "x \<in> \<phi> ` (\<Union>F\<in>Fs\<^sub>1. set F)"
by auto
qed
qed
text\<open>Therefore isomorphisms preserve cardinality of node set.\<close>
lemma pr_Iso_same_no_nodes:
"\<lbrakk> is_pr_Iso \<phi> Fs\<^sub>1 Fs\<^sub>2; finite Fs\<^sub>1 \<rbrakk>
\<Longrightarrow> card(\<Union>F\<in>Fs\<^sub>1. set F) = card(\<Union>F\<in>Fs\<^sub>2. set F)"
by(clarsimp simp add: is_pr_Iso_def pr_Hom_pres_nodes[symmetric] card_image)
lemma pr_iso_same_no_nodes:
"is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<Longrightarrow> card(\<Union>F\<in>set Fs\<^sub>1. set F) = card(\<Union>F\<in>set Fs\<^sub>2. set F)"
by(simp add: is_pr_iso_def pr_Iso_same_no_nodes)
text\<open>Isomorphisms preserve the number of faces.\<close>
lemma pr_iso_same_no_faces:
assumes dist1: "distinct Fs\<^sub>1" and dist2: "distinct Fs\<^sub>2"
and inj1: "inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1)"
and inj2: "inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2)" and iso: "is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2"
shows "length Fs\<^sub>1 = length Fs\<^sub>2"
proof -
have injphi: "\<forall>F\<in>set Fs\<^sub>1. \<forall>F'\<in>set Fs\<^sub>1. inj_on \<phi> (set F \<union> set F')" using iso
by(auto simp:is_pr_iso_def is_pr_Iso_def is_pr_Hom_def inj_on_def)
have inj1': "inj_on (\<lambda>xs. {xs} // {\<cong>}) (map \<phi> ` set Fs\<^sub>1)"
apply(rule inj_on_imageI)
apply(simp add:inj_on_def quotient_def eq_equiv_class_iff[OF equiv_EqF])
apply(simp add: congs_map_eq_iff injphi)
using inj1
apply(simp add:inj_on_def quotient_def eq_equiv_class_iff[OF equiv_EqF])
done
have "length Fs\<^sub>1 = card(set Fs\<^sub>1)" by(simp add:distinct_card[OF dist1])
also have "\<dots> = card(map \<phi> ` set Fs\<^sub>1)" using iso
by(auto simp:is_pr_iso_def is_pr_Iso_def is_pr_Hom_def inj_on_mapI card_image)
also have "\<dots> = card((map \<phi> ` set Fs\<^sub>1) // {\<cong>})"
by(simp add: card_quotient_disjoint[OF _ inj1'])
also have "(map \<phi> ` set Fs\<^sub>1)//{\<cong>} = set Fs\<^sub>2 // {\<cong>}"
using iso by(simp add: is_pr_iso_def is_pr_Iso_def is_pr_Hom_def)
also have "card(\<dots>) = card(set Fs\<^sub>2)"
by(simp add: card_quotient_disjoint[OF _ inj2])
also have "\<dots> = length Fs\<^sub>2" by(simp add:distinct_card[OF dist2])
finally show ?thesis .
qed
lemma is_Hom_distinct:
"\<lbrakk> is_pr_Hom \<phi> Fs\<^sub>1 Fs\<^sub>2; \<forall>F\<in>Fs\<^sub>1. distinct F; \<forall>F\<in>Fs\<^sub>2. distinct F \<rbrakk>
\<Longrightarrow> \<forall>F\<in>Fs\<^sub>1. distinct(map \<phi> F)"
apply(clarsimp simp add:is_pr_Hom_def)
apply(subgoal_tac "\<exists> F' \<in> Fs\<^sub>2. (map \<phi> F, F') : {\<cong>}")
apply(fastforce simp add: congs_def)
apply(subgoal_tac "\<exists> F' \<in> Fs\<^sub>2. {map \<phi> F}//{\<cong>} = {F'}//{\<cong>}")
apply clarify
apply(rule_tac x = F' in bexI)
apply(rule eq_equiv_class[OF _ equiv_EqF])
apply(simp add:singleton_quotient)
apply blast
apply assumption
apply(simp add:quotient_def)
apply(rotate_tac 1)
apply blast
done
lemma Collect_congs_eq_iff[simp]:
"Collect ((\<cong>) x) = Collect ((\<cong>) y) \<longleftrightarrow> (x \<cong> (y::'a list))"
using eq_equiv_class_iff2[OF equiv_EqF]
apply(simp add: quotient_def Iso_def)
apply blast
done
lemma is_pr_Hom_trans: assumes f: "is_pr_Hom f A B" and g: "is_pr_Hom g B C"
shows "is_pr_Hom (g \<circ> f) A C"
proof-
from f have f1: "\<forall>a\<in>A. \<exists>b\<in>B. map f a \<cong> b"
apply(simp add: is_pr_Hom_def quotient_def Iso_def)
apply(erule equalityE)
apply blast
done
from f have f2: "\<forall>b\<in>B. \<exists>a\<in>A. map f a \<cong> b"
apply(simp add: is_pr_Hom_def quotient_def Iso_def)
apply(erule equalityE)
apply blast
done
from g have g1: "\<forall>b\<in>B. \<exists>c\<in>C. map g b \<cong> c"
apply(simp add: is_pr_Hom_def quotient_def Iso_def)
apply(erule equalityE)
apply blast
done
from g have g2: "\<forall>c\<in>C. \<exists>b\<in>B. map g b \<cong> c"
apply(simp add: is_pr_Hom_def quotient_def Iso_def)
apply(erule equalityE)
apply blast
done
show ?thesis
apply(auto simp add: is_pr_Hom_def quotient_def Iso_def Image_def
map_comp_map[symmetric] image_comp simp del: map_map map_comp_map)
apply (metis congs_map[of _ _ g] congs_trans f1 g1)
by (metis congs_map[of _ _ g] congs_sym congs_trans f2 g2)
qed
lemma is_pr_Hom_rev:
"is_pr_Hom \<phi> A B \<Longrightarrow> is_pr_Hom \<phi> (rev ` A) (rev ` B)"
apply(auto simp add: is_pr_Hom_def quotient_def Image_def Iso_def rev_map[symmetric])
apply(erule equalityE)
apply blast
apply(erule equalityE)
apply blast
done
text\<open>A kind of recursion rule, a first step towards executability:\<close>
lemma is_pr_Iso_rec:
"\<lbrakk> inj_on (\<lambda>xs. {xs}//{\<cong>}) Fs\<^sub>1; inj_on (\<lambda>xs. {xs}//{\<cong>}) Fs\<^sub>2; F\<^sub>1 \<in> Fs\<^sub>1 \<rbrakk> \<Longrightarrow>
is_pr_Iso \<phi> Fs\<^sub>1 Fs\<^sub>2 =
(\<exists>F\<^sub>2 \<in> Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and> is_pr_Iso \<phi> (Fs\<^sub>1 - {F\<^sub>1}) (Fs\<^sub>2 - {F\<^sub>2})
\<and> (\<exists>n. map \<phi> F\<^sub>1 = rotate n F\<^sub>2)
\<and> inj_on \<phi> (\<Union>F\<in>Fs\<^sub>1. set F))"
apply(drule mk_disjoint_insert[of F\<^sub>1])
apply clarify
apply(rename_tac Fs\<^sub>1')
apply(rule iffI)
apply (clarsimp simp add:is_pr_Iso_def)
apply(clarsimp simp:is_pr_Hom_def quotient_diff1)
apply(drule_tac s="a // b" for a b in sym)
apply(clarsimp)
apply(subgoal_tac "{\<cong>} `` {map \<phi> F\<^sub>1} : Fs\<^sub>2 // {\<cong>}")
prefer 2 apply(simp add:quotient_def)
apply(erule quotientE)
apply(rename_tac F\<^sub>2)
apply(drule eq_equiv_class[OF _ equiv_EqF])
apply blast
apply(rule_tac x = F\<^sub>2 in bexI)
prefer 2 apply assumption
apply(rule conjI)
apply(clarsimp simp: congs_def)
apply(rule conjI)
apply(subgoal_tac "{\<cong>} `` {F\<^sub>2} = {\<cong>} `` {map \<phi> F\<^sub>1}")
prefer 2
apply(rule equiv_class_eq[OF equiv_EqF])
apply(fastforce intro: congs_sym)
apply(subgoal_tac "{F\<^sub>2}//{\<cong>} = {map \<phi> F\<^sub>1}//{\<cong>}")
prefer 2 apply(simp add:singleton_quotient)
apply(subgoal_tac "\<forall>F\<in>Fs\<^sub>1'. \<not> (map \<phi> F) \<cong> (map \<phi> F\<^sub>1)")
apply(fastforce simp:Iso_def quotient_def Image_Collect_case_prod simp del: Collect_congs_eq_iff
dest!: eq_equiv_class[OF _ equiv_EqF])
apply clarify
apply(subgoal_tac "inj_on \<phi> (set F \<union> set F\<^sub>1)")
prefer 2
apply(erule subset_inj_on)
apply(blast)
apply(clarsimp simp add:congs_map_eq_iff)
apply(subgoal_tac "{\<cong>} `` {F\<^sub>1} = {\<cong>} `` {F}")
apply(simp add:singleton_quotient)
apply(rule equiv_class_eq[OF equiv_EqF])
apply(blast intro:congs_sym)
apply(subgoal_tac "F\<^sub>2 \<cong> (map \<phi> F\<^sub>1)")
apply (simp add:congs_def inj_on_Un)
apply(clarsimp intro!:congs_sym)
apply(clarsimp simp add: is_pr_Iso_def is_pr_Hom_def quotient_diff1)
apply (simp add:singleton_quotient)
apply(subgoal_tac "F\<^sub>2 \<cong> (map \<phi> F\<^sub>1)")
prefer 2 apply(fastforce simp add:congs_def)
apply(subgoal_tac "{\<cong>}``{map \<phi> F\<^sub>1} = {\<cong>}``{F\<^sub>2}")
prefer 2
apply(rule equiv_class_eq[OF equiv_EqF])
apply(fastforce intro:congs_sym)
apply(subgoal_tac "{\<cong>}``{F\<^sub>2} \<in> Fs\<^sub>2 // {\<cong>}")
prefer 2 apply(erule quotientI)
apply (simp add:insert_absorb quotient_def)
done
lemma is_iso_Cons:
"\<lbrakk> distinct (F\<^sub>1#Fs\<^sub>1'); distinct Fs\<^sub>2;
inj_on (\<lambda>xs.{xs}//{\<cong>}) (set(F\<^sub>1#Fs\<^sub>1')); inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk>
\<Longrightarrow>
is_pr_iso \<phi> (F\<^sub>1#Fs\<^sub>1') Fs\<^sub>2 =
(\<exists>F\<^sub>2 \<in> set Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and> is_pr_iso \<phi> Fs\<^sub>1' (remove1 F\<^sub>2 Fs\<^sub>2)
\<and> (\<exists>n. map \<phi> F\<^sub>1 = rotate n F\<^sub>2)
\<and> inj_on \<phi> (set F\<^sub>1 \<union> (\<Union>F\<in>set Fs\<^sub>1'. set F)))"
apply(simp add:is_pr_iso_def)
apply(subst is_pr_Iso_rec[where ?F\<^sub>1.0 = F\<^sub>1])
apply(simp_all)
done
subsection\<open>Isomorphism tests\<close>
lemma map_upd_submap:
"x \<notin> dom m \<Longrightarrow> (m(x \<mapsto> y) \<subseteq>\<^sub>m m') = (m' x = Some y \<and> m \<subseteq>\<^sub>m m')"
apply(simp add:map_le_def dom_def)
apply(rule iffI)
apply(rule conjI) apply (blast intro:sym)
apply clarify
apply(case_tac "a=x")
apply auto
done
lemma map_of_zip_submap: "\<lbrakk> length xs = length ys; distinct xs \<rbrakk> \<Longrightarrow>
(map_of (zip xs ys) \<subseteq>\<^sub>m Some \<circ> f) = (map f xs = ys)"
apply(induct rule: list_induct2)
apply(simp)
apply (clarsimp simp: map_upd_submap simp del:o_apply fun_upd_apply)
apply simp
done
primrec pr_iso_test0 :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"pr_iso_test0 m [] Fs\<^sub>2 = (Fs\<^sub>2 = [])"
| "pr_iso_test0 m (F\<^sub>1#Fs\<^sub>1) Fs\<^sub>2 =
(\<exists>F\<^sub>2 \<in> set Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and>
(\<exists>n. let m' = map_of(zip F\<^sub>1 (rotate n F\<^sub>2)) in
if m \<subseteq>\<^sub>m m ++ m' \<and> inj_on (m++m') (dom(m++m'))
then pr_iso_test0 (m ++ m') Fs\<^sub>1 (remove1 F\<^sub>2 Fs\<^sub>2) else False))"
lemma map_compatI: "\<lbrakk> f \<subseteq>\<^sub>m Some \<circ> h; g \<subseteq>\<^sub>m Some \<circ> h \<rbrakk> \<Longrightarrow> f \<subseteq>\<^sub>m f++g"
by (fastforce simp add: map_le_def map_add_def dom_def split:option.splits)
lemma inj_on_map_addI1:
"\<lbrakk> inj_on m A; m \<subseteq>\<^sub>m m++m'; A \<subseteq> dom m \<rbrakk> \<Longrightarrow> inj_on (m++m') A"
apply (clarsimp simp add: inj_on_def map_add_def map_le_def dom_def
split:option.splits)
apply(rule conjI)
apply fastforce
apply auto
apply fastforce
apply (rename_tac x a y)
apply(subgoal_tac "m x = Some a")
prefer 2 apply (fastforce)
apply(subgoal_tac "m y = Some a")
prefer 2 apply (fastforce)
apply(subgoal_tac "m x = m y")
prefer 2 apply simp
apply (blast)
done
lemma map_image_eq: "\<lbrakk> A \<subseteq> dom m; m \<subseteq>\<^sub>m m' \<rbrakk> \<Longrightarrow> m ` A = m' ` A"
by(force simp:map_le_def dom_def split:option.splits)
lemma inj_on_map_add_Un:
"\<lbrakk> inj_on m (dom m); inj_on m' (dom m'); m \<subseteq>\<^sub>m Some \<circ> f; m' \<subseteq>\<^sub>m Some \<circ> f;
inj_on f (dom m' \<union> dom m); A = dom m'; B = dom m \<rbrakk>
\<Longrightarrow> inj_on (m ++ m') (A \<union> B)"
apply(simp add:inj_on_Un)
apply(rule conjI)
apply(fastforce intro!: inj_on_map_addI1 map_compatI)
apply(clarify)
apply(subgoal_tac "m ++ m' \<subseteq>\<^sub>m Some \<circ> f")
prefer 2 apply(fast intro:map_add_le_mapI map_compatI)
apply(subgoal_tac "dom m' - dom m \<subseteq> dom(m++m')")
prefer 2 apply(fastforce)
apply(insert map_image_eq[of "dom m' - dom m" "m++m'" "Some \<circ> f"])
apply(subgoal_tac "dom m - dom m' \<subseteq> dom(m++m')")
prefer 2 apply(fastforce)
apply(insert map_image_eq[of "dom m - dom m'" "m++m'" "Some \<circ> f"])
apply (clarsimp simp add: image_comp [symmetric])
apply blast
done
lemma map_of_zip_eq_SomeD: "length xs = length ys \<Longrightarrow>
map_of (zip xs ys) x = Some y \<Longrightarrow> y \<in> set ys"
apply(induct rule:list_induct2)
apply simp
apply (auto split:if_splits)
done
lemma inj_on_map_of_zip:
"\<lbrakk> length xs = length ys; distinct ys \<rbrakk>
\<Longrightarrow> inj_on (map_of (zip xs ys)) (set xs)"
apply(induct rule:list_induct2)
apply simp
apply clarsimp
apply(rule conjI)
apply(erule inj_on_fun_updI)
apply(simp add:image_def)
apply clarsimp
apply(drule (1) map_of_zip_eq_SomeD[OF _ sym])
apply fast
apply(clarsimp simp add:image_def)
apply(drule (1) map_of_zip_eq_SomeD[OF _ sym])
apply fast
done
lemma pr_iso_test0_correct: "\<And>m Fs\<^sub>2.
\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2); inj_on m (dom m) \<rbrakk> \<Longrightarrow>
pr_iso_test0 m Fs\<^sub>1 Fs\<^sub>2 =
(\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<and> m \<subseteq>\<^sub>m Some \<circ> \<phi> \<and>
inj_on \<phi> (dom m \<union> (\<Union>F\<in>set Fs\<^sub>1. set F)))"
apply(induct Fs\<^sub>1)
apply(simp add:inj_on_def dom_def)
apply(rule iffI)
apply (simp add:is_pr_iso_def is_pr_Iso_def is_pr_Hom_def)
apply(rule_tac x = "the \<circ> m" in exI)
apply (fastforce simp: map_le_def)
apply (clarsimp simp:is_pr_iso_def is_pr_Iso_def is_pr_Hom_def)
apply(rename_tac F\<^sub>1 Fs\<^sub>1' m Fs\<^sub>2)
apply(clarsimp simp:Let_def Ball_def)
apply(simp add: is_iso_Cons)
apply(rule iffI)
apply clarify
apply(clarsimp simp add:map_of_zip_submap inj_on_diff)
apply(rule_tac x = \<phi> in exI)
apply(rule conjI)
apply(rule_tac x = F\<^sub>2 in bexI)
prefer 2 apply assumption
apply(frule map_add_le_mapE)
apply(simp add:map_of_zip_submap is_pr_iso_def is_pr_Iso_def)
apply(rule conjI)
apply blast
apply(erule subset_inj_on)
apply blast
apply(rule conjI)
apply(blast intro: map_le_trans)
apply(erule subset_inj_on)
apply blast
apply(clarsimp simp: inj_on_diff)
apply(rule_tac x = F\<^sub>2 in bexI)
prefer 2 apply assumption
apply simp
apply(rule_tac x = n in exI)
apply(rule conjI)
apply clarsimp
apply(rule_tac x = \<phi> in exI)
apply simp
apply(rule conjI)
apply(fastforce intro!:map_add_le_mapI simp:map_of_zip_submap)
apply(simp add:Un_ac)
apply(rule context_conjI)
apply(simp add:map_of_zip_submap[symmetric])
apply(erule (1) map_compatI)
apply(simp add:map_of_zip_submap[symmetric])
apply(erule inj_on_map_add_Un)
apply(simp add:inj_on_map_of_zip)
apply assumption
apply assumption
apply simp
apply(erule subset_inj_on)
apply fast
apply simp
apply(rule refl)
done
corollary pr_iso_test0_corr:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
pr_iso_test0 Map.empty Fs\<^sub>1 Fs\<^sub>2 = (\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2)"
apply(subst pr_iso_test0_correct)
apply assumption+
apply simp
apply (simp add:is_pr_iso_def is_pr_Iso_def)
done
text\<open>Now we bound the number of rotations needed. We have to exclude
the empty face @{term"[]"} to be able to restrict the search to
@{prop"n < length xs"} (which would otherwise be vacuous).\<close>
primrec pr_iso_test1 :: "('a \<rightharpoonup> 'b) \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"pr_iso_test1 m [] Fs\<^sub>2 = (Fs\<^sub>2 = [])"
| "pr_iso_test1 m (F\<^sub>1#Fs\<^sub>1) Fs\<^sub>2 =
(\<exists>F\<^sub>2 \<in> set Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and>
(\<exists>n < length F\<^sub>2. let m' = map_of(zip F\<^sub>1 (rotate n F\<^sub>2)) in
if m \<subseteq>\<^sub>m m ++ m' \<and> inj_on (m++m') (dom(m++m'))
then pr_iso_test1 (m ++ m') Fs\<^sub>1 (remove1 F\<^sub>2 Fs\<^sub>2) else False))"
lemma test0_conv_test1:
"\<And>m Fs\<^sub>2. [] \<notin> set Fs\<^sub>2 \<Longrightarrow> pr_iso_test1 m Fs\<^sub>1 Fs\<^sub>2 = pr_iso_test0 m Fs\<^sub>1 Fs\<^sub>2"
apply(induct Fs\<^sub>1)
apply simp
apply simp
apply(rule iffI)
apply blast
apply (clarsimp simp:Let_def)
apply(rule_tac x = F\<^sub>2 in bexI)
prefer 2 apply assumption
apply simp
apply(subgoal_tac "F\<^sub>2 \<noteq> []")
prefer 2 apply blast
apply(rule_tac x = "n mod length F\<^sub>2" in exI)
apply(simp add:rotate_conv_mod[symmetric])
done
text\<open>Thus correctness carries over to \<open>pr_iso_test1\<close>:\<close>
corollary pr_iso_test1_corr:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F; [] \<notin> set Fs\<^sub>2;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
pr_iso_test1 Map.empty Fs\<^sub>1 Fs\<^sub>2 = (\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2)"
by(simp add: test0_conv_test1 pr_iso_test0_corr)
subsubsection\<open>Implementing maps by lists\<close>
text\<open>Maps are represented by lists of pairs with no repetition in the
first or second component.\<close>
definition oneone :: "('a * 'b)list \<Rightarrow> bool" where
"oneone xys \<equiv> distinct(map fst xys) \<and> distinct(map snd xys)"
declare oneone_def[simp]
type_synonym
('a,'b)tester = "('a * 'b)list \<Rightarrow> ('a * 'b)list \<Rightarrow> bool"
type_synonym
('a,'b)merger = "('a * 'b)list \<Rightarrow> ('a * 'b)list \<Rightarrow> ('a * 'b)list"
primrec pr_iso_test2 :: "('a,'b)tester \<Rightarrow> ('a,'b)merger \<Rightarrow>
('a * 'b)list \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"pr_iso_test2 tst mrg I [] Fs\<^sub>2 = (Fs\<^sub>2 = [])"
| "pr_iso_test2 tst mrg I (F\<^sub>1#Fs\<^sub>1) Fs\<^sub>2 =
(\<exists>F\<^sub>2 \<in> set Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and>
(\<exists>n < length F\<^sub>2. let I' = zip F\<^sub>1 (rotate n F\<^sub>2) in
if tst I' I
then pr_iso_test2 tst mrg (mrg I' I) Fs\<^sub>1 (remove1 F\<^sub>2 Fs\<^sub>2) else False))"
lemma notin_range_map_of:
"y \<notin> snd ` set xys \<Longrightarrow> Some y \<notin> range(map_of xys)"
apply(induct xys)
apply (simp add:image_def)
apply(clarsimp split:if_splits)
done
lemma inj_on_map_upd:
"\<lbrakk> inj_on m (dom m); Some y \<notin> range m \<rbrakk> \<Longrightarrow> inj_on (m(x\<mapsto>y)) (dom m)"
apply(simp add:inj_on_def dom_def image_def)
apply (blast intro:sym)
done
lemma lem: "Ball (set xs) P \<Longrightarrow> Ball (set (remove1 x xs)) P = True"
by(induct xs) simp_all
lemma pr_iso_test2_conv_1:
"\<And>I Fs\<^sub>2.
\<lbrakk> \<forall>I I'. oneone I \<longrightarrow> oneone I' \<longrightarrow>
tst I' I = (let m = map_of I; m' = map_of I'
in m \<subseteq>\<^sub>m m ++ m' \<and> inj_on (m++m') (dom(m++m')));
\<forall>I I'. oneone I \<longrightarrow> oneone I' \<longrightarrow> tst I' I
\<longrightarrow> map_of(mrg I' I) = map_of I ++ map_of I';
\<forall>I I'. oneone I \<and> oneone I' \<longrightarrow> tst I' I \<longrightarrow> oneone (mrg I' I);
oneone I;
\<forall>F \<in> set Fs\<^sub>1. distinct F; \<forall>F \<in> set Fs\<^sub>2. distinct F \<rbrakk> \<Longrightarrow>
pr_iso_test2 tst mrg I Fs\<^sub>1 Fs\<^sub>2 = pr_iso_test1 (map_of I) Fs\<^sub>1 Fs\<^sub>2"
apply(induct Fs\<^sub>1)
apply simp
apply(simp add:Let_def lem inj_on_map_of_zip del: mod_less cong: conj_cong)
done
text\<open>A simple implementation\<close>
definition compat :: "('a,'b)tester" where
"compat I I' ==
\<forall>(x,y) \<in> set I. \<forall>(x',y') \<in> set I'. (x = x') = (y = y')"
lemma image_map_upd:
"x \<notin> dom m \<Longrightarrow> m(x\<mapsto>y) ` A = m ` (A-{x}) \<union> (if x \<in> A then {Some y} else {})"
by(auto simp:image_def dom_def)
lemma image_map_of_conv_Image:
"\<And>A. \<lbrakk> distinct(map fst xys) \<rbrakk>
\<Longrightarrow> map_of xys ` A = Some ` (set xys `` A) \<union> (if A \<subseteq> fst ` set xys then {} else {None})"
supply image_cong_simp [cong del]
apply (induct xys)
apply (simp add:image_def Image_def Collect_conv_if)
apply (simp add:image_map_upd dom_map_of_conv_image_fst)
apply(erule thin_rl)
apply (clarsimp simp:image_def Image_def)
apply((rule conjI, clarify)+, fastforce)
apply fastforce
apply(clarify)
apply((rule conjI, clarify)+, fastforce)
apply fastforce
apply fastforce
apply fastforce
done
lemma [simp]: "m++m' ` (dom m' - A) = m' ` (dom m' - A)"
apply(clarsimp simp add:map_add_def image_def dom_def inj_on_def split:option.splits)
apply auto
apply (blast intro:sym)
apply (blast intro:sym)
apply (rule_tac x = xa in bexI)
prefer 2 apply blast
apply simp
done
declare Diff_subset [iff]
lemma compat_correct:
"\<lbrakk> oneone I; oneone I' \<rbrakk> \<Longrightarrow>
compat I' I = (let m = map_of I; m' = map_of I'
in m \<subseteq>\<^sub>m m ++ m' \<and> inj_on (m++m') (dom(m++m')))"
apply(simp add: compat_def Let_def map_le_iff_map_add_commute)
apply(rule iffI)
apply(rule context_conjI)
apply(rule ext)
apply (fastforce simp add:map_add_def split:option.split)
apply(simp add:inj_on_Un)
apply(drule sym)
apply simp
apply(simp add: dom_map_of_conv_image_fst image_map_of_conv_Image)
apply(simp add: image_def Image_def)
apply fastforce
apply clarsimp
apply(rename_tac a b aa ba)
apply(rule iffI)
apply (clarsimp simp: fun_eq_iff)
apply(erule_tac x = aa in allE)
apply (simp add:map_add_def)
apply (clarsimp simp:dom_map_of_conv_image_fst)
apply(simp (no_asm_use) add:inj_on_def)
apply(drule_tac x = a in bspec)
apply force
apply(drule_tac x = aa in bspec)
apply force
apply(erule mp)
apply simp
apply(drule sym)
apply simp
done
corollary compat_corr:
"\<forall>I I'. oneone I \<longrightarrow> oneone I' \<longrightarrow>
compat I' I = (let m = map_of I; m' = map_of I'
in m \<subseteq>\<^sub>m m ++ m' \<and> inj_on (m++m') (dom(m++m')))"
by(simp add: compat_correct)
definition merge0 :: "('a,'b)merger" where
"merge0 I' I \<equiv> [xy \<leftarrow> I'. fst xy \<notin> fst ` set I] @ I"
lemma help1:
"distinct(map fst xys) \<Longrightarrow> map_of (filter P xys) =
map_of xys |` {x. \<exists>y. (x,y) \<in> set xys \<and> P(x,y)}"
apply(induct xys)
apply simp
apply(rule ext)
apply (simp add:restrict_map_def)
apply force
done
lemma merge0_correct:
"\<forall>I I'. oneone I \<longrightarrow> oneone I' \<longrightarrow> compat I' I
\<longrightarrow> map_of(merge0 I' I) = map_of I ++ map_of I'"
apply(simp add:compat_def merge0_def help1 fun_eq_iff map_add_def restrict_map_def split:option.split)
apply fastforce
done
lemma merge0_inv:
"\<forall>I I'. oneone I \<and> oneone I' \<longrightarrow> compat I' I \<longrightarrow> oneone (merge0 I' I)"
apply(auto simp add:merge0_def distinct_map compat_def split_def)
apply(blast intro:subset_inj_on)+
done
corollary pr_iso_test2_corr:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F; [] \<notin> set Fs\<^sub>2;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
pr_iso_test2 compat merge0 [] Fs\<^sub>1 Fs\<^sub>2 = (\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2)"
by(simp add: pr_iso_test2_conv_1[OF compat_corr merge0_correct merge0_inv]
pr_iso_test1_corr)
text\<open>Implementing merge as a recursive function:\<close>
primrec merge :: "('a,'b)merger" where
"merge [] I = I"
| "merge (xy#xys) I = (let (x,y) = xy in
if \<forall> (x',y') \<in> set I. x \<noteq> x' then xy # merge xys I else merge xys I)"
lemma merge_conv_merge0: "merge I' I = merge0 I' I"
apply(induct I')
apply(simp add:merge0_def)
apply(force simp add:Let_def list_all_iff merge0_def)
done
primrec pr_iso_test_rec :: "('a * 'b)list \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"pr_iso_test_rec I [] Fs\<^sub>2 = (Fs\<^sub>2 = [])"
| "pr_iso_test_rec I (F\<^sub>1#Fs\<^sub>1) Fs\<^sub>2 =
(\<exists> F\<^sub>2 \<in> set Fs\<^sub>2. length F\<^sub>1 = length F\<^sub>2 \<and>
(\<exists>n < length F\<^sub>2. let I' = zip F\<^sub>1 (rotate n F\<^sub>2) in
compat I' I \<and> pr_iso_test_rec (merge I' I) Fs\<^sub>1 (remove1 F\<^sub>2 Fs\<^sub>2)))"
lemma pr_iso_test_rec_conv_2:
"\<And>I Fs\<^sub>2. pr_iso_test_rec I Fs\<^sub>1 Fs\<^sub>2 = pr_iso_test2 compat merge0 I Fs\<^sub>1 Fs\<^sub>2"
apply(induct Fs\<^sub>1)
apply simp
apply(auto simp: merge_conv_merge0 list_ex_iff Bex_def Let_def)
done
corollary pr_iso_test_rec_corr:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F; [] \<notin> set Fs\<^sub>2;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
pr_iso_test_rec [] Fs\<^sub>1 Fs\<^sub>2 = (\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2)"
by(simp add: pr_iso_test_rec_conv_2 pr_iso_test2_corr)
definition pr_iso_test :: "'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"pr_iso_test Fs\<^sub>1 Fs\<^sub>2 = pr_iso_test_rec [] Fs\<^sub>1 Fs\<^sub>2"
corollary pr_iso_test_correct:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F; [] \<notin> set Fs\<^sub>2;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
pr_iso_test Fs\<^sub>1 Fs\<^sub>2 = (\<exists>\<phi>. is_pr_iso \<phi> Fs\<^sub>1 Fs\<^sub>2)"
apply(simp add:pr_iso_test_def pr_iso_test_rec_corr)
done
subsubsection\<open>`Improper' Isomorphisms\<close>
definition is_Iso :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a Fgraph \<Rightarrow> 'b Fgraph \<Rightarrow> bool" where
"is_Iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<equiv> is_pr_Iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<or> is_pr_Iso \<phi> Fs\<^sub>1 (rev ` Fs\<^sub>2)"
definition is_iso :: "('a \<Rightarrow> 'b) \<Rightarrow> 'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"is_iso \<phi> Fs\<^sub>1 Fs\<^sub>2 \<equiv> is_Iso \<phi> (set Fs\<^sub>1) (set Fs\<^sub>2)"
definition iso_fgraph :: "'a fgraph \<Rightarrow> 'a fgraph \<Rightarrow> bool" (infix "\<simeq>" 60) where
"g\<^sub>1 \<simeq> g\<^sub>2 \<equiv> \<exists>\<phi>. is_iso \<phi> g\<^sub>1 g\<^sub>2"
lemma iso_fgraph_trans: assumes "f \<simeq> (g::'a fgraph)" and "g \<simeq> h" shows "f \<simeq> h"
proof-
{ fix \<phi> \<phi>' assume "is_pr_Hom \<phi> (set f) (set g)" "inj_on \<phi> (\<Union>F\<in>set f. set F)"
"is_pr_Hom \<phi>' (set g) (set h)" "inj_on \<phi>' (\<Union>F\<in>set g. set F)"
hence "is_pr_Hom (\<phi>' \<circ> \<phi>) (set f) (set h) \<and>
inj_on (\<phi>' \<circ> \<phi>) (\<Union>F\<in>set f. set F)"
by(simp add: is_pr_Hom_trans comp_inj_on pr_Hom_pres_nodes)
} moreover
{ fix \<phi> \<phi>' assume "is_pr_Hom \<phi> (set f) (set g)" "inj_on \<phi> (\<Union>F\<in>set f. set F)"
"is_pr_Hom \<phi>' (set g) (rev ` set h)" "inj_on \<phi>' (\<Union>F\<in>set g. set F)"
hence "is_pr_Hom (\<phi>' \<circ> \<phi>) (set f) (rev ` set h) \<and>
inj_on (\<phi>' \<circ> \<phi>) (\<Union>F\<in>set f. set F)"
by(simp add: is_pr_Hom_trans comp_inj_on pr_Hom_pres_nodes)
} moreover
{ fix \<phi> \<phi>' assume "is_pr_Hom \<phi> (set f) (rev ` set g)" "inj_on \<phi> (\<Union>F\<in>set f. set F)"
"is_pr_Hom \<phi>' (set g) (set h)" "inj_on \<phi>' (\<Union>F\<in>set g. set F)"
with this(3)[THEN is_pr_Hom_rev]
have "is_pr_Hom (\<phi>' \<circ> \<phi>) (set f) (rev ` set h) \<and>
inj_on (\<phi>' \<circ> \<phi>) (\<Union>F\<in>set f. set F)"
by(simp add: is_pr_Hom_trans comp_inj_on pr_Hom_pres_nodes)
} moreover
{ fix \<phi> \<phi>' assume "is_pr_Hom \<phi> (set f) (rev ` set g)" "inj_on \<phi> (\<Union>F\<in>set f. set F)"
"is_pr_Hom \<phi>' (set g) (rev ` set h)" "inj_on \<phi>' (\<Union>F\<in>set g. set F)"
with this(3)[THEN is_pr_Hom_rev]
have "is_pr_Hom (\<phi>' \<circ> \<phi>) (set f) (set h) \<and>
inj_on (\<phi>' \<circ> \<phi>) (\<Union>F\<in>set f. set F)"
by(simp add: is_pr_Hom_trans comp_inj_on pr_Hom_pres_nodes)
} ultimately show ?thesis using assms
by(simp add: iso_fgraph_def is_iso_def is_Iso_def is_pr_Iso_def) blast
qed
definition iso_test :: "'a fgraph \<Rightarrow> 'b fgraph \<Rightarrow> bool" where
"iso_test g\<^sub>1 g\<^sub>2 \<longleftrightarrow> pr_iso_test g\<^sub>1 g\<^sub>2 \<or> pr_iso_test g\<^sub>1 (map rev g\<^sub>2)"
theorem iso_correct:
"\<lbrakk> \<forall>F\<in>set Fs\<^sub>1. distinct F; \<forall>F\<in>set Fs\<^sub>2. distinct F; [] \<notin> set Fs\<^sub>2;
distinct Fs\<^sub>1; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>1);
distinct Fs\<^sub>2; inj_on (\<lambda>xs.{xs}//{\<cong>}) (set Fs\<^sub>2) \<rbrakk> \<Longrightarrow>
iso_test Fs\<^sub>1 Fs\<^sub>2 = (Fs\<^sub>1 \<simeq> Fs\<^sub>2)"
apply(simp add:iso_test_def pr_iso_test_correct iso_fgraph_def)
apply(subst pr_iso_test_correct)
apply simp
apply simp
apply (simp add:image_def)
apply simp
apply simp
apply (simp add:distinct_map)
apply (simp add:inj_on_image_iff)
apply(simp add:is_iso_def is_Iso_def is_pr_iso_def)
apply blast
done
lemma iso_fgraph_refl[iff]: "g \<simeq> g"
apply(simp add: iso_fgraph_def)
apply(rule_tac x = "id" in exI)
apply(simp add: is_iso_def is_Iso_def is_pr_Iso_def is_pr_Hom_def id_def)
done
subsection\<open>Elementhood and containment modulo\<close>
interpretation qle_gr: quasi_order "(\<simeq>)"
proof qed (auto intro:iso_fgraph_trans)
abbreviation qle_gr_in :: "'a fgraph \<Rightarrow> 'a fgraph set \<Rightarrow> bool" (infix "\<in>\<^sub>\<simeq>" 60)
where "x \<in>\<^sub>\<simeq> M \<equiv> qle_gr.in_qle x M"
abbreviation qle_gr_sub :: "'a fgraph set \<Rightarrow> 'a fgraph set \<Rightarrow> bool" (infix "\<subseteq>\<^sub>\<simeq>" 60)
where "x \<subseteq>\<^sub>\<simeq> M \<equiv> qle_gr.subseteq_qle x M"
abbreviation qle_gr_eq :: "'a fgraph set \<Rightarrow> 'a fgraph set \<Rightarrow> bool" (infix "=\<^sub>\<simeq>" 60)
where "x =\<^sub>\<simeq> M \<equiv> qle_gr.seteq_qle x M"
end
|
Formal statement is: lemma continuous_mult_left: fixes c::"'a::real_normed_algebra" shows "continuous F f \<Longrightarrow> continuous F (\<lambda>x. c * f x)" Informal statement is: If $f$ is a continuous function, then so is the function $x \mapsto c f(x)$, where $c$ is a constant. |
/- TACTICS -/
namespace TBA
-- definitions from last week
-- NOTE: We renamed it from `Nat'` for consistency. The new namespace makes sure we don't use the
-- standard library `Nat`.
inductive Nat : Type where
| zero : Nat
| succ (n : Nat) : Nat
open Nat
def add (m n : Nat) : Nat :=
match n with
| zero => m
| succ n => succ (add m n)
-- With this command we add a notation for `add`. From now on we will be able to write `m + n` for
-- `add m n`. The 65 denotes how strongly the operator should bind to what's adjacent to it.
-- The `priority` means that Lean will prefer it over the built-in `+`.
infix:65 (priority := high) " + " => add
def mul (m n : Nat) : Nat :=
match n with
| zero => zero
| succ n => (mul m n) + m
-- We also want a notation for `mul`, with a higher binding strength than addition so that
-- `a + b * c` means `a + (b * c)`.
infix:70 (priority := high) " * " => mul
inductive LE : Nat → Nat → Prop where
| refl (n : Nat) : LE n n
| succ : LE m n → LE m (succ n)
-- lower binding strength than either addition or multiplication
infix:50 (priority := high) " ≤ " => LE
-- Let's start by reproving some theorems from last week, but this time with tactics!
-- useful tactics:
-- * `induction ... with ...`
-- * `rw [f]` to unfold applications of a function `f`
-- * `rw [h]` to rewrite every `a` to `b` if `h : a = b`
-- * `apply/exact`
-- * `simp/simp_all`... are powerful and basically always useful, though make sure that you could also
-- do the proof without them
theorem zero_add : zero + n = n := by
induction n with
| zero =>
rfl
| succ n ih =>
rw [add, ih]
-- or simply
--induction n <;> simp_all [add]
theorem le_add : m ≤ m + n := by
induction n with
| zero =>
apply LE.refl
-- same as
--exact LE.refl _
| succ n ih =>
rw [add] -- optional, but helpful
apply LE.succ
apply ih
-- same as
--exact LE.succ ih
-- alternatively, simply use
--simp [add, LE.succ, ih]
-- ...but using a conditional simp theorem like `LE.succ` is usually not obvious
-- when first writing a proof
-- or simply
--induction n <;> simp_all [LE.refl, LE.succ, add]
-- Alright, let's start automating more!
attribute [simp] add mul
-- These definitions will now automatically be unfolded when you use `simp/simp_all`
theorem succ_add : (succ n) + m = succ (n + m) := by
induction m <;> simp_all [zero_add]
-- This one is a bit more tricky, you might need to prove a helper lemma!
theorem add_comm : n + m = m + n := by
induction n with
| zero => exact zero_add
| succ n ih => simp [succ_add, ih]
-- Associativity can be proven in a similar way.
theorem add_assoc : (m + n) + k = m + (n + k) := by
induction k with
| zero => rfl
| succ k ih => simp [succ_add, ih]
def one := succ zero
theorem mul_one : m * one = m := by
simp [zero_add]
-- To prove associativity of multiplication, you might have to come up with
-- some more lemmas about multiplication first. Some are similar to the above laws of
-- addition, some use both addition and multiplication ("distributivity" is the keyword).
theorem left_distrib : m * (n + k) = (m * n) + (m * k) := by
induction k with
| zero => simp
| succ k ih =>
rw [add, mul, mul, ih, add_assoc]
-- NOTE: We *could* also use `add_assoc` as a simp theorem here.
-- ```
-- | succ n ih => simp [add_succ, ih, add_assoc]
-- ```
-- This even works with `add_comm` since `simp` is clever enough to avoid
-- permutating applicable terms endlessly! However, such general simp theorems
-- are best avoided for performance reasons and unintended interactions with
-- other simp theorems.
theorem mul_assoc : (m * n) * k = m * (n * k) := by
induction k with
| zero => rfl
| succ k ih => simp [ih, left_distrib]
-- Remember the structures for semigroups and monoids which we defined last week?
structure Semigroup (α : Type) where
mul : α → α → α
assoc : mul (mul a b) c = mul a (mul b c)
structure Monoid (α : Type) extends Semigroup α where
e : α
e_mul : mul e a = a
mul_e : mul a e = a
theorem one_mul : one * m = m := by
induction m with
| zero => rfl
| succ m ih => simp [ih]
-- You should now be able to instantiate two of them, including proofs!
def Nat_add_Monoid : Monoid Nat := {
mul := add
assoc := add_assoc
e := zero
e_mul := zero_add
mul_e := rfl
}
def Nat_mul_Monoid : Monoid Nat := {
mul := mul
assoc := mul_assoc
e := one
e_mul := one_mul
mul_e := mul_one
}
end TBA
|
module INIBuilder
using REPL
using REPL.TerminalMenus
using ..Circuitscape
include("filepicker.jl")
include("run.jl")
export start
end
|
(*
* Copyright Florian Haftmann
*
* SPDX-License-Identifier: BSD-2-Clause
*)
section \<open>Ancient comprehensive Word Library\<close>
theory Word_Lib_Sumo
imports
"HOL-Library.Word"
Aligned
Bit_Comprehension
Bit_Comprehension_Int
Bit_Shifts_Infix_Syntax
Bits_Int
Bitwise_Signed
Bitwise
Enumeration_Word
Generic_set_bit
Hex_Words
Least_significant_bit
More_Arithmetic
More_Divides
More_Sublist
Even_More_List
More_Misc
Strict_part_mono
Legacy_Aliases
Most_significant_bit
Next_and_Prev
Norm_Words
Reversed_Bit_Lists
Rsplit
Signed_Words
Syntax_Bundles
Typedef_Morphisms
Type_Syntax
Word_EqI
Word_Lemmas
Word_8
Word_16
Word_32
Word_Syntax
Signed_Division_Word
Singleton_Bit_Shifts
More_Word_Operations
Many_More
Word_Lemmas_Internal
Word_Lemmas_Prefix
begin
unbundle bit_operations_syntax
unbundle bit_projection_infix_syntax
declare word_induct2[induct type]
declare word_nat_cases[cases type]
declare signed_take_bit_Suc [simp]
(* these generate take_bit terms, which we often don't want for concrete lengths *)
lemmas of_int_and_nat = unsigned_of_nat unsigned_of_int signed_of_int signed_of_nat
bundle no_take_bit
begin
declare of_int_and_nat[simp del]
end
lemmas bshiftr1_def = bshiftr1_eq
lemmas is_down_def = is_down_eq
lemmas is_up_def = is_up_eq
lemmas mask_def = mask_eq
lemmas scast_def = scast_eq
lemmas shiftl1_def = shiftl1_eq
lemmas shiftr1_def = shiftr1_eq
lemmas sshiftr1_def = sshiftr1_eq
lemmas sshiftr_def = sshiftr_eq_funpow_sshiftr1
lemmas to_bl_def = to_bl_eq
lemmas ucast_def = ucast_eq
lemmas unat_def = unat_eq_nat_uint
lemmas word_cat_def = word_cat_eq
lemmas word_reverse_def = word_reverse_eq_of_bl_rev_to_bl
lemmas word_roti_def = word_roti_eq_word_rotr_word_rotl
lemmas word_rotl_def = word_rotl_eq
lemmas word_rotr_def = word_rotr_eq
lemmas word_sle_def = word_sle_eq
lemmas word_sless_def = word_sless_eq
lemmas uint_0 = uint_nonnegative
lemmas uint_lt = uint_bounded
lemmas uint_mod_same = uint_idem
lemmas of_nth_def = word_set_bits_def
lemmas of_nat_word_eq_iff = word_of_nat_eq_iff
lemmas of_nat_word_eq_0_iff = word_of_nat_eq_0_iff
lemmas of_int_word_eq_iff = word_of_int_eq_iff
lemmas of_int_word_eq_0_iff = word_of_int_eq_0_iff
lemmas word_next_def = word_next_unfold
lemmas word_prev_def = word_prev_unfold
lemmas is_aligned_def = is_aligned_iff_dvd_nat
lemmas word_and_max_simps =
word8_and_max_simp
word16_and_max_simp
word32_and_max_simp
lemma distinct_lemma: "f x \<noteq> f y \<Longrightarrow> x \<noteq> y" by auto
lemmas and_bang = word_and_nth
lemmas sdiv_int_def = signed_divide_int_def
lemmas smod_int_def = signed_modulo_int_def
(* shortcut for some specific lengths *)
lemma word_fixed_sint_1[simp]:
"sint (1::8 word) = 1"
"sint (1::16 word) = 1"
"sint (1::32 word) = 1"
"sint (1::64 word) = 1"
by (auto simp: sint_word_ariths)
declare of_nat_diff [simp]
(* Haskellish names/syntax *)
notation (input)
bit ("testBit")
lemmas cast_simps = cast_simps ucast_down_bl
(* shadows the slightly weaker Word.nth_ucast *)
lemma nth_ucast:
"(ucast (w::'a::len word)::'b::len word) !! n =
(w !! n \<and> n < min LENGTH('a) LENGTH('b))"
by (auto simp add: bit_simps not_le dest: bit_imp_le_length)
end
|
Formal statement is: lemma higher_deriv_diff: fixes z::complex assumes "f holomorphic_on S" "g holomorphic_on S" "open S" "z \<in> S" shows "(deriv ^^ n) (\<lambda>w. f w - g w) z = (deriv ^^ n) f z - (deriv ^^ n) g z" Informal statement is: If $f$ and $g$ are holomorphic functions on an open set $S$, then the $n$th derivative of $f - g$ is equal to the $n$th derivative of $f$ minus the $n$th derivative of $g$. |
Formal statement is: lemma higher_deriv_transform_within_open: fixes z::complex assumes "f holomorphic_on S" "g holomorphic_on S" "open S" and z: "z \<in> S" and fg: "\<And>w. w \<in> S \<Longrightarrow> f w = g w" shows "(deriv ^^ i) f z = (deriv ^^ i) g z" Informal statement is: If $f$ and $g$ are holomorphic functions on an open set $S$ and $f(z) = g(z)$ for all $z \in S$, then the $i$th derivative of $f$ at $z$ is equal to the $i$th derivative of $g$ at $z$. |
[STATEMENT]
lemma funpow_less_iter:
fixes f :: "('a::order) \<Rightarrow> 'a"
assumes mono: "\<And>k. k < f k" and m_lt_n: "m < n"
shows "(f ^^ m) k < (f ^^ n) k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (f ^^ m) k < (f ^^ n) k
[PROOF STEP]
using m_lt_n
[PROOF STATE]
proof (prove)
using this:
m < n
goal (1 subgoal):
1. (f ^^ m) k < (f ^^ n) k
[PROOF STEP]
by (induct n) (auto, blast intro: mono less_trans dest: less_antisym) |
#ifndef KGS_GSLQR_H
#define KGS_GSLQR_H
#include "math/QR.h"
#include <gsl/gsl_matrix.h>
class QRGSL: public QR {
public:
QRGSL(gsl_matrix* M): QR(M){}
protected:
void updateFromMatrix() override;
};
#endif //KGS_GSLQR_H
|
[STATEMENT]
lemma fresh_star_finite_insert:
fixes S :: "('a::fs) set" shows "finite S \<Longrightarrow> a \<sharp>* insert x S \<longleftrightarrow> a \<sharp>* x \<and> a \<sharp>* S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite S \<Longrightarrow> a \<sharp>* insert x S = (a \<sharp>* x \<and> a \<sharp>* S)
[PROOF STEP]
by (auto simp: fresh_star_def fresh_finite_insert) |
module Control.Eff
import public Control.Eff.Choose
import public Control.Eff.Except
import public Control.Eff.Internal
import public Control.Eff.Reader
import public Control.Eff.State
import public Control.Eff.Writer
|
-- @@stderr --
dtrace: failed to compile script test/unittest/actions/printf/err.D_PRINTF_DYN_TYPE.prec.d: [D_PRINTF_DYN_TYPE] line 19: printf( ) argument #2 is incompatible with conversion #1 prototype:
conversion: % .* d
prototype: int
argument: string
|
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Relation.Nullary.Decidable where
open import Cubical.Core.Everything
open import Cubical.Data.Empty using (⊥)
private
variable
ℓ : Level
-- Negation
infix 3 ¬_
¬_ : Type ℓ → Type ℓ
¬ A = A → ⊥
-- Decidable types (inspired by standard library)
data Dec (P : Type ℓ) : Type ℓ where
yes : ( p : P) → Dec P
no : (¬p : ¬ P) → Dec P
data IsYes {P : Type ℓ} : Dec P → Type ℓ where
isYes : ∀ {x} → IsYes (yes x)
Discrete : Type ℓ → Type ℓ
Discrete A = ∀ x y → Dec (Path A x y)
|
Formal statement is: lemma homotopic_with_linear: fixes f g :: "_ \<Rightarrow> 'b::real_normed_vector" assumes contf: "continuous_on S f" and contg:"continuous_on S g" and sub: "\<And>x. x \<in> S \<Longrightarrow> closed_segment (f x) (g x) \<subseteq> t" shows "homotopic_with_canon (\<lambda>z. True) S t f g" Informal statement is: If $f$ and $g$ are continuous functions from $S$ to $\mathbb{R}^n$ and the line segment between $f(x)$ and $g(x)$ is contained in $t$ for all $x \in S$, then $f$ and $g$ are homotopic relative to $S$ with $t$ as the target space. |
lemma Lim_at_zero: fixes a :: "'a::real_normed_vector" and l :: "'b::topological_space" shows "(f \<longlongrightarrow> l) (at a) \<longleftrightarrow> ((\<lambda>x. f(a + x)) \<longlongrightarrow> l) (at 0)" |
section \<open>Monad-Zero Class\<close>
theory Monad_Zero
imports Monad
begin
class zeroU = tycon +
fixes zeroU :: "udom\<cdot>'a::tycon"
class functor_zero = zeroU + "functor" +
assumes fmapU_zeroU [coerce_simp]:
"fmapU\<cdot>f\<cdot>zeroU = zeroU"
class monad_zero = zeroU + monad +
assumes bindU_zeroU:
"bindU\<cdot>zeroU\<cdot>f = zeroU"
instance monad_zero \<subseteq> functor_zero
proof
fix f show "fmapU\<cdot>f\<cdot>zeroU = (zeroU :: udom\<cdot>'a)"
unfolding fmapU_eq_bindU
by (rule bindU_zeroU)
qed
definition fzero :: "'a\<cdot>'f::functor_zero"
where "fzero = coerce\<cdot>(zeroU :: udom\<cdot>'f)"
lemma fmap_fzero:
"fmap\<cdot>f\<cdot>(fzero :: 'a\<cdot>'f::functor_zero) = (fzero :: 'b\<cdot>'f)"
unfolding fmap_def fzero_def
by (simp add: coerce_simp)
abbreviation mzero :: "'a\<cdot>'m::monad_zero"
where "mzero \<equiv> fzero"
lemmas mzero_def = fzero_def [where 'f="'m::monad_zero"] for f
lemmas fmap_mzero = fmap_fzero [where 'f="'m::monad_zero"] for f
lemma bindU_eq_bind: "bindU = bind"
unfolding bind_def by simp
lemma bind_mzero:
"bind\<cdot>(fzero :: 'a\<cdot>'m::monad_zero)\<cdot>k = (mzero :: 'b\<cdot>'m)"
unfolding bind_def mzero_def
by (simp add: coerce_simp bindU_zeroU)
end
|
module Main
import WordsProvider
import Data.List
import Data.List.Quantifiers
%language TypeProviders
%provide (seoWords : List String) with readWords "seo-words.txt"
numberInfixes : String -> Nat
numberInfixes word = length $ filter (\w => isInfixOf w word) $ filter (/= "") seoWords
say : (s:String) -> { auto prf : GT (numberInfixes s) Z } -> IO ()
say s = printLn s
main : IO ()
main = say "brand-1234"
|
module Star where
open import Prelude
infixr 40 _•_ _++_
infixl 30 _on_
infixr 20 _==>_ _=[_]=>_
data Star {X : Set} (R : Rel X) : Rel X where
ε : {x : X} -> Star R x x
_•_ : {x y z : X} -> R x y -> Star R y z -> Star R x z
_++_ : {X : Set}{R : Rel X}{x y z : X} ->
Star R x y -> Star R y z -> Star R x z
ε ++ ys = ys
(x • xs) ++ ys = x • (xs ++ ys)
_==>_ : {X : Set} -> Rel X -> Rel X -> Set
R ==> S = forall {a b} -> R a b -> S a b
_on_ : {X Y : Set} -> (R : Rel X) -> (f : Y -> X) -> Rel Y
R on f = \a b -> R (f a) (f b)
_=[_]=>_ : {X Y : Set} (R : Rel X) (f : X -> Y) (S : Rel Y) -> Set
R =[ f ]=> S = R ==> S on f
return : {X : Set}{R : Rel X} -> R ==> Star R
return x = x • ε
module JoinMap where
join : {X : Set}{R : Rel X} -> Star (Star R) ==> Star R
join ε = ε
join (xs • xss) = xs ++ join xss
map : forall {X Y R S} -> (f : X -> Y) ->
R =[ f ]=> S -> Star R =[ f ]=> Star S
map f pm ε = ε
map f pm (x • xs) = pm x • map f pm xs
bind : forall {X Y R S} -> (f : X -> Y) ->
R =[ f ]=> Star S -> Star R =[ f ]=> Star S
bind f k m = join (map f k m)
bind : forall {X Y R S} -> (f : X -> Y) ->
R =[ f ]=> Star S -> Star R =[ f ]=> Star S
bind f k ε = ε
bind f k (x • xs) = k x ++ bind f k xs
join : {X : Set}{R : Rel X} -> Star (Star R) ==> Star R
join = bind id id
map : forall {X Y R S} -> (f : X -> Y) ->
R =[ f ]=> S -> Star R =[ f ]=> Star S
map f k = bind f (return · k)
-- Generic length
length : {X : Set}{R : Rel X} -> Star R =[ ! ]=> Star One
length = map ! !
-- Reverse
_op : {X : Set} -> Rel X -> Rel X
(R op) a b = R b a
reverse : {X : Set}{R : Rel X}{a b : X} -> Star R a b -> Star (R op) b a
reverse {X}{R} xs = rev xs ε
where
rev : forall {a b c} ->
Star R a b -> Star (R op) a c -> Star (R op) b c
rev ε ys = ys
rev (x • xs) ys = rev xs (x • ys)
|
function spm_mask(P1, P2, thresh)
% Mask images
% FORMAT spm_mask(P1, P2, thresh)
% P1 - matrix of input image filenames from which
% to compute the mask.
% P2 - matrix of input image filenames on which
% to apply the mask.
% thresh - optional threshold(s) for defining the mask.
% The masked images are prepended with the prefix `m'.
%
% If any voxel in the series of images is zero (for data types without
% a floating point representation) or does not have a finite value (for
% floating point and double precision images), then that voxel is set to
% NaN or zero in all the images. If a threshold, or vector of
% thresholds is passed, then the masking is based on voxels whose
% values are above all the thresholds.
%
% Images sampled in different orientations and positions can be passed
% to the routine.
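%
% Example (hypothetical filenames, for illustration only):
%   spm_mask('meanepi.nii', char('con_0001.nii','con_0002.nii'), 0.25)
% would write mcon_0001.nii and mcon_0002.nii, masked by the voxels of
% meanepi.nii whose values are finite and at or above 0.25.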
%__________________________________________________________________________
% Copyright (C) 1999-2011 Wellcome Trust Centre for Neuroimaging
% John Ashburner
% $Id: spm_mask.m 4419 2011-08-03 18:42:35Z guillaume $
persistent runonce
if isempty(runonce)
warning('ImCalc should be preferred to spm_mask whenever possible.');
runonce = 1;
end
SVNid = '$Rev: 4419 $';
%-Say hello
%--------------------------------------------------------------------------
SPMid = spm('FnBanner',mfilename,SVNid);
%-Parameters & Arguments
%--------------------------------------------------------------------------
if ~nargin
[P1, sts] = spm_select(Inf,'image','Images to compute mask from');
if ~sts, return; end
[P2, sts] = spm_select(Inf,'image','Images to apply mask to');
if ~sts, return; end
end
if nargin==1
P2 = P1;
end
V1 = spm_vol(P1);
V2 = spm_vol(P2);
if nargin==3
if numel(thresh)==1
thresh = repmat(thresh,numel(V1),1);
else
if numel(V1) ~= numel(thresh)
error('Input argument ''thresh'' has wrong size.');
end
thresh = thresh(:);
end
end
m1 = numel(V1);
m2 = numel(V2);
%-Create headers
%--------------------------------------------------------------------------
VO = V2;
for i=1:m2
[pth,nm,ext,num] = spm_fileparts(deblank(VO(i).fname));
VO(i).fname = fullfile(pth,['m', nm, ext, num]);
VO(i).descrip = 'Masked';
VO(i).mat = VO(1).mat;
VO(i).dim(1:3) = VO(1).dim(1:3);
end
VO = spm_create_vol(VO);
M = VO(1).mat;
dim = VO(1).dim(1:3);
%-Compute masked images
%--------------------------------------------------------------------------
spm_progress_bar('Init',VO(1).dim(3),'Masking','planes completed');
for j=1:dim(3)
msk = true(dim(1:2));
Mi = spm_matrix([0 0 j]);
% Load slice j from all images
for i=1:m1
M1 = M\V1(i).mat\Mi;
%if sum((M1(:)-Mi(:)).^2<eps) M1 = Mi; end;
img = spm_slice_vol(V1(i),M1,dim(1:2),[0 NaN]);
msk = msk & isfinite(img);
if ~spm_type(V1(i).dt(1),'nanrep')
msk = msk & (img ~= 0);
end
if nargin == 3
msk = msk & (img >= thresh(i));
end
end
% Write the images
for i=1:m2
M1 = M\V2(i).mat\Mi;
img = spm_slice_vol(V2(i),M1,dim(1:2),[1 0]);
img(~msk) = NaN;
VO(i) = spm_write_plane(VO(i),img,j);
end
spm_progress_bar('Set',j);
end
spm_progress_bar('Clear');
|
Formal statement is: lemma homeomorphic_imp_homotopy_equivalent_space: "X homeomorphic_space Y \<Longrightarrow> X homotopy_equivalent_space Y" Informal statement is: If two topological spaces are homeomorphic, then they are homotopy equivalent. |
[STATEMENT]
lemma fun_ord_trans: "transp ord \<Longrightarrow> transp (fun_ord ord)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. transp ord \<Longrightarrow> transp (fun_ord ord)
[PROOF STEP]
by (fastforce simp: fun_ord_def transp_def) |
import Decidable.Equality
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : (x : a) -> (xs : Vect k a) -> Vect (S k) a
%name Vect xs, ys, zs
-- the equalities below (i.e (x = y) -> Void ) get constrained
-- to be the same value (i.e (x = x) -> Void )
headUnequal : DecEq a => {xs : Vect n a} -> {ys : Vect n a} ->
(contra : (x = y) -> Void) -> ((x :: xs) = (y :: ys)) -> Void
headUnequal contra Refl = contra Refl
tailUnequal : DecEq a => {xs : Vect n a} -> {ys : Vect n a} ->
(contra : (xs = ys) -> Void) -> ((x :: xs) = (y :: ys)) -> Void
tailUnequal contra Refl = contra Refl
DecEq a => DecEq (Vect n a) where
decEq [] [] = Yes Refl
decEq (x :: xs) (y :: ys) =
case decEq x y of
Yes Refl => case decEq xs ys of
Yes Refl => Yes Refl
No contra => No (tailUnequal contra)
No contra => No (headUnequal contra)
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory RAB_FN
imports
"CSpace1_R"
"Lib.MonadicRewrite"
begin
definition
"only_cnode_caps ctes =
option_map ((\<lambda>x. if isCNodeCap x then x else NullCap) o cteCap) o ctes"
definition locateSlotFun_def:
"locateSlotFun cnode offset \<equiv> cnode + 2 ^ cte_level_bits * offset"
definition
"cnode_caps_gsCNodes cts cns
= (\<forall>cap \<in> ran cts. isCNodeCap cap
\<longrightarrow> cns (capCNodePtr cap) = Some (capCNodeBits cap))"
abbreviation (input)
"cnode_caps_gsCNodes' s \<equiv> cnode_caps_gsCNodes (only_cnode_caps (ctes_of s)) (gsCNodes s)"
function
resolveAddressBitsFn ::
"capability \<Rightarrow> cptr \<Rightarrow> nat \<Rightarrow> (machine_word \<Rightarrow> capability option)
\<Rightarrow> (lookup_failure + (machine_word * nat))"
where
"resolveAddressBitsFn a b c =
(\<lambda>x0 capptr bits caps. (let nodeCap = x0 in
if isCNodeCap nodeCap
then (let
radixBits = capCNodeBits nodeCap;
guardBits = capCNodeGuardSize nodeCap;
levelBits = radixBits + guardBits;
offset = (fromCPtr capptr `~shiftR~` (bits-levelBits)) &&
(mask radixBits);
guard = (fromCPtr capptr `~shiftR~` (bits-guardBits)) &&
(mask guardBits);
bitsLeft = bits - levelBits;
slot = locateSlotFun (capCNodePtr nodeCap) offset
in
if levelBits = 0 then Inr (0, 0)
else if \<not> (guardBits \<le> bits \<and> guard = capCNodeGuard nodeCap)
then Inl $ GuardMismatch_ \<lparr>
guardMismatchBitsLeft= bits,
guardMismatchGuardFound= capCNodeGuard nodeCap,
guardMismatchGuardSize= guardBits \<rparr>
else if (levelBits > bits) then Inl $ DepthMismatch_ \<lparr>
depthMismatchBitsLeft= bits,
depthMismatchBitsFound= levelBits \<rparr>
else if (bitsLeft = 0)
then Inr (slot, 0)
else (case caps slot of Some NullCap
\<Rightarrow> Inr (slot, bitsLeft)
| Some nextCap
\<Rightarrow> resolveAddressBitsFn nextCap capptr bitsLeft caps
| None \<Rightarrow> Inr (0, 0))
)
else Inl InvalidRoot
))
a b c"
by auto
termination
apply (relation "measure (snd o snd)")
apply (auto split: if_split_asm)
done
declare resolveAddressBitsFn.simps[simp del]
lemma isCNodeCap_capUntypedPtr_capCNodePtr:
"isCNodeCap c \<Longrightarrow> capUntypedPtr c = capCNodePtr c"
by (clarsimp simp: isCap_simps)
lemma resolveAddressBitsFn_eq:
"monadic_rewrite F E (\<lambda>s. (isCNodeCap cap \<longrightarrow> (\<exists>slot. cte_wp_at' (\<lambda>cte. cteCap cte = cap) slot s))
\<and> valid_objs' s \<and> cnode_caps_gsCNodes' s)
(resolveAddressBits cap capptr bits)
(gets (resolveAddressBitsFn cap capptr bits o only_cnode_caps o ctes_of))"
(is "monadic_rewrite F E (?P cap) (?f cap bits) (?g cap capptr bits)")
proof (induct cap capptr bits rule: resolveAddressBits.induct)
case (1 cap cref depth)
show ?case
apply (subst resolveAddressBits.simps, subst resolveAddressBitsFn.simps)
apply (simp only: Let_def haskell_assertE_def K_bind_def)
apply (rule monadic_rewrite_name_pre)
apply (rule monadic_rewrite_guard_imp)
apply (rule_tac P="(=) s" in monadic_rewrite_trans)
(* step 1, apply the induction hypothesis on the lhs *)
apply (rule monadic_rewrite_named_if monadic_rewrite_named_bindE
monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="returnOk y" for y]
monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="x $ y" for x y]
monadic_rewrite_refl[THEN monadic_rewrite_guard_imp, where f="assertE P" for P s]
TrueI)+
apply (rule_tac g="case nextCap of CNodeCap a b c d
\<Rightarrow> ?g nextCap cref bitsLeft
| _ \<Rightarrow> returnOk (slot, bitsLeft)" in monadic_rewrite_guard_imp)
apply (wpc | rule monadic_rewrite_refl "1.hyps"
| simp only: capability.case haskell_assertE_def simp_thms)+
apply (clarsimp simp: in_monad locateSlot_conv getSlotCap_def
dest!: in_getCTE fst_stateAssertD)
apply (fastforce elim: cte_wp_at_weakenE')
apply (rule monadic_rewrite_refl[THEN monadic_rewrite_guard_imp], simp)
(* step 2, split and match based on the lhs structure *)
apply (simp add: locateSlot_conv liftE_bindE unlessE_def whenE_def
if_to_top_of_bindE assertE_def stateAssert_def bind_assoc
assert_def if_to_top_of_bind getSlotCap_def
split del: if_split cong: if_cong)
apply (rule monadic_rewrite_if_l monadic_rewrite_symb_exec_l'[OF _ get_wp, rotated]
empty_fail_get no_fail_get impI
monadic_rewrite_refl get_wp
| simp add: throwError_def returnOk_def locateSlotFun_def if_not_P
isCNodeCap_capUntypedPtr_capCNodePtr
cong: if_cong split del: if_split)+
apply (rule monadic_rewrite_symb_exec_l'[OF _ getCTE_inv _ _ getCTE_cte_wp_at, rotated])
apply simp
apply (rule impI, rule no_fail_getCTE)
apply (simp add: monadic_rewrite_def simpler_gets_def return_def returnOk_def
only_cnode_caps_def cte_wp_at_ctes_of isCap_simps
locateSlotFun_def isCNodeCap_capUntypedPtr_capCNodePtr
split: capability.split)
apply (rule monadic_rewrite_name_pre[where P="\<lambda>_. False" and f=fail]
monadic_rewrite_refl get_wp
| simp add: throwError_def returnOk_def locateSlotFun_def if_not_P
isCNodeCap_capUntypedPtr_capCNodePtr
cong: if_cong split del: if_split)+
(* step 3, prove the non-failure conditions *)
apply (clarsimp simp: isCap_simps)
apply (frule(1) cte_wp_at_valid_objs_valid_cap')
apply (clarsimp simp: cte_level_bits_def valid_cap_simps'
real_cte_at' isCap_simps cteSizeBits_def objBits_simps)
apply (clarsimp simp: cte_wp_at_ctes_of only_cnode_caps_def ball_Un
cnode_caps_gsCNodes_def ran_map_option o_def)
apply (drule bspec, rule IntI, erule ranI, simp add: isCap_simps)
apply (simp add: isCap_simps capAligned_def word_bits_def and_mask_less')
done
qed
end
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import group_theory.group_action.defs
/-!
# Sigma instances for additive and multiplicative actions
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines instances for arbitrary sum of additive and multiplicative actions.
## See also
* `group_theory.group_action.pi`
* `group_theory.group_action.prod`
* `group_theory.group_action.sum`
-/
variables {ι : Type*} {M N : Type*} {α : ι → Type*}
namespace sigma
section has_smul
variables [Π i, has_smul M (α i)] [Π i, has_smul N (α i)] (a : M) (i : ι) (b : α i)
(x : Σ i, α i)
@[to_additive sigma.has_vadd] instance : has_smul M (Σ i, α i) := ⟨λ a, sigma.map id $ λ i, (•) a⟩
@[to_additive] lemma smul_def : a • x = x.map id (λ i, (•) a) := rfl
@[simp, to_additive] lemma smul_mk : a • mk i b = mk i (a • b) := rfl
@[to_additive] instance [has_smul M N] [Π i, is_scalar_tower M N (α i)] :
is_scalar_tower M N (Σ i, α i) :=
⟨λ a b x, by { cases x, rw [smul_mk, smul_mk, smul_mk, smul_assoc] }⟩
@[to_additive] instance [Π i, smul_comm_class M N (α i)] : smul_comm_class M N (Σ i, α i) :=
⟨λ a b x, by { cases x, rw [smul_mk, smul_mk, smul_mk, smul_mk, smul_comm] }⟩
@[to_additive] instance [Π i, has_smul Mᵐᵒᵖ (α i)] [Π i, is_central_scalar M (α i)] :
is_central_scalar M (Σ i, α i) :=
⟨λ a x, by { cases x, rw [smul_mk, smul_mk, op_smul_eq_smul] }⟩
/-- This is not an instance because `i` becomes a metavariable. -/
@[to_additive "This is not an instance because `i` becomes a metavariable."]
protected lemma has_faithful_smul' [has_faithful_smul M (α i)] : has_faithful_smul M (Σ i, α i) :=
⟨λ x y h, eq_of_smul_eq_smul $ λ a : α i, heq_iff_eq.1 (ext_iff.1 $ h $ mk i a).2⟩
@[to_additive] instance [nonempty ι] [Π i, has_faithful_smul M (α i)] :
has_faithful_smul M (Σ i, α i) :=
nonempty.elim ‹_› $ λ i, sigma.has_faithful_smul' i
end has_smul
@[to_additive] instance {m : monoid M} [Π i, mul_action M (α i)] : mul_action M (Σ i, α i) :=
{ mul_smul := λ a b x, by { cases x, rw [smul_mk, smul_mk, smul_mk, mul_smul] },
one_smul := λ x, by { cases x, rw [smul_mk, one_smul] } }
end sigma
|
module Category where
open import Logic.Equivalence
open import Logic.Relations
open Equivalence using () renaming (_==_ to eq)
record Cat : Set2 where
field
Obj : Set1
_─→_ : Obj -> Obj -> Set
id : {A : Obj} -> A ─→ A
_∘_ : {A B C : Obj} -> B ─→ C -> A ─→ B -> A ─→ C
Eq : {A B : Obj} -> Equivalence (A ─→ B)
cong : {A B C : Obj}{f₁ f₂ : B ─→ C}{g₁ g₂ : A ─→ B} ->
eq Eq f₁ f₂ -> eq Eq g₁ g₂ -> eq Eq (f₁ ∘ g₁) (f₂ ∘ g₂)
idLeft : {A B : Obj}{f : A ─→ B} -> eq Eq (id ∘ f) f
idRight : {A B : Obj}{f : A ─→ B} -> eq Eq (f ∘ id) f
assoc : {A B C D : Obj}{f : C ─→ D}{g : B ─→ C}{h : A ─→ B} ->
eq Eq ((f ∘ g) ∘ h) (f ∘ (g ∘ h))
module Category (ℂ : Cat) where
private module CC = Cat ℂ
open CC public hiding (_─→_; _∘_)
private module Eq {A B : Obj} = Equivalence (Eq {A}{B})
open Eq public hiding (_==_)
infix 20 _==_
infixr 30 _─→_
infixr 90 _∘_
_─→_ = CC._─→_
_==_ : {A B : Obj} -> Rel (A ─→ B)
_==_ = Eq._==_
_∘_ : {A B C : Obj} -> B ─→ C -> A ─→ B -> A ─→ C
_∘_ = CC._∘_
congL : {A B C : Obj}{f₁ f₂ : B ─→ C}{g : A ─→ B} ->
f₁ == f₂ -> f₁ ∘ g == f₂ ∘ g
congL p = cong p refl
congR : {A B C : Obj}{f : B ─→ C}{g₁ g₂ : A ─→ B} ->
g₁ == g₂ -> f ∘ g₁ == f ∘ g₂
congR p = cong refl p
|
Formal statement is: lemma analytic_imp_holomorphic: "f analytic_on S \<Longrightarrow> f holomorphic_on S" Informal statement is: If $f$ is analytic on $S$, then $f$ is holomorphic on $S$. |
open import Agda.Builtin.Bool
open import Issue4166.Import {b = true} as A′
it : ⦃ Bool ⦄ → Bool
it ⦃ b ⦄ = b
b : Bool
b = it
|
import pseudo_normed_group.category
import for_mathlib.AddCommGroup.explicit_limits
import topology.category.Compactum
open category_theory
open category_theory.limits
universe u
variables {J : Type u} [small_category J]
structure PseuNormGrp₁ :=
(carrier : Type u)
[str : pseudo_normed_group carrier]
(exhaustive' : ∀ x : carrier, ∃ c : nnreal,
x ∈ pseudo_normed_group.filtration carrier c)
namespace PseuNormGrp₁
instance : has_coe_to_sort PseuNormGrp₁.{u} (Type u) := ⟨carrier⟩
instance (M : PseuNormGrp₁.{u}) : pseudo_normed_group M := M.str
lemma exhaustive (M : PseuNormGrp₁) (x : M) :
∃ c, x ∈ pseudo_normed_group.filtration M c := M.exhaustive' x
instance : category PseuNormGrp₁.{u} :=
{ hom := λ A B, strict_pseudo_normed_group_hom A B,
id := λ A, strict_pseudo_normed_group_hom.id A,
comp := λ A B C f g, f.comp g }
@[simp]
lemma id_apply (M : PseuNormGrp₁) (x : M) : (𝟙 M : M ⟶ M) x = x := rfl
@[simp]
lemma comp_apply {A B C : PseuNormGrp₁} (f : A ⟶ B) (g : B ⟶ C) (a : A) :
(f ≫ g) a = g (f a) := rfl
def to_Ab : PseuNormGrp₁.{u} ⥤ Ab.{u} :=
{ obj := λ M, AddCommGroup.of M,
map := λ M N f, f.to_add_monoid_hom }
variable {K : J ⥤ PseuNormGrp₁.{u}}
variable (C : limits.limit_cone (K ⋙ to_Ab))
def bounded_elements : add_subgroup C.cone.X :=
{ carrier := { x | ∃ c, ∀ j, C.cone.π.app j x ∈ pseudo_normed_group.filtration (K.obj j) c },
zero_mem' := ⟨0, λ j, by { simp, apply pseudo_normed_group.zero_mem_filtration } ⟩,
add_mem' := λ a b ha hb, begin
obtain ⟨c,hc⟩ := ha,
obtain ⟨d,hd⟩ := hb,
use c + d,
intros j,
simp,
apply pseudo_normed_group.add_mem_filtration,
apply hc,
apply hd,
end,
neg_mem' := λ a ha, begin
obtain ⟨c,hc⟩ := ha,
use c,
intros j,
simp,
apply pseudo_normed_group.neg_mem_filtration,
apply hc,
end }
def bounded_elements.filt (c : nnreal) : set C.cone.X :=
{ x | ∀ j, C.cone.π.app j x ∈ pseudo_normed_group.filtration (K.obj j) c }
def bounded_elements.filt_incl (c : nnreal) :
bounded_elements.filt C c → bounded_elements C :=
λ x, ⟨x, c, x.2⟩
def bounded_elements.filtration (c : nnreal) : set (bounded_elements C) :=
set.range (bounded_elements.filt_incl _ c)
def bounded_cone_point : PseuNormGrp₁ :=
{ carrier := bounded_elements C,
str :=
{ filtration := bounded_elements.filtration _,
filtration_mono := begin
intros c₁ c₂ h x hx,
obtain ⟨t,rfl⟩ := hx, refine ⟨⟨t,_⟩,rfl⟩, intros i,
apply pseudo_normed_group.filtration_mono h, apply t.2,
end,
zero_mem_filtration := begin
intros c, refine ⟨⟨0,λ i, _⟩,rfl⟩, simp,
apply pseudo_normed_group.zero_mem_filtration
end,
neg_mem_filtration := begin
intros c x hx,
obtain ⟨t,rfl⟩ := hx, refine ⟨⟨-t, λ i, _⟩, rfl⟩, simp,
apply pseudo_normed_group.neg_mem_filtration, apply t.2
end,
add_mem_filtration := begin
intros c₁ c₂ x₁ x₂ h₁ h₂,
obtain ⟨t₁,rfl⟩ := h₁, obtain ⟨t₂,rfl⟩ := h₂,
refine ⟨⟨t₁ + t₂, λ i, _⟩, rfl⟩, simp,
apply pseudo_normed_group.add_mem_filtration, apply t₁.2, apply t₂.2,
end },
exhaustive' := begin
intros m,
obtain ⟨c,hc⟩ := m.2,
refine ⟨c,⟨m.1, hc⟩, by { ext, refl }⟩,
end }
def bounded_cone : cone K :=
{ X := bounded_cone_point C,
π :=
{ app := λ j,
{ to_fun := λ x, C.cone.π.app _ x.1,
map_zero' := by simp,
map_add' := λ x y, by simp,
strict' := begin
rintros c x ⟨x,rfl⟩,
apply x.2,
end },
naturality' := begin
intros i j f,
ext,
dsimp,
rw ← C.cone.w f,
refl,
end } }
def bounded_cone_lift (S : cone K) : S.X ⟶ bounded_cone_point C :=
{ to_fun := λ x, ⟨C.2.lift (to_Ab.map_cone S) x, begin
obtain ⟨c,hc⟩ := S.X.exhaustive x,
use c,
intros j,
rw [← Ab.comp_apply, C.2.fac],
apply (S.π.app j).strict,
exact hc,
end⟩,
map_zero' := by { ext, simp },
map_add' := λ x y, by { ext, simp },
strict' := begin
intros c x hx,
refine ⟨⟨_, λ j, _⟩,rfl⟩,
erw [← Ab.comp_apply, C.2.fac],
apply (S.π.app j).strict,
exact hx,
end }
def bounded_cone_is_limit : is_limit (bounded_cone C) :=
{ lift := λ S, bounded_cone_lift C S,
fac' := begin
intros S j,
ext,
dsimp [bounded_cone_lift, bounded_cone],
rw [← Ab.comp_apply, C.2.fac],
refl,
end,
uniq' := begin
intros S m hm,
ext,
dsimp [bounded_cone_lift, bounded_cone],
apply Ab.is_limit_ext,
intros j,
rw [← Ab.comp_apply, C.2.fac],
dsimp,
rw ← hm,
refl,
end }
instance : has_limits PseuNormGrp₁ :=
begin
constructor, introsI J hJ, constructor, intros K,
exact has_limit.mk ⟨_, bounded_cone_is_limit ⟨_,limit.is_limit _⟩⟩,
end
open pseudo_normed_group
lemma mem_filtration_iff_of_is_limit (C : cone K) (hC : is_limit C)
(x : C.X) (c : nnreal) :
x ∈ pseudo_normed_group.filtration C.X c ↔
(∀ j : J, C.π.app j x ∈ pseudo_normed_group.filtration (K.obj j) c) :=
begin
split,
{ intros h j,
exact (C.π.app j).strict h },
{ intros h,
let E := bounded_cone ⟨_, Ab.explicit_limit_cone_is_limit.{u u} _⟩,
let e : C ≅ E := hC.unique_up_to_iso (bounded_cone_is_limit _),
let eX : C.X ≅ E.X := (cones.forget _).map_iso e,
let w := eX.hom x,
have hw : ∀ j, E.π.app j w ∈ filtration (K.obj j) c,
{ intros j,
dsimp only [w],
change (eX.hom ≫ E.π.app _) _ ∈ _,
dsimp only [eX, functor.map_iso, cones.forget],
convert h j,
simp },
suffices : w ∈ filtration E.X c,
{ convert eX.inv.strict this,
change _ = (eX.hom ≫ eX.inv) x,
rw iso.hom_inv_id,
refl },
refine ⟨⟨_,hw⟩,rfl⟩ }
end
@[simps]
def _root_.strict_pseudo_normed_group_hom.level {M N : Type*}
[pseudo_normed_group M] [pseudo_normed_group N]
(f : strict_pseudo_normed_group_hom M N) (c) :
filtration M c → filtration N c :=
λ x, ⟨f x, f.strict x.2⟩
@[simp]
lemma _root_.strict_pseudo_normed_group_hom.level_id
(M : Type*) [pseudo_normed_group M] (c) :
(strict_pseudo_normed_group_hom.id M).level c = id := by { ext, refl }
@[simp]
lemma _root_.strict_pseudo_normed_group_hom.level_comp {M N L : Type*}
[pseudo_normed_group M] [pseudo_normed_group N] [pseudo_normed_group L]
(f : strict_pseudo_normed_group_hom M N) (g : strict_pseudo_normed_group_hom N L) (c) :
(f.comp g).level c = g.level c ∘ f.level c := by { ext, refl }
@[simps]
def level : nnreal ⥤ PseuNormGrp₁.{u} ⥤ Type u :=
{ obj := λ c,
{ obj := λ M, filtration M c,
map := λ X Y f, f.level _,
map_id' := λ M, strict_pseudo_normed_group_hom.level_id M _,
map_comp' := λ M N L f g, f.level_comp g c },
map := λ c₁ c₂ h,
{ app := λ M, pseudo_normed_group.cast_le' h.le } } .
lemma level_map {X Y : PseuNormGrp₁} (f : X ⟶ Y) (c) : (level.obj c).map f = f.level _ := rfl
lemma level_map' {X Y : PseuNormGrp₁} (f : X ⟶ Y) (c) : (level.obj c).map f =
pseudo_normed_group.level f f.strict c := rfl
def level_cone_iso_hom (c) (t : (level.obj c).obj (bounded_cone_point C)) :
(K ⋙ level.obj c).sections :=
{ val := λ j,
{ val := C.cone.π.app j t.1.1,
property := begin
obtain ⟨w,hw⟩ := t.2,
apply_fun (λ e, e.val) at hw,
rw ← hw,
apply w.2
end },
property := begin
intros i j f,
ext,
dsimp,
rw ← C.cone.w f,
refl,
end }
def level_cone_iso_inv (c) (t : (K ⋙ level.obj c).sections) :
(level.obj c).obj (bounded_cone_point C) :=
{ val :=
{ val := C.2.lift (Ab.explicit_limit_cone.{u u} _) ⟨λ j, (t.1 j).1, begin
intros i j f,
dsimp,
change _ = (t.val _).val,
rw ← t.2 f,
refl,
end⟩,
property := begin
use c,
intros j,
rw [← Ab.comp_apply, C.2.fac],
dsimp [Ab.explicit_limit_cone],
apply (t.1 j).2,
end },
property := begin
refine ⟨⟨_,_⟩,rfl⟩,
intros j,
dsimp,
rw [← Ab.comp_apply, C.2.fac],
dsimp [Ab.explicit_limit_cone],
apply (t.1 j).2,
end } .
def level_cone_iso (c) :
(level.obj c).map_cone (bounded_cone C) ≅ types.limit_cone.{u u} _ :=
cones.ext
{ hom := level_cone_iso_hom _ _,
inv := level_cone_iso_inv _ _,
hom_inv_id' := begin
ext,
dsimp [level_cone_iso_inv, level_cone_iso_hom],
apply Ab.is_limit_ext,
intros j,
rw [← Ab.comp_apply, C.2.fac],
refl,
end,
inv_hom_id' := begin
ext,
dsimp [level_cone_iso_inv, level_cone_iso_hom],
rw [← Ab.comp_apply, C.2.fac],
refl,
end }
begin
intros j,
ext,
refl,
end
instance preserves_limits_level_obj (c) : preserves_limits (level.obj c) :=
begin
constructor, introsI J hJ, constructor, intros K,
apply preserves_limit_of_preserves_limit_cone
(bounded_cone_is_limit ⟨_, Ab.explicit_limit_cone_is_limit _⟩),
apply is_limit.of_iso_limit (types.limit_cone_is_limit _) (level_cone_iso _ _).symm,
end
def neg_nat_trans (c) : level.obj.{u} c ⟶ level.obj.{u} c :=
{ app := λ X, pseudo_normed_group.neg',
naturality' := begin
intros A B f,
ext,
dsimp [level, neg'],
simp,
end }
end PseuNormGrp₁
namespace CompHausFiltPseuNormGrp₁
@[simp]
lemma id_apply {A : CompHausFiltPseuNormGrp₁} (a : A) : (𝟙 A : A ⟶ A) a = a := rfl
@[simp]
lemma comp_apply {A B C : CompHausFiltPseuNormGrp₁} (f : A ⟶ B) (g : B ⟶ C) (a : A) :
(f ≫ g) a = g (f a) := rfl
def to_PNG₁ :
CompHausFiltPseuNormGrp₁.{u} ⥤ PseuNormGrp₁.{u} :=
{ obj := λ M,
{ carrier := M,
exhaustive' := M.exhaustive },
map := λ X Y f, { strict' := λ c x h, f.strict h .. f.to_add_monoid_hom } }
instance : faithful to_PNG₁.{u} := faithful.mk $
begin
intros X Y f g h,
ext,
apply_fun (λ e, e x) at h,
exact h
end
variable {K : J ⥤ CompHausFiltPseuNormGrp₁.{u}}
variable (C : limits.limit_cone ((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab))
def filtration_equiv (c : nnreal) :
pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c
≃ (CompHaus.limit_cone.{u u} (K ⋙ level.obj c)).X :=
((cones.forget _).map_iso (PseuNormGrp₁.level_cone_iso C c)).to_equiv
instance (c) :
topological_space (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
topological_space.induced (filtration_equiv C c) infer_instance
def filtration_homeo (c : nnreal) :
pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c
≃ₜ (CompHaus.limit_cone.{u u} (K ⋙ level.obj c)).X :=
homeomorph.homeomorph_of_continuous_open (filtration_equiv _ _) continuous_induced_dom
begin
intros U hU,
have : inducing (filtration_equiv C c) := ⟨rfl⟩,
rw this.is_open_iff at hU,
obtain ⟨U,hU,rfl⟩ := hU,
simpa,
end
instance (c) : t2_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.t2_space
instance (c) : compact_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.compact_space
/-
instance (c) : totally_disconnected_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.totally_disconnected_space
-/
def level_π (j c) : pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c →
pseudo_normed_group.filtration (K.obj j) c :=
(PseuNormGrp₁.level.obj c).map ((PseuNormGrp₁.bounded_cone C).π.app j)
lemma level_π_continuous (j c) : continuous (level_π C j c) :=
begin
have : level_π C j c ∘ (filtration_homeo C c).symm =
(CompHaus.limit_cone.{u u} _).π.app j,
{ ext,
change (C.is_limit.lift _ ≫ C.cone.π.app j) _ = _,
rw C.is_limit.fac,
refl },
suffices : continuous (level_π C j c ∘ (filtration_homeo C c).symm),
by simpa using this,
rw this,
continuity,
end
lemma bounded_cone_point_continuous_add'_aux {J : Type u}
[small_category J]
{K : J ⥤ CompHausFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c₁ c₂ : nnreal), continuous
(pseudo_normed_group.add' :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) ×
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) (c₁ + c₂))) :=
begin
intros c₁ c₂,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) ×
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) (c₁ + c₂)) :=
pseudo_normed_group.add',
change continuous g,
suffices : continuous ((filtration_homeo C _) ∘ g), by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
intros j,
let e := pseudo_normed_group.add' ∘ (prod.map (level_π C j c₁) (level_π C j c₂)),
have he : continuous e,
{ apply continuous.comp,
apply comphaus_filtered_pseudo_normed_group.continuous_add',
apply continuous.prod_map,
apply level_π_continuous,
apply level_π_continuous },
convert he,
ext,
dsimp,
simpa,
end
lemma bounded_cone_point_continuous_neg'_aux {J : Type u}
[small_category J]
{K : J ⥤ CompHausFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c : nnreal), continuous
(pseudo_normed_group.neg' :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c)) :=
begin
intros c,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
pseudo_normed_group.neg',
change continuous g,
suffices : continuous ((filtration_homeo C c) ∘ g),
by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
dsimp [g],
intros j,
let e := pseudo_normed_group.neg' ∘ level_π C j c,
have he : continuous e,
{ apply continuous.comp,
apply comphaus_filtered_pseudo_normed_group.continuous_neg',
apply level_π_continuous },
convert he,
ext,
dsimp,
simpa,
end
lemma bounded_cone_point_continuous_cast_le_aux {J : Type u}
[small_category J]
{K : J ⥤ CompHausFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c₁ c₂ : nnreal) (h : c₁ ≤ c₂), continuous
(pseudo_normed_group.cast_le' h :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂)) :=
begin
intros c₁ c₂ h,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) :=
pseudo_normed_group.cast_le' h,
change continuous g,
suffices : continuous ((filtration_homeo C _) ∘ g), by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
intros j,
dsimp [g],
let e := pseudo_normed_group.cast_le' h ∘ level_π C j c₁,
have he : continuous e,
{ apply continuous.comp,
haveI : fact (c₁ ≤ c₂) := ⟨h⟩,
apply comphaus_filtered_pseudo_normed_group.continuous_cast_le,
apply level_π_continuous },
exact he,
end
def bounded_cone_point : CompHausFiltPseuNormGrp₁ :=
{ M := PseuNormGrp₁.bounded_cone_point C,
str :=
{ continuous_add' := bounded_cone_point_continuous_add'_aux _,
continuous_neg' := bounded_cone_point_continuous_neg'_aux _,
continuous_cast_le := λ _ _ h, bounded_cone_point_continuous_cast_le_aux _ _ _ h.out,
..(infer_instance : pseudo_normed_group (PseuNormGrp₁.bounded_cone_point C)) },
exhaustive' := (PseuNormGrp₁.bounded_cone_point C).exhaustive }
def bounded_cone : cone K :=
{ X := bounded_cone_point C,
π :=
{ app := λ j,
{ continuous' := λ c, level_π_continuous _ _ _,
..((PseuNormGrp₁.bounded_cone C).π.app j) },
naturality' := begin
intros i j f,
ext,
dsimp,
rw ← (PseuNormGrp₁.bounded_cone C).w f,
refl,
end } }
def bounded_cone_is_limit : is_limit (bounded_cone C) :=
{ lift := λ S,
{ continuous' := begin
intros c,
let t : pseudo_normed_group.filtration S.X c →
pseudo_normed_group.filtration (bounded_cone C).X c :=
(((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S)).level _),
change continuous t,
suffices : continuous ((filtration_homeo C c) ∘ t), by simpa using this,
have : ⇑(filtration_homeo C c) ∘ t =
(CompHaus.limit_cone_is_limit.{u u} _).lift ((level.obj c).map_cone S),
{ ext,
change (C.is_limit.lift _ ≫ C.cone.π.app _) _ = _,
rw C.is_limit.fac, refl },
rw this,
continuity,
end,
..((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S)) },
fac' := begin
intros S j,
ext,
dsimp [bounded_cone],
change ((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S) ≫
(PseuNormGrp₁.bounded_cone C).π.app j) _ = _,
rw (PseuNormGrp₁.bounded_cone_is_limit C).fac,
refl,
end,
uniq' := begin
intros S m hm,
ext,
dsimp,
have : to_PNG₁.map m =
(PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S),
{ apply (PseuNormGrp₁.bounded_cone_is_limit C).uniq (to_PNG₁.map_cone S),
intros j,
ext t,
specialize hm j,
apply_fun (λ e, e t) at hm,
exact hm },
rw ← this,
refl,
end }
instance : preserves_limit K to_PNG₁ :=
begin
apply preserves_limit_of_preserves_limit_cone,
rotate 2,
exact bounded_cone ⟨_,Ab.explicit_limit_cone_is_limit.{u u} _⟩,
exact bounded_cone_is_limit _,
exact PseuNormGrp₁.bounded_cone_is_limit _,
end
/-
Remark: This functor even creates limits, as can be shown using the fact that the forgetful
functor from `Profinite` to `Type*` creates limits.
I don't think we actually need that strong statement, so we only prove the following.
-/
instance : preserves_limits to_PNG₁ :=
begin
constructor, introsI J hJ, constructor
end
end CompHausFiltPseuNormGrp₁
namespace ProFiltPseuNormGrp₁
@[simp]
lemma id_apply {A : ProFiltPseuNormGrp₁} (a : A) : (𝟙 A : A ⟶ A) a = a := rfl
@[simp]
lemma comp_apply {A B C : ProFiltPseuNormGrp₁} (f : A ⟶ B) (g : B ⟶ C) (a : A) :
(f ≫ g) a = g (f a) := rfl
def to_PNG₁ :
ProFiltPseuNormGrp₁.{u} ⥤ PseuNormGrp₁.{u} :=
{ obj := λ M,
{ carrier := M,
exhaustive' := M.exhaustive },
map := λ X Y f, { strict' := λ c x h, f.strict h .. f.to_add_monoid_hom } }
instance : faithful to_PNG₁.{u} := faithful.mk $
begin
intros X Y f g h,
ext,
apply_fun (λ e, e x) at h,
exact h
end
variable {K : J ⥤ ProFiltPseuNormGrp₁.{u}}
variable (C : limits.limit_cone ((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab))
def filtration_equiv (c : nnreal) :
pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c
≃ (Profinite.limit_cone (K ⋙ level.obj c)).X :=
((cones.forget _).map_iso (PseuNormGrp₁.level_cone_iso C c)).to_equiv
instance (c) :
topological_space (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
topological_space.induced (filtration_equiv C c) infer_instance
def filtration_homeo (c : nnreal) :
pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c
≃ₜ (Profinite.limit_cone (K ⋙ level.obj c)).X :=
homeomorph.homeomorph_of_continuous_open (filtration_equiv _ _) continuous_induced_dom
begin
intros U hU,
have : inducing (filtration_equiv C c) := ⟨rfl⟩,
rw this.is_open_iff at hU,
obtain ⟨U,hU,rfl⟩ := hU,
simpa,
end
instance (c) : t2_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.t2_space
instance (c) : compact_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.compact_space
instance (c) : totally_disconnected_space
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
(filtration_homeo C c).symm.totally_disconnected_space
def level_π (j c) : pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c →
pseudo_normed_group.filtration (K.obj j) c :=
(PseuNormGrp₁.level.obj c).map ((PseuNormGrp₁.bounded_cone C).π.app j)
lemma level_π_continuous (j c) : continuous (level_π C j c) :=
begin
have : level_π C j c ∘ (filtration_homeo C c).symm =
(Profinite.limit_cone _).π.app j,
{ ext,
change (C.is_limit.lift _ ≫ C.cone.π.app j) _ = _,
rw C.is_limit.fac,
refl },
suffices : continuous (level_π C j c ∘ (filtration_homeo C c).symm),
by simpa using this,
rw this,
continuity,
end
lemma bounded_cone_point_continuous_add'_aux {J : Type u}
[small_category J]
{K : J ⥤ ProFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c₁ c₂ : nnreal), continuous
(pseudo_normed_group.add' :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) ×
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) (c₁ + c₂))) :=
begin
intros c₁ c₂,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) ×
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) (c₁ + c₂)) :=
pseudo_normed_group.add',
change continuous g,
suffices : continuous ((filtration_homeo C _) ∘ g), by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
intros j,
let e := pseudo_normed_group.add' ∘ (prod.map (level_π C j c₁) (level_π C j c₂)),
have he : continuous e,
{ apply continuous.comp,
apply comphaus_filtered_pseudo_normed_group.continuous_add',
apply continuous.prod_map,
apply level_π_continuous,
apply level_π_continuous },
convert he,
ext,
dsimp,
simpa,
end
lemma bounded_cone_point_continuous_neg'_aux {J : Type u}
[small_category J]
{K : J ⥤ ProFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c : nnreal), continuous
(pseudo_normed_group.neg' :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c)) :=
begin
intros c,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c) :=
pseudo_normed_group.neg',
change continuous g,
suffices : continuous ((filtration_homeo C c) ∘ g),
by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
dsimp [g],
intros j,
let e := pseudo_normed_group.neg' ∘ level_π C j c,
have he : continuous e,
{ apply continuous.comp,
apply comphaus_filtered_pseudo_normed_group.continuous_neg',
apply level_π_continuous },
convert he,
ext,
dsimp,
simpa,
end
lemma bounded_cone_point_continuous_cast_le_aux {J : Type u}
[small_category J]
{K : J ⥤ ProFiltPseuNormGrp₁}
(C : category_theory.limits.limit_cone
((K ⋙ to_PNG₁) ⋙ PseuNormGrp₁.to_Ab)) :
∀ (c₁ c₂ : nnreal) (h : c₁ ≤ c₂), continuous
(pseudo_normed_group.cast_le' h :
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂)) :=
begin
intros c₁ c₂ h,
let g : (pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₁) →
(pseudo_normed_group.filtration (PseuNormGrp₁.bounded_cone_point C) c₂) :=
pseudo_normed_group.cast_le' h,
change continuous g,
suffices : continuous ((filtration_homeo C _) ∘ g), by simpa using this,
apply continuous.subtype_mk,
apply continuous_pi,
intros j,
dsimp [g],
let e := pseudo_normed_group.cast_le' h ∘ level_π C j c₁,
have he : continuous e,
{ apply continuous.comp,
haveI : fact (c₁ ≤ c₂) := ⟨h⟩,
apply comphaus_filtered_pseudo_normed_group.continuous_cast_le,
apply level_π_continuous },
exact he,
end
def bounded_cone_point : ProFiltPseuNormGrp₁ :=
{ M := PseuNormGrp₁.bounded_cone_point C,
str :=
{ continuous_add' := bounded_cone_point_continuous_add'_aux _,
continuous_neg' := bounded_cone_point_continuous_neg'_aux _,
continuous_cast_le := λ _ _ h, bounded_cone_point_continuous_cast_le_aux _ _ _ h.out,
..(infer_instance : pseudo_normed_group (PseuNormGrp₁.bounded_cone_point C)) },
exhaustive' := (PseuNormGrp₁.bounded_cone_point C).exhaustive }
def bounded_cone : cone K :=
{ X := bounded_cone_point C,
π :=
{ app := λ j,
{ continuous' := λ c, level_π_continuous _ _ _,
..((PseuNormGrp₁.bounded_cone C).π.app j) },
naturality' := begin
intros i j f,
ext,
dsimp,
rw ← (PseuNormGrp₁.bounded_cone C).w f,
refl,
end } }
def bounded_cone_is_limit : is_limit (bounded_cone C) :=
{ lift := λ S,
{ continuous' := begin
intros c,
let t : pseudo_normed_group.filtration S.X c →
pseudo_normed_group.filtration (bounded_cone C).X c :=
(((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S)).level _),
change continuous t,
suffices : continuous ((filtration_homeo C c) ∘ t), by simpa using this,
have : ⇑(filtration_homeo C c) ∘ t =
(Profinite.limit_cone_is_limit _).lift ((level.obj c).map_cone S),
{ ext,
change (C.is_limit.lift _ ≫ C.cone.π.app _) _ = _,
rw C.is_limit.fac, refl },
rw this,
continuity,
end,
..((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S)) },
fac' := begin
intros S j,
ext,
dsimp [bounded_cone],
change ((PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S) ≫
(PseuNormGrp₁.bounded_cone C).π.app j) _ = _,
rw (PseuNormGrp₁.bounded_cone_is_limit C).fac,
refl,
end,
uniq' := begin
intros S m hm,
ext,
dsimp,
have : to_PNG₁.map m =
(PseuNormGrp₁.bounded_cone_is_limit C).lift (to_PNG₁.map_cone S),
{ apply (PseuNormGrp₁.bounded_cone_is_limit C).uniq (to_PNG₁.map_cone S),
intros j,
ext t,
specialize hm j,
apply_fun (λ e, e t) at hm,
exact hm },
rw ← this,
refl,
end }
instance : preserves_limit K to_PNG₁ :=
begin
apply preserves_limit_of_preserves_limit_cone,
rotate 2,
exact bounded_cone ⟨_,Ab.explicit_limit_cone_is_limit.{u u} _⟩,
exact bounded_cone_is_limit _,
exact PseuNormGrp₁.bounded_cone_is_limit _,
end
/-
Remark: This functor even creates limits, as can be shown using the fact that the forgetful
functor from `Profinite` to `Type*` creates limits.
I don't think we actually need that strong statement, so we only prove the following.
-/
instance : preserves_limits to_PNG₁ :=
begin
constructor, introsI J hJ, constructor
end
end ProFiltPseuNormGrp₁
|
/-
Copyright (c) 2022 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz, Amelia Livingston
-/
import algebra.homology.additive
import category_theory.abelian.pseudoelements
import category_theory.limits.preserves.shapes.kernels
import category_theory.limits.preserves.shapes.images
/-!
The object `homology f g w`, where `w : f ≫ g = 0`, can be identified with either a
cokernel or a kernel. The isomorphism with a cokernel is `homology_iso_cokernel_lift`, which
was obtained elsewhere. In the case of an abelian category, this file shows the isomorphism
with a kernel as well.
We use these isomorphisms to obtain the analogous api for `homology`:
- `homology.ι` is the map from `homology f g w` into the cokernel of `f`.
- `homology.π'` is the map from `kernel g` to `homology f g w`.
- `homology.desc'` constructs a morphism from `homology f g w`, when it is viewed as a cokernel.
- `homology.lift` constructs a morphism to `homology f g w`, when it is viewed as a kernel.
- Various small lemmas are proved as well, mimicking the API for (co)kernels.
With these definitions and lemmas, the isomorphisms between homology and a (co)kernel need not
be used directly.
-/
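/-
As an illustration of the intended use (the names `W`, `e`, `he` below are hypothetical):
given `e : kernel g ⟶ W` with `he : kernel.lift g f w ≫ e = 0`, the morphism
`homology.desc' f g w e he : homology f g w ⟶ W` satisfies
`homology.π' f g w ≫ homology.desc' f g w e he = e` (this is `homology.π'_desc'` below);
dually, `homology.lift` is characterised by `homology.lift_ι`.
-/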
open category_theory.limits
open category_theory
noncomputable theory
universes v u
variables {A : Type u} [category.{v} A] [abelian A]
variables {X Y Z : A} (f : X ⟶ Y) (g : Y ⟶ Z) (w : f ≫ g = 0)
namespace category_theory.abelian
/-- The cokernel of `kernel.lift g f w`. This is isomorphic to `homology f g w`.
See `homology_iso_cokernel_lift`. -/
abbreviation homology_c : A :=
cokernel (kernel.lift g f w)
/-- The kernel of `cokernel.desc f g w`. This is isomorphic to `homology f g w`.
See `homology_iso_kernel_desc`. -/
abbreviation homology_k : A :=
kernel (cokernel.desc f g w)
/-- The canonical map from `homology_c` to `homology_k`.
This is an isomorphism, and it is used in obtaining the API for `homology f g w`
at the bottom of this file. -/
abbreviation homology_c_to_k : homology_c f g w ⟶ homology_k f g w :=
cokernel.desc _ (kernel.lift _ (kernel.ι _ ≫ cokernel.π _) (by simp)) begin
apply limits.equalizer.hom_ext,
simp,
end
local attribute [instance] pseudoelement.hom_to_fun pseudoelement.has_zero
instance : mono (homology_c_to_k f g w) :=
begin
apply pseudoelement.mono_of_zero_of_map_zero,
intros a ha,
obtain ⟨a,rfl⟩ := pseudoelement.pseudo_surjective_of_epi (cokernel.π (kernel.lift g f w)) a,
apply_fun (kernel.ι (cokernel.desc f g w)) at ha,
simp only [←pseudoelement.comp_apply, cokernel.π_desc,
kernel.lift_ι, pseudoelement.apply_zero] at ha,
simp only [pseudoelement.comp_apply] at ha,
obtain ⟨b,hb⟩ : ∃ b, f b = _ := (pseudoelement.pseudo_exact_of_exact (exact_cokernel f)).2 _ ha,
rsuffices ⟨c, rfl⟩ : ∃ c, kernel.lift g f w c = a,
{ simp [← pseudoelement.comp_apply] },
use b,
apply_fun kernel.ι g,
swap, { apply pseudoelement.pseudo_injective_of_mono },
simpa [← pseudoelement.comp_apply]
end
instance : epi (homology_c_to_k f g w) :=
begin
apply pseudoelement.epi_of_pseudo_surjective,
intros a,
let b := kernel.ι (cokernel.desc f g w) a,
obtain ⟨c,hc⟩ : ∃ c, cokernel.π f c = b,
apply pseudoelement.pseudo_surjective_of_epi (cokernel.π f),
have : g c = 0,
{ dsimp [b] at hc,
rw [(show g = cokernel.π f ≫ cokernel.desc f g w, by simp), pseudoelement.comp_apply, hc],
simp [← pseudoelement.comp_apply] },
obtain ⟨d,hd⟩ : ∃ d, kernel.ι g d = c,
{ apply (pseudoelement.pseudo_exact_of_exact exact_kernel_ι).2 _ this },
use cokernel.π (kernel.lift g f w) d,
apply_fun kernel.ι (cokernel.desc f g w),
swap, { apply pseudoelement.pseudo_injective_of_mono },
simp only [←pseudoelement.comp_apply, cokernel.π_desc, kernel.lift_ι],
simp only [pseudoelement.comp_apply, hd, hc],
end
instance (w : f ≫ g = 0) : is_iso (homology_c_to_k f g w) := is_iso_of_mono_of_epi _
end category_theory.abelian
/-- The homology associated to `f` and `g` is isomorphic to a kernel. -/
def homology_iso_kernel_desc : homology f g w ≅ kernel (cokernel.desc f g w) :=
homology_iso_cokernel_lift _ _ _ ≪≫ as_iso (category_theory.abelian.homology_c_to_k _ _ _)
namespace homology
-- the name `homology.π` is already taken (the map from the kernel subobject), hence `π'` below
/-- The canonical map from the kernel of `g` to the homology of `f` and `g`. -/
def π' : kernel g ⟶ homology f g w :=
cokernel.π _ ≫ (homology_iso_cokernel_lift _ _ _).inv
/-- The canonical map from the homology of `f` and `g` to the cokernel of `f`. -/
def ι : homology f g w ⟶ cokernel f :=
(homology_iso_kernel_desc _ _ _).hom ≫ kernel.ι _
/-- Obtain a morphism from the homology, given a morphism from the kernel. -/
def desc' {W : A} (e : kernel g ⟶ W) (he : kernel.lift g f w ≫ e = 0) :
homology f g w ⟶ W :=
(homology_iso_cokernel_lift _ _ _).hom ≫ cokernel.desc _ e he
/-- Obtain a morphism to the homology, given a morphism to the cokernel. -/
def lift {W : A} (e : W ⟶ cokernel f) (he : e ≫ cokernel.desc f g w = 0) :
W ⟶ homology f g w :=
kernel.lift _ e he ≫ (homology_iso_kernel_desc _ _ _).inv
@[simp, reassoc]
lemma π'_desc' {W : A} (e : kernel g ⟶ W) (he : kernel.lift g f w ≫ e = 0) :
π' f g w ≫ desc' f g w e he = e :=
by { dsimp [π', desc'], simp }
@[simp, reassoc]
lemma lift_ι {W : A} (e : W ⟶ cokernel f) (he : e ≫ cokernel.desc f g w = 0) :
lift f g w e he ≫ ι _ _ _ = e :=
by { dsimp [ι, lift], simp }
@[simp, reassoc]
lemma condition_π' : kernel.lift g f w ≫ π' f g w = 0 :=
by { dsimp [π'], simp }
@[simp, reassoc]
lemma condition_ι : ι f g w ≫ cokernel.desc f g w = 0 :=
by { dsimp [ι], simp }
@[ext]
lemma hom_from_ext {W : A} (a b : homology f g w ⟶ W)
(h : π' f g w ≫ a = π' f g w ≫ b) : a = b :=
begin
dsimp [π'] at h,
apply_fun (λ e, (homology_iso_cokernel_lift f g w).inv ≫ e),
swap,
{ intros i j hh,
apply_fun (λ e, (homology_iso_cokernel_lift f g w).hom ≫ e) at hh,
simpa using hh },
simp only [category.assoc] at h,
exact coequalizer.hom_ext h,
end
@[ext]
lemma hom_to_ext {W : A} (a b : W ⟶ homology f g w)
(h : a ≫ ι f g w = b ≫ ι f g w) : a = b :=
begin
dsimp [ι] at h,
apply_fun (λ e, e ≫ (homology_iso_kernel_desc f g w).hom),
swap,
{ intros i j hh,
apply_fun (λ e, e ≫ (homology_iso_kernel_desc f g w).inv) at hh,
simpa using hh },
simp only [← category.assoc] at h,
exact equalizer.hom_ext h,
end
@[simp, reassoc]
lemma π'_ι : π' f g w ≫ ι f g w = kernel.ι _ ≫ cokernel.π _ :=
by { dsimp [π', ι, homology_iso_kernel_desc], simp }
@[simp, reassoc]
lemma π'_eq_π : (kernel_subobject_iso _).hom ≫ π' f g w = π _ _ _ :=
begin
dsimp [π', homology_iso_cokernel_lift],
simp only [← category.assoc],
rw iso.comp_inv_eq,
dsimp [π, homology_iso_cokernel_image_to_kernel'],
simp,
end
section
variables {X' Y' Z' : A} (f' : X' ⟶ Y') (g' : Y' ⟶ Z') (w' : f' ≫ g' = 0)
@[simp, reassoc]
lemma π'_map (α β h) :
π' _ _ _ ≫ map w w' α β h = kernel.map _ _ α.right β.right (by simp [h,β.w.symm]) ≫ π' _ _ _ :=
begin
apply_fun (λ e, (kernel_subobject_iso _).hom ≫ e),
swap,
{ intros i j hh,
apply_fun (λ e, (kernel_subobject_iso _).inv ≫ e) at hh,
simpa using hh },
dsimp [map],
simp only [π'_eq_π_assoc],
dsimp [π],
simp only [cokernel.π_desc],
rw [← iso.inv_comp_eq, ← category.assoc],
have : (limits.kernel_subobject_iso g).inv ≫ limits.kernel_subobject_map β =
kernel.map _ _ β.left β.right β.w.symm ≫ (kernel_subobject_iso _).inv,
{ rw [iso.inv_comp_eq, ← category.assoc, iso.eq_comp_inv],
ext,
dsimp,
simp },
rw this,
simp only [category.assoc],
dsimp [π', homology_iso_cokernel_lift],
simp only [cokernel_iso_of_eq_inv_comp_desc, cokernel.π_desc_assoc],
congr' 1,
{ congr, exact h.symm },
{ rw [iso.inv_comp_eq, ← category.assoc, iso.eq_comp_inv],
dsimp [homology_iso_cokernel_image_to_kernel'],
simp }
end
lemma map_eq_desc'_lift_left (α β h) : map w w' α β h =
homology.desc' _ _ _ (homology.lift _ _ _ (kernel.ι _ ≫ β.left ≫ cokernel.π _) (by simp))
(by { ext, simp only [←h, category.assoc, zero_comp, lift_ι, kernel.lift_ι_assoc],
erw ← reassoc_of α.w, simp } ) :=
begin
apply homology.hom_from_ext,
simp only [π'_map, π'_desc'],
dsimp [π', lift],
rw iso.eq_comp_inv,
dsimp [homology_iso_kernel_desc],
ext,
simp [h],
end
lemma map_eq_lift_desc'_left (α β h) : map w w' α β h =
homology.lift _ _ _ (homology.desc' _ _ _ (kernel.ι _ ≫ β.left ≫ cokernel.π _)
(by { simp only [kernel.lift_ι_assoc, ← h], erw ← reassoc_of α.w, simp }))
(by { ext, simp }) :=
by { rw map_eq_desc'_lift_left, ext, simp }
lemma map_eq_desc'_lift_right (α β h) : map w w' α β h =
homology.desc' _ _ _ (homology.lift _ _ _ (kernel.ι _ ≫ α.right ≫ cokernel.π _) (by simp [h]))
(by { ext, simp only [category.assoc, zero_comp, lift_ι, kernel.lift_ι_assoc],
erw ← reassoc_of α.w, simp } ) :=
by { rw map_eq_desc'_lift_left, ext, simp [h] }
lemma map_eq_lift_desc'_right (α β h) : map w w' α β h =
homology.lift _ _ _ (homology.desc' _ _ _ (kernel.ι _ ≫ α.right ≫ cokernel.π _)
(by { simp only [kernel.lift_ι_assoc], erw ← reassoc_of α.w, simp }))
(by { ext, simp [h] }) :=
by { rw map_eq_desc'_lift_right, ext, simp }
@[simp, reassoc]
lemma map_ι (α β h) :
map w w' α β h ≫ ι f' g' w' = ι f g w ≫ cokernel.map f f' α.left β.left (by simp [h, β.w.symm]) :=
begin
rw [map_eq_lift_desc'_left, lift_ι],
ext,
simp only [← category.assoc],
rw [π'_ι, π'_desc', category.assoc, category.assoc, cokernel.π_desc],
end
end
end homology
namespace category_theory.functor
variables {ι : Type*} {c : complex_shape ι} {B : Type*} [category B] [abelian B] (F : A ⥤ B)
[functor.additive F] [preserves_finite_limits F] [preserves_finite_colimits F]
/-- When `F` is an exact additive functor, `F(Hᵢ(X)) ≅ Hᵢ(F(X))` for `X` a complex. -/
noncomputable def homology_iso (C : homological_complex A c) (j : ι) :
F.obj (C.homology j) ≅ ((F.map_homological_complex _).obj C).homology j :=
(preserves_cokernel.iso _ _).trans (cokernel.map_iso _ _ ((F.map_iso (image_subobject_iso _)).trans
((preserves_image.iso _ _).symm.trans (image_subobject_iso _).symm))
((F.map_iso (kernel_subobject_iso _)).trans ((preserves_kernel.iso _ _).trans
(kernel_subobject_iso _).symm))
begin
dsimp,
ext,
simp only [category.assoc, image_to_kernel_arrow],
erw [kernel_subobject_arrow', kernel_comparison_comp_ι, image_subobject_arrow'],
simp [←F.map_comp],
end)
/-- If `F` is an exact additive functor, then `F` commutes with `Hᵢ` (up to natural isomorphism). -/
noncomputable def homology_functor_iso (i : ι) :
homology_functor A c i ⋙ F ≅ F.map_homological_complex c ⋙ homology_functor B c i :=
nat_iso.of_components (λ X, homology_iso F X i)
begin
intros X Y f,
dsimp,
rw [←iso.inv_comp_eq, ←category.assoc, ←iso.eq_comp_inv],
refine coequalizer.hom_ext _,
dsimp [homology_iso],
simp only [homology.map, ←category.assoc, cokernel.π_desc],
simp only [category.assoc, cokernel_comparison_map_desc, cokernel.π_desc,
π_comp_cokernel_comparison, ←F.map_comp],
erw ←kernel_subobject_iso_comp_kernel_map_assoc,
simp only [homological_complex.hom.sq_from_right,
homological_complex.hom.sq_from_left, F.map_homological_complex_map_f, F.map_comp],
dunfold homological_complex.d_from homological_complex.hom.next,
dsimp,
rw [kernel_map_comp_preserves_kernel_iso_inv_assoc, ←F.map_comp_assoc,
←kernel_map_comp_kernel_subobject_iso_inv],
any_goals { simp },
end
end category_theory.functor
|
[STATEMENT]
lemma gterm_set_gterm_funs_terms:
"set_gterm t = funs_term (term_of_gterm t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set_gterm t = funs_term (term_of_gterm t)
[PROOF STEP]
by (induct t) auto |
(*
Title: Psi-calculi
Author/Maintainer: Jesper Bengtson ([email protected]), 2012
*)
theory Bisim_Subst
imports Bisim_Struct_Cong Close_Subst
begin
context env begin
abbreviation
bisimSubstJudge ("_ \<rhd> _ \<sim>\<^sub>s _" [70, 70, 70] 65) where "\<Psi> \<rhd> P \<sim>\<^sub>s Q \<equiv> (\<Psi>, P, Q) \<in> closeSubst bisim"
abbreviation
bisimSubstNilJudge ("_ \<sim>\<^sub>s _" [70, 70] 65) where "P \<sim>\<^sub>s Q \<equiv> SBottom' \<rhd> P \<sim>\<^sub>s Q"
lemmas bisimSubstClosed[eqvt] = closeSubstClosed[OF bisimEqvt]
lemmas bisimSubstEqvt[simp] = closeSubstEqvt[OF bisimEqvt]
lemma bisimSubstOutputPres:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and M :: 'a
and N :: 'a
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
shows "\<Psi> \<rhd> M\<langle>N\<rangle>.P \<sim>\<^sub>s M\<langle>N\<rangle>.Q"
using assms
by(fastforce intro: closeSubstI closeSubstE bisimOutputPres)
lemma seqSubstInputChain[simp]:
fixes xvec :: "name list"
and N :: "'a"
and P :: "('a, 'b, 'c) psi"
and \<sigma> :: "(name list \<times> 'a list) list"
assumes "xvec \<sharp>* \<sigma>"
shows "seqSubs' (inputChain xvec N P) \<sigma> = inputChain xvec (substTerm.seqSubst N \<sigma>) (seqSubs P \<sigma>)"
using assms
by(induct xvec) auto
lemma bisimSubstInputPres:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and M :: 'a
and xvec :: "name list"
and N :: 'a
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
and "xvec \<sharp>* \<Psi>"
and "distinct xvec"
shows "\<Psi> \<rhd> M\<lparr>\<lambda>*xvec N\<rparr>.P \<sim>\<^sub>s M\<lparr>\<lambda>*xvec N\<rparr>.Q"
proof(rule_tac closeSubstI)
fix \<sigma>
assume "wellFormedSubst(\<sigma>::(name list \<times> 'a list) list)"
obtain p where "(p \<bullet> xvec) \<sharp>* \<sigma>"
and "(p \<bullet> xvec) \<sharp>* P" and "(p \<bullet> xvec) \<sharp>* Q" and "(p \<bullet> xvec) \<sharp>* \<Psi>" and "(p \<bullet> xvec) \<sharp>* N"
and S: "set p \<subseteq> set xvec \<times> set (p \<bullet> xvec)"
by(rule_tac c="(\<sigma>, P, Q, \<Psi>, N)" in name_list_avoiding) auto
from \<open>\<Psi> \<rhd> P \<sim>\<^sub>s Q\<close> have "(p \<bullet> \<Psi>) \<rhd> (p \<bullet> P) \<sim>\<^sub>s (p \<bullet> Q)"
by(rule bisimSubstClosed)
with \<open>xvec \<sharp>* \<Psi>\<close> \<open>(p \<bullet> xvec) \<sharp>* \<Psi>\<close> S have "\<Psi> \<rhd> (p \<bullet> P) \<sim>\<^sub>s (p \<bullet> Q)"
by simp
{
fix Tvec :: "'a list"
from \<open>\<Psi> \<rhd> (p \<bullet> P) \<sim>\<^sub>s (p \<bullet> Q)\<close> \<open>wellFormedSubst \<sigma>\<close> have "\<Psi> \<rhd> (p \<bullet> P)[<\<sigma>>] \<sim>\<^sub>s (p \<bullet> Q)[<\<sigma>>]"
by(rule closeSubstUnfold)
moreover assume "length xvec = length Tvec" and "distinct xvec"
ultimately have "\<Psi> \<rhd> ((p \<bullet> P)[<\<sigma>>])[(p \<bullet> xvec)::=Tvec] \<sim> ((p \<bullet> Q)[<\<sigma>>])[(p \<bullet> xvec)::=Tvec]"
by(drule_tac closeSubstE[where \<sigma>="[((p \<bullet> xvec), Tvec)]"]) auto
}
with \<open>(p \<bullet> xvec) \<sharp>* \<sigma>\<close> \<open>distinct xvec\<close>
have "\<Psi> \<rhd> (M\<lparr>\<lambda>*(p \<bullet> xvec) (p \<bullet> N)\<rparr>.(p \<bullet> P))[<\<sigma>>] \<sim> (M\<lparr>\<lambda>*(p \<bullet> xvec) (p \<bullet> N)\<rparr>.(p \<bullet> Q))[<\<sigma>>]"
by(force intro: bisimInputPres)
moreover from \<open>(p \<bullet> xvec) \<sharp>* N\<close> \<open>(p \<bullet> xvec) \<sharp>* P\<close> S have "M\<lparr>\<lambda>*(p \<bullet> xvec) (p \<bullet> N)\<rparr>.(p \<bullet> P) = M\<lparr>\<lambda>*xvec N\<rparr>.P"
apply(simp add: psi.inject) by(rule inputChainAlpha[symmetric]) auto
moreover from \<open>(p \<bullet> xvec) \<sharp>* N\<close> \<open>(p \<bullet> xvec) \<sharp>* Q\<close> S have "M\<lparr>\<lambda>*(p \<bullet> xvec) (p \<bullet> N)\<rparr>.(p \<bullet> Q) = M\<lparr>\<lambda>*xvec N\<rparr>.Q"
apply(simp add: psi.inject) by(rule inputChainAlpha[symmetric]) auto
ultimately show "\<Psi> \<rhd> (M\<lparr>\<lambda>*xvec N\<rparr>.P)[<\<sigma>>] \<sim> (M\<lparr>\<lambda>*xvec N\<rparr>.Q)[<\<sigma>>]"
by force
qed
lemma bisimSubstCasePresAux:
fixes \<Psi> :: 'b
and CsP :: "('c \<times> ('a, 'b, 'c) psi) list"
and CsQ :: "('c \<times> ('a, 'b, 'c) psi) list"
assumes C1: "\<And>\<phi> P. (\<phi>, P) mem CsP \<Longrightarrow> \<exists>Q. (\<phi>, Q) mem CsQ \<and> guarded Q \<and> \<Psi> \<rhd> P \<sim>\<^sub>s Q"
and C2: "\<And>\<phi> Q. (\<phi>, Q) mem CsQ \<Longrightarrow> \<exists>P. (\<phi>, P) mem CsP \<and> guarded P \<and> \<Psi> \<rhd> P \<sim>\<^sub>s Q"
shows "\<Psi> \<rhd> Cases CsP \<sim>\<^sub>s Cases CsQ"
proof -
{
fix \<sigma> :: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
have "\<Psi> \<rhd> Cases(caseListSeqSubst CsP \<sigma>) \<sim> Cases(caseListSeqSubst CsQ \<sigma>)"
proof(rule bisimCasePres)
fix \<phi> P
assume "(\<phi>, P) mem (caseListSeqSubst CsP \<sigma>)"
then obtain \<phi>' P' where "(\<phi>', P') mem CsP" and "\<phi> = substCond.seqSubst \<phi>' \<sigma>" and PeqP': "P = (P'[<\<sigma>>])"
by(induct CsP) force+
from \<open>(\<phi>', P') mem CsP\<close> obtain Q' where "(\<phi>', Q') mem CsQ" and "guarded Q'" and "\<Psi> \<rhd> P' \<sim>\<^sub>s Q'" by(blast dest: C1)
from \<open>(\<phi>', Q') mem CsQ\<close> \<open>\<phi> = substCond.seqSubst \<phi>' \<sigma>\<close> obtain Q where "(\<phi>, Q) mem (caseListSeqSubst CsQ \<sigma>)" and "Q = Q'[<\<sigma>>]"
by(induct CsQ) auto
with PeqP' \<open>guarded Q'\<close> \<open>\<Psi> \<rhd> P' \<sim>\<^sub>s Q'\<close> \<open>wellFormedSubst \<sigma>\<close> show "\<exists>Q. (\<phi>, Q) mem (caseListSeqSubst CsQ \<sigma>) \<and> guarded Q \<and> \<Psi> \<rhd> P \<sim> Q"
by(blast dest: closeSubstE guardedSeqSubst)
next
fix \<phi> Q
assume "(\<phi>, Q) mem (caseListSeqSubst CsQ \<sigma>)"
then obtain \<phi>' Q' where "(\<phi>', Q') mem CsQ" and "\<phi> = substCond.seqSubst \<phi>' \<sigma>" and QeqQ': "Q = Q'[<\<sigma>>]"
by(induct CsQ) force+
from \<open>(\<phi>', Q') mem CsQ\<close> obtain P' where "(\<phi>', P') mem CsP" and "guarded P'" and "\<Psi> \<rhd> P' \<sim>\<^sub>s Q'" by(blast dest: C2)
from \<open>(\<phi>', P') mem CsP\<close> \<open>\<phi> = substCond.seqSubst \<phi>' \<sigma>\<close> obtain P where "(\<phi>, P) mem (caseListSeqSubst CsP \<sigma>)" and "P = P'[<\<sigma>>]"
by(induct CsP) auto
with QeqQ' \<open>guarded P'\<close> \<open>\<Psi> \<rhd> P' \<sim>\<^sub>s Q'\<close> \<open>wellFormedSubst \<sigma>\<close> show "\<exists>P. (\<phi>, P) mem (caseListSeqSubst CsP \<sigma>) \<and> guarded P \<and> \<Psi> \<rhd> P \<sim> Q"
by(blast dest: closeSubstE guardedSeqSubst)
qed
}
thus ?thesis
by(rule_tac closeSubstI) auto
qed
lemma bisimSubstReflexive:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
shows "\<Psi> \<rhd> P \<sim>\<^sub>s P"
by(auto intro: closeSubstI bisimReflexive)
lemma bisimSubstTransitive:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and R :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
and "\<Psi> \<rhd> Q \<sim>\<^sub>s R"
shows "\<Psi> \<rhd> P \<sim>\<^sub>s R"
using assms
by(auto intro: closeSubstI closeSubstE bisimTransitive)
lemma bisimSubstSymmetric:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
shows "\<Psi> \<rhd> Q \<sim>\<^sub>s P"
using assms
by(auto intro: closeSubstI closeSubstE bisimE)
(*
lemma bisimSubstCasePres:
fixes \<Psi> :: 'b
and CsP :: "('c \<times> ('a, 'b, 'c) psi) list"
and CsQ :: "('c \<times> ('a, 'b, 'c) psi) list"
assumes "length CsP = length CsQ"
and C: "\<And>(i::nat) \<phi> P \<phi>' Q. \<lbrakk>i <= length CsP; (\<phi>, P) = nth CsP i; (\<phi>', Q) = nth CsQ i\<rbrakk> \<Longrightarrow> \<phi> = \<phi>' \<and> \<Psi> \<rhd> P \<sim> Q"
shows "\<Psi> \<rhd> Cases CsP \<sim>\<^sub>s Cases CsQ"
proof -
{
fix \<phi>
and P
assume "(\<phi>, P) mem CsP"
with `length CsP = length CsQ` have "\<exists>Q. (\<phi>, Q) mem CsQ \<and> \<Psi> \<rhd> P \<sim>\<^sub>s Q"
apply(induct n=="length CsP" arbitrary: CsP CsQ rule: nat.induct)
apply simp
apply simp
apply auto
}
using `length CsP = length CsQ`
proof(induct n=="length CsP" rule: nat.induct)
case zero
thus ?case by(fastforce intro: bisimSubstReflexive)
next
case(Suc n)
next
apply auto
apply(blast intro: bisimSubstReflexive)
apply auto
apply(simp add: nth.simps)
apply(auto simp add: nth.simps)
apply blast
apply(rule_tac bisimSubstCasePresAux)
apply auto
*)
lemma bisimSubstParPres:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and R :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
shows "\<Psi> \<rhd> P \<parallel> R \<sim>\<^sub>s Q \<parallel> R"
using assms
by(fastforce intro: closeSubstI closeSubstE bisimParPres)
lemma bisimSubstResPres:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and x :: name
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
and "x \<sharp> \<Psi>"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>P \<sim>\<^sub>s \<lparr>\<nu>x\<rparr>Q"
proof(rule_tac closeSubstI)
fix \<sigma> :: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> P" and "y \<sharp> Q" and "y \<sharp> \<sigma>"
by(generate_fresh "name") (auto simp add: fresh_prod)
from \<open>\<Psi> \<rhd> P \<sim>\<^sub>s Q\<close> have "([(x, y)] \<bullet> \<Psi>) \<rhd> ([(x, y)] \<bullet> P) \<sim>\<^sub>s ([(x, y)] \<bullet> Q)"
by(rule bisimSubstClosed)
with \<open>x \<sharp> \<Psi>\<close> \<open>y \<sharp> \<Psi>\<close> have "\<Psi> \<rhd> ([(x, y)] \<bullet> P) \<sim>\<^sub>s ([(x, y)] \<bullet> Q)"
by simp
hence "\<Psi> \<rhd> ([(x, y)] \<bullet> P)[<\<sigma>>] \<sim> ([(x, y)] \<bullet> Q)[<\<sigma>>]" using \<open>wellFormedSubst \<sigma>\<close>
by(rule closeSubstE)
hence "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> P)[<\<sigma>>]) \<sim> \<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> Q)[<\<sigma>>])" using \<open>y \<sharp> \<Psi>\<close>
by(rule bisimResPres)
with \<open>y \<sharp> P\<close> \<open>y \<sharp> Q\<close> \<open>y \<sharp> \<sigma>\<close>
show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>P)[<\<sigma>>] \<sim> (\<lparr>\<nu>x\<rparr>Q)[<\<sigma>>]"
by(simp add: alphaRes)
qed
lemma bisimSubstBangPres:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
assumes "\<Psi> \<rhd> P \<sim>\<^sub>s Q"
and "guarded P"
and "guarded Q"
shows "\<Psi> \<rhd> !P \<sim>\<^sub>s !Q"
using assms
by(fastforce intro: closeSubstI closeSubstE bisimBangPres guardedSeqSubst)
lemma substNil[simp]:
fixes xvec :: "name list"
and Tvec :: "'a list"
assumes "wellFormedSubst \<sigma>"
and "distinct xvec"
shows "(\<zero>[<\<sigma>>]) = \<zero>"
using assms
by simp
lemma bisimSubstParNil:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
shows "\<Psi> \<rhd> P \<parallel> \<zero> \<sim>\<^sub>s P"
by(fastforce intro: closeSubstI bisimParNil)
lemma bisimSubstParComm:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
shows "\<Psi> \<rhd> P \<parallel> Q \<sim>\<^sub>s Q \<parallel> P"
apply(rule closeSubstI)
by(fastforce intro: closeSubstI bisimParComm)
lemma bisimSubstParAssoc:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
and R :: "('a, 'b, 'c) psi"
shows "\<Psi> \<rhd> (P \<parallel> Q) \<parallel> R \<sim>\<^sub>s P \<parallel> (Q \<parallel> R)"
apply(rule closeSubstI)
by(fastforce intro: closeSubstI bisimParAssoc)
lemma bisimSubstResNil:
fixes \<Psi> :: 'b
and x :: name
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>\<zero> \<sim>\<^sub>s \<zero>"
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>"
by(generate_fresh "name") (auto simp add: fresh_prod)
have "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>\<zero> \<sim> \<zero>" by(rule bisimResNil)
with \<open>y \<sharp> \<sigma>\<close> \<open>wellFormedSubst \<sigma>\<close> show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>\<zero>)[<\<sigma>>] \<sim> \<zero>[<\<sigma>>]"
by(subst alphaRes[of y]) auto
qed
lemma seqSubst2:
fixes x :: name
and P :: "('a, 'b, 'c) psi"
assumes "wellFormedSubst \<sigma>"
and "x \<sharp> \<sigma>"
and "x \<sharp> P"
shows "x \<sharp> P[<\<sigma>>]"
using assms
by(induct \<sigma> arbitrary: P, auto) (blast dest: subst2)
notation substTerm.seqSubst ("_[<_>]" [100, 100] 100)
lemma bisimSubstScopeExt:
fixes \<Psi> :: 'b
and x :: name
and P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
assumes "x \<sharp> P"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(P \<parallel> Q) \<sim>\<^sub>s P \<parallel> \<lparr>\<nu>x\<rparr>Q"
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>" and "y \<sharp> P" and "y \<sharp> Q"
by(generate_fresh "name") (auto simp add: fresh_prod)
moreover from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> \<sigma>\<close> \<open>y \<sharp> P\<close> have "y \<sharp> P[<\<sigma>>]"
by(rule seqSubst2)
hence "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((P[<\<sigma>>]) \<parallel> (([(x, y)] \<bullet> Q)[<\<sigma>>])) \<sim> (P[<\<sigma>>]) \<parallel> \<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> Q)[<\<sigma>>])"
by(rule bisimScopeExt)
with \<open>x \<sharp> P\<close> \<open>y \<sharp> P\<close> \<open>y \<sharp> Q\<close> \<open>y \<sharp> \<sigma>\<close> show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(P \<parallel> Q))[<\<sigma>>] \<sim> (P \<parallel> \<lparr>\<nu>x\<rparr>Q)[<\<sigma>>]"
apply(subst alphaRes[of y], simp)
apply(subst alphaRes[of y Q], simp)
by(simp add: eqvts)
qed
lemma bisimSubstCasePushRes:
fixes x :: name
and \<Psi> :: 'b
and Cs :: "('c \<times> ('a, 'b, 'c) psi) list"
assumes "x \<sharp> map fst Cs"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(Cases Cs) \<sim>\<^sub>s Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>x\<rparr>P)) Cs"
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>" and "y \<sharp> Cs"
by(generate_fresh "name") (auto simp add: fresh_prod)
{
fix x :: name
and Cs :: "('c \<times> ('a, 'b, 'c) psi) list"
and \<sigma> :: "(name list \<times> 'a list) list"
assume "x \<sharp> \<sigma>"
hence "(Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>x\<rparr>P)) Cs)[<\<sigma>>] = Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>x\<rparr>P)) (caseListSeqSubst Cs \<sigma>)"
by(induct Cs) auto
}
note C1 = this
{
fix x :: name
and y :: name
and Cs :: "('c \<times> ('a, 'b, 'c) psi) list"
assume "x \<sharp> map fst Cs"
and "y \<sharp> map fst Cs"
and "y \<sharp> Cs"
hence "(Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>x\<rparr>P)) Cs) = Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>y\<rparr>P)) ([(x, y)] \<bullet> Cs)"
by(induct Cs) (auto simp add: fresh_list_cons alphaRes)
}
note C2 = this
from \<open>y \<sharp> Cs\<close> have "y \<sharp> map fst Cs" by(induct Cs) (auto simp add: fresh_list_cons fresh_list_nil)
from \<open>y \<sharp> Cs\<close> \<open>y \<sharp> \<sigma>\<close> \<open>x \<sharp> map fst Cs\<close> \<open>wellFormedSubst \<sigma>\<close> have "y \<sharp> map fst (caseListSeqSubst ([(x, y)] \<bullet> Cs) \<sigma>)"
by(induct Cs) (auto intro: substCond.seqSubst2 simp add: fresh_list_cons fresh_list_nil fresh_prod)
hence "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>(Cases(caseListSeqSubst ([(x, y)] \<bullet> Cs) \<sigma>)) \<sim> Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>y\<rparr>P)) (caseListSeqSubst ([(x, y)] \<bullet> Cs) \<sigma>)"
by(rule bisimCasePushRes)
with \<open>y \<sharp> Cs\<close> \<open>x \<sharp> map fst Cs\<close> \<open>y \<sharp> map fst Cs\<close> \<open>y \<sharp> \<sigma>\<close> \<open>wellFormedSubst \<sigma>\<close>
show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(Cases Cs))[<\<sigma>>] \<sim> (Cases map (\<lambda>(\<phi>, P). (\<phi>, \<lparr>\<nu>x\<rparr>P)) Cs)[<\<sigma>>]"
apply(subst C2[of x Cs y])
apply assumption+
apply(subst C1)
apply assumption+
apply(subst alphaRes[of y], simp)
by(simp add: eqvts)
qed
lemma bisimSubstOutputPushRes:
fixes x :: name
and \<Psi> :: 'b
and M :: 'a
and N :: 'a
and P :: "('a, 'b, 'c) psi"
assumes "x \<sharp> M"
and "x \<sharp> N"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P) \<sim>\<^sub>s M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P"
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>" and "y \<sharp> P" and "y \<sharp> M" and "y \<sharp> N"
by(generate_fresh "name") (auto simp add: fresh_prod)
from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> M\<close> \<open>y \<sharp> \<sigma>\<close> have "y \<sharp> M[<\<sigma>>]" by auto
moreover from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> N\<close> \<open>y \<sharp> \<sigma>\<close> have "y \<sharp> N[<\<sigma>>]" by auto
ultimately have "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.(([(x, y)] \<bullet> P)[<\<sigma>>])) \<sim> (M[<\<sigma>>])\<langle>(N[<\<sigma>>])\<rangle>.(\<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> P)[<\<sigma>>]))"
by(rule bisimOutputPushRes)
with \<open>y \<sharp> M\<close> \<open>y \<sharp> N\<close> \<open>y \<sharp> P\<close> \<open>x \<sharp> M\<close> \<open>x \<sharp> N\<close> \<open>y \<sharp> \<sigma>\<close> \<open>wellFormedSubst \<sigma>\<close>
show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(M\<langle>N\<rangle>.P))[<\<sigma>>] \<sim> (M\<langle>N\<rangle>.\<lparr>\<nu>x\<rparr>P)[<\<sigma>>]"
apply(subst alphaRes[of y], simp)
apply(subst alphaRes[of y P], simp)
by(simp add: eqvts)
qed
lemma bisimSubstInputPushRes:
fixes x :: name
and \<Psi> :: 'b
and M :: 'a
and xvec :: "name list"
and N :: 'a
assumes "x \<sharp> M"
and "x \<sharp> xvec"
and "x \<sharp> N"
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(M\<lparr>\<lambda>*xvec N\<rparr>.P) \<sim>\<^sub>s M\<lparr>\<lambda>*xvec N\<rparr>.\<lparr>\<nu>x\<rparr>P"
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain y::name where "y \<sharp> \<Psi>" and "y \<sharp> \<sigma>" and "y \<sharp> P" and "y \<sharp> M" and "y \<sharp> xvec" and "y \<sharp> N"
by(generate_fresh "name") (auto simp add: fresh_prod)
obtain p::"name prm" where "(p \<bullet> xvec) \<sharp>* N" and "(p \<bullet> xvec) \<sharp>* P" and "x \<sharp> (p \<bullet> xvec)" and "y \<sharp> (p \<bullet> xvec)" and "(p \<bullet> xvec) \<sharp>* \<sigma>"
and S: "set p \<subseteq> set xvec \<times> set(p \<bullet> xvec)"
by(rule_tac c="(N, P, x, y, \<sigma>)" in name_list_avoiding) auto
from \<open>wellFormedSubst \<sigma>\<close> \<open>y \<sharp> M\<close> \<open>y \<sharp> \<sigma> \<close> have "y \<sharp> M[<\<sigma>>]" by auto
moreover note \<open>y \<sharp> (p \<bullet> xvec)\<close>
moreover from \<open>y \<sharp> N\<close> have "(p \<bullet> y) \<sharp> (p \<bullet> N)" by(simp add: pt_fresh_bij[OF pt_name_inst, OF at_name_inst])
with \<open>y \<sharp> xvec\<close> \<open>y \<sharp> (p \<bullet> xvec)\<close> S have "y \<sharp> p \<bullet> N" by simp
with \<open>wellFormedSubst \<sigma>\<close> have "y \<sharp> (p \<bullet> N)[<\<sigma>>]" using \<open>y \<sharp> \<sigma>\<close> by auto
ultimately have "\<Psi> \<rhd> \<lparr>\<nu>y\<rparr>((M[<\<sigma>>])\<lparr>\<lambda>*(p \<bullet> xvec) ((p \<bullet> N)[<\<sigma>>])\<rparr>.(([(x, y)] \<bullet> (p \<bullet> P))[<\<sigma>>])) \<sim> (M[<\<sigma>>])\<lparr>\<lambda>*(p \<bullet> xvec) ((p \<bullet> N)[<\<sigma>>])\<rparr>.(\<lparr>\<nu>y\<rparr>(([(x, y)] \<bullet> p \<bullet> P)[<\<sigma>>]))"
by(rule bisimInputPushRes)
with \<open>y \<sharp> M\<close> \<open>y \<sharp> N\<close> \<open>y \<sharp> P\<close> \<open>x \<sharp> M\<close> \<open>x \<sharp> N\<close> \<open>y \<sharp> xvec\<close> \<open>x \<sharp> xvec\<close> \<open>(p \<bullet> xvec) \<sharp>* N\<close> \<open>(p \<bullet> xvec) \<sharp>* P\<close>
\<open>x \<sharp> (p \<bullet> xvec)\<close> \<open>y \<sharp> (p \<bullet> xvec)\<close> \<open>y \<sharp> \<sigma>\<close> \<open>(p \<bullet> xvec) \<sharp>* \<sigma>\<close> S \<open>wellFormedSubst \<sigma>\<close>
show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(M\<lparr>\<lambda>*xvec N\<rparr>.P))[<\<sigma>>] \<sim> (M\<lparr>\<lambda>*xvec N\<rparr>.\<lparr>\<nu>x\<rparr>P)[<\<sigma>>]"
apply(subst inputChainAlpha')
apply assumption+
apply(subst inputChainAlpha'[of p xvec])
apply(simp add: abs_fresh_star)
apply assumption+
apply(simp add: eqvts)
apply(subst alphaRes[of y], simp)
apply(simp add: inputChainFresh)
apply(simp add: freshChainSimps)
apply(subst alphaRes[of y "(p \<bullet> P)"])
apply(simp add: freshChainSimps)
by(simp add: freshChainSimps eqvts)
qed
lemma bisimSubstResComm:
fixes x :: name
and y :: name
shows "\<Psi> \<rhd> \<lparr>\<nu>x\<rparr>(\<lparr>\<nu>y\<rparr>P) \<sim>\<^sub>s \<lparr>\<nu>y\<rparr>(\<lparr>\<nu>x\<rparr>P)"
proof(case_tac "x = y")
assume "x = y"
thus ?thesis by(force intro: bisimSubstReflexive)
next
assume "x \<noteq> y"
show ?thesis
proof(rule closeSubstI)
fix \<sigma>:: "(name list \<times> 'a list) list"
assume "wellFormedSubst \<sigma>"
obtain x'::name where "x' \<sharp> \<Psi>" and "x' \<sharp> \<sigma>" and "x' \<sharp> P" and "x \<noteq> x'" and "y \<noteq> x'"
by(generate_fresh "name") (auto simp add: fresh_prod)
obtain y'::name where "y' \<sharp> \<Psi>" and "y' \<sharp> \<sigma>" and "y' \<sharp> P" and "x \<noteq> y'" and "y \<noteq> y'" and "x' \<noteq> y'"
by(generate_fresh "name") (auto simp add: fresh_prod)
have "\<Psi> \<rhd> \<lparr>\<nu>x'\<rparr>(\<lparr>\<nu>y'\<rparr>(([(x, x')] \<bullet> [(y, y')] \<bullet> P)[<\<sigma>>])) \<sim> \<lparr>\<nu>y'\<rparr>(\<lparr>\<nu>x'\<rparr>(([(x, x')] \<bullet> [(y, y')] \<bullet> P)[<\<sigma>>]))"
by(rule bisimResComm)
moreover from \<open>x' \<sharp> P\<close> \<open>y' \<sharp> P\<close> \<open>x \<noteq> y'\<close> \<open>x' \<noteq> y'\<close> have "\<lparr>\<nu>x\<rparr>(\<lparr>\<nu>y\<rparr>P) = \<lparr>\<nu>x'\<rparr>(\<lparr>\<nu>y'\<rparr>(([(x, x')] \<bullet> [(y, y')] \<bullet> P)))"
apply(subst alphaRes[of y' P], simp)
by(subst alphaRes[of x']) (auto simp add: abs_fresh fresh_left calc_atm eqvts)
moreover from \<open>x' \<sharp> P\<close> \<open>y' \<sharp> P\<close> \<open>y \<noteq> x'\<close> \<open>x \<noteq> y'\<close> \<open>x' \<noteq> y'\<close> \<open>x \<noteq> x'\<close> \<open>x \<noteq> y\<close> have "\<lparr>\<nu>y\<rparr>(\<lparr>\<nu>x\<rparr>P) = \<lparr>\<nu>y'\<rparr>(\<lparr>\<nu>x'\<rparr>(([(x, x')] \<bullet> [(y, y')] \<bullet> P)))"
apply(subst alphaRes[of x' P], simp)
apply(subst alphaRes[of y'], simp add: abs_fresh fresh_left calc_atm)
apply(simp add: eqvts calc_atm)
by(subst perm_compose) (simp add: calc_atm)
ultimately show "\<Psi> \<rhd> (\<lparr>\<nu>x\<rparr>(\<lparr>\<nu>y\<rparr>P))[<\<sigma>>] \<sim> (\<lparr>\<nu>y\<rparr>(\<lparr>\<nu>x\<rparr>P))[<\<sigma>>]"
using \<open>wellFormedSubst \<sigma>\<close> \<open>x' \<sharp> \<sigma>\<close> \<open>y' \<sharp> \<sigma>\<close>
by simp
qed
qed
lemma bisimSubstExtBang:
fixes \<Psi> :: 'b
and P :: "('a, 'b, 'c) psi"
assumes "guarded P"
shows "\<Psi> \<rhd> !P \<sim>\<^sub>s P \<parallel> !P"
using assms
by(fastforce intro: closeSubstI bangExt guardedSeqSubst)
lemma structCongBisimSubst:
fixes P :: "('a, 'b, 'c) psi"
and Q :: "('a, 'b, 'c) psi"
assumes "P \<equiv>\<^sub>s Q"
shows "P \<sim>\<^sub>s Q"
using assms
by(induct rule: structCong.induct)
(auto intro: bisimSubstReflexive bisimSubstSymmetric bisimSubstTransitive bisimSubstParComm bisimSubstParAssoc bisimSubstParNil bisimSubstResNil bisimSubstResComm bisimSubstScopeExt bisimSubstCasePushRes bisimSubstInputPushRes bisimSubstOutputPushRes bisimSubstExtBang)
end
end
|
-- Exercises_4_1.idr
--
-- Exercises for generic types
import Tree
import Shape
import Picture
||| Converts given List to a binary Tree
listToTree : Ord elem => List elem -> Tree elem
listToTree [] = Empty
listToTree (x :: xs) = insert x (listToTree xs)
||| Converts given Tree to an ordered List
treeToList : Tree a -> List a
treeToList Empty = []
treeToList (Node left x right) = let leftList = treeToList left
rightList = treeToList right in
leftList ++ [x] ++ rightList
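-- Illustrative round trip (an added example, not part of the original exercises):
--   treeToList (listToTree [3,1,2]) = [1, 2, 3]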
-- Integer arithmetic
-- ==================
||| Data type for arithmetic expression(s)
data Expr = ||| Int value
Val Int
| ||| Arithmetic addition of two expressions
Add Expr Expr
       | ||| Arithmetic subtraction of two expressions
Sub Expr Expr
| ||| Arithmetic multiplication of two expressions
Mult Expr Expr
||| Evaluates given arithmetic expression
evaluate : Expr -> Int
evaluate (Val x) = x
evaluate (Add x y) = (evaluate x) + (evaluate y)
evaluate (Sub x y) = (evaluate x) - (evaluate y)
evaluate (Mult x y) = (evaluate x) * (evaluate y)
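-- A small sanity check for `evaluate` (an illustrative addition; it uses only
-- the constructors defined above): 6 + 3 * (14 - 2) should evaluate to 42.
exampleExpr : Expr
exampleExpr = Add (Val 6) (Mult (Val 3) (Sub (Val 14) (Val 2)))
-- evaluate exampleExpr -- => 42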
-- Maybe comparator
-- ================
maxMaybe : Ord a => Maybe a -> Maybe a -> Maybe a
maxMaybe Nothing Nothing = Nothing
maxMaybe Nothing (Just y) = Just y
maxMaybe (Just x) Nothing = Just x
maxMaybe (Just x) (Just y) = case compare x y of
LT => Just y
EQ => Just x
GT => Just x
-- Biggest triangle of picture
-- ===========================
biggestTriangle : Picture -> Maybe Double
biggestTriangle (Primitive triangle@(Triangle _ _)) = Just (area triangle)
biggestTriangle (Primitive _) = Nothing
biggestTriangle (Combine pic pic1) = maxMaybe (biggestTriangle pic) (biggestTriangle pic1)
biggestTriangle (Rotate x pic) = biggestTriangle pic
biggestTriangle (Translate x y pic) = biggestTriangle pic
testPic1 : Picture
testPic1 = Combine (Primitive (Triangle 2 3))
(Primitive (Triangle 2 4))
testPic2 : Picture
testPic2 = Combine (Primitive (Rectangle 1 3))
(Primitive (Circle 4))
|
Formal statement is: lemma tendsto_at_botI_sequentially: fixes f :: "real \<Rightarrow> 'b::first_countable_topology" assumes *: "\<And>X. filterlim X at_bot sequentially \<Longrightarrow> (\<lambda>n. f (X n)) \<longlonglongrightarrow> y" shows "(f \<longlongrightarrow> y) at_bot" Informal statement is: If $f$ converges to $y$ along any sequence that converges to $-\infty$, then $f$ converges to $y$ at $-\infty$. |
-- Tree.idr
--
-- Demonstrates generic data types
module Tree
||| A binary tree
public export
data Tree elem = Empty
| Node (Tree elem) elem (Tree elem)
%name Tree tree, tree1
||| Inserts a new element into a binary search tree
export
insert : Ord elem => elem -> Tree elem -> Tree elem
insert x Empty = Node Empty x Empty
insert x (Node left val right) = case compare x val of
LT => Node (insert x left) val right
EQ => Node left val right
GT => Node left val (insert x right)
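-- Illustrative note (not part of the original module): insertion preserves the
-- binary-search-tree ordering, e.g.
--   insert 2 (Node Empty 1 Empty) = Node Empty 1 (Node Empty 2 Empty)
--   insert 0 (Node Empty 1 Empty) = Node (Node Empty 0 Empty) 1 Empty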
|
#' Create Email Template
#'
#' This package allows you to run a shiny app that helps you to save and load email templates.
#'
#'
#' @section run app:
#' \emph{EmailTemplate} runs the app.
#'
#' @docType package
#' @name EmailTemplate
NULL
|
{-# OPTIONS --without-K --safe #-}
module Cham.Label where
open import Cham.Name
data Label : Set where
_⁺ : Name → Label
_⁻ : Name → Label
|
[STATEMENT]
lemma E_linear_diff2: "finite (set_pmf A) \<Longrightarrow> E (map_pmf f A) - E (map_pmf g A) = E (map_pmf (\<lambda>x. (f x) - (g x)) A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (set_pmf A) \<Longrightarrow> E (map_pmf f A) - E (map_pmf g A) = E (map_pmf (\<lambda>x. f x - g x) A)
[PROOF STEP]
unfolding E_def integral_map_pmf
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite (set_pmf A) \<Longrightarrow> measure_pmf.expectation A f - measure_pmf.expectation A g = measure_pmf.expectation A (\<lambda>x. f x - g x)
[PROOF STEP]
apply(rule Bochner_Integration.integral_diff[of "measure_pmf A" f g, symmetric])
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. finite (set_pmf A) \<Longrightarrow> integrable (measure_pmf A) f
2. finite (set_pmf A) \<Longrightarrow> integrable (measure_pmf A) g
[PROOF STEP]
by (simp_all add: integrable_measure_pmf_finite) |
Formal statement is: lemma bounded_translation: fixes S :: "'a::real_normed_vector set" assumes "bounded S" shows "bounded ((\<lambda>x. a + x) ` S)" Informal statement is: If $S$ is a bounded set, then the set $S + a$ is also bounded. |
{-# OPTIONS --without-K --safe #-}
open import Level using (Level)
open import Relation.Binary.PropositionalEquality hiding (Extensionality)
open ≡-Reasoning
open import Data.Nat using (ℕ; suc; zero; _+_)
open import Data.Nat.Properties
open import Data.Product using (_,_)
open import Data.Vec using (Vec; foldr; zipWith; map; []; _∷_; _++_; take; drop; splitAt; replicate)
open import Data.Vec.Properties
module FLA.Data.Vec.Properties where
private
variable
ℓ : Level
A B C : Set ℓ
m n : ℕ
++-identityₗ : (v : Vec A n) → [] ++ v ≡ v
++-identityₗ _ = refl
|
[STATEMENT]
lemma nta_bisim_mono:
assumes major: "nta_bisim bisim ta ta'"
and mono: "\<And>t s1 s2. bisim t s1 s2 \<Longrightarrow> bisim' t s1 s2"
shows "nta_bisim bisim' ta ta'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. nta_bisim bisim' ta ta'
[PROOF STEP]
using major
[PROOF STATE]
proof (prove)
using this:
nta_bisim bisim ta ta'
goal (1 subgoal):
1. nta_bisim bisim' ta ta'
[PROOF STEP]
by(cases ta)(auto intro: mono) |
subroutine griddims_admin( kcs, gdp )
!----- GPL ---------------------------------------------------------------------
!
! Copyright (C) Stichting Deltares, 2011-2016.
!
! This program is free software: you can redistribute it and/or modify
! it under the terms of the GNU General Public License as published by
! the Free Software Foundation version 3.
!
! This program is distributed in the hope that it will be useful,
! but WITHOUT ANY WARRANTY; without even the implied warranty of
! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
! GNU General Public License for more details.
!
! You should have received a copy of the GNU General Public License
! along with this program. If not, see <http://www.gnu.org/licenses/>.
!
! contact: [email protected]
! Stichting Deltares
! P.O. Box 177
! 2600 MH Delft, The Netherlands
!
! All indications and logos of, and references to, "Delft3D" and "Deltares"
! are registered trademarks of Stichting Deltares, and remain the property of
! Stichting Deltares. All rights reserved.
!
!-------------------------------------------------------------------------------
! $Id: griddims_admin.f90 5717 2016-01-12 11:35:24Z mourits $
! $HeadURL: https://svn.oss.deltares.nl/repos/delft3d/tags/6686/src/engines_gpl/flow2d3d/packages/data/src/general/griddims_admin.f90 $
!!--description-----------------------------------------------------------------
!
! Function: ...
!
!!--declarations----------------------------------------------------------------
use globaldata
use m_alloc
!
implicit none
!
type(globdat),target :: gdp
!
! The following list of pointer parameters is used to point inside the gdp structure
!
! NONE
!
! Global variables
!
integer, dimension(gdp%d%nmlb:gdp%d%nmub), intent(in) :: kcs
!
! Local variables
!
integer :: i
integer :: icx
integer :: icy
integer :: istat
integer :: nm
integer :: nm2
!
!! executable statements -------------------------------------------------------
!
icx = 1
icy = gdp%d%nmax + 2*gdp%d%ddbound
!
do nm = gdp%d%nmlb, gdp%d%nmub
gdp%griddim%celltype(nm) = kcs(nm)
enddo
!
i = 0
do nm = 1, gdp%d%nmmax
if (kcs(nm)==2) i = i+1
enddo
!
call reallocP(gdp%griddim%nmbnd, (/i,2/), stat=istat)
i = 0
do nm = 1, gdp%d%nmmax
if (kcs(nm)==2) then
i = i+1
!
if (kcs(nm-icx) == 1) then
! ndm
nm2 = nm-icx
elseif (kcs(nm+icx) == 1) then
! num
nm2 = nm+icx
elseif (kcs(nm-icy) == 1) then
! nmd
nm2 = nm-icy
else
! nmu
nm2 = nm+icy
endif
!
gdp%griddim%nmbnd(i,1) = nm ! open boundary cell
gdp%griddim%nmbnd(i,2) = nm2 ! corresponding internal cell
endif
enddo
end subroutine griddims_admin |
Formal statement is: lemma connected_closedD: "\<lbrakk>connected s; A \<inter> B \<inter> s = {}; s \<subseteq> A \<union> B; closed A; closed B\<rbrakk> \<Longrightarrow> A \<inter> s = {} \<or> B \<inter> s = {}" Informal statement is: If $s$ is a connected set and $A$ and $B$ are closed sets such that $s \subseteq A \cup B$ and $A \cap B \cap s = \emptyset$, then either $A \cap s = \emptyset$ or $B \cap s = \emptyset$. |
Formal statement is: lemma interior_open: "open S \<Longrightarrow> interior S = S" Informal statement is: If $S$ is open, then $S$ is its own interior. |
(** * Autosubst Header for Unnamed Syntax
Version: December 11, 2019.
*)
Require Export axioms.
Definition ap {X Y} (f : X -> Y) {x y : X} (p : x = y) : f x = f y :=
match p with eq_refl => eq_refl end.
Definition apc {X Y} {f g : X -> Y} {x y : X} (p : f = g) (q : x = y) : f x = g y :=
match q with eq_refl => match p with eq_refl => eq_refl end end.
(** ** Primitives of the Sigma Calculus. *)
(* Definition None := 0. *)
(* Definition Some := S. *)
(* Notation fin := nat. *)
Definition shift := S.
Definition var_zero := 0.
Definition id {X} (x: X) := x.
Definition scons {X: Type} (x : X) (xi : nat -> X) :=
fun n => match n with
|0 => x
|S n => xi n
end.
Notation "s .: sigma" := (scons s sigma) (at level 70).
Definition funcomp {X Y Z} (g : Y -> Z) (f : X -> Y) :=
fun x => g (f x).
(** ** Type Class Instances for Notation
Required to make notation work. *)
(** *** Type classes for renamings. *)
Class Ren1 (X1 : Type) (Y Z : Type) :=
ren1 : X1 -> Y -> Z.
Class Ren2 (X1 X2 : Type) (Y Z : Type) :=
ren2 : X1 -> X2 -> Y -> Z.
Class Ren3 (X1 X2 X3 : Type) (Y Z : Type) :=
ren3 : X1 -> X2 -> X3 -> Y -> Z.
Class Ren4 (X1 X2 X3 X4 : Type) (Y Z : Type) :=
ren4 : X1 -> X2 -> X3 -> X4 -> Y -> Z.
Class Ren5 (X1 X2 X3 X4 X5 : Type) (Y Z : Type) :=
ren5 : X1 -> X2 -> X3 -> X4 -> X5 -> Y -> Z.
Notation "s ⟨ xi1 ⟩" := (ren1 xi1 s) (at level 7, left associativity, format "s ⟨ xi1 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ⟩" := (ren2 xi1 xi2 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ⟩" := (ren3 xi1 xi2 xi3 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ⟩" := (ren4 xi1 xi2 xi3 xi4 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ; xi5 ⟩" := (ren5 xi1 xi2 xi3 xi4 xi5 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ; xi5 ⟩") : subst_scope.
Notation "⟨ xi ⟩" := (ren1 xi) (at level 1, left associativity, format "⟨ xi ⟩") : fscope.
Notation "⟨ xi1 ; xi2 ⟩" := (ren2 xi1 xi2) (at level 1, left associativity, format "⟨ xi1 ; xi2 ⟩") : fscope.
(** *** Type Classes for Substiution *)
Class Subst1 (X1 : Type) (Y Z: Type) :=
subst1 : X1 -> Y -> Z.
Class Subst2 (X1 X2 : Type) (Y Z: Type) :=
subst2 : X1 -> X2 -> Y -> Z.
Class Subst3 (X1 X2 X3 : Type) (Y Z: Type) :=
subst3 : X1 -> X2 -> X3 -> Y -> Z.
Class Subst4 (X1 X2 X3 X4: Type) (Y Z: Type) :=
subst4 : X1 -> X2 -> X3 -> X4 -> Y -> Z.
Class Subst5 (X1 X2 X3 X4 X5 : Type) (Y Z: Type) :=
subst5 : X1 -> X2 -> X3 -> X4 -> X5 -> Y -> Z.
Notation "s [ sigma ]" := (subst1 sigma s) (at level 7, left associativity, format "s '/' [ sigma ]") : subst_scope.
Notation "s [ sigma ; tau ]" := (subst2 sigma tau s) (at level 7, left associativity, format "s '/' [ sigma ; '/' tau ]") : subst_scope.
(** *** Type Class for Variables *)
Class Var X Y :=
ids : X -> Y.
(** ** Proofs for the substitution primitives. *)
Arguments funcomp {X Y Z} (g)%fscope (f)%fscope.
Notation "f >> g" := (funcomp g f) (*fun x => g (f x)*) (at level 50) : subst_scope.
Open Scope subst_scope.
(** A generic lifting of a renaming. *)
Definition up_ren (xi : nat -> nat) :=
0 .: (xi >> S).
(** A generic proof that lifting of renamings composes. *)
Lemma up_ren_ren (xi: nat -> nat) (zeta : nat -> nat) (rho: nat -> nat) (E: forall x, (xi >> zeta) x = rho x) :
forall x, (up_ren xi >> up_ren zeta) x = up_ren rho x.
Proof.
intros [|x].
- reflexivity.
- unfold up_ren. simpl. unfold funcomp. rewrite <- E. reflexivity.
Qed.
(** Eta laws. *)
Lemma scons_eta_id {n : nat} : var_zero .: shift = id :> (nat -> nat).
Proof. fext. intros [|x]; reflexivity. Qed.
Lemma scons_eta {T} (f : nat -> T) :
f var_zero .: shift >> f = f.
Proof. fext. intros [|x]; reflexivity. Qed.
Lemma scons_comp (T: Type) U (s: T) (sigma: nat -> T) (tau: T -> U ) :
(s .: sigma) >> tau = scons (tau s) (sigma >> tau) .
Proof.
fext. intros [|x]; reflexivity.
Qed.
(** ** Notations for unscoped syntax *)
(** Generic fsimpl tactic: simplifies the above primitives in a goal. *)
Ltac fsimpl :=
unfold up_ren; repeat match goal with
| [|- context[id >> ?f]] => change (id >> f) with f (* AsimplCompIdL *)
| [|- context[?f >> id]] => change (f >> id) with f (* AsimplCompIdR *)
| [|- context [id ?s]] => change (id s) with s
| [|- context[(?f >> ?g) >> ?h]] =>
change ((?f >> ?g) >> ?h) with (f >> (g >> h)) (* AsimplComp *)
| [|- context[(?s.:?sigma) var_zero]] => change ((s.:sigma)var_zero) with s
| [|- context[(?f >> ?g) >> ?h]] =>
change ((f >> g) >> h) with (f >> (g >> h))
| [|- context[?f >> (?x .: ?g)]] =>
change (f >> (x .: g)) with g
| [|- context[var_zero]] => change var_zero with 0
| [|- context[?x2 .: shift >> ?f]] =>
change x2 with (f 0); rewrite (@scons_eta _ _ f)
| [|- context[(?v .: ?g) 0]] =>
change ((v .: g) 0) with v
| [|- context[(?v .: ?g) (S ?n)]] =>
change ((v .: g) (S n)) with (g n)
| [|- context[?f 0 .: ?g]] =>
change g with (shift >> f); rewrite scons_eta
| _ => first [progress (rewrite ?scons_comp) | progress (rewrite ?scons_eta_id)]
end.
(** Generic fsimpl tactic: simplifies the above primitives in the context *)
Ltac fsimplc :=
unfold up_ren; repeat match goal with
| [H : context[id >> ?f] |- _] => change (id >> f) with f in H(* AsimplCompIdL *)
| [H: context[?f >> id] |- _] => change (f >> id) with f in H(* AsimplCompIdR *)
| [H: context [id ?s] |- _] => change (id s) with s in H
| [H: context[(?f >> ?g) >> ?h] |- _] =>
change ((?f >> ?g) >> ?h) with (f >> (g >> h)) in H(* AsimplComp *)
| [H : context[(?s.:?sigma) var_zero] |- _] => change ((s.:sigma)var_zero) with s in H
| [H: context[(?f >> ?g) >> ?h] |- _] =>
change ((f >> g) >> h) with (f >> (g >> h)) in H
| [H: context[?f >> (?x .: ?g)] |- _] =>
change (f >> (x .: g)) with g in H
| [H: context[var_zero] |- _] => change var_zero with 0 in H
| [H: context[?x2 .: shift >> ?f] |- _] =>
change x2 with (f 0) in H; rewrite (@scons_eta _ _ f) in H
| [H: context[(?v .: ?g) 0] |- _] =>
change ((v .: g) 0) with v in H
| [H: context[(?v .: ?g) (S ?n)] |- _] =>
change ((v .: g) (S n)) with (g n) in H
| [H: context[?f 0 .: ?g] |- _] =>
change g with (shift >> f); rewrite scons_eta in H
| _ => first [progress (rewrite ?scons_comp in *) | progress (rewrite ?scons_eta_id in *) ]
end.
(** Simplification in both the goal and the context *)
Tactic Notation "fsimpl" "in" "*" :=
fsimpl; fsimplc.
(* Notation "s , sigma" := (scons s sigma) (at level 60, format "s , sigma", right associativity) : subst_scope. *)
Notation "s '..'" := (scons s ids) (at level 1, format "s ..") : subst_scope.
Instance idsRen : Var nat nat := id.
Notation "↑" := (shift).
(** ** Tactics for unscoped syntax *)
(** Unfold of the instances in goal and context. *)
Ltac auto_unfold := unfold subst1, ren1, subst2, ren2, ids; unfold Subst1, Ren1, Subst2, Ren2, Var.
Ltac unfold_funcomp := match goal with
| |- context[(?f >> ?g) ?s] => change ((f >> g) s) with (g (f s))
end.
(** Automatically does a case analysis on a natural number, useful for proofs with context renamings/context morphisms. *)
Tactic Notation "auto_case" tactic(t) := (match goal with
| [|- forall (i : nat), _] => intros []; t
end).
|
library(a5udes)
library(dplyr)
library(sf)
library(tidygraph)
library(igraph)
library(progress)
library(furrr)
CE=osmdata::getbb(place_name='Ceará', format_out = "sf_polygon") %>% slice(1)
riv=st_read('./data/HydroRIVERS_v10_sa_shp')
riv_ce=riv %>% st_filter(CE)
rm(riv)
river_geometry=riv_ce %>% dplyr::select(HYRIV_ID,NEXT_DOWN,CATCH_SKM,UPLAND_SKM,HYBAS_L12) %>% st_transform(32724)
save(river_geometry,file='data/river_geometry.RData')
nodes=riv2nodes(river_geometry)
river_graph=riv2graph(nodes,river_geometry)
save(river_graph,file='data/river_graph.RData')
|
match_pattern <- function(x, pattern) {
# look for match
m <- regexpr(pattern, x, perl = TRUE)
# if no match
if (m < 1) {
# use zero instead of negative one to indicate no match
# so we can use in if statements. also keep other attributes
m[] <- 0L
}
# return match
m
}
match_left_paren <- function(x) match_pattern(x, "^\\(")
match_right_paren <- function(x) match_pattern(x, "^\\)")
match_left_brace <- function(x) match_pattern(x, "^{")
match_right_brace <- function(x) match_pattern(x, "^}")
match_comma <- function(x) match_pattern(x, "^,")
match_minus <- function(x) match_pattern(x, "^-")
match_plus <- function(x) match_pattern(x, "^\\+")
match_semicolon <- function(x) match_pattern(x, "^;")
match_star <- function(x) match_pattern(x, "^\\*")
match_bang <- function(x) match_pattern(x, "^!(?!=)")
match_bang_equal <- function(x) match_pattern(x, "^!=")
match_equal <- function(x) match_pattern(x, "^=(?!=)")
match_equal_equal <- function(x) match_pattern(x, "^==")
match_less <- function(x) match_pattern(x, "^<(?!=)")
match_less_equal <- function(x) match_pattern(x, "^<=")
match_greater <- function(x) match_pattern(x, "^>(?!=)")
match_greater_equal <- function(x) match_pattern(x, "^>=")
match_slash <- function(x) match_pattern(x, "^/(?!=/)")
match_number <- function(x) match_pattern(x, "^[0-9]+\\.?[0-9]*")
match_whitespace <- function(x) match_pattern(x, "^[^\\S\\n\\r]+")
match_newline <- function(x) match_pattern(x, "^\\r?\\n")
match_comment <- function(x) match_pattern(x, "^//[^\n]*(?:\n|$)")
match_identifier <- function(x) match_pattern(x, "^[[:alnum:]_]+")
match_string <- function(x) match_pattern(x, "^\".*?\"")
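# Minimal usage sketch (an illustrative addition; it relies only on the base-R
# regexpr() semantics used in match_pattern above):
stopifnot(match_number("123.5 + x") == 1)     # number matched at position 1
stopifnot(match_bang("!=") == 0)              # "!" followed by "=" is not a bare bang
stopifnot(match_identifier("foo_1 bar") == 1) # identifier matched at position 1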
|
(*
* Copyright Florian Haftmann
*
* SPDX-License-Identifier: BSD-2-Clause
*)
section \<open>Ancient comprehensive Word Library\<close>
theory Word_Lib_Sumo
imports
"HOL-Library.Word"
Aligned
Bit_Comprehension
Bit_Comprehension_Int
Bit_Shifts_Infix_Syntax
Bits_Int
Bitwise_Signed
Bitwise
Enumeration_Word
Generic_set_bit
Hex_Words
Least_significant_bit
More_Arithmetic
More_Divides
More_Sublist
Even_More_List
More_Misc
Strict_part_mono
Legacy_Aliases
Most_significant_bit
Next_and_Prev
Norm_Words
Reversed_Bit_Lists
Rsplit
Signed_Words
Syntax_Bundles
Typedef_Morphisms
Type_Syntax
Word_EqI
Word_Lemmas
Word_8
Word_16
Word_32
Word_Syntax
Signed_Division_Word
Singleton_Bit_Shifts
More_Word_Operations
Many_More
begin
unbundle bit_operations_syntax
unbundle bit_projection_infix_syntax
declare word_induct2[induct type]
declare word_nat_cases[cases type]
declare signed_take_bit_Suc [simp]
(* these generate take_bit terms, which we often don't want for concrete lengths *)
lemmas of_int_and_nat = unsigned_of_nat unsigned_of_int signed_of_int signed_of_nat
bundle no_take_bit
begin
declare of_int_and_nat[simp del]
end
lemmas bshiftr1_def = bshiftr1_eq
lemmas is_down_def = is_down_eq
lemmas is_up_def = is_up_eq
lemmas mask_def = mask_eq
lemmas scast_def = scast_eq
lemmas shiftl1_def = shiftl1_eq
lemmas shiftr1_def = shiftr1_eq
lemmas sshiftr1_def = sshiftr1_eq
lemmas sshiftr_def = sshiftr_eq_funpow_sshiftr1
lemmas to_bl_def = to_bl_eq
lemmas ucast_def = ucast_eq
lemmas unat_def = unat_eq_nat_uint
lemmas word_cat_def = word_cat_eq
lemmas word_reverse_def = word_reverse_eq_of_bl_rev_to_bl
lemmas word_roti_def = word_roti_eq_word_rotr_word_rotl
lemmas word_rotl_def = word_rotl_eq
lemmas word_rotr_def = word_rotr_eq
lemmas word_sle_def = word_sle_eq
lemmas word_sless_def = word_sless_eq
lemmas uint_0 = uint_nonnegative
lemmas uint_lt = uint_bounded
lemmas uint_mod_same = uint_idem
lemmas of_nth_def = word_set_bits_def
lemmas of_nat_word_eq_iff = word_of_nat_eq_iff
lemmas of_nat_word_eq_0_iff = word_of_nat_eq_0_iff
lemmas of_int_word_eq_iff = word_of_int_eq_iff
lemmas of_int_word_eq_0_iff = word_of_int_eq_0_iff
lemmas word_next_def = word_next_unfold
lemmas word_prev_def = word_prev_unfold
lemmas is_aligned_def = is_aligned_iff_dvd_nat
lemmas word_and_max_simps =
word8_and_max_simp
word16_and_max_simp
word32_and_max_simp
lemma distinct_lemma: "f x \<noteq> f y \<Longrightarrow> x \<noteq> y" by auto
lemmas and_bang = word_and_nth
lemmas sdiv_int_def = signed_divide_int_def
lemmas smod_int_def = signed_modulo_int_def
(* shortcut for some specific lengths *)
lemma word_fixed_sint_1[simp]:
"sint (1::8 word) = 1"
"sint (1::16 word) = 1"
"sint (1::32 word) = 1"
"sint (1::64 word) = 1"
by (auto simp: sint_word_ariths)
declare of_nat_diff [simp]
(* Haskellish names/syntax *)
notation (input)
bit ("testBit")
lemmas cast_simps = cast_simps ucast_down_bl
(* shadows the slightly weaker Word.nth_ucast *)
lemma nth_ucast:
"(ucast (w::'a::len word)::'b::len word) !! n =
(w !! n \<and> n < min LENGTH('a) LENGTH('b))"
by (auto simp add: bit_simps not_le dest: bit_imp_le_length)
end
|
/-
Copyright (c) 2021 Riccardo Brasca. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Riccardo Brasca
-/
import linear_algebra.free_module.finite.basic
import linear_algebra.matrix.charpoly.coeff
import field_theory.minpoly.field
/-!
# Characteristic polynomial
We define the characteristic polynomial of `f : M →ₗ[R] M`, where `M` is a finite and
free `R`-module. The proof that `f.charpoly` is the characteristic polynomial of the matrix of `f`
in any basis is in `linear_algebra/charpoly/to_matrix`.
## Main definition
* `linear_map.charpoly f` : the characteristic polynomial of `f : M →ₗ[R] M`.
-/
universes u v w
variables {R : Type u} {M : Type v} [comm_ring R] [nontrivial R]
variables [add_comm_group M] [module R M] [module.free R M] [module.finite R M] (f : M →ₗ[R] M)
open_locale classical matrix polynomial
noncomputable theory
open module.free polynomial matrix
namespace linear_map
section basic
/-- The characteristic polynomial of `f : M →ₗ[R] M`. -/
def charpoly : R[X] :=
(to_matrix (choose_basis R M) (choose_basis R M) f).charpoly
end basic
section coeff
lemma charpoly_monic : f.charpoly.monic := charpoly_monic _
end coeff
section cayley_hamilton
/-- The **Cayley-Hamilton Theorem**, that the characteristic polynomial of a linear map, applied
to the linear map itself, is zero.
See `matrix.aeval_self_charpoly` for the equivalent statement about matrices. -/
lemma aeval_self_charpoly : aeval f f.charpoly = 0 :=
begin
apply (linear_equiv.map_eq_zero_iff (alg_equiv_matrix (choose_basis R M)).to_linear_equiv).1,
rw [alg_equiv.to_linear_equiv_apply, ← alg_equiv.coe_alg_hom,
← polynomial.aeval_alg_hom_apply _ _ _, charpoly_def],
exact aeval_self_charpoly _,
end
lemma is_integral : is_integral R f := ⟨f.charpoly, ⟨charpoly_monic f, aeval_self_charpoly f⟩⟩
lemma minpoly_dvd_charpoly {K : Type u} {M : Type v} [field K] [add_comm_group M] [module K M]
[finite_dimensional K M] (f : M →ₗ[K] M) : minpoly K f ∣ f.charpoly :=
minpoly.dvd _ _ (aeval_self_charpoly f)
/-- Any endomorphism polynomial `p` is equivalent under evaluation to `p %ₘ f.charpoly`; that is,
`p` is equivalent to a polynomial with degree less than the dimension of the module. -/
lemma aeval_eq_aeval_mod_charpoly (p : R[X]) : aeval f p = aeval f (p %ₘ f.charpoly) :=
(aeval_mod_by_monic_eq_self_of_root f.charpoly_monic f.aeval_self_charpoly).symm
/-- Any endomorphism power can be computed as the sum of endomorphism powers less than the
dimension of the module. -/
lemma pow_eq_aeval_mod_charpoly (k : ℕ) : f^k = aeval f (X^k %ₘ f.charpoly) :=
by rw [←aeval_eq_aeval_mod_charpoly, map_pow, aeval_X]
variable {f}
lemma minpoly_coeff_zero_of_injective (hf : function.injective f) : (minpoly R f).coeff 0 ≠ 0 :=
begin
intro h,
obtain ⟨P, hP⟩ := X_dvd_iff.2 h,
have hdegP : P.degree < (minpoly R f).degree,
{ rw [hP, mul_comm],
refine degree_lt_degree_mul_X (λ h, _),
rw [h, mul_zero] at hP,
exact minpoly.ne_zero (is_integral f) hP },
have hPmonic : P.monic,
{ suffices : (minpoly R f).monic,
{ rwa [monic.def, hP, mul_comm, leading_coeff_mul_X, ← monic.def] at this },
exact minpoly.monic (is_integral f) },
have hzero : aeval f (minpoly R f) = 0 := minpoly.aeval _ _,
simp only [hP, mul_eq_comp, ext_iff, hf, aeval_X, map_eq_zero_iff, coe_comp, alg_hom.map_mul,
zero_apply] at hzero,
exact not_le.2 hdegP (minpoly.min _ _ hPmonic (ext hzero)),
end
end cayley_hamilton
end linear_map
|
(* Property from Productive Use of Failure in Inductive Proof,
Andrew Ireland and Alan Bundy, JAR 1996.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_prop_38
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
fun z :: "'a list => 'a list => 'a list" where
"z (nil2) y2 = y2"
| "z (cons2 z2 xs) y2 = cons2 z2 (z xs y2)"
fun y :: "Nat => Nat => bool" where
"y (Z) (Z) = True"
| "y (Z) (S z2) = False"
| "y (S x22) (Z) = False"
| "y (S x22) (S y22) = y x22 y22"
fun x :: "bool => bool => bool" where
"x True y2 = True"
| "x False y2 = y2"
fun elem :: "Nat => Nat list => bool" where
"elem x2 (nil2) = False"
| "elem x2 (cons2 z2 xs) = x (y x2 z2) (elem x2 xs)"
theorem property0 :
"((elem x2 y2) ==> ((elem x2 z2) ==> (elem x2 (z y2 z2))))"
apply(induction y2, auto)
apply(case_tac "y x2 x1", auto)
done
end
|
SUBROUTINE GD_OPNR ( filnam, igdfln, navsz, rnvblk, ianlsz,
+ anlblk, ihdrsz, maxgrd, iret )
C************************************************************************
C* GD_OPNR *
C* *
C* This subroutine opens an existing GEMPAK grid file for realtime *
C* data access. The file is opened with shared, write access. *
C* *
C* GD_OPNR ( FILNAM, IGDFLN, NAVSZ, RNVBLK, IANLSZ, ANLBLK, IHDRSZ, *
C* MAXGRD, IRET ) *
C* *
C* Input parameters: *
C* FILNAM CHAR* File name *
C* *
C* Output parameters: *
C* IGDFLN INTEGER File number *
C* NAVSZ INTEGER Navigation block length *
C* RNVBLK (NAVSZ) REAL Navigation block *
C* IANLSZ INTEGER Analysis block length *
C* ANLBLK (IANLSZ) REAL Analysis block *
C* IHDRSZ INTEGER Grid header length *
C* MAXGRD INTEGER Maximum number of grids *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* -2 = file cannot be opened *
C* -7 = not a GEMPAK5 grid file *
C* -8 = nav cannot be read *
C* -13 = grid header too long *
C* -14 = file name is blank *
C** *
C* Log: *
C* M. desJardins/GSFC 6/87 *
C* M. desJardins/GSFC 4/90 Added error for blank file name *
C************************************************************************
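C
C     Example call (illustrative only; the file name is hypothetical and the
C     arguments follow the parameter list documented above):
C
C       CALL GD_OPNR ( 'mygrid.gem', igdfln, navsz, rnvblk,
C      +               ianlsz, anlblk, ihdrsz, maxgrd, iret )
C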
INCLUDE 'GEMPRM.PRM'
INCLUDE 'GMBDTA.CMN'
INCLUDE 'grdcmn.cmn'
C
CHARACTER*(*) filnam
REAL rnvblk (*), anlblk (*)
C*
LOGICAL wrtflg, shrflg
C------------------------------------------------------------------------
C* Check for blank name.
C
CALL ST_LSTR ( filnam, lenf, ier )
IF ( lenf .eq. 0 ) THEN
iret = -14
RETURN
END IF
C
C* Open the file.
C
wrtflg = .true.
shrflg = .true.
CALL GD_OFIL ( filnam, wrtflg, shrflg, igdfln, navsz, rnvblk,
+ ianlsz, anlblk, ihdrsz, maxgrd, iret )
C*
RETURN
END
|
Formal statement is: lemma smult_monom: "smult a (monom b n) = monom (a * b) n" Informal statement is: The product of a scalar and a monomial is a monomial. |
-- @@stderr --
dtrace: failed to compile script test/unittest/printf/err.D_PRINTF_ARG_TYPE.aggarg.d: [D_PRINTF_ARG_TYPE] line 19: printf( ) argument #3 is incompatible with conversion #2 prototype:
conversion: %d
prototype: char, short, int, long, or long long
argument: aggregation
|
Require Export SfLib.
Import SfLibmod.
(*Destination Language: STLC + notions of program, ownership, code base,
and events to log temporarily and permanently.*)
(*
Assumptions:
1- code_bases map function names to closed lambda abstractions;
this is reflected in two places: in defining substitution [x:=s]t
and in appear_free_in x t.
Such an assumption is declared in well-formed codebases!
2- Excluded middle is correct! (defined in SfLib)
*)
(*
types
T ::= Unit | T -> T
*)
Inductive ty : Type :=
| TUnit : ty
| TArrow : ty -> ty -> ty.
(*
terms:
t ::= x | f | \x:T.t | tt | unit | ev[f] t | lev[f] t | t;t
where x ranges over ordinary variable identifiers (id),
f ranges over function name identifiers (fid),
ev[f] t is the event to log function call temporarily,
lev[f] t is the event to log function call permanently.
*)
Inductive tm : Type :=
| tvar : id -> tm
| tfname : fid -> tm
| tabs : id -> ty -> tm -> tm
| tapp : tm -> tm -> tm
| tunit : tm
| tev : fid -> tm -> tm
| tlev : fid -> tm -> tm
| tseq : tm -> tm -> tm.
Notation "t1 ';' t2" := (tseq t1 t2) (at level 15).
(*
values defined as propositions over terms:
v ::= f | \x:T.t | unit
*)
Inductive value: tm -> Prop :=
| v_fname: forall (f: fid), value (tfname f)
| v_abs : forall (x: id) (T: ty) (t: tm), value (tabs x T t)
| v_unit : value tunit.
Hint Constructors value.
(*
Partial mappings:
functions from identifiers (I) to some optional value of type A.
*)
Definition partial_map (I: Type) (A:Type) := I -> option A.
Definition empty {I A:Type} : partial_map I A := (fun _ => None).
Definition extend_id {A:Type} (parMap : partial_map id A)
(var:id) (val : A) :=
fun var' => if eq_id_dec var var' then
Some val
else parMap var'.
Definition extend_fid {A:Type}
(parMap : partial_map fid A)
(fname: fid)
(func : A) :=
fun fname' => if eq_fid_dec fname fname' then
Some func
else parMap fname'.
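(* Quick sanity check for the partial-map operations (an illustrative addition,
   not part of the original development): looking up the identifier that was
   just added returns the stored value. *)
Example extend_id_hit :
  extend_id (@empty id nat) (Id 0) 3 (Id 0) = Some 3.
Proof.
  unfold extend_id; destruct (eq_id_dec (Id 0) (Id 0)); congruence.
Qed.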
(*
Code base:
Partial mapping from function names to terms:
C: fid -> option tm (or, partial_map fid tm)
*)
Definition code_base : Type := partial_map fid tm.
(*
Programs: a triple of a term, an owner and a code base.
*)
Inductive prog : Type :=
prg : tm -> ow -> code_base -> prog.
(*
lv: ow -> nat
gives the security level of the principal (owner).
*)
Definition lv (o: ow) : nat :=
match o with
OId m => m
end.
(*===================== LOGGING POLICIES ============================*)
(*Extending nat with variables*)
Inductive ext_nat : Type :=
| proper_nat : nat -> ext_nat
| nat_var : id -> ext_nat.
(*
propositions:
p ::= LoggedCall(n,f, arg)| Call(n, f, args) |
| n < n' | p /\ p | p <- p | forall x, p | exists x, p
| truth | falsom.
*)
Inductive prop : Type :=
| LoggedCall : ext_nat -> fid -> tm -> prop
| Call : ext_nat -> fid -> tm -> prop
| Lt : ext_nat -> ext_nat -> prop
| AND : prop -> prop -> prop
| ARROW : prop -> prop -> prop
| FORALL : id -> prop -> prop
| EXISTS : id -> prop -> prop
| TOP : prop
| BOT : prop.
(*
logical context as a set of props:
Delta ranges over it.
*)
Definition logical_context := list prop.
(*
logic proof theory judgment: Dela ||- p
*)
Reserved Notation " Delta '||-' p " (at level 40).
Inductive entails : logical_context -> prop -> Prop :=
(*dummy def! not implemented!*)
dummy_entail_constr : forall (Delta: logical_context) (p: prop),
Delta ||- p
where " Delta '||-' p " := (entails Delta p).
(* negation notation*)
Definition NOT (p: prop) : prop := ARROW p BOT.
Lemma weakening :
forall (Delta Delta': logical_context) (p: prop),
sublist Delta Delta' ->
Delta ||- p ->
Delta' ||- p.
Proof.
Admitted.
Lemma weakening_cons:
forall (Delta: logical_context) (p p': prop),
Delta ||- p ->
(p'::Delta) ||- p.
Proof.
intros.
remember(sublist_cons2 p' Delta) as HH.
clear HeqHH.
apply (weakening Delta (p'::Delta) p HH H).
Qed.
(*
logging policy event (lp_ev) is a function identifier,
for which f ranges over.
logging policy triggers (lp_trigs) is a list of function identifiers,
for which gs ranges over.
logging policy level (lp_level) is the upper bound level for owner.
*)
Definition lp_ev : Type := fid.
Definition lp_trigs : Type := list fid.
Definition lp_level : Type := nat.
(*
logging policy condition (phi ranges over it):
Call(?n, f, ?arg) /\
Call(?n'1, g1, ?arg1) /\ Lt(?n'1,?n) /\ ... /\
Call(?n'm, gm, ?argm) /\ Lt(?n'm,?n).
logging policy pattern (psi ranges over it):
Call(?n, f, ?arg) /\
Call(?n'1, g1, ?arg1) /\ Lt(?n'1,?n) /\ ... /\
Call(?n'm, gm, ?argm) /\ Lt(?n'm,?n) -> LoggedCall(?n, f, ?arg).
logging policy condition type (lp_cond):
prog -> nat -> tm -> ow -> list nat -> list tm -> prop.
logging policy pattern type (lp_patt):
prog -> nat -> tm -> ow -> list nat -> list tm -> prop.
lp_trig_condition_mk iteratively generates the section of
conditions about triggers (psi ranges over it):
Call(?n'1, g1, ?args21) /\ Lt(n'1,n) /\ ... /\
Call(?n'm, gm, ?args2m) /\ Lt(n'm,n).
lp_condition_mk generates the logging policy condition
by calling lp_trig_condition_mk.
lp_pattern_mk generates the whole logging policy pattern
by calling lp_condition_mk.
*)
Fixpoint lp_trig_condition_mk_numbered (gs: lp_trigs) (n: ext_nat) (i: nat)
: prop :=
match gs with
| [] => TOP
| g :: gs' => let ni := nat_var ((Id i)) in
let argi := tvar (Id (i+1)) in
EXISTS (Id i) (EXISTS (Id (i+1))
(AND (Call ni g argi) (AND (Lt ni n)
(lp_trig_condition_mk_numbered gs' n (i+2))))
)
end.
Definition lp_trig_condition_mk (gs: lp_trigs) (n: ext_nat) : prop :=
lp_trig_condition_mk_numbered gs n 10.
Definition lp_condition_mk (f: lp_ev) (gs: lp_trigs) (n: ext_nat) (arg: tm)
: prop :=
AND (Call n f arg) (lp_trig_condition_mk gs n).
Definition lp_pattern_mk (f: lp_ev) (gs: lp_trigs) : prop :=
let n := nat_var (Id 5) in
let arg := tvar (Id 6) in
FORALL (Id 5) (FORALL (Id 6)
(ARROW (LoggedCall n f arg) (lp_condition_mk f gs n arg))).
(*
Eval simpl in (lp_trig_condition_mk ((FId 2)::(FId 3)::nil) (proper_nat 2)).
Eval simpl in
(lp_condition_mk (FId 1) ((FId 2)::(FId 3)::nil) (proper_nat 2) tunit).
Eval simpl in
(lp_pattern_mk (FId 1) ((FId 2)::(FId 3)::nil)).
*)
(*
logging policy refinement (kappa ranges over it):
LoggedCall(?n, f, ?arg)
logging policy refinement type (lp_refin):
nat -> tm -> prop
lp_refinement_mk generates logging policy refinement.
*)
Definition lp_refin := ext_nat -> fid -> tm -> prop.
Definition lp_refinement_mk := LoggedCall.
Check lp_refinement_mk.
(*
logging policy is a 5-ary tuple of
1) a logging event,
2) a list of logging triggers,
3) a logging policy level,
4) a logging policy pattern, and
5) a logging policy refinement.
lp ranges over logpol elements.
*)
Inductive logpol : Type :=
lgpl : lp_ev -> lp_trigs -> lp_level -> prop -> lp_refin -> logpol.
(*
wellformed logging policies are the ones whose
pattern and refinement are both wellformed:
*)
Inductive wellformed_lp : logpol -> Prop :=
wf_lp: forall (f: lp_ev) (gs: lp_trigs)
(l: lp_level) (psi: prop) (kappa: lp_refin),
(forall (gi: fid), inlist gi gs -> f <>gi) ->
psi = lp_pattern_mk f gs ->
kappa = lp_refinement_mk ->
wellformed_lp (lgpl f gs l psi kappa).
Definition LogCondition (lp: logpol) (n: ext_nat) (arg: tm) : prop :=
match lp with
lgpl f gs l psi kappa => lp_condition_mk f gs n arg
end.
Axiom logcondition_entail:
forall (Delta: list prop) (f: lp_ev) (gs: lp_trigs) (l: lp_level)
(psi: prop) (kappa: lp_refin) (n: nat) (arg: tm),
Delta ||- LogCondition (lgpl f gs l psi kappa) (proper_nat n) arg ->
inlist (Call (proper_nat n) f arg) Delta /\
forall (gi: fid), inlist gi gs ->
exists (ni: nat) (vi: tm), inlist (Call (proper_nat ni) gi vi) Delta /\
true = ble_nat ni n /\ ni <> n.
(*=====================OPERATIONAL SEMANTICS============================*)
(* Substitution: [x:=s]t defined over terms *)
Reserved Notation " '[' x ':=' s ']' t " (at level 20).
Fixpoint subst (x: id) (s t: tm) : tm :=
match t with
| tvar y => if eq_id_dec x y then s else (tvar y)
| tfname f => tfname f
| tabs y T t' => if eq_id_dec x y then
(tabs y T t')
else tabs y T ([x:=s] t')
| tapp t1 t2 => tapp ([x:=s] t1) ([x:=s] t2)
| tunit => tunit
| tev f t' => tev f ([x:=s] t')
| tlev f t' => tlev f ([x:=s] t')
| tseq t1 t2 => tseq ([x:=s] t1)([x:=s] t2)
end
where "'[' x ':=' s ']' t" := (subst x s t).
(* reduction single step:
[pr, lp]: t,n,X,L ==> t',n',X',L'
*)
Reserved Notation " '[' pr ',' lp ']:' p1 '==>' p2 " (at level 40).
Inductive step : prog -> logpol -> (tm * nat * list prop * list prop) ->
(tm * nat * list prop * list prop) ->
Prop :=
| ST_AppAbs : forall (pr: prog) (lp: logpol) (x: id) (T: ty)
(t v: tm) (n: nat) (X L: list prop),
wellformed_lp lp->
value v ->
[pr, lp]: (tapp (tabs x T t) v, n, X, L) ==>
(subst x v t, n+1, X, L)
| ST_AppFId : forall (tp: tm) (lp: logpol) (o: ow)
(C: code_base) (f: fid) (x: id) (T: ty)
(v t: tm) (n: nat) (X L: list prop),
wellformed_lp lp->
value v ->
C f = Some (tabs x T t) ->
[prg tp o C, lp]: (tapp (tfname f) v, n, X, L) ==>
(subst x v t, n+1, X, L)
| ST_App1 : forall (pr: prog) (lp: logpol) (t1 t1' t2: tm) (n1 n2: nat)
(X X' L L': list prop),
wellformed_lp lp->
[pr, lp]: (t1, n1, X, L) ==> (t1', n2, X', L') ->
[pr, lp]: (tapp t1 t2, n1, X, L) ==>
(tapp t1' t2, n2, X', L')
| ST_App2 : forall (pr: prog) (lp: logpol) (v t2 t2': tm) (n1 n2: nat)
(X X' L L': list prop),
wellformed_lp lp->
value v ->
[pr, lp]: (t2, n1, X, L) ==> (t2', n2, X', L') ->
[pr, lp]: (tapp v t2, n1, X, L) ==>
(tapp v t2', n2, X', L')
| ST_EvVal : forall (pr: prog) (lp: logpol) (f: fid) (v: tm) (n: nat)
(X L: list prop),
wellformed_lp lp->
value v ->
[pr, lp]: (tev f v, n, X, L) ==>
(tunit, n, ((Call (proper_nat (n-1)) f v)::X), L)
| ST_Ev : forall (pr: prog) (lp: logpol) (f: fid) (t t': tm)
(n n': nat) (X X' L L': list prop),
wellformed_lp lp->
[pr, lp]: (t, n, X, L) ==> (t', n', X', L') ->
[pr, lp]: (tev f t, n, X, L) ==> (tev f t', n', X', L')
| ST_LevVal1 : forall (tp: tm) (o: ow) (C: code_base) (f: lp_ev)
(gs: lp_trigs) (l: lp_level) (psi: prop) (kappa: lp_refin)
(v: tm) (n: nat) (X L: list prop),
wellformed_lp (lgpl f gs l psi kappa)->
X ||- LogCondition (lgpl f gs l psi kappa)
(proper_nat (n-1)) v ->
value v ->
[(prg tp o C), (lgpl f gs l psi kappa)]:
(tlev f v, n, X, L) ==>
(tunit, n, X, ((LoggedCall (proper_nat (n-1)) f v)::L))
| ST_LevVal2 : forall (tp: tm) (o: ow) (C: code_base) (f: lp_ev)
(gs: lp_trigs) (l: lp_level) (psi: prop) (kappa: lp_refin)
(v: tm) (n: nat) (X L: list prop),
wellformed_lp (lgpl f gs l psi kappa)->
~ (X ||- LogCondition (lgpl f gs l psi kappa)
(proper_nat (n-1)) v )->
value v ->
[(prg tp o C), (lgpl f gs l psi kappa)]:
(tlev f v, n, X, L) ==>
(tunit, n, X, L)
| ST_Lev : forall (pr: prog) (lp: logpol) (f: fid)
(t t': tm) (n n': nat) (X X' L L': list prop),
wellformed_lp lp->
[pr, lp]: (t, n, X, L) ==> (t', n', X', L') ->
[pr, lp]: (tlev f t, n, X, L) ==> (tlev f t', n', X', L')
| ST_SeqUnit: forall (pr: prog) (lp: logpol) (t: tm) (n: nat)
(X L: list prop),
wellformed_lp lp->
[pr, lp]: (tseq tunit t, n, X, L) ==> (t, n, X, L)
| ST_Seq : forall (pr: prog) (lp: logpol) (t1 t1' t2: tm)
(n n': nat) (X X' L L': list prop),
wellformed_lp lp->
[pr, lp]: (t1, n, X, L) ==> (t1', n', X', L') ->
[pr, lp]: (tseq t1 t2, n, X, L) ==>
(tseq t1' t2, n', X', L')
where "'[' pr ',' lp ']:' p1 '==>' p2" := (step pr lp p1 p2).
Hint Constructors step.
(*reflexive transitive closure of single step reduction*)
Reserved Notation "'[' pr ',' lp ']:' p1 '==>*' p2" (at level 40).
Inductive multistep : prog -> logpol ->
(tm * nat * list prop * list prop) ->
(tm * nat * list prop * list prop) ->
Prop :=
| multi_refl: forall (pr: prog) (lp: logpol) (t: tm) (n: nat)
(X L: list prop),
wellformed_lp lp ->
[pr, lp]: (t, n, X, L) ==>* (t, n, X, L)
| multi_step: forall (pr: prog) (lp: logpol) (t t' t'': tm)
(n n' n'': nat)
(X X' X'' L L' L'': list prop),
wellformed_lp lp ->
[pr, lp]: (t, n, X, L) ==> (t', n', X', L') ->
[pr, lp]: (t', n', X', L') ==>* (t'', n'', X'', L'') ->
[pr, lp]: (t, n, X, L) ==>* (t'', n'', X'', L'')
where "'[' pr ',' lp ']:' p1 '==>*' p2" := (multistep pr lp p1 p2).
Lemma multistep_implies_lp_wf:
forall (pr: prog) (lp: logpol) (t t': tm)
(n n': nat) (X X' L L': list prop),
[pr, lp]: (t, n, X, L) ==>* (t', n', X', L') ->
wellformed_lp lp.
Proof.
intros.
inversion H.
subst.
assumption.
subst.
assumption.
Qed.
(*=====================TYPING RULES================================*)
(* the notion of free variables of a term:
function names and unit have no free variables.
*)
Inductive appears_free_in_term : id -> tm -> Prop :=
| afi_var : forall (x: id),
appears_free_in_term x (tvar x)
| afi_abs : forall (x y: id) (T: ty) (t: tm),
x <> y ->
appears_free_in_term x t ->
appears_free_in_term x (tabs y T t)
| afi_app1 : forall (x: id) (t1 t2: tm),
appears_free_in_term x t1 ->
appears_free_in_term x (tapp t1 t2)
| afi_app2 : forall (x: id) (t1 t2: tm),
appears_free_in_term x t2 ->
appears_free_in_term x (tapp t1 t2)
| afi_ev : forall (x: id) (f: fid) (t: tm),
appears_free_in_term x t ->
appears_free_in_term x (tev f t)
| afi_lev : forall (x: id) (f: fid) (t: tm),
appears_free_in_term x t ->
appears_free_in_term x (tlev f t)
| afi_seq1 : forall (x: id) (t1 t2: tm),
appears_free_in_term x t1 ->
appears_free_in_term x (tseq t1 t2)
| afi_seq2 : forall (x: id) (t1 t2: tm),
appears_free_in_term x t2 ->
appears_free_in_term x (tseq t1 t2).
Hint Constructors appears_free_in_term.
(* the notion of closed terms *)
Definition closed_term (t: tm) : Prop :=
~ exists x: id, appears_free_in_term x t.
(* well-formedness of codebases: |= C *)
Reserved Notation "'|=' C" (at level 40).
Inductive wellformed_codebase: code_base -> Prop :=
| wf_empty : |= empty
| wf_nonempty : forall (C: code_base) (f: fid) (t: tm),
|= C ->
C f = None ->
closed_term t ->
|= (extend_fid C f t)
where " '|=' C " := (wellformed_codebase C).
Lemma wellformed_closeness:
forall (C: code_base) (f: fid) (t: tm),
|= C -> C f = Some t -> closed_term t.
Proof.
intros.
generalize dependent t.
generalize dependent f.
induction H.
Case "empty".
intros.
unfold empty in H0.
inversion H0.
Case "nonempty".
intros.
unfold extend_fid in H2.
destruct (eq_fid_dec f f0).
SCase "f = f0".
inversion H2.
subst.
assumption.
SCase "f != f0".
apply IHwellformed_codebase with f0.
assumption.
Qed.
(* typing context for variable identifiers (Gamma ranges over it) *)
Definition context := partial_map id ty.
(*typing rules: well-formedness of the codebase, |= C, is added as a premise to each rule. *)
Reserved Notation " '[' pr ']:' Gamma '|-' p 'in' T " (at level 40).
Inductive has_type : prog -> context -> tm -> ty -> Prop :=
| T_Var : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (x: id) (T: ty),
|= C ->
Gamma x = Some T ->
[prg tp o C]: Gamma |- (tvar x) in T
| T_FName : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (f: fid) (t: tm) (T1 T2: ty),
|= C ->
C f = Some t ->
[prg tp o C]: Gamma |- t in (TArrow T1 T2) ->
[prg tp o C]: Gamma |- (tfname f) in (TArrow T1 T2)
| T_Abs : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (x: id) (t: tm) (T1 T2: ty),
|= C ->
[prg tp o C]: extend_id Gamma x T1 |- t in T2 ->
[prg tp o C]: Gamma |- (tabs x T1 t) in (TArrow T1 T2)
| T_App : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (t1 t2: tm) (T1 T2: ty),
|= C ->
[prg tp o C]: Gamma |- t1 in (TArrow T1 T2) ->
[prg tp o C]: Gamma |- t2 in T1 ->
[prg tp o C]: Gamma |- (tapp t1 t2) in T2
| T_Unit : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context),
|= C ->
[prg tp o C]: Gamma |- tunit in TUnit
| T_Ev : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (f: fid) (t: tm) (T1 T2: ty),
|= C ->
[prg tp o C]: Gamma |- (tfname f) in TArrow T1 T2 ->
[prg tp o C]: Gamma |- t in T1 ->
[prg tp o C]: Gamma |- (tev f t) in TUnit
| T_Lev : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (f: fid) (t: tm) (T1 T2: ty),
|= C ->
[prg tp o C]: Gamma |- (tfname f) in TArrow T1 T2 ->
[prg tp o C]: Gamma |- t in T1 ->
[prg tp o C]: Gamma |- (tlev f t) in TUnit
| T_Seq : forall (tp: tm) (o: ow) (C: code_base)
(Gamma: context) (t1 t2: tm) (T: ty),
|= C ->
[prg tp o C]: Gamma |- t1 in TUnit ->
[prg tp o C]: Gamma |- t2 in T ->
[prg tp o C]: Gamma |- tseq t1 t2 in T
where " '[' pr ']:' Gamma '|-' p 'in' T " := (has_type pr Gamma p T).
Hint Constructors has_type.
(*=====================PROGRESS================================*)
Theorem progress:
forall (pr: prog) (t: tm) (T: ty) (lp: logpol),
[pr]: empty |- t in T ->
wellformed_lp lp ->
value t \/
exists t' n n' X X' L L', [pr, lp]: (t, n, X, L) ==> (t', n', X', L').
Proof.
Admitted.
(*=====================PRESERVATION================================*)
(* free variables are present in context*)
Lemma free_in_context:
forall (x: id) (t: tm) (pr: prog) (Gamma: context) (T: ty),
appears_free_in_term x t ->
[pr]: Gamma |- t in T ->
exists T': ty, Gamma x = Some T'.
Proof.
Admitted.
Corollary typable_empty_closed:
forall (pr: prog) (t: tm) (T: ty),
[pr]: empty |- t in T ->
closed_term t.
Proof.
unfold closed_term, not.
intros.
inversion H0.
remember (free_in_context x t pr empty T H1 H) as HH.
clear HeqHH.
inversion HH.
rename x into T'.
unfold empty in H2.
inversion H2.
Qed.
Lemma context_invariance:
forall (Gamma Gamma': context) (pr: prog) (t: tm) (T: ty),
[pr]: Gamma |- t in T ->
(forall (x: id), appears_free_in_term x t -> Gamma x = Gamma' x) ->
[pr]: Gamma' |- t in T.
Proof.
Admitted.
Lemma substitution_preseves_typing:
forall (pr: prog) (Gamma: context) (x: id) (t s: tm) (T S: ty),
[pr]: extend_id Gamma x S |- t in T ->
[pr]: empty |- s in S ->
[pr]: Gamma |- subst x s t in T.
Proof.
Admitted.
Lemma preservation:
forall (pr: prog) (t t': tm) (T: ty)
(lp: logpol) (n n': nat) (X X' L L': list prop),
[pr]: empty |- t in T ->
[pr, lp]: (t, n, X, L) ==> (t', n', X', L') ->
[pr]: empty |- t' in T.
Proof.
Admitted.
(*=====================SOUNDNESS================================*)
(* normal form programs *)
Definition normal_form (t: tm) : Prop :=
forall (pr: prog) (lp: logpol)
(n n': nat) (X X' L L': list prop),
~ exists t', [pr, lp]: (t,n, X, L) ==> (t',n', X', L').
(* stuck programs *)
Definition stuck (t: tm) : Prop :=
(normal_form t) /\ ~ value t.
Corollary soundness:
forall (pr: prog) (lp: logpol) (t t': tm)
(n n': nat) (T: ty) (X X' L L': list prop),
[pr]: empty |- t in T ->
[pr, lp]: (t, n, X, L) ==>* (t', n', X', L') ->
~ stuck t'.
Proof.
Admitted.
Lemma normalization:
forall pr lp t n X L t' n' X' L',
[pr, lp]: (t,n,X,L) ==> (t',n',X',L') ->
~ exists X'' L'',
[pr, lp]: (t',n',X',L') ==>* (t,n,X'',L'').
Proof. Admitted.
(*========================== LOG GENERATION ======================*)
(* program generating some permanent log: pr ~~> L*)
(*modified: the program is not required to be fully evaluated!*)
Inductive prog_gen_log : logpol -> prog -> list prop -> Prop :=
| pgl_constr: forall (lp: logpol) (t t': tm) (o: ow) (C: code_base)
(n: nat) (X L: list prop),
[prg t o C, lp]: (t,0,nil,nil) ==>* (t', n, X, L) ->
prog_gen_log lp (prg t o C) L.
Notation " '|' lp '|:' pr '~~>' L " :=
(prog_gen_log lp pr L) (at level 14).
|
[STATEMENT]
lemma cone_carrier: "A \<subseteq> carrier_vec n \<Longrightarrow> cone A \<subseteq> carrier_vec n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. A \<subseteq> carrier_vec n \<Longrightarrow> cone A \<subseteq> carrier_vec n
[PROOF STEP]
using finite_cone_carrier
[PROOF STATE]
proof (prove)
using this:
?A \<subseteq> carrier_vec n \<Longrightarrow> finite_cone ?A \<subseteq> carrier_vec n
goal (1 subgoal):
1. A \<subseteq> carrier_vec n \<Longrightarrow> cone A \<subseteq> carrier_vec n
[PROOF STEP]
unfolding cone_def
[PROOF STATE]
proof (prove)
using this:
?A \<subseteq> carrier_vec n \<Longrightarrow> finite_cone ?A \<subseteq> carrier_vec n
goal (1 subgoal):
1. A \<subseteq> carrier_vec n \<Longrightarrow> {x. \<exists>Ws. finite Ws \<and> Ws \<subseteq> A \<and> x \<in> finite_cone Ws} \<subseteq> carrier_vec n
[PROOF STEP]
by blast |
"Turn, Turn, Turn" - Coulson and his team find themselves without anyone they can trust, only to discover that they are trapped with a traitor in their midst, on "Marvel's Agents of S.H.I.E.L.D.," Tuesday, April 8 (8:00-9:01 p.m., ET) on the ABC Television Network.
"Marvel's Agents of S.H.I.E.L.D." stars Clark Gregg as Agent Phil Coulson, Ming-Na Wen as Agent Melinda May, Brett Dalton as Agent Grant Ward, Chloe Bennet as Skye, Iain De Caestecker as Agent Leo Fitz and Elizabeth Henstridge as Agent Jemma Simmons.
Guest starring are Bill Paxton as Agent Garrett, Saffron Burrows as Victoria Hand, Christine Adams as Agent Weaver, B.J. Britt as Agent Triplett, Charles Halford as Agent Shaw, James Macdonald as Agent Jacobson, Dayo Ade as Agent Barbour, Kylie Furneaux as Agent Shade, Alex Daniels as Agent Chaimson, Braden Moran as Agent Jones, Cameron Diskin as Agent Baylin.
"Turn, Turn, Turn" was written by Jed Whedon & Maurissa Tancharoen and directed by Vincent Misiano.
Is anyone besides me watching this?
If so, you should see the new Cap film; it has a direct impact on "Agents of S.H.I.E.L.D.".....so to speak.
The good news: Producers are hoping to find a more permanent position for her next season. The bad: Agent Hill's future on the show — including her return in the April 29 episode — will be highly contentious. For starters, she was last seen applying for a job at Stark Industries at the end of Winter Soldier. "It would appear that she's moving on. Coulson clearly has not," executive producer Jeph Loeb says. "And let's not forget that Hill hid the truth about T.A.H.I.T.I." "I don't know how much Coulson trusts her opinion on anything, frankly," executive producer Jeffrey Bell says.
Glad that Agents has been renewed. That said, it has to be admitted that it took a very long time for the series to really get going. I hope that is better structured in Season 2. Whedon or not, it's time to get down to business and show what the series can do.
A "YAY" for Agent Carter, I'm really curious about that one.
Not so fast! Did you notice that when Simmons kissed Fitz, she smooched him everywhere but on his actual mouth? "Clearly, any time one person says something and a person doesn't reciprocate, it changes things," executive producer Jeffrey Bell tells me. "It's safe to say there will be consequences." Further complicating matters: Producers want to keep Agent Triplett around.
Let me fix that for you: Melinda May will find herself with new duties as S.H.I.E.L.D. is being rebuilt, but her past may come back to haunt her this year. “She’s gone through something crazy in Bahrain that we’ve hinted at and we will explore in this season,” executive producer Jed Whedon teases. Sounds like The Cavalry will ride again! |
Formal statement is: lemma infnorm_pos_le: fixes x :: "'a::euclidean_space" shows "0 \<le> infnorm x" Informal statement is: The infnorm of a vector is always nonnegative. |
lemma compact_imp_complete: fixes s :: "'a::metric_space set" assumes "compact s" shows "complete s" |
Formal statement is: lemma cball_sing: fixes x :: "'a::metric_space" shows "e = 0 \<Longrightarrow> cball x e = {x}" Informal statement is: If $e = 0$, then the closed ball of radius $e$ centered at $x$ is just the point $x$. |
Formal statement is: lemma C1_differentiable_polynomial_function: fixes p :: "real \<Rightarrow> 'a::euclidean_space" shows "polynomial_function p \<Longrightarrow> p C1_differentiable_on S" Informal statement is: If $p$ is a polynomial function, then $p$ is continuously differentiable on $S$. |
[STATEMENT]
lemma length_inf_prefix: "length (xs \<sqinter> ys) = inf_prefix_length xs ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length (xs \<sqinter> ys) = inf_prefix_length xs ys
[PROOF STEP]
by (simp add: inf_prefix_def min_eqR inf_prefix_length_leL) |
namespace TBA
-- Let's work with some inductive types other than `Nat`!
-- Here is our very own definition of `List`:
inductive List (α : Type) where
| nil : List α
| cons (head : α) (tail : List α) : List α
notation (priority := high) "[" "]" => List.nil -- `[]`
infixr:67 (priority := high) " :: " => List.cons -- `a :: as`
-- as a warm-up exercise, let's define concatenation of two lists
def append (as bs : List α) : List α := _
infixl:65 (priority := high) " ++ " => append
example : 1::2::[] ++ 3::4::[] = 1::2::3::4::[] := rfl
-- as with associativity on `Nat`, think twice about what induction variable to use!
theorem append_assoc : (as ++ bs) ++ cs = as ++ (bs ++ cs) := by
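-- A hedged sketch of one possible solution (the `by` above is the exercise stub).
-- Fresh names (`appendSketch`, `appendSketch_assoc`) are used so nothing clashes
-- with the stubs; this is just one way to fill them in, not the canonical answer.
def appendSketch : List α → List α → List α
  | [],      bs => bs
  | a :: as, bs => a :: appendSketch as bs
example : appendSketch (1::2::[]) (3::4::[]) = 1::2::3::4::[] := rfl
theorem appendSketch_assoc {as bs cs : List α} :
    appendSketch (appendSketch as bs) cs = appendSketch as (appendSketch bs cs) := by
  induction as with
  | nil => simp [appendSketch]
  | cons a as ih => simp [appendSketch, ih]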
open Decidable
/-
One important special case of `Decidable` is decidability of equalities:
```
abbrev DecidableEq (α : Type) :=
(a b : α) → Decidable (a = b)
def decEq [s : DecidableEq α] (a b : α) : Decidable (a = b) :=
s a b
```
Note: `DecidableEq` is defined using `abbrev` instead of `def` because typeclass resolution only
unfolds the former for performance reasons.
Let's try to prove that `List` equality is decidable!
-/
-- hint: Something is still missing. Do we need to assume anything about `α`?
-- hint: Apply `match` case distinctions until the appropriate `Decidable` constructor is clear,
-- then fill in its proof argument with `by`.
-- We could also do everything in a `by` block, but it's nicer to reserve tactics for proofs so we have
-- more control over the code of programs, i.e. the part that is actually executed
def ldecEq (as bs : List α) : Decidable (as = bs) := _
-- Let's declare the instance:
instance : DecidableEq (List α) := _
-- This should now work:
#eval decEq (1::2::[]) (1::3::[])
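-- A hedged sketch of one possible `ldecEq`, again under a fresh name and with the
-- extra `[DecidableEq α]` assumption the hint above alludes to; `List.noConfusion`
-- is the eliminator Lean auto-generates for our `List` type.
def ldecEqSketch [DecidableEq α] : (as bs : List α) → Decidable (as = bs)
  | [], [] => isTrue rfl
  | [], _ :: _ => isFalse fun h => List.noConfusion h
  | _ :: _, [] => isFalse fun h => List.noConfusion h
  | a :: as, b :: bs =>
    if hhd : a = b then
      match ldecEqSketch as bs with
      | isTrue htl  => isTrue (by rw [hhd, htl])
      | isFalse htl => isFalse fun h => List.noConfusion h fun _ htl' => htl htl'
    else
      isFalse fun h => List.noConfusion h fun hhd' _ => hhd hhd'
-- With something like this in place, the instance above could be given as
-- `instance : DecidableEq (List α) := ldecEqSketch`.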
/-
`DecidablePred` is another convenient abbreviation of `Decidable`
```
abbrev DecidablePred (r : α → Prop) :=
(a : α) → Decidable (r a)
```
If we have `[DecidablePred p]`, we can e.g. use `if p a then ...` for some `a : α`.
`filter p as` is a simple list function that should remove all elements `a` for which `p a` does not hold.
-/
def filter (p : α → Prop) [DecidablePred p] (as : List α) : List α := _
example : filter (fun x => x % 2 = 0) (1::2::3::4::[]) = 2::4::[] := rfl
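-- A hedged sketch of one possible `filter` (fresh name again); it mirrors the
-- unfolding into `if p a then a :: _ else _` described in the comments further below.
def filterSketch (p : α → Prop) [DecidablePred p] : List α → List α
  | [] => []
  | a :: as => if p a then a :: filterSketch p as else filterSketch p as
example : filterSketch (fun x => x % 2 = 0) (1::2::3::4::[]) = 2::4::[] := rfl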
variable {p : α → Prop} [DecidablePred p] {as bs : List α}
-- These helper theorems can be useful, also for manual rewriting
@[simp] theorem filter_cons_true (h : p a) : filter p (a :: as) = a :: filter p as :=
by simp [filter, h]
@[simp] theorem filter_cons_false (h : ¬ p a) : filter p (a :: as) = filter p as :=
by simp [filter, h]
-- It's worthwhile thinking about what's actually happening here:
-- * first, `filter p (a :: as)` is unfolded to `if p a then a :: filter p as else filter p as`
-- (note that the second `filter` cannot be unfolded)
-- * then `if p a then ...` is rewritten to `if True then ...` using `h`
-- * finally, `if True then a :: filter p as else ...` is rewritten to `a :: filter p as` using
-- the built-in simp theorem `Lean.Simp.ite_true`
-- useful tactic: `by_cases h : q` for a decidable proposition `q`
theorem filter_idem : filter p (filter p as) = filter p as := by
theorem filter_append : filter p (as ++ bs) = filter p as ++ filter p bs := by
-- list membership as an inductive predicate:
inductive Mem (a : α) : List α → Prop where
-- either it's the first element...
| head {as} : Mem a (a::as)
-- or it's in the remainder list
| tail {as} : Mem a as → Mem a (a'::as)
infix:50 " ∈ " => Mem
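-- A quick sanity check (our own toy example, not part of the exercise sheet):
-- `1` is a member of `2 :: 1 :: []` via one `tail` step followed by `head`.
example : (1 : Nat) ∈ (2 :: 1 :: [] : List Nat) := Mem.tail Mem.head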
-- recall that `a ≠ b` is the same as `a = b → False`
theorem mem_of_nonempty_filter (h : ∀ a, p a → a = x) : filter p as ≠ [] → x ∈ as := by
-- This proof is pretty long! Some hints:
-- * If you have an assumption `h : a ∈ []`, you can solve the current goal by `cases h`:
-- since there is no `Mem` constructor that could possibly match `[]`, there is nothing left to prove!
-- This exclusion of cases, and case analysis on inductive predicates in general,
-- is also called *rule inversion* since we (try to) apply the introduction rules (constructors)
-- "in reverse".
-- * On the other hand, if you try to do case analysis on a proof of e.g. `a ∈ filter p as`,
-- Lean will complain with "dependent elimination failed" since it *doesn't* know yet if
-- the argument `filter p as` is of the form `_ :: _` as demanded by the `Mem` constructors.
-- You need to get the assumption into the shape `_ ∈ []` or `_ ∈ _ :: _` before applying
-- `(no)match/cases` to it.
theorem mem_filter : a ∈ filter p as ↔ a ∈ as ∧ p a := _
-- Here is an alternative definition of list membership via `append`
inductive Mem' (a : α) : List α → Prop where
| intro (as bs) : Mem' a (as ++ (a :: bs))
infix:50 " ∈' " => Mem'
-- Let's prove that they are equivalent!
theorem mem_mem' : a ∈ as ↔ a ∈' as := _
end TBA
|
b -> c (6): a -> b (5)
c -> a (7): b -> c (6)
a -> d (1): c -> a (7)
d -> c (2): a -> d (1)
c -> e (3): d -> c (2)
e -> a (4): c -> e (3)
|
Formal statement is: lemma open_delete: fixes s :: "'a::t1_space set" shows "open s \<Longrightarrow> open (s - {x})" Informal statement is: If $s$ is an open set, then $s - \{x\}$ is open. |
[STATEMENT]
lemma [autoref_itype]: "id ::\<^sub>i I \<rightarrow>\<^sub>i I"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. id ::\<^sub>i I \<rightarrow>\<^sub>i I
[PROOF STEP]
by simp |
Formal statement is: lemma mem_interior_cball: "x \<in> interior S \<longleftrightarrow> (\<exists>e>0. cball x e \<subseteq> S)" Informal statement is: A point $x$ is in the interior of a set $S$ if and only if there exists an $e > 0$ such that the closed ball of radius $e$ centered at $x$ is contained in $S$. |
-- ---------------------------------------------------------------------
-- Exercise. Prove that, in rings,
-- 0 * a = 0
-- ----------------------------------------------------------------------
import algebra.ring
namespace my_ring
variables {R : Type*} [ring R]
variable (a : R)
-- 1st proof
-- ===============
example : 0 * a = 0 :=
begin
have h : 0 * a + 0 * a = 0 * a + 0,
calc 0 * a + 0 * a
= (0 + 0) * a : (add_mul 0 0 a).symm
... = 0 * a : congr_arg (λ x, x * a) (add_zero 0)
... = 0 * a + 0 : self_eq_add_right.mpr rfl,
rw add_left_cancel h
end
-- 2nd proof
-- ===============
example : 0 * a = 0 :=
begin
have h : 0 * a + 0 * a = 0 * a + 0,
calc 0 * a + 0 * a
= (0 + 0) * a : by rw add_mul
... = 0 * a : by rw add_zero
... = 0 * a + 0 : by rw add_zero,
rw add_left_cancel h
end
-- 3rd proof
-- ===============
example : 0 * a = 0 :=
begin
have h : 0 * a + 0 * a = 0 * a + 0,
{ rw [←add_mul, add_zero, add_zero] },
rw add_left_cancel h
end
-- 4th proof
-- ===============
example : 0 * a = 0 :=
begin
have h : 0 * a + 0 * a = 0 * a + 0,
calc 0 * a + 0 * a
= (0 + 0) * a : by simp
... = 0 * a : by simp
... = 0 * a + 0 : by simp,
simp,
end
end my_ring
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import group_theory.subgroup.basic
import group_theory.submonoid.pointwise
/-! # Pointwise instances on `subgroup` and `add_subgroup`s
This file provides the actions
* `subgroup.pointwise_mul_action`
* `add_subgroup.pointwise_mul_action`
which matches the action of `mul_action_set`.
These actions are available in the `pointwise` locale.
## Implementation notes
This file is almost identical to `group_theory/submonoid/pointwise.lean`. Where possible, try to
keep them in sync.
-/
variables {α : Type*} {G : Type*} {A : Type*} [group G] [add_group A]
namespace subgroup
section monoid
variables [monoid α] [mul_distrib_mul_action α G]
/-- The action on a subgroup corresponding to applying the action to every element.
This is available as an instance in the `pointwise` locale. -/
protected def pointwise_mul_action : mul_action α (subgroup G) :=
{ smul := λ a S, S.map (mul_distrib_mul_action.to_monoid_End _ _ a),
one_smul := λ S, (congr_arg (λ f, S.map f) (monoid_hom.map_one _)).trans S.map_id,
mul_smul := λ a₁ a₂ S,
(congr_arg (λ f, S.map f) (monoid_hom.map_mul _ _ _)).trans (S.map_map _ _).symm,}
localized "attribute [instance] subgroup.pointwise_mul_action" in pointwise
open_locale pointwise
lemma pointwise_smul_def {a : α} (S : subgroup G) :
a • S = S.map (mul_distrib_mul_action.to_monoid_End _ _ a) := rfl
@[simp] lemma coe_pointwise_smul (a : α) (S : subgroup G) : ↑(a • S) = a • (S : set G) := rfl
@[simp] lemma pointwise_smul_to_submonoid (a : α) (S : subgroup G) :
(a • S).to_submonoid = a • S.to_submonoid := rfl
lemma smul_mem_pointwise_smul (m : G) (a : α) (S : subgroup G) : m ∈ S → a • m ∈ a • S :=
(set.smul_mem_smul_set : _ → _ ∈ a • (S : set G))
lemma mem_smul_pointwise_iff_exists (m : G) (a : α) (S : subgroup G) :
m ∈ a • S ↔ ∃ (s : G), s ∈ S ∧ a • s = m :=
(set.mem_smul_set : m ∈ a • (S : set G) ↔ _)
instance pointwise_central_scalar [mul_distrib_mul_action αᵐᵒᵖ G] [is_central_scalar α G] :
is_central_scalar α (subgroup G) :=
⟨λ a S, congr_arg (λ f, S.map f) $ monoid_hom.ext $ by exact op_smul_eq_smul _⟩
end monoid
section group
variables [group α] [mul_distrib_mul_action α G]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff {a : α} {S : subgroup G} {x : G} :
a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff
lemma mem_pointwise_smul_iff_inv_smul_mem {a : α} {S : subgroup G} {x : G} :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem
lemma mem_inv_pointwise_smul_iff {a : α} {S : subgroup G} {x : G} : x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff
@[simp] lemma pointwise_smul_le_pointwise_smul_iff {a : α} {S T : subgroup G} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff
lemma pointwise_smul_subset_iff {a : α} {S T : subgroup G} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff
lemma subset_pointwise_smul_iff {a : α} {S T : subgroup G} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff
/-- Applying a `mul_distrib_mul_action` results in an isomorphic subgroup -/
@[simps] def equiv_smul (a : α) (H : subgroup G) : H ≃* (a • H : subgroup G) :=
(mul_distrib_mul_action.to_mul_equiv G a).subgroup_map H
end group
section group_with_zero
variables [group_with_zero α] [mul_distrib_mul_action α G]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : subgroup G)
(x : G) : a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff₀ ha (S : set G) x
lemma mem_pointwise_smul_iff_inv_smul_mem₀ {a : α} (ha : a ≠ 0) (S : subgroup G) (x : G) :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem₀ ha (S : set G) x
lemma mem_inv_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : subgroup G) (x : G) :
x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff₀ ha (S : set G) x
@[simp] lemma pointwise_smul_le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : subgroup G} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff₀ ha
lemma pointwise_smul_le_iff₀ {a : α} (ha : a ≠ 0) {S T : subgroup G} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff₀ ha
lemma le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : subgroup G} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff₀ ha
end group_with_zero
end subgroup
namespace add_subgroup
section monoid
variables [monoid α] [distrib_mul_action α A]
/-- The action on an additive subgroup corresponding to applying the action to every element.
This is available as an instance in the `pointwise` locale. -/
protected def pointwise_mul_action : mul_action α (add_subgroup A) :=
{ smul := λ a S, S.map (distrib_mul_action.to_add_monoid_End _ _ a),
one_smul := λ S, (congr_arg (λ f, S.map f) (monoid_hom.map_one _)).trans S.map_id,
mul_smul := λ a₁ a₂ S,
(congr_arg (λ f, S.map f) (monoid_hom.map_mul _ _ _)).trans (S.map_map _ _).symm,}
localized "attribute [instance] add_subgroup.pointwise_mul_action" in pointwise
open_locale pointwise
@[simp] lemma coe_pointwise_smul (a : α) (S : add_subgroup A) : ↑(a • S) = a • (S : set A) := rfl
@[simp] lemma pointwise_smul_to_add_submonoid (a : α) (S : add_subgroup A) :
(a • S).to_add_submonoid = a • S.to_add_submonoid := rfl
lemma smul_mem_pointwise_smul (m : A) (a : α) (S : add_subgroup A) : m ∈ S → a • m ∈ a • S :=
(set.smul_mem_smul_set : _ → _ ∈ a • (S : set A))
lemma mem_smul_pointwise_iff_exists (m : A) (a : α) (S : add_subgroup A) :
m ∈ a • S ↔ ∃ (s : A), s ∈ S ∧ a • s = m :=
(set.mem_smul_set : m ∈ a • (S : set A) ↔ _)
instance pointwise_central_scalar [distrib_mul_action αᵐᵒᵖ A] [is_central_scalar α A] :
is_central_scalar α (add_subgroup A) :=
⟨λ a S, congr_arg (λ f, S.map f) $ add_monoid_hom.ext $ by exact op_smul_eq_smul _⟩
end monoid
section group
variables [group α] [distrib_mul_action α A]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff {a : α} {S : add_subgroup A} {x : A} :
a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff
lemma mem_pointwise_smul_iff_inv_smul_mem {a : α} {S : add_subgroup A} {x : A} :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem
lemma mem_inv_pointwise_smul_iff {a : α} {S : add_subgroup A} {x : A} : x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff
@[simp] lemma pointwise_smul_le_pointwise_smul_iff {a : α} {S T : add_subgroup A} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff
lemma pointwise_smul_le_iff {a : α} {S T : add_subgroup A} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff
lemma le_pointwise_smul_iff {a : α} {S T : add_subgroup A} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff
end group
section group_with_zero
variables [group_with_zero α] [distrib_mul_action α A]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : add_subgroup A)
(x : A) : a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff₀ ha (S : set A) x
lemma mem_pointwise_smul_iff_inv_smul_mem₀ {a : α} (ha : a ≠ 0) (S : add_subgroup A) (x : A) :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem₀ ha (S : set A) x
lemma mem_inv_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : add_subgroup A) (x : A) :
x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff₀ ha (S : set A) x
@[simp] lemma pointwise_smul_le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : add_subgroup A} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff₀ ha
lemma pointwise_smul_le_iff₀ {a : α} (ha : a ≠ 0) {S T : add_subgroup A} :
a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff₀ ha
lemma le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : add_subgroup A} :
S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff₀ ha
end group_with_zero
end add_subgroup
|
Formal statement is: lemma path_image_part_circlepath': "path_image (part_circlepath z r s t) = (\<lambda>x. z + r * cis x) ` closed_segment s t" Informal statement is: The image of the path $z + r \exp(i \theta)$ for $\theta$ in the closed segment $[s, t]$ is the set of points $z + r \exp(i \theta)$ for $\theta$ in the closed segment $[s, t]$. |
Formal statement is: lemma residue_holo: assumes "open s" "z \<in> s" and f_holo: "f holomorphic_on s" shows "residue f z = 0" Informal statement is: If $f$ is holomorphic on an open set $s$ and $z \in s$, then the residue of $f$ at $z$ is zero. |
Formal statement is: lemma bounded_plus: fixes S ::"'a::real_normed_vector set" assumes "bounded S" "bounded T" shows "bounded ((\<lambda>(x,y). x + y) ` (S \<times> T))" Informal statement is: If $S$ and $T$ are bounded sets, then the set of sums of elements of $S$ and $T$ is bounded. |
module Nat where
open import Eq
data ℕ : Set where
Z : ℕ
S : ℕ → ℕ
{-# BUILTIN NATURAL ℕ #-}
infixl 6 _+_
infixl 7 _×_
_+_ : ℕ → ℕ → ℕ
Z + n = n
(S k) + n = S(k + n)
{-# BUILTIN NATPLUS _+_ #-}
_×_ : ℕ → ℕ → ℕ
Z × n = Z
S m × n = n + m × n
{-# BUILTIN NATTIMES _×_ #-}
*-right-zero : ∀ (n : ℕ) → n × Z ≡ Z
*-right-zero Z = Refl
*-right-zero (S n) = *-right-zero n
testEq : (x : ℕ) → (y : ℕ) → (p : x ≡ y) → ℕ
testEq x _ Refl = x
|
import .basic
universes u v
variables (M N : Type u) [monoid M] [monoid N]
variables (G H : Type u) [group G] [group H]
variables {K : Type v} [monoid K]
def bicoprod_monoid_aux (b : bool) : monoid (cond b M N) :=
{ mul := bool.cases_on b ((*) : N → N → N) ((*) : M → M → M),
one := bool.cases_on b (1 : N) (1 : M),
mul_assoc := bool.cases_on b (@mul_assoc N _) (@mul_assoc M _),
one_mul := bool.cases_on b (@one_mul N _) (@one_mul M _),
mul_one := bool.cases_on b (@mul_one N _) (@mul_one M _) }
local attribute [instance] bicoprod_monoid_aux
def bicoprod := coprod (λ b : bool, cond b M N)
namespace bicoprod
variables [decidable_eq M] [decidable_eq N] [decidable_eq G] [decidable_eq H]
infixr ` ⋆ `: 30 := bicoprod
def bicoprod_group_aux (b : bool) :
group (cond b G H) :=
let I : group (cond b G H) := by cases b; dunfold cond; apply_instance in
{ inv := bool.cases_on b (@has_inv.inv H _) (@has_inv.inv G _),
mul_left_inv := bool.cases_on b (@mul_left_inv H _) (@mul_left_inv G _),
..bicoprod_monoid_aux G H b }
def bicoprod_dec_eq_aux (b : bool) :
decidable_eq (cond b M N) := by cases b; dunfold cond; apply_instance
local attribute [instance] bicoprod_group_aux bicoprod_dec_eq_aux
instance : monoid (M ⋆ N) := coprod.monoid
instance : group (G ⋆ H) := @coprod.group bool (λ b, cond b G H) _ _ _
variables {M N}
def inl : M →* M ⋆ N :=
{ to_fun := @coprod.of bool (λ b, cond b M N) _ _ _ tt,
map_one' := by simp,
map_mul' := by simp }
def inr : N →* M ⋆ N :=
{ to_fun := @coprod.of bool (λ b, cond b M N) _ _ _ ff,
map_one' := by simp,
map_mul' := by simp }
def lift (f : M →* K) (g : N →* K) : M ⋆ N →* K :=
{ to_fun := coprod.lift (λ b, show cond b M N →* K,
from bool.rec_on b ⟨g, by simp, by simp⟩ ⟨f, by simp, by simp⟩ ),
map_one' := by simp,
map_mul' := by simp }
@[simp] lemma lift_inl (f₁ : M →* K) (f₂ : N →* K) (m : M) :
(lift f₁ f₂) (inl m) = f₁ m :=
@coprod.lift_of bool (λ b, cond b M N) _ _ _ _ _ _ tt m
@[simp] lemma lift_inr (f₁ : M →* K) (f₂ : N →* K) (n : N) :
(lift f₁ f₂) (inr n) = f₂ n :=
@coprod.lift_of bool (λ b, cond b M N) _ _ _ _ _ _ ff n
@[simp] lemma lift_comp_inl (f₁ : G →* M) (f₂ : H →* M) :
(lift f₁ f₂).comp inl = f₁ :=
monoid_hom.ext (lift_inl _ _)
@[simp] lemma lift_comp_inr (f₁ : G →* M) (f₂ : H →* M) :
(lift f₁ f₂).comp inr = f₂ :=
monoid_hom.ext (lift_inr _ _)
end bicoprod
|
section \<open>Augmented Tree (Tree2)\<close>
theory Tree2
imports "Tree"
begin
text \<open>This theory provides the basic infrastructure for the type @{typ \<open>('a * 'b) tree\<close>}
of augmented trees where @{typ 'a} is the key and @{typ 'b} some additional information.\<close>
text \<open>IMPORTANT: Inductions and cases analyses on augmented trees need to use the following
two rules explicitly. They generate nodes of the form @{term "Node l (a,b) r"}
rather than @{term "Node l a r"} for trees of type @{typ "'a tree"}.\<close>
lemmas tree2_induct = tree.induct[where 'a = "'a * 'b", split_format(complete)]
lemmas tree2_cases = tree.exhaust[where 'a = "'a * 'b", split_format(complete)]
fun inorder :: "('a*'b)tree \<Rightarrow> 'a list" where
"inorder Leaf = []" |
"inorder (Node l (a,_) r) = inorder l @ a # inorder r"
fun set_tree :: "('a*'b) tree \<Rightarrow> 'a set" where
"set_tree Leaf = {}" |
"set_tree (Node l (a,_) r) = {a} \<union> set_tree l \<union> set_tree r"
fun bst :: "('a::linorder*'b) tree \<Rightarrow> bool" where
"bst Leaf = True" |
"bst (Node l (a, _) r) = ((\<forall>x \<in> set_tree l. x < a) \<and> (\<forall>x \<in> set_tree r. a < x) \<and> bst l \<and> bst r)"
lemma finite_set_tree[simp]: "finite(set_tree t)"
by(induction t) auto
lemma eq_set_tree_empty[simp]: "set_tree t = {} \<longleftrightarrow> t = Leaf"
by (cases t) auto
lemma set_inorder[simp]: "set (inorder t) = set_tree t"
by (induction t) auto
lemma length_inorder[simp]: "length (inorder t) = size t"
by (induction t) auto
end
|
Formal statement is: lemma closed_real: fixes s :: "real set" shows "closed s \<longleftrightarrow> (\<forall>x. (\<forall>e>0. \<exists>x' \<in> s. x' \<noteq> x \<and> \<bar>x' - x\<bar> < e) \<longrightarrow> x \<in> s)" Informal statement is: A set $S$ is closed if and only if for every $x$, if for every $\epsilon > 0$, there exists $x' \in S$ such that $x' \neq x$ and $|x' - x| < \epsilon$, then $x \in S$. |
% --- [ Criteria for Selection of Test Programs ] ------------------------------
\subsection{Criteria for Selection of Test Programs}
\label{sec:criteria_for_selection_of_test_programs}
As part of a broader Open Science movement, the test data\footnote{Test data: \url{https://github.com/decomp/testdata}} and source code\footnote{Source code: \url{https://github.com/decomp/decomp}} of this research project are released open source.
To facilitate reproducible evaluation results, the test programs must be open source, explicitly tracked by a specific version number or version control revision, and provided free of charge to the wider public. While unfortunate, this excludes test programs from the Standard Performance Evaluation Corporation (\url{http://spec.org}), as those test programs are not made available free of charge to the wider public.
% TODO: Update the version numbers of SQLite and Coreutils.
For coverage of real-world applications, the shell from the SQLite project (version 3.25.0) and 107 tools from the GNU Core Utilities project (version 8.30) are included in the test data. The scientific community has in recent years converged on utilizing GNU Core Utilities to evaluate control flow recovery methods \cite{no_more_gotos,homan_centric_decompilation,interprocedural_control_flow_recovery,semantics_preserving_structural_analysis,revng}.
% TODO: remove paragraph about pathological test programs?
%The control flow recovery methods will also be evaluated on pathological test programs which have been automatically generated to contain hundreds or thousands of nested control flow primitives.
% TODO: Add specific revision for testdata repo.
|
lemma Sup_lim: fixes a :: "'a::{complete_linorder,linorder_topology}" assumes "\<And>n. b n \<in> s" and "b \<longlonglongrightarrow> a" shows "a \<le> Sup s" |
Formal statement is: lemma small_subset_big: "l F (g) \<subseteq> L F (g)" Informal statement is: The set of all small elements of a filter is a subset of the set of all large elements of the filter. |
/-
Copyright (c) 2023 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import algebra.big_operators.basic
import mathlib.combinatorics.simple_graph.subgraph
import mathlib.data.sym.sym2
/-!
# Containment of graphs
This file defines graph containment.
A graph is said to contain another if one of its subgraphs is isomorphic to it.
-/
open function
open_locale big_operators classical
namespace simple_graph
variables {α β γ : Type*} {G G₁ G₂ G₃ : simple_graph α} {H : simple_graph β} {I : simple_graph γ}
/-- A simple graph `G` is contained in a simple graph `H` if there exists a subgraph of `H`
isomorphic to `G`. This is denoted by `G ⊑ H`. -/
def is_contained (G : simple_graph α) (H : simple_graph β) : Prop := ∃ f : G →g H, injective f
infix ` ⊑ `:50 := simple_graph.is_contained
lemma is_contained_of_le (h : G₁ ≤ G₂) : G₁ ⊑ G₂ := ⟨hom_of_le h, injective_id⟩
protected lemma iso.is_contained (e : G ≃g H) : G ⊑ H := ⟨e, e.injective⟩
protected lemma iso.is_contained' (e : G ≃g H) : H ⊑ G := e.symm.is_contained
lemma subgraph.coe_is_contained (G' : G.subgraph) : G'.coe ⊑ G := ⟨G'.hom, subtype.val_injective⟩
@[refl] lemma is_contained_refl (G : simple_graph α) : G ⊑ G := is_contained_of_le le_rfl
lemma is_contained_rfl : G ⊑ G := is_contained_refl _
lemma is_contained.trans : G ⊑ H → H ⊑ I → G ⊑ I :=
by { rintro ⟨f, hf⟩ ⟨g, hg⟩, exact ⟨g.comp f, hg.comp hf⟩ }
lemma is_contained.mono_left (h₁₂ : G₁ ≤ G₂) (h₂₃ : G₂ ⊑ G₃) : G₁ ⊑ G₃ :=
(is_contained_of_le h₁₂).trans h₂₃
lemma is_contained.mono_right (h₁₂ : G₁ ⊑ G₂) (h₂₃ : G₂ ≤ G₃) : G₁ ⊑ G₃ :=
h₁₂.trans $ is_contained_of_le h₂₃
alias is_contained.mono_right ← is_contained.trans_le
lemma is_contained_of_is_empty [is_empty α] : G ⊑ H :=
⟨{ to_fun := is_empty_elim, map_rel' := is_empty_elim }, is_empty_elim⟩
lemma bot_is_contained (f : α ↪ β) : (⊥ : simple_graph α) ⊑ H :=
⟨{ to_fun := f, map_rel' := λ _ _, false.elim }, f.injective⟩
lemma is_contained_iff_exists_subgraph : G ⊑ H ↔ ∃ H' : H.subgraph, nonempty $ G ≃g H'.coe :=
begin
split,
{ rintro ⟨f, hf⟩,
exact ⟨subgraph.map f ⊤, ⟨(subgraph.iso_map _ hf _).comp subgraph.top_iso.symm⟩⟩ },
{ rintro ⟨H', ⟨e⟩⟩,
exact e.is_contained.trans H'.coe_is_contained }
end
private lemma aux (hG : G ≠ ⊥) {H' : H.subgraph} (f : G ≃g H'.coe) : H'.edge_set.nonempty :=
begin
obtain ⟨e, he⟩ := nonempty_edge_set.2 hG,
rw ←subgraph.image_coe_edge_set_coe,
exact set.nonempty.image _ ⟨sym2.map f e, f.map_mem_edge_set_iff.2 he⟩,
end
/-- `G.kill H` is a subgraph of `H` where an edge from every subgraph isomorphic to `G` was removed.
As such, it is a big subgraph of `H` that does not contain any subgraph isomorphic to `G`.
-/
noncomputable def kill (G : simple_graph α) (H : simple_graph β) : simple_graph β :=
if hG : G = ⊥ then H else H.delete_edges $ ⋃ (H' : H.subgraph) (f : G ≃g H'.coe), {(aux hG f).some}
lemma kill_le : G.kill H ≤ H := by { rw kill, split_ifs, exacts [le_rfl, delete_edges_le _ _] }
@[simp] lemma bot_kill (H : simple_graph β) : (⊥ : simple_graph α).kill H = H := dif_pos rfl
lemma kill_eq_right (hG : G ≠ ⊥) : G.kill H = H ↔ ¬ G ⊑ H :=
begin
rw [kill, dif_neg hG],
simp only [set.disjoint_left, is_contained_iff_exists_subgraph, @forall_swap _ H.subgraph,
set.Union_singleton_eq_range, delete_edges_eq, set.mem_Union, set.mem_range, not_exists,
not_nonempty_iff],
exact forall_congr (λ H', ⟨λ h,
⟨λ f, h _ (subgraph.edge_set_subset _ $ (aux hG f).some_spec) f rfl⟩, λ h _ _, h.elim⟩),
end
lemma kill_of_not_is_contained (hGH : ¬ G ⊑ H) : G.kill H = H :=
begin
obtain rfl | hG := eq_or_ne G ⊥,
{ exact bot_kill _ },
{ exact (kill_eq_right hG).2 hGH }
end
lemma not_is_contained_kill (hG : G ≠ ⊥) : ¬ G ⊑ G.kill H :=
begin
rw [kill, dif_neg hG, delete_edges_eq_sdiff_from_edge_set, is_contained_iff_exists_subgraph],
rintro ⟨H', ⟨f⟩⟩,
have hH' : (H'.map $ hom_of_le (sdiff_le : H \ _ ≤ H)).edge_set.nonempty,
{ rw subgraph.edge_set_map,
exact (aux hG f).image _ },
set e := hH'.some with he,
have : e ∈ _ := hH'.some_spec,
clear_value e,
rw ←subgraph.image_coe_edge_set_coe at this,
subst he,
obtain ⟨e, he₀, he₁⟩ := this,
let e' : sym2 H'.verts := sym2.map (subgraph.iso_map (hom_of_le _) injective_id _).symm e,
have he' : e' ∈ H'.coe.edge_set := (iso.map_mem_edge_set_iff _).2 he₀,
rw subgraph.edge_set_coe at he',
have := subgraph.edge_set_subset _ he',
simp only [edge_set_sdiff, edge_set_from_edge_set, edge_set_sdiff_sdiff_is_diag, set.mem_diff,
set.mem_Union, not_exists] at this,
refine this.2 (H'.map $ hom_of_le sdiff_le)
((subgraph.iso_map (hom_of_le _) injective_id _).comp f) _,
rw [sym2.map_map, set.mem_singleton_iff, ←he₁],
congr' 1 with x,
refine congr_arg coe (equiv.set.image_symm_apply _ _ injective_id _ _),
simpa using x.2,
end
variables [fintype H.edge_set]
noncomputable instance kill.edge_set.fintype : fintype (G.kill H).edge_set :=
fintype.of_injective (set.inclusion $ edge_set_mono kill_le) $ set.inclusion_injective _
end simple_graph
|
If the limit of a function $f$ as $x$ approaches $a$ is $L$, then the limit of $f$ as $x$ approaches $a$ plus $h$ is also $L$. |
##Main function of generating random parameter values
RandomGeneration<-function(WD, GD, CropName, CultivarID, GenotypeFileName, ParameterProperty, ParameterAddress, TotalParameterNumber, NumberOfModelRun, RoundOfGLUE, GLUEFlag)
{
Option="U";
#Set uniform distribution as the default distribution to generate the random parameter sets.
if(Option=="U")
{
eval(parse(text = paste("source('",WD,"/Uniform.r')",sep = '')));
RandomParameter<-(Uniform(GD, CropName, CultivarID, GenotypeFileName, ParameterProperty, ParameterAddress, TotalParameterNumber, NumberOfModelRun, RoundOfGLUE, GLUEFlag));
#Get rid of the negative random values through ABS();
}
return(RandomParameter);##Return the generated random matrix.
}
|
lemma higher_deriv_transform_within_open: fixes z::complex assumes "f holomorphic_on S" "g holomorphic_on S" "open S" and z: "z \<in> S" and fg: "\<And>w. w \<in> S \<Longrightarrow> f w = g w" shows "(deriv ^^ i) f z = (deriv ^^ i) g z" |
%% Transient diffusion equation
%% PDE and boundary conditions
% The transient diffusion equation reads
%
% $$\alpha\frac{\partial c}{\partial t}+\nabla\cdot\left(-D\nabla c\right)=0,$$
%
% where $c$ is the independent variable (concentration, temperature, etc.),
% $D$ is the diffusion coefficient, and $\alpha$ is a constant.
% Written by Ali A. Eftekhari
% Last checked: June 2021
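%
% Note that, besides diffusion, the script below also assembles a convective
% flux with a constant face velocity $u$ (via convectionTvdTerm), so in our
% reading of the assembled matrices each time step solves an implicit
% (backward-Euler-style) discretization of
%
% $$\alpha\frac{\partial c}{\partial t}+\nabla\cdot\left(u c\right)+\nabla\cdot\left(-D\nabla c\right)=0.$$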
clc
%% Define the domain and create a mesh structure
L = 50; % domain length
Nx = 10; % number of cells
m = createMesh3D(Nx,Nx+3,Nx+6, L/2,2*pi,L);
%% Create the boundary condition structure
BC = createBC(m); % all Neumann boundary condition structure
% BC.left.a(:) = 0; BC.left.b(:)=1; BC.left.c(:)=0; % left boundary
BC.right.a(:) = 0; BC.right.b(:)=1; BC.right.c(:)=0; % right boundary
% BC.top.a(:) = 0; BC.top.b(:)=1; BC.top.c(:)=3; % top boundary
% BC.bottom.a(:) = 0; BC.bottom.b(:)=1; BC.bottom.c(:)=0; % bottom boundary
BC.top.periodic=1;
BC.back.a(:) = 0; BC.back.b(:)=1; BC.back.c(:)=3; % back boundary
BC.front.a(:) = 0; BC.front.b(:)=1; BC.front.c(:)=0; % front boundary
%% define the transfer coeffs
D_val = 1;
D = createCellVariable(m, D_val);
alfa = createCellVariable(m, 1);
u = createFaceVariable(m, [0,0,0.5]);
%% define initial values
c_init = 1;
c_old = createCellVariable(m, c_init,BC); % initial values
c = c_old; % assign the old value of the cells to the current values
%% loop
Dave = harmonicMean(D);
Mdiff = diffusionTerm(Dave);
[Mbc, RHSbc] = boundaryCondition(BC);
FL = fluxLimiter('Superbee');
Mconv = convectionTvdTerm(u, c, FL);
dt = 1; % time step
final_t = 50;
for t=dt:dt:final_t
[M_trans, RHS_trans] = transientTerm(c_old, dt, alfa);
M = M_trans-Mdiff+Mbc+Mconv;
RHS = RHS_trans+RHSbc;
c = solvePDE(m,M, RHS);
c_old = c;
figure(1);visualizeCells(c);drawnow;
end
%% visualization
figure(1);visualizeCells(c);
|
lemma cball_sing: fixes x :: "'a::metric_space" shows "e = 0 \<Longrightarrow> cball x e = {x}" |
theorem Residue_theorem: fixes s pts::"complex set" and f::"complex \<Rightarrow> complex" and g::"real \<Rightarrow> complex" assumes "open s" "connected s" "finite pts" and holo:"f holomorphic_on s-pts" and "valid_path g" and loop:"pathfinish g = pathstart g" and "path_image g \<subseteq> s-pts" and homo:"\<forall>z. (z \<notin> s) \<longrightarrow> winding_number g z = 0" shows "contour_integral g f = 2 * pi * \<i> *(\<Sum>p\<in>pts. winding_number g p * residue f p)" |