In October 2010, FIT officers in plain clothes were spotted by a press photographer at a protest against companies avoiding tax, despite Commander Bob Broadhurst telling a parliamentary committee in May 2009 that only uniformed officers, distinguishable by their blue and yellow jackets, were involved in gathering intelligence at protests. The Metropolitan Police told The Guardian that it was necessary to deploy plain-clothed officers to "gather information to provide us with a relevant and up-to-date intelligence picture of what to expect". It was the first time that FITs are known to have been deployed in plain clothes.
|
(* Title: HOL/UNITY/UNITY.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1998 University of Cambridge
The basic UNITY theory (revised version, based upon the "co"
operator).
From Misra, "A Logic for Concurrent Programming", 1994.
*)
section \<open>The Basic UNITY Theory\<close>
theory UNITY imports MainRLT begin
definition
"Program =
{(init:: 'a set, acts :: ('a * 'a)set set,
allowed :: ('a * 'a)set set). Id \<in> acts & Id \<in> allowed}"
typedef 'a program = "Program :: ('a set * ('a * 'a) set set * ('a * 'a) set set) set"
morphisms Rep_Program Abs_Program
unfolding Program_def by blast
definition Acts :: "'a program => ('a * 'a)set set" where
"Acts F == (%(init, acts, allowed). acts) (Rep_Program F)"
definition "constrains" :: "['a set, 'a set] => 'a program set" (infixl "co" 60) where
"A co B == {F. \<forall>act \<in> Acts F. act``A \<subseteq> B}"
definition unless :: "['a set, 'a set] => 'a program set" (infixl "unless" 60) where
"A unless B == (A-B) co (A \<union> B)"
definition mk_program :: "('a set * ('a * 'a)set set * ('a * 'a)set set)
=> 'a program" where
"mk_program == %(init, acts, allowed).
Abs_Program (init, insert Id acts, insert Id allowed)"
definition Init :: "'a program => 'a set" where
"Init F == (%(init, acts, allowed). init) (Rep_Program F)"
definition AllowedActs :: "'a program => ('a * 'a)set set" where
"AllowedActs F == (%(init, acts, allowed). allowed) (Rep_Program F)"
definition Allowed :: "'a program => 'a program set" where
"Allowed F == {G. Acts G \<subseteq> AllowedActs F}"
definition stable :: "'a set => 'a program set" where
"stable A == A co A"
definition strongest_rhs :: "['a program, 'a set] => 'a set" where
"strongest_rhs F A == \<Inter>{B. F \<in> A co B}"
definition invariant :: "'a set => 'a program set" where
"invariant A == {F. Init F \<subseteq> A} \<inter> stable A"
definition increasing :: "['a => 'b::{order}] => 'a program set" where
\<comment> \<open>Polymorphic in both states and the meaning of \<open>\<le>\<close>\<close>
"increasing f == \<Inter>z. stable {s. z \<le> f s}"
subsubsection\<open>The abstract type of programs\<close>
lemmas program_typedef =
Rep_Program Rep_Program_inverse Abs_Program_inverse
Program_def Init_def Acts_def AllowedActs_def mk_program_def
lemma Id_in_Acts [iff]: "Id \<in> Acts F"
apply (cut_tac x = F in Rep_Program)
apply (auto simp add: program_typedef)
done
lemma insert_Id_Acts [iff]: "insert Id (Acts F) = Acts F"
by (simp add: insert_absorb)
lemma Acts_nonempty [simp]: "Acts F \<noteq> {}"
by auto
lemma Id_in_AllowedActs [iff]: "Id \<in> AllowedActs F"
apply (cut_tac x = F in Rep_Program)
apply (auto simp add: program_typedef)
done
lemma insert_Id_AllowedActs [iff]: "insert Id (AllowedActs F) = AllowedActs F"
by (simp add: insert_absorb)
subsubsection\<open>Inspectors for type "program"\<close>
lemma Init_eq [simp]: "Init (mk_program (init,acts,allowed)) = init"
by (simp add: program_typedef)
lemma Acts_eq [simp]: "Acts (mk_program (init,acts,allowed)) = insert Id acts"
by (simp add: program_typedef)
lemma AllowedActs_eq [simp]:
"AllowedActs (mk_program (init,acts,allowed)) = insert Id allowed"
by (simp add: program_typedef)
subsubsection\<open>Equality for UNITY programs\<close>
lemma surjective_mk_program [simp]:
"mk_program (Init F, Acts F, AllowedActs F) = F"
apply (cut_tac x = F in Rep_Program)
apply (auto simp add: program_typedef)
apply (drule_tac f = Abs_Program in arg_cong)+
apply (simp add: program_typedef insert_absorb)
done
lemma program_equalityI:
"[| Init F = Init G; Acts F = Acts G; AllowedActs F = AllowedActs G |]
==> F = G"
apply (rule_tac t = F in surjective_mk_program [THEN subst])
apply (rule_tac t = G in surjective_mk_program [THEN subst], simp)
done
lemma program_equalityE:
"[| F = G;
[| Init F = Init G; Acts F = Acts G; AllowedActs F = AllowedActs G |]
==> P |] ==> P"
by simp
lemma program_equality_iff:
"(F=G) =
(Init F = Init G & Acts F = Acts G & AllowedActs F = AllowedActs G)"
by (blast intro: program_equalityI program_equalityE)
subsubsection\<open>co\<close>
lemma constrainsI:
"(!!act s s'. [| act \<in> Acts F; (s,s') \<in> act; s \<in> A |] ==> s' \<in> A')
==> F \<in> A co A'"
by (simp add: constrains_def, blast)
lemma constrainsD:
"[| F \<in> A co A'; act \<in> Acts F; (s,s') \<in> act; s \<in> A |] ==> s' \<in> A'"
by (unfold constrains_def, blast)
lemma constrains_empty [iff]: "F \<in> {} co B"
by (unfold constrains_def, blast)
lemma constrains_empty2 [iff]: "(F \<in> A co {}) = (A={})"
by (unfold constrains_def, blast)
lemma constrains_UNIV [iff]: "(F \<in> UNIV co B) = (B = UNIV)"
by (unfold constrains_def, blast)
lemma constrains_UNIV2 [iff]: "F \<in> A co UNIV"
by (unfold constrains_def, blast)
text\<open>monotonic in 2nd argument\<close>
lemma constrains_weaken_R:
"[| F \<in> A co A'; A'<=B' |] ==> F \<in> A co B'"
by (unfold constrains_def, blast)
text\<open>anti-monotonic in 1st argument\<close>
lemma constrains_weaken_L:
"[| F \<in> A co A'; B \<subseteq> A |] ==> F \<in> B co A'"
by (unfold constrains_def, blast)
lemma constrains_weaken:
"[| F \<in> A co A'; B \<subseteq> A; A'<=B' |] ==> F \<in> B co B'"
by (unfold constrains_def, blast)
subsubsection\<open>Union\<close>
lemma constrains_Un:
"[| F \<in> A co A'; F \<in> B co B' |] ==> F \<in> (A \<union> B) co (A' \<union> B')"
by (unfold constrains_def, blast)
lemma constrains_UN:
"(!!i. i \<in> I ==> F \<in> (A i) co (A' i))
==> F \<in> (\<Union>i \<in> I. A i) co (\<Union>i \<in> I. A' i)"
by (unfold constrains_def, blast)
lemma constrains_Un_distrib: "(A \<union> B) co C = (A co C) \<inter> (B co C)"
by (unfold constrains_def, blast)
lemma constrains_UN_distrib: "(\<Union>i \<in> I. A i) co B = (\<Inter>i \<in> I. A i co B)"
by (unfold constrains_def, blast)
lemma constrains_Int_distrib: "C co (A \<inter> B) = (C co A) \<inter> (C co B)"
by (unfold constrains_def, blast)
lemma constrains_INT_distrib: "A co (\<Inter>i \<in> I. B i) = (\<Inter>i \<in> I. A co B i)"
by (unfold constrains_def, blast)
subsubsection\<open>Intersection\<close>
lemma constrains_Int:
"[| F \<in> A co A'; F \<in> B co B' |] ==> F \<in> (A \<inter> B) co (A' \<inter> B')"
by (unfold constrains_def, blast)
lemma constrains_INT:
"(!!i. i \<in> I ==> F \<in> (A i) co (A' i))
==> F \<in> (\<Inter>i \<in> I. A i) co (\<Inter>i \<in> I. A' i)"
by (unfold constrains_def, blast)
lemma constrains_imp_subset: "F \<in> A co A' ==> A \<subseteq> A'"
by (unfold constrains_def, auto)
text\<open>The reasoning is by subsets since "co" refers to single actions
only. So this rule isn't that useful.\<close>
lemma constrains_trans:
"[| F \<in> A co B; F \<in> B co C |] ==> F \<in> A co C"
by (unfold constrains_def, blast)
lemma constrains_cancel:
"[| F \<in> A co (A' \<union> B); F \<in> B co B' |] ==> F \<in> A co (A' \<union> B')"
by (unfold constrains_def, clarify, blast)
subsubsection\<open>unless\<close>
lemma unlessI: "F \<in> (A-B) co (A \<union> B) ==> F \<in> A unless B"
by (unfold unless_def, assumption)
lemma unlessD: "F \<in> A unless B ==> F \<in> (A-B) co (A \<union> B)"
by (unfold unless_def, assumption)
subsubsection\<open>stable\<close>
lemma stableI: "F \<in> A co A ==> F \<in> stable A"
by (unfold stable_def, assumption)
lemma stableD: "F \<in> stable A ==> F \<in> A co A"
by (unfold stable_def, assumption)
lemma stable_UNIV [simp]: "stable UNIV = UNIV"
by (unfold stable_def constrains_def, auto)
subsubsection\<open>Union\<close>
lemma stable_Un:
"[| F \<in> stable A; F \<in> stable A' |] ==> F \<in> stable (A \<union> A')"
apply (unfold stable_def)
apply (blast intro: constrains_Un)
done
lemma stable_UN:
"(!!i. i \<in> I ==> F \<in> stable (A i)) ==> F \<in> stable (\<Union>i \<in> I. A i)"
apply (unfold stable_def)
apply (blast intro: constrains_UN)
done
lemma stable_Union:
"(!!A. A \<in> X ==> F \<in> stable A) ==> F \<in> stable (\<Union>X)"
by (unfold stable_def constrains_def, blast)
subsubsection\<open>Intersection\<close>
lemma stable_Int:
"[| F \<in> stable A; F \<in> stable A' |] ==> F \<in> stable (A \<inter> A')"
apply (unfold stable_def)
apply (blast intro: constrains_Int)
done
lemma stable_INT:
"(!!i. i \<in> I ==> F \<in> stable (A i)) ==> F \<in> stable (\<Inter>i \<in> I. A i)"
apply (unfold stable_def)
apply (blast intro: constrains_INT)
done
lemma stable_Inter:
"(!!A. A \<in> X ==> F \<in> stable A) ==> F \<in> stable (\<Inter>X)"
by (unfold stable_def constrains_def, blast)
lemma stable_constrains_Un:
"[| F \<in> stable C; F \<in> A co (C \<union> A') |] ==> F \<in> (C \<union> A) co (C \<union> A')"
by (unfold stable_def constrains_def, blast)
lemma stable_constrains_Int:
"[| F \<in> stable C; F \<in> (C \<inter> A) co A' |] ==> F \<in> (C \<inter> A) co (C \<inter> A')"
by (unfold stable_def constrains_def, blast)
(*[| F \<in> stable C; F \<in> (C \<inter> A) co A |] ==> F \<in> stable (C \<inter> A) *)
lemmas stable_constrains_stable = stable_constrains_Int[THEN stableI]
subsubsection\<open>invariant\<close>
lemma invariantI: "[| Init F \<subseteq> A; F \<in> stable A |] ==> F \<in> invariant A"
by (simp add: invariant_def)
text\<open>Could also say \<^term>\<open>invariant A \<inter> invariant B \<subseteq> invariant(A \<inter> B)\<close>\<close>
lemma invariant_Int:
"[| F \<in> invariant A; F \<in> invariant B |] ==> F \<in> invariant (A \<inter> B)"
by (auto simp add: invariant_def stable_Int)
subsubsection\<open>increasing\<close>
lemma increasingD:
"F \<in> increasing f ==> F \<in> stable {s. z \<subseteq> f s}"
by (unfold increasing_def, blast)
lemma increasing_constant [iff]: "F \<in> increasing (%s. c)"
by (unfold increasing_def stable_def, auto)
lemma mono_increasing_o:
"mono g ==> increasing f \<subseteq> increasing (g o f)"
apply (unfold increasing_def stable_def constrains_def, auto)
apply (blast intro: monoD order_trans)
done
(*Holds by the theorem (Suc m \<subseteq> n) = (m < n) *)
lemma strict_increasingD:
"!!z::nat. F \<in> increasing f ==> F \<in> stable {s. z < f s}"
by (simp add: increasing_def Suc_le_eq [symmetric])
(** The Elimination Theorem. The "free" m has become universally quantified!
Should the premise be !!m instead of \<forall>m ? Would make it harder to use
in forward proof. **)
lemma elimination:
"[| \<forall>m \<in> M. F \<in> {s. s x = m} co (B m) |]
==> F \<in> {s. s x \<in> M} co (\<Union>m \<in> M. B m)"
by (unfold constrains_def, blast)
text\<open>As above, but for the trivial case of a one-variable state, in which the
state is identified with its one variable.\<close>
lemma elimination_sing:
"(\<forall>m \<in> M. F \<in> {m} co (B m)) ==> F \<in> M co (\<Union>m \<in> M. B m)"
by (unfold constrains_def, blast)
subsubsection\<open>Theoretical Results from Section 6\<close>
lemma constrains_strongest_rhs:
"F \<in> A co (strongest_rhs F A )"
by (unfold constrains_def strongest_rhs_def, blast)
lemma strongest_rhs_is_strongest:
"F \<in> A co B ==> strongest_rhs F A \<subseteq> B"
by (unfold constrains_def strongest_rhs_def, blast)
subsubsection\<open>Ad-hoc set-theory rules\<close>
lemma Un_Diff_Diff [simp]: "A \<union> B - (A - B) = B"
by blast
lemma Int_Union_Union: "\<Union>B \<inter> A = \<Union>((%C. C \<inter> A)`B)"
by blast
text\<open>Needed for WF reasoning in WFair.thy\<close>
lemma Image_less_than [simp]: "less_than `` {k} = greaterThan k"
by blast
lemma Image_inverse_less_than [simp]: "less_than\<inverse> `` {k} = lessThan k"
by blast
subsection\<open>Partial versus Total Transitions\<close>
definition totalize_act :: "('a * 'a)set => ('a * 'a)set" where
"totalize_act act == act \<union> Id_on (-(Domain act))"
definition totalize :: "'a program => 'a program" where
"totalize F == mk_program (Init F,
totalize_act ` Acts F,
AllowedActs F)"
definition mk_total_program :: "('a set * ('a * 'a)set set * ('a * 'a)set set)
=> 'a program" where
"mk_total_program args == totalize (mk_program args)"
definition all_total :: "'a program => bool" where
"all_total F == \<forall>act \<in> Acts F. Domain act = UNIV"
lemma insert_Id_image_Acts: "f Id = Id ==> insert Id (f`Acts F) = f ` Acts F"
by (blast intro: sym [THEN image_eqI])
subsubsection\<open>Basic properties\<close>
lemma totalize_act_Id [simp]: "totalize_act Id = Id"
by (simp add: totalize_act_def)
lemma Domain_totalize_act [simp]: "Domain (totalize_act act) = UNIV"
by (auto simp add: totalize_act_def)
lemma Init_totalize [simp]: "Init (totalize F) = Init F"
by (unfold totalize_def, auto)
lemma Acts_totalize [simp]: "Acts (totalize F) = (totalize_act ` Acts F)"
by (simp add: totalize_def insert_Id_image_Acts)
lemma AllowedActs_totalize [simp]: "AllowedActs (totalize F) = AllowedActs F"
by (simp add: totalize_def)
lemma totalize_constrains_iff [simp]: "(totalize F \<in> A co B) = (F \<in> A co B)"
by (simp add: totalize_def totalize_act_def constrains_def, blast)
lemma totalize_stable_iff [simp]: "(totalize F \<in> stable A) = (F \<in> stable A)"
by (simp add: stable_def)
lemma totalize_invariant_iff [simp]:
"(totalize F \<in> invariant A) = (F \<in> invariant A)"
by (simp add: invariant_def)
lemma all_total_totalize: "all_total (totalize F)"
by (simp add: totalize_def all_total_def)
lemma Domain_iff_totalize_act: "(Domain act = UNIV) = (totalize_act act = act)"
by (force simp add: totalize_act_def)
lemma all_total_imp_totalize: "all_total F ==> (totalize F = F)"
apply (simp add: all_total_def totalize_def)
apply (rule program_equalityI)
apply (simp_all add: Domain_iff_totalize_act image_def)
done
lemma all_total_iff_totalize: "all_total F = (totalize F = F)"
apply (rule iffI)
apply (erule all_total_imp_totalize)
apply (erule subst)
apply (rule all_total_totalize)
done
lemma mk_total_program_constrains_iff [simp]:
"(mk_total_program args \<in> A co B) = (mk_program args \<in> A co B)"
by (simp add: mk_total_program_def)
subsection\<open>Rules for Lazy Definition Expansion\<close>
text\<open>They avoid expanding the full program, which is a large expression\<close>
lemma def_prg_Init:
"F = mk_total_program (init,acts,allowed) ==> Init F = init"
by (simp add: mk_total_program_def)
lemma def_prg_Acts:
"F = mk_total_program (init,acts,allowed)
==> Acts F = insert Id (totalize_act ` acts)"
by (simp add: mk_total_program_def)
lemma def_prg_AllowedActs:
"F = mk_total_program (init,acts,allowed)
==> AllowedActs F = insert Id allowed"
by (simp add: mk_total_program_def)
text\<open>An action is expanded if a pair of states is being tested against it\<close>
lemma def_act_simp:
"act = {(s,s'). P s s'} ==> ((s,s') \<in> act) = P s s'"
by (simp add: mk_total_program_def)
text\<open>A set is expanded only if an element is being tested against it\<close>
lemma def_set_simp: "A = B ==> (x \<in> A) = (x \<in> B)"
by (simp add: mk_total_program_def)
subsubsection\<open>Inspectors for type "program"\<close>
lemma Init_total_eq [simp]:
"Init (mk_total_program (init,acts,allowed)) = init"
by (simp add: mk_total_program_def)
lemma Acts_total_eq [simp]:
"Acts(mk_total_program(init,acts,allowed)) = insert Id (totalize_act`acts)"
by (simp add: mk_total_program_def)
lemma AllowedActs_total_eq [simp]:
"AllowedActs (mk_total_program (init,acts,allowed)) = insert Id allowed"
by (auto simp add: mk_total_program_def)
end
|
using IRDumps
using Test
@testset "IRDumps.jl" begin
# Write your tests here.
end
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Stephane Le Roux, 2007-02-20
excluded middle and decidability for relations.
*)
Require Import Relations LogicUtil.
Set Implicit Arguments.
Section S.
Variables (A : Type) (R : relation A).
Definition rel_midex := forall x y : A, R x y \/ ~R x y.
Definition rel_dec := forall x y, {R x y} + {~R x y}.
Lemma rel_dec_midex : rel_dec -> rel_midex.
Proof. do 3 intro. destruct (X x y); tauto. Qed.
Definition fun_rel_dec (f : A->A->bool) :=
forall x y, if f x y then R x y else ~R x y.
Lemma bool_rel_dec : {f : A->A->bool | fun_rel_dec f} -> rel_dec.
Proof. intros (f,H) x y. gen (H x y). case (f x y); intros; tauto. Qed.
Lemma rel_dec_bool : rel_dec -> {f : A->A->bool | fun_rel_dec f}.
Proof.
intro H. exists (fun x y : A => if H x y then true else false).
intros x y. destruct (H x y); trivial.
Qed.
Lemma fun_rel_dec_true : forall f x y, fun_rel_dec f -> f x y = true -> R x y.
Proof. intros. set (w := H x y). rewrite H0 in w. hyp. Qed.
Lemma fun_rel_dec_false : forall f x y,
fun_rel_dec f -> f x y = false -> ~R x y.
Proof. intros. set (w := H x y). rewrite H0 in w. hyp. Qed.
(***********************************************************************)
(** Leibniz equality relation *)
Definition eq_midex := forall x y : A, x=y \/ x<>y.
Definition eq_dec := forall x y : A, {x=y}+{x<>y}.
Lemma eq_dec_midex : eq_dec -> eq_midex.
Proof. do 3 intro. destruct (X x y); tauto. Qed.
End S.
|
The product of two scalars is equal to the scalar multiple of the product of the scalars. |
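One plausible formal reading of the statement above (an assumption here, since this record omits its formal counterpart) is the compatibility of scalar multiplication with multiplication of scalars: $(a\,b) \cdot v = a \cdot (b \cdot v)$ for scalars $a, b$ and a vector $v$.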
[STATEMENT]
lemma is_top_sorted_alt: "is_top_sorted R l \<longleftrightarrow> (\<forall>x y. (x,y)\<in>list_before_rel l \<longrightarrow> (y,x)\<notin>R\<^sup>*)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_top_sorted R l = (\<forall>x y. (x, y) \<in> list_before_rel l \<longrightarrow> (y, x) \<notin> R\<^sup>*)
[PROOF STEP]
unfolding is_top_sorted_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_before_rel l \<inter> (R\<^sup>*)\<inverse> = {}) = (\<forall>x y. (x, y) \<in> list_before_rel l \<longrightarrow> (y, x) \<notin> R\<^sup>*)
[PROOF STEP]
by auto |
module TotallyTotal
%default total
zero : Nat
zero = Z
|
SUBROUTINE SF_OPNF ( filnam, wrtflg, isffln, iflsrc, nparm,
+ parms, iret )
C************************************************************************
C* SF_OPNF *
C* *
C* This subroutine opens an existing surface data file. *
C* *
C* SF_OPNF ( FILNAM, WRTFLG, ISFFLN, IFLSRC, NPARM, PARMS, IRET ) *
C* *
C* Input parameters: *
C* FILNAM CHAR* Surface file name *
C* WRTFLG LOGICAL Write access flag *
C* *
C* Output parameters: *
C* ISFFLN INTEGER File number *
C* IFLSRC INTEGER Data source *
C* NPARM INTEGER Number of parameters *
C* PARMS (NPARM) CHAR*4 Parameter names *
C* IRET INTEGER Return code *
C* 0 = normal return *
C* -2 = file could not be opened *
C* -6 = file not surface file *
C* -22 = file name is blank *
C** *
C* Log: *
C* I. Graffman/RDS 5/87 *
C* M. desJardins/GSFC 6/88 Documentation *
C* M. desJardins/GSFC 4/90 Error for blank file name *
C* S. Schotz/GSC 8/90 Write error message for blank file name *
C************************************************************************
INCLUDE 'GEMPRM.PRM'
INCLUDE 'GMBDTA.CMN'
INCLUDE 'sfcmn.cmn'
C*
CHARACTER*(*) filnam, parms (*)
LOGICAL wrtflg
C*
LOGICAL shrflg
C-------------------------------------------------------------------------
C* Check for blank name.
C
CALL ST_LSTR ( filnam, lenf, ier )
IF ( lenf .eq. 0 ) THEN
iret = -22
CALL ER_WMSG ( 'SF', iret, filnam, ier )
RETURN
END IF
C
C* Open the file.
C
shrflg = .false.
CALL SF_OFIL ( filnam, wrtflg, shrflg, isffln, iflsrc, nparm,
+ parms, iret )
C*
RETURN
END
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Finite sets, based on AVL trees
------------------------------------------------------------------------
open import Relation.Binary
open import Relation.Binary.PropositionalEquality using (_≡_)
module Data.AVL.Sets
{k ℓ} {Key : Set k} {_<_ : Rel Key ℓ}
(isStrictTotalOrder : IsStrictTotalOrder _≡_ _<_)
where
import Data.AVL as AVL
open import Data.Bool
open import Data.List as List using (List)
open import Data.Maybe as Maybe
open import Data.Product as Prod using (_×_; _,_; proj₁)
open import Data.Unit
open import Function
open import Level
-- The set type. (Note that Set is a reserved word.)
private
open module S = AVL (const ⊤) isStrictTotalOrder
public using () renaming (Tree to ⟨Set⟩)
-- Repackaged functions.
empty : ⟨Set⟩
empty = S.empty
singleton : Key → ⟨Set⟩
singleton k = S.singleton k _
insert : Key → ⟨Set⟩ → ⟨Set⟩
insert k = S.insert k _
delete : Key → ⟨Set⟩ → ⟨Set⟩
delete = S.delete
_∈?_ : Key → ⟨Set⟩ → Bool
_∈?_ = S._∈?_
headTail : ⟨Set⟩ → Maybe (Key × ⟨Set⟩)
headTail s = Maybe.map (Prod.map proj₁ id) (S.headTail s)
initLast : ⟨Set⟩ → Maybe (⟨Set⟩ × Key)
initLast s = Maybe.map (Prod.map id proj₁) (S.initLast s)
fromList : List Key → ⟨Set⟩
fromList = S.fromList ∘ List.map (λ k → (k , _))
toList : ⟨Set⟩ → List Key
toList = List.map proj₁ ∘ S.toList
|
[STATEMENT]
lemma countableI': "inj_on (f::'a \<Rightarrow> 'b::countable) S \<Longrightarrow> countable S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. inj_on f S \<Longrightarrow> countable S
[PROOF STEP]
using comp_inj_on[of f S to_nat]
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>inj_on f S; inj_on to_nat (f ` S)\<rbrakk> \<Longrightarrow> inj_on (to_nat \<circ> f) S
goal (1 subgoal):
1. inj_on f S \<Longrightarrow> countable S
[PROOF STEP]
by (auto intro: countableI) |
Formal statement is: lemma convex_connected: fixes S :: "'a::real_normed_vector set" assumes "convex S" shows "connected S" Informal statement is: If $S$ is a convex set, then $S$ is connected. |
module Class.Show where
open import Data.String using (String)
record Show {a} (A : Set a) : Set a where
field
show : A -> String
open Show {{...}} public
|
`is_element/hasse_diagrams` := (A::set) -> proc(G)
global reason;
local D,R;
if not(`is_element/autorel`(A)(G)) then
reason := ["is_element/hasse_diagrams","G is not a relation on A",G,reason];
return false;
fi;
if not(`is_antisymmetric/autorel`(A)(G)) then
reason := ["is_element/hasse_diagrams","G is not antisymmetric",G];
return false;
fi;
D := `id/autorel`(A);
R := `o/autorel`(A)(G,G);
while R <> {} do
if R intersect G <> {} then
reason := ["is_element/hasse_diagrams","G has an arrow parallel to a longer path",G,R intersect G];
return false;
fi;
if R intersect D <> {} then
reason := ["is_element/hasse_diagrams","G has a loop",G,R intersect D];
return false;
fi;
R := `o/autorel`(A)(R,G);
od;
return true;
end:
`is_element/ranked_hasse_diagrams` := (A::set) -> proc(Gr)
local G,r,AA,rA,d,A1,r1,a,b,AA1,G1,M1,M2,Aa,A0,Ma,rMa,i;
global reason;
if not(type(Gr,list) and nops(Gr) = 2) then
reason := [convert(procname,string),"Gr is not a list of length two",Gr];
return false;
fi;
G,r := op(Gr);
AA := {seq(seq([a,b],b in A),a in A)} minus {seq([a,a],a in A)};
if not(type(G,set) and G minus AA = {}) then
reason := [convert(procname,string),"G is not a subset of A^2 minus Delta",G,A];
return false;
fi;
if not(type(r,table) and [indices(r)] = map(a -> [a],[op(A)])) then
reason := [convert(procname,string),"r is not a table indexed by A",r,A];
return false;
fi;
if A = {} then return true; fi;
rA := map(a -> r[a],A);
if not(type(rA,set(integer)) and min(op(rA)) = 0) then
reason := [convert(procname,string),"The values of r are not integers starting from 0",r,A];
return false;
fi;
d := max(rA);
if rA <> {seq(i,i=0..d)} then
reason := [convert(procname,string),"The values of r do not form an interval starting from 0",r,A];
return false;
fi;
if d = 0 then
if G = {} then
return true;
else
reason := [convert(procname,string),"All ranks are zero but G is not empty",G];
return false;
fi;
fi;
A1 := select(a -> (r[a] < d),A);
r1 := table();
for a in A1 do r1[a] := r[a]; od;
AA1 := {seq(seq([a,b],b in A1),a in A1)} minus {seq([a,a],a in A1)};
G1 := G intersect AA1;
if not(`is_element/ranked_hasse_diagrams`(A1)([G1,r1])) then
return false;
fi;
M1 := NULL;
for a in A1 do
Aa := select(b -> member([a,b],G1),A1);
if Aa = {} then M1 := M1,a; fi;
od:
M1 := {M1};
M2 := select(a -> (r[a] = d-1),M1);
A0 := A minus A1;
for a in A0 do
Ma := select(b -> member([b,a],G),A);
rMa := map(b -> r[b],Ma);
if not(member(d-1,rMa)) then
reason := [convert(procname,string),"a has rank d but does not cover anything of rank d-1",a,d,Ma];
return false;
fi;
od:
return true;
end:
|
# test on generic type
# We used to be able to handle small generic cases w/ GenericSVD alone (not GenericSchur)
# but the switch to GenericLinearAlgebra provides half-baked schur methods which trip us up.
using Pseudospectra, Test, GenericLinearAlgebra, GenericSchur
@testset "Generic(Big)" begin
A = Matrix{BigFloat}(Pseudospectra.grcar(8))
# ax is needed w/o eigvals(::Matrix{BigFloat})
opts = Dict{Symbol,Any}(:ax => [-1,3,-3,3], :npts => 20)
ps_data = new_matrix(A,opts)
driver!(ps_data,opts,gs)
@test iscomputed(ps_data)
# Just big enough to get to the inverse-Lanczos branch
# this is a stunt, so don't waste time with usual npts.
A = Matrix{BigFloat}(Pseudospectra.grcar(56))
opts = Dict{Symbol,Any}(:ax => [-1,3,-3,3], :npts => 10)
ps_data = new_matrix(A,opts)
driver!(ps_data,opts,gs)
@test iscomputed(ps_data)
end
|
function initialize!(integrator,cache::KuttaPRK2p5ConstantCache)
integrator.fsalfirst = integrator.f(integrator.uprev,integrator.p,integrator.t) # Pre-start fsal
integrator.kshortsize = 2
integrator.k = typeof(integrator.k)(undef, integrator.kshortsize)
# Avoid undefined entries if k is an array of arrays
integrator.fsallast = zero(integrator.fsalfirst)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
end
@muladd function perform_step!(integrator,cache::KuttaPRK2p5ConstantCache,repeat_step=false)
@unpack t,dt,uprev,u,f,p = integrator
@unpack α21,α31,α32,α41,α42,α43,α5_6 = cache
@unpack β1,β3,β5,β6,c2,c3,c4,c5_6 = cache
k1 = f(uprev, p, t)
k2 = f(uprev + dt*α21*k1, p, t + c2*dt)
k3 = f(uprev + dt*(α31*k1 + α32*k2), p, t + c3*dt)
k4 = f(uprev + dt*(α41*k1 + α42*k2 + α43*k3), p, t + c4*dt)
k5_6 = Array{typeof(k1)}(undef, 2)
if integrator.alg.threading == false
k5_6[1] = f(uprev + dt*(α5_6[1,1]*k1 + α5_6[1,2]*k2 + α5_6[1,3]*k3 + α5_6[1,4]*k4), p, t + c5_6[1]*dt)
k5_6[2] = f(uprev + dt*(α5_6[2,1]*k1 + α5_6[2,2]*k2 + α5_6[2,3]*k3 + α5_6[2,4]*k4), p, t + c5_6[2]*dt)
else
let
Threads.@threads for i in [1,2]
k5_6[i] = f(uprev + dt*(α5_6[i,1]*k1 + α5_6[i,2]*k2 + α5_6[i,3]*k3 + α5_6[i,4]*k4), p, t + c5_6[i]*dt)
end
end
end
u = uprev + dt*(β1*k1 + β3*k3 + β5*k5_6[1] + β6*k5_6[2])
k = f(u, p, t+dt)
integrator.fsallast = k # For interpolation, then FSAL'd
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
integrator.u = u
end
function initialize!(integrator,cache::KuttaPRK2p5Cache)
@unpack k,fsalfirst = cache
integrator.fsalfirst = fsalfirst
integrator.fsallast = k
integrator.kshortsize = 2
resize!(integrator.k, integrator.kshortsize)
integrator.f(integrator.fsalfirst,integrator.uprev,integrator.p,integrator.t) # FSAL for interpolation
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
end
@muladd function perform_step!(integrator,cache::KuttaPRK2p5Cache,repeat_step=false)
@unpack t,dt,uprev,u,f,p = integrator
@unpack k,k1,k2,k3,k4,k5_6,fsalfirst,tmp = cache
@unpack α21,α31,α32,α41,α42,α43,α5_6 = cache.tab
@unpack β1,β3,β5,β6,c2,c3,c4,c5_6 = cache.tab
f( k1, uprev, p, t)
@.. u = uprev + dt*α21*k1
f( k2, u, p, t + c2*dt)
@.. u = uprev + dt*(α31*k1 + α32*k2)
f( k3, u, p, t + c3*dt)
@.. u = uprev + dt*(α41*k1 + α42*k2 + α43*k3)
f( k4, u, p, t + c4*dt)
if integrator.alg.threading == false
@.. u = uprev + dt*(α5_6[1,1]*k1 + α5_6[1,2]*k2 + α5_6[1,3]*k3 + α5_6[1,4]*k4)
f( k5_6[1], u, p, t + c5_6[1]*dt)
@.. u = uprev + dt*(α5_6[2,1]*k1 + α5_6[2,2]*k2 + α5_6[2,3]*k3 + α5_6[2,4]*k4)
f( k5_6[2], u, p, t + c5_6[2]*dt)
else
tmps = (u, tmp)
let
Threads.@threads for i in [1,2]
@.. tmps[i] = uprev + dt*(α5_6[i,1]*k1 + α5_6[i,2]*k2 + α5_6[i,3]*k3 + α5_6[i,4]*k4)
f( k5_6[i], tmps[i], p, t + c5_6[i]*dt)
end
end
end
@.. u = uprev + dt*(β1*k1 + β3*k3 + β5*k5_6[1] + β6*k5_6[2])
f( k, u, p, t+dt)
end
|
```python
from IPython.display import Image
from IPython.core.display import HTML
from sympy import *; x,h,y = symbols("x h y")
Image(url= "https://i.imgur.com/pnSQKJk.png")
```
```python
dF = diff(1/ (4*sin(x) + 6*cos(x)))
dF.subs(x,0) #gives the correct slope of the tangent line
```
$\displaystyle - \frac{1}{9}$
```python
x0 = 0; y0 = 1/6
MofT = dF.subs(x,0)
y = y0 + MofT*(x - x0)
print(y)
```
0.166666666666667 - x/9
```python
# b = 0.166666666666667
```
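As a small follow-up sketch (assuming the same SymPy session, so `x`, `x0` and `MofT` are still defined), using `Rational(1, 6)` for the intercept keeps the tangent line exact instead of the float printed above.
```python
y_exact = Rational(1, 6) + MofT*(x - x0)  # exact intercept 1/6 rather than 0.1666...
print(y_exact)  # expected: 1/6 - x/9
```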
```python
Image(url= "https://i.imgur.com/A9gEwzV.png")
```
```python
```
|
__precompile__()
module Types
export IntP, FloatP
export DMDStruct
const IntP = Int64
const FloatP = Float64
mutable struct DMDStruct
facecon::Array{IntP,3}; # faces-to-DG nodes connectivities (used to obtain DG nodes on faces)
eblks::Array{IntP,2}; # blocks of elements for parallel computation
fblks::Array{IntP,2}; # blocks of faces for parallel computation
nbsd::Array{IntP,2}; # neighboring subdomains (neighbors)
elemsend::Array{IntP,2}; # elements to be sent to neighbors
elemrecv::Array{IntP,2}; # elements to be received from neighbors
elemsendpts::Array{IntP,2}; # markers for elements to be sent to neighbors
elemrecvpts::Array{IntP,2}; # markers for elements to be received from neighbors
elempart::Array{IntP,2}; # element partitions
elempartpts::Array{IntP,2}; # markers for element partitions
facepart::Array{IntP,2}; # face partitions
facepartpts::Array{IntP,2}; # markers for face partitions
facepartbnd::Array{IntP,2};
t2f::Array{IntP,2};
f::Array{IntP,2};
#elem2cpu::Array{IntP,2}; # element partitions
DMDStruct() = new();
end
end
|
[STATEMENT]
lemma struct_spec_f4: "struct_spec f4_sel add_pairs_canon add_basis_canon f4_red"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. struct_spec f4_sel add_pairs_canon add_basis_canon f4_red
[PROOF STEP]
using sel_spec_f4_sel ap_spec_add_pairs_canon ab_spec_add_basis_sorted compl_struct_f4_red
[PROOF STATE]
proof (prove)
using this:
sel_spec f4_sel
ap_spec add_pairs_canon
ab_spec (add_basis_sorted ?rel)
compl_struct f4_red
goal (1 subgoal):
1. struct_spec f4_sel add_pairs_canon add_basis_canon f4_red
[PROOF STEP]
by (rule struct_specI) |
Formal statement is: lemma non_extensible_Borsuk_map: fixes a :: "'a :: euclidean_space" assumes "compact s" and cin: "c \<in> components(- s)" and boc: "bounded c" and "a \<in> c" shows "\<not> (\<exists>g. continuous_on (s \<union> c) g \<and> g ` (s \<union> c) \<subseteq> sphere 0 1 \<and> (\<forall>x \<in> s. g x = inverse(norm(x - a)) *\<^sub>R (x - a)))" Informal statement is: If $s$ is a compact set in $\mathbb{R}^n$, $c$ is a bounded component of the complement of $s$, and $a \in c$, then the Borsuk map $x \mapsto (x - a)/\lVert x - a\rVert$ on $s$ cannot be extended to a continuous map from $s \cup c$ into the unit sphere. |
# getStudyField.r
# written by JuG
# May 08 2020
#' Returns values from selected API fields for a large set of study records
#' @author JuG
#' @description https://clinicaltrials.gov/api/gui/ref/api_urls
#' @param expr Search Expression (sep with +, use AND, OR, ...)
#' @param fields Study Fields (see https://clinicaltrials.gov/api/info/study_fields_list for a list of available fields)
#' @param max_rnk Maximum Rank (default is min(numberReturns, 20))
#' @details
#' @examples
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId"))
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId"),max_rnk=50)
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId", "BriefTitle"))
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId", "Acronym", "MinimumAge", "MaximumAge"))
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId", "Acronym", "MinimumAge", "MaximumAge"),max_rnk = 10)
#' getStudyField(expr = 'COVID+AND+hydroxychloroquine', fields = c("NCTId", "Acronym", "MinimumAge", "MaximumAge"),max_rnk = "MAX")
#' @return data.frame
#' @export
getStudyField<- function(expr, fields, max_rnk=NULL){
urlBase <- "https://clinicaltrials.gov/api/query/study_fields?"
urlExpr <- paste("expr=", expr, sep='')
fields_txt <- paste(fields, collapse="%2C")
urlFields <- paste("&fields=", fields_txt, sep='')
#urlRank = paste("&min_rnk=1&max_rnk=", max_rnk, sep='')
if(!is.null(max_rnk) && !is.numeric(max_rnk)){
# max_rnk = "MAX": run a preliminary one-record query to read NStudiesFound,
# then request every matching study
urlCount <- paste(urlBase, urlExpr, urlFields, "&min_rnk=1&max_rnk=1&fmt=xml", sep='')
count_result <- httr::GET(urlCount)
parsed_count <- XML::xmlParse(httr::content(count_result, as = "text"))
ee <- XML::getNodeSet(parsed_count, "//NStudiesFound")
max_rnk <- as.numeric(sapply(ee, XML::xmlValue))
}
urlRank <- paste("&min_rnk=1&max_rnk=", max_rnk, sep='')
urlFinal <- paste(urlBase,urlExpr,urlFields,urlRank,"&fmt=xml",sep='')
search_result <- httr::GET(urlFinal)
parsed_result <- XML::xmlParse(httr::content(search_result, as = "text"))
xmldf <- XML::xmlToDataFrame(nodes = XML::getNodeSet(parsed_result, "//StudyFields"))
colnames(xmldf) <- fields
return(xmldf)
}
|
[STATEMENT]
lemma pref_in_lists: "u \<le>p v \<Longrightarrow> v \<in> lists A \<Longrightarrow> u \<in> lists A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>u \<le>p v; v \<in> lists A\<rbrakk> \<Longrightarrow> u \<in> lists A
[PROOF STEP]
by (auto simp add: prefix_def) |
Formal statement is: lemma base_in_s: "base \<in> s" Informal statement is: The base is in the set $s$. |
There are a few things we should demand from a decent power supply. Not least, it shouldn't require oven-like temperatures to function. That's just a given.
"Fluoride batteries can have a higher energy density, which means that they may last longer – up to eight times longer than batteries in use today," says Caltech researcher Robert Grubbs, famous for winning a Nobel Prize in Chemistry in 2005.
The type of electrochemical technology supplying power to your smart devices makes use of positively charged lithium 'Li+' cations as a kind of chemical 'piston' to draw an electrical charge through a circuit.
At full charge, a supply of cations occupy the battery's anode. Once the circuit is closed, ions surge into the cathode, producing a current that does the all-important work. To reset the cell, all that's required is a voltage to 'push' the lithium piston back again.
"For a battery that lasts longer, you need to move a greater number of charges," says Simon Jones, a researcher at NASA's Jet Propulsion Laboratory.
"Moving multiply-charged metal cations is difficult, but a similar result can be achieved by moving several singly-charged anions, which travel with comparative ease."
"But fluoride can be challenging to work with, in particular because it's so corrosive and reactive," says Grubbs.
This isn't to say nobody has successfully made a functional fluoride ion battery. But the ions are part of a solid structure, which as you can imagine doesn't let them slip around too easily. Not at room temperature at least.
On finding the solvent did a fairly suitable job of allowing fluoride's anions to shuffle between electrodes at room temperature, the team ran models to find ways to tweak its performance with additives.
"We are still in the early stages of development, but this is the first rechargeable fluoride battery that works at room temperature," says Jones.
So, we might need to wait a bit longer for weekly phone recharges, but this is an exciting step, and we can hardly wait for this technology to come onto the market. |
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: BSD-2-Clause
*)
theory Word_Lemmas_32_Internal
imports Word_Lib_Sumo Machine_Word_32
begin
lemmas sint_eq_uint_32 = sint_eq_uint_2pl[where 'a=32, simplified]
lemmas sle_positive_32 = sle_le_2pl[where 'a=32, simplified]
lemmas sless_positive_32 = sless_less_2pl[where 'a=32, simplified]
lemma zero_le_sint_32:
"\<lbrakk> 0 \<le> (a :: word32); a < 0x80000000 \<rbrakk>
\<Longrightarrow> 0 \<le> sint a"
by (clarsimp simp: sint_eq_uint_32 unat_less_helper)
lemmas unat_add_simple = iffD1[OF unat_add_lem[where 'a = 32, folded word_bits_def]]
lemma upto_enum_inc_1:
"a < 2 ^ word_bits - 1
\<Longrightarrow> [(0:: 'a :: len word) .e. 1 + a] = [0.e.a] @ [(1+a)]"
using upper_trivial upto_enum_inc_1_len by force
lemmas upt_enum_offset_trivial =
upt_enum_offset_trivial[where 'a=32, folded word_bits_def]
lemmas unat32_eq_of_nat = unat_eq_of_nat[where 'a=32, folded word_bits_def]
declare mask_32_max_word[simp]
lemma le_32_mask_eq:
"(bits :: word32) \<le> 32 \<Longrightarrow> bits && mask 6 = bits"
by (fastforce elim: le_less_trans intro: less_mask_eq)
lemmas scast_1_32[simp] = scast_1[where 'a=32]
lemmas mask_32_id[simp] = mask_len_id[where 'a=32, folded word_bits_def]
lemmas t2p_shiftr_32 = t2p_shiftr[where 'a=32, folded word_bits_def]
lemma mask_eq1_nochoice:
"(x :: word32) && 1 = x
\<Longrightarrow> x = 0 \<or> x = 1"
using mask_eq1_nochoice len32 by force
lemmas const_le_unat_word_32 = const_le_unat[where 'a=32, folded word_bits_def]
lemmas createNewCaps_guard_helper =
createNewCaps_guard[where 'a=32, folded word_bits_def]
lemma word_log2_max_word32[simp]:
"word_log2 (w :: 32 word) < 32"
using word_log2_max[where w=w]
by (simp add: word_size)
(* FIXME: specialize using pow_sub_less_word *)
lemma mapping_two_power_16_64_inequality:
assumes sz: "sz \<le> 4" and len: "unat (len :: word32) = 2 ^ sz"
shows "unat (len * 8 - 1) \<le> 127"
using pow_sub_less[where 'a=32 and b=3, simplified]
proof -
have len2: "len = 2 ^ sz"
apply (rule word_unat.Rep_eqD, simp only: len)
using sz
apply simp
done
show ?thesis using two_power_increasing_less_1[where 'a=32 and n="sz + 3" and m=7]
by (simp add: word_le_nat_alt sz power_add len2 field_simps bintrunc_Suc_numeral)
qed
lemmas pre_helper2_32 = pre_helper2[where 'a=32, folded word_bits_def]
lemmas of_nat_shift_distinct_helper_machine =
of_nat_shift_distinct_helper[where 'a=32, folded word_bits_def]
lemmas ptr_add_distinct_helper_32 =
ptr_add_distinct_helper[where 'a=32, folded word_bits_def]
lemmas mask_out_eq_0_32 = mask_out_eq_0[where 'a=32, folded word_bits_def]
lemmas neg_mask_mask_unat_32 = neg_mask_mask_unat[where 'a=32, folded word_bits_def]
lemmas unat_less_iff_32 = unat_less_iff[where 'a=32, folded word_bits_def]
lemmas is_aligned_no_overflow3_32 = is_aligned_no_overflow3[where 'a=32, folded word_bits_def]
lemmas unat_ucast_16_32 = unat_signed_ucast_less_ucast[where 'a=16 and 'b=32, simplified]
(* FIXME: generalize? *)
lemma scast_mask_8:
"scast (mask 8 :: sword32) = (mask 8 :: word32)"
by (clarsimp simp: mask_eq)
lemmas ucast_le_8_32_equiv = ucast_le_up_down_iff[where 'a=8 and 'b=32, simplified]
lemma signed_unat_minus_one_32:
"unat (-1 :: 32 signed word) = 4294967295"
by (simp del: word_pow_0 diff_0 add: unat_sub_if' minus_one_word)
lemmas two_bits_cases_32 = two_bits_cases[where 'a=32, simplified]
lemmas word_ctz_not_minus_1_32 = word_ctz_not_minus_1[where 'a=32, simplified]
lemmas sint_ctz_32 = sint_ctz[where 'a=32, simplified]
(* FIXME: inline these? *)
lemmas scast_specific_plus32 =
scast_of_nat_signed_to_unsigned_add[where 'a=32 and x="word_ctz x" and y="0x20" for x,
simplified]
lemmas scast_specific_plus32_signed =
scast_of_nat_unsigned_to_signed_add[where 'a=32 and x="word_ctz x" and y="0x20" for x,
simplified]
lemma neq_0_unat: "x \<noteq> 0 \<Longrightarrow> 0 < unat x" for x::machine_word
by (simp add: unat_gt_0)
end |
theorem has_derivative_inverse_on: fixes f :: "'n::euclidean_space \<Rightarrow> 'n" assumes "open S" and derf: "\<And>x. x \<in> S \<Longrightarrow> (f has_derivative f'(x)) (at x)" and "\<And>x. x \<in> S \<Longrightarrow> g (f x) = x" and "f' x \<circ> g' x = id" and "x \<in> S" shows "(g has_derivative g'(x)) (at (f x))" |
/-
Copyright (c) 2018 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro
-/
import data.vector
import data.list.nodup
import data.list.of_fn
import control.applicative
/-!
# Additional theorems about the `vector` type
This file introduces the infix notation `::ᵥ` for `vector.cons`.
-/
universes u
variables {n : ℕ}
namespace vector
variables {α : Type*}
infixr `::ᵥ`:67 := vector.cons
attribute [simp] head_cons tail_cons
instance [inhabited α] : inhabited (vector α n) :=
⟨of_fn (λ _, default α)⟩
theorem to_list_injective : function.injective (@to_list α n) :=
subtype.val_injective
/-- Two `v w : vector α n` are equal iff they are equal at every single index. -/
@[ext] theorem ext : ∀ {v w : vector α n}
(h : ∀ m : fin n, vector.nth v m = vector.nth w m), v = w
| ⟨v, hv⟩ ⟨w, hw⟩ h := subtype.eq (list.ext_le (by rw [hv, hw])
(λ m hm hn, h ⟨m, hv ▸ hm⟩))
/-- The empty `vector` is a `subsingleton`. -/
instance zero_subsingleton : subsingleton (vector α 0) :=
⟨λ _ _, vector.ext (λ m, fin.elim0 m)⟩
@[simp] theorem cons_val (a : α) : ∀ (v : vector α n), (a ::ᵥ v).val = a :: v.val
| ⟨_, _⟩ := rfl
@[simp] theorem cons_head (a : α) : ∀ (v : vector α n), (a ::ᵥ v).head = a
| ⟨_, _⟩ := rfl
@[simp] theorem cons_tail (a : α) : ∀ (v : vector α n), (a ::ᵥ v).tail = v
| ⟨_, _⟩ := rfl
@[simp] theorem to_list_of_fn : ∀ {n} (f : fin n → α), to_list (of_fn f) = list.of_fn f
| 0 f := rfl
| (n+1) f := by rw [of_fn, list.of_fn_succ, to_list_cons, to_list_of_fn]
@[simp] theorem mk_to_list :
∀ (v : vector α n) h, (⟨to_list v, h⟩ : vector α n) = v
| ⟨l, h₁⟩ h₂ := rfl
@[simp] lemma to_list_map {β : Type*} (v : vector α n) (f : α → β) : (v.map f).to_list =
v.to_list.map f := by cases v; refl
theorem nth_eq_nth_le : ∀ (v : vector α n) (i),
nth v i = v.to_list.nth_le i.1 (by rw to_list_length; exact i.2)
| ⟨l, h⟩ i := rfl
@[simp] lemma nth_map {β : Type*} (v : vector α n) (f : α → β) (i : fin n) :
(v.map f).nth i = f (v.nth i) :=
by simp [nth_eq_nth_le]
@[simp] theorem nth_of_fn {n} (f : fin n → α) (i) : nth (of_fn f) i = f i :=
by rw [nth_eq_nth_le, ← list.nth_le_of_fn f];
congr; apply to_list_of_fn
@[simp] theorem of_fn_nth (v : vector α n) : of_fn (nth v) = v :=
begin
rcases v with ⟨l, rfl⟩,
apply to_list_injective,
change nth ⟨l, eq.refl _⟩ with λ i, nth ⟨l, rfl⟩ i,
simpa only [to_list_of_fn] using list.of_fn_nth_le _
end
@[simp] theorem nth_tail : ∀ (v : vector α n.succ) (i : fin n),
nth (tail v) i = nth v i.succ
| ⟨a::l, e⟩ ⟨i, h⟩ := by simp [nth_eq_nth_le]; refl
@[simp] theorem tail_val : ∀ (v : vector α n.succ), v.tail.val = v.val.tail
| ⟨a::l, e⟩ := rfl
/-- The `tail` of a `nil` vector is `nil`. -/
@[simp] lemma tail_nil : (@nil α).tail = nil := rfl
/-- The `tail` of a vector made up of one element is `nil`. -/
@[simp] lemma singleton_tail (v : vector α 1) : v.tail = vector.nil :=
by simp only [←cons_head_tail, eq_iff_true_of_subsingleton]
@[simp] theorem tail_of_fn {n : ℕ} (f : fin n.succ → α) :
tail (of_fn f) = of_fn (λ i, f i.succ) :=
(of_fn_nth _).symm.trans $ by congr; funext i; simp
/-- The list that makes up a `vector` made up of a single element,
retrieved via `to_list`, is equal to the list of that single element. -/
@[simp] lemma to_list_singleton (v : vector α 1) : v.to_list = [v.head] :=
begin
rw ←v.cons_head_tail,
simp only [to_list_cons, to_list_nil, cons_head, eq_self_iff_true,
and_self, singleton_tail]
end
/-- Mapping under `id` does not change a vector. -/
@[simp] lemma map_id {n : ℕ} (v : vector α n) : vector.map id v = v :=
vector.eq _ _ (by simp only [list.map_id, vector.to_list_map])
lemma mem_iff_nth {a : α} {v : vector α n} : a ∈ v.to_list ↔ ∃ i, v.nth i = a :=
by simp only [list.mem_iff_nth_le, fin.exists_iff, vector.nth_eq_nth_le];
exact ⟨λ ⟨i, hi, h⟩, ⟨i, by rwa to_list_length at hi, h⟩,
λ ⟨i, hi, h⟩, ⟨i, by rwa to_list_length, h⟩⟩
lemma nodup_iff_nth_inj {v : vector α n} : v.to_list.nodup ↔ function.injective v.nth :=
begin
cases v with l hl,
subst hl,
simp only [list.nodup_iff_nth_le_inj],
split,
{ intros h i j hij,
cases i, cases j, ext, apply h, simpa },
{ intros h i j hi hj hij,
have := @h ⟨i, hi⟩ ⟨j, hj⟩, simp [nth_eq_nth_le] at *, tauto }
end
@[simp] lemma nth_mem (i : fin n) (v : vector α n) : v.nth i ∈ v.to_list :=
by rw [nth_eq_nth_le]; exact list.nth_le_mem _ _ _
theorem head'_to_list : ∀ (v : vector α n.succ),
(to_list v).head' = some (head v)
| ⟨a::l, e⟩ := rfl
/-- Reverse the order of the elements of a vector. -/
def reverse (v : vector α n) : vector α n :=
⟨v.to_list.reverse, by simp⟩
/-- The `list` of a vector after a `reverse`, retrieved by `to_list` is equal
to the `list.reverse` after retrieving a vector's `to_list`. -/
lemma to_list_reverse {v : vector α n} : v.reverse.to_list = v.to_list.reverse := rfl
@[simp] theorem nth_zero : ∀ (v : vector α n.succ), nth v 0 = head v
| ⟨a::l, e⟩ := rfl
@[simp] theorem head_of_fn
{n : ℕ} (f : fin n.succ → α) : head (of_fn f) = f 0 :=
by rw [← nth_zero, nth_of_fn]
@[simp] theorem nth_cons_zero
(a : α) (v : vector α n) : nth (a ::ᵥ v) 0 = a :=
by simp [nth_zero]
/-- Accessing the `nth` element of a vector made up
of one element `x : α` is `x` itself. -/
@[simp] lemma nth_cons_nil {ix : fin 1}
(x : α) : nth (x ::ᵥ nil) ix = x :=
by convert nth_cons_zero x nil
@[simp] theorem nth_cons_succ
(a : α) (v : vector α n) (i : fin n) : nth (a ::ᵥ v) i.succ = nth v i :=
by rw [← nth_tail, tail_cons]
/-- The last element of a `vector`, given that the vector is at least one element. -/
def last (v : vector α (n + 1)) : α := v.nth (fin.last n)
/-- The last element of a `vector`, given that the vector is at least one element. -/
lemma last_def {v : vector α (n + 1)} : v.last = v.nth (fin.last n) := rfl
/-- The `last` element of a vector is the `head` of the `reverse` vector. -/
lemma reverse_nth_zero {v : vector α (n + 1)} : v.reverse.head = v.last :=
begin
have : 0 = v.to_list.length - 1 - n,
{ simp only [nat.add_succ_sub_one, add_zero, to_list_length, nat.sub_self,
list.length_reverse] },
rw [←nth_zero, last_def, nth_eq_nth_le, nth_eq_nth_le],
simp_rw [to_list_reverse, fin.val_eq_coe, fin.coe_last, fin.coe_zero, this],
rw list.nth_le_reverse,
end
section scan
variables {β : Type*}
variables (f : β → α → β) (b : β)
variables (v : vector α n)
/--
Construct a `vector β (n + 1)` from a `vector α n` by scanning `f : β → α → β`
from the "left", that is, from 0 to `fin.last n`, using `b : β` as the starting value.
-/
def scanl : vector β (n + 1) :=
⟨list.scanl f b v.to_list, by rw [list.length_scanl, to_list_length]⟩
/-- Providing an empty vector to `scanl` gives the starting value `b : β`. -/
@[simp] lemma scanl_nil : scanl f b nil = b ::ᵥ nil := rfl
/--
The recursive step of `scanl` splits a vector `x ::ᵥ v : vector α (n + 1)`
into the provided starting value `b : β` and the recursed `scanl`
`f b x : β` as the starting value.
This lemma is the `cons` version of `scanl_nth`.
-/
@[simp] lemma scanl_cons (x : α) : scanl f b (x ::ᵥ v) = b ::ᵥ scanl f (f b x) v :=
by simpa only [scanl, to_list_cons]
/--
The underlying `list` of a `vector` after a `scanl` is the `list.scanl`
of the underlying `list` of the original `vector`.
-/
@[simp] lemma scanl_val : ∀ {v : vector α n}, (scanl f b v).val = list.scanl f b v.val
| ⟨l, hl⟩ := rfl
/--
The `to_list` of a `vector` after a `scanl` is the `list.scanl`
of the `to_list` of the original `vector`.
-/
@[simp] lemma to_list_scanl : (scanl f b v).to_list = list.scanl f b v.to_list := rfl
/--
The recursive step of `scanl` splits a vector made up of a single element
`x ::ᵥ nil : vector α 1` into a `vector` of the provided starting value `b : β`
and the mapped `f b x : β` as the last value.
-/
@[simp] lemma scanl_singleton (v : vector α 1) : scanl f b v = b ::ᵥ f b v.head ::ᵥ nil :=
begin
rw [←cons_head_tail v],
simp only [scanl_cons, scanl_nil, cons_head, singleton_tail]
end
/--
The first element of `scanl` of a vector `v : vector α n`,
retrieved via `head`, is the starting value `b : β`.
-/
@[simp] lemma scanl_head : (scanl f b v).head = b :=
begin
cases n,
{ have : v = nil := by simp only [eq_iff_true_of_subsingleton],
simp only [this, scanl_nil, cons_head] },
{ rw ←cons_head_tail v,
simp only [←nth_zero, nth_eq_nth_le, to_list_scanl,
to_list_cons, list.scanl, fin.val_zero', list.nth_le] }
end
/--
For an index `i : fin n`, the `nth` element of `scanl` of a
vector `v : vector α n` at `i.succ`, is equal to the application
function `f : β → α → β` of the `i.cast_succ` element of
`scanl f b v` and `nth v i`.
This lemma is the `nth` version of `scanl_cons`.
-/
@[simp] lemma scanl_nth (i : fin n) :
(scanl f b v).nth i.succ = f ((scanl f b v).nth i.cast_succ) (v.nth i) :=
begin
cases n,
{ exact fin_zero_elim i },
induction n with n hn generalizing b,
{ have i0 : i = 0 := by simp only [eq_iff_true_of_subsingleton],
simpa only [scanl_singleton, i0, nth_zero] },
{ rw [←cons_head_tail v, scanl_cons, nth_cons_succ],
refine fin.cases _ _ i,
{ simp only [nth_zero, scanl_head, fin.cast_succ_zero, cons_head] },
{ intro i',
simp only [hn, fin.cast_succ_fin_succ, nth_cons_succ] } }
end
end scan
/-- Monadic analogue of `of_fn`: run the monadic actions `f 0, …, f (n-1)` in order
and collect their results into a vector. -/
def m_of_fn {m} [monad m] {α : Type u} : ∀ {n}, (fin n → m α) → m (vector α n)
| 0 f := pure nil
| (n+1) f := do a ← f 0, v ← m_of_fn (λi, f i.succ), pure (a ::ᵥ v)
theorem m_of_fn_pure {m} [monad m] [is_lawful_monad m] {α} :
∀ {n} (f : fin n → α), @m_of_fn m _ _ _ (λ i, pure (f i)) = pure (of_fn f)
| 0 f := rfl
| (n+1) f := by simp [m_of_fn, @m_of_fn_pure n, of_fn]
/-- Monadic analogue of `vector.map`: apply the monadic function `f` to every
entry of the vector, from head to tail. -/
def mmap {m} [monad m] {α} {β : Type u} (f : α → m β) :
∀ {n}, vector α n → m (vector β n)
| 0 xs := pure nil
| (n+1) xs := do h' ← f xs.head, t' ← @mmap n xs.tail, pure (h' ::ᵥ t')
@[simp] theorem mmap_nil {m} [monad m] {α β} (f : α → m β) :
mmap f nil = pure nil := rfl
@[simp] theorem mmap_cons {m} [monad m] {α β} (f : α → m β) (a) :
∀ {n} (v : vector α n), mmap f (a ::ᵥ v) =
do h' ← f a, t' ← mmap f v, pure (h' ::ᵥ t')
| _ ⟨l, rfl⟩ := rfl
/-- Define `C v` by induction on `v : vector α (n + 1)`, a vector of
at least one element.
This function has two arguments: `h0` handles the base case on `C nil`,
and `hs` defines the inductive step using `∀ x : α, C v → C (x ::ᵥ v)`. -/
@[elab_as_eliminator] def induction_on
{α : Type*} {n : ℕ}
{C : Π {n : ℕ}, vector α n → Sort*}
(v : vector α (n + 1))
(h0 : C nil)
(hs : ∀ {n : ℕ} {x : α} {w : vector α n}, C w → C (x ::ᵥ w)) :
C v :=
begin
induction n with n hn,
{ rw ←v.cons_head_tail,
convert hs h0 },
{ rw ←v.cons_head_tail,
apply hs,
apply hn }
end
/-- Convert a `vector α n` to an `array n α`. -/
def to_array : vector α n → array n α
| ⟨xs, h⟩ := cast (by rw h) xs.to_array
section insert_nth
variable {a : α}
/-- `insert_nth a i v` inserts `a` into `v` at position `i`, giving a vector of length `n + 1`. -/
def insert_nth (a : α) (i : fin (n+1)) (v : vector α n) : vector α (n+1) :=
⟨v.1.insert_nth i a,
begin
rw [list.length_insert_nth, v.2],
rw [v.2, ← nat.succ_le_succ_iff],
exact i.2
end⟩
lemma insert_nth_val {i : fin (n+1)} {v : vector α n} :
(v.insert_nth a i).val = v.val.insert_nth i.1 a :=
rfl
@[simp] lemma remove_nth_val {i : fin n} :
∀{v : vector α n}, (remove_nth i v).val = v.val.remove_nth i
| ⟨l, hl⟩ := rfl
lemma remove_nth_insert_nth {v : vector α n} {i : fin (n+1)} :
remove_nth i (insert_nth a i v) = v :=
subtype.eq $ list.remove_nth_insert_nth i.1 v.1
lemma remove_nth_insert_nth' {v : vector α (n+1)} :
∀{i : fin (n+1)} {j : fin (n+2)},
remove_nth (j.succ_above i) (insert_nth a j v) = insert_nth a (i.pred_above j) (remove_nth i v)
| ⟨i, hi⟩ ⟨j, hj⟩ :=
begin
dsimp [insert_nth, remove_nth, fin.succ_above, fin.pred_above],
simp only [subtype.mk_eq_mk],
split_ifs,
{ convert (list.insert_nth_remove_nth_of_ge i (j-1) _ _ _).symm,
{ convert (nat.succ_pred_eq_of_pos _).symm, exact lt_of_le_of_lt (zero_le _) h, },
{ apply remove_nth_val, },
{ convert hi, exact v.2, },
{ exact nat.le_pred_of_lt h, }, },
{ convert (list.insert_nth_remove_nth_of_le i j _ _ _).symm,
{ apply remove_nth_val, },
{ convert hi, exact v.2, },
{ simpa using h, }, }
end
lemma insert_nth_comm (a b : α) (i j : fin (n+1)) (h : i ≤ j) :
∀(v : vector α n),
(v.insert_nth a i).insert_nth b j.succ = (v.insert_nth b j).insert_nth a i.cast_succ
| ⟨l, hl⟩ :=
begin
refine subtype.eq _,
simp only [insert_nth_val, fin.coe_succ, fin.cast_succ, fin.val_eq_coe, fin.coe_cast_add],
apply list.insert_nth_comm,
{ assumption },
{ rw hl, exact nat.le_of_succ_le_succ j.2 }
end
end insert_nth
section update_nth
/-- `update_nth v n a` replaces the `n`th element of `v` with `a` -/
def update_nth (v : vector α n) (i : fin n) (a : α) : vector α n :=
⟨v.1.update_nth i.1 a, by rw [list.update_nth_length, v.2]⟩
@[simp] lemma nth_update_nth_same (v : vector α n) (i : fin n) (a : α) :
(v.update_nth i a).nth i = a :=
by cases v; cases i; simp [vector.update_nth, vector.nth_eq_nth_le]
lemma nth_update_nth_of_ne {v : vector α n} {i j : fin n} (h : i ≠ j) (a : α) :
(v.update_nth i a).nth j = v.nth j :=
by cases v; cases i; cases j; simp [vector.update_nth, vector.nth_eq_nth_le,
list.nth_le_update_nth_of_ne (fin.vne_of_ne h)]
lemma nth_update_nth_eq_if {v : vector α n} {i j : fin n} (a : α) :
(v.update_nth i a).nth j = if i = j then a else v.nth j :=
by split_ifs; try {simp *}; try {rw nth_update_nth_of_ne}; assumption
end update_nth
end vector
namespace vector
section traverse
variables {F G : Type u → Type u}
variables [applicative F] [applicative G]
open applicative functor
open list (cons) nat
private def traverse_aux {α β : Type u} (f : α → F β) :
Π (x : list α), F (vector β x.length)
| [] := pure vector.nil
| (x::xs) := vector.cons <$> f x <*> traverse_aux xs
protected def traverse {α β : Type u} (f : α → F β) : vector α n → F (vector β n)
| ⟨v, Hv⟩ := cast (by rw Hv) $ traverse_aux f v
variables [is_lawful_applicative F] [is_lawful_applicative G]
variables {α β γ : Type u}
@[simp] protected lemma traverse_def
(f : α → F β) (x : α) : ∀ (xs : vector α n),
(x ::ᵥ xs).traverse f = cons <$> f x <*> xs.traverse f :=
by rintro ⟨xs, rfl⟩; refl
protected lemma id_traverse : ∀ (x : vector α n), x.traverse id.mk = x :=
begin
rintro ⟨x, rfl⟩, dsimp [vector.traverse, cast],
induction x with x xs IH, {refl},
simp! [IH], refl
end
open function
protected lemma comp_traverse (f : β → F γ) (g : α → G β) : ∀ (x : vector α n),
vector.traverse (comp.mk ∘ functor.map f ∘ g) x =
comp.mk (vector.traverse f <$> vector.traverse g x) :=
by rintro ⟨x, rfl⟩; dsimp [vector.traverse, cast];
induction x with x xs; simp! [cast, *] with functor_norm;
[refl, simp [(∘)]]
protected lemma traverse_eq_map_id {α β} (f : α → β) : ∀ (x : vector α n),
x.traverse (id.mk ∘ f) = id.mk (map f x) :=
by rintro ⟨x, rfl⟩; simp!;
induction x; simp! * with functor_norm; refl
variable (η : applicative_transformation F G)
protected lemma naturality {α β : Type*}
(f : α → F β) : ∀ (x : vector α n),
η (x.traverse f) = x.traverse (@η _ ∘ f) :=
by rintro ⟨x, rfl⟩; simp! [cast];
induction x with x xs IH; simp! * with functor_norm
end traverse
instance : traversable.{u} (flip vector n) :=
{ traverse := @vector.traverse n,
map := λ α β, @vector.map.{u u} α β n }
instance : is_lawful_traversable.{u} (flip vector n) :=
{ id_traverse := @vector.id_traverse n,
comp_traverse := @vector.comp_traverse n,
traverse_eq_map_id := @vector.traverse_eq_map_id n,
naturality := @vector.naturality n,
id_map := by intros; cases x; simp! [(<$>)],
comp_map := by intros; cases x; simp! [(<$>)] }
end vector
|
lemma compact_affinity: fixes s :: "'a::real_normed_vector set" assumes "compact s" shows "compact ((\<lambda>x. a + c *\<^sub>R x) ` s)" |
(* Author: Tobias Nipkow *)
subsection "Widening and Narrowing"
theory Abs_Int3
imports Abs_Int2_ivl
begin
class widen =
fixes widen :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<nabla>" 65)
class narrow =
fixes narrow :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<triangle>" 65)
class wn = widen + narrow + order +
assumes widen1: "x \<le> x \<nabla> y"
assumes widen2: "y \<le> x \<nabla> y"
assumes narrow1: "y \<le> x \<Longrightarrow> y \<le> x \<triangle> y"
assumes narrow2: "y \<le> x \<Longrightarrow> x \<triangle> y \<le> x"
begin
lemma narrowid[simp]: "x \<triangle> x = x"
by (rule order.antisym) (simp_all add: narrow1 narrow2)
end
lemma top_widen_top[simp]: "\<top> \<nabla> \<top> = (\<top>::_::{wn,order_top})"
by (metis eq_iff top_greatest widen2)
instantiation ivl :: wn
begin
definition "widen_rep p1 p2 =
(if is_empty_rep p1 then p2 else if is_empty_rep p2 then p1 else
let (l1,h1) = p1; (l2,h2) = p2
in (if l2 < l1 then Minf else l1, if h1 < h2 then Pinf else h1))"
lift_definition widen_ivl :: "ivl \<Rightarrow> ivl \<Rightarrow> ivl" is widen_rep
by(auto simp: widen_rep_def eq_ivl_iff)
definition "narrow_rep p1 p2 =
(if is_empty_rep p1 \<or> is_empty_rep p2 then empty_rep else
let (l1,h1) = p1; (l2,h2) = p2
in (if l1 = Minf then l2 else l1, if h1 = Pinf then h2 else h1))"
lift_definition narrow_ivl :: "ivl \<Rightarrow> ivl \<Rightarrow> ivl" is narrow_rep
by(auto simp: narrow_rep_def eq_ivl_iff)
instance
proof
qed (transfer, auto simp: widen_rep_def narrow_rep_def le_iff_subset \<gamma>_rep_def subset_eq is_empty_rep_def empty_rep_def eq_ivl_def split: if_splits extended.splits)+
end
instantiation st :: ("{order_top,wn}")wn
begin
lift_definition widen_st :: "'a st \<Rightarrow> 'a st \<Rightarrow> 'a st" is "map2_st_rep (\<nabla>)"
by(auto simp: eq_st_def)
lift_definition narrow_st :: "'a st \<Rightarrow> 'a st \<Rightarrow> 'a st" is "map2_st_rep (\<triangle>)"
by(auto simp: eq_st_def)
instance
proof (standard, goal_cases)
case 1 thus ?case by transfer (simp add: less_eq_st_rep_iff widen1)
next
case 2 thus ?case by transfer (simp add: less_eq_st_rep_iff widen2)
next
case 3 thus ?case by transfer (simp add: less_eq_st_rep_iff narrow1)
next
case 4 thus ?case by transfer (simp add: less_eq_st_rep_iff narrow2)
qed
end
instantiation option :: (wn)wn
begin
fun widen_option where
"None \<nabla> x = x" |
"x \<nabla> None = x" |
"(Some x) \<nabla> (Some y) = Some(x \<nabla> y)"
fun narrow_option where
"None \<triangle> x = None" |
"x \<triangle> None = None" |
"(Some x) \<triangle> (Some y) = Some(x \<triangle> y)"
instance
proof (standard, goal_cases)
case (1 x y) thus ?case
by(induct x y rule: widen_option.induct)(simp_all add: widen1)
next
case (2 x y) thus ?case
by(induct x y rule: widen_option.induct)(simp_all add: widen2)
next
case (3 x y) thus ?case
by(induct x y rule: narrow_option.induct) (simp_all add: narrow1)
next
case (4 y x) thus ?case
by(induct x y rule: narrow_option.induct) (simp_all add: narrow2)
qed
end
definition map2_acom :: "('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a acom \<Rightarrow> 'a acom \<Rightarrow> 'a acom"
where
"map2_acom f C1 C2 = annotate (\<lambda>p. f (anno C1 p) (anno C2 p)) (strip C1)"
instantiation acom :: (widen)widen
begin
definition "widen_acom = map2_acom (\<nabla>)"
instance ..
end
instantiation acom :: (narrow)narrow
begin
definition "narrow_acom = map2_acom (\<triangle>)"
instance ..
end
lemma strip_map2_acom[simp]:
"strip C1 = strip C2 \<Longrightarrow> strip(map2_acom f C1 C2) = strip C1"
by(simp add: map2_acom_def)
(*by(induct f C1 C2 rule: map2_acom.induct) simp_all*)
lemma strip_widen_acom[simp]:
"strip C1 = strip C2 \<Longrightarrow> strip(C1 \<nabla> C2) = strip C1"
by(simp add: widen_acom_def)
lemma strip_narrow_acom[simp]:
"strip C1 = strip C2 \<Longrightarrow> strip(C1 \<triangle> C2) = strip C1"
by(simp add: narrow_acom_def)
lemma narrow1_acom: "C2 \<le> C1 \<Longrightarrow> C2 \<le> C1 \<triangle> (C2::'a::wn acom)"
by(simp add: narrow_acom_def narrow1 map2_acom_def less_eq_acom_def size_annos)
lemma narrow2_acom: "C2 \<le> C1 \<Longrightarrow> C1 \<triangle> (C2::'a::wn acom) \<le> C1"
by(simp add: narrow_acom_def narrow2 map2_acom_def less_eq_acom_def size_annos)
subsubsection "Pre-fixpoint computation"
definition iter_widen :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('a::{order,widen})option"
where "iter_widen f = while_option (\<lambda>x. \<not> f x \<le> x) (\<lambda>x. x \<nabla> f x)"
definition iter_narrow :: "('a \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> ('a::{order,narrow})option"
where "iter_narrow f = while_option (\<lambda>x. x \<triangle> f x < x) (\<lambda>x. x \<triangle> f x)"
definition pfp_wn :: "('a::{order,widen,narrow} \<Rightarrow> 'a) \<Rightarrow> 'a \<Rightarrow> 'a option"
where "pfp_wn f x =
(case iter_widen f x of None \<Rightarrow> None | Some p \<Rightarrow> iter_narrow f p)"
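(* Editorial sketch, not part of the original development: pfp_wn is a two-phase computation.
   Phase 1, iter_widen, iterates  x := x \<nabla> f x  until  f x \<le> x  holds, i.e. until a
   pre-fixpoint of f is reached; widening forces this ascent to terminate.  Phase 2,
   iter_narrow, iterates  x := x \<triangle> f x  as long as this strictly decreases, improving the
   pre-fixpoint without losing  f x \<le> x  (by narrow1/narrow2 and monotonicity of f,
   see iter_narrow_pfp below). *)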
lemma iter_widen_pfp: "iter_widen f x = Some p \<Longrightarrow> f p \<le> p"
by(auto simp add: iter_widen_def dest: while_option_stop)
lemma iter_widen_inv:
assumes "!!x. P x \<Longrightarrow> P(f x)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<nabla> x2)" and "P x"
and "iter_widen f x = Some y" shows "P y"
using while_option_rule[where P = "P", OF _ assms(4)[unfolded iter_widen_def]]
by (blast intro: assms(1-3))
lemma strip_while: fixes f :: "'a acom \<Rightarrow> 'a acom"
assumes "\<forall>C. strip (f C) = strip C" and "while_option P f C = Some C'"
shows "strip C' = strip C"
using while_option_rule[where P = "\<lambda>C'. strip C' = strip C", OF _ assms(2)]
by (metis assms(1))
lemma strip_iter_widen: fixes f :: "'a::{order,widen} acom \<Rightarrow> 'a acom"
assumes "\<forall>C. strip (f C) = strip C" and "iter_widen f C = Some C'"
shows "strip C' = strip C"
proof-
have "\<forall>C. strip(C \<nabla> f C) = strip C"
by (metis assms(1) strip_map2_acom widen_acom_def)
from strip_while[OF this] assms(2) show ?thesis by(simp add: iter_widen_def)
qed
lemma iter_narrow_pfp:
assumes mono: "!!x1 x2::_::wn acom. P x1 \<Longrightarrow> P x2 \<Longrightarrow> x1 \<le> x2 \<Longrightarrow> f x1 \<le> f x2"
and Pinv: "!!x. P x \<Longrightarrow> P(f x)" "!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<triangle> x2)"
and "P p0" and "f p0 \<le> p0" and "iter_narrow f p0 = Some p"
shows "P p \<and> f p \<le> p"
proof-
let ?Q = "%p. P p \<and> f p \<le> p \<and> p \<le> p0"
have "?Q (p \<triangle> f p)" if Q: "?Q p" for p
proof auto
note P = conjunct1[OF Q] and 12 = conjunct2[OF Q]
note 1 = conjunct1[OF 12] and 2 = conjunct2[OF 12]
let ?p' = "p \<triangle> f p"
show "P ?p'" by (blast intro: P Pinv)
have "f ?p' \<le> f p" by(rule mono[OF \<open>P (p \<triangle> f p)\<close> P narrow2_acom[OF 1]])
also have "\<dots> \<le> ?p'" by(rule narrow1_acom[OF 1])
finally show "f ?p' \<le> ?p'" .
have "?p' \<le> p" by (rule narrow2_acom[OF 1])
also have "p \<le> p0" by(rule 2)
finally show "?p' \<le> p0" .
qed
thus ?thesis
using while_option_rule[where P = ?Q, OF _ assms(6)[simplified iter_narrow_def]]
by (blast intro: assms(4,5) le_refl)
qed
lemma pfp_wn_pfp:
assumes mono: "!!x1 x2::_::wn acom. P x1 \<Longrightarrow> P x2 \<Longrightarrow> x1 \<le> x2 \<Longrightarrow> f x1 \<le> f x2"
and Pinv: "P x" "!!x. P x \<Longrightarrow> P(f x)"
"!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<nabla> x2)"
"!!x1 x2. P x1 \<Longrightarrow> P x2 \<Longrightarrow> P(x1 \<triangle> x2)"
and pfp_wn: "pfp_wn f x = Some p" shows "P p \<and> f p \<le> p"
proof-
from pfp_wn obtain p0
where its: "iter_widen f x = Some p0" "iter_narrow f p0 = Some p"
by(auto simp: pfp_wn_def split: option.splits)
have "P p0" by (blast intro: iter_widen_inv[where P="P"] its(1) Pinv(1-3))
thus ?thesis
by - (assumption |
rule iter_narrow_pfp[where P=P] mono Pinv(2,4) iter_widen_pfp its)+
qed
lemma strip_pfp_wn:
"\<lbrakk> \<forall>C. strip(f C) = strip C; pfp_wn f C = Some C' \<rbrakk> \<Longrightarrow> strip C' = strip C"
by(auto simp add: pfp_wn_def iter_narrow_def split: option.splits)
(metis (mono_tags) strip_iter_widen strip_narrow_acom strip_while)
locale Abs_Int_wn = Abs_Int_inv_mono where \<gamma>=\<gamma>
for \<gamma> :: "'av::{wn,bounded_lattice} \<Rightarrow> val set"
begin
definition AI_wn :: "com \<Rightarrow> 'av st option acom option" where
"AI_wn c = pfp_wn (step' \<top>) (bot c)"
lemma AI_wn_correct: "AI_wn c = Some C \<Longrightarrow> CS c \<le> \<gamma>\<^sub>c C"
proof(simp add: CS_def AI_wn_def)
assume 1: "pfp_wn (step' \<top>) (bot c) = Some C"
have 2: "strip C = c \<and> step' \<top> C \<le> C"
by(rule pfp_wn_pfp[where x="bot c"]) (simp_all add: 1 mono_step'_top)
have pfp: "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c C"
proof(rule order_trans)
show "step (\<gamma>\<^sub>o \<top>) (\<gamma>\<^sub>c C) \<le> \<gamma>\<^sub>c (step' \<top> C)"
by(rule step_step')
show "... \<le> \<gamma>\<^sub>c C"
by(rule mono_gamma_c[OF conjunct2[OF 2]])
qed
have 3: "strip (\<gamma>\<^sub>c C) = c" by(simp add: strip_pfp_wn[OF _ 1])
have "lfp c (step (\<gamma>\<^sub>o \<top>)) \<le> \<gamma>\<^sub>c C"
by(rule lfp_lowerbound[simplified,where f="step (\<gamma>\<^sub>o \<top>)", OF 3 pfp])
thus "lfp c (step UNIV) \<le> \<gamma>\<^sub>c C" by simp
qed
end
global_interpretation Abs_Int_wn
where \<gamma> = \<gamma>_ivl and num' = num_ivl and plus' = "(+)"
and test_num' = in_ivl
and inv_plus' = inv_plus_ivl and inv_less' = inv_less_ivl
defines AI_wn_ivl = AI_wn
..
subsubsection "Tests"
definition "step_up_ivl n = ((\<lambda>C. C \<nabla> step_ivl \<top> C)^^n)"
definition "step_down_ivl n = ((\<lambda>C. C \<triangle> step_ivl \<top> C)^^n)"
text\<open>For \<^const>\<open>test3_ivl\<close>, \<^const>\<open>AI_ivl\<close> needed as many iterations as
the loop took to execute. In contrast, \<^const>\<open>AI_wn_ivl\<close> converges in a
constant number of steps:\<close>
value "show_acom (step_up_ivl 1 (bot test3_ivl))"
value "show_acom (step_up_ivl 2 (bot test3_ivl))"
value "show_acom (step_up_ivl 3 (bot test3_ivl))"
value "show_acom (step_up_ivl 4 (bot test3_ivl))"
value "show_acom (step_up_ivl 5 (bot test3_ivl))"
value "show_acom (step_up_ivl 6 (bot test3_ivl))"
value "show_acom (step_up_ivl 7 (bot test3_ivl))"
value "show_acom (step_up_ivl 8 (bot test3_ivl))"
value "show_acom (step_down_ivl 1 (step_up_ivl 8 (bot test3_ivl)))"
value "show_acom (step_down_ivl 2 (step_up_ivl 8 (bot test3_ivl)))"
value "show_acom (step_down_ivl 3 (step_up_ivl 8 (bot test3_ivl)))"
value "show_acom (step_down_ivl 4 (step_up_ivl 8 (bot test3_ivl)))"
value "show_acom_opt (AI_wn_ivl test3_ivl)"
text\<open>Now all the analyses terminate:\<close>
value "show_acom_opt (AI_wn_ivl test4_ivl)"
value "show_acom_opt (AI_wn_ivl test5_ivl)"
value "show_acom_opt (AI_wn_ivl test6_ivl)"
subsubsection "Generic Termination Proof"
lemma top_on_opt_widen:
"top_on_opt o1 X \<Longrightarrow> top_on_opt o2 X \<Longrightarrow> top_on_opt (o1 \<nabla> o2 :: _ st option) X"
apply(induct o1 o2 rule: widen_option.induct)
apply (auto)
by transfer simp
lemma top_on_opt_narrow:
"top_on_opt o1 X \<Longrightarrow> top_on_opt o2 X \<Longrightarrow> top_on_opt (o1 \<triangle> o2 :: _ st option) X"
apply(induct o1 o2 rule: narrow_option.induct)
apply (auto)
by transfer simp
(* FIXME mk anno abbrv *)
lemma annos_map2_acom[simp]: "strip C2 = strip C1 \<Longrightarrow>
annos(map2_acom f C1 C2) = map (%(x,y).f x y) (zip (annos C1) (annos C2))"
by(simp add: map2_acom_def list_eq_iff_nth_eq size_annos anno_def[symmetric] size_annos_same[of C1 C2])
lemma top_on_acom_widen:
"\<lbrakk>top_on_acom C1 X; strip C1 = strip C2; top_on_acom C2 X\<rbrakk>
\<Longrightarrow> top_on_acom (C1 \<nabla> C2 :: _ st option acom) X"
by(auto simp add: widen_acom_def top_on_acom_def)(metis top_on_opt_widen in_set_zipE)
lemma top_on_acom_narrow:
"\<lbrakk>top_on_acom C1 X; strip C1 = strip C2; top_on_acom C2 X\<rbrakk>
\<Longrightarrow> top_on_acom (C1 \<triangle> C2 :: _ st option acom) X"
by(auto simp add: narrow_acom_def top_on_acom_def)(metis top_on_opt_narrow in_set_zipE)
text\<open>The assumptions for widening and narrowing differ because during
narrowing we have the invariant \<^prop>\<open>y \<le> x\<close> (where \<open>y\<close> is the next
iterate), but during widening there is no such invariant, there we only have
that not yet \<^prop>\<open>y \<le> x\<close>. This complicates the termination proof for
widening.\<close>
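(* Editorial example, instantiating widen_rep/narrow_rep above on intervals:
   [0,2] \<nabla> [0,5] = [0,Pinf]  because the upper bound grew (2 < 5) and is pushed to Pinf;
   [0,Pinf] \<triangle> [0,5] = [0,5]  because narrowing only replaces Minf/Pinf bounds by the
   corresponding bounds of its second argument.  Note that [0,5] \<le> [0,Pinf], matching the
   invariant y \<le> x assumed during narrowing. *)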
locale Measure_wn = Measure1 where m=m
for m :: "'av::{order_top,wn} \<Rightarrow> nat" +
fixes n :: "'av \<Rightarrow> nat"
assumes m_anti_mono: "x \<le> y \<Longrightarrow> m x \<ge> m y"
assumes m_widen: "~ y \<le> x \<Longrightarrow> m(x \<nabla> y) < m x"
assumes n_narrow: "y \<le> x \<Longrightarrow> x \<triangle> y < x \<Longrightarrow> n(x \<triangle> y) < n x"
begin
lemma m_s_anti_mono_rep: assumes "\<forall>x. S1 x \<le> S2 x"
shows "(\<Sum>x\<in>X. m (S2 x)) \<le> (\<Sum>x\<in>X. m (S1 x))"
proof-
from assms have "\<forall>x. m(S1 x) \<ge> m(S2 x)" by (metis m_anti_mono)
thus "(\<Sum>x\<in>X. m (S2 x)) \<le> (\<Sum>x\<in>X. m (S1 x))" by (metis sum_mono)
qed
lemma m_s_anti_mono: "S1 \<le> S2 \<Longrightarrow> m_s S1 X \<ge> m_s S2 X"
unfolding m_s_def
apply (transfer fixing: m)
apply(simp add: less_eq_st_rep_iff eq_st_def m_s_anti_mono_rep)
done
lemma m_s_widen_rep: assumes "finite X" "S1 = S2 on -X" "\<not> S2 x \<le> S1 x"
shows "(\<Sum>x\<in>X. m (S1 x \<nabla> S2 x)) < (\<Sum>x\<in>X. m (S1 x))"
proof-
have 1: "\<forall>x\<in>X. m(S1 x) \<ge> m(S1 x \<nabla> S2 x)"
by (metis m_anti_mono wn_class.widen1)
have "x \<in> X" using assms(2,3)
by(auto simp add: Ball_def)
hence 2: "\<exists>x\<in>X. m(S1 x) > m(S1 x \<nabla> S2 x)"
using assms(3) m_widen by blast
from sum_strict_mono_ex1[OF \<open>finite X\<close> 1 2]
show ?thesis .
qed
lemma m_s_widen: "finite X \<Longrightarrow> fun S1 = fun S2 on -X ==>
~ S2 \<le> S1 \<Longrightarrow> m_s (S1 \<nabla> S2) X < m_s S1 X"
apply(auto simp add: less_st_def m_s_def)
apply (transfer fixing: m)
apply(auto simp add: less_eq_st_rep_iff m_s_widen_rep)
done
lemma m_o_anti_mono: "finite X \<Longrightarrow> top_on_opt o1 (-X) \<Longrightarrow> top_on_opt o2 (-X) \<Longrightarrow>
o1 \<le> o2 \<Longrightarrow> m_o o1 X \<ge> m_o o2 X"
proof(induction o1 o2 rule: less_eq_option.induct)
case 1 thus ?case by (simp add: m_o_def)(metis m_s_anti_mono)
next
case 2 thus ?case
by(simp add: m_o_def le_SucI m_s_h split: option.splits)
next
case 3 thus ?case by simp
qed
lemma m_o_widen: "\<lbrakk> finite X; top_on_opt S1 (-X); top_on_opt S2 (-X); \<not> S2 \<le> S1 \<rbrakk> \<Longrightarrow>
m_o (S1 \<nabla> S2) X < m_o S1 X"
by(auto simp: m_o_def m_s_h less_Suc_eq_le m_s_widen split: option.split)
lemma m_c_widen:
"strip C1 = strip C2 \<Longrightarrow> top_on_acom C1 (-vars C1) \<Longrightarrow> top_on_acom C2 (-vars C2)
\<Longrightarrow> \<not> C2 \<le> C1 \<Longrightarrow> m_c (C1 \<nabla> C2) < m_c C1"
apply(auto simp: m_c_def widen_acom_def map2_acom_def size_annos[symmetric] anno_def[symmetric] sum_list_sum_nth)
apply(subgoal_tac "length(annos C2) = length(annos C1)")
prefer 2 apply (simp add: size_annos_same2)
apply (auto)
apply(rule sum_strict_mono_ex1)
apply(auto simp add: m_o_anti_mono vars_acom_def anno_def top_on_acom_def top_on_opt_widen widen1 less_eq_acom_def listrel_iff_nth)
apply(rule_tac x=p in bexI)
apply (auto simp: vars_acom_def m_o_widen top_on_acom_def)
done
definition n_s :: "'av st \<Rightarrow> vname set \<Rightarrow> nat" ("n\<^sub>s") where
"n\<^sub>s S X = (\<Sum>x\<in>X. n(fun S x))"
lemma n_s_narrow_rep:
assumes "finite X" "S1 = S2 on -X" "\<forall>x. S2 x \<le> S1 x" "\<forall>x. S1 x \<triangle> S2 x \<le> S1 x"
"S1 x \<noteq> S1 x \<triangle> S2 x"
shows "(\<Sum>x\<in>X. n (S1 x \<triangle> S2 x)) < (\<Sum>x\<in>X. n (S1 x))"
proof-
have 1: "\<forall>x. n(S1 x \<triangle> S2 x) \<le> n(S1 x)"
by (metis assms(3) assms(4) eq_iff less_le_not_le n_narrow)
have "x \<in> X" by (metis Compl_iff assms(2) assms(5) narrowid)
hence 2: "\<exists>x\<in>X. n(S1 x \<triangle> S2 x) < n(S1 x)"
by (metis assms(3-5) eq_iff less_le_not_le n_narrow)
show ?thesis
apply(rule sum_strict_mono_ex1[OF \<open>finite X\<close>]) using 1 2 by blast+
qed
lemma n_s_narrow: "finite X \<Longrightarrow> fun S1 = fun S2 on -X \<Longrightarrow> S2 \<le> S1 \<Longrightarrow> S1 \<triangle> S2 < S1
\<Longrightarrow> n\<^sub>s (S1 \<triangle> S2) X < n\<^sub>s S1 X"
apply(auto simp add: less_st_def n_s_def)
apply (transfer fixing: n)
apply(auto simp add: less_eq_st_rep_iff eq_st_def fun_eq_iff n_s_narrow_rep)
done
definition n_o :: "'av st option \<Rightarrow> vname set \<Rightarrow> nat" ("n\<^sub>o") where
"n\<^sub>o opt X = (case opt of None \<Rightarrow> 0 | Some S \<Rightarrow> n\<^sub>s S X + 1)"
lemma n_o_narrow:
"top_on_opt S1 (-X) \<Longrightarrow> top_on_opt S2 (-X) \<Longrightarrow> finite X
\<Longrightarrow> S2 \<le> S1 \<Longrightarrow> S1 \<triangle> S2 < S1 \<Longrightarrow> n\<^sub>o (S1 \<triangle> S2) X < n\<^sub>o S1 X"
apply(induction S1 S2 rule: narrow_option.induct)
apply(auto simp: n_o_def n_s_narrow)
done
definition n_c :: "'av st option acom \<Rightarrow> nat" ("n\<^sub>c") where
"n\<^sub>c C = sum_list (map (\<lambda>a. n\<^sub>o a (vars C)) (annos C))"
lemma less_annos_iff: "(C1 < C2) = (C1 \<le> C2 \<and>
(\<exists>i<length (annos C1). annos C1 ! i < annos C2 ! i))"
by(metis (opaque_lifting, no_types) less_le_not_le le_iff_le_annos size_annos_same2)
lemma n_c_narrow: "strip C1 = strip C2
\<Longrightarrow> top_on_acom C1 (- vars C1) \<Longrightarrow> top_on_acom C2 (- vars C2)
\<Longrightarrow> C2 \<le> C1 \<Longrightarrow> C1 \<triangle> C2 < C1 \<Longrightarrow> n\<^sub>c (C1 \<triangle> C2) < n\<^sub>c C1"
apply(auto simp: n_c_def narrow_acom_def sum_list_sum_nth)
apply(subgoal_tac "length(annos C2) = length(annos C1)")
prefer 2 apply (simp add: size_annos_same2)
apply (auto)
apply(simp add: less_annos_iff le_iff_le_annos)
apply(rule sum_strict_mono_ex1)
apply (auto simp: vars_acom_def top_on_acom_def)
apply (metis n_o_narrow nth_mem finite_cvars less_imp_le le_less order_refl)
apply(rule_tac x=i in bexI)
prefer 2 apply simp
apply(rule n_o_narrow[where X = "vars(strip C2)"])
apply (simp_all)
done
end
lemma iter_widen_termination:
fixes m :: "'a::wn acom \<Rightarrow> nat"
assumes P_f: "\<And>C. P C \<Longrightarrow> P(f C)"
and P_widen: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> P(C1 \<nabla> C2)"
and m_widen: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> ~ C2 \<le> C1 \<Longrightarrow> m(C1 \<nabla> C2) < m C1"
and "P C" shows "\<exists>C'. iter_widen f C = Some C'"
proof(simp add: iter_widen_def,
rule measure_while_option_Some[where P = P and f=m])
show "P C" by(rule \<open>P C\<close>)
next
fix C assume "P C" "\<not> f C \<le> C" thus "P (C \<nabla> f C) \<and> m (C \<nabla> f C) < m C"
by(simp add: P_f P_widen m_widen)
qed
lemma iter_narrow_termination:
fixes n :: "'a::wn acom \<Rightarrow> nat"
assumes P_f: "\<And>C. P C \<Longrightarrow> P(f C)"
and P_narrow: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> P(C1 \<triangle> C2)"
and mono: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> C1 \<le> C2 \<Longrightarrow> f C1 \<le> f C2"
and n_narrow: "\<And>C1 C2. P C1 \<Longrightarrow> P C2 \<Longrightarrow> C2 \<le> C1 \<Longrightarrow> C1 \<triangle> C2 < C1 \<Longrightarrow> n(C1 \<triangle> C2) < n C1"
and init: "P C" "f C \<le> C" shows "\<exists>C'. iter_narrow f C = Some C'"
proof(simp add: iter_narrow_def,
rule measure_while_option_Some[where f=n and P = "%C. P C \<and> f C \<le> C"])
show "P C \<and> f C \<le> C" using init by blast
next
fix C assume 1: "P C \<and> f C \<le> C" and 2: "C \<triangle> f C < C"
hence "P (C \<triangle> f C)" by(simp add: P_f P_narrow)
moreover then have "f (C \<triangle> f C) \<le> C \<triangle> f C"
by (metis narrow1_acom narrow2_acom 1 mono order_trans)
moreover have "n (C \<triangle> f C) < n C" using 1 2 by(simp add: n_narrow P_f)
ultimately show "(P (C \<triangle> f C) \<and> f (C \<triangle> f C) \<le> C \<triangle> f C) \<and> n(C \<triangle> f C) < n C"
by blast
qed
locale Abs_Int_wn_measure = Abs_Int_wn where \<gamma>=\<gamma> + Measure_wn where m=m
for \<gamma> :: "'av::{wn,bounded_lattice} \<Rightarrow> val set" and m :: "'av \<Rightarrow> nat"
subsubsection "Termination: Intervals"
definition m_rep :: "eint2 \<Rightarrow> nat" where
"m_rep p = (if is_empty_rep p then 3 else
let (l,h) = p in (case l of Minf \<Rightarrow> 0 | _ \<Rightarrow> 1) + (case h of Pinf \<Rightarrow> 0 | _ \<Rightarrow> 1))"
lift_definition m_ivl :: "ivl \<Rightarrow> nat" is m_rep
by(auto simp: m_rep_def eq_ivl_iff)
lemma m_ivl_nice: "m_ivl[l,h] = (if [l,h] = \<bottom> then 3 else
(if l = Minf then 0 else 1) + (if h = Pinf then 0 else 1))"
unfolding bot_ivl_def
by transfer (auto simp: m_rep_def eq_ivl_empty split: extended.split)
lemma m_ivl_height: "m_ivl iv \<le> 3"
by transfer (simp add: m_rep_def split: prod.split extended.split)
lemma m_ivl_anti_mono: "y \<le> x \<Longrightarrow> m_ivl x \<le> m_ivl y"
by transfer
(auto simp: m_rep_def is_empty_rep_def \<gamma>_rep_cases le_iff_subset
split: prod.split extended.splits if_splits)
lemma m_ivl_widen:
"~ y \<le> x \<Longrightarrow> m_ivl(x \<nabla> y) < m_ivl x"
by transfer
(auto simp: m_rep_def widen_rep_def is_empty_rep_def \<gamma>_rep_cases le_iff_subset
split: prod.split extended.splits if_splits)
definition n_ivl :: "ivl \<Rightarrow> nat" where
"n_ivl iv = 3 - m_ivl iv"
lemma n_ivl_narrow:
"x \<triangle> y < x \<Longrightarrow> n_ivl(x \<triangle> y) < n_ivl x"
unfolding n_ivl_def
apply(subst (asm) less_le_not_le)
apply transfer
by(auto simp add: m_rep_def narrow_rep_def is_empty_rep_def empty_rep_def \<gamma>_rep_cases le_iff_subset
split: prod.splits if_splits extended.split)
global_interpretation Abs_Int_wn_measure
where \<gamma> = \<gamma>_ivl and num' = num_ivl and plus' = "(+)"
and test_num' = in_ivl
and inv_plus' = inv_plus_ivl and inv_less' = inv_less_ivl
and m = m_ivl and n = n_ivl and h = 3
proof (standard, goal_cases)
case 2 thus ?case by(rule m_ivl_anti_mono)
next
case 1 thus ?case by(rule m_ivl_height)
next
case 3 thus ?case by(rule m_ivl_widen)
next
case 4 from 4(2) show ?case by(rule n_ivl_narrow)
\<comment> \<open>note that the first assumption is unnecessary for intervals\<close>
qed
lemma iter_winden_step_ivl_termination:
"\<exists>C. iter_widen (step_ivl \<top>) (bot c) = Some C"
apply(rule iter_widen_termination[where m = "m_c" and P = "%C. strip C = c \<and> top_on_acom C (- vars C)"])
apply (auto simp add: m_c_widen top_on_bot top_on_step'[simplified comp_def vars_acom_def]
vars_acom_def top_on_acom_widen)
done
lemma iter_narrow_step_ivl_termination:
"top_on_acom C (- vars C) \<Longrightarrow> step_ivl \<top> C \<le> C \<Longrightarrow>
\<exists>C'. iter_narrow (step_ivl \<top>) C = Some C'"
apply(rule iter_narrow_termination[where n = "n_c" and P = "%C'. strip C = strip C' \<and> top_on_acom C' (-vars C')"])
apply(auto simp: top_on_step'[simplified comp_def vars_acom_def]
mono_step'_top n_c_narrow vars_acom_def top_on_acom_narrow)
done
theorem AI_wn_ivl_termination:
"\<exists>C. AI_wn_ivl c = Some C"
apply(auto simp: AI_wn_def pfp_wn_def iter_winden_step_ivl_termination
split: option.split)
apply(rule iter_narrow_step_ivl_termination)
apply(rule conjunct2)
apply(rule iter_widen_inv[where f = "step' \<top>" and P = "%C. c = strip C & top_on_acom C (- vars C)"])
apply(auto simp: top_on_acom_widen top_on_step'[simplified comp_def vars_acom_def]
iter_widen_pfp top_on_bot vars_acom_def)
done
(*unused_thms Abs_Int_init - *)
subsubsection "Counterexamples"
text\<open>Widening is increasing by assumption, but \<^prop>\<open>x \<le> f x\<close> is not an invariant of widening.
It can already be lost after the first step:\<close>
lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y"
and "x \<le> f x" and "\<not> f x \<le> x" shows "x \<nabla> f x \<le> f(x \<nabla> f x)"
nitpick[card = 3, expect = genuine, show_consts, timeout = 120]
(*
1 < 2 < 3,
f x = 2,
x widen y = 3 -- guarantees termination with top=3
x = 1
Now f is mono, x <= f x, not f x <= x
but x widen f x = 3, f 3 = 2, but not 3 <= 2
*)
oops
text\<open>Widening terminates but may converge more slowly than Kleene iteration.
In the following model, Kleene iteration goes from 0 to the least pfp
in one step but widening takes 2 steps to reach a strictly larger pfp:\<close>
lemma assumes "!!x y::'a::wn. x \<le> y \<Longrightarrow> f x \<le> f y"
and "x \<le> f x" and "\<not> f x \<le> x" and "f(f x) \<le> f x"
shows "f(x \<nabla> f x) \<le> x \<nabla> f x"
nitpick[card = 4, expect = genuine, show_consts, timeout = 120]
(*
0 < 1 < 2 < 3
f: 1 1 3 3
0 widen 1 = 2
2 widen 3 = 3
and x widen y arbitrary, eg 3, which guarantees termination
Kleene: f(f 0) = f 1 = 1 <= 1 = f 1
but
because not f 0 <= 0, we obtain 0 widen f 0 = 0 widen 1 = 2,
which is again not a pfp: not f 2 = 3 <= 2
Another widening step yields 2 widen f 2 = 2 widen 3 = 3
*)
oops
end
|
theory BExp imports AExp begin
subsection "Boolean Expressions"
datatype bexp = Bc bool | Not bexp | And bexp bexp | Less aexp aexp
fun bval :: "bexp \<Rightarrow> state \<Rightarrow> bool" where
"bval (Bc v) s = v" |
"bval (Not b) s = (\<not> bval b s)" |
"bval (And b\<^sub>1 b\<^sub>2) s = (bval b\<^sub>1 s \<and> bval b\<^sub>2 s)" |
"bval (Less a\<^sub>1 a\<^sub>2) s = (aval a\<^sub>1 s < aval a\<^sub>2 s)"
value "bval (Less (V ''x'') (Plus (N 3) (V ''y'')))
<''x'' := 3, ''y'' := 1>"
text{* To improve automation: *}
lemma bval_And_if[simp]:
"bval (And b1 b2) s = (if bval b1 s then bval b2 s else False)"
by(simp)
declare bval.simps(3)[simp del] --"remove the original eqn"
subsection "Constant Folding"
text{* Optimizing constructors: *}
fun less :: "aexp \<Rightarrow> aexp \<Rightarrow> bexp" where
"less (N n\<^sub>1) (N n\<^sub>2) = Bc(n\<^sub>1 < n\<^sub>2)" |
"less a\<^sub>1 a\<^sub>2 = Less a\<^sub>1 a\<^sub>2"
fun "and" :: "bexp \<Rightarrow> bexp \<Rightarrow> bexp" where
"and (Bc True) b = b" |
"and b (Bc True) = b" |
"and (Bc False) b = Bc False" |
"and b (Bc False) = Bc False" |
"and b\<^sub>1 b\<^sub>2 = And b\<^sub>1 b\<^sub>2"
lemma bval_and[simp]: "bval (and b1 b2) s = (bval b1 s \<and> bval b2 s)"
apply(induction b1 b2 rule: and.induct)
apply simp_all
done
fun not :: "bexp \<Rightarrow> bexp" where
"not (Bc True) = Bc False" |
"not (Bc False) = Bc True" |
"not b = Not b"
lemma bval_not[simp]: "bval (not b) s = (\<not> bval b s)"
apply(induction b rule: not.induct)
apply simp_all
done
text{* Now the overall optimizer: *}
fun bsimp :: "bexp \<Rightarrow> bexp" where
"bsimp (Bc v) = Bc v" |
"bsimp (Not b) = not(bsimp b)" |
"bsimp (And b\<^sub>1 b\<^sub>2) = and (bsimp b\<^sub>1) (bsimp b\<^sub>2)" |
"bsimp (Less a\<^sub>1 a\<^sub>2) = less (asimp a\<^sub>1) (asimp a\<^sub>2)"
value "bsimp (And (Less (N 0) (N 1)) b)"
value "bsimp (And (Less (N 1) (N 0)) (Bc True))"
theorem "bval (bsimp b) s = bval b s"
apply(induction b)
apply simp_all
done
end
|
/-
Copyright (c) 2022 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.preadditive.functor_category
import category_theory.linear.default
/-!
# Linear structure on functor categories
If `C` and `D` are categories and `D` is `R`-linear,
then `C ⥤ D` is also `R`-linear.
-/
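/-
Editorial note (not from the original file): the `R`-module structure defined below is
pointwise, i.e. `(r • α).app X = r • α.app X` for a natural transformation `α : F ⟶ G`;
this is exactly the content of the `app_smul` lemma proved further down.
-/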
open_locale big_operators
namespace category_theory
open category_theory.limits linear
variables {R : Type*} [semiring R]
variables {C D : Type*} [category C] [category D] [preadditive D] [linear R D]
instance functor_category_linear : linear R (C ⥤ D) :=
{ hom_module := λ F G,
{ smul := λ r α,
{ app := λ X, r • α.app X,
naturality' := by { intros, rw [comp_smul, smul_comp, α.naturality] } },
one_smul := by { intros, ext, apply one_smul },
zero_smul := by { intros, ext, apply zero_smul },
smul_zero := by { intros, ext, apply smul_zero },
add_smul := by { intros, ext, apply add_smul },
smul_add := by { intros, ext, apply smul_add },
mul_smul := by { intros, ext, apply mul_smul } },
smul_comp' := by { intros, ext, apply smul_comp },
comp_smul' := by { intros, ext, apply comp_smul } }
namespace nat_trans
variables {F G : C ⥤ D}
/-- Application of a natural transformation at a fixed object,
as an `R`-linear map -/
@[simps] def app_linear_map (X : C) : (F ⟶ G) →ₗ[R] (F.obj X ⟶ G.obj X) :=
{ to_fun := λ α, α.app X,
map_add' := λ _ _, rfl,
map_smul' := λ _ _, rfl, }
@[simp] lemma app_smul (X : C) (r : R) (α : F ⟶ G) : (r • α).app X = r • α.app X := rfl
end nat_trans
end category_theory
|
#' @export
NiarPredictionAtt <- function(data,NiarTreeModel){
Results = list()
for(i in 1:nrow(data)){
Results[[i]] <- FindTheBest(data[i, ], NiarTreeModel)
}
# root <- NiarTreeModel[[1]]
# rootL <- NiarTreeModel$SonL
# rootR <- NiarTreeModel$SonR
# rootcase <- root$root
# DiscValue <- rootcase[root$AttDiscriminant]
# CasePrediction <- rootcase[nrow(rootcase)]
#
return(Results)
#return(lapply(X=CaseToCheck, FindTheBest,NiarTreeModel))
}
#' @export
FindTheBest <- function(CaseToCheck,NiarTreeModel){
root <- NiarTreeModel[[1]]
rootcase <- root$root
rootL <- NiarTreeModel$SonL
rootR <- NiarTreeModel$SonR
if (CaseToCheck[root$AttDiscriminant] <= rootcase[root$AttDiscriminant])
{
if(CaseToCheck[root$AttDiscriminant] == rootcase[root$AttDiscriminant])
{
c1 <- CaseToCheck[1:(ncol(CaseToCheck) - 1)]  # all attributes except the class column
c2 <- rootcase[1:(ncol(rootcase) - 1)]        # all attributes except the class column
if(DEuclidean1x1(c1,c2) == 0.0){
return(rootcase[ncol(rootcase)])
}else{
if (is.null(rootL$SonL) == FALSE)
root <- FindTheBest(CaseToCheck,rootL)
else
return(rootcase[ncol(rootcase)])
}
}
else{
if (is.null(rootL$SonL) == FALSE)
root <- FindTheBest(CaseToCheck,rootL)
else
return(rootcase[ncol(rootcase)])
}
}
else
{
if (is.null(rootR$SonR) == FALSE)
root <- FindTheBest(CaseToCheck,rootR)
else
return(rootcase[ncol(rootcase)])
}
}
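# Editorial usage sketch (hypothetical names; test_df and tree_model are assumed to exist):
#   preds <- NiarPredictionAtt(test_df, tree_model)
# This returns a list with one predicted value per row of test_df.  FindTheBest does the
# per-case work: it descends SonL/SonR by comparing the case against the stored case on the
# discriminant attribute, and returns the value stored in the last column of the selected case.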
|
Formal statement is: lemma Lim_bounded2: fixes f :: "nat \<Rightarrow> 'a::linorder_topology" assumes lim:"f \<longlonglongrightarrow> l" and ge: "\<forall>n\<ge>N. f n \<ge> C" shows "l \<ge> C" Informal statement is: If $f$ is a sequence of real numbers that converges to $l$, and if $f$ is eventually greater than or equal to $C$, then $l$ is greater than or equal to $C$. |
Formal statement is: lemma continuous_at_right_real_increasing: fixes f :: "real \<Rightarrow> real" assumes nondecF: "\<And>x y. x \<le> y \<Longrightarrow> f x \<le> f y" shows "continuous (at_right a) f \<longleftrightarrow> (\<forall>e>0. \<exists>d>0. f (a + d) - f a < e)" Informal statement is: A real-valued function $f$ is continuous from the right at $a$ if and only if for every $\epsilon > 0$, there exists $\delta > 0$ such that $f(a + \delta) - f(a) < \epsilon$. |
lemma Lim_transform_within_set_eq: fixes a :: "'a::metric_space" and l :: "'b::metric_space" shows "eventually (\<lambda>x. x \<in> S \<longleftrightarrow> x \<in> T) (at a) \<Longrightarrow> ((f \<longlongrightarrow> l) (at a within S) \<longleftrightarrow> (f \<longlongrightarrow> l) (at a within T))" |
theory T110
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
\section{Structuring Elements}
\begin{frame}{Text blocks}
\framesubtitle{In plain, example, and \alert{alert} flavour}
\alert{This text} is highlighted.
\begin{block}{A plain block}
This is a plain block containing some \alert{highlighted text}.
\end{block}
\begin{exampleblock}{An example block}
This is an example block containing some \alert{highlighted text}.
\end{exampleblock}
\begin{alertblock}{An alert block}
This is an alert block containing some \alert{highlighted text}.
\end{alertblock}
\end{frame}
\begin{frame}[label=proof]{Definitions, theorems, and proofs}
\framesubtitle{All integers divide zero}
\begin{definition}
$\forall a,b\in\mathbb{Z}: a\mid b\iff\exists c\in\mathbb{Z}:a\cdot c=b$
\end{definition}
\begin{theorem}
$\forall a\in\mathbb{Z}: a\mid 0$
\end{theorem}
\begin{proof}
$\forall a\in\mathbb{Z}: a\cdot 0=0$
\end{proof}
\end{frame}
|
2017/11/23
chaonin,@Xiamen,Fujian
Learn Tensorflow
|
module Lang.Irrelevance where
open import Type
postulate .axiom : ∀{ℓ}{T : Type{ℓ}} -> .T -> T
|
Formal statement is: lemma prime_elem_dvd_mult_iff: "prime_elem p \<Longrightarrow> p dvd (a * b) \<longleftrightarrow> p dvd a \<or> p dvd b" Informal statement is: If $p$ is a prime element, then $p$ divides $ab$ if and only if $p$ divides $a$ or $p$ divides $b$. |
Formal statement is: lemma Sup_insert: fixes S :: "real set" shows "bounded S \<Longrightarrow> Sup (insert x S) = (if S = {} then x else max x (Sup S))" Informal statement is: If $S$ is a bounded set, then $\sup(S \cup \{x\}) = \max\{x, \sup S\}$. |
lemma path_connected_uncountable: fixes S :: "'a::metric_space set" assumes "path_connected S" "a \<in> S" "b \<in> S" "a \<noteq> b" shows "uncountable S" |
[STATEMENT]
lemma ideduct_synth_priv_fun_in_ik:
fixes M::"('fun,'var) terms" and t::"('fun,'var) term"
assumes "M \<turnstile>\<^sub>c t" "f \<in> funs_term t" "\<not>public f"
shows "f \<in> \<Union>(funs_term ` M)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<in> \<Union> (funs_term ` M)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
M \<turnstile>\<^sub>c t
f \<in> funs_term t
\<not> public f
goal (1 subgoal):
1. f \<in> \<Union> (funs_term ` M)
[PROOF STEP]
by (induct t rule: intruder_synth_induct) auto |
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import category_theory.limits.shapes.equalizers
import category_theory.limits.shapes.products
import topology.sheaves.sheaf_condition.pairwise_intersections
/-!
# The sheaf condition in terms of an equalizer of products
Here we set up the machinery for the "usual" definition of the sheaf condition,
e.g. as in https://stacks.math.columbia.edu/tag/0072
in terms of an equalizer diagram where the two objects are
`∏ F.obj (U i)` and `∏ F.obj (U i ⊓ U j)`.
We show that this sheaf condition is equivalent to the `pairwise_intersections` sheaf condition when
the presheaf is valued in a category with products, and thereby equivalent to the default sheaf
condition.
-/
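/-
Editorial sketch (not from the original file): for a cover `U : ι → opens X` the relevant
diagram, built from the definitions below, is

  F.obj (supr U)  --res-->  ∏ F.obj (U i)  ==left_res==>  ∏ F.obj (U i ⊓ U j)
                                           ==right_res==>

and `sheaf_condition_equalizer_products.fork` packages `res` as a fork over the parallel
pair `(left_res, right_res)`; the sheaf condition asks that this fork be a limit.
-/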
universes v' v u
noncomputable theory
open category_theory
open category_theory.limits
open topological_space
open opposite
open topological_space.opens
namespace Top
variables {C : Type u} [category.{v} C] [has_products.{v'} C]
variables {X : Top.{v'}} (F : presheaf C X) {ι : Type v'} (U : ι → opens X)
namespace presheaf
namespace sheaf_condition_equalizer_products
/-- The product of the sections of a presheaf over a family of open sets. -/
def pi_opens : C := ∏ (λ i : ι, F.obj (op (U i)))
/--
The product of the sections of a presheaf over the pairwise intersections of
a family of open sets.
-/
def pi_inters : C := ∏ (λ p : ι × ι, F.obj (op (U p.1 ⊓ U p.2)))
/--
The morphism `Π F.obj (U i) ⟶ Π F.obj (U i ⊓ U j)` whose components
are given by the restriction maps from `U i` to `U i ⊓ U j`.
-/
def left_res : pi_opens F U ⟶ pi_inters.{v'} F U :=
pi.lift (λ p : ι × ι, pi.π _ p.1 ≫ F.map (inf_le_left (U p.1) (U p.2)).op)
/--
The morphism `Π F.obj (U i) ⟶ Π F.obj (U i ⊓ U j)` whose components
are given by the restriction maps from `U j` to `U i ⊓ U j`.
-/
def right_res : pi_opens F U ⟶ pi_inters.{v'} F U :=
pi.lift (λ p : ι × ι, pi.π _ p.2 ≫ F.map (inf_le_right (U p.1) (U p.2)).op)
/--
The morphism `F.obj (supr U) ⟶ Π F.obj (U i)` whose components
are given by the restriction maps from `supr U` to `U i`.
-/
def res : F.obj (op (supr U)) ⟶ pi_opens.{v'} F U :=
pi.lift (λ i : ι, F.map (topological_space.opens.le_supr U i).op)
@[simp, elementwise]
lemma res_π (i : ι) : res F U ≫ limit.π _ ⟨i⟩ = F.map (opens.le_supr U i).op :=
by rw [res, limit.lift_π, fan.mk_π_app]
@[elementwise]
lemma w : res F U ≫ left_res F U = res F U ≫ right_res F U :=
begin
dsimp [res, left_res, right_res],
ext,
simp only [limit.lift_π, limit.lift_π_assoc, fan.mk_π_app, category.assoc],
rw [←F.map_comp],
rw [←F.map_comp],
congr,
end
/--
The equalizer diagram for the sheaf condition.
-/
@[reducible]
def diagram : walking_parallel_pair ⥤ C :=
parallel_pair (left_res.{v'} F U) (right_res F U)
/--
The restriction map `F.obj U ⟶ Π F.obj (U i)` gives a cone over the equalizer diagram
for the sheaf condition. The sheaf condition asserts this cone is a limit cone.
-/
def fork : fork.{v} (left_res F U) (right_res F U) := fork.of_ι _ (w F U)
@[simp]
lemma fork_X : (fork F U).X = F.obj (op (supr U)) := rfl
@[simp]
lemma fork_ι : (fork F U).ι = res F U := rfl
@[simp]
lemma fork_π_app_walking_parallel_pair_zero :
(fork F U).π.app walking_parallel_pair.zero = res F U := rfl
@[simp]
lemma fork_π_app_walking_parallel_pair_one :
(fork F U).π.app walking_parallel_pair.one = res F U ≫ left_res F U := rfl
variables {F} {G : presheaf C X}
/-- Isomorphic presheaves have isomorphic `pi_opens` for any cover `U`. -/
@[simp]
def pi_opens.iso_of_iso (α : F ≅ G) : pi_opens F U ≅ pi_opens.{v'} G U :=
pi.map_iso (λ X, α.app _)
/-- Isomorphic presheaves have isomorphic `pi_inters` for any cover `U`. -/
@[simp]
def pi_inters.iso_of_iso (α : F ≅ G) : pi_inters F U ≅ pi_inters.{v'} G U :=
pi.map_iso (λ X, α.app _)
/-- Isomorphic presheaves have isomorphic sheaf condition diagrams. -/
def diagram.iso_of_iso (α : F ≅ G) : diagram F U ≅ diagram.{v'} G U :=
nat_iso.of_components
begin rintro ⟨⟩, exact pi_opens.iso_of_iso U α, exact pi_inters.iso_of_iso U α end
begin
rintro ⟨⟩ ⟨⟩ ⟨⟩,
{ simp, },
{ ext, simp [left_res], },
{ ext, simp [right_res], },
{ simp, },
end.
/--
If `F G : presheaf C X` are isomorphic presheaves,
then the `fork F U`, the canonical cone of the sheaf condition diagram for `F`,
is isomorphic to `fork F G` postcomposed with the corresponding isomorphism between
sheaf condition diagrams.
-/
def fork.iso_of_iso (α : F ≅ G) :
fork F U ≅ (cones.postcompose (diagram.iso_of_iso U α).inv).obj (fork G U) :=
begin
fapply fork.ext,
{ apply α.app, },
{ ext,
dunfold fork.ι, -- Ugh, `simp` can't unfold abbreviations.
simp [res, diagram.iso_of_iso], }
end
end sheaf_condition_equalizer_products
/--
The sheaf condition for a `F : presheaf C X` requires that the morphism
`F.obj U ⟶ ∏ F.obj (U i)` (where `U` is some open set which is the union of the `U i`)
is the equalizer of the two morphisms
`∏ F.obj (U i) ⟶ ∏ F.obj (U i ⊓ U j)`.
-/
def is_sheaf_equalizer_products (F : presheaf.{v' v u} C X) : Prop :=
∀ ⦃ι : Type v'⦄ (U : ι → opens X), nonempty (is_limit (sheaf_condition_equalizer_products.fork F U))
/-!
The remainder of this file shows that the equalizer_products sheaf condition is equivalent
to the pairwise_intersections sheaf condition.
-/
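/-
Editorial note (not from the original file): the comparison below proceeds by building an
equivalence `cone_equiv` between cones over `(diagram U).op ⋙ F` (the pairwise-intersections
diagram) and cones over the equalizer-products diagram, and then transporting `is_limit`
data across it in `is_limit_map_cone_of_is_limit_sheaf_condition_fork` and
`is_limit_sheaf_condition_fork_of_is_limit_map_cone`.
-/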
namespace sheaf_condition_pairwise_intersections
open category_theory.pairwise category_theory.pairwise.hom
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_functor_obj (c : cone ((diagram U).op ⋙ F)) :
cone (sheaf_condition_equalizer_products.diagram F U) :=
{ X := c.X,
π :=
{ app := λ Z,
walking_parallel_pair.cases_on Z
(pi.lift (λ (i : ι), c.π.app (op (single i))))
(pi.lift (λ (b : ι × ι), c.π.app (op (pair b.1 b.2)))),
naturality' := λ Y Z f,
begin
cases Y; cases Z; cases f,
{ ext i, dsimp,
simp only [limit.lift_π, category.id_comp, fan.mk_π_app, category_theory.functor.map_id,
category.assoc],
dsimp,
simp only [limit.lift_π, category.id_comp, fan.mk_π_app], },
{ ext ⟨i, j⟩, dsimp [sheaf_condition_equalizer_products.left_res],
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
have h := c.π.naturality (quiver.hom.op (hom.left i j)),
dsimp at h,
simpa using h, },
{ ext ⟨i, j⟩, dsimp [sheaf_condition_equalizer_products.right_res],
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
have h := c.π.naturality (quiver.hom.op (hom.right i j)),
dsimp at h,
simpa using h, },
{ ext i, dsimp,
simp only [limit.lift_π, category.id_comp, fan.mk_π_app, category_theory.functor.map_id,
category.assoc],
dsimp,
simp only [limit.lift_π, category.id_comp, fan.mk_π_app], },
end, }, }
section
local attribute [tidy] tactic.case_bash
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_functor :
limits.cone ((diagram U).op ⋙ F) ⥤
limits.cone (sheaf_condition_equalizer_products.diagram F U) :=
{ obj := λ c, cone_equiv_functor_obj F U c,
map := λ c c' f,
{ hom := f.hom,
w' := λ j, begin
cases j;
{ ext, simp only [limits.fan.mk_π_app, limits.cone_morphism.w,
limits.limit.lift_π, category.assoc, cone_equiv_functor_obj_π_app], },
end }, }.
end
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_inverse_obj
(c : limits.cone (sheaf_condition_equalizer_products.diagram F U)) :
limits.cone ((diagram U).op ⋙ F) :=
{ X := c.X,
π :=
{ app :=
begin
intro x,
induction x using opposite.rec,
rcases x with (⟨i⟩|⟨i,j⟩),
{ exact c.π.app (walking_parallel_pair.zero) ≫ pi.π _ i, },
{ exact c.π.app (walking_parallel_pair.one) ≫ pi.π _ (i, j), }
end,
naturality' :=
begin
intros x y f,
induction x using opposite.rec,
induction y using opposite.rec,
have ef : f = f.unop.op := rfl,
revert ef,
generalize : f.unop = f',
rintro rfl,
rcases x with ⟨i⟩|⟨⟩; rcases y with ⟨⟩|⟨j,j⟩; rcases f' with ⟨⟩,
{ dsimp, erw [F.map_id], simp, },
{ dsimp, simp only [category.id_comp, category.assoc],
have h := c.π.naturality (walking_parallel_pair_hom.left),
dsimp [sheaf_condition_equalizer_products.left_res] at h,
simp only [category.id_comp] at h,
have h' := h =≫ pi.π _ (i, j),
rw h',
simp only [category.assoc, limit.lift_π, fan.mk_π_app],
refl, },
{ dsimp, simp only [category.id_comp, category.assoc],
have h := c.π.naturality (walking_parallel_pair_hom.right),
dsimp [sheaf_condition_equalizer_products.right_res] at h,
simp only [category.id_comp] at h,
have h' := h =≫ pi.π _ (j, i),
rw h',
simp,
refl, },
{ dsimp, erw [F.map_id], simp, },
end, }, }
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_inverse :
limits.cone (sheaf_condition_equalizer_products.diagram F U) ⥤
limits.cone ((diagram U).op ⋙ F) :=
{ obj := λ c, cone_equiv_inverse_obj F U c,
map := λ c c' f,
{ hom := f.hom,
w' :=
begin
intro x,
induction x using opposite.rec,
rcases x with (⟨i⟩|⟨i,j⟩),
{ dsimp,
dunfold fork.ι,
rw [←(f.w walking_parallel_pair.zero), category.assoc], },
{ dsimp,
rw [←(f.w walking_parallel_pair.one), category.assoc], },
end }, }.
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_unit_iso_app
(c : cone ((diagram U).op ⋙ F)) :
(𝟭 (cone ((diagram U).op ⋙ F))).obj c ≅
(cone_equiv_functor F U ⋙ cone_equiv_inverse F U).obj c :=
{ hom :=
{ hom := 𝟙 _,
w' := λ j, begin
induction j using opposite.rec, rcases j;
{ dsimp, simp only [limits.fan.mk_π_app, category.id_comp, limits.limit.lift_π], }
end, },
inv :=
{ hom := 𝟙 _,
w' := λ j, begin
induction j using opposite.rec, rcases j;
{ dsimp, simp only [limits.fan.mk_π_app, category.id_comp, limits.limit.lift_π], }
end },
hom_inv_id' := begin
ext,
simp only [category.comp_id, limits.cone.category_comp_hom, limits.cone.category_id_hom],
end,
inv_hom_id' := begin
ext,
simp only [category.comp_id, limits.cone.category_comp_hom, limits.cone.category_id_hom],
end, }
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_unit_iso :
𝟭 (limits.cone ((diagram U).op ⋙ F)) ≅
cone_equiv_functor F U ⋙ cone_equiv_inverse F U :=
nat_iso.of_components (cone_equiv_unit_iso_app F U) (by tidy)
/-- Implementation of `sheaf_condition_pairwise_intersections.cone_equiv`. -/
@[simps]
def cone_equiv_counit_iso :
cone_equiv_inverse F U ⋙ cone_equiv_functor F U ≅
𝟭 (limits.cone (sheaf_condition_equalizer_products.diagram F U)) :=
nat_iso.of_components (λ c,
{ hom :=
{ hom := 𝟙 _,
w' :=
begin
rintro ⟨_|_⟩,
{ ext ⟨j⟩, dsimp, simp only [category.id_comp, limits.fan.mk_π_app, limits.limit.lift_π], },
{ ext ⟨i,j⟩, dsimp, simp only [category.id_comp, limits.fan.mk_π_app, limits.limit.lift_π], },
end },
inv :=
{ hom := 𝟙 _,
w' :=
begin
rintro ⟨_|_⟩,
{ ext ⟨j⟩, dsimp, simp only [category.id_comp, limits.fan.mk_π_app, limits.limit.lift_π], },
{ ext ⟨i,j⟩, dsimp, simp only [category.id_comp, limits.fan.mk_π_app, limits.limit.lift_π], },
end, },
hom_inv_id' := by { ext, dsimp, simp only [category.comp_id], },
inv_hom_id' := by { ext, dsimp, simp only [category.comp_id], }, })
(λ c d f, by { ext, dsimp, simp only [category.comp_id, category.id_comp], })
/--
Cones over `(diagram U).op ⋙ F` are the same as cones over the usual sheaf condition equalizer diagram.
-/
@[simps]
def cone_equiv :
limits.cone ((diagram U).op ⋙ F) ≌ limits.cone (sheaf_condition_equalizer_products.diagram F U) :=
{ functor := cone_equiv_functor F U,
inverse := cone_equiv_inverse F U,
unit_iso := cone_equiv_unit_iso F U,
counit_iso := cone_equiv_counit_iso F U, }
local attribute [reducible]
sheaf_condition_equalizer_products.res
sheaf_condition_equalizer_products.left_res
/--
If `sheaf_condition_equalizer_products.fork` is an equalizer,
then `F.map_cone (cone U)` is a limit cone.
-/
def is_limit_map_cone_of_is_limit_sheaf_condition_fork
(P : is_limit (sheaf_condition_equalizer_products.fork F U)) :
is_limit (F.map_cone (cocone U).op) :=
is_limit.of_iso_limit ((is_limit.of_cone_equiv (cone_equiv F U).symm).symm P)
{ hom :=
{ hom := 𝟙 _,
w' :=
begin
intro x,
induction x using opposite.rec,
rcases x with ⟨⟩,
{ dsimp, simp, refl, },
{ dsimp,
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
rw ←F.map_comp,
refl, }
end },
inv :=
{ hom := 𝟙 _,
w' :=
begin
intro x,
induction x using opposite.rec,
rcases x with ⟨⟩,
{ dsimp, simp, refl, },
{ dsimp,
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
rw ←F.map_comp,
refl, }
end },
hom_inv_id' := by { ext, dsimp, simp only [category.comp_id], },
inv_hom_id' := by { ext, dsimp, simp only [category.comp_id], }, }
/--
If `F.map_cone (cone U)` is a limit cone,
then `sheaf_condition_equalizer_products.fork` is an equalizer.
-/
def is_limit_sheaf_condition_fork_of_is_limit_map_cone
(Q : is_limit (F.map_cone (cocone U).op)) :
is_limit (sheaf_condition_equalizer_products.fork F U) :=
is_limit.of_iso_limit ((is_limit.of_cone_equiv (cone_equiv F U)).symm Q)
{ hom :=
{ hom := 𝟙 _,
w' :=
begin
rintro ⟨⟩,
{ dsimp, simp, refl, },
{ dsimp, ext ⟨i, j⟩,
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
rw ←F.map_comp,
refl, }
end },
inv :=
{ hom := 𝟙 _,
w' :=
begin
rintro ⟨⟩,
{ dsimp, simp, refl, },
{ dsimp, ext ⟨i, j⟩,
simp only [limit.lift_π, limit.lift_π_assoc, category.id_comp, fan.mk_π_app,
category.assoc],
rw ←F.map_comp,
refl, }
end },
hom_inv_id' := by { ext, dsimp, simp only [category.comp_id], },
inv_hom_id' := by { ext, dsimp, simp only [category.comp_id], }, }
end sheaf_condition_pairwise_intersections
open sheaf_condition_pairwise_intersections
/--
The sheaf condition in terms of an equalizer diagram is equivalent
to the default sheaf condition.
-/
lemma is_sheaf_iff_is_sheaf_equalizer_products (F : presheaf C X) :
F.is_sheaf ↔ F.is_sheaf_equalizer_products :=
(is_sheaf_iff_is_sheaf_pairwise_intersections F).trans $
iff.intro (λ h ι U, ⟨is_limit_sheaf_condition_fork_of_is_limit_map_cone F U (h U).some⟩)
(λ h ι U, ⟨is_limit_map_cone_of_is_limit_sheaf_condition_fork F U (h U).some⟩)
end presheaf
end Top
|
import category_theory.adjunction.basic
import category_theory.limits.preserves.basic
import data.pfun
open category_theory category_theory.functor category_theory.limits
variables (𝒞 : Type) [category.{0} 𝒞]
inductive bicompletion_aux : bool → Type 1
| of_cat_obj : 𝒞 → bicompletion_aux ff
| limit_obj {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt) : bicompletion_aux ff
| colimit_obj {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt) : bicompletion_aux ff
| of_cat_hom : Π {X Y : 𝒞}, (X ⟶ Y) → bicompletion_aux tt -- of_cat_obj X ⟶ of_cat_obj Y
| limit_cone_comp {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt)
(X : 𝒟) (Y : bicompletion_aux ff) (f : bicompletion_aux tt) : -- F_obj X ⟶ Y
bicompletion_aux tt -- limit_obj F_obj F_hom ⟶ Y
| is_limit {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt)
(cone_obj : bicompletion_aux ff)
(cone : Π (X : 𝒟), bicompletion_aux tt) : -- cone_obj ⟶ F_obj X
bicompletion_aux tt -- cone_obj → limit_obj F_obj F_hom
| colimit_cocone_comp {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt)
(X : 𝒟) (Y : bicompletion_aux ff) (f : bicompletion_aux tt) : -- Y ⟶ F_obj X
bicompletion_aux tt -- Y ⟶ colimit_obj F_obj F_hom
| is_colimit {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → bicompletion_aux ff)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux tt)
(cocone_obj : bicompletion_aux ff)
(cocone : Π (X : 𝒟), bicompletion_aux tt) : -- F_obj X ⟶ cocone_obj
bicompletion_aux tt -- colimit_obj F_obj F_hom ⟶ cocone_obj
namespace bicompletion_aux
variable {𝒞}
@[simp] def dom : Π (X : bicompletion_aux 𝒞 tt), bicompletion_aux 𝒞 ff
| (@of_cat_hom _ _ X Y f) := of_cat_obj X
| (@limit_cone_comp _ _ 𝒟 _ F_obj F_hom X _ _) := by exactI limit_obj F_obj @F_hom
| (@is_limit _ _ 𝒟 _ F_obj F_hom cone_obj cone) := cone_obj
| (@colimit_cocone_comp _ _ 𝒟 _ F_obj F_hom X Y f) := Y
| (@is_colimit _ _ 𝒟 _ F_obj F_hom cocone_obj cocone) := by exactI colimit_obj F_obj @F_hom
@[simp] def cod : Π (X : bicompletion_aux 𝒞 tt), bicompletion_aux 𝒞 ff
| (@of_cat_hom _ _ X Y f) := of_cat_obj Y
| (@colimit_cocone_comp _ _ 𝒟 _ F_obj F_hom X _ _) := by exactI colimit_obj F_obj @F_hom
| (@is_colimit _ _ 𝒟 _ F_obj F_hom cocone_obj cocone) := cocone_obj
| (@limit_cone_comp _ _ 𝒟 _ F_obj F_hom X Y f) := Y
| (@is_limit _ _ 𝒟 _ F_obj F_hom cone_obj cone) := by exactI limit_obj F_obj @F_hom
variable (𝒞)
def obj₁ : Type 1 := bicompletion_aux 𝒞 ff
variable {𝒞}
variables {𝒟 : Type} [category.{0} 𝒟]
def hom₁ (X Y : obj₁ 𝒞) : Type 1 :=
{ f : bicompletion_aux 𝒞 tt // f.dom = X ∧ f.cod = Y }
def of_cat_obj₁ (X : 𝒞) : obj₁ 𝒞 := of_cat_obj X
def limit_obj₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y)) : obj₁ 𝒞 :=
limit_obj F_obj (λ X Y f, (F_hom f).1)
def colimit_obj₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y)) : obj₁ 𝒞 :=
colimit_obj F_obj (λ X Y f, (F_hom f).1)
def of_cat_hom₁ {X Y : 𝒞} (f : X ⟶ Y) : hom₁ (of_cat_obj X) (of_cat_obj Y) :=
⟨of_cat_hom f, by simp⟩
def limit_cone_comp₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y)) (X : 𝒟)
{Y : obj₁ 𝒞} (f : hom₁ (F_obj X) Y) :
hom₁ (limit_obj₁ F_obj @F_hom) Y :=
⟨limit_cone_comp F_obj (λ X Y f, (F_hom f).1) X Y f.1, by simp [limit_obj₁]⟩
def colimit_cocone_comp₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y)) (X : 𝒟)
{Y : obj₁ 𝒞} (f : hom₁ Y (F_obj X)) :
hom₁ Y (colimit_obj₁ F_obj @F_hom) :=
⟨colimit_cocone_comp F_obj (λ X Y f, (F_hom f).1) X Y f.1, by simp [colimit_obj₁]⟩
def is_limit₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(cone_obj : obj₁ 𝒞)
(cone : Π (X : 𝒟), hom₁ cone_obj (F_obj X)) :
hom₁ cone_obj (limit_obj₁ F_obj @F_hom) :=
⟨is_limit F_obj (λ X Y f, (F_hom f).1) cone_obj (λ X, (cone X).1), by simp [limit_obj₁]⟩
def is_colimit₁ (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(cocone_obj : obj₁ 𝒞)
(cocone : Π (X : 𝒟), hom₁ (F_obj X) cocone_obj) :
hom₁ (colimit_obj₁ F_obj @F_hom) cocone_obj :=
⟨is_colimit F_obj (λ X Y f, (F_hom f).1) cocone_obj (λ X, (cocone X).1), by simp [colimit_obj₁]⟩
def id₁_aux (b : bool) (hb : b = ff) (X : bicompletion_aux 𝒞 b) :
hom₁ (show bicompletion_aux 𝒞 ff, from eq.rec_on hb X)
(show bicompletion_aux 𝒞 ff, from eq.rec_on hb X) :=
begin
revert hb,
refine bicompletion_aux.rec_on X _ _ _ _ _ _ _ _,
{ rintros X h,
exact of_cat_hom₁ (𝟙 X) },
{ introsI 𝒟 _ F_obj F_hom ih₁ ih₂ _,
exact ⟨is_limit F_obj @F_hom (limit_obj F_obj @F_hom)
(λ D, limit_cone_comp F_obj @F_hom D (F_obj D) (ih₁ D rfl).1),
by simp⟩ },
{ introsI 𝒟 _ F_obj F_hom ih₁ ih₂ _,
exact ⟨is_colimit F_obj @F_hom (colimit_obj F_obj @F_hom)
(λ D, colimit_cocone_comp F_obj @F_hom D (F_obj D) (ih₁ D rfl).1),
by simp⟩ },
all_goals { intros, contradiction }
end
def id₁ (X : obj₁ 𝒞) : hom₁ X X :=
id₁_aux ff rfl X
-- def comp₁ : Π
-- (f : bicompletion_aux 𝒞 tt)
-- (g : bicompletion_aux 𝒞 tt),
-- part (bicompletion_aux 𝒞 tt)
-- | (@of_cat_hom _ _ A B f) (@of_cat_hom _ _ B' C g) :=
-- ⟨B = B', λ h, by subst h; exact (of_cat_hom₁ (f ≫ g)).1⟩
-- | (@limit_cone_comp _ _ 𝒟 _ F_obj F_hom A B f) g :=
-- do ih ← comp₁ f g, return (by exactI limit_cone_comp F_obj @F_hom A g.cod ih)
-- | f (@colimit_cocone_comp _ _ 𝒟 _ F_obj F_hom A B g) :=
-- do ih ← comp₁ f g, return (by exactI colimit_cocone_comp F_obj @F_hom A g.cod ih)
-- | (@is_colimit _ _ 𝒟 _ F_obj F_hom cocone_obj cocone) g :=
-- let f : Π (A : 𝒟), part (bicompletion_aux 𝒞 tt) := λ A, comp₁ (cocone A) g in
-- ⟨∀ A : 𝒟, (f A).dom, λ h, by exactI @is_colimit _ _ 𝒟 _ F_obj @F_hom cocone_obj
-- (λ A, (f A).get (h A))⟩
-- | f (@is_limit _ _ 𝒟 _ F_obj F_hom cone_obj cone) :=
-- let f : Π (A : 𝒟), part (bicompletion_aux 𝒞 tt) := λ A, comp₁ f (cone A) in
-- ⟨∀ A : 𝒟, (f A).dom, λ h, by exactI @is_colimit _ _ 𝒟 _ F_obj @F_hom cone_obj
-- (λ A, (f A).get (h A))⟩
-- using_well_founded { dec_tac := `[admit] }
inductive valid_obj₁ : Π (X : obj₁ 𝒞), Prop
| of_cat_obj (X : 𝒞) : valid_obj₁ (of_cat_obj X)
| limit_obj {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(h : Π X : 𝒟, valid_obj₁ (F_obj X)) :
valid_obj₁ (limit_obj₁ F_obj @F_hom)
| colimit_obj {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(h : Π X : 𝒟, valid_obj₁ (F_obj X)) :
valid_obj₁ (colimit_obj₁ F_obj @F_hom)
def valid_obj₁_limit_obj
{𝒟 : Type} [category.{0} 𝒟] {F_obj : 𝒟 → obj₁ 𝒞}
{F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux 𝒞 tt}
(h : valid_obj₁ (limit_obj F_obj @F_hom)) :
Π (X : 𝒟), valid_obj₁ (F_obj X) :=
begin
generalize hX : limit_obj F_obj @F_hom = X,
rw hX at h,
induction h,
{ simp * at * },
{ simp [limit_obj₁] at hX,
rcases hX with ⟨hX₁, hX₂, hX₂, hX₄⟩,
subst hX₁,
simp at *,
subst hX₂,
assumption },
{ simp [*, colimit_obj₁] at * }
end
def valid_obj₁_colimit_obj
{𝒟 : Type} [category.{0} 𝒟] {F_obj : 𝒟 → obj₁ 𝒞}
{F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → bicompletion_aux 𝒞 tt}
(h : valid_obj₁ (colimit_obj F_obj @F_hom)) :
Π (X : 𝒟), valid_obj₁ (F_obj X) :=
begin
generalize hX : colimit_obj F_obj @F_hom = X,
rw hX at h,
induction h,
{ simp * at * },
{ simp [*, limit_obj₁] at * },
{ simp [colimit_obj₁] at hX,
rcases hX with ⟨hX₁, hX₂, hX₂, hX₄⟩,
subst hX₁,
simp at *,
subst hX₂,
assumption }
end
inductive valid_hom₁ : Π {X Y : obj₁ 𝒞}, hom₁ X Y → Type 1
| of_cat_hom {X Y : 𝒞} (f : X ⟶ Y) : valid_hom₁ (of_cat_hom₁ f)
| limit_cone_comp {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(X : 𝒟) {Y : obj₁ 𝒞} (f : hom₁ (F_obj X) Y)
(F_hom_valid : Π {X Y : 𝒟} (f : X ⟶ Y), valid_hom₁ (F_hom f))
(f_valid : valid_hom₁ f) :
valid_hom₁ (limit_cone_comp₁ F_obj @F_hom X f)
| colimit_cocone_comp {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(X : 𝒟) {Y : obj₁ 𝒞} (f : hom₁ Y (F_obj X))
(F_hom_valid : Π {X Y : 𝒟} (f : X ⟶ Y), valid_hom₁ (F_hom f))
(f_valid : valid_hom₁ f) :
valid_hom₁ (colimit_cocone_comp₁ F_obj @F_hom X f)
| is_limit {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(cone_obj : obj₁ 𝒞)
(cone : Π (X : 𝒟), hom₁ cone_obj (F_obj X))
(F_hom_valid : Π {X Y : 𝒟} (f : X ⟶ Y), valid_hom₁ (F_hom f))
(cone_valid : Π (X : 𝒟), valid_hom₁ (cone X)) :
valid_hom₁ (is_limit₁ F_obj @F_hom cone_obj cone)
| is_colimit {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
(cocone_obj : obj₁ 𝒞)
(cocone : Π (X : 𝒟), hom₁ (F_obj X) cocone_obj)
(F_hom_valid : Π {X Y : 𝒟} (f : X ⟶ Y), valid_hom₁ (F_hom f))
(cocone_valid : Π (X : 𝒟), valid_hom₁ (cocone X)) :
valid_hom₁ (is_colimit₁ F_obj @F_hom cocone_obj cocone)
variable (𝒞)
def obj₂ : Type 1 := { X : obj₁ 𝒞 // valid_obj₁ X }
variable {𝒞}
def hom₂ (X Y : obj₂ 𝒞) : Type 1 := Σ (f : hom₁ X.1 Y.1), valid_hom₁ f
open valid_hom₁
-- lemma valid_hom₁_limit_cone_comp {𝒟 : Type} [category.{0} 𝒟] (F_obj : 𝒟 → obj₁ 𝒞)
-- (F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₁ (F_obj X) (F_obj Y))
-- (X : 𝒟) {Y : obj₁ 𝒞} (f : hom₁ (F_obj X) Y)
-- (F_hom_valid : Π {X Y : 𝒟} (f : X ⟶ Y), valid_hom₁ (F_hom f))
-- (f_valid : valid_hom₁ f)
-- (h : valid_hom₁ (limit_cone_comp₁ F_obj @F_hom X f)) :
-- h == valid_hom₁.limit_cone_comp F_obj @F_hom X f @F_hom_valid f_valid :=
-- @valid_hom₁.rec_on _ _ (λ A B g hg, g == limit_cone_comp₁ F_obj @F_hom X f →
-- hg == valid_hom₁.limit_cone_comp F_obj @F_hom X f @F_hom_valid f_valid)
-- _ _ _ _
-- begin
-- intros,
-- simp [of_cat_hom₁] at *,
-- end _ _ _ _ (heq.refl _)
-- lemma hom₂_ext_aux {X Y : obj₁ 𝒞} (f : hom₁ X Y) (h₁ : valid_hom₁ f) :
-- ∀ (h₂ : valid_hom₁ f), h₁ = h₂ :=
-- begin
-- induction h₁,
-- { intro h₂, cases h₂, refl },
-- { intro h₂,
-- refine valid_hom₁.rec_on h₂ _ _ _ _ _,
-- { intros X Y f, }
-- }
-- end
def of_cat_obj₂ (X : 𝒞) : obj₂ 𝒞 :=
⟨of_cat_obj X, valid_obj₁.of_cat_obj _⟩
lemma of_cat_obj₂_injective : function.injective (@of_cat_obj₂ 𝒞 _) :=
begin
intros X Y hXY,
simp [of_cat_obj₂] at hXY,
injection hXY,
end
def limit_obj₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)) : obj₂ 𝒞 :=
⟨limit_obj₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1), valid_obj₁.limit_obj _ _ (λ X, (F_obj X).2)⟩
lemma limit_obj₂_injective {𝒟₁ 𝒟₂ : Type} [i₁ : category 𝒟₁] [i₂ : category 𝒟₂]
{F_obj₁ : 𝒟₁ → obj₂ 𝒞} {F_obj₂ : 𝒟₂ → obj₂ 𝒞}
{F_hom₁ : Π {X Y : 𝒟₁}, (X ⟶ Y) → hom₂ (F_obj₁ X) (F_obj₁ Y)}
{F_hom₂ : Π {X Y : 𝒟₂}, (X ⟶ Y) → hom₂ (F_obj₂ X) (F_obj₂ Y)}
(h : limit_obj₂ F_obj₁ @F_hom₁ = limit_obj₂ F_obj₂ @F_hom₂) :
𝒟₁ = 𝒟₂ ∧ i₁ == i₂ ∧ F_obj₁ == F_obj₂ ∧ @F_hom₁ == @F_hom₂ :=
begin
simp [limit_obj₂, limit_obj₁] at h,
injection h with h₁ h₂ h₃ h₄,
unfreezingI { subst h₁ },
rw heq_iff_eq at h₂,
unfreezingI { subst h₂ },
simp [heq_iff_eq, function.funext_iff, subtype.coe_injective.eq_iff] at h₃,
rw [← function.funext_iff] at h₃,
dsimp at h₃,
subst h₃,
simp [heq_iff_eq, function.funext_iff, subtype.coe_injective.eq_iff] at h₄,
end
def colimit_obj₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)) : obj₂ 𝒞 :=
⟨colimit_obj₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1), valid_obj₁.colimit_obj _ _ (λ X, (F_obj X).2)⟩
def of_cat_hom₂ {X Y : 𝒞} (f : X ⟶ Y) : hom₂ (of_cat_obj₂ X) (of_cat_obj₂ Y) :=
⟨of_cat_hom₁ f, valid_hom₁.of_cat_hom _⟩
def limit_cone_comp₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)) (X : 𝒟)
{Y : obj₂ 𝒞} (f : hom₂ (F_obj X) Y) :
hom₂ (limit_obj₂ F_obj @F_hom) Y :=
⟨limit_cone_comp₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1) X f.1,
valid_hom₁.limit_cone_comp _ _ _ _ (λ X Y f, (F_hom f).2) f.2⟩
def colimit_cocone_comp₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)) (X : 𝒟)
{Y : obj₂ 𝒞} (f : hom₂ Y (F_obj X)):
hom₂ Y (colimit_obj₂ F_obj @F_hom) :=
⟨colimit_cocone_comp₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1) X f.1,
valid_hom₁.colimit_cocone_comp _ _ _ _ (λ X Y f, (F_hom f).2) f.2⟩
def is_limit₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(cone_obj : obj₂ 𝒞)
(cone : Π (X : 𝒟), hom₂ cone_obj (F_obj X)) :
hom₂ cone_obj (limit_obj₂ F_obj @F_hom) :=
⟨is_limit₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1) cone_obj.1 (λ X, (cone X).1),
valid_hom₁.is_limit _ _ _ _ (λ X Y f, (F_hom f).2) (λ X, (cone X).2)⟩
def is_colimit₂ (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(cocone_obj : obj₂ 𝒞)
(cocone : Π (X : 𝒟), hom₂ (F_obj X) cocone_obj) :
hom₂ (colimit_obj₂ F_obj @F_hom) cocone_obj :=
⟨is_colimit₁ (λ X, (F_obj X).1) (λ X Y f, (F_hom f).1) cocone_obj.1 (λ X, (cocone X).1),
valid_hom₁.is_colimit _ _ _ _ (λ X Y f, (F_hom f).2) (λ X, (cocone X).2)⟩
@[elab_as_eliminator] protected def hom₂.rec_on
{motive : Π {X Y : obj₂ 𝒞} (f : hom₂ X Y), Sort*} {X Y : obj₂ 𝒞} (f : hom₂ X Y)
(of_cat_hom : Π {X Y : 𝒞} (f : X ⟶ Y), motive (of_cat_hom₂ f))
(limit_cone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(ih_F_hom : Π {X Y : 𝒟} (f : by exactI X ⟶ Y), motive (F_hom f))
(X : 𝒟) {Y : obj₂ 𝒞} (f : hom₂ (F_obj X) Y)
(ih_f : motive f),
motive (by exactI limit_cone_comp₂ F_obj @F_hom X f))
(colimit_cocone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(ih_F_hom : Π {X Y : 𝒟} (f : by exactI X ⟶ Y), motive (F_hom f))
(X : 𝒟) {Y : obj₂ 𝒞} (f : hom₂ Y (F_obj X))
(ih_f : motive f),
motive (by exactI colimit_cocone_comp₂ F_obj @F_hom X f))
(is_limit : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(ih_F_hom : Π {X Y : 𝒟} (f : by exactI X ⟶ Y), motive (F_hom f))
(cone_obj : obj₂ 𝒞) (cone : Π (X : 𝒟), hom₂ cone_obj (F_obj X))
(ih_cone : Π (X : 𝒟), motive (cone X)),
motive (by exactI is_limit₂ F_obj @F_hom cone_obj cone))
(is_colimit : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(ih_F_hom : Π {X Y : 𝒟} (f : by exactI X ⟶ Y), motive (F_hom f))
(cocone_obj : obj₂ 𝒞) (cocone : Π (X : 𝒟), hom₂ (F_obj X) cocone_obj)
(ih_cone : Π (X : 𝒟), motive (cocone X)),
motive (by exactI is_colimit₂ F_obj @F_hom cocone_obj cocone)) :
motive f :=
begin
cases X with X hX, cases Y with Y hY,
cases f with f hf,
dsimp at f, dsimp at hf,
revert hX hY,
refine valid_hom₁.rec_on hf _ _ _ _ _,
{ intros A B g hX hY,
exact of_cat_hom g },
{ introsI 𝒟 _ F_obj F_hom X Y f F_hom_valid f_valid ih₁ ih₂ hX hY,
exact @limit_cone_comp _ _ (λ A, ⟨F_obj A, valid_obj₁_limit_obj hX A⟩)
(λ X Y f, ⟨F_hom f, F_hom_valid f⟩)
(λ X Y f, ih₁ f _ _) X ⟨Y, hY⟩ ⟨f, f_valid⟩ (ih₂ _ _) },
{ introsI 𝒟 _ F_obj F_hom X Y f F_hom_valid f_valid ih₁ ih₂ hY hX,
exact @colimit_cocone_comp _ _ (λ A, ⟨F_obj A, valid_obj₁_colimit_obj hX A⟩)
(λ X Y f, ⟨F_hom f, F_hom_valid f⟩)
(λ X Y f, ih₁ f _ _) X ⟨Y, hY⟩ ⟨f, f_valid⟩ (ih₂ _ _) },
{ introsI 𝒟 _ F_obj F_hom cone_obj cone F_hom_valid cone_valid ih₁ ih₂ hX hY,
exact @is_limit 𝒟 _ (λ A, ⟨F_obj A, valid_obj₁_limit_obj hY A⟩)
(λ X Y f, ⟨F_hom f, F_hom_valid f⟩)
(λ X Y f, ih₁ f _ _) ⟨cone_obj, hX⟩
(λ X, ⟨cone X, cone_valid X⟩)
(λ X, ih₂ X _ _) },
{ introsI 𝒟 _ F_obj F_hom cocone_obj cocone F_hom_valid cocone_valid ih₁ ih₂ hX hY,
exact @is_colimit 𝒟 _ (λ A, ⟨F_obj A, valid_obj₁_colimit_obj hX A⟩)
(λ X Y f, ⟨F_hom f, F_hom_valid f⟩)
(λ X Y f, ih₁ f _ _) ⟨cocone_obj, hY⟩
(λ X, ⟨cocone X, cocone_valid X⟩)
(λ X, ih₂ X _ _) }
end
def hom₂_of_cat_obj_rec_on
{motive : Π {X : 𝒞} {Y : obj₂ 𝒞} (f : hom₂ (of_cat_obj₂ X) Y), Sort*}
{X : 𝒞} {Y : obj₂ 𝒞} (f : hom₂ (of_cat_obj₂ X) Y)
(of_cat_hom : Π {Y : 𝒞} (f : X ⟶ Y), motive (of_cat_hom₂ f))
(colimit_cocone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(X : 𝒟) {Y : 𝒞} (f : hom₂ (of_cat_obj₂ Y) (F_obj X))
(ih_f : motive f),
motive (by exactI colimit_cocone_comp₂ F_obj @F_hom X f))
(is_limit : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(cone_obj : 𝒞) (cone : Π (X : 𝒟), hom₂ (of_cat_obj₂ cone_obj) (F_obj X))
(ih_cone : Π (X : 𝒟), motive (cone X)),
motive (by exactI is_limit₂ F_obj @F_hom (of_cat_obj₂ cone_obj) cone)) :
motive f :=
@hom₂.rec_on 𝒞 _ (λ A B f, ∀ (h : A = of_cat_obj₂ X),
motive (show hom₂ (of_cat_obj₂ X) B, from eq.rec_on h f))
(of_cat_obj₂ X) Y f
(λ A B g h, begin
have := of_cat_obj₂_injective h,
subst this,
dsimp,
exact of_cat_hom g
end)
begin
intros,
simp [limit_obj₂, of_cat_obj₂, limit_obj₁] at h,
contradiction
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ A B g ih₂ h,
subst h,
exact colimit_cocone_comp _ _ _ _ (ih₂ rfl)
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ cone_obj cone ih₂ h,
subst h,
exact is_limit _ _ _ _ (λ A, ih₂ A rfl),
end
begin
intros,
simp [colimit_obj₂, of_cat_obj₂] at h,
contradiction
end
rfl
def hom₂_limit_obj_rec_on
{motive : Π {𝒟 : Type} [category 𝒟] {F_obj : 𝒟 → obj₂ 𝒞}
{F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)} {Y : obj₂ 𝒞},
hom₂ (by exactI limit_obj₂ F_obj @F_hom) Y → Sort*}
{𝒟 : Type} [category 𝒟] {F_obj : 𝒟 → obj₂ 𝒞}
{F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)} {Y : obj₂ 𝒞}
(f : hom₂ (limit_obj₂ F_obj @F_hom) Y)
(limit_cone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(X : 𝒟) {Y : obj₂ 𝒞} (f : hom₂ (F_obj X) Y),
by exactI motive (limit_cone_comp₂ F_obj @F_hom X f))
(colimit_cocone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(X : 𝒟)
{ℰ : Type} [category ℰ] (G_obj : ℰ → obj₂ 𝒞)
(G_hom : Π {X Y : ℰ}, (by exactI X ⟶ Y) → hom₂ (G_obj X) (G_obj Y))
(f : hom₂ (by exactI limit_obj₂ G_obj @G_hom) (F_obj X))
(ih_f : by exactI motive f),
by exactI motive (colimit_cocone_comp₂ F_obj @F_hom X f))
(is_limit : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
{ℰ : Type} [category ℰ] (G_obj : ℰ → obj₂ 𝒞)
(G_hom : Π {X Y : ℰ}, (by exactI X ⟶ Y) → hom₂ (G_obj X) (G_obj Y))
(cone : Π (X : 𝒟), hom₂ (by exactI limit_obj₂ G_obj @G_hom) (F_obj X))
(ih_cone : Π (X : 𝒟), by exactI motive (cone X)),
by exactI motive (is_limit₂ F_obj @F_hom (limit_obj₂ G_obj @G_hom) cone)) :
motive f := sorry
-- @hom₂.rec_on 𝒞 _ (λ A B f, ∀ (h : A = limit_obj₂ F_obj @F_hom),
-- motive (show hom₂ (limit_obj₂ F_obj @F_hom) B, from eq.rec_on h f))
-- (limit_obj₂ F_obj @F_hom) Y f
-- begin
-- intros,
-- simp [limit_obj₂, of_cat_obj₂, limit_obj₁] at h,
-- contradiction
-- end
-- begin
-- intros ℰ _ G_obj G_hom ih₁ A B g ih₂ h,
-- simp [limit_obj₂, of_cat_obj₂, limit_obj₁] at h,
-- injection h with h₁ h₂ h₃ h₄,
-- unfreezingI { subst h₁ },
-- rw [heq_iff_eq] at h₂,
-- unfreezingI { subst h₂ },
-- dsimp at h₄,
-- dsimp,
-- end
-- begin
-- introsI 𝒟 _ F_obj F_hom ih₁ A B g ih₂ h,
-- subst h,
-- exact colimit_cocone_comp _ _ _ _ (ih₂ rfl)
-- end
-- begin
-- introsI 𝒟 _ F_obj F_hom ih₁ cone_obj cone ih₂ h,
-- subst h,
-- exact is_limit _ _ _ _ (λ A, ih₂ A rfl),
-- end
-- begin
-- intros,
-- simp [colimit_obj₂, of_cat_obj₂] at h,
-- contradiction
-- end
-- rfl
def hom₂_colimit_obj_rec_on
{motive : Π {𝒟 : Type} [category 𝒟] {F_obj : 𝒟 → obj₂ 𝒞}
{F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)} {Y : obj₂ 𝒞},
hom₂ (by exactI colimit_obj₂ F_obj @F_hom) Y → Sort*}
{𝒟 : Type} [category 𝒟] {F_obj : 𝒟 → obj₂ 𝒞}
{F_hom : Π {X Y : 𝒟}, (X ⟶ Y) → hom₂ (F_obj X) (F_obj Y)} {Y : obj₂ 𝒞}
(f : hom₂ (limit_obj₂ F_obj @F_hom) Y)
(limit_cone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(X : 𝒟) {Y : obj₂ 𝒞} (f : hom₂ (F_obj X) Y),
by exactI motive (limit_cone_comp₂ F_obj @F_hom X f))
(colimit_cocone_comp : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
(X : 𝒟)
{ℰ : Type} [category ℰ] (G_obj : ℰ → obj₂ 𝒞)
(G_hom : Π {X Y : ℰ}, (by exactI X ⟶ Y) → hom₂ (G_obj X) (G_obj Y))
(f : hom₂ (by exactI limit_obj₂ G_obj @G_hom) (F_obj X))
(ih_f : by exactI motive f),
by exactI motive (colimit_cocone_comp₂ F_obj @F_hom X f))
(is_limit : Π {𝒟 : Type} [category 𝒟] (F_obj : 𝒟 → obj₂ 𝒞)
(F_hom : Π {X Y : 𝒟}, (by exactI X ⟶ Y) → hom₂ (F_obj X) (F_obj Y))
{ℰ : Type} [category ℰ] (G_obj : ℰ → obj₂ 𝒞)
(G_hom : Π {X Y : ℰ}, (by exactI X ⟶ Y) → hom₂ (G_obj X) (G_obj Y))
(cone : Π (X : 𝒟), hom₂ (by exactI limit_obj₂ G_obj @G_hom) (F_obj X))
(ih_cone : Π (X : 𝒟), by exactI motive (cone X)),
by exactI motive (is_limit₂ F_obj @F_hom (limit_obj₂ G_obj @G_hom) cone)) :
motive f := sorry
def comp₂ {X Y : obj₂ 𝒞} (f : hom₂ X Y) : Π {Z : obj₂ 𝒞}, hom₂ Y Z → hom₂ X Z :=
hom₂.rec_on f
begin
intros X Y f Z g,
refine hom₂_of_cat_obj_rec_on g _ _ _,
{ intros B g,
exact of_cat_hom₂ (f ≫ g) },
{ introsI 𝒟 _ F_obj F_hom ih₁ B g ih₂,
exact colimit_cocone_comp₂ F_obj _ _ ih₂ },
{ introsI 𝒟 _ F_obj F_hom ih₁ cone ih₂,
exact is_limit₂ _ _ _ (λ X, ih₂ _) }
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ A B f ih₂ Z g,
refine limit_cone_comp₂ _ _ _ (ih₂ g),
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ A B f ih₂ Z g,
refine ih₂ _,
admit
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ cone_obj cone ih₂ Z g,
revert ih₂,
refine hom₂_limit_obj_rec_on g _ _ _,
{ introsI ℰ _ G_obj G_hom A B g ih₂,
exact ih₂ A g },
{ introsI ℰ _ F_obj F_hom A ℱ _ G_obj G_hom g ih₃ ih₂,
exact colimit_cocone_comp₂ _ _ A (ih₃ @ih₂) },
{ introsI ℰ _ F_obj F_hom ℱ _ G_obj G_hom ih₃ ih₄ ih₂,
exact is_limit₂ _ _ _ (λ X, ih₄ _ @ih₂) }
end
begin
introsI 𝒟 _ F_obj F_hom ih₁ cocone_obj cocone ih₂ Z g,
exact is_colimit₂ _ _ _ (λ A, ih₂ _ g)
end
end bicompletion_aux
|
lemma mem_interior_cball: "x \<in> interior S \<longleftrightarrow> (\<exists>e>0. cball x e \<subseteq> S)" |
Generation Next is a friendly competition, so there is no need to be angry about anything that happens here. I'm simply trying to give exposure to artists who may not have had a chance to get on any platform before; win, lose or draw, life goes on tomorrow.
With that being said, one of the contestants from yesterday's Generation Next felt slighted, so there is a simple answer to that: let's just run back both songs and see who can win tonight.
Golden Child vs. Moka Blast: let's see who can win it tonight and come back for Generation Next next Monday. Vote now for your favorite song. |
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal4.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Derive Show for lst.
Derive Arbitrary for lst.
Instance Dec_Eq_lst : Dec_Eq lst.
Proof. dec_eq. Qed.
Lemma conj8synthconj6 : forall (lv0 : lst), (@eq natural (len lv0) (len lv0)).
Admitted.
QuickChick conj8synthconj6.
|
theory T163
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, meet(y, z)) = meet(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
[STATEMENT]
lemma removeProcs_length:
"removeProcs ps = (dead,sd,ps',dcs) \<Longrightarrow> length ps' \<le> length ps"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. removeProcs ps = (dead, sd, ps', dcs) \<Longrightarrow> length ps' \<le> length ps
[PROOF STEP]
using removeProcs_length'
[PROOF STATE]
proof (prove)
using this:
length (fst (snd (snd (removeProcs ?ps)))) \<le> length ?ps
goal (1 subgoal):
1. removeProcs ps = (dead, sd, ps', dcs) \<Longrightarrow> length ps' \<le> length ps
[PROOF STEP]
by (metis fst_conv snd_conv) |
# Exercise 2b: Modeling steady-state heat flow in the lithosphere
Elco Luijendijk
November 2019
<[email protected]>
## Objectives
* Learn to model heat flow in the crust and lithosphere using Python
* Learn what typical geothermal gradients in the lithosphere look like and how they are affected by thermal parameters and the thickness of the lithosphere
**Deadline**: **17 Jan 2020**. Hand in a version of your jupyter notebook and a short word document with answers to the assignments and the result figures of your numerical model.
**Grading**: Each assignment is 1 point, for a total of 5 points.
Do not hesitate to ask questions if you get stuck anywhere. You can reach me by email <[email protected]> or drop by my office, room 122 in the Structural Geology department.
*Good luck !*
## Introduction
In this exercise we will update the notebook that you completed in exercise 2a to model steady-state heat flow instead of groundwater flow. Heat conduction and groundwater flow are governed by very similar diffusion laws. We will adjust the parameters and boundary conditions of your groundwater model to instead calculate steady-state geothermal gradients in the lithosphere.
## Rewrite your notebook to model heat flow
The heat flow equation that we will use in this exercise is a combination of Fourier's law:
\begin{equation}
q = - K \dfrac{\partial T}{\partial x}
\end{equation}
and the heat balance equation:
\begin{equation}
\dfrac{\partial q}{\partial x} = W
\end{equation}
Note the similarity of Fourier's law and Darcy's law. Instead of hydraulic head (*h*) we use temperature (*T*), and instead of hydraulic conductivity we use thermal conductivity (both denoted by *K*). The source term in this case is not recharge or groundwater pumping, but the generation of heat by the decay of radioactive elements.
Copy the fast version of the diffusion function (``solve_steady_state_diffusion_eq_faster``) from your notebook of exercise 2a into this notebook below.
Change the function into a heat flow model by changing all the variables called ``h``, ``h_new``, ``h_old`` to ``T``, ``T_new``, ``T_old``.
```python
# importing external python modules
# numpy for working with arrays:
import numpy as np
# and matplotlib to make nice looking figures
import matplotlib.pyplot as pl
```
```python
# copy your finite difference diffusion function here...
```
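If you want to check your own adapted version, a minimal sketch of what the heat flow function could look like is given below. It assumes that the exercise 2a function used a fixed number of Jacobi-style iterations and that its argument order is ``(dx, K, W_array, T)``; your own function may differ, so treat this purely as a reference and keep working with your exercise 2a code.
~~~~python
# A sketch only: the signature, the number of iterations and the boundary
# handling are assumptions; adapt it to your own exercise 2a function.
def solve_steady_state_diffusion_eq_faster(dx, K, W_array, T, n_iterations=10000):
    T_new = T.copy()
    for it in range(n_iterations):
        T_old = T_new.copy()
        # interior nodes: discretized steady-state heat flow equation
        # (T[i+1] - 2 T[i] + T[i-1]) / dx**2 = -W[i] / K, solved for T[i]
        T_new[1:-1] = 0.5 * (T_old[2:] + T_old[:-2]) + W_array[1:-1] * dx**2 / (2.0 * K)
        # top boundary: specified surface temperature
        T_new[0] = 10.0
        # bottom boundary: insulated in exercise 2a, changed to a specified
        # temperature of 1300 degrees C in the next section
        T_new[-1] = T_new[-2]
    return T_new
~~~~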
**Assignment 1** Write down the finite difference approximation of the steady-state heat flow equation. This is the heat flow version of equation 10 in your handout of exercise 1. What units do heat flow (*q*) and heat production (*W*) have?
## Assigning new parameters
Next we have to assign new parameter values in our notebook. We will model the geothermal gradient in the lithosphere. We will increase the size of our model (variable ``L``) to 100 km, which is a value representative of the thickness of the continental lithosphere. To change this, copy the code block where you assign parameters in exercise 2a below and change the line assigning ``L`` in your notebook like this:
~~~~python
L = 100000
~~~~
For thermal conductivity we will use an initial value of 2.5 W m^-1 K^-1. Our model will be one-dimensional, so the thickness parameter ``b`` should be equal to 1. For the grid cell size of our numerical model we will use a value of 200 m (``dx = 200``). For now we will leave the source term at zero, so ``W_array[:] = 0``.
```python
# copy the code blocks where you assign parameters here....
```
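For reference, a parameter block along the following lines would match the values given above. The names ``x``, ``n_nodes`` and ``T0`` are guesses at what your exercise 2a notebook uses for the node coordinates, the number of nodes and the initial temperature array, so rename them to match your own code where needed.
~~~~python
# model domain: thickness of the continental lithosphere (m)
L = 100000
# thermal conductivity (W m^-1 K^-1), replaces hydraulic conductivity
K = 2.5
# the model is one-dimensional
b = 1
# grid cell size (m)
dx = 200
# node coordinates and number of nodes (names assumed, adjust to your notebook)
x = np.arange(0, L + dx, dx)
n_nodes = len(x)
# source term (radiogenic heat production, W m^-3), zero for now
W_array = np.zeros(n_nodes)
W_array[:] = 0
# initial guess for the temperature array
T0 = np.zeros(n_nodes)
~~~~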
## Adjust the boundary conditions
In contrast to the groundwater model, the right hand side / bottom of our model domain is not insulated. Instead there is a heat flux to the lithosphere from the asthenosphere. The base of the lithosphere is usually defined as a thermal boundary, and this boundary is commonly assumed to be at a temperature of 1300 °C. This means that we can define the right hand boundary condition as a specified temperature of 1300 °C. Change the following line in the ``solve_steady_state_diffusion_eq_faster`` function:
~~~~python
T_new[-1] = ...
~~~~
to
~~~~python
T_new[-1] = 1300.0
~~~~
This makes sure that the last node in our model always has a temperature of 1300 °C. For the top boundary we can use an average global surface temperature of 10 °C.
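Inside the iteration loop of your function, the two boundary assignments then read as follows (a sketch; the surrounding loop comes from your own exercise 2a code):
~~~~python
# inside the iteration loop of solve_steady_state_diffusion_eq_faster:
T_new[0] = 10.0      # average global surface temperature (top boundary)
T_new[-1] = 1300.0   # temperature at the base of the lithosphere (bottom boundary)
~~~~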
## Calculate a steady-state geothermal gradient
With the newly assigned variables and boundary condition we are ready to run our numerical model and calculate an average steady-state geothermal gradient in the lithosphere.
**Assignment 2** Copy the line where you run the diffusion function (``h = solve_steady....``) from exercise 2a. Run your new heat flow model. Try to experiment with the number of iterations. What number of iterations do you need (approximately) to reach a steady-state geothermal gradient?
```python
# copy the line that calls the diffusion function here:
```
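For reference, the call could look like the line below, assuming the argument order of the sketch given earlier; the keyword argument for the number of iterations is an assumption, and it is the value to experiment with here.
~~~~python
# sketch: the argument order and the n_iterations keyword are assumptions
T = solve_steady_state_diffusion_eq_faster(dx, K, W_array, T0, n_iterations=50000)
~~~~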
## Adding realistic thermal parameters
In exercise 1 we have already set up an array for the source term (``W``), which we can now use to vary heat production in the lithosphere. As you have learned in the Python tutorial you can perform operations like assigning numbers to sections of arrays like this:
~~~~python
W_array[10:30] = 1e-6
~~~~
This assigns a heat production of 1 x 10^-6 W m^-3 to nodes 10 to 30. Note: this is an example, do not insert this line into your script yet. There is an even more convenient way to assign values, based on depth. We can also assign values to parts of arrays like this:
~~~~python
W_array[x < 10000] = 1e-6
~~~~
This will change the heat production values in the upper 10000 m of your model domain. For example, to assign numbers to a section running from 10000 to 20000 m, try the following:
~~~~python
W_array[(x >= 10000) & (x < 20000)] = 1e-6
~~~~
Note that the sign ``>=`` means larger than or equal to a number.
Look up heat production for the upper crust, lower crust and the mantle from Cloetingh et al. (2005), Table 1. Use the reference list at the bottom of this notebook and look up the paper on Google Scholar. Follow the examples above to assign heat production values for these three sections of the lithosphere. For the depth of the upper crust we can use a value of 12 km and for the depth of the lower crust 35 km. Try to place the new lines of code where you define heat production *after* the line where you calculate the first steady-state geothermal gradient (i.e., after the first call of the ``solve_steady_state_diffusion_eq_faster`` function). Next, add a line to calculate the new geothermal gradient including heat production:
~~~~python
T2 = solve_steady_state_diffusion_eq_faster(dx, K, W_array, T0)
~~~~
This makes sure that you store the new more exact geothermal gradient in a new variable ``T2``, which you can then compare to the old geothermal gradient ``T``.
Note that thermal conductivity also varies in the crust and in the mantle. However, the solution we derived for steady-state groundwater flow and heat flow equations assumes that *K* is constant, since we moved *K* out of the derivative. See the handout of exercise 1, equations 5 and 6. Therefore with our current simplified model we cannot model a variable thermal conductivity.
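Putting the pieces together, the heat production assignment could look like the sketch below. The heat production numbers are placeholders only; replace them with the values you looked up in Table 1 of Cloetingh et al. (2005) before running your model.
~~~~python
# placeholder heat production values (W m^-3); replace with Cloetingh et al. (2005), Table 1
W_upper_crust = 1.0e-6
W_lower_crust = 0.5e-6
W_mantle = 0.05e-6

W_array[x < 12000] = W_upper_crust
W_array[(x >= 12000) & (x < 35000)] = W_lower_crust
W_array[x >= 35000] = W_mantle

# recalculate the steady-state geothermal gradient with heat production included
T2 = solve_steady_state_diffusion_eq_faster(dx, K, W_array, T0)
~~~~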
**Assignment 3** Run your new model with improved thermal parameters. Plot both the old and new temperature curve in the same panel. Try to explain the shape of the new geothermal gradient in a few words or sentences, why is there a curvature?
## Calculate heat flow
We can use Fourier's law to calculate heat flow from the geothermal gradient that you just calculated. Use the following lines to calculate the gradient and the heat flow in the lithosphere (note that the minus sign of Fourier's law is omitted here, so a positive value of *q* simply represents the magnitude of the upward heat flow):
```python
T_gradient = (T2[1:] - T2[:-1]) / dx
q = T_gradient * K
```
## Add temperature and heat flow to a figure
We will try to make a figure that shows both the change in temperature and heat flow in the lithosphere. First copy the code block that generates a figure from exercise 2a below:
```python
# copy code block that makes a figure here:
```
First try to change the command where you plot temperature so that temperature is shown on the x-axis and depth (x) on the y-axis, instead of the other way around.
Next, we will set up a figure with two panels, so that temperature and heat flow are shown side by side. The following line creates a new figure containing two panels:
~~~~python
fig, panels = pl.subplots(1, 2, figsize=(10, 6))
~~~~
*Replace* the current line that creates the new figure with this new line. The variable ``panels`` is now a list containing two panels. Each panel can be called using either ``panels[0]`` or ``panels[1]``.
Replace all existing lines of code where you use ``panel.`` with ``panels[0].``. Then add some lines to plot heat flow in the second panel (``panels[1]``):
~~~~python
x_mid = (x[1:] + x[:-1]) / 2.0
panels[1].plot(q, x_mid, color='black')
~~~~
We can also flip the y-axes to make sure the surface (x=0) is at the top of the figure and not at the bottom:
~~~~python
panels[0].set_ylim(100000, 0)
panels[1].set_ylim(100000, 0)
~~~~
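Combined, the plotting cell could end up looking roughly like this sketch. The axis labels and the names ``T``, ``T2``, ``x`` and ``q`` assume the variables used earlier in this exercise, so adjust them if your notebook differs.
~~~~python
fig, panels = pl.subplots(1, 2, figsize=(10, 6))

# left panel: old and new geothermal gradients, temperature on the x-axis
panels[0].plot(T, x, color='gray', label='no heat production')
panels[0].plot(T2, x, color='black', label='with heat production')
panels[0].set_xlabel('Temperature (degrees C)')
panels[0].set_ylabel('Depth (m)')
panels[0].legend()

# right panel: heat flow vs depth
x_mid = (x[1:] + x[:-1]) / 2.0
panels[1].plot(q, x_mid, color='black')
panels[1].set_xlabel('Heat flow (W m^-2)')

# flip the y-axes so the surface (x=0) is at the top
panels[0].set_ylim(100000, 0)
panels[1].set_ylim(100000, 0)
~~~~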
**Assignment 4** Calculate heat flow in the lithosphere and make a figure of temperature and heat flow. What percentage of the heat flow at the surface is supplied by the mantle? (hint: compare *q* at x=0 with *q* at x=mantle depth).
Note: make sure that your model has reached steady state; you may need to run significantly more iterations once you add heat production.
## Heat flow in different geological settings
The thickness of the lithosphere exerts a strong influence on geothermal gradients, because its base is a thermal boundary. We will calculate representative geothermal gradients for three geological settings with a different lithospheric thickness: 1) a craton, 2) average continental crust and 3) oceanic crust, and pick a fourth location that you are interested in (the Goettingen campus, Mount Everest, Antarctica, etc.).
For each of these settings, look up values for the lithosphere thickness for a location of your choice. For oceanic lithosphere look up the thickness in Conrad et al. (2006). Refer to Artemieva (2006) for estimates of the thickness of the continental lithosphere. Estimate the thickness of the crust using Reguzzoni et al. (2013), Fig. 12. You can find these references below.
The thermal parameters of oceanic and continental lithosphere also vary. For the continental lithosphere you can use the parameters that you have already assigned. The heat generation in the oceanic lithosphere is relatively low, on average 0.5 x 10^-6 W m^-3 (Allen & Allen 2005, note: this reference is not available online). The average thermal conductivity (*K*) of the oceanic lithosphere is 3.14 W m^-1 K^-1 (Stein 1992).
**Assignment 5** Model heat flow for the four different geological settings, make a figure of temperature and heat flow and compare the modeled heat flow with a database of global heat flow (Davies & Davies 2010): http://www.heatflow.und.edu/. Describe in words why the heat flow and geothermal gradients vary between the different settings, and why the models match the global heat flow data or why there is a difference.
## References
Allen, P.A., Allen, J.R., 2005. Basin analysis: principles and applications. Blackwell publishing, Oxford.
Artemieva, I.M., 2006. Global thermal model TC1 for the continental lithosphere: Implications for lithosphere secular evolution. Tectonophysics 416, 245–277.
Cloetingh, S.A.P.L., Ziegler, P.A., Beekman, F., Andriessen, P.A.M., Hardebol, N., Dèzes, P., 2005. Intraplate deformation and 3D rheological structure of the Rhine Rift System and adjacent areas of the northern Alpine foreland. Int. J. Earth Sci. 94, 758–778.
Conrad, C.P., Lithgow-Bertelloni, C., 2006. Influence of continental roots and asthenosphere on plate-mantle coupling. Geophys. Res. Lett. 33, 2–5. doi:10.1029/2005GL025621
Davies, J.H., Davies, D.R., 2010. Earth’s surface heat flux. Solid Earth 1, 5–24. doi:10.5194/se-1-5-2010
Reguzzoni, M., Sampietro, D., Sansò, F., 2013. Global moho from the combination of the CRUST2.0 model and GOCE data. Geophys. J. Int. 195, 222–237. doi:10.1093/gji/ggt247
Stein, C.A., Stein, S., 1992. A model for the global variation in oceanic depth and heat flow with lithospheric age. Nature 356, 133–135. doi:10.1038/359123a0
(Note: use Google Scholar to find any of these papers, https://scholar.google.com/)
```python
```
|
As we previously reported here, Papua New Guinea (PNG) is working on plans to improve its ICT infrastructure and to address its connectivity challenges. It hopes to use its position as chair of APEC in 2018 to advance the agenda of digital inclusion. The APEC 2018 program runs under the theme of Harnessing Inclusive Opportunities, Embracing the Digital Future. The program started in March this year and aims to improve digital inclusion and connectivity, which will help narrow the digital gap in the APEC community and promote social and economic growth.
David Toua, Chairman of the APEC Business Advisory, spoke of ways PNG can use its APEC membership to strengthen the role of technology. According to him, PNG can benefit from the large APEC economies by adapting their technology to PNG's markets. He added that PNG had taken a big step forward in communications and that it is now time for the internet and other technology products. The move will help the country's accessibility and reputation, and it will open an avenue for small and medium-sized businesses.
Senior officials will hold discussions on how to reach the goals set out in the APEC Internet and Digital Economy Roadmap. The roadmap was drafted in 2017 and has three key pillars: universal broadband access, the development of digital infrastructure, and a government policy framework for the digital and internet economy. Improving digital infrastructure in PNG is especially important for attaining the roadmap's objectives. The government took an important step in late 2017 by signing a deal with fellow APEC member Australia for the construction of an underwater high-speed telecoms cable between Australia and Port Moresby. The 3,000 km cable is worth almost $100 million, the larger part of which will be funded by the Australian government, and its construction will start before the end of this year.
As soon as construction is complete, the cable will improve connection speed and capacity for PNG's homes and businesses. Much of PNG's existing international digital infrastructure is under-utilized or aging. Two submarine cables, PPC-1 and APNG-2, are currently in place, and together they provide roughly 2.5 Gbps of capacity to 7.6 million people. According to the Lowy Institute, an Australian think tank, demand is expected to increase to more than 450 Gbps by 2040. Despite the rising demand for bandwidth, overall internet penetration is still low, at almost 11.7% in 2016, well below the global average. Mobile coverage has gone up from 3% of the population in 2006 to 80% as of 2016, but mobile penetration stood at 54.2% as of mid-2016. All this is despite the government's reforms and the opening up of the telecommunications sector to international competition in 2007. The data are from the UN's International Telecommunication Union. The country still records the highest cost of broadband and mobile internet in the region despite the increase in uptake, owing to its mountainous topography and dispersed population.
To tackle these challenges, the government plans to improve domestic mobile telephone infrastructure and to expand access to 3G and 4G internet. Additionally, the National Information and Communication Technology Authority is planning to support the expansion of high-speed broadband to selected villages. Ivan Pomelu, chair of the senior officials meeting at APEC 2018, said at an APEC summit in March 2018 that smartphones offer an opportunity for broader engagement in the market economy, especially by SMEs and women. Data collection and the monitoring of and early warning for natural disasters and adverse weather conditions also hold strong digital potential.
The urgency of improving these capabilities came into focus after the magnitude 7.5 earthquake that struck PNG's Hela Province on 26 February. The earthquake disrupted communications, which hampered relief and rescue operations and made it much more difficult to collect information on casualties, damage and the specific needs of affected communities. |
import .Cantor_Bendixson
variables {α : Type*}
open topological_space
open set
theorem image_Inter' {β γ: Type*} [inhabited γ] {f : α → β}
(hf : function.injective f) (s : γ → set α) :
f '' (⋂ i : γ, s i) = ⋂ i : γ, f '' (s i) :=
begin
apply subset_antisymm,
{ apply image_Inter_subset, },
intros y hy,
rw mem_Inter at hy,
rcases hy default with ⟨x,hx,rfl⟩,
refine ⟨x,_,rfl⟩,
rw mem_Inter,
intro i,
rcases hy i with ⟨z,hz1,hz2⟩,
convert hz1,
symmetry,
exact hf hz2,
end
instance polish_of_countable [h : countable α] [topological_space α] [discrete_topology α]
: polish_space α :=
begin
rw countable_iff_exists_injective at h,
cases h with f hf,
have : closed_embedding f, {
apply closed_embedding_of_continuous_injective_closed,
{ apply continuous_of_discrete_topology},
{ exact hf,},
intros t ht,
apply is_closed_discrete,
},
apply this.polish_space,
end
instance second_countable_of_polish [topological_space α] [polish_space α] :
second_countable_topology α :=
polish_space.second_countable _
section ctble_gen
variable [s : measurable_space α]
include s
open measurable_space
variable (α)
--Say a measurable space is countably generated if
--the sigma algebra can be generated by a countable set
class countable_generated : Prop :=
(is_generated_countable : ∃ b : set (set α), b.countable ∧ s = generate_from b)
lemma nat_gen [h : countable_generated α] : ∃ f : ℕ → (set α), s = generate_from (range f) :=
begin
obtain ⟨b,bct,hb⟩ := h.is_generated_countable,
let c := b ∪ {univ},
have cct : c.countable,
{ apply countable.union bct,
apply countable_singleton, },
have cnonempty : c.nonempty,
{ use univ,
right,
simp, },
obtain ⟨f,hf⟩ := (set.countable_iff_exists_surjective cnonempty).mp cct,
use (λ n, (f n).val),
rw hb,
apply le_antisymm, {
apply measurable_space.generate_from_mono,
intros s sb,
cases hf ⟨s, or.inl sb⟩ with n hn,
use n,
simp[hn],
},
apply generate_from_le,
rintros t ⟨n,hn⟩,
have : t ∈ c, {rw ← hn, exact (f n).property},
cases this with q q, {
exact measurable_set_generate_from q,
},
rw mem_singleton_iff at q,
rw q,
exact measurable_set.univ,
end
variable {α}
namespace countable_generated
--The borel sets are countably generated if a space is second countable
instance borel_countable_generated_of_second_countable [topological_space α] [borel_space α] [second_countable_topology α] : countable_generated α :=
begin
constructor,
have := topological_space.exists_countable_basis α,
rcases this with ⟨b,bct,bnontrivial,bbasis⟩,
use [b,bct],
borelize α,
apply bbasis.borel_eq_generate_from,
end
#check @measurable_set_generate_from
open_locale classical
--Any countably generated measurable space for which singletons are measurable
--admits a measurable injection to the Cantor space ℕ → bool
--Note : bool is probably not the right type to use here.
variable (α)
theorem measurable_inj_cantor_of_countable_generated_of_singleton_measurable
[countable_generated α] [measurable_singleton_class α] :
∃ f : α → (ℕ → bool), measurable f ∧ function.injective f :=
begin
--obtain ⟨b,bct,hb⟩ := h.is_generated_countable,
obtain ⟨e,hb⟩ := nat_gen α,
let f : α → ℕ → bool := begin
intros x n,
exact (x ∈ e n),
end,
use f, split,
{ rw measurable_pi_iff,
intros n,
apply measurable_to_countable,
intros y,
cases f y n; rw hb; simp[preimage,f],
{ apply measurable_set.compl,
apply measurable_set_generate_from,
simp, },
apply measurable_set_generate_from,
simp, },
intros x y hxy,
--have := @measurable_set_eq _ _ _ y,
--let p : set α → Prop := λ C, C y → C x,
have : ∀ B : set α, measurable_set B → (x ∈ B ↔ y ∈ B),
{ intros B,
rw hb,
apply generate_from_induction (λ C, x ∈ C ↔ y ∈ C) (range e),
{ intros t ht,
obtain ⟨n, rfl⟩ := ht,
have : f x n = f y n := by rw hxy,
dsimp[f] at this,
simp only [bool.to_bool_eq] at this,
exact this, }, { tauto },
{ intros t ht,
tauto, },
intros t ht,
rw[mem_Union,mem_Union],
dsimp at ht,
split; rintros ⟨n,hn⟩; use n, { rwa ← ht n, }, rwa ht n, },
specialize this _ (@measurable_set_eq _ _ _ y),
dsimp at this,
rw this,
end
variable {α}
end countable_generated
end ctble_gen
section csb
variables {β : Type*}
variables [measurable_space α] [measurable_space β]
namespace measurable_embedding
open measurable_equiv
open measurable_space
--set_option pp.implicit true
open_locale classical
noncomputable
--If A is a measurable set in a measurable space α,
--Then α is measurably equivalent to A ⊕ Aᶜ via the natural map
def subtype_sum_compl {A : set α} (hA : measurable_set A) : A ⊕ (Aᶜ : set α) ≃ᵐ α :=
{ to_fun := sum.elim (λ x, x.val) (λ x, x.val),
inv_fun := begin
intros x,
by_cases x ∈ A, { left, exact ⟨x,h⟩, },
right, exact ⟨x,h⟩,
end,
left_inv := begin
intros x,
cases x; simp,
intros h,
apply x.property,
exact h,
end,
right_inv := begin
intros x,
by_cases x ∈ A; dsimp, {rw dif_pos h, simp,},
rw dif_neg h, simp,
end,
measurable_to_fun := begin
dsimp,
apply measurable.sum_elim; apply measurable_subtype_coe,
end,
measurable_inv_fun := begin
dsimp,
apply measurable.dite; measurability,
end }
noncomputable
--A measurable embedding induces a measurable equivalence between any subset of the domain
--and its image
--Essentially already in mathlib, but the hypothesis does not use the bundled name
--"measurable_embedding"
def set_image {f : α → β} (hf : measurable_embedding f) (A : set α) : A ≃ᵐ f '' A :=
begin
apply measurable_equiv.set.image, {exact hf.injective}, {exact hf.measurable},
exact hf.measurable_set_image',
end
noncomputable
--The Cantor-Schroeder-Bernstein Theorem for measurable spaces :
--If two spaces have measurable embeddings to each other,
--then they are measurably equivalent.
def schroeder_bernstein {f : α → β} {g : β → α}
(hf : measurable_embedding f)(hg : measurable_embedding g) : (α ≃ᵐ β) :=
begin
let F : set α → set α := λ A, (g '' (f '' A)ᶜ)ᶜ,
--We follow the proof of CSB in mathlib, the crux of which is finding a fixed point of this F.
--However, we must find this fixed point manually instead of invoking Knaster-Tarski
--in order to make sure it is measurable.
suffices : Σ' A : set α, measurable_set A ∧ F A = A,
{ rcases this with ⟨A,Ameas,Afp⟩,
let B := f '' A,
have Bmeas : measurable_set B := by {rw hf.measurable_set_image, exact Ameas,},
apply (subtype_sum_compl Ameas).symm.trans,
apply measurable_equiv.trans _ (subtype_sum_compl Bmeas),
apply sum_congr,
{ apply hf.set_image, },
have : Aᶜ = g '' Bᶜ,
{ apply compl_injective,
rw ← Afp,
simp, },
rw this,
apply measurable_equiv.symm,
apply hg.set_image, },
have Fmono : ∀ {A B}, A ⊆ B → F A ⊆ F B,
{ intros A B hAB,
rw compl_subset_compl,
apply image_subset,
rw compl_subset_compl,
apply image_subset,
assumption, },
let X : ℕ → set α := begin
intro n,
induction n with n ih,
{ exact univ, },
exact F ih,
end,
use ⋂ n : ℕ, X n, split,
{ apply measurable_set.Inter,
intros n,
induction n with n ih,
{ exact measurable_set.univ, },
apply measurable_set.compl,
apply hg.measurable_set_image',
apply measurable_set.compl,
apply hf.measurable_set_image',
exact ih, },
apply subset_antisymm,
{ apply subset_Inter,
intros n,
cases n,
{ apply subset_univ, },
apply Fmono,
apply Inter_subset, },
rintros x hx ⟨y,hy,rfl⟩,
rw mem_Inter at hx,
apply hy,
rw image_Inter' hf.injective, swap, {apply_instance},
rw mem_Inter, intro n,
by_contradiction, --!
apply (hx n.succ),
exact ⟨y,h,rfl⟩,
end
end measurable_embedding
noncomputable
--The Cantor-Schroeder-Bernstein theorem for Polish spaces:
--If two Polish spaces admit Borel injections to each other, they are Borel isomorphic.
def borel_schroeder_bernstein [topological_space α] [polish_space α] [borel_space α]
[topological_space β] [polish_space β] [borel_space β] {f : α → β} {g : β → α}
(fmeas : measurable f) (finj : function.injective f)
(gmeas : measurable g) (ginj : function.injective g) :
α ≃ᵐ β :=
begin
have hf' := fmeas.measurable_embedding finj,
have hg' := gmeas.measurable_embedding ginj,
exact hf'.schroeder_bernstein hg',
end
end csb
section borel_iso
variables { β : Type*}
variables [measurable_space α] [topological_space α] [borel_space α]
variables [measurable_space β] [topological_space β] [borel_space β]
--a continuous function is Borel
--version of continuous.borel_measurable which uses the borel_space class
theorem borel_of_continuous {f : α → β} (hf : continuous f) : measurable f :=
begin
borelize[α, β],
apply hf.borel_measurable,
end
variables [polish_space α] [polish_space β]
--The Borel Isomorphism Theorem : Every uncountable polish space is
--Borel isomorphic to the reals (or Cantor space)
lemma equiv_cantor_of_uncountable (ha : ¬ countable α) : nonempty (α ≃ᵐ (ℕ → bool)) :=
begin
obtain ⟨f,hf1,hf2⟩ :=
countable_generated.measurable_inj_cantor_of_countable_generated_of_singleton_measurable α,
obtain ⟨g,-,hg1,hg2⟩ := @cantor_of_closed_unc α _ _ univ (is_closed_univ) _, swap,
{ intro h,
apply ha,
have := h.to_subtype,
apply @countable.of_equiv _ _ this,
exact equiv.set.univ α, },
have := borel_schroeder_bernstein hf1 hf2 (borel_of_continuous hg1) hg2,
use this,
end
--The Borel Isomorphism Theorem : Any two uncountable Polish spaces are
--Borel isomorphic
theorem borel_equiv_of_uncountable (ha : ¬countable α) (hb : ¬countable β)
: nonempty (α ≃ᵐ β) :=
begin
have := (equiv_cantor_of_uncountable ha).some.trans (equiv_cantor_of_uncountable hb).some.symm,
use this,
end
--The Borel Isomorphism Theorem : Two Polish spaces are Borel isomorphic
--if and only if they have the same cardinality.
theorem borel_equiv_iff_equiv : nonempty (α ≃ᵐ β) ↔ nonempty (α ≃ β) :=
begin
split; rintros ⟨f⟩,
{ use f.to_equiv },
by_cases (countable α),
{ have : countable β := @countable.of_equiv _ _ h f,
use f; rintros s -; apply countable.measurable_set;
apply countable.mono (subset_univ _); apply @set.countable_univ _ _; assumption},
have : ¬ countable β,
{ intros H, apply h,
exact @countable.of_equiv _ _ H f.symm, },
apply borel_equiv_of_uncountable h this,
end
end borel_iso
|
Formal statement is: lemma (in order_topology) order_tendsto_iff: "(f \<longlongrightarrow> x) F \<longleftrightarrow> (\<forall>l<x. eventually (\<lambda>x. l < f x) F) \<and> (\<forall>u>x. eventually (\<lambda>x. f x < u) F)" Informal statement is: A function $f$ tends to $x$ in the order topology if and only if for every $l < x$ and every $u > x$, there exists a neighborhood of $x$ such that $l < f(y) < u$ for all $y$ in that neighborhood. |
theory IpAddresses
imports IP_Addresses.IP_Address_toString
IP_Addresses.CIDR_Split
"../Common/WordInterval_Lists"
begin
\<comment> \<open>Misc\<close>
(*we don't have an empty ip space, but a space which only contains the 0 address. We will use the option type to denote the empty space in some functions.*)
lemma "ipset_from_cidr (ipv4addr_of_dotdecimal (0, 0, 0, 0)) 33 = {0}"
by(simp add: ipv4addr_of_dotdecimal.simps ipv4addr_of_nat_def ipset_from_cidr_large_pfxlen)
(*helper we use for spoofing protection specification*)
definition all_but_those_ips :: "('i::len word \<times> nat) list \<Rightarrow> ('i word \<times> nat) list" where
"all_but_those_ips cidrips = cidr_split (wordinterval_invert (l2wi (map ipcidr_to_interval cidrips)))"
lemma all_but_those_ips:
"ipcidr_union_set (set (all_but_those_ips cidrips)) =
UNIV - (\<Union> (ip,n) \<in> set cidrips. ipset_from_cidr ip n)"
apply(simp add: )
unfolding ipcidr_union_set_uncurry all_but_those_ips_def
apply(simp add: cidr_split_prefix)
apply(simp add: l2wi)
apply(simp add: ipcidr_to_interval_def)
using ipset_from_cidr_ipcidr_to_interval by blast
section\<open>IPv4 Addresses\<close>
subsection\<open>IPv4 Addresses in IPTables Notation (how we parse it)\<close>
context
notes [[typedef_overloaded]]
begin
datatype 'i ipt_iprange =
\<comment> \<open>Singleton IP Address\<close>
IpAddr "'i::len word"
\<comment> \<open>CIDR notation: addr/xx\<close>
| IpAddrNetmask "'i word" nat
\<comment> \<open>-m iprange --src-range a.b.c.d-e.f.g.h\<close>
| IpAddrRange "'i word" "'i word"
(*the range is inclusive*)
end
fun ipt_iprange_to_set :: "'i::len ipt_iprange \<Rightarrow> 'i word set" where
"ipt_iprange_to_set (IpAddrNetmask base m) = ipset_from_cidr base m" |
"ipt_iprange_to_set (IpAddr ip) = { ip }" |
"ipt_iprange_to_set (IpAddrRange ip1 ip2) = { ip1 .. ip2 }"
text\<open>@{term ipt_iprange_to_set} can only represent an empty set if it is an empty range.\<close>
lemma ipt_iprange_to_set_nonempty: "ipt_iprange_to_set ip = {} \<longleftrightarrow>
(\<exists>ip1 ip2. ip = IpAddrRange ip1 ip2 \<and> ip1 > ip2)"
apply(cases ip)
apply(simp; fail)
apply(simp add: ipset_from_cidr_alt bitmagic_zeroLast_leq_or1Last; fail)
apply(simp add:linorder_not_le; fail)
done
text\<open>maybe this is necessary as code equation?\<close>
lemma element_ipt_iprange_to_set[code_unfold]: "(addr::'i::len word) \<in> ipt_iprange_to_set X = (
case X of (IpAddrNetmask pre len) \<Rightarrow>
(pre AND ((mask len) << (len_of (TYPE('i)) - len))) \<le> addr \<and>
addr \<le> pre OR (mask (len_of (TYPE('i)) - len))
| IpAddr ip \<Rightarrow> (addr = ip)
| IpAddrRange ip1 ip2 \<Rightarrow> ip1 \<le> addr \<and> ip2 \<ge> addr)"
apply(cases X)
apply(simp; fail)
apply(simp add: ipset_from_cidr_alt; fail)
apply(simp; fail)
done
lemma ipt_iprange_to_set_uncurry_IpAddrNetmask:
"ipt_iprange_to_set (uncurry IpAddrNetmask a) = uncurry ipset_from_cidr a"
by(simp split: uncurry_splits)
text\<open>IP address ranges to \<open>(start, end)\<close> notation\<close>
fun ipt_iprange_to_interval :: "'i::len ipt_iprange \<Rightarrow> ('i word \<times> 'i word)" where
"ipt_iprange_to_interval (IpAddr addr) = (addr, addr)" |
"ipt_iprange_to_interval (IpAddrNetmask pre len) = ipcidr_to_interval (pre, len)" |
"ipt_iprange_to_interval (IpAddrRange ip1 ip2) = (ip1, ip2)"
lemma ipt_iprange_to_interval: "ipt_iprange_to_interval ip = (s,e) \<Longrightarrow> {s .. e} = ipt_iprange_to_set ip"
apply(cases ip)
apply(auto simp add: ipcidr_to_interval)
done
text\<open>A list of IP address ranges to a @{typ "'i::len wordinterval"}.
The nice thing is: the usual set operations are defined on this type.
We can use the existing function @{const l2wi_intersect} if we want the intersection of the supplied list\<close>
lemma "wordinterval_to_set (l2wi_intersect (map ipt_iprange_to_interval ips)) =
(\<Inter> ip \<in> set ips. ipt_iprange_to_set ip)"
apply(simp add: l2wi_intersect)
using ipt_iprange_to_interval by blast
text\<open>We can use @{const l2wi} if we want the union of the supplied list\<close>
lemma "wordinterval_to_set (l2wi (map ipt_iprange_to_interval ips)) = (\<Union> ip \<in> set ips. ipt_iprange_to_set ip)"
apply(simp add: l2wi)
using ipt_iprange_to_interval by blast
text\<open>A list of (negated) IP address to a @{typ "'i::len wordinterval"}.\<close>
definition ipt_iprange_negation_type_to_br_intersect ::
"'i::len ipt_iprange negation_type list \<Rightarrow> 'i wordinterval" where
"ipt_iprange_negation_type_to_br_intersect l = l2wi_negation_type_intersect (NegPos_map ipt_iprange_to_interval l)"
lemma ipt_iprange_negation_type_to_br_intersect: "wordinterval_to_set (ipt_iprange_negation_type_to_br_intersect l) =
(\<Inter> ip \<in> set (getPos l). ipt_iprange_to_set ip) - (\<Union> ip \<in> set (getNeg l). ipt_iprange_to_set ip)"
apply(simp add: ipt_iprange_negation_type_to_br_intersect_def l2wi_negation_type_intersect NegPos_map_simps)
using ipt_iprange_to_interval by blast
text\<open>The @{typ "'i::len wordinterval"} can be translated back into a list of IP ranges.
If a list of intervals is enough, we can use @{const wi2l}.
If we need it in @{typ "'i::len ipt_iprange"}, we can use this function.\<close>
definition wi_2_cidr_ipt_iprange_list :: "'i::len wordinterval \<Rightarrow> 'i ipt_iprange list" where
"wi_2_cidr_ipt_iprange_list r = map (uncurry IpAddrNetmask) (cidr_split r)"
lemma wi_2_cidr_ipt_iprange_list:
"(\<Union> ip \<in> set (wi_2_cidr_ipt_iprange_list r). ipt_iprange_to_set ip) = wordinterval_to_set r"
proof -
have "(\<Union> ip \<in> set (wi_2_cidr_ipt_iprange_list r). ipt_iprange_to_set ip) =
(\<Union>x\<in>set (cidr_split r). uncurry ipset_from_cidr x)"
unfolding wi_2_cidr_ipt_iprange_list_def by force
thus ?thesis using cidr_split_prefix by metis
qed
text\<open>For example, this allows the following transformation\<close>
definition ipt_iprange_compress :: "'i::len ipt_iprange negation_type list \<Rightarrow> 'i ipt_iprange list" where
"ipt_iprange_compress = wi_2_cidr_ipt_iprange_list \<circ> ipt_iprange_negation_type_to_br_intersect"
lemma ipt_iprange_compress: "(\<Union> ip \<in> set (ipt_iprange_compress l). ipt_iprange_to_set ip) =
(\<Inter> ip \<in> set (getPos l). ipt_iprange_to_set ip) - (\<Union> ip \<in> set (getNeg l). ipt_iprange_to_set ip)"
by (metis wi_2_cidr_ipt_iprange_list comp_apply ipt_iprange_compress_def ipt_iprange_negation_type_to_br_intersect)
definition normalized_cidr_ip :: "'i::len ipt_iprange \<Rightarrow> bool" where
"normalized_cidr_ip ip \<equiv> case ip of IpAddrNetmask _ _ \<Rightarrow> True | _ \<Rightarrow> False"
lemma wi_2_cidr_ipt_iprange_list_normalized_IpAddrNetmask:
"\<forall>a'\<in>set (wi_2_cidr_ipt_iprange_list as). normalized_cidr_ip a'"
apply(clarify)
apply(simp add: wi_2_cidr_ipt_iprange_list_def normalized_cidr_ip_def)
by force
lemma ipt_iprange_compress_normalized_IpAddrNetmask:
"\<forall>a'\<in>set (ipt_iprange_compress as). normalized_cidr_ip a'"
by(simp add: ipt_iprange_compress_def wi_2_cidr_ipt_iprange_list_normalized_IpAddrNetmask)
definition ipt_iprange_to_cidr :: "'i::len ipt_iprange \<Rightarrow> ('i word \<times> nat) list" where
"ipt_iprange_to_cidr ips = cidr_split (iprange_interval (ipt_iprange_to_interval ips))"
lemma ipt_ipvange_to_cidr: "ipcidr_union_set (set (ipt_iprange_to_cidr ips)) = (ipt_iprange_to_set ips)"
apply(simp add: ipt_iprange_to_cidr_def)
apply(simp add: ipcidr_union_set_uncurry)
apply(case_tac "(ipt_iprange_to_interval ips)")
apply(simp add: ipt_iprange_to_interval cidr_split_prefix_single)
done
(* actually, these are toString pretty printing helpers*)
definition interval_to_wi_to_ipt_iprange :: "'i::len word \<Rightarrow> 'i word \<Rightarrow> 'i ipt_iprange" where
"interval_to_wi_to_ipt_iprange s e \<equiv>
if s = e
then IpAddr s
else case cidr_split (WordInterval s e) of [(ip,nmask)] \<Rightarrow> IpAddrNetmask ip nmask
| _ \<Rightarrow> IpAddrRange s e"
lemma interval_to_wi_to_ipt_ipv4range: "ipt_iprange_to_set (interval_to_wi_to_ipt_iprange s e) = {s..e}"
proof -
from cidr_split_prefix_single[of s e] have
"cidr_split (WordInterval s e) = [(a, b)] \<Longrightarrow> ipset_from_cidr a b = {s..e}" for a b
by(simp add: iprange_interval.simps)
thus ?thesis
by(simp add: interval_to_wi_to_ipt_iprange_def split: list.split)
qed
fun wi_to_ipt_iprange :: "'i::len wordinterval \<Rightarrow> 'i ipt_iprange list" where
"wi_to_ipt_iprange (WordInterval s e) = (if s > e then [] else
[interval_to_wi_to_ipt_iprange s e])" |
"wi_to_ipt_iprange (RangeUnion a b) = wi_to_ipt_iprange a @ wi_to_ipt_iprange b"
lemma wi_to_ipt_ipv4range: "\<Union>(set (map ipt_iprange_to_set (wi_to_ipt_iprange wi))) = wordinterval_to_set wi"
apply(induction wi)
apply(simp add: interval_to_wi_to_ipt_ipv4range)
apply(simp)
done
end
|
#! /usr/bin/julia
# Rosetta Code, Bitmap/Write a PPM file
using Printf

struct Color
    r::UInt8
    g::UInt8
    b::UInt8
end

struct Image
    pic::Array{Color,2}
end

Image(w::Int, h::Int) = Image(Array{Color}(undef, w, h))

Base.fill!(a::Image, c::Color) = Base.fill!(a.pic, c)

function splat!(a::Image, x::Int, y::Int, c::Color)
    a.pic[x, y] = c
    nothing
end

function color(a::Image, x::Int, y::Int)
    a.pic[x, y]
end

function showpixel(a::Image, x::Int, y::Int)
    c = color(a, x, y)
    string(c.r, base=16, pad=2) * string(c.g, base=16, pad=2) * string(c.b, base=16, pad=2)
end

function writeppm(fn::AbstractString, a::Image)
    outf = open(fn, "w")
    (w, h) = size(a.pic)
    write(outf, "P6\n")
    write(outf, @sprintf "%d %d\n" w h)
    write(outf, @sprintf "%d\n" 255)
    for i in 1:h
        for j in 1:w
            c = color(a, j, i)
            write(outf, c.r)
            write(outf, c.g)
            write(outf, c.b)
        end
    end
    close(outf)
end
w = 500
h = 300
a = Image(w, h)
purple = Color(0xff, 0, 0xff)
green = Color(0, 0xff, 0)
white = Color(0xff, 0xff, 0xff)
fill!(a, green)
for i in 20:220, j in 10:100
splat!(a, i, j, purple)
end
for i in 180:400, j in 80:200
splat!(a, i, j, white)
end
fn = "bitmap_write.ppm"
writeppm(fn, a)
|
-- Shape.idr
--
-- Demonstrate union data types
module Shape
||| Represents shapes
public export
data Shape = ||| A triangle, with its base and height
Triangle Double Double
| ||| A rectangle, with length and height
Rectangle Double Double
| ||| A circle, with radius
Circle Double
%name Shape shape, shape1, shape2
||| Calculates area of given shape
export
area : Shape -> Double
area (Triangle base height) = 0.5 * base * height
area (Rectangle length height) = length * height
area (Circle radius) = pi * radius * radius
|
Formal statement is: lemma pseudo_mod_main_list: "snd (pseudo_divmod_main_list l q xs ys n) = pseudo_mod_main_list l xs ys n" Informal statement is: The second component of the result of pseudo_divmod_main_list is equal to the result of pseudo_mod_main_list. |
/-
Copyright (c) 2022 Markus Himmel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Markus Himmel
-/
import data.opposite
import data.set.image
/-!
# The opposite of a set
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
The opposite of a set `s` is simply the set obtained by taking the opposite of each member of `s`.
-/
variables {α : Type*}
open opposite
namespace set
/-- The opposite of a set `s` is the set obtained by taking the opposite of each member of `s`. -/
protected def op (s : set α) : set αᵒᵖ :=
unop ⁻¹' s
/-- The unop of a set `s` is the set obtained by taking the unop of each member of `s`. -/
protected def unop (s : set αᵒᵖ) : set α :=
op ⁻¹' s
@[simp] lemma mem_op {s : set α} {a : αᵒᵖ} : a ∈ s.op ↔ unop a ∈ s :=
iff.rfl
@[simp] lemma op_mem_op {s : set α} {a : α} : op a ∈ s.op ↔ a ∈ s :=
by rw [mem_op, unop_op]
@[simp] lemma mem_unop {s : set αᵒᵖ} {a : α} : a ∈ s.unop ↔ op a ∈ s :=
iff.rfl
@[simp] lemma unop_mem_unop {s : set αᵒᵖ} {a : αᵒᵖ} : unop a ∈ s.unop ↔ a ∈ s :=
by rw [mem_unop, op_unop]
@[simp] lemma op_unop (s : set α) : s.op.unop = s :=
ext (by simp only [mem_unop, op_mem_op, iff_self, implies_true_iff])
@[simp] lemma unop_op (s : set αᵒᵖ) : s.unop.op = s :=
ext (by simp only [mem_op, unop_mem_unop, iff_self, implies_true_iff])
/-- The members of the opposite of a set are in bijection with the members of the set itself. -/
@[simps] def op_equiv_self (s : set α) : s.op ≃ s :=
⟨λ x, ⟨unop x, x.2⟩, λ x, ⟨op x, x.2⟩, λ x, by simp, λ x, by simp⟩
/-- Taking opposites as an equivalence of powersets. -/
@[simps] def op_equiv : set α ≃ set αᵒᵖ :=
⟨set.op, set.unop, op_unop, unop_op⟩
@[simp] lemma singleton_op (x : α) : ({x} : set α).op = {op x} :=
ext $ λ y, by simpa only [mem_op, mem_singleton_iff] using unop_eq_iff_eq_op
@[simp] lemma singleton_unop (x : αᵒᵖ) : ({x} : set αᵒᵖ).unop = {unop x} :=
ext $ λ y, by simpa only [mem_unop, mem_singleton_iff] using op_eq_iff_eq_unop
@[simp] lemma singleton_op_unop (x : α) : ({op x} : set αᵒᵖ).unop = {x} :=
by simp only [singleton_unop, opposite.unop_op]
@[simp] lemma singleton_unop_op (x : αᵒᵖ) : ({unop x} : set α).op = {x} :=
by simp only [singleton_op, opposite.op_unop]
end set
|
State Before: α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ m < replicate (n + 1) x ↔ m ≤ replicate n x State After: α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ (∃ a, a ::ₘ m ≤ replicate (n + 1) x) ↔ m ≤ replicate n x Tactic: rw [lt_iff_cons_le] State Before: α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ (∃ a, a ::ₘ m ≤ replicate (n + 1) x) ↔ m ≤ replicate n x State After: case mp
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ (∃ a, a ::ₘ m ≤ replicate (n + 1) x) → m ≤ replicate n x
case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ m ≤ replicate n x → ∃ a, a ::ₘ m ≤ replicate (n + 1) x Tactic: constructor State Before: case mp
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ (∃ a, a ::ₘ m ≤ replicate (n + 1) x) → m ≤ replicate n x State After: case mp.intro
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
x' : α
hx' : x' ::ₘ m ≤ replicate (n + 1) x
⊢ m ≤ replicate n x Tactic: rintro ⟨x', hx'⟩ State Before: case mp.intro
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
x' : α
hx' : x' ::ₘ m ≤ replicate (n + 1) x
⊢ m ≤ replicate n x State After: case mp.intro
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
x' : α
hx' : x' ::ₘ m ≤ replicate (n + 1) x
this : x' = x
⊢ m ≤ replicate n x Tactic: have := eq_of_mem_replicate (mem_of_le hx' (mem_cons_self _ _)) State Before: case mp.intro
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
x' : α
hx' : x' ::ₘ m ≤ replicate (n + 1) x
this : x' = x
⊢ m ≤ replicate n x State After: no goals Tactic: rwa [this, replicate_succ, cons_le_cons_iff] at hx' State Before: case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
⊢ m ≤ replicate n x → ∃ a, a ::ₘ m ≤ replicate (n + 1) x State After: case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
h : m ≤ replicate n x
⊢ ∃ a, a ::ₘ m ≤ replicate (n + 1) x Tactic: intro h State Before: case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
h : m ≤ replicate n x
⊢ ∃ a, a ::ₘ m ≤ replicate (n + 1) x State After: case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
h : m ≤ replicate n x
⊢ ∃ a, a ::ₘ m ≤ x ::ₘ replicate n x Tactic: rw [replicate_succ] State Before: case mpr
α : Type u_1
β : Type ?u.93086
γ : Type ?u.93089
m : Multiset α
x : α
n : ℕ
h : m ≤ replicate n x
⊢ ∃ a, a ::ₘ m ≤ x ::ₘ replicate n x State After: no goals Tactic: exact ⟨x, cons_le_cons _ h⟩ |
lemma simple_path_image_uncountable: fixes g :: "real \<Rightarrow> 'a::metric_space" assumes "simple_path g" shows "uncountable (path_image g)" |
from transformers import *
import torch
import torch.nn.functional as F
import numpy as np
from model import MemeDialoGPT
from dataset import get_data, build_input_from_segments
import copy
# from train import input_construct
SPECIAL_TOKENS = ['[BOS]', '[EOS]', '[speaker1]', '[speaker2]', '[IMG]', '[TAG]', '[PAD]']
SPECIAL_TOKENS_DICT = {'bos_token':'[BOS]', 'eos_token':'[EOS]', 'additional_special_tokens':['[speaker1]', '[speaker2]', '[IMG]', '[TAG]'], 'pad_token':'[PAD]'}
# autoregressive decoding: sample one token at a time using top-k / nucleus (top-p) filtering
def sample_sequence(input_embs, token_type_ids, model, tokenizer, speaker_id, max_len=20):
temperature = 0.7
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
res = []
for i in range(max_len):
logits, _ = model(input_embs, token_type_ids)
logits = logits[-1]/temperature
# print(logits.size())
logits = top_filtering(logits, top_k=0, top_p=0.9)
probs = F.softmax(logits, dim=-1)
next_word = torch.multinomial(probs, 1).item()
if next_word == eos or next_word == 2:
break
res.append(next_word)
token_type_ids = torch.cat((token_type_ids, torch.LongTensor([speaker_id])), 0)
word_emb = model.transformer.wte(torch.LongTensor([next_word]))
input_embs = torch.cat((input_embs, word_emb), 0)
#break
return res
# select top-k or top-p candidates
def top_filtering(logits, top_k=0, top_p=0.0, threshold=-float('Inf'), filter_value=-float('Inf')):
assert logits.dim()==1
top_k = min(top_k, logits.size(-1))
if top_k > 0:
idxs_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[idxs_to_remove] = filter_value
if top_p > 0:
sorted_logits, sorted_idx = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_idx_to_remove = cumulative_probs > top_p
sorted_idx_to_remove[..., 1:] = sorted_idx_to_remove[...,:-1].clone()
sorted_idx_to_remove[...,0] = 0
idxs_to_remove = sorted_idx[sorted_idx_to_remove]
logits[idxs_to_remove] = filter_value
idxs_to_remove = logits < threshold
logits[idxs_to_remove] = filter_value
# print(logits.size())
return logits
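# A minimal usage sketch for the filter above (names and values here are illustrative
# assumptions, not taken from the original training setup):
#   logits = lm_logits[-1] / temperature                 # 1-D tensor over the vocabulary
#   logits = top_filtering(logits, top_k=0, top_p=0.9)
#   next_id = torch.multinomial(F.softmax(logits, dim=-1), 1).item()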
def generate_response(model, dialog_list, id2feature, tokenizer):
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
with torch.no_grad():
for dialog in dialog_list:
history = copy.deepcopy(dialog['history'])
history_txt, history_img, token_type_ids, _ = build_input_from_segments(history, tokenizer, id2feature)
if token_type_ids[-1] == speaker1:
speaker_id = speaker2
else:
speaker_id = speaker1
history_txt += [speaker_id]
token_type_ids += [speaker_id]
if len(history_img)==0:
continue
print(tokenizer.convert_ids_to_tokens(history_txt))
history_txt = torch.LongTensor(history_txt)
history_img = torch.from_numpy(np.array(history_img)).float()
token_type_ids = torch.Tensor(token_type_ids).long()
# print(token_type_ids.size(), history_txt.size(), history_img.size())
history_txt_embs = model.transformer.wte(history_txt)
history_img_embs = model.img_ff(history_img)
input_embs = input_construct(history_txt_embs, history_img_embs, token_type_ids, tokenizer)
# print(input_embs.size())
res = sample_sequence(input_embs, token_type_ids, model, tokenizer, speaker_id)
print(tokenizer.convert_ids_to_tokens(res))
break
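# input_construct interleaves word embeddings and projected image features into one embedding
# sequence, following the layout given by token_type_ids: each position tagged with the [IMG]
# token id receives the next image feature, every other position receives the next text embedding.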
def input_construct(history_txt_embs, history_img_embs, token_type_ids, tokenizer):
bos, eos, speaker1, speaker2, img, tag = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
emb_length = token_type_ids.size(-1)
emb_dim = history_txt_embs.size(-1)
img_num = history_img_embs.size(0)
input_embs = torch.zeros((emb_length, emb_dim))
txt_idx = 0
img_idx = 0
left_idx = 0
right_idx = 0
while right_idx < emb_length:
#if right_idx == emb_length-1 and token_type_ids[right_idx] == img:
# break
if token_type_ids[right_idx] == img:
txt_length = right_idx - left_idx
input_embs[left_idx:right_idx, :] = history_txt_embs[txt_idx:txt_idx+txt_length, :]
txt_idx += txt_length
input_embs[right_idx,:] = history_img_embs[img_idx, :]
img_idx += 1
left_idx = right_idx + 1
right_idx += 1
txt_length = right_idx - left_idx
if txt_length > 0:
input_embs[left_idx:right_idx, :] = history_txt_embs[txt_idx:, :]
# img_feature = history_img_embs[img_idx,:]
return input_embs
if __name__ == '__main__':
ckpt_path = 'ckpt/mod_gpt'
tokenizer = BertTokenizer.from_pretrained(ckpt_path, do_lower_case=True)
model_config = GPT2Config.from_pretrained(ckpt_path)
model = MemeDialoGPT(model_config)
ckpt = torch.load('ckpt/mod_gpt/model.bin', map_location='cpu')
model.load_state_dict(ckpt['model'])
tokenizer.add_special_tokens(SPECIAL_TOKENS_DICT)
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
model = model.to(device)
model.eval()
test_path = 'data/dialog/validation.json'
feature_path = 'data/meme/id2feature.json'
#test_data = json.load(open(test_path, 'r', encoding='utf-8'))
dialog_list, id2feature = get_data(tokenizer, test_path, feature_path)
# print(dialog_list[0])
generate_response(model, dialog_list, id2feature, tokenizer)
|
lemma cball_eq_sing: fixes x :: "'a::{metric_space,perfect_space}" shows "cball x e = {x} \<longleftrightarrow> e = 0" |
{-# OPTIONS --safe #-}
module Cubical.Algebra.OrderedCommMonoid.Instances where
open import Cubical.Foundations.Prelude
open import Cubical.Algebra.OrderedCommMonoid.Base
open import Cubical.Data.Nat
open import Cubical.Data.Nat.Order
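-- ℕ ordered by _≤_ forms an ordered commutative monoid both additively (unit 0)
-- and multiplicatively (unit 1); the two instances below package these facts.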
ℕ≤+ : OrderedCommMonoid ℓ-zero ℓ-zero
ℕ≤+ .fst = ℕ
ℕ≤+ .snd .OrderedCommMonoidStr._≤_ = _≤_
ℕ≤+ .snd .OrderedCommMonoidStr._·_ = _+_
ℕ≤+ .snd .OrderedCommMonoidStr.ε = 0
ℕ≤+ .snd .OrderedCommMonoidStr.isOrderedCommMonoid =
makeIsOrderedCommMonoid
isSetℕ
+-assoc +-zero (λ _ → refl) +-comm
(λ _ _ → isProp≤) (λ _ → ≤-refl) (λ _ _ _ → ≤-trans) (λ _ _ → ≤-antisym)
(λ _ _ _ → ≤-+k) (λ _ _ _ → ≤-k+)
ℕ≤· : OrderedCommMonoid ℓ-zero ℓ-zero
ℕ≤· .fst = ℕ
ℕ≤· .snd .OrderedCommMonoidStr._≤_ = _≤_
ℕ≤· .snd .OrderedCommMonoidStr._·_ = _·_
ℕ≤· .snd .OrderedCommMonoidStr.ε = 1
ℕ≤· .snd .OrderedCommMonoidStr.isOrderedCommMonoid =
makeIsOrderedCommMonoid
isSetℕ
·-assoc ·-identityʳ ·-identityˡ ·-comm
(λ _ _ → isProp≤) (λ _ → ≤-refl) (λ _ _ _ → ≤-trans) (λ _ _ → ≤-antisym)
(λ _ _ _ → ≤-·k) lmono
where lmono : (x y z : ℕ) → x ≤ y → z · x ≤ z · y
lmono x y z x≤y = subst ((z · x) ≤_) (·-comm y z) (subst (_≤ (y · z)) (·-comm x z) (≤-·k x≤y))
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_c_sogga11_params *params;
assert(p->params != NULL);
params = (gga_c_sogga11_params * )(p->params);
*)
$define lda_c_pw_params
$define lda_c_pw_modified_params
$include "lda_c_pw.mpl"
mbeta := 15.75592*0.004235: (* the usual value of 0.066726 *)
malpha := mbeta/(16*2^(2/3)):
sogga11_yy := (rs, z, xt) -> -malpha*mphi(z)*xt^2/(rs*f_pw(rs, z)):
sogga11_f0 := (rs, z, xt) -> 1 - 1/(1 + sogga11_yy(rs, z, xt)):
sogga11_f1 := (rs, z, xt) -> 1 - exp(-sogga11_yy(rs, z, xt)):
sogga11_t0 := (rs, z, xt) -> add(params_a_sogga11_a[i]*sogga11_f0(rs, z, xt)^(i-1), i=1..6):
sogga11_t1 := (rs, z, xt) -> add(params_a_sogga11_b[i]*sogga11_f1(rs, z, xt)^(i-1), i=1..6):
sogga11_f := (rs, z, xt, xs0, xs1) ->
f_pw(rs, z)*(sogga11_t0(rs, z, xt) + sogga11_t1(rs, z, xt)):
f := (rs, z, xt, xs0, xs1) -> sogga11_f(rs, z, xt, xs0, xs1):
|
If $f_n$ is a sequence of complex numbers such that $\|f_n\| < 1/n$ for all $n$, then $f_n \to 0$. |
`is_element/permutations` := (n::nonnegint) -> proc(s)
type(s,list(posint)) and
nops(s) = n and
min(op(s)) = 1 and
max(op(s)) = n and
nops({op(s)}) = n;
end:
`is_equal/permutations` := (n) -> (s,t) -> evalb(s = t);
`is_leq/permutations` := NULL;
`random_element/permutations` := (n::nonnegint) ->
combinat[randperm](n);
`list_elements/permutations` := (n::nonnegint) ->
combinat[permute](n);
`count_elements/permutations` := (n::nonnegint) -> n!;
`id/permutations` := proc(n::nonnegint) local i; [seq(i,i=1..n)]; end:
`o/permutations` := (n::nonnegint) -> proc()
local i;
apply_assoc((s,t) -> [seq(s[t[i]],i=1..n)],
[seq(i,i=1..n)])(args);
end:
`inv/permutations` := (n::nonnegint) -> proc(s)
local i,t;
t := table();
for i from 1 to n do t[s[i]] := i; od;
return [seq(t[i],i=1..n)];
end:
`length_set/permutations` := (n::nonnegint) -> proc(s)
local L,i,j;
L := [seq(seq([i,j],j=i+1..n),i=1..n-1)];
L := select(ij -> s[ij[1]] > s[ij[2]],L);
return L;
end:
`unordered_length_set/permutations` := (n::nonnegint) -> (s) ->
map(u -> {op(u)},{op(`length_set/permutations`(n)(s))});
`length/permutations` := (n::nonnegint) -> proc(s)
nops(`length_set/permutations`(n)(s));
end:
`sgn/permutations` := (n::nonnegint) -> proc(s)
local i,j;
return signum(mul(mul(s[j]-s[i],j=i+1..n),i=1..n-1));
end:
`is_even/permutations` := (n::nonnegint) -> (s) ->
evalb(`sgn/permutations`(n)(s) = 1);
`is_odd/permutations` := (n::nonnegint) -> (s) ->
evalb(`sgn/permutations`(n)(s) = -1);
`switch/permutations` := (n::nonnegint) -> proc(i::posint)
local j;
if i >= n then error("Should have 1 <= i < n"); fi;
return [seq(j,j=1..i-1),i+1,i,seq(j,j=i+2..n)];
end:
`t/permutations` := (n::nonnegint) -> proc(k::posint)
local i;
[seq(i,i=1..k-1),seq(i+1,i=k..n-1),k];
end:
`t_inv/permutations` := (n::nonnegint) -> proc(k::posint)
local i;
[seq(i,i=1..k-1),n,seq(i-1,i=k+1..n)];
end:
`t_word/permutations` := (n::nonnegint) -> proc(k::posint)
local i;
[seq(i,i=k..n-1)];
end:
`to_coxeter_word/permutations` := (n::nonnegint) -> proc(w)
local m,v;
if n <= 1 then return []; fi;
m := w[n];
v := `o/permutations`(n)(`t_inv/permutations`(n)(m),w);
v := [op(1..(n-1),v)];
return [op(`t_word/permutations`(n)(m)),
op(`to_coxeter_word/permutations`(n-1)(v))];
end:
`from_coxeter_word/permutations` := (n::nonnegint) -> proc(x)
`o/permutations`(n)(op(map(`switch/permutations`(n),x)));
end:
# Here are two kinds of Coxeter words that reduce to the identity.
# They are used in the proof of the completeness of the Coxeter relations.
`long_coxeter_rel0/permutations` := (n) -> proc(l)
local i;
[seq(i,i=l..n-1),seq(i,i=l..n-1),seq(i,i=n-2..l,-1),seq(i,i=n-1..l+1,-1)];
end:
`long_coxeter_rel1/permutations` := (n) -> proc(k,l)
local i;
`if`(k <= l,
[seq(i,i=k..n-1),seq(i,i=l..n-1),seq(i,i=n-2..k,-1),seq(i,i=n-1..l+1,-1)],
[seq(i,i=k..n-1),seq(i,i=l..n-1),seq(i,i=n-2..k-1,-1),seq(i,i=n-1..l,-1)]);
end:
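# `coxeter_reduce_once` performs one simplification pass on a Coxeter word:
# it commutes generators s[i], s[j] with |i-j| > 1, cancels repeated adjacent
# generators using s[i]^2 = 1, and shortens long alternating runs
# s[i] s[i+1] s[i] ... using (s[i] s[i+1])^3 = 1.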
`coxeter_reduce_once/permutations` := (n) -> proc(x)
local i,j,k,l,l0,l1,m,p,d,y,z,i0,p0,d0,z0,z1,z2;
# Move s[p] left of s[q] if p < q - 1
y := x;
m := nops(y);
z := [];
i := 1;
while i <= m do
j := i + 1;
while j <= m and y[j] < y[i] - 1 do
z := [op(z),y[j]];
j := j + 1;
od;
z := [op(z),y[i]];
i := j;
od;
y := z;
m := nops(y);
z := [];
i := 1;
j := 1;
while i <= m do
while (j < m and y[j+1] = y[i]) do j := j + 1; od;
if modp(j - i + 1,2) = 1 then z := [op(z),y[i]]; fi;
i := j + 1;
j := i;
od;
y := z;
m := nops(y);
d := table():
l := table():
i0 := 0;
l0 := 0;
for i from 1 to m do
if i < m and abs(y[i+1] - y[i]) = 1 then
d[i] := y[i+1] - y[i];
j := i + 1;
while j <= m and y[j] = y[i] + modp(j - i,2) * d[i] do j := j + 1; od;
l[i] := j - i;
else
d[i] := 0;
l[i] := 1;
fi;
if l[i] > l0 then
i0 := i;
l0 := l[i];
fi;
od:
if l0 < 3 then return y; fi;
z0 := [seq(y[i],i=1..i0-1)];
z2 := [seq(y[i],i=i0+l0..m)];
l1 := mods(l0,6);
p0 := y[i0];
d0 := d[i0];
if l1 = 0 then z1 := [];
elif l1 = 1 then z1 := [p0];
elif l1 = 2 then z1 := [p0,p0+d0];
elif l1 = 3 and d0 = 1 then z1 := [p0,p0+1,p0];
elif l1 = 3 and d0 = -1 then z1 := [p0-1,p0,p0-1];
elif l1 = -2 then z1 := [p0+d0,p0];
elif l1 = -1 then z1 := [p0+d0];
fi;
return [op(z0),op(z1),op(z2)];
end:
`coxeter_reduce/permutations` := (n) -> proc(w)
local u,v;
u := NULL;
v := w;
while u <> v do
u := v;
v := `coxeter_reduce_once/permutations`(n)(u);
od:
return u;
end: |
Formal statement is: lemma adjoint_works: fixes f :: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space" assumes lf: "linear f" shows "x \<bullet> adjoint f y = f x \<bullet> y" Informal statement is: If $f$ is a linear map from $\mathbb{R}^n$ to $\mathbb{R}^m$, then the dot product of $x$ and the adjoint of $f$ applied to $y$ is equal to the dot product of $f(x)$ and $y$. |
using StatsModels,DataFrames,MLStyle
include("Chain.jl")
data = DataFrame(A=[1,2,3], B=[2,4,7],C=[1,3,2],D=[3,2,5])
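# form rewrites `A ~ data` into `@formula(A ~ <all other columns of data>)`; the
# `A ~ data - B` form additionally drops column B from the right-hand side.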
function form(expr)
@match expr begin
:($var~$data-$drop)=>
let res=@chain propertynames(eval(data)).filter(i->i!=var&&i!=drop).join('+')
:(@formula($var~$(Meta.parse(res))))
end
:($var~$data)=>
let res=@chain propertynames(eval(data)).filter(i->i!=var).join('+')
:(@formula($var~$(Meta.parse(res))))
end
end
end
macro form(expr)
form(expr)|>esc
end
@info @form(A~data)==@formula(A~B+C+D)
@info @form(A~data-B)==@formula(A~C+D)
|
function [ft] = au2ft(au)
% Convert length from astronomical units to feet.
% Chad A. Greene 2012
ft = au*490806662372; |
lemma LIMSEQ_subseq_LIMSEQ: "X \<longlonglongrightarrow> L \<Longrightarrow> strict_mono f \<Longrightarrow> (X \<circ> f) \<longlonglongrightarrow> L" |
theory Deduction imports
Main Term
begin
(* 6. (a) *)
definition \<iota> :: string where
"\<iota> = ''intruder''"
definition intruder :: msg where
"intruder = Cons \<iota>"
(* Intruder deduction rules from Fig. 1 *)
inductive deduce :: "msg set \<Rightarrow> msg \<Rightarrow> bool" (infix "\<turnstile>" 72) where
Ax[intro]: "u \<in> T \<Longrightarrow> T \<turnstile> u"
| Proj1[intro]: "T \<turnstile> Pair u1 u2 \<Longrightarrow> T \<turnstile> u1"
| Proj2[intro]: "T \<turnstile> Pair u1 u2 \<Longrightarrow> T \<turnstile> u2"
| Hash[intro]: "T \<turnstile> u \<Longrightarrow> T \<turnstile> Hash u"
| Pair[intro]: "T \<turnstile> u1 \<Longrightarrow> T \<turnstile> u2 \<Longrightarrow> T \<turnstile> Pair u1 u2"
| Senc[intro]: "T \<turnstile> m \<Longrightarrow> T \<turnstile> k \<Longrightarrow> T \<turnstile> Sym_encrypt m k"
| Aenc[intro]: "T \<turnstile> m \<Longrightarrow> T \<turnstile> k \<Longrightarrow> T \<turnstile> Public_key_encrypt m k"
| Sig[intro]: "T \<turnstile> m \<Longrightarrow> T \<turnstile> Signature m intruder"
| Sdec[intro]: "T \<turnstile> Sym_encrypt m k \<Longrightarrow> T \<turnstile> k \<Longrightarrow> T \<turnstile> m"
| Adec[intro]: "T \<turnstile> Public_key_encrypt m intruder \<Longrightarrow> T \<turnstile> m"
(* examples from the project description *)
lemma "{Sym_encrypt m x, x} \<turnstile> m"
by auto
lemma "{Pair u1 u2} \<turnstile> Hash (Pair u2 u1)"
apply (rule Hash)
by (rule Pair) auto
lemma "{Sym_encrypt m k, Public_key_encrypt k intruder} \<turnstile> Pair m (Signature m intruder)"
apply (rule Pair)
apply (rule Sdec)
prefer 2 apply (rule Adec)
prefer 3 apply (rule Sig)
apply (rule Sdec)
prefer 2 apply (rule Adec)
by auto
(* 6. (b) *)
(* helper lemma for deduce_cut *)
lemma deduce_cut_aux:
assumes "T \<turnstile> u" and "H \<turnstile> t" and "T = insert t H"
shows "H \<turnstile> u"
using assms
by (induction rule: deduce.induct) auto
(* Lemma 4. (i) *)
lemma deduce_cut:
assumes "insert t H \<turnstile> u" and "H \<turnstile> t"
shows "H \<turnstile> u"
using assms deduce_cut_aux by blast
(* Lemma 4. (ii) *)
lemma deduce_weaken:
assumes "G \<turnstile> t" and "G \<subseteq> H"
shows "H \<turnstile> t"
using assms
by (induction rule: deduce.induct) auto
(* 7. (a) *)
(* Definition 1. *)
datatype constraint = Constraint "msg list" "msg list" "msg" ("((2_/|_)/\<triangleright>_)" [67,67,67]66)
type_synonym constraint_system = "constraint list"
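(* A constraint M | A \<triangleright> t asks for the message t to be derivable from M \<union> A;
   M holds the messages still available for analysis, A those already analysed. *)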
(* free variables of a constraint *)
fun c_fv :: "constraint \<Rightarrow> string set" where
"c_fv (Constraint ms ms' msg) = \<Union>(m_fv ` (set ms \<union> set ms' \<union> {msg}))"
lemma "c_fv_finite": "finite (c_fv c)"
apply (cases c)
by (simp add: m_fv_finite)
(* c_sapply applies a message substitution to a constraint *)
fun c_sapply :: "m_subst \<Rightarrow> constraint \<Rightarrow> constraint" where
"c_sapply \<sigma> (Constraint ms ms' msg) = Constraint (map (m_sapply \<sigma>) ms) (map (m_sapply \<sigma>) ms') (m_sapply \<sigma> msg)"
(* applying identity substitution is just the identity on the constraints *)
lemma "c_sapply_id": "c_sapply Var = id"
apply (rule ext)
subgoal for c
apply (cases c)
using m_sapply_id
by simp
done
(* lifted Lemma 1 *)
lemma "c_fv_sapply_sdom_svran":
assumes "x \<in> c_fv (c_sapply \<sigma> c)"
shows "x \<in> (c_fv c - m_sdom \<sigma>) \<union> m_svran \<sigma>"
using assms m_fv_sapply_sdom_svran
apply (cases c)
by simp blast
(* c_derives checks if a constraint is satisfied w.r.t. intruder deduction rules *)
fun c_derives :: "constraint \<Rightarrow> bool" where
"c_derives (Constraint ms ms' msg) = (set ms \<union> set ms') \<turnstile> msg"
lemma "c_sapply_comp": "c_sapply \<tau> (c_sapply \<sigma> c) = c_sapply (\<tau> \<circ>m \<sigma>) c"
using m_sapply_comp
by (cases c) simp
(* free variables of a constraint system *)
definition cs_fv :: "constraint_system \<Rightarrow> string set" where
"cs_fv cs = \<Union>(c_fv ` set cs)"
lemma "cs_fv_finite": "finite (cs_fv cs)"
by (simp add: c_fv_finite cs_fv_def)
(* cs_sapply applies a message substitution to a constraint system *)
definition cs_sapply :: "m_subst \<Rightarrow> constraint_system \<Rightarrow> constraint_system" where
"cs_sapply \<sigma> cs = map (c_sapply \<sigma>) cs"
(* applying identity substitution is just the identity on the constraint systems *)
lemma "cs_sapply_id": "cs_sapply Var = id"
apply (rule ext)
subgoal for cs
unfolding cs_sapply_def
by (simp add: c_sapply_id)
done
(* lifted Lemma 1 *)
lemma "cs_fv_sapply_sdom_svran":
assumes "x \<in> cs_fv (cs_sapply \<sigma> cs)"
shows "x \<in> (cs_fv cs - m_sdom \<sigma>) \<union> m_svran \<sigma>"
proof -
obtain "c" where "x \<in> c_fv (c_sapply \<sigma> c)" and $: "c \<in> set cs"
using assms
unfolding cs_fv_def cs_sapply_def
by auto
then have "x \<in> (c_fv c - m_sdom \<sigma>) \<union> m_svran \<sigma>"
using c_fv_sapply_sdom_svran
by blast
then show ?thesis
using $ cs_fv_def by auto
qed
(* cs_derives checks if all constraints are satisfied w.r.t. intruder deduction rules *)
definition cs_derives :: "constraint_system \<Rightarrow> bool" where
"cs_derives cs = (\<forall>c \<in> set cs. c_derives c)"
(* 7. (b) *)
(* Definition 2 *)
type_synonym sol_set = "m_subst set"
definition sol :: "constraint_system \<Rightarrow> sol_set" where
"sol cs = {\<sigma> | \<sigma>. cs_derives (cs_sapply \<sigma> cs)}"
(* Lemma 5 *)
lemma "sol_cs_union": "sol (cs @ cs') = (sol cs) \<inter> (sol cs')"
unfolding sol_def cs_sapply_def cs_derives_def
by (rule set_eqI) auto
(* helper lemma for Lemma 6 *)
lemma "sol_c_sapply": "\<tau> \<in> sol [c_sapply \<sigma> c] \<Longrightarrow> \<tau> \<circ>m \<sigma> \<in> sol [c]"
unfolding sol_def cs_sapply_def cs_derives_def
by (simp add: c_sapply_comp)
(* Lemma 6 *)
lemma "sol_cs_sapply": "\<tau> \<in> sol (cs_sapply \<sigma> cs) \<Longrightarrow> \<tau> \<circ>m \<sigma> \<in> sol cs"
unfolding sol_def cs_sapply_def cs_derives_def
by (simp add: c_sapply_comp)
(* introduction rule for sol *)
lemma "sol_sapply": "(m_sapply \<tau> ` (set M \<union> set A) \<turnstile> m_sapply \<tau> t) = (\<tau> \<in> sol [M | A \<triangleright> t])"
unfolding sol_def cs_derives_def cs_sapply_def
apply (rule iffI)
apply auto
by (simp add: image_Un)+
(* dropping a constraint preserves a solution *)
lemma "sol_fst": "\<tau> \<in> sol [c1, c2] \<Longrightarrow> \<tau> \<in> sol [c1]"
using sol_cs_union
by fastforce
(* dropping a constraint preserves a solution *)
lemma "sol_snd": "\<tau> \<in> sol [c1, c2] \<Longrightarrow> \<tau> \<in> sol [c2]"
using sol_cs_union
by (metis (full_types) IntE append_Cons append_Nil)
(* 7. (c) *)
inductive rer1 :: "constraint \<Rightarrow> m_subst \<Rightarrow> constraint_system \<Rightarrow> bool" ("_/\<leadsto>\<^sub>1[_]/_" [64,64,64]63) where
Unif[intro]: "\<not>is_var t \<Longrightarrow> \<exists>u \<in> set M \<union> set A. m_unify [(t, u)] = Some \<sigma> \<Longrightarrow> rer1 (M | A \<triangleright> t) \<sigma> []"
| Comp_Hash[intro]: "rer1 (M | A \<triangleright> Hash t) Var [M | A \<triangleright> t]"
| Comp_Pair[intro]: "rer1 (M | A \<triangleright> Pair t1 t2) Var [M | A \<triangleright> t1, M | A \<triangleright> t2]"
| Comp_Sym_encrypt[intro]: "rer1 (M | A \<triangleright> Sym_encrypt m k) Var [M | A \<triangleright> m, M | A \<triangleright> k]"
| Comp_Public_key_encrypt[intro]: "rer1 (M | A \<triangleright> Public_key_encrypt m k) Var [M | A \<triangleright> m, M | A \<triangleright> k]"
| Comp_Signature[intro]: "rer1 (M | A \<triangleright> Signature t intruder) Var [M | A \<triangleright> t]"
| Proj[intro]: "Pair u v \<in> set M \<Longrightarrow> M' = removeAll (Pair u v) M \<Longrightarrow> rer1 (M | A \<triangleright> t) Var [(u # v # M') | (Pair u v # A) \<triangleright> t]"
| Sdec[intro]: "Sym_encrypt u k \<in> set M \<Longrightarrow> M' = removeAll (Sym_encrypt u k) M \<Longrightarrow> rer1 (M | A \<triangleright> t) Var [(u # M') | (Sym_encrypt u k # A) \<triangleright> t, M' | (Sym_encrypt u k # A) \<triangleright> k]"
| Adec[intro]: "Public_key_encrypt u intruder \<in> set M \<Longrightarrow> M' = removeAll (Public_key_encrypt u intruder) M \<Longrightarrow> rer1 (M | A \<triangleright> t) Var [(u # M') | (Public_key_encrypt u intruder # A) \<triangleright> t]"
| Ksub[intro]: "Public_key_encrypt u (Var x) \<in> set M \<Longrightarrow> \<sigma> = Var(x := intruder) \<Longrightarrow> rer1 (M | A \<triangleright> t) \<sigma> [c_sapply \<sigma> (M | A \<triangleright> t)]"
inductive rer :: "constraint_system \<Rightarrow> m_subst \<Rightarrow> constraint_system \<Rightarrow> bool" ("_/\<leadsto>[_]/_" [73,73,73]72) where
Context: "rer1 c \<sigma> cs \<Longrightarrow> rer (cs' @ (c # cs'')) \<sigma> (cs_sapply \<sigma> cs' @ cs @ cs_sapply \<sigma> cs'')"
inductive rer_star :: "constraint_system \<Rightarrow> m_subst \<Rightarrow> constraint_system \<Rightarrow> bool" ("_/\<leadsto>*[_]/_" [73,73,73]72) where
Refl: "rer_star cs Var cs"
| Trans: "rer cs \<sigma> cs' \<Longrightarrow> rer_star cs' \<tau> cs'' \<Longrightarrow> rer_star cs (\<tau> \<circ>m \<sigma>) cs''"
(* 7. (d) *)
fun c_simple :: "constraint \<Rightarrow> bool" where
"c_simple (M | A \<triangleright> (Var _)) = True"
| "c_simple _ = False"
definition cs_simple :: "constraint_system \<Rightarrow> bool" where
"cs_simple cs = (\<forall>c \<in> set cs. c_simple c)"
(* Definition 3 *)
definition red :: "constraint_system \<Rightarrow> m_subst set" where
"red cs = {m_scomp \<tau> \<sigma> | \<tau> \<sigma>. \<exists>cs'. rer_star cs \<sigma> cs' \<and> cs_simple cs' \<and> \<tau> \<in> sol cs'}"
(* 8. (a) *)
(* the intruder does not change under any message substitution *)
lemma "m_subst_intruder": "m_sapply \<tau> intruder = intruder"
unfolding intruder_def
by simp
(* Lemma 7 *)
lemma "rer1_sound": "rer1 c \<sigma> cs \<Longrightarrow> \<tau> \<in> sol cs \<Longrightarrow> \<tau> \<circ>m \<sigma> \<in> sol [c]"
proof (induction rule: rer1.induct)
case (Unif t M A \<sigma>)
then obtain "u" where u_in_M_A: "u \<in> set M \<union> set A" and "m_unify [(t, u)] = Some \<sigma>" by auto
then have "m_sapply \<sigma> t = m_sapply \<sigma> u" using m_soundness1 m_unifiess.cases m_unifies.cases by blast
then have "m_sapply (\<tau> \<circ>m \<sigma>) t = m_sapply (\<tau> \<circ>m \<sigma>) u" using m_sapply_comp by metis
then have "m_sapply (\<tau> \<circ>m \<sigma>) ` {u} \<turnstile> m_sapply (\<tau> \<circ>m \<sigma>) t" using Ax by simp
then have "m_sapply (\<tau> \<circ>m \<sigma>) ` (set M \<union> set A) \<turnstile> m_sapply (\<tau> \<circ>m \<sigma>) t" using u_in_M_A deduce_weaken by auto
then show ?case unfolding sol_def cs_derives_def cs_sapply_def by (simp add: image_Un)
next
case (Comp_Hash M A t)
then show ?case
unfolding sol_def cs_derives_def cs_sapply_def
by auto
next
case (Comp_Pair M A t1 t2)
then show ?case
unfolding sol_def cs_derives_def cs_sapply_def
by auto
next
case (Comp_Sym_encrypt M A m k)
then show ?case
unfolding sol_def cs_derives_def cs_sapply_def
by auto
next
case (Comp_Public_key_encrypt M A m k)
then show ?case
unfolding sol_def cs_derives_def cs_sapply_def
by auto
next
case (Comp_Signature M A t)
then show ?case
unfolding sol_def cs_derives_def cs_sapply_def
using m_subst_intruder
by auto
next
case (Proj u v M M' A t)
then have "rem": "set M' \<union> set (Pair u v # A) = set M \<union> set A"
by auto
have "tau_t": "m_sapply \<tau> ` (set (u # v # M') \<union> set (Pair u v # A)) \<turnstile> m_sapply \<tau> t"
using Proj.prems sol_sapply by blast
have "m_sapply \<tau> ` (set (v # M') \<union> set (Pair u v # A)) \<turnstile> m_sapply \<tau> u"
by auto
then have "tau_t'": "m_sapply \<tau> ` (set (v # M') \<union> set (Pair u v # A)) \<turnstile> m_sapply \<tau> t"
by (metis Un_insert_left deduce_cut_aux image_insert list.simps(15) tau_t)
have "m_sapply \<tau> ` (set M' \<union> set (Pair u v # A)) \<turnstile> m_sapply \<tau> v"
by auto
then have "m_sapply \<tau> ` (set M' \<union> set (Pair u v # A)) \<turnstile> m_sapply \<tau> t"
by (metis Un_insert_left deduce_cut image_insert list.simps(15) tau_t')
then show ?case
by (metis cs_sapply_id id_apply rem sol_sapply sol_cs_sapply)
next
case (Sdec u k M M' A t)
then have "rem": "set M' \<union> set (Sym_encrypt u k # A) = set M \<union> set A"
by auto
have "tau_t": "m_sapply \<tau> ` (set (u # M') \<union> set (Sym_encrypt u k # A)) \<turnstile> m_sapply \<tau> t"
using Sdec.prems sol_fst sol_sapply by blast
have "m_sapply \<tau> ` (set M' \<union> set (Sym_encrypt u k # A)) \<turnstile> m_sapply \<tau> k"
using Sdec.prems sol_sapply sol_snd by blast
then have "m_sapply \<tau> ` (set M' \<union> set (Sym_encrypt u k # A)) \<turnstile> m_sapply \<tau> u"
by auto
then have "m_sapply \<tau> ` (set M \<union> set A) \<turnstile> m_sapply \<tau> t"
by (metis Un_insert_left deduce_cut_aux image_insert list.simps(15) rem tau_t)
then show ?case
by (simp add: sol_sapply)
next
case (Adec u M M' A t)
then have "rem": "set (Public_key_encrypt u intruder # M') \<union> set A = set M \<union> set A"
by auto
have "tau_t": "m_sapply \<tau> ` (set (u # M') \<union> set (Public_key_encrypt u intruder # A)) \<turnstile> m_sapply \<tau> t"
using Adec.prems sol_sapply by blast
have "m_sapply \<tau> ` (set (Public_key_encrypt u intruder # M') \<union> set A) \<turnstile> m_sapply \<tau> u"
by (simp add: Ax deduce.Adec m_subst_intruder)
then have "m_sapply \<tau> ` (set M \<union> set A) \<turnstile> m_sapply \<tau> t"
by (metis (no_types, lifting) Un_commute Un_insert_left deduce_cut_aux image_insert list.simps(15) rem tau_t)
then show ?case
by (simp add: sol_sapply)
next
case (Ksub u x M \<sigma> A t)
then show ?case
using sol_c_sapply
by blast
qed
(* Lemma 8 *)
lemma "rer_sound": "rer cs \<sigma> cs' \<Longrightarrow> \<tau> \<in> sol cs' \<Longrightarrow> \<tau> \<circ>m \<sigma> \<in> sol cs"
apply (induction rule: rer.induct)
using sol_cs_union sol_cs_sapply rer1_sound
by (metis (full_types) IntE IntI append_Cons append_Nil)
(* Lemma 9 *)
lemma "rer_star_sound": "rer_star cs \<sigma> cs' \<Longrightarrow> cs_simple cs' \<Longrightarrow> \<tau> \<in> sol cs' \<Longrightarrow> \<tau> \<circ>m \<sigma> \<in> sol cs"
using rer_sound m_sapply_comp
apply -
by (induction rule: rer_star.induct) auto
(* Theorem 4 *)
theorem "red_sound": "red cs \<subseteq> sol cs"
unfolding red_def
using rer_star_sound
by auto
(* 8. (b) *)
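(* \<Theta> and \<chi> are weight functions on messages; together with w and \<eta>1/\<eta>2 below
   they yield the termination measure term_rel for the constraint reduction relation. *)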
fun \<Theta> :: "msg \<Rightarrow> nat" where
"\<Theta> (Hash t) = \<Theta> t + 1"
| "\<Theta> (Pair u v) = \<Theta> u + \<Theta> v + 1"
| "\<Theta> (Sym_encrypt m k) = \<Theta> m + \<Theta> k + 1"
| "\<Theta> (Public_key_encrypt m k) = \<Theta> m + \<Theta> k + 1"
| "\<Theta> (Signature m k) = (if k = intruder then \<Theta> m + \<Theta> k + 1 else 1)"
| "\<Theta> _ = 1"
lemma "\<Theta>_pos": "\<Theta> m \<ge> 1"
by (induction m) auto
fun \<chi> :: "msg \<Rightarrow> nat" where
"\<chi> (Hash t) = \<chi> t + 1"
| "\<chi> (Pair u v) = \<chi> u * \<chi> v + 1"
| "\<chi> (Sym_encrypt m k) = \<chi> m + \<Theta> k + 1"
| "\<chi> (Public_key_encrypt m k) = \<chi> m + 1"
| "\<chi> (Signature m k) = \<chi> m + 1"
| "\<chi> _ = 1"
lemma "\<chi>_pos": "\<chi> m \<ge> 1"
by (induction m) auto
(* \<chi> from Section 3.2.4. on lists *)
definition \<chi>' :: "msg list \<Rightarrow> nat" where
"\<chi>' M = prod_list (map \<chi> M)"
lemma "\<chi>'_pos": "\<chi>' M \<ge> 1"
unfolding \<chi>'_def
apply (induction M)
apply simp_all
using One_nat_def \<chi>_pos by presburger
(* \<chi>' is monotone w.r.t. dropping all but one occurrence of a message from a list *)
lemma "\<chi>'_incl": "m \<in> set M \<Longrightarrow> M' = removeAll m M \<Longrightarrow> \<chi>' (m # M') \<le> \<chi>' M"
unfolding \<chi>'_def
apply (induction M arbitrary: M')
apply simp
subgoal premises prems for a M M'
proof (cases "m = a")
case True
then have "M' = removeAll m M" using prems(3) by simp
then have "prod_list (map \<chi> M') \<le> prod_list (map \<chi> M)" by (metis \<chi>'_def \<chi>'_pos less_le_trans list.simps(8) list.simps(9) mult.commute mult.right_neutral nat_mult_le_cancel_disj not_le prems(1) prod_list.Cons prod_list.Nil removeAll_id)
then show ?thesis
using True
by simp
next
case False
then have "m \<in> set M" using prems False by simp
then have "prod_list (map \<chi> (m # a # (removeAll m M))) \<le> prod_list (map \<chi> (a # M))" using prems by auto
then show ?thesis
using False prems(3)
by simp
qed
done
(* \<chi>' monotone w.r.t. appending the same list of messages *)
lemma "\<chi>'_app": "\<chi>' P < \<chi>' P' \<Longrightarrow> \<chi>' (P @ M) < \<chi>' (P' @ M)"
unfolding "\<chi>'_def"
apply (induction M)
apply simp_all
using \<chi>_pos less_le_trans by auto
fun w :: "constraint \<Rightarrow> nat" where
"w (M | A \<triangleright> t) = \<chi>' M * \<Theta> t"
lemma "w_pos": "w c \<ge> 1"
using \<chi>'_pos \<Theta>_pos
by (metis (full_types) c_derives.cases less_one mult_is_0 not_less w.simps)
definition \<eta>1 :: "constraint_system \<Rightarrow> nat" where
"\<eta>1 cs = card (cs_fv cs)"
definition \<eta>2 :: "constraint_system \<Rightarrow> nat" where
"\<eta>2 cs = sum_list (map w cs)"
(* substituting the intruder for some variable does not introduce any new free variables *)
lemma "m_fv_intruder_sub": "\<sigma> = Var(x := intruder) \<Longrightarrow> m_fv (m_sapply \<sigma> m) \<subseteq> m_fv m"
unfolding intruder_def
by (induction m) auto
lemma "c_fv_intruder_sub": "\<sigma> = Var(x := intruder) \<Longrightarrow> c_fv (c_sapply \<sigma> c) \<subseteq> c_fv c"
apply (cases c)
using m_fv_intruder_sub by fastforce+
(* a step under rer1 does not introduce any new free variables *)
lemma "rer1_fv_sub_cs_aux": "rer1 c \<sigma> cs \<Longrightarrow> cs_fv cs \<subseteq> cs_fv (c # cs'')"
unfolding cs_fv_def cs_sapply_def
using c_fv_intruder_sub
apply -
proof (induction rule: rer1.induct)
case (Ksub u x M \<sigma> A t)
then show ?case
apply safe
by (metis Union_iff image_iff list.distinct(1) list.set_cases list.set_intros(1) set_ConsD subsetCE)
qed auto
lemma "rer1_fv_sub_cs": "rer1 c \<sigma> cs \<Longrightarrow> cs_fv cs \<subseteq> cs_fv (cs' @ (c # cs''))"
using rer1_fv_sub_cs_aux
using cs_fv_def by fastforce
(* Lemma 10 *)
lemma "rer1_fv_sub": "rer1 c \<sigma> cs \<Longrightarrow> cs_fv (cs_sapply \<sigma> cs' @ cs @ cs_sapply \<sigma> cs'') \<subseteq> cs_fv (cs' @ (c # cs''))"
unfolding cs_fv_def cs_sapply_def
using cs_sapply_id c_sapply_id
apply -
proof (induction rule: rer1.induct)
case (Unif t M A \<sigma>)
then obtain "u" where $: "u \<in> set M \<union> set A" and "m_unify [(t, u)] = Some \<sigma>"
by auto
then have "m_svran \<sigma> \<subseteq> m_fv_eq (t, u)"
using m_lemma_3 m_fv_eqs.simps
by blast
then have "m_svran \<sigma> \<subseteq> c_fv (M | A \<triangleright> t)"
using $
by auto
then have "cs_fv (cs_sapply \<sigma> (cs' @ cs'')) \<subseteq> cs_fv (cs' @ cs'') \<union> c_fv (M | A \<triangleright> t)"
using cs_fv_sapply_sdom_svran
by blast
then show ?case
unfolding cs_fv_def cs_sapply_def
by auto
next
case (Ksub u x M \<sigma> A t)
show ?case
apply (rule subsetI)
apply simp
using Ksub.hyps(2) c_fv_intruder_sub
by (metis contra_subsetD m_fv_intruder_sub)
qed auto
(* Lemma 11 *)
lemma "rer1_fv_neq": "rer1 c \<sigma> cs \<Longrightarrow> \<sigma> \<noteq> Var \<Longrightarrow> cs_fv (cs_sapply \<sigma> cs' @ cs @ cs_sapply \<sigma> cs'') \<noteq> cs_fv (cs' @ (c # cs''))"
proof (induction rule: rer1.induct)
case (Unif t M A \<sigma>)
then obtain "u" "x" where $: "u \<in> set M \<union> set A" and "m_un": "m_unify [(t, u)] = Some \<sigma>" and "x_m_sdom": "x \<in> m_sdom \<sigma>"
by force
then have "m_sdom \<sigma> \<subseteq> m_fv_eq (t, u)"
using m_fv_eqs.simps m_lemma_3 by blast
then have "m_sdom \<sigma> \<subseteq> c_fv (M | A \<triangleright> t)"
using $
by auto
then have "x_cs_fv": "x \<in> cs_fv (cs' @ (M | A \<triangleright> t) # cs'')"
unfolding cs_fv_def
using "x_m_sdom"
by auto
have "cs_sub": "cs_fv (cs_sapply \<sigma> cs' @ cs_sapply \<sigma> cs'') \<subseteq> cs_fv (cs' @ cs'') - m_sdom \<sigma> \<union> m_svran \<sigma>"
using cs_fv_sapply_sdom_svran
by (simp add: cs_sapply_def subsetI)
have "m_sdom \<sigma> \<inter> m_svran \<sigma> = {}"
using m_lemma_3 m_un
by blast
then have "x \<notin> cs_fv (cs_sapply \<sigma> cs' @ [] @ cs_sapply \<sigma> cs'')"
using cs_fv_sapply_sdom_svran cs_sub x_m_sdom
by auto
then show ?case
using "x_cs_fv"
unfolding cs_fv_def
by auto
next
case (Ksub u x M \<sigma> A t)
then have "x_in_cs_fv": "x \<in> cs_fv (cs' @ (M | A \<triangleright> t # cs''))"
unfolding cs_fv_def
by fastforce
have "x \<in> m_sdom \<sigma> - m_svran \<sigma>"
using Ksub.hyps
unfolding intruder_def
by simp
then have "x \<notin> cs_fv (cs_sapply \<sigma> (cs' @ (M | A \<triangleright> t) # cs''))"
using Ksub.hyps cs_fv_sapply_sdom_svran
unfolding cs_fv_def cs_sapply_def
by blast
then show ?case
using x_in_cs_fv
unfolding cs_fv_def cs_sapply_def
by auto
qed auto
(* Lemma 12 *)
lemma "rer1_measure_lt": "rer1 c \<sigma> cs \<Longrightarrow> \<sigma> = Var \<Longrightarrow> \<eta>2 cs < w c"
unfolding \<eta>2_def
proof (induction rule: rer1.induct)
case (Unif t M A \<sigma>)
then have "sum_list (map w []) = 0" by simp
also have "0 < w (M | A \<triangleright> t)" using less_le_trans w_pos by blast
finally show ?case by blast
next
case (Comp_Hash M A t)
then have "sum_list (map w [M | A \<triangleright> t]) = w (M | A \<triangleright> t)" by simp
also have "... = \<chi>' M * \<Theta> t" by simp
also have "... < \<chi>' M * \<Theta> (Hash t)" using \<chi>'_pos less_le_trans by auto
finally show ?case by auto
next
case (Comp_Pair M A t1 t2)
then have "sum_list (map w [M | A \<triangleright> t1, M | A \<triangleright> t2]) = w (M | A \<triangleright> t1) + w (M | A \<triangleright> t2)" by simp
also have "... = \<chi>' M * \<Theta> t1 + \<chi>' M * \<Theta> t2" by simp
also have "... < \<chi>' M * \<Theta> (Pair t1 t2)" using \<chi>'_pos add_mult_distrib2 less_le_trans by fastforce
finally show ?case by auto
next
case (Comp_Sym_encrypt M A m k)
then have "sum_list (map w [M | A \<triangleright> m, M | A \<triangleright> k]) = w (M | A \<triangleright> m) + w (M | A \<triangleright> k)" by simp
also have "... = \<chi>' M * \<Theta> m + \<chi>' M * \<Theta> k" by simp
also have "... < \<chi>' M * \<Theta> (Sym_encrypt m k)" using \<chi>'_pos add_mult_distrib2 less_le_trans by fastforce
finally show ?case by auto
next
case (Comp_Public_key_encrypt M A m k)
have "sum_list (map w [M | A \<triangleright> m, M | A \<triangleright> k]) = w (M | A \<triangleright> m) + w (M | A \<triangleright> k)" by simp
also have "... = \<chi>' M * \<Theta> m + \<chi>' M * \<Theta> k" by simp
also have "... < \<chi>' M * \<Theta> (Public_key_encrypt m k)" using \<chi>'_pos add_mult_distrib2 less_le_trans by fastforce
finally show ?case by auto
next
case (Comp_Signature M A t)
have "sum_list (map w [M | A \<triangleright> t]) = w (M | A \<triangleright> t)" by simp
also have "... < \<chi>' M * \<Theta> (Signature t intruder)" by (metis (full_types) \<Theta>.simps(5) le_add1 le_less_trans less_add_one mult_less_mono2 nat_0_less_mult_iff not_le w.simps w_pos zero_less_one)
finally show ?case by auto
next
case (Proj u v M M' A t)
have "\<chi>'_pair": "\<chi>' [u, v] < \<chi>' [Pair u v]" unfolding \<chi>'_def by auto
have "sum_list (map w [(u # v # M') | (msg.Pair u v # A) \<triangleright> t]) = w ((u # v # M') | (msg.Pair u v # A) \<triangleright> t)" by simp
also have "... = \<chi>' (u # v # M') * \<Theta> t" by simp
also have "... < \<chi>' (Pair u v # M') * \<Theta> t" using "\<chi>'_pair" "\<chi>'_app"[of "[u, v]" "[Pair u v]" "M'"] unfolding "\<chi>'_def" by (metis Cons_eq_appendI One_nat_def \<Theta>_pos less_le_trans mult_less_mono1 self_append_conv2 zero_less_Suc)
also have "... \<le> \<chi>' M * \<Theta> t" by (simp add: Proj.hyps \<chi>'_incl)
finally show ?case by auto
next
case (Sdec u k M M' A t)
then have "sum_list (map w [(u # M') | (Sym_encrypt u k # A) \<triangleright> t, M' | (Sym_encrypt u k # A) \<triangleright> k]) = w ((u # M') | (Sym_encrypt u k # A) \<triangleright> t) + w (M' | (Sym_encrypt u k # A) \<triangleright> k)" by simp
also have "... = \<chi>' M' * \<chi> u * \<Theta> t + \<chi>' M' * \<Theta> k" by (simp add: \<chi>'_def)
also have "... < \<chi>' M' * \<chi> u * \<Theta> t + \<chi>' M' * (\<Theta> k + 1) * \<Theta> t" unfolding \<chi>'_def by (metis \<Theta>_pos \<chi>'_def add_strict_left_mono less_add_one less_le_trans mult.right_neutral mult_le_mono2 mult_less_mono2 nat_0_less_mult_iff w.simps w_pos)
also have "... = \<chi>' M' * (\<chi> u + \<Theta> k + 1) * \<Theta> t" by (simp add: mult.commute semiring_normalization_rules(34))
also have "... = \<chi>' M' * (\<chi> (Sym_encrypt u k)) * \<Theta> t" by simp
also have "... \<le> \<chi>' M * \<Theta> t" by (metis Sdec.hyps \<chi>'_def \<chi>'_incl list.simps(9) mult.commute mult_le_mono2 prod_list.Cons)
finally show ?case by auto
next
case (Adec u M M' A t)
then have "sum_list (map w [(u # M') | (Public_key_encrypt u intruder # A) \<triangleright> t]) = w ((u # M') | (Public_key_encrypt u intruder # A) \<triangleright> t)" by simp
also have "... = \<chi>' M' * \<chi> u * \<Theta> t" by (simp add: \<chi>'_def)
also have "... < \<chi>' M' * (\<chi> u + 1) * \<Theta> t" using \<Theta>_pos \<chi>'_pos less_le_trans by fastforce
also have "... = \<chi>' M' * (\<chi> (Public_key_encrypt u intruder)) * \<Theta> t" by simp
also have "... \<le> \<chi>' M * \<Theta> t" by (metis (full_types) Adec.hyps(1) Adec.hyps(2) \<chi>'_def \<chi>'_incl list.simps(9) mult.commute mult_le_mono2 prod_list.Cons)
finally show ?case by auto
next
case (Ksub u x M \<sigma> A t)
then show ?case unfolding intruder_def by (metis fun_upd_same msg.distinct(1))
qed
(* termination relation from Section 3.2.4. *)
definition "term_rel" :: "(constraint_system \<times> constraint_system) set" where
"term_rel = \<eta>1 <*mlex*> measure \<eta>2"
lemma "term_rel_wf": "wf term_rel"
unfolding term_rel_def
by (simp add: wf_mlex)
(* abstracting away the substitution from rer *)
inductive "rer_any" :: "constraint_system \<Rightarrow> constraint_system \<Rightarrow> bool" where
"rer cs \<sigma> cs' \<Longrightarrow> rer_any cs' cs"
lemma "rer1_\<eta>1": "rer cs \<sigma> cs' \<Longrightarrow> \<eta>1 cs' \<le> \<eta>1 cs"
unfolding \<eta>1_def
by (metis card_mono cs_fv_finite rer.simps rer1_fv_sub)
lemma "rer1_\<eta>1'": "rer cs \<sigma> cs' \<Longrightarrow> \<sigma> \<noteq> Var \<Longrightarrow> \<eta>1 cs' < \<eta>1 cs"
unfolding \<eta>1_def
by (metis cs_fv_finite le_neq_trans psubset_card_mono rer.simps rer1_fv_neq rer1_fv_sub)
lemma "rer1_\<eta>2": "rer cs \<sigma> cs' \<Longrightarrow> \<sigma> = Var \<Longrightarrow> \<eta>2 cs' < \<eta>2 cs"
unfolding \<eta>2_def
proof (induction rule: rer.induct)
case (Context c \<sigma> cs cs')
then have eq: "sum_list (map w (c # cs')) = w c + sum_list (map w cs')"
by auto
then show ?case
using Context.hyps(1) Context.prems(1) \<eta>2_def eq rer1_measure_lt
by (simp add: Context.prems cs_sapply_id)
qed
(* rer_any is a subset of the termination relation *)
lemma "rer_any_term": "rer_any cs' cs \<Longrightarrow> (cs', cs) \<in> term_rel"
unfolding term_rel_def
by (metis in_measure mlex_leq mlex_less rer1_\<eta>1 rer1_\<eta>1' rer1_\<eta>2 rer_any.cases)
(* Theorem 5 *)
theorem "rer_any_wf": "wfP rer_any"
by (metis rer_any_term term_rel_wf wfE_min wfP_eq_minimal)
end |
State Before: α : Type u_1
i : Nat
l : List α
h : length l ≤ i
⊢ take i l = l State After: α : Type u_1
i : Nat
l : List α
h : length l ≤ i
this : take i l ++ drop i l = l
⊢ take i l = l Tactic: have := take_append_drop i l State Before: α : Type u_1
i : Nat
l : List α
h : length l ≤ i
this : take i l ++ drop i l = l
⊢ take i l = l State After: α : Type u_1
i : Nat
l : List α
h : length l ≤ i
this : take i l = l
⊢ take i l = l Tactic: rw [drop_length_le h, append_nil] at this State Before: α : Type u_1
i : Nat
l : List α
h : length l ≤ i
this : take i l = l
⊢ take i l = l State After: no goals Tactic: exact this |
[STATEMENT]
lemma is_top_sorted_antimono:
assumes "R\<subseteq>R'"
assumes "is_top_sorted R' l"
shows "is_top_sorted R l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_top_sorted R l
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
R \<subseteq> R'
is_top_sorted R' l
goal (1 subgoal):
1. is_top_sorted R l
[PROOF STEP]
unfolding is_top_sorted_alt
[PROOF STATE]
proof (prove)
using this:
R \<subseteq> R'
\<forall>x y. (x, y) \<in> list_before_rel l \<longrightarrow> (y, x) \<notin> R'\<^sup>*
goal (1 subgoal):
1. \<forall>x y. (x, y) \<in> list_before_rel l \<longrightarrow> (y, x) \<notin> R\<^sup>*
[PROOF STEP]
by (auto dest: rtrancl_mono_mp) |
(*
Copyright (C) 2017 M.A.L. Marques
2019 Susi Lehtola
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
$include "mgga_x_tm.mpl"
(* revtm uses the tpss definition of qtilde *)
tm_b := 0.4:
tm_qtilde := (x, t) ->
9/20 * (tm_alpha(x, t) - 1)/sqrt(1 + tm_b*tm_alpha(x, t)*(tm_alpha(x, t) - 1))
+ 2*tm_p(x)/3:
|
Formal statement is: lemma orthogonal_commute: "orthogonal x y \<longleftrightarrow> orthogonal y x" Informal statement is: Two vectors are orthogonal if and only if they are orthogonal. |
#ifndef OPENMC_TALLIES_FILTER_MATERIAL_H
#define OPENMC_TALLIES_FILTER_MATERIAL_H
#include <cstdint>
#include <unordered_map>
#include <gsl/gsl>
#include "openmc/tallies/filter.h"
#include "openmc/vector.h"
namespace openmc {
//==============================================================================
//! Specifies which material tally events reside in.
//==============================================================================
class MaterialFilter : public Filter
{
public:
//----------------------------------------------------------------------------
// Constructors, destructors
~MaterialFilter() = default;
//----------------------------------------------------------------------------
// Methods
std::string type() const override {return "material";}
void from_xml(pugi::xml_node node) override;
void get_all_bins(const Particle& p, TallyEstimator estimator, FilterMatch& match)
const override;
void to_statepoint(hid_t filter_group) const override;
std::string text_label(int bin) const override;
//----------------------------------------------------------------------------
// Accessors
vector<int32_t>& materials() { return materials_; }
const vector<int32_t>& materials() const { return materials_; }
void set_materials(gsl::span<const int32_t> materials);
private:
//----------------------------------------------------------------------------
// Data members
//! The indices of the materials binned by this filter.
vector<int32_t> materials_;
//! A map from material indices to filter bin indices.
std::unordered_map<int32_t, int> map_;
};
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_MATERIAL_H
|
lemma continuous_at_Sup_mono: fixes f :: "'a::{linorder_topology,conditionally_complete_linorder} \<Rightarrow> 'b::{linorder_topology,conditionally_complete_linorder}" assumes "mono f" and cont: "continuous (at_left (Sup S)) f" and S: "S \<noteq> {}" "bdd_above S" shows "f (Sup S) = (SUP s\<in>S. f s)" |
lemma disjoint_cballI: "dist x y > r + s \<Longrightarrow> cball x r \<inter> cball y s = {}" |
[STATEMENT]
lemma mem_Epigraph: "(x, y) \<in> Epigraph S f \<longleftrightarrow> x \<in> S \<and> f x \<le> ereal y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((x, y) \<in> Epigraph S f) = (x \<in> S \<and> f x \<le> ereal y)
[PROOF STEP]
unfolding Epigraph_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((x, y) \<in> {xy. fst xy \<in> S \<and> f (fst xy) \<le> ereal (snd xy)}) = (x \<in> S \<and> f x \<le> ereal y)
[PROOF STEP]
by auto |
Formal statement is: lemma filterlim_uminus_at_bot: "(LIM x F. f x :> at_bot) \<longleftrightarrow> (LIM x F. - (f x) :: real :> at_top)" Informal statement is: The filter $\{f \leq a\}$ converges to $-\infty$ if and only if the filter $\{-f \geq -a\}$ converges to $\infty$. |
lemma uncountable_ball: fixes a :: "'a::euclidean_space" assumes "r > 0" shows "uncountable (ball a r)" |
Formal statement is: lemma homeomorphic_compact: fixes f :: "'a::topological_space \<Rightarrow> 'b::t2_space" shows "compact s \<Longrightarrow> continuous_on s f \<Longrightarrow> (f ` s = t) \<Longrightarrow> inj_on f s \<Longrightarrow> s homeomorphic t" Informal statement is: If $f$ is a continuous injective map from a compact space $S$ to a Hausdorff space $T$, then $S$ and $T$ are homeomorphic. |
Formal statement is: lemma locally_compact_Int: fixes S :: "'a :: t2_space set" shows "\<lbrakk>locally compact S; locally compact t\<rbrakk> \<Longrightarrow> locally compact (S \<inter> t)" Informal statement is: If $S$ and $t$ are locally compact, then $S \cap t$ is locally compact. |
!> Implementation of the build configuration data.
!
! A build table can currently have the following fields
!
! ```toml
! [build]
! auto-executables = <bool>
! auto-tests = <bool>
! ```
module fpm_manifest_build_config
use fpm_error, only : error_t, syntax_error, fatal_error
use fpm_toml, only : toml_table, toml_key, toml_stat, get_value
implicit none
private
public :: build_config_t, new_build_config
!> Configuration data for build
type :: build_config_t
!> Automatic discovery of executables
logical :: auto_executables
!> Automatic discovery of tests
logical :: auto_tests
contains
!> Print information on this instance
procedure :: info
end type build_config_t
contains
!> Construct a new build configuration from a TOML data structure
subroutine new_build_config(self, table, error)
!> Instance of the build configuration
type(build_config_t), intent(out) :: self
!> Instance of the TOML data structure
type(toml_table), intent(inout) :: table
!> Error handling
type(error_t), allocatable, intent(out) :: error
!> Status
integer :: stat
call check(table, error)
if (allocated(error)) return
call get_value(table, "auto-executables", self%auto_executables, .true., stat=stat)
if (stat /= toml_stat%success) then
call fatal_error(error,"Error while reading value for 'auto-executables' in fpm.toml, expecting logical")
return
end if
call get_value(table, "auto-tests", self%auto_tests, .true., stat=stat)
if (stat /= toml_stat%success) then
call fatal_error(error,"Error while reading value for 'auto-tests' in fpm.toml, expecting logical")
return
end if
end subroutine new_build_config
!> Check local schema for allowed entries
subroutine check(table, error)
!> Instance of the TOML data structure
type(toml_table), intent(inout) :: table
!> Error handling
type(error_t), allocatable, intent(out) :: error
type(toml_key), allocatable :: list(:)
integer :: ikey
call table%get_keys(list)
! table can be empty
if (size(list) < 1) return
do ikey = 1, size(list)
select case(list(ikey)%key)
case("auto-executables", "auto-tests")
continue
case default
call syntax_error(error, "Key "//list(ikey)%key//" is not allowed in [build]")
exit
end select
end do
end subroutine check
!> Write information on build configuration instance
subroutine info(self, unit, verbosity)
!> Instance of the build configuration
class(build_config_t), intent(in) :: self
!> Unit for IO
integer, intent(in) :: unit
!> Verbosity of the printout
integer, intent(in), optional :: verbosity
integer :: pr
character(len=*), parameter :: fmt = '("#", 1x, a, t30, a)'
if (present(verbosity)) then
pr = verbosity
else
pr = 1
end if
if (pr < 1) return
write(unit, fmt) "Build configuration"
! if (allocated(self%auto_executables)) then
write(unit, fmt) " - auto-discovery (apps) ", merge("enabled ", "disabled", self%auto_executables)
! end if
! if (allocated(self%auto_tests)) then
write(unit, fmt) " - auto-discovery (tests) ", merge("enabled ", "disabled", self%auto_tests)
! end if
end subroutine info
end module fpm_manifest_build_config
|
-- @@stderr --
dtrace: failed to compile script test/unittest/pointers/err.D_OP_SOU.BadPointerAccess.d: [D_OP_SOU] line 26: operator . cannot be applied to type "struct myinput_struct *"; must be applied to a struct or union
|
lemma homeomorphic_balls: fixes a b ::"'a::real_normed_vector" assumes "0 < d" "0 < e" shows "(ball a d) homeomorphic (ball b e)" (is ?th) and "(cball a d) homeomorphic (cball b e)" (is ?cth) |
function b = legcoeffs(f, n)
%LEGCOEFFS Compute Legendre series coefficients of a CHEBTECH object.
% B = LEGCOEFFS(F) returns the Legendre series coefficients of CHEBTECH F, so
% that F = B(1)*P_0 + ... + B(N)*P_(N-1), where P_k is the kth Legendre
% polynomial. B is a vector of the same length as that of F.
%
% B = LEGCOEFFS(F, N) returns the first N coefficients. If length(F) < N
% then the additional entries of B are padded with zeros.
%
% If F is an array-valued CHEBTECH, then a matrix of coefficients is returned
% so that F(:,k) = B(1,k)*P_0 + ... + B(N,k)*P_(N-1).
%
% See also CHEBCOEFFS.
% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.
b = cheb2leg(f.coeffs);
if ( nargin > 1 )
s = size(b);
if ( s(1) > n )
b = b(1:n, :);
else
b = [b; zeros(n-s(1), s(2))];
end
end
end
|
-- -------------------------------------------------------------- [ Pretty.idr ]
-- Module : Pretty.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| Print pretty models.
module GRL.Pretty
import GRL.Model
%access export
||| Pretty models
prettyModel : GModel -> String
prettyModel g = (foldl (\res,x => show x ++ "\n" ++ res) "" (vertices g)) ++ "\n" ++
(foldl (\res,x => show x ++ "\n" ++ res) "" (edges g)) ++ "\n"
-- --------------------------------------------------------------------- [ EOF ]
|
section \<open>State and Lens integration\<close>
theory Lens_State
imports
"HOL-Library.State_Monad"
Lens_Algebra
begin
text \<open>Inspired by Haskell's lens package\<close>
definition zoom :: "('a \<Longrightarrow> 'b) \<Rightarrow> ('a, 'c) state \<Rightarrow> ('b, 'c) state" where
"zoom l m = State (\<lambda>b. case run_state m (lens_get l b) of (c, a) \<Rightarrow> (c, lens_put l b a))"
definition use :: "('a \<Longrightarrow> 'b) \<Rightarrow> ('b, 'a) state" where
"use l = zoom l State_Monad.get"
definition modify :: "('a \<Longrightarrow> 'b) \<Rightarrow> ('a \<Rightarrow> 'a) \<Rightarrow> ('b, unit) state" where
"modify l f = zoom l (State_Monad.update f)"
definition assign :: "('a \<Longrightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> ('b, unit) state" where
"assign l b = zoom l (State_Monad.set b)"
context begin
qualified abbreviation "add l n \<equiv> modify l (\<lambda>x. x + n)"
qualified abbreviation "sub l n \<equiv> modify l (\<lambda>x. x - n)"
qualified abbreviation "mul l n \<equiv> modify l (\<lambda>x. x * n)"
qualified abbreviation "inc l \<equiv> add l 1"
qualified abbreviation "dec l \<equiv> sub l 1"
end
bundle lens_state_notation begin
notation zoom (infixr "\<rhd>" 80)
notation modify (infix "%=" 80)
notation assign (infix ".=" 80)
notation Lens_State.add (infix "+=" 80)
notation Lens_State.sub (infix "-=" 80)
notation Lens_State.mul (infix "*=" 80)
notation Lens_State.inc ("_ ++")
notation Lens_State.dec ("_ --")
end
context includes lens_state_notation begin
lemma zoom_comp1: "l1 \<rhd> l2 \<rhd> s = (l2 ;\<^sub>L l1) \<rhd> s"
unfolding zoom_def lens_comp_def
by (auto split: prod.splits)
lemma zoom_zero[simp]: "zero_lens \<rhd> s = s"
unfolding zoom_def zero_lens_def
by simp
lemma zoom_id[simp]: "id_lens \<rhd> s = s"
unfolding zoom_def id_lens_def
by simp
end
lemma (in mwb_lens) zoom_comp2[simp]: "zoom x m \<bind> (\<lambda>a. zoom x (n a)) = zoom x (m \<bind> n)"
unfolding zoom_def State_Monad.bind_def
by (auto split: prod.splits simp: put_get put_put)
lemma (in wb_lens) use_alt_def: "use x = map_state (lens_get x) State_Monad.get"
unfolding State_Monad.get_def use_def zoom_def
by (simp add: comp_def get_put)
lemma (in wb_lens) modify_alt_def: "modify x f = State_Monad.update (update f)"
unfolding modify_def zoom_def update_def State_Monad.update_def State_Monad.get_def State_Monad.set_def State_Monad.bind_def
by auto
lemma (in wb_lens) modify_id[simp]: "modify x (\<lambda>x. x) = State_Monad.return ()"
unfolding update_def modify_alt_def
by (simp add: get_put)
lemma (in mwb_lens) modify_comp[simp]: "bind (modify x f) (\<lambda>_. modify x g) = modify x (g \<circ> f)"
unfolding modify_def
by simp
end
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$define xc_dimensions_2d
_2d_pbe_kappa := 0.4604:
_2d_pbe_mu := 0.354546875:
_2d_pbe_f0 := s -> 1 + _2d_pbe_kappa*(1 - _2d_pbe_kappa/(_2d_pbe_kappa + _2d_pbe_mu*s^2)):
_2d_pbe_f := x -> _2d_pbe_f0(X2S_2D*x):
f := (rs, zeta, xt, xs0, xs1) -> gga_exchange(_2d_pbe_f, rs, zeta, xs0, xs1):
|
(* Title: JinjaThreads/Common/Decl.thy
Author: David von Oheimb, Andreas Lochbihler
Based on the Jinja theory Common/Decl.thy by David von Oheimb
*)
section \<open>Class Declarations and Programs\<close>
theory Decl
imports
Type
begin
type_synonym volatile = bool
record fmod =
volatile :: volatile
type_synonym fdecl = "vname \<times> ty \<times> fmod" \<comment> \<open>field declaration\<close>
type_synonym 'm mdecl = "mname \<times> ty list \<times> ty \<times> 'm" \<comment> \<open>method = name, arg. types, return type, body\<close>
type_synonym 'm mdecl' = "mname \<times> ty list \<times> ty \<times> 'm option" \<comment> \<open>method = name, arg. types, return type, possible body\<close>
type_synonym 'm "class" = "cname \<times> fdecl list \<times> 'm mdecl' list" \<comment> \<open>class = superclass, fields, methods\<close>
type_synonym 'm cdecl = "cname \<times> 'm class" \<comment> \<open>class declaration\<close>
datatype
'm prog = Program "'m cdecl list"
translations
(type) "fdecl" <= (type) "String.literal \<times> ty \<times> fmod"
(type) "'c mdecl" <= (type) "String.literal \<times> ty list \<times> ty \<times> 'c"
(type) "'c mdecl'" <= (type) "String.literal \<times> ty list \<times> ty \<times> 'c option"
(type) "'c class" <= (type) "String.literal \<times> fdecl list \<times> ('c mdecl) list"
(type) "'c cdecl" <= (type) "String.literal \<times> ('c class)"
notation (input) None ("Native")
primrec "classes" :: "'m prog \<Rightarrow> 'm cdecl list"
where
"classes (Program P) = P"
primrec "class" :: "'m prog \<Rightarrow> cname \<rightharpoonup> 'm class"
where
"class (Program p) = map_of p"
locale prog =
fixes P :: "'m prog"
definition is_class :: "'m prog \<Rightarrow> cname \<Rightarrow> bool"
where
"is_class P C \<equiv> class P C \<noteq> None"
lemma finite_is_class: "finite {C. is_class P C}"
(*<*)
apply(cases P)
apply (unfold is_class_def)
apply (fold dom_def)
apply(simp add: finite_dom_map_of)
done
(*>*)
primrec is_type :: "'m prog \<Rightarrow> ty \<Rightarrow> bool"
where
is_type_void: "is_type P Void = True"
| is_type_bool: "is_type P Boolean = True"
| is_type_int: "is_type P Integer = True"
| is_type_nt: "is_type P NT = True"
| is_type_class: "is_type P (Class C) = is_class P C"
| is_type_array: "is_type P (A\<lfloor>\<rceil>) = (case ground_type A of NT \<Rightarrow> False | Class C \<Rightarrow> is_class P C | _ \<Rightarrow> True)"
lemma is_type_ArrayD: "is_type P (T\<lfloor>\<rceil>) \<Longrightarrow> is_type P T"
by(induct T) auto
lemma is_type_ground_type:
"is_type P T \<Longrightarrow> is_type P (ground_type T)"
by(induct T)(auto, metis is_type_ArrayD is_type_array)
abbreviation "types" :: "'m prog \<Rightarrow> ty set"
where "types P \<equiv> {T. is_type P T}"
abbreviation is_htype :: "'m prog \<Rightarrow> htype \<Rightarrow> bool"
where "is_htype P hT \<equiv> is_type P (ty_of_htype hT)"
subsection \<open>Code generation\<close>
lemma is_class_intros [code_pred_intro]:
"class P C \<noteq> None \<Longrightarrow> is_class P C"
by(auto simp add: is_class_def)
code_pred
(modes: i \<Rightarrow> i \<Rightarrow> bool)
is_class
unfolding is_class_def by simp
declare is_class_def[code]
end
|
/-
Copyright (c) 2021 Heather Macbeth. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Heather Macbeth
-/
import analysis.special_functions.exp
import topology.continuous_function.basic
import analysis.normed.field.unit_ball
/-!
# The circle
This file defines `circle` to be the metric sphere (`metric.sphere`) in `ℂ` centred at `0` of
radius `1`. We equip it with the following structure:
* a submonoid of `ℂ`
* a group
* a topological group
We furthermore define `exp_map_circle` to be the natural map `λ t, exp (t * I)` from `ℝ` to
`circle`, and show that this map is a group homomorphism.
## Implementation notes
Because later (in `geometry.manifold.instances.sphere`) one wants to equip the circle with a smooth
manifold structure borrowed from `metric.sphere`, the underlying set is
`{z : ℂ | abs (z - 0) = 1}`. This prevents certain algebraic facts from working definitionally --
for example, the circle is not defeq to `{z : ℂ | abs z = 1}`, which is the kernel of `complex.abs`
considered as a homomorphism from `ℂ` to `ℝ`, nor is it defeq to `{z : ℂ | norm_sq z = 1}`, which
is the kernel of the homomorphism `complex.norm_sq` from `ℂ` to `ℝ`.
-/
noncomputable theory
open complex metric
open_locale complex_conjugate
/-- The unit circle in `ℂ`, here given the structure of a submonoid of `ℂ`. -/
def circle : submonoid ℂ := submonoid.unit_sphere ℂ
@[simp] lemma mem_circle_iff_abs {z : ℂ} : z ∈ circle ↔ abs z = 1 := mem_sphere_zero_iff_norm
lemma circle_def : ↑circle = {z : ℂ | abs z = 1} := set.ext $ λ z, mem_circle_iff_abs
@[simp] lemma abs_coe_circle (z : circle) : abs z = 1 :=
mem_circle_iff_abs.mp z.2
lemma mem_circle_iff_norm_sq {z : ℂ} : z ∈ circle ↔ norm_sq z = 1 :=
by simp [complex.abs]
@[simp] lemma norm_sq_eq_of_mem_circle (z : circle) : norm_sq z = 1 := by simp [norm_sq_eq_abs]
lemma ne_zero_of_mem_circle (z : circle) : (z:ℂ) ≠ 0 := ne_zero_of_mem_unit_sphere z
instance : comm_group circle := metric.sphere.comm_group
@[simp] lemma coe_inv_circle (z : circle) : ↑(z⁻¹) = (z : ℂ)⁻¹ := rfl
lemma coe_inv_circle_eq_conj (z : circle) : ↑(z⁻¹) = conj (z : ℂ) :=
by rw [coe_inv_circle, inv_def, norm_sq_eq_of_mem_circle, inv_one, of_real_one, mul_one]
@[simp] lemma coe_div_circle (z w : circle) : ↑(z / w) = (z:ℂ) / w :=
circle.subtype.map_div z w
/-- The elements of the circle embed into the units. -/
def circle.to_units : circle →* units ℂ := unit_sphere_to_units ℂ
-- written manually because `@[simps]` was slow and generated the wrong lemma
@[simp] lemma circle.to_units_apply (z : circle) :
circle.to_units z = units.mk0 z (ne_zero_of_mem_circle z) := rfl
instance : compact_space circle := metric.sphere.compact_space _ _
instance : topological_group circle := metric.sphere.topological_group
/-- If `z` is a nonzero complex number, then `conj z / z` belongs to the unit circle. -/
@[simps] def circle.of_conj_div_self (z : ℂ) (hz : z ≠ 0) : circle :=
⟨conj z / z, mem_circle_iff_abs.2 $ by rw [map_div₀, abs_conj, div_self (complex.abs.ne_zero hz)]⟩
/-- The map `λ t, exp (t * I)` from `ℝ` to the unit circle in `ℂ`. -/
def exp_map_circle : C(ℝ, circle) :=
{ to_fun := λ t, ⟨exp (t * I), by simp [exp_mul_I, abs_cos_add_sin_mul_I]⟩ }
@[simp] lemma exp_map_circle_apply (t : ℝ) : ↑(exp_map_circle t) = complex.exp (t * complex.I) :=
rfl
@[simp] lemma exp_map_circle_zero : exp_map_circle 0 = 1 :=
subtype.ext $ by rw [exp_map_circle_apply, of_real_zero, zero_mul, exp_zero, submonoid.coe_one]
@[simp] lemma exp_map_circle_add (x y : ℝ) :
exp_map_circle (x + y) = exp_map_circle x * exp_map_circle y :=
subtype.ext $ by simp only [exp_map_circle_apply, submonoid.coe_mul, of_real_add, add_mul,
complex.exp_add]
/-- The map `λ t, exp (t * I)` from `ℝ` to the unit circle in `ℂ`, considered as a homomorphism of
groups. -/
@[simps]
def exp_map_circle_hom : ℝ →+ (additive circle) :=
{ to_fun := additive.of_mul ∘ exp_map_circle,
map_zero' := exp_map_circle_zero,
map_add' := exp_map_circle_add }
@[simp] lemma exp_map_circle_sub (x y : ℝ) :
exp_map_circle (x - y) = exp_map_circle x / exp_map_circle y :=
exp_map_circle_hom.map_sub x y
@[simp] lemma exp_map_circle_neg (x : ℝ) : exp_map_circle (-x) = (exp_map_circle x)⁻¹ :=
exp_map_circle_hom.map_neg x
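-- A minimal usage sketch, relying only on the lemmas above: values of `exp_map_circle`
-- have absolute value `1`, and the map turns addition in `ℝ` into multiplication on `circle`.
example (t : ℝ) : abs (complex.exp (t * I)) = 1 :=
by { rw ← exp_map_circle_apply, exact abs_coe_circle _ }

example (s t : ℝ) :
  (↑(exp_map_circle s * exp_map_circle t) : ℂ) = exp ((s + t) * I) :=
by rw [← exp_map_circle_add, exp_map_circle_apply]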
|
Formal statement is: lemma islimpt_punctured: "x islimpt S = x islimpt (S-{x})" Informal statement is: A point $x$ is a limit point of a set $S$ if and only if $x$ is a limit point of $S$ with $x$ removed. |
lemma closed_diagonal: "closed {y. \<exists> x::('a::t2_space). y = (x,x)}" |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
$include "mgga_c_scan.mpl"
(* Override definition of beta *)
mbeta := (rs, t) -> 0.066725:
rmggac_gamma1 := 0.08:
rmggac_gamma2 := 0.3:
rmggac_g := (alpha, s) ->
(1 + rmggac_gamma1)*alpha/(rmggac_gamma1 + alpha + rmggac_gamma2*s^2):
rmggac_f2 := (alpha, s) ->
3*rmggac_g(alpha, s)^3/(1 + rmggac_g(alpha, s)^3 + rmggac_g(alpha, s)^6):
rmggac_f1 := (alpha, s) ->
1 - rmggac_f2(alpha, s):
rmggac_gamma := 0.031091:
(* from mgga_c_r2scan *)
rmggac_w1 := (rs, z) -> exp(-f_pw(rs, z)/(rmggac_gamma*mphi(z)^3)) - 1:
rmggac_H1 := (rs, z, t) -> rmggac_gamma*mphi(z)^3*log(1 + rmggac_w1(rs, z) * (1 - scan_e0_g(rs, z, t))):
rmggac_eps1 := (rs, z, t) ->
(f_pw(rs, z) + rmggac_H1(rs, z, t)):
rmggac_alpha := (z, xt, ts0, ts1) ->
(t_total(z, ts0, ts1) - xt^2/4)/(2**(1/3)*K_FACTOR_C):
rmggac_f := (rs, z, xt, xs0, xs1, ts0, ts1) ->
+ scan_e0(rs, z, X2S*2^(1/3)*xt)
* rmggac_f1(rmggac_alpha(z, xt, ts0, ts1), X2S*2^(1/3)*xt)
+ rmggac_eps1(rs, z, tp(rs, z, xt))
* rmggac_f2(rmggac_alpha(z, xt, ts0, ts1), X2S*2^(1/3)*xt):
(* the functional is written for the other convention for tau *)
f := (rs, z, xt, xs0, xs1, us0, us1, ts0, ts1) ->
rmggac_f(rs, z, xt, xs0, xs1, 2*ts0, 2*ts1):
|
lemma minus_image_eq_vimage: fixes A :: "'a::ab_group_add set" shows "(\<lambda>x. - x) ` A = (\<lambda>x. - x) -` A" |
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Mario Carneiro, Patrick Massot
-/
import group_theory.group_action.conj_act
import group_theory.group_action.quotient
import order.filter.pointwise
import topology.algebra.monoid
import topology.compact_open
import topology.sets.compacts
import topology.algebra.constructions
/-!
# Topological groups
This file defines the following typeclasses:
* `topological_group`, `topological_add_group`: multiplicative and additive topological groups,
i.e., groups with continuous `(*)` and `(⁻¹)` / `(+)` and `(-)`;
* `has_continuous_sub G` means that `G` has a continuous subtraction operation.
There is an instance deducing `has_continuous_sub` from `topological_group` but we use a separate
typeclass because, e.g., `ℕ` and `ℝ≥0` have continuous subtraction but are not additive groups.
We also define `homeomorph` versions of several `equiv`s: `homeomorph.mul_left`,
`homeomorph.mul_right`, `homeomorph.inv`, and prove a few facts about neighbourhood filters in
groups.
## Tags
topological space, group, topological group
-/
open classical set filter topological_space function
open_locale classical topological_space filter pointwise
universes u v w x
variables {α : Type u} {β : Type v} {G : Type w} {H : Type x}
section continuous_mul_group
/-!
### Groups with continuous multiplication
In this section we prove a few statements about groups with continuous `(*)`.
-/
variables [topological_space G] [group G] [has_continuous_mul G]
/-- Multiplication from the left in a topological group as a homeomorphism. -/
@[to_additive "Addition from the left in a topological additive group as a homeomorphism."]
protected def homeomorph.mul_left (a : G) : G ≃ₜ G :=
{ continuous_to_fun := continuous_const.mul continuous_id,
continuous_inv_fun := continuous_const.mul continuous_id,
.. equiv.mul_left a }
@[simp, to_additive]
lemma homeomorph.coe_mul_left (a : G) : ⇑(homeomorph.mul_left a) = (*) a := rfl
@[to_additive]
lemma homeomorph.mul_left_symm (a : G) : (homeomorph.mul_left a).symm = homeomorph.mul_left a⁻¹ :=
by { ext, refl }
@[to_additive]
lemma is_open_map_mul_left (a : G) : is_open_map (λ x, a * x) :=
(homeomorph.mul_left a).is_open_map
@[to_additive is_open.left_add_coset]
lemma is_open.left_coset {U : set G} (h : is_open U) (x : G) : is_open (left_coset x U) :=
is_open_map_mul_left x _ h
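-- A hedged usage sketch: since left multiplication is an open map, it sends any open set to an
-- open set; this is the content of `is_open.left_coset`, phrased through the image directly.
example (a : G) {s : set G} (hs : is_open s) : is_open ((λ x, a * x) '' s) :=
is_open_map_mul_left a s hs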
@[to_additive]
lemma is_closed_map_mul_left (a : G) : is_closed_map (λ x, a * x) :=
(homeomorph.mul_left a).is_closed_map
@[to_additive is_closed.left_add_coset]
lemma is_closed.left_coset {U : set G} (h : is_closed U) (x : G) : is_closed (left_coset x U) :=
is_closed_map_mul_left x _ h
/-- Multiplication from the right in a topological group as a homeomorphism. -/
@[to_additive "Addition from the right in a topological additive group as a homeomorphism."]
protected def homeomorph.mul_right (a : G) :
G ≃ₜ G :=
{ continuous_to_fun := continuous_id.mul continuous_const,
continuous_inv_fun := continuous_id.mul continuous_const,
.. equiv.mul_right a }
@[simp, to_additive]
lemma homeomorph.coe_mul_right (a : G) : ⇑(homeomorph.mul_right a) = λ g, g * a := rfl
@[to_additive]
lemma homeomorph.mul_right_symm (a : G) :
(homeomorph.mul_right a).symm = homeomorph.mul_right a⁻¹ :=
by { ext, refl }
@[to_additive]
lemma is_open_map_mul_right (a : G) : is_open_map (λ x, x * a) :=
(homeomorph.mul_right a).is_open_map
@[to_additive is_open.right_add_coset]
lemma is_open.right_coset {U : set G} (h : is_open U) (x : G) : is_open (right_coset U x) :=
is_open_map_mul_right x _ h
@[to_additive]
lemma is_closed_map_mul_right (a : G) : is_closed_map (λ x, x * a) :=
(homeomorph.mul_right a).is_closed_map
@[to_additive is_closed.right_add_coset]
lemma is_closed.right_coset {U : set G} (h : is_closed U) (x : G) : is_closed (right_coset U x) :=
is_closed_map_mul_right x _ h
@[to_additive]
lemma discrete_topology_of_open_singleton_one (h : is_open ({1} : set G)) : discrete_topology G :=
begin
rw ← singletons_open_iff_discrete,
intro g,
suffices : {g} = (λ (x : G), g⁻¹ * x) ⁻¹' {1},
{ rw this, exact (continuous_mul_left (g⁻¹)).is_open_preimage _ h, },
simp only [mul_one, set.preimage_mul_left_singleton, eq_self_iff_true,
inv_inv, set.singleton_eq_singleton_iff],
end
@[to_additive]
lemma discrete_topology_iff_open_singleton_one : discrete_topology G ↔ is_open ({1} : set G) :=
⟨λ h, forall_open_iff_discrete.mpr h {1}, discrete_topology_of_open_singleton_one⟩
end continuous_mul_group
/-!
### `has_continuous_inv` and `has_continuous_neg`
-/
/-- Basic hypothesis to talk about a topological additive group. A topological additive group
over `M`, for example, is obtained by requiring the instances `add_group M` and
`has_continuous_add M` and `has_continuous_neg M`. -/
class has_continuous_neg (G : Type u) [topological_space G] [has_neg G] : Prop :=
(continuous_neg : continuous (λ a : G, -a))
/-- Basic hypothesis to talk about a topological group. A topological group over `M`, for example,
is obtained by requiring the instances `group M` and `has_continuous_mul M` and
`has_continuous_inv M`. -/
@[to_additive]
class has_continuous_inv (G : Type u) [topological_space G] [has_inv G] : Prop :=
(continuous_inv : continuous (λ a : G, a⁻¹))
export has_continuous_inv (continuous_inv)
export has_continuous_neg (continuous_neg)
section continuous_inv
variables [topological_space G] [has_inv G] [has_continuous_inv G]
@[to_additive]
lemma continuous_on_inv {s : set G} : continuous_on has_inv.inv s :=
continuous_inv.continuous_on
@[to_additive]
lemma continuous_within_at_inv {s : set G} {x : G} : continuous_within_at has_inv.inv s x :=
continuous_inv.continuous_within_at
@[to_additive]
lemma continuous_at_inv {x : G} : continuous_at has_inv.inv x :=
continuous_inv.continuous_at
@[to_additive]
lemma tendsto_inv (a : G) : tendsto has_inv.inv (𝓝 a) (𝓝 (a⁻¹)) :=
continuous_at_inv
/-- If a function converges to a value in a multiplicative topological group, then its inverse
converges to the inverse of this value. For the version in normed fields assuming additionally
that the limit is nonzero, use `tendsto.inv'`. -/
@[to_additive]
lemma filter.tendsto.inv {f : α → G} {l : filter α} {y : G} (h : tendsto f l (𝓝 y)) :
tendsto (λ x, (f x)⁻¹) l (𝓝 y⁻¹) :=
(continuous_inv.tendsto y).comp h
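-- A hedged usage sketch: specialising `filter.tendsto.inv` to a sequence `u : ℕ → G`
-- converging along `at_top`.
example {u : ℕ → G} {y : G} (h : tendsto u at_top (𝓝 y)) :
  tendsto (λ n, (u n)⁻¹) at_top (𝓝 y⁻¹) :=
h.inv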
variables [topological_space α] {f : α → G} {s : set α} {x : α}
@[continuity, to_additive]
lemma continuous.inv (hf : continuous f) : continuous (λx, (f x)⁻¹) :=
continuous_inv.comp hf
@[to_additive]
lemma continuous_at.inv (hf : continuous_at f x) : continuous_at (λ x, (f x)⁻¹) x :=
continuous_at_inv.comp hf
@[to_additive]
lemma continuous_on.inv (hf : continuous_on f s) : continuous_on (λx, (f x)⁻¹) s :=
continuous_inv.comp_continuous_on hf
@[to_additive]
lemma continuous_within_at.inv (hf : continuous_within_at f s x) :
continuous_within_at (λ x, (f x)⁻¹) s x :=
hf.inv
@[to_additive]
instance [topological_space H] [has_inv H] [has_continuous_inv H] : has_continuous_inv (G × H) :=
⟨(continuous_inv.comp continuous_fst).prod_mk (continuous_inv.comp continuous_snd)⟩
variable {ι : Type*}
@[to_additive]
instance pi.has_continuous_inv {C : ι → Type*} [∀ i, topological_space (C i)]
[∀ i, has_inv (C i)] [∀ i, has_continuous_inv (C i)] : has_continuous_inv (Π i, C i) :=
{ continuous_inv := continuous_pi (λ i, continuous.inv (continuous_apply i)) }
/-- A version of `pi.has_continuous_inv` for non-dependent functions. It is needed because sometimes
Lean fails to use `pi.has_continuous_inv` for non-dependent functions. -/
@[to_additive "A version of `pi.has_continuous_neg` for non-dependent functions. It is needed
because sometimes Lean fails to use `pi.has_continuous_neg` for non-dependent functions."]
instance pi.has_continuous_inv' : has_continuous_inv (ι → G) :=
pi.has_continuous_inv
@[priority 100, to_additive]
instance has_continuous_inv_of_discrete_topology [topological_space H]
[has_inv H] [discrete_topology H] : has_continuous_inv H :=
⟨continuous_of_discrete_topology⟩
section pointwise_limits
variables (G₁ G₂ : Type*) [topological_space G₂] [t2_space G₂]
@[to_additive] lemma is_closed_set_of_map_inv [has_inv G₁] [has_inv G₂] [has_continuous_inv G₂] :
is_closed {f : G₁ → G₂ | ∀ x, f x⁻¹ = (f x)⁻¹ } :=
begin
simp only [set_of_forall],
refine is_closed_Inter (λ i, is_closed_eq (continuous_apply _) (continuous_apply _).inv),
end
end pointwise_limits
instance additive.has_continuous_neg [h : topological_space H] [has_inv H]
[has_continuous_inv H] : @has_continuous_neg (additive H) h _ :=
{ continuous_neg := @continuous_inv H _ _ _ }
instance multiplicative.has_continuous_inv [h : topological_space H] [has_neg H]
[has_continuous_neg H] : @has_continuous_inv (multiplicative H) h _ :=
{ continuous_inv := @continuous_neg H _ _ _ }
end continuous_inv
section continuous_involutive_inv
variables [topological_space G] [has_involutive_inv G] [has_continuous_inv G] {s : set G}
@[to_additive] lemma is_compact.inv (hs : is_compact s) : is_compact s⁻¹ :=
by { rw [← image_inv], exact hs.image continuous_inv }
variables (G)
/-- Inversion in a topological group as a homeomorphism. -/
@[to_additive "Negation in a topological group as a homeomorphism."]
protected def homeomorph.inv (G : Type*) [topological_space G] [has_involutive_inv G]
[has_continuous_inv G] : G ≃ₜ G :=
{ continuous_to_fun := continuous_inv,
continuous_inv_fun := continuous_inv,
.. equiv.inv G }
@[to_additive] lemma is_open_map_inv : is_open_map (has_inv.inv : G → G) :=
(homeomorph.inv _).is_open_map
@[to_additive] lemma is_closed_map_inv : is_closed_map (has_inv.inv : G → G) :=
(homeomorph.inv _).is_closed_map
variables {G}
@[to_additive] lemma is_open.inv (hs : is_open s) : is_open s⁻¹ := hs.preimage continuous_inv
@[to_additive] lemma is_closed.inv (hs : is_closed s) : is_closed s⁻¹ := hs.preimage continuous_inv
@[to_additive] lemma inv_closure : ∀ s : set G, (closure s)⁻¹ = closure s⁻¹ :=
(homeomorph.inv G).preimage_closure
end continuous_involutive_inv
section lattice_ops
variables {ι' : Sort*} [has_inv G] [has_inv H] {ts : set (topological_space G)}
(h : Π t ∈ ts, @has_continuous_inv G t _) {ts' : ι' → topological_space G}
(h' : Π i, @has_continuous_inv G (ts' i) _) {t₁ t₂ : topological_space G}
(h₁ : @has_continuous_inv G t₁ _) (h₂ : @has_continuous_inv G t₂ _)
{t : topological_space H} [has_continuous_inv H]
@[to_additive] lemma has_continuous_inv_Inf :
@has_continuous_inv G (Inf ts) _ :=
{ continuous_inv := continuous_Inf_rng (λ t ht, continuous_Inf_dom ht
(@has_continuous_inv.continuous_inv G t _ (h t ht))) }
include h'
@[to_additive] lemma has_continuous_inv_infi :
@has_continuous_inv G (⨅ i, ts' i) _ :=
by {rw ← Inf_range, exact has_continuous_inv_Inf (set.forall_range_iff.mpr h')}
omit h'
include h₁ h₂
@[to_additive] lemma has_continuous_inv_inf :
@has_continuous_inv G (t₁ ⊓ t₂) _ :=
by {rw inf_eq_infi, refine has_continuous_inv_infi (λ b, _), cases b; assumption}
end lattice_ops
section topological_group
/-!
### Topological groups
A topological group is a group in which the multiplication and inversion operations are
continuous. Topological additive groups are defined in the same way. Equivalently, we can require
that the division operation `λ x y, x * y⁻¹` (resp., subtraction) is continuous.
-/
/-- A topological (additive) group is a group in which the addition and negation operations are
continuous. -/
class topological_add_group (G : Type u) [topological_space G] [add_group G]
extends has_continuous_add G, has_continuous_neg G : Prop
/-- A topological group is a group in which the multiplication and inversion operations are
continuous.
When you declare an instance that does not already have a `uniform_space` instance,
you should also provide an instance of `uniform_space` and `uniform_group` using
`topological_group.to_uniform_space` and `topological_group_is_uniform`. -/
@[to_additive]
class topological_group (G : Type*) [topological_space G] [group G]
extends has_continuous_mul G, has_continuous_inv G : Prop
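-- A hedged illustration: with a `topological_group` instance, the two continuity fields combine
-- through dot notation; e.g. `x ↦ a * x⁻¹` is continuous for any fixed `a`.
example {K : Type*} [topological_space K] [group K] [topological_group K] (a : K) :
  continuous (λ x : K, a * x⁻¹) :=
continuous_const.mul continuous_inv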
section conj
instance conj_act.units_has_continuous_const_smul {M} [monoid M] [topological_space M]
[has_continuous_mul M] :
has_continuous_const_smul (conj_act Mˣ) M :=
⟨λ m, (continuous_const.mul continuous_id).mul continuous_const⟩
/-- We slightly weaken the type class assumptions here so that it will also apply to `ennreal`, but
we nevertheless leave it in the `topological_group` namespace. -/
variables [topological_space G] [has_inv G] [has_mul G] [has_continuous_mul G]
/-- Conjugation is jointly continuous on `G × G` when both `mul` and `inv` are continuous. -/
@[to_additive "Conjugation is jointly continuous on `G × G` when both `mul` and `inv` are
continuous."]
lemma topological_group.continuous_conj_prod [has_continuous_inv G] :
continuous (λ g : G × G, g.fst * g.snd * g.fst⁻¹) :=
continuous_mul.mul (continuous_inv.comp continuous_fst)
/-- Conjugation by a fixed element is continuous when `mul` is continuous. -/
@[to_additive "Conjugation by a fixed element is continuous when `add` is continuous."]
lemma topological_group.continuous_conj (g : G) : continuous (λ (h : G), g * h * g⁻¹) :=
(continuous_mul_right g⁻¹).comp (continuous_mul_left g)
/-- Conjugation acting on a fixed element of the group is continuous when both `mul` and
`inv` are continuous. -/
@[to_additive "Conjugation acting on a fixed element of the additive group is continuous when both
`add` and `neg` are continuous."]
lemma topological_group.continuous_conj' [has_continuous_inv G]
(h : G) : continuous (λ (g : G), g * h * g⁻¹) :=
(continuous_mul_right h).mul continuous_inv
end conj
variables [topological_space G] [group G] [topological_group G]
[topological_space α] {f : α → G} {s : set α} {x : α}
section zpow
@[continuity, to_additive]
lemma continuous_zpow : ∀ z : ℤ, continuous (λ a : G, a ^ z)
| (int.of_nat n) := by simpa using continuous_pow n
| -[1+n] := by simpa using (continuous_pow (n + 1)).inv
instance add_group.has_continuous_const_smul_int {A} [add_group A] [topological_space A]
[topological_add_group A] : has_continuous_const_smul ℤ A := ⟨continuous_zsmul⟩
instance add_group.has_continuous_smul_int {A} [add_group A] [topological_space A]
[topological_add_group A] : has_continuous_smul ℤ A :=
⟨continuous_uncurry_of_discrete_topology continuous_zsmul⟩
@[continuity, to_additive]
lemma continuous.zpow {f : α → G} (h : continuous f) (z : ℤ) :
continuous (λ b, (f b) ^ z) :=
(continuous_zpow z).comp h
@[to_additive]
lemma continuous_on_zpow {s : set G} (z : ℤ) : continuous_on (λ x, x ^ z) s :=
(continuous_zpow z).continuous_on
@[to_additive]
lemma continuous_at_zpow (x : G) (z : ℤ) : continuous_at (λ x, x ^ z) x :=
(continuous_zpow z).continuous_at
@[to_additive]
lemma filter.tendsto.zpow {α} {l : filter α} {f : α → G} {x : G} (hf : tendsto f l (𝓝 x)) (z : ℤ) :
tendsto (λ x, f x ^ z) l (𝓝 (x ^ z)) :=
(continuous_at_zpow _ _).tendsto.comp hf
@[to_additive]
lemma continuous_within_at.zpow {f : α → G} {x : α} {s : set α} (hf : continuous_within_at f s x)
(z : ℤ) : continuous_within_at (λ x, f x ^ z) s x :=
hf.zpow z
@[to_additive]
lemma continuous_at.zpow {f : α → G} {x : α} (hf : continuous_at f x) (z : ℤ) :
continuous_at (λ x, f x ^ z) x :=
hf.zpow z
@[to_additive continuous_on.zsmul]
lemma continuous_on.zpow {f : α → G} {s : set α} (hf : continuous_on f s) (z : ℤ) :
continuous_on (λ x, f x ^ z) s :=
λ x hx, (hf x hx).zpow z
end zpow
section ordered_comm_group
variables [topological_space H] [ordered_comm_group H] [topological_group H]
@[to_additive] lemma tendsto_inv_nhds_within_Ioi {a : H} :
tendsto has_inv.inv (𝓝[>] a) (𝓝[<] (a⁻¹)) :=
(continuous_inv.tendsto a).inf $ by simp [tendsto_principal_principal]
@[to_additive] lemma tendsto_inv_nhds_within_Iio {a : H} :
tendsto has_inv.inv (𝓝[<] a) (𝓝[>] (a⁻¹)) :=
(continuous_inv.tendsto a).inf $ by simp [tendsto_principal_principal]
@[to_additive] lemma tendsto_inv_nhds_within_Ioi_inv {a : H} :
tendsto has_inv.inv (𝓝[>] (a⁻¹)) (𝓝[<] a) :=
by simpa only [inv_inv] using @tendsto_inv_nhds_within_Ioi _ _ _ _ (a⁻¹)
@[to_additive] lemma tendsto_inv_nhds_within_Iio_inv {a : H} :
tendsto has_inv.inv (𝓝[<] (a⁻¹)) (𝓝[>] a) :=
by simpa only [inv_inv] using @tendsto_inv_nhds_within_Iio _ _ _ _ (a⁻¹)
@[to_additive] lemma tendsto_inv_nhds_within_Ici {a : H} :
tendsto has_inv.inv (𝓝[≥] a) (𝓝[≤] (a⁻¹)) :=
(continuous_inv.tendsto a).inf $ by simp [tendsto_principal_principal]
@[to_additive] lemma tendsto_inv_nhds_within_Iic {a : H} :
tendsto has_inv.inv (𝓝[≤] a) (𝓝[≥] (a⁻¹)) :=
(continuous_inv.tendsto a).inf $ by simp [tendsto_principal_principal]
@[to_additive] lemma tendsto_inv_nhds_within_Ici_inv {a : H} :
tendsto has_inv.inv (𝓝[≥] (a⁻¹)) (𝓝[≤] a) :=
by simpa only [inv_inv] using @tendsto_inv_nhds_within_Ici _ _ _ _ (a⁻¹)
@[to_additive] lemma tendsto_inv_nhds_within_Iic_inv {a : H} :
tendsto has_inv.inv (𝓝[≤] (a⁻¹)) (𝓝[≥] a) :=
by simpa only [inv_inv] using @tendsto_inv_nhds_within_Iic _ _ _ _ (a⁻¹)
end ordered_comm_group
@[instance, to_additive]
instance [topological_space H] [group H] [topological_group H] :
topological_group (G × H) :=
{ continuous_inv := continuous_inv.prod_map continuous_inv }
@[to_additive]
instance pi.topological_group {C : β → Type*} [∀ b, topological_space (C b)]
[∀ b, group (C b)] [∀ b, topological_group (C b)] : topological_group (Π b, C b) :=
{ continuous_inv := continuous_pi (λ i, (continuous_apply i).inv) }
open mul_opposite
@[to_additive]
instance [group α] [has_continuous_inv α] : has_continuous_inv αᵐᵒᵖ :=
{ continuous_inv := continuous_induced_rng $ (@continuous_inv α _ _ _).comp continuous_unop }
/-- If multiplication is continuous in `α`, then it also is in `αᵐᵒᵖ`. -/
@[to_additive "If addition is continuous in `α`, then it also is in `αᵃᵒᵖ`."]
instance [group α] [topological_group α] :
topological_group αᵐᵒᵖ := { }
variable (G)
@[to_additive]
lemma nhds_one_symm : comap has_inv.inv (𝓝 (1 : G)) = 𝓝 (1 : G) :=
((homeomorph.inv G).comap_nhds_eq _).trans (congr_arg nhds inv_one)
/-- The map `(x, y) ↦ (x, xy)` as a homeomorphism. This is a shear mapping. -/
@[to_additive "The map `(x, y) ↦ (x, x + y)` as a homeomorphism.
This is a shear mapping."]
protected def homeomorph.shear_mul_right : G × G ≃ₜ G × G :=
{ continuous_to_fun := continuous_fst.prod_mk continuous_mul,
continuous_inv_fun := continuous_fst.prod_mk $ continuous_fst.inv.mul continuous_snd,
.. equiv.prod_shear (equiv.refl _) equiv.mul_left }
@[simp, to_additive]
lemma homeomorph.shear_mul_right_coe :
⇑(homeomorph.shear_mul_right G) = λ z : G × G, (z.1, z.1 * z.2) :=
rfl
@[simp, to_additive]
lemma homeomorph.shear_mul_right_symm_coe :
⇑(homeomorph.shear_mul_right G).symm = λ z : G × G, (z.1, z.1⁻¹ * z.2) :=
rfl
variables {G}
namespace subgroup
@[to_additive] instance (S : subgroup G) :
topological_group S :=
{ continuous_inv :=
begin
rw embedding_subtype_coe.to_inducing.continuous_iff,
exact continuous_subtype_coe.inv
end,
..S.to_submonoid.has_continuous_mul }
end subgroup
/-- The (topological-space) closure of a subgroup of a space `M` with `has_continuous_mul` is
itself a subgroup. -/
@[to_additive "The (topological-space) closure of an additive subgroup of a space `M` with
`has_continuous_add` is itself an additive subgroup."]
def subgroup.topological_closure (s : subgroup G) : subgroup G :=
{ carrier := closure (s : set G),
inv_mem' := λ g m, by simpa [←set.mem_inv, inv_closure] using m,
..s.to_submonoid.topological_closure }
@[simp, to_additive] lemma subgroup.topological_closure_coe {s : subgroup G} :
(s.topological_closure : set G) = closure s :=
rfl
@[to_additive]
instance subgroup.topological_closure_topological_group (s : subgroup G) :
topological_group (s.topological_closure) :=
{ continuous_inv :=
begin
apply continuous_induced_rng,
change continuous (λ p : s.topological_closure, (p : G)⁻¹),
continuity,
end
..s.to_submonoid.topological_closure_has_continuous_mul}
@[to_additive] lemma subgroup.subgroup_topological_closure (s : subgroup G) :
s ≤ s.topological_closure :=
subset_closure
@[to_additive] lemma subgroup.is_closed_topological_closure (s : subgroup G) :
is_closed (s.topological_closure : set G) :=
by convert is_closed_closure
@[to_additive] lemma subgroup.topological_closure_minimal
(s : subgroup G) {t : subgroup G} (h : s ≤ t) (ht : is_closed (t : set G)) :
s.topological_closure ≤ t :=
closure_minimal h ht
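-- A hedged sketch: combining `topological_closure_minimal` with `is_closed_topological_closure`
-- and `subgroup_topological_closure` shows that the topological closure is idempotent.
example (s : subgroup G) :
  s.topological_closure.topological_closure = s.topological_closure :=
le_antisymm
  (s.topological_closure.topological_closure_minimal le_rfl s.is_closed_topological_closure)
  s.topological_closure.subgroup_topological_closure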
@[to_additive] lemma dense_range.topological_closure_map_subgroup [group H] [topological_space H]
[topological_group H] {f : G →* H} (hf : continuous f) (hf' : dense_range f) {s : subgroup G}
(hs : s.topological_closure = ⊤) :
(s.map f).topological_closure = ⊤ :=
begin
rw set_like.ext'_iff at hs ⊢,
simp only [subgroup.topological_closure_coe, subgroup.coe_top, ← dense_iff_closure_eq] at hs ⊢,
exact hf'.dense_image hf hs
end
/-- The topological closure of a normal subgroup is normal. -/
@[to_additive "The topological closure of a normal additive subgroup is normal."]
lemma subgroup.is_normal_topological_closure {G : Type*} [topological_space G] [group G]
[topological_group G] (N : subgroup G) [N.normal] :
(subgroup.topological_closure N).normal :=
{ conj_mem := λ n hn g,
begin
apply mem_closure_of_continuous (topological_group.continuous_conj g) hn,
intros m hm,
exact subset_closure (subgroup.normal.conj_mem infer_instance m hm g),
end }
@[to_additive] lemma mul_mem_connected_component_one {G : Type*} [topological_space G]
[mul_one_class G] [has_continuous_mul G] {g h : G} (hg : g ∈ connected_component (1 : G))
(hh : h ∈ connected_component (1 : G)) : g * h ∈ connected_component (1 : G) :=
begin
rw connected_component_eq hg,
have hmul: g ∈ connected_component (g*h),
{ apply continuous.image_connected_component_subset (continuous_mul_left g),
rw ← connected_component_eq hh,
exact ⟨(1 : G), mem_connected_component, by simp only [mul_one]⟩ },
simpa [← connected_component_eq hmul] using (mem_connected_component)
end
@[to_additive] lemma inv_mem_connected_component_one {G : Type*} [topological_space G] [group G]
[topological_group G] {g : G} (hg : g ∈ connected_component (1 : G)) :
g⁻¹ ∈ connected_component (1 : G) :=
begin
rw ← inv_one,
exact continuous.image_connected_component_subset continuous_inv _
((set.mem_image _ _ _).mp ⟨g, hg, rfl⟩)
end
/-- The connected component of 1 is a subgroup of `G`. -/
@[to_additive "The connected component of 0 is a subgroup of `G`."]
def subgroup.connected_component_of_one (G : Type*) [topological_space G] [group G]
[topological_group G] : subgroup G :=
{ carrier := connected_component (1 : G),
one_mem' := mem_connected_component,
mul_mem' := λ g h hg hh, mul_mem_connected_component_one hg hh,
inv_mem' := λ g hg, inv_mem_connected_component_one hg }
/-- If a subgroup of a topological group is commutative, then so is its topological closure. -/
@[to_additive "If a subgroup of an additive topological group is commutative, then so is its
topological closure."]
def subgroup.comm_group_topological_closure [t2_space G] (s : subgroup G)
(hs : ∀ (x y : s), x * y = y * x) : comm_group s.topological_closure :=
{ ..s.topological_closure.to_group,
..s.to_submonoid.comm_monoid_topological_closure hs }
@[to_additive exists_nhds_half_neg]
lemma exists_nhds_split_inv {s : set G} (hs : s ∈ 𝓝 (1 : G)) :
∃ V ∈ 𝓝 (1 : G), ∀ (v ∈ V) (w ∈ V), v / w ∈ s :=
have ((λp : G × G, p.1 * p.2⁻¹) ⁻¹' s) ∈ 𝓝 ((1, 1) : G × G),
from continuous_at_fst.mul continuous_at_snd.inv (by simpa),
by simpa only [div_eq_mul_inv, nhds_prod_eq, mem_prod_self_iff, prod_subset_iff, mem_preimage]
using this
@[to_additive]
lemma nhds_translation_mul_inv (x : G) : comap (λ y : G, y * x⁻¹) (𝓝 1) = 𝓝 x :=
((homeomorph.mul_right x⁻¹).comap_nhds_eq 1).trans $ show 𝓝 (1 * x⁻¹⁻¹) = 𝓝 x, by simp
@[simp, to_additive] lemma map_mul_left_nhds (x y : G) : map ((*) x) (𝓝 y) = 𝓝 (x * y) :=
(homeomorph.mul_left x).map_nhds_eq y
@[to_additive] lemma map_mul_left_nhds_one (x : G) : map ((*) x) (𝓝 1) = 𝓝 x := by simp
/-- A monoid homomorphism (a bundled morphism of a type that implements `monoid_hom_class`) from a
topological group to a topological monoid is continuous provided that it is continuous at one. See
also `uniform_continuous_of_continuous_at_one`. -/
@[to_additive "An additive monoid homomorphism (a bundled morphism of a type that implements
`add_monoid_hom_class`) from an additive topological group to an additive topological monoid is
continuous provided that it is continuous at zero. See also
`uniform_continuous_of_continuous_at_zero`."]
lemma continuous_of_continuous_at_one {M hom : Type*} [mul_one_class M] [topological_space M]
[has_continuous_mul M] [monoid_hom_class hom G M] (f : hom) (hf : continuous_at f 1) :
continuous f :=
continuous_iff_continuous_at.2 $ λ x,
by simpa only [continuous_at, ← map_mul_left_nhds_one x, tendsto_map'_iff, (∘),
map_mul, map_one, mul_one] using hf.tendsto.const_mul (f x)
@[to_additive]
lemma topological_group.ext {G : Type*} [group G] {t t' : topological_space G}
(tg : @topological_group G t _) (tg' : @topological_group G t' _)
(h : @nhds G t 1 = @nhds G t' 1) : t = t' :=
eq_of_nhds_eq_nhds $ λ x, by
rw [← @nhds_translation_mul_inv G t _ _ x , ← @nhds_translation_mul_inv G t' _ _ x , ← h]
@[to_additive]
lemma topological_group.of_nhds_aux {G : Type*} [group G] [topological_space G]
(hinv : tendsto (λ (x : G), x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ (x₀ : G), 𝓝 x₀ = map (λ (x : G), x₀ * x) (𝓝 1))
(hconj : ∀ (x₀ : G), map (λ (x : G), x₀ * x * x₀⁻¹) (𝓝 1) ≤ 𝓝 1) : continuous (λ x : G, x⁻¹) :=
begin
rw continuous_iff_continuous_at,
rintros x₀,
have key : (λ x, (x₀*x)⁻¹) = (λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹) ∘ (λ x, x⁻¹),
by {ext ; simp[mul_assoc] },
calc map (λ x, x⁻¹) (𝓝 x₀)
= map (λ x, x⁻¹) (map (λ x, x₀*x) $ 𝓝 1) : by rw hleft
... = map (λ x, (x₀*x)⁻¹) (𝓝 1) : by rw filter.map_map
... = map (((λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹)) ∘ (λ x, x⁻¹)) (𝓝 1) : by rw key
... = map ((λ x, x₀⁻¹*x) ∘ (λ x, x₀*x*x₀⁻¹)) _ : by rw ← filter.map_map
... ≤ map ((λ x, x₀⁻¹ * x) ∘ λ x, x₀ * x * x₀⁻¹) (𝓝 1) : map_mono hinv
... = map (λ x, x₀⁻¹ * x) (map (λ x, x₀ * x * x₀⁻¹) (𝓝 1)) : filter.map_map
... ≤ map (λ x, x₀⁻¹ * x) (𝓝 1) : map_mono (hconj x₀)
... = 𝓝 x₀⁻¹ : (hleft _).symm
end
@[to_additive]
lemma topological_group.of_nhds_one' {G : Type u} [group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1))
(hright : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x*x₀) (𝓝 1)) : topological_group G :=
begin
refine { continuous_mul := (has_continuous_mul.of_nhds_one hmul hleft hright).continuous_mul,
continuous_inv := topological_group.of_nhds_aux hinv hleft _ },
intros x₀,
suffices : map (λ (x : G), x₀ * x * x₀⁻¹) (𝓝 1) = 𝓝 1, by simp [this, le_refl],
rw [show (λ x, x₀ * x * x₀⁻¹) = (λ x, x₀ * x) ∘ λ x, x*x₀⁻¹, by {ext, simp [mul_assoc] },
← filter.map_map, ← hright, hleft x₀⁻¹, filter.map_map],
convert map_id,
ext,
simp
end
@[to_additive]
lemma topological_group.of_nhds_one {G : Type u} [group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1))
(hconj : ∀ x₀ : G, tendsto (λ x, x₀*x*x₀⁻¹) (𝓝 1) (𝓝 1)) : topological_group G :=
{ continuous_mul := begin
rw continuous_iff_continuous_at,
rintros ⟨x₀, y₀⟩,
have key : (λ (p : G × G), x₀ * p.1 * (y₀ * p.2)) =
((λ x, x₀*y₀*x) ∘ (uncurry (*)) ∘ (prod.map (λ x, y₀⁻¹*x*y₀) id)),
by { ext, simp [uncurry, prod.map, mul_assoc] },
specialize hconj y₀⁻¹, rw inv_inv at hconj,
calc map (λ (p : G × G), p.1 * p.2) (𝓝 (x₀, y₀))
= map (λ (p : G × G), p.1 * p.2) ((𝓝 x₀) ×ᶠ 𝓝 y₀)
: by rw nhds_prod_eq
... = map (λ (p : G × G), x₀ * p.1 * (y₀ * p.2)) ((𝓝 1) ×ᶠ (𝓝 1))
: by rw [hleft x₀, hleft y₀, prod_map_map_eq, filter.map_map]
... = map (((λ x, x₀*y₀*x) ∘ (uncurry (*))) ∘ (prod.map (λ x, y₀⁻¹*x*y₀) id))((𝓝 1) ×ᶠ (𝓝 1))
: by rw key
... = map ((λ x, x₀*y₀*x) ∘ (uncurry (*))) ((map (λ x, y₀⁻¹*x*y₀) $ 𝓝 1) ×ᶠ (𝓝 1))
: by rw [← filter.map_map, ← prod_map_map_eq', map_id]
... ≤ map ((λ x, x₀*y₀*x) ∘ (uncurry (*))) ((𝓝 1) ×ᶠ (𝓝 1))
: map_mono (filter.prod_mono hconj $ le_rfl)
... = map (λ x, x₀*y₀*x) (map (uncurry (*)) ((𝓝 1) ×ᶠ (𝓝 1))) : by rw filter.map_map
... ≤ map (λ x, x₀*y₀*x) (𝓝 1) : map_mono hmul
... = 𝓝 (x₀*y₀) : (hleft _).symm
end,
continuous_inv := topological_group.of_nhds_aux hinv hleft hconj}
@[to_additive]
lemma topological_group.of_comm_of_nhds_one {G : Type u} [comm_group G] [topological_space G]
(hmul : tendsto (uncurry ((*) : G → G → G)) ((𝓝 1) ×ᶠ 𝓝 1) (𝓝 1))
(hinv : tendsto (λ x : G, x⁻¹) (𝓝 1) (𝓝 1))
(hleft : ∀ x₀ : G, 𝓝 x₀ = map (λ x, x₀*x) (𝓝 1)) : topological_group G :=
topological_group.of_nhds_one hmul hinv hleft (by simpa using tendsto_id)
end topological_group
section quotient_topological_group
variables [topological_space G] [group G] [topological_group G] (N : subgroup G) (n : N.normal)
@[to_additive]
instance quotient_group.quotient.topological_space {G : Type*} [group G] [topological_space G]
(N : subgroup G) : topological_space (G ⧸ N) :=
quotient.topological_space
open quotient_group
@[to_additive]
lemma quotient_group.is_open_map_coe : is_open_map (coe : G → G ⧸ N) :=
begin
intros s s_op,
change is_open ((coe : G → G ⧸ N) ⁻¹' (coe '' s)),
rw quotient_group.preimage_image_coe N s,
exact is_open_Union (λ n, (continuous_mul_right _).is_open_preimage s s_op)
end
@[to_additive]
instance topological_group_quotient [N.normal] : topological_group (G ⧸ N) :=
{ continuous_mul := begin
have cont : continuous ((coe : G → G ⧸ N) ∘ (λ (p : G × G), p.fst * p.snd)) :=
continuous_quot_mk.comp continuous_mul,
have quot : quotient_map (λ p : G × G, ((p.1 : G ⧸ N), (p.2 : G ⧸ N))),
{ apply is_open_map.to_quotient_map,
{ exact (quotient_group.is_open_map_coe N).prod (quotient_group.is_open_map_coe N) },
{ exact continuous_quot_mk.prod_map continuous_quot_mk },
{ exact (surjective_quot_mk _).prod_map (surjective_quot_mk _) } },
exact (quotient_map.continuous_iff quot).2 cont,
end,
continuous_inv := begin
have : continuous ((coe : G → G ⧸ N) ∘ (λ (a : G), a⁻¹)) :=
continuous_quot_mk.comp continuous_inv,
convert continuous_quotient_lift _ this,
end }
end quotient_topological_group
/-- A typeclass saying that `λ p : G × G, p.1 - p.2` is a continuous function. This property
automatically holds for topological additive groups but it also holds, e.g., for `ℝ≥0`. -/
class has_continuous_sub (G : Type*) [topological_space G] [has_sub G] : Prop :=
(continuous_sub : continuous (λ p : G × G, p.1 - p.2))
/-- A typeclass saying that `λ p : G × G, p.1 / p.2` is a continuous function. This property
automatically holds for topological groups. Lemmas using this class have primes.
The unprimed version is for `group_with_zero`. -/
@[to_additive]
class has_continuous_div (G : Type*) [topological_space G] [has_div G] : Prop :=
(continuous_div' : continuous (λ p : G × G, p.1 / p.2))
@[priority 100, to_additive] -- see Note [lower instance priority]
instance topological_group.to_has_continuous_div [topological_space G] [group G]
[topological_group G] : has_continuous_div G :=
⟨by { simp only [div_eq_mul_inv], exact continuous_fst.mul continuous_snd.inv }⟩
export has_continuous_sub (continuous_sub)
export has_continuous_div (continuous_div')
section has_continuous_div
variables [topological_space G] [has_div G] [has_continuous_div G]
@[to_additive sub]
lemma filter.tendsto.div' {f g : α → G} {l : filter α} {a b : G} (hf : tendsto f l (𝓝 a))
(hg : tendsto g l (𝓝 b)) : tendsto (λ x, f x / g x) l (𝓝 (a / b)) :=
(continuous_div'.tendsto (a, b)).comp (hf.prod_mk_nhds hg)
@[to_additive const_sub]
lemma filter.tendsto.const_div' (b : G) {c : G} {f : α → G} {l : filter α}
(h : tendsto f l (𝓝 c)) : tendsto (λ k : α, b / f k) l (𝓝 (b / c)) :=
tendsto_const_nhds.div' h
@[to_additive sub_const]
lemma filter.tendsto.div_const' (b : G) {c : G} {f : α → G} {l : filter α}
(h : tendsto f l (𝓝 c)) : tendsto (λ k : α, f k / b) l (𝓝 (c / b)) :=
h.div' tendsto_const_nhds
variables [topological_space α] {f g : α → G} {s : set α} {x : α}
@[continuity, to_additive sub] lemma continuous.div' (hf : continuous f) (hg : continuous g) :
continuous (λ x, f x / g x) :=
continuous_div'.comp (hf.prod_mk hg : _)
@[to_additive continuous_sub_left]
lemma continuous_div_left' (a : G) : continuous (λ b : G, a / b) :=
continuous_const.div' continuous_id
@[to_additive continuous_sub_right]
lemma continuous_div_right' (a : G) : continuous (λ b : G, b / a) :=
continuous_id.div' continuous_const
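-- A hedged usage sketch: `continuous.div'` nests, giving continuity of e.g. `x ↦ a / (x / b)`
-- from `has_continuous_div` alone.
example (a b : G) : continuous (λ x : G, a / (x / b)) :=
continuous_const.div' (continuous_id.div' continuous_const)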
@[to_additive sub]
lemma continuous_at.div' {f g : α → G} {x : α} (hf : continuous_at f x) (hg : continuous_at g x) :
continuous_at (λx, f x / g x) x :=
hf.div' hg
@[to_additive sub]
lemma continuous_within_at.div' (hf : continuous_within_at f s x)
(hg : continuous_within_at g s x) :
continuous_within_at (λ x, f x / g x) s x :=
hf.div' hg
@[to_additive sub]
lemma continuous_on.div' (hf : continuous_on f s) (hg : continuous_on g s) :
continuous_on (λx, f x / g x) s :=
λ x hx, (hf x hx).div' (hg x hx)
end has_continuous_div
section div_in_topological_group
variables [group G] [topological_space G] [topological_group G]
/-- A version of `homeomorph.mul_left a b⁻¹` that is defeq to `a / b`. -/
@[to_additive /-" A version of `homeomorph.add_left a (-b)` that is defeq to `a - b`. "-/,
simps {simp_rhs := tt}]
def homeomorph.div_left (x : G) : G ≃ₜ G :=
{ continuous_to_fun := continuous_const.div' continuous_id,
continuous_inv_fun := continuous_inv.mul continuous_const,
.. equiv.div_left x }
@[to_additive] lemma is_open_map_div_left (a : G) : is_open_map ((/) a) :=
(homeomorph.div_left _).is_open_map
@[to_additive] lemma is_closed_map_div_left (a : G) : is_closed_map ((/) a) :=
(homeomorph.div_left _).is_closed_map
/-- A version of `homeomorph.mul_right a⁻¹ b` that is defeq to `b / a`. -/
@[to_additive /-" A version of `homeomorph.add_right (-a) b` that is defeq to `b - a`. "-/,
simps {simp_rhs := tt}]
def homeomorph.div_right (x : G) : G ≃ₜ G :=
{ continuous_to_fun := continuous_id.div' continuous_const,
continuous_inv_fun := continuous_id.mul continuous_const,
.. equiv.div_right x }
@[to_additive]
lemma is_open_map_div_right (a : G) : is_open_map (λ x, x / a) :=
(homeomorph.div_right a).is_open_map
@[to_additive]
lemma is_closed_map_div_right (a : G) : is_closed_map (λ x, x / a) :=
(homeomorph.div_right a).is_closed_map
@[to_additive]
lemma tendsto_div_nhds_one_iff
{α : Type*} {l : filter α} {x : G} {u : α → G} :
tendsto (λ n, u n / x) l (𝓝 1) ↔ tendsto u l (𝓝 x) :=
begin
have A : tendsto (λ (n : α), x) l (𝓝 x) := tendsto_const_nhds,
exact ⟨λ h, by simpa using h.mul A, λ h, by simpa using h.div' A⟩
end
@[to_additive] lemma nhds_translation_div (x : G) : comap (/ x) (𝓝 1) = 𝓝 x :=
by simpa only [div_eq_mul_inv] using nhds_translation_mul_inv x
end div_in_topological_group
/-!
### Topological operations on pointwise sums and products
A few results about interior and closure of the pointwise addition/multiplication of sets in groups
with continuous addition/multiplication. See also `submonoid.top_closure_mul_self_eq` in
`topology.algebra.monoid`.
-/
section has_continuous_mul
variables [topological_space α] [group α] [has_continuous_mul α] {s t : set α}
@[to_additive] lemma is_open.mul_left (ht : is_open t) : is_open (s * t) :=
by { rw ←Union_mul_left_image, exact is_open_bUnion (λ a ha, is_open_map_mul_left a t ht) }
@[to_additive] lemma is_open.mul_right (hs : is_open s) : is_open (s * t) :=
by { rw ←Union_mul_right_image, exact is_open_bUnion (λ a ha, is_open_map_mul_right a s hs) }
@[to_additive] lemma subset_interior_mul_left : interior s * t ⊆ interior (s * t) :=
interior_maximal (set.mul_subset_mul_right interior_subset) is_open_interior.mul_right
@[to_additive] lemma subset_interior_mul_right : s * interior t ⊆ interior (s * t) :=
interior_maximal (set.mul_subset_mul_left interior_subset) is_open_interior.mul_left
@[to_additive] lemma subset_interior_mul : interior s * interior t ⊆ interior (s * t) :=
(set.mul_subset_mul_left interior_subset).trans subset_interior_mul_left
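-- A hedged sketch: for open `s` and `t`, the pointwise product sits inside its own interior,
-- by rewriting `subset_interior_mul` with `is_open.interior_eq`.
example (hs : is_open s) (ht : is_open t) : s * t ⊆ interior (s * t) :=
by simpa only [hs.interior_eq, ht.interior_eq] using subset_interior_mul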
end has_continuous_mul
section topological_group
variables [topological_space α] [group α] [topological_group α] {s t : set α}
@[to_additive] lemma is_open.div_left (ht : is_open t) : is_open (s / t) :=
by { rw ←Union_div_left_image, exact is_open_bUnion (λ a ha, is_open_map_div_left a t ht) }
@[to_additive] lemma is_open.div_right (hs : is_open s) : is_open (s / t) :=
by { rw ←Union_div_right_image, exact is_open_bUnion (λ a ha, is_open_map_div_right a s hs) }
@[to_additive] lemma subset_interior_div_left : interior s / t ⊆ interior (s / t) :=
interior_maximal (div_subset_div_right interior_subset) is_open_interior.div_right
@[to_additive] lemma subset_interior_div_right : s / interior t ⊆ interior (s / t) :=
interior_maximal (div_subset_div_left interior_subset) is_open_interior.div_left
@[to_additive] lemma subset_interior_div : interior s / interior t ⊆ interior (s / t) :=
(div_subset_div_left interior_subset).trans subset_interior_div_left
@[to_additive] lemma is_open.mul_closure (hs : is_open s) (t : set α) : s * closure t = s * t :=
begin
refine (mul_subset_iff.2 $ λ a ha b hb, _).antisymm (mul_subset_mul_left subset_closure),
rw mem_closure_iff at hb,
have hbU : b ∈ s⁻¹ * {a * b} := ⟨a⁻¹, a * b, set.inv_mem_inv.2 ha, rfl, inv_mul_cancel_left _ _⟩,
obtain ⟨_, ⟨c, d, hc, (rfl : d = _), rfl⟩, hcs⟩ := hb _ hs.inv.mul_right hbU,
exact ⟨c⁻¹, _, hc, hcs, inv_mul_cancel_left _ _⟩,
end
@[to_additive] lemma is_open.closure_mul (ht : is_open t) (s : set α) : closure s * t = s * t :=
by rw [←inv_inv (closure s * t), mul_inv_rev, inv_closure, ht.inv.mul_closure, mul_inv_rev, inv_inv,
inv_inv]
@[to_additive] lemma is_open.div_closure (hs : is_open s) (t : set α) : s / closure t = s / t :=
by simp_rw [div_eq_mul_inv, inv_closure, hs.mul_closure]
@[to_additive] lemma is_open.closure_div (ht : is_open t) (s : set α) : closure s / t = s / t :=
by simp_rw [div_eq_mul_inv, ht.inv.closure_mul]
end topological_group
/-- Additive group with a neighbourhood around 0.
Only used to construct a topology and uniform space.
This is currently only available for commutative groups, but it can be extended to
non-commutative groups too.
-/
class add_group_with_zero_nhd (G : Type u) extends add_comm_group G :=
(Z [] : filter G)
(zero_Z : pure 0 ≤ Z)
(sub_Z : tendsto (λp:G×G, p.1 - p.2) (Z ×ᶠ Z) Z)
section filter_mul
section
variables (G) [topological_space G] [group G] [topological_group G]
@[to_additive]
lemma topological_group.t1_space (h : @is_closed G _ {1}) : t1_space G :=
⟨assume x, by { convert is_closed_map_mul_right x _ h, simp }⟩
@[to_additive]
lemma topological_group.regular_space [t1_space G] : regular_space G :=
⟨assume s a hs ha,
let f := λ p : G × G, p.1 * (p.2)⁻¹ in
have hf : continuous f := continuous_fst.mul continuous_snd.inv,
-- a ∈ -s implies f (a, 1) ∈ -s, and so (a, 1) ∈ f⁻¹' (-s);
-- and so can find t₁ t₂ open such that a ∈ t₁ × t₂ ⊆ f⁻¹' (-s)
let ⟨t₁, t₂, ht₁, ht₂, a_mem_t₁, one_mem_t₂, t_subset⟩ :=
is_open_prod_iff.1 ((is_open_compl_iff.2 hs).preimage hf) a (1:G) (by simpa [f]) in
begin
use [s * t₂, ht₂.mul_left, λ x hx, ⟨x, 1, hx, one_mem_t₂, mul_one _⟩],
rw [nhds_within, inf_principal_eq_bot, mem_nhds_iff],
refine ⟨t₁, _, ht₁, a_mem_t₁⟩,
rintros x hx ⟨y, z, hy, hz, yz⟩,
have : x * z⁻¹ ∈ sᶜ := (prod_subset_iff.1 t_subset) x hx z hz,
have : x * z⁻¹ ∈ s, rw ← yz, simpa,
contradiction
end⟩
@[to_additive]
lemma topological_group.t2_space [t1_space G] : t2_space G :=
@regular_space.t2_space G _ (topological_group.regular_space G)
variables {G} (S : subgroup G) [subgroup.normal S] [is_closed (S : set G)]
@[to_additive]
instance subgroup.regular_quotient_of_is_closed
(S : subgroup G) [subgroup.normal S] [is_closed (S : set G)] : regular_space (G ⧸ S) :=
begin
suffices : t1_space (G ⧸ S), { exact @topological_group.regular_space _ _ _ _ this, },
have hS : is_closed (S : set G) := infer_instance,
rw ← quotient_group.ker_mk S at hS,
exact topological_group.t1_space (G ⧸ S) ((quotient_map_quotient_mk.is_closed_preimage).mp hS),
end
end
section
/-! Some results about an open set containing the product of two sets in a topological group. -/
variables [topological_space G] [group G] [topological_group G]
/-- Given a compact set `K` inside an open set `U`, there is an open neighborhood `V` of `1`
such that `K * V ⊆ U`. -/
@[to_additive "Given a compact set `K` inside an open set `U`, there is an open neighborhood `V` of
`0` such that `K + V ⊆ U`."]
lemma compact_open_separated_mul_right {K U : set G} (hK : is_compact K) (hU : is_open U)
(hKU : K ⊆ U) : ∃ V ∈ 𝓝 (1 : G), K * V ⊆ U :=
begin
apply hK.induction_on,
{ exact ⟨univ, by simp⟩ },
{ rintros s t hst ⟨V, hV, hV'⟩,
exact ⟨V, hV, (mul_subset_mul_right hst).trans hV'⟩ },
{ rintros s t ⟨V, V_in, hV'⟩ ⟨W, W_in, hW'⟩,
use [V ∩ W, inter_mem V_in W_in],
rw union_mul,
exact union_subset ((mul_subset_mul_left (V.inter_subset_left W)).trans hV')
((mul_subset_mul_left (V.inter_subset_right W)).trans hW') },
{ intros x hx,
have := tendsto_mul (show U ∈ 𝓝 (x * 1), by simpa using hU.mem_nhds (hKU hx)),
rw [nhds_prod_eq, mem_map, mem_prod_iff] at this,
rcases this with ⟨t, ht, s, hs, h⟩,
rw [← image_subset_iff, image_mul_prod] at h,
exact ⟨t, mem_nhds_within_of_mem_nhds ht, s, hs, h⟩ }
end
open mul_opposite
/-- Given a compact set `K` inside an open set `U`, there is an open neighborhood `V` of `1`
such that `V * K ⊆ U`. -/
@[to_additive "Given a compact set `K` inside an open set `U`, there is an open neighborhood `V` of
`0` such that `V + K ⊆ U`."]
lemma compact_open_separated_mul_left {K U : set G} (hK : is_compact K) (hU : is_open U)
(hKU : K ⊆ U) : ∃ V ∈ 𝓝 (1 : G), V * K ⊆ U :=
begin
rcases compact_open_separated_mul_right (hK.image continuous_op) (op_homeomorph.is_open_map U hU)
(image_subset op hKU) with ⟨V, (hV : V ∈ 𝓝 (op (1 : G))), hV' : op '' K * V ⊆ op '' U⟩,
refine ⟨op ⁻¹' V, continuous_op.continuous_at hV, _⟩,
rwa [← image_preimage_eq V op_surjective, ← image_op_mul, image_subset_iff,
preimage_image_eq _ op_injective] at hV'
end
/-- A compact set is covered by finitely many left multiplicative translates of a set
with non-empty interior. -/
@[to_additive "A compact set is covered by finitely many left additive translates of a set
with non-empty interior."]
lemma compact_covered_by_mul_left_translates {K V : set G} (hK : is_compact K)
(hV : (interior V).nonempty) : ∃ t : finset G, K ⊆ ⋃ g ∈ t, (λ h, g * h) ⁻¹' V :=
begin
obtain ⟨t, ht⟩ : ∃ t : finset G, K ⊆ ⋃ x ∈ t, interior (((*) x) ⁻¹' V),
{ refine hK.elim_finite_subcover (λ x, interior $ ((*) x) ⁻¹' V) (λ x, is_open_interior) _,
cases hV with g₀ hg₀,
refine λ g hg, mem_Union.2 ⟨g₀ * g⁻¹, _⟩,
refine preimage_interior_subset_interior_preimage (continuous_const.mul continuous_id) _,
rwa [mem_preimage, inv_mul_cancel_right] },
exact ⟨t, subset.trans ht $ Union₂_mono $ λ g hg, interior_subset⟩
end
/-- Every locally compact separable topological group is σ-compact.
Note: this is not true if we drop the topological group hypothesis. -/
@[priority 100, to_additive separable_locally_compact_add_group.sigma_compact_space]
instance separable_locally_compact_group.sigma_compact_space
[separable_space G] [locally_compact_space G] : sigma_compact_space G :=
begin
obtain ⟨L, hLc, hL1⟩ := exists_compact_mem_nhds (1 : G),
refine ⟨⟨λ n, (λ x, x * dense_seq G n) ⁻¹' L, _, _⟩⟩,
{ intro n, exact (homeomorph.mul_right _).compact_preimage.mpr hLc },
{ refine Union_eq_univ_iff.2 (λ x, _),
obtain ⟨_, ⟨n, rfl⟩, hn⟩ : (range (dense_seq G) ∩ (λ y, x * y) ⁻¹' L).nonempty,
{ rw [← (homeomorph.mul_left x).apply_symm_apply 1] at hL1,
exact (dense_range_dense_seq G).inter_nhds_nonempty
((homeomorph.mul_left x).continuous.continuous_at $ hL1) },
exact ⟨n, hn⟩ }
end
/-- Every separated topological group in which there exists a compact set with nonempty interior
is locally compact. -/
@[to_additive] lemma topological_space.positive_compacts.locally_compact_space_of_group
[t2_space G] (K : positive_compacts G) :
locally_compact_space G :=
begin
refine locally_compact_of_compact_nhds (λ x, _),
obtain ⟨y, hy⟩ := K.interior_nonempty,
let F := homeomorph.mul_left (x * y⁻¹),
refine ⟨F '' K, _, K.compact.image F.continuous⟩,
suffices : F.symm ⁻¹' K ∈ 𝓝 x, by { convert this, apply equiv.image_eq_preimage },
apply continuous_at.preimage_mem_nhds F.symm.continuous.continuous_at,
have : F.symm x = y, by simp [F, homeomorph.mul_left_symm],
rw this,
exact mem_interior_iff_mem_nhds.1 hy
end
end
section
variables [topological_space G] [comm_group G] [topological_group G]
@[to_additive]
lemma nhds_mul (x y : G) : 𝓝 (x * y) = 𝓝 x * 𝓝 y :=
filter_eq $ set.ext $ assume s,
begin
rw [← nhds_translation_mul_inv x, ← nhds_translation_mul_inv y, ← nhds_translation_mul_inv (x*y)],
split,
{ rintros ⟨t, ht, ts⟩,
rcases exists_nhds_one_split ht with ⟨V, V1, h⟩,
refine ⟨(λa, a * x⁻¹) ⁻¹' V, (λa, a * y⁻¹) ⁻¹' V,
⟨V, V1, subset.refl _⟩, ⟨V, V1, subset.refl _⟩, _⟩,
rintros a ⟨v, w, v_mem, w_mem, rfl⟩,
apply ts,
simpa [mul_comm, mul_assoc, mul_left_comm] using h (v * x⁻¹) v_mem (w * y⁻¹) w_mem },
{ rintros ⟨a, c, ⟨b, hb, ba⟩, ⟨d, hd, dc⟩, ac⟩,
refine ⟨b ∩ d, inter_mem hb hd, assume v, _⟩,
simp only [preimage_subset_iff, mul_inv_rev, mem_preimage] at *,
rintros ⟨vb, vd⟩,
refine ac ⟨v * y⁻¹, y, _, _, _⟩,
{ rw ← mul_assoc _ _ _ at vb, exact ba _ vb },
{ apply dc y, rw mul_right_inv, exact mem_of_mem_nhds hd },
{ simp only [inv_mul_cancel_right] } }
end
/-- On a topological group, `𝓝 : G → filter G` can be promoted to a `mul_hom`. -/
@[to_additive "On an additive topological group, `𝓝 : G → filter G` can be promoted to an
`add_hom`.", simps]
def nhds_mul_hom : G →ₙ* (filter G) :=
{ to_fun := 𝓝,
map_mul' := λ_ _, nhds_mul _ _ }
end
end filter_mul
instance additive.topological_add_group {G} [h : topological_space G]
[group G] [topological_group G] : @topological_add_group (additive G) h _ :=
{ continuous_neg := @continuous_inv G _ _ _ }
instance multiplicative.topological_group {G} [h : topological_space G]
[add_group G] [topological_add_group G] : @topological_group (multiplicative G) h _ :=
{ continuous_inv := @continuous_neg G _ _ _ }
section quotient
variables [group G] [topological_space G] [topological_group G] {Γ : subgroup G}
@[to_additive]
instance quotient_group.has_continuous_const_smul : has_continuous_const_smul G (G ⧸ Γ) :=
{ continuous_const_smul := λ g₀, begin
apply continuous_coinduced_dom,
change continuous (λ g : G, quotient_group.mk (g₀ * g)),
exact continuous_coinduced_rng.comp (continuous_mul_left g₀),
end }
@[to_additive]
lemma quotient_group.continuous_smul₁ (x : G ⧸ Γ) : continuous (λ g : G, g • x) :=
begin
obtain ⟨g₀, rfl⟩ : ∃ g₀, quotient_group.mk g₀ = x,
{ exact @quotient.exists_rep _ (quotient_group.left_rel Γ) x },
change continuous (λ g, quotient_group.mk (g * g₀)),
exact continuous_coinduced_rng.comp (continuous_mul_right g₀)
end
@[to_additive]
instance quotient_group.has_continuous_smul [locally_compact_space G] :
has_continuous_smul G (G ⧸ Γ) :=
{ continuous_smul := begin
let F : G × G ⧸ Γ → G ⧸ Γ := λ p, p.1 • p.2,
change continuous F,
have H : continuous (F ∘ (λ p : G × G, (p.1, quotient_group.mk p.2))),
{ change continuous (λ p : G × G, quotient_group.mk (p.1 * p.2)),
refine continuous_coinduced_rng.comp continuous_mul },
exact quotient_map.continuous_lift_prod_right quotient_map_quotient_mk H,
end }
end quotient
namespace units
open mul_opposite (continuous_op continuous_unop)
variables [monoid α] [topological_space α] [has_continuous_mul α] [monoid β] [topological_space β]
[has_continuous_mul β]
@[to_additive] instance : topological_group αˣ :=
{ continuous_inv := continuous_induced_rng ((continuous_unop.comp
(@continuous_embed_product α _ _).snd).prod_mk (continuous_op.comp continuous_coe)) }
/-- The topological group isomorphism between the units of a product of two monoids, and the product
of the units of each monoid. -/
def homeomorph.prod_units : homeomorph (α × β)ˣ (αˣ × βˣ) :=
{ continuous_to_fun :=
begin
show continuous (λ i : (α × β)ˣ, (map (monoid_hom.fst α β) i, map (monoid_hom.snd α β) i)),
refine continuous.prod_mk _ _,
{ refine continuous_induced_rng ((continuous_fst.comp units.continuous_coe).prod_mk _),
refine mul_opposite.continuous_op.comp (continuous_fst.comp _),
simp_rw units.inv_eq_coe_inv,
exact units.continuous_coe.comp continuous_inv, },
{ refine continuous_induced_rng ((continuous_snd.comp units.continuous_coe).prod_mk _),
simp_rw units.coe_map_inv,
exact continuous_op.comp (continuous_snd.comp (units.continuous_coe.comp continuous_inv)), }
end,
continuous_inv_fun :=
begin
refine continuous_induced_rng (continuous.prod_mk _ _),
{ exact (units.continuous_coe.comp continuous_fst).prod_mk
(units.continuous_coe.comp continuous_snd), },
{ refine continuous_op.comp
(units.continuous_coe.comp $ continuous_induced_rng $ continuous.prod_mk _ _),
{ exact (units.continuous_coe.comp (continuous_inv.comp continuous_fst)).prod_mk
(units.continuous_coe.comp (continuous_inv.comp continuous_snd)) },
{ exact continuous_op.comp ((units.continuous_coe.comp continuous_fst).prod_mk
(units.continuous_coe.comp continuous_snd)) }}
end,
..mul_equiv.prod_units }
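/- Usage sketch (not in the original file): the definition above restated with the `≃ₜ`
notation for `homeomorph`, assuming the ambient instances declared in this section. -/
example : (α × β)ˣ ≃ₜ αˣ × βˣ := homeomorph.prod_units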
end units
section lattice_ops
variables {ι : Sort*} [group G] [group H] {ts : set (topological_space G)}
(h : ∀ t ∈ ts, @topological_group G t _) {ts' : ι → topological_space G}
(h' : ∀ i, @topological_group G (ts' i) _) {t₁ t₂ : topological_space G}
(h₁ : @topological_group G t₁ _) (h₂ : @topological_group G t₂ _)
{t : topological_space H} [topological_group H] {F : Type*}
[monoid_hom_class F G H] (f : F)
@[to_additive] lemma topological_group_Inf :
@topological_group G (Inf ts) _ :=
{ continuous_inv := @has_continuous_inv.continuous_inv G (Inf ts) _
(@has_continuous_inv_Inf _ _ _
(λ t ht, @topological_group.to_has_continuous_inv G t _ (h t ht))),
continuous_mul := @has_continuous_mul.continuous_mul G (Inf ts) _
(@has_continuous_mul_Inf _ _ _
(λ t ht, @topological_group.to_has_continuous_mul G t _ (h t ht))) }
include h'
@[to_additive] lemma topological_group_infi :
@topological_group G (⨅ i, ts' i) _ :=
by {rw ← Inf_range, exact topological_group_Inf (set.forall_range_iff.mpr h')}
omit h'
include h₁ h₂
@[to_additive] lemma topological_group_inf :
@topological_group G (t₁ ⊓ t₂) _ :=
by {rw inf_eq_infi, refine topological_group_infi (λ b, _), cases b; assumption}
omit h₁ h₂
@[to_additive] lemma topological_group_induced :
@topological_group G (t.induced f) _ :=
{ continuous_inv :=
begin
letI : topological_space G := t.induced f,
refine continuous_induced_rng _,
simp_rw [function.comp, map_inv],
exact continuous_inv.comp (continuous_induced_dom : continuous f)
end,
continuous_mul := @has_continuous_mul.continuous_mul G (t.induced f) _
(@has_continuous_mul_induced G H _ _ t _ _ _ f) }
end lattice_ops
/-!
### Lattice of group topologies
We define a structure `group_topology α` which endows a group `α` with a topology such that all
group operations are continuous.
Group topologies on a fixed group `α` are ordered by reverse inclusion. They form a complete
lattice, with `⊥` the discrete topology and `⊤` the indiscrete topology.
Any function `f : α → β` from a topological space `α` to a group `β` induces the coinduced group
topology `coinduced f : group_topology β`, the finest group topology making `f` continuous.
The additive version `add_group_topology α` and corresponding results are provided as well.
-/
/-- A group topology on a group `α` is a topology for which multiplication and inversion
are continuous. -/
structure group_topology (α : Type u) [group α]
extends topological_space α, topological_group α : Type u
/-- An additive group topology on an additive group `α` is a topology for which addition and
negation are continuous. -/
structure add_group_topology (α : Type u) [add_group α]
extends topological_space α, topological_add_group α : Type u
attribute [to_additive] group_topology
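/- Illustrative sketch (not in the original file): a topological group packages into a
`group_topology`, using the same flat field names as the `has_top`/`has_bot` instances
below; the field layout is an assumption read off those instances, not library API. -/
example (γ : Type*) [group γ] [topological_space γ] [topological_group γ] :
  group_topology γ :=
{ to_topological_space := ‹topological_space γ›,
  continuous_mul := continuous_mul,
  continuous_inv := continuous_inv }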
namespace group_topology
variables [group α]
/-- A version of the global `continuous_mul` suitable for dot notation. -/
@[to_additive]
lemma continuous_mul' (g : group_topology α) :
by haveI := g.to_topological_space; exact continuous (λ p : α × α, p.1 * p.2) :=
begin
letI := g.to_topological_space,
haveI := g.to_topological_group,
exact continuous_mul,
end
/-- A version of the global `continuous_inv` suitable for dot notation. -/
@[to_additive]
lemma continuous_inv' (g : group_topology α) :
by haveI := g.to_topological_space; exact continuous (has_inv.inv : α → α) :=
begin
letI := g.to_topological_space,
haveI := g.to_topological_group,
exact continuous_inv,
end
@[to_additive]
lemma to_topological_space_injective :
function.injective (to_topological_space : group_topology α → topological_space α):=
λ f g h, by { cases f, cases g, congr' }
@[ext, to_additive]
lemma ext' {f g : group_topology α} (h : f.is_open = g.is_open) : f = g :=
to_topological_space_injective $ topological_space_eq h
/-- The ordering on group topologies on the group `α`.
`t ≤ s` if every set open in `s` is also open in `t` (`t` is finer than `s`). -/
@[to_additive]
instance : partial_order (group_topology α) :=
partial_order.lift to_topological_space to_topological_space_injective
@[simp, to_additive] lemma to_topological_space_le {x y : group_topology α} :
x.to_topological_space ≤ y.to_topological_space ↔ x ≤ y := iff.rfl
@[to_additive]
instance : has_top (group_topology α) :=
⟨{to_topological_space := ⊤,
continuous_mul := continuous_top,
continuous_inv := continuous_top}⟩
@[simp, to_additive] lemma to_topological_space_top :
(⊤ : group_topology α).to_topological_space = ⊤ := rfl
@[to_additive]
instance : has_bot (group_topology α) :=
⟨{to_topological_space := ⊥,
continuous_mul := by continuity,
continuous_inv := continuous_bot}⟩
@[simp, to_additive] lemma to_topological_space_bot :
(⊥ : group_topology α).to_topological_space = ⊥ := rfl
@[to_additive]
instance : bounded_order (group_topology α) :=
{ top := ⊤,
le_top := λ x, show x.to_topological_space ≤ ⊤, from le_top,
bot := ⊥,
bot_le := λ x, show ⊥ ≤ x.to_topological_space, from bot_le }
@[to_additive]
instance : has_inf (group_topology α) :=
{ inf := λ x y,
{ to_topological_space := x.to_topological_space ⊓ y.to_topological_space,
continuous_mul := continuous_inf_rng
(continuous_inf_dom_left₂ x.continuous_mul') (continuous_inf_dom_right₂ y.continuous_mul'),
continuous_inv := continuous_inf_rng
(continuous_inf_dom_left x.continuous_inv') (continuous_inf_dom_right y.continuous_inv') } }
@[simp, to_additive]
lemma to_topological_space_inf (x y : group_topology α) :
(x ⊓ y).to_topological_space = x.to_topological_space ⊓ y.to_topological_space := rfl
@[to_additive]
instance : semilattice_inf (group_topology α) :=
to_topological_space_injective.semilattice_inf _ to_topological_space_inf
@[to_additive]
instance : inhabited (group_topology α) := ⟨⊤⟩
local notation `cont` := @continuous _ _
@[to_additive "Infimum of a collection of additive group topologies"]
instance : has_Inf (group_topology α) :=
{ Inf := λ S,
{ to_topological_space := Inf (to_topological_space '' S),
continuous_mul := continuous_Inf_rng begin
rintros _ ⟨⟨t, tr⟩, haS, rfl⟩, resetI,
exact continuous_Inf_dom₂
(set.mem_image_of_mem to_topological_space haS)
(set.mem_image_of_mem to_topological_space haS) continuous_mul,
end,
continuous_inv := continuous_Inf_rng begin
rintros _ ⟨⟨t, tr⟩, haS, rfl⟩, resetI,
exact continuous_Inf_dom (set.mem_image_of_mem to_topological_space haS) continuous_inv,
end, } }
@[simp, to_additive]
lemma to_topological_space_Inf (s : set (group_topology α)) :
(Inf s).to_topological_space = Inf (to_topological_space '' s) := rfl
@[simp, to_additive]
lemma to_topological_space_infi {ι} (s : ι → group_topology α) :
(⨅ i, s i).to_topological_space = ⨅ i, (s i).to_topological_space :=
congr_arg Inf (range_comp _ _).symm
/-- Group topologies on `α` form a complete lattice, with `⊥` the discrete topology and `⊤` the
indiscrete topology.
The infimum of a collection of group topologies is the topology generated by all their open sets
(which is a group topology).
The supremum of two group topologies `s` and `t` is the infimum of the family of all group
topologies contained in the intersection of `s` and `t`. -/
@[to_additive]
instance : complete_semilattice_Inf (group_topology α) :=
{ Inf_le := λ S a haS, to_topological_space_le.1 $ Inf_le ⟨a, haS, rfl⟩,
le_Inf :=
begin
intros S a hab,
apply topological_space.complete_lattice.le_Inf,
rintros _ ⟨b, hbS, rfl⟩,
exact hab b hbS,
end,
..group_topology.has_Inf,
..group_topology.partial_order }
@[to_additive]
instance : complete_lattice (group_topology α) :=
{ inf := (⊓),
top := ⊤,
bot := ⊥,
..group_topology.bounded_order,
..group_topology.semilattice_inf,
..complete_lattice_of_complete_semilattice_Inf _ }
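/- Illustrative sketches (not in the original file): the complete-lattice API is now
available on `group_topology α`. -/
example (x : group_topology α) : ⊥ ≤ x ∧ x ≤ ⊤ := ⟨bot_le, le_top⟩
example (S : set (group_topology α)) (x : group_topology α) (hx : x ∈ S) : Inf S ≤ x :=
Inf_le hx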
/-- Given `f : α → β` and a topology on `α`, the coinduced group topology on `β` is the finest
topology such that `f` is continuous and `β` is a topological group. -/
@[to_additive "Given `f : α → β` and a topology on `α`, the coinduced additive group topology on `β`
is the finest topology such that `f` is continuous and `β` is a topological additive group."]
def coinduced {α β : Type*} [t : topological_space α] [group β] (f : α → β) :
group_topology β :=
Inf {b : group_topology β | (topological_space.coinduced f t) ≤ b.to_topological_space}
@[to_additive]
lemma coinduced_continuous {α β : Type*} [t : topological_space α] [group β]
(f : α → β) : cont t (coinduced f).to_topological_space f :=
begin
rw continuous_iff_coinduced_le,
refine le_Inf _,
rintros _ ⟨t', ht', rfl⟩,
exact ht',
end
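/- Usage sketch (not in the original file): `coinduced_continuous` restated with the local
`cont` notation, exactly as in the lemma above. -/
example {α β : Type*} [t : topological_space α] [group β] (f : α → β) :
  cont t (coinduced f).to_topological_space f :=
coinduced_continuous f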
end group_topology
|
(* Title: HOL/Auth/n_german_lemma_inv__15_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_inv__15_on_rules imports n_german_lemma_on_inv__15
begin
section{*All lemmas on causal relation between inv__15*}
lemma lemma_inv__15_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__15 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqES i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvE i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)\<or>
(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> j. j\<le>N\<and>r=n_SendReqS j)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqEI i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqEIVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqES i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqESVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvEVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvSVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__15) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__15) done
}
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__15) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
-- Andreas, 2019-09-13, AIM XXX, test for #4050 by gallais
-- Jesper, 2019-12-19, moved to test/Fail after unfix of #3823
record Wrap : Set₂ where
field wrapped : Set₁
f : Wrap
f = record { M }
module M where
wrapped : Set₁
wrapped = Set
-- Should be accepted.
|