(* Title: HOL/Auth/n_germanSimp_lemma_inv__18_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_inv__18_on_rules imports n_germanSimp_lemma_on_inv__18
begin
section{*All lemmas on the causal relation between inv__18 and the protocol rules*}
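(* The proof performs an exhaustive case split on the protocol rule r (fact c1
   below) and discharges each case with the corresponding per-rule lemma
   n_<rule>Vsinv__18 from the imported theory n_germanSimp_lemma_on_inv__18. *)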
lemma lemma_inv__18_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__18 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__0Vsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__1Vsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__18) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__18) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
module Text.WebIDL.Types.Member
import Generics.Derive
import Text.WebIDL.Types.Argument
import Text.WebIDL.Types.Attribute
import Text.WebIDL.Types.Identifier
import Text.WebIDL.Types.StringLit
import Text.WebIDL.Types.Type
%language ElabReflection
||| Const ::
||| const ConstType identifier = ConstValue ;
|||
||| ConstValue ::
||| BooleanLiteral
||| FloatLiteral
||| integer
public export
record Const where
constructor MkConst
type : ConstType
name : Identifier
value : ConstValue
%runElab derive "Const" [Generic,Meta,Eq,Show,HasAttributes]
--------------------------------------------------------------------------------
-- Operation
--------------------------------------------------------------------------------
||| OperationName ::
||| OperationNameKeyword
||| identifier
|||
||| OperationNameKeyword ::
||| includes
public export
record OperationName where
constructor MkOpName
value : String
%runElab derive "OperationName" [Generic,Meta,Eq,Show,HasAttributes]
||| Special ::
||| getter
||| setter
||| deleter
public export
data Special = Getter | Setter | Deleter
%runElab derive "Special" [Generic,Meta,Eq,Show,HasAttributes]
||| RegularOperation ::
||| Type OperationRest
|||
||| OperationRest ::
||| OptionalOperationName ( ArgumentList ) ;
|||
||| OptionalOperationName ::
||| OperationName
||| ε
|||
||| SpecialOperation ::
||| Special RegularOperation
|||
||| Operation ::
||| RegularOperation
||| SpecialOperation
public export
record Op a where
constructor MkOp
special : a
type : IdlType
name : Maybe OperationName
args : ArgumentList
%runElab derive "Op" [Generic,Meta,Eq,Show,HasAttributes]
public export
0 RegularOperation : Type
RegularOperation = Op ()
public export
0 SpecialOperation : Type
SpecialOperation = Op Special
public export
0 Operation : Type
Operation = Op (Maybe Special)
public export
regToOp : RegularOperation -> Operation
regToOp = record { special = Nothing }
public export
specToOp : SpecialOperation -> Operation
specToOp = record { special $= Just }
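-- Note: `regToOp` overwrites the (unit) `special` field with `Nothing`, while
-- `specToOp` wraps the existing `Special` value in `Just`; both therefore yield
-- an `Op (Maybe Special)`, i.e. an `Operation`.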
--------------------------------------------------------------------------------
-- Callbacks
--------------------------------------------------------------------------------
||| CallbackInterfaceMember ::
||| Const
||| RegularOperation
public export
0 CallbackInterfaceMember : Type
CallbackInterfaceMember = NS I [Const,RegularOperation]
||| CallbackInterfaceMembers ::
||| ExtendedAttributeList CallbackInterfaceMember CallbackInterfaceMembers
||| ε
public export
0 CallbackInterfaceMembers : Type
CallbackInterfaceMembers = List (Attributed CallbackInterfaceMember)
--------------------------------------------------------------------------------
-- Dictionary
--------------------------------------------------------------------------------
||| Inheritance ::
||| : identifier
||| ε
public export
0 Inheritance : Type
Inheritance = Maybe Identifier
||| DictionaryMemberRest ::
||| required TypeWithExtendedAttributes identifier ;
||| Type identifier Default ;
public export
data DictionaryMemberRest : Type where
Required : (attrs : ExtAttributeList)
-> (type : IdlType)
-> (name : Identifier)
-> DictionaryMemberRest
Optional : (type : IdlType)
-> (name : Identifier)
-> (deflt : Default)
-> DictionaryMemberRest
%runElab derive "DictionaryMemberRest" [Generic,Meta,Eq,Show,HasAttributes]
||| DictionaryMember ::
||| ExtendedAttributeList DictionaryMemberRest
public export
0 DictionaryMember : Type
DictionaryMember = Attributed DictionaryMemberRest
||| DictionaryMembers ::
||| DictionaryMember DictionaryMembers
||| ε
public export
0 DictionaryMembers : Type
DictionaryMembers = List DictionaryMember
--------------------------------------------------------------------------------
-- Attributes
--------------------------------------------------------------------------------
public export
record Readonly a where
constructor MkRO
value : a
%runElab derive "Readonly" [Generic,Meta,Eq,Show,HasAttributes]
public export
record Inherit a where
constructor MkI
value : a
%runElab derive "Inherit" [Generic,Meta,Eq,Show,HasAttributes]
||| AttributeName ::
||| AttributeNameKeyword
||| identifier
|||
||| AttributeNameKeyword ::
||| async
||| required
public export
record AttributeName where
constructor MkAttributeName
value : String
%runElab derive "AttributeName" [Generic,Meta,Eq,Ord,Show,HasAttributes]
||| AttributeRest ::
||| attribute TypeWithExtendedAttributes AttributeName ;
public export
record Attribute where
constructor MkAttribute
attrs : ExtAttributeList
type : IdlType
name : AttributeName
%runElab derive "Attribute" [Generic,Meta,Eq,Show,HasAttributes]
||| ReadWriteMaplike ::
||| MaplikeRest
|||
||| MaplikeRest ::
||| maplike < TypeWithExtendedAttributes , TypeWithExtendedAttributes > ;
public export
record Maplike where
constructor MkMaplike
fstTpe : Attributed IdlType
sndTpe : Attributed IdlType
%runElab derive "Maplike" [Generic,Meta,Eq,Show,HasAttributes]
export
HasAttributes Maplike where
attributes v = fst (v.fstTpe) ++ fst (v.sndTpe)
||| ReadWriteSetlike ::
||| SetlikeRest
|||
||| SetlikeRest ::
||| setlike < TypeWithExtendedAttributes > ;
public export
record Setlike where
constructor MkSetlike
type : Attributed IdlType
%runElab derive "Setlike" [Generic,Meta,Eq,Show,HasAttributes]
||| StringifierRest ::
||| OptionalReadOnly AttributeRest
||| RegularOperation
||| ;
|||
||| Stringifier ::
||| stringifier StringifierRest
public export
0 Stringifier : Type
Stringifier = NS I [Attribute, Readonly Attribute, RegularOperation,()]
||| StaticMember ::
||| static StaticMemberRest
|||
||| StaticMemberRest ::
||| OptionalReadOnly AttributeRest
||| RegularOperation
public export
0 StaticMember : Type
StaticMember = NS I [Attribute, Readonly Attribute, RegularOperation]
--------------------------------------------------------------------------------
-- Namespace
--------------------------------------------------------------------------------
||| NamespaceMember ::
||| RegularOperation
||| readonly AttributeRest
public export
0 NamespaceMember : Type
NamespaceMember = NS I [RegularOperation, Readonly Attribute]
||| NamespaceMembers ::
||| ExtendedAttributeList NamespaceMember NamespaceMembers
||| ε
public export
0 NamespaceMembers : Type
NamespaceMembers = List (Attributed NamespaceMember)
--------------------------------------------------------------------------------
-- Interface
--------------------------------------------------------------------------------
||| Constructor ::
||| constructor ( ArgumentList ) ;
public export
record Constructor where
constructor MkConstructor
args : ArgumentList
%runElab derive "Constructor" [Generic,Meta,Eq,Show,HasAttributes]
||| PartialInterfaceMember ::
||| Const
||| Operation
||| Stringifier
||| StaticMember
||| Iterable
||| AsyncIterable
||| ReadOnlyMember
||| ReadWriteAttribute
||| ReadWriteMaplike
||| ReadWriteSetlike
||| InheritAttribute
|||
||| Iterable ::
||| iterable < TypeWithExtendedAttributes OptionalType > ;
public export
data PartialInterfaceMember =
IConst Const
| IOp Operation
| IStr Stringifier
| IStatic StaticMember
| IAttr Attribute
| IMap Maplike
| ISet Setlike
| IAttrRO (Readonly Attribute)
| IMapRO (Readonly Maplike)
| ISetRO (Readonly Setlike)
| IAttrInh (Inherit Attribute)
| IIterable (Attributed IdlType) OptionalType
| IAsync (Attributed IdlType) OptionalType ArgumentList
%runElab derive "PartialInterfaceMember" [Generic,Meta,Eq,Show,HasAttributes]
||| MixinMember ::
||| Const
||| RegularOperation
||| Stringifier
||| OptionalReadOnly AttributeRest
public export
data MixinMember =
MConst Const
| MOp RegularOperation
| MStr Stringifier
| MAttr Attribute
| MAttrRO (Readonly Attribute)
%runElab derive "MixinMember" [Generic,Meta,Eq,Show,HasAttributes]
||| PartialInterfaceMembers ::
||| ExtendedAttributeList PartialInterfaceMember PartialInterfaceMembers
||| ε
public export
0 PartialInterfaceMembers : Type
PartialInterfaceMembers = List (Attributed PartialInterfaceMember)
||| InterfaceMember ::
||| PartialInterfaceMember
||| Constructor
public export
0 InterfaceMember : Type
InterfaceMember = NS I [Constructor,PartialInterfaceMember]
||| InterfaceMembers ::
||| ExtendedAttributeList InterfaceMember InterfaceMembers
||| ε
public export
0 InterfaceMembers : Type
InterfaceMembers = List (Attributed InterfaceMember)
||| MixinMembers ::
||| ExtendedAttributeList MixinMember MixinMembers
||| ε
public export
0 MixinMembers : Type
MixinMembers = List (Attributed MixinMember)
--------------------------------------------------------------------------------
-- Extractors
--------------------------------------------------------------------------------
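-- The extractors below pattern match on the n-ary sums (`NS I [...]`) defined
-- above: `Z x` selects the first alternative, `S $ Z x` the second, and so on.
-- For example, in `CallbackInterfaceMember = NS I [Const,RegularOperation]`,
-- the pattern `(_, Z x)` matches an attributed `Const`.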
namespace CallbackInterfaceMember
export
const : Attributed CallbackInterfaceMember -> Maybe Const
const (_,Z x) = Just x
const _ = Nothing
namespace Dictionary
export
optional : DictionaryMember -> Maybe Attribute
optional (_, Required _ _ _) = Nothing
optional (_, Optional t n _) =
Just $ MkAttribute Nil t (MkAttributeName n.value)
export
required : DictionaryMember -> Maybe Attribute
required (_, Required _ t n) =
Just $ MkAttribute Nil t (MkAttributeName n.value)
required (_, Optional _ _ _) = Nothing
namespace InterfaceMember
export
part : (PartialInterfaceMember -> Maybe a)
-> Attributed InterfaceMember
-> Maybe a
part f (_,(S $ Z $ p)) = f p
part _ _ = Nothing
namespace MixinMember
export
const : Attributed MixinMember -> Maybe Const
const (_,MConst x) = Just x
const _ = Nothing
export
attrRO : Attributed MixinMember -> Maybe (Readonly Attribute)
attrRO (_, (MAttrRO x)) = Just x
attrRO _ = Nothing
export
attr : Attributed MixinMember -> Maybe Attribute
attr (_, (MAttr x)) = Just x
attr _ = Nothing
namespace NamespaceMember
export
attrRO : NamespaceMember -> Maybe (Readonly Attribute)
attrRO (S $ Z x) = Just x
attrRO (Z _) = Nothing
namespace PartialInterfaceMember
export
const : PartialInterfaceMember -> Maybe Const
const (IConst x) = Just x
const _ = Nothing
export
attrRO : PartialInterfaceMember -> Maybe (Readonly Attribute)
attrRO (IAttrRO x) = Just x
attrRO _ = Nothing
export
attr : PartialInterfaceMember -> Maybe Attribute
attr (IAttr x) = Just x
attr _ = Nothing
|
module Test.Bits16
import Data.Prim.Bits16
import Data.SOP
import Hedgehog
import Test.RingLaws
allBits16 : Gen Bits16
allBits16 = bits16 (linear 0 0xffff)
gt0 : Gen Bits16
gt0 = bits16 (linear 1 MaxBits16)
gt1 : Gen Bits16
gt1 = bits16 (linear 2 MaxBits16)
prop_ltMax : Property
prop_ltMax = property $ do
b16 <- forAll allBits16
(b16 <= MaxBits16) === True
prop_ltMin : Property
prop_ltMin = property $ do
b16 <- forAll allBits16
(b16 >= MinBits16) === True
prop_comp : Property
prop_comp = property $ do
[m,n] <- forAll $ np [allBits16, allBits16]
toOrdering (comp m n) === compare m n
prop_mod : Property
prop_mod = property $ do
[n,d] <- forAll $ np [allBits16, gt0]
compare (n `mod` d) d === LT
prop_div : Property
prop_div = property $ do
[n,d] <- forAll $ np [gt0, gt1]
compare (n `div` d) n === LT
prop_divMod : Property
prop_divMod = property $ do
[n,d] <- forAll $ np [allBits16, gt0]
let x = n `div` d
r = n `mod` d
n === x * d + r
export
props : Group
props = MkGroup "Bits16" $
[ ("prop_ltMax", prop_ltMax)
, ("prop_ltMin", prop_ltMin)
, ("prop_comp", prop_comp)
, ("prop_mod", prop_mod)
, ("prop_div", prop_div)
, ("prop_divMod", prop_divMod)
] ++ ringProps allBits16
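-- Hypothetical usage from a test driver (assuming the runner exposes
-- `test : List Group -> IO ()`, as in the idris2-hedgehog examples):
--
--   main : IO ()
--   main = test [ props ]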
|
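The proof trace that follows shows, step by step, that squashing a generalized continued fraction at position n leaves the next convergent unchanged, i.e. `convergents g (n + 1) = convergents (squashGCF g n) n`, provided the n-th partial denominator `b` is nonzero. A minimal summary of the base case, read off the `calc` step in the trace (all names as in the trace):

/- Base case (n = 0), for `b ≠ 0`:
     (b * g.h + a) / b = b * g.h / b + a / b = g.h + a / b
   i.e. the first convergent of `g` equals the head of the squashed fraction. -/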
State Before: K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: case inl
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
terminated_at_n : TerminatedAt g n
⊢ convergents g (n + 1) = convergents (squashGCF g n) n
case inr
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
⊢ convergents g (n + 1) = convergents (squashGCF g n) n Tactic: cases' Decidable.em (g.TerminatedAt n) with terminated_at_n not_terminated_at_n State Before: case inl
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
terminated_at_n : TerminatedAt g n
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: case inl
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
terminated_at_n : TerminatedAt g n
this : squashGCF g n = g
⊢ convergents g (n + 1) = convergents (squashGCF g n) n Tactic: have : squashGCF g n = g := squashGCF_eq_self_of_terminated terminated_at_n State Before: case inl
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
terminated_at_n : TerminatedAt g n
this : squashGCF g n = g
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: no goals Tactic: simp only [this, convergents_stable_of_terminated n.le_succ terminated_at_n] State Before: case inr
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
⊢ ∃ gp_n, Stream'.Seq.get? g.s n = some gp_n
case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
⊢ convergents g (n + 1) = convergents (squashGCF g n) n Tactic: obtain ⟨⟨a, b⟩, s_nth_eq⟩ : ∃ gp_n, g.s.get? n = some gp_n State Before: K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
⊢ ∃ gp_n, Stream'.Seq.get? g.s n = some gp_n
case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
⊢ convergents g (n + 1) = convergents (squashGCF g n) n Tactic: exact Option.ne_none_iff_exists'.mp not_terminated_at_n State Before: case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
b_ne_zero : b ≠ 0
⊢ convergents g (n + 1) = convergents (squashGCF g n) n Tactic: have b_ne_zero : b ≠ 0 := nth_part_denom_ne_zero (part_denom_eq_s_b s_nth_eq) State Before: case inr.intro.mk
K : Type u_1
n : ℕ
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) n = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g n
a b : K
s_nth_eq : Stream'.Seq.get? g.s n = some { a := a, b := b }
b_ne_zero : b ≠ 0
⊢ convergents g (n + 1) = convergents (squashGCF g n) n State After: case inr.intro.mk.zero
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ convergents g (Nat.zero + 1) = convergents (squashGCF g Nat.zero) Nat.zero
case inr.intro.mk.succ
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: cases' n with n' State Before: case inr.intro.mk.zero
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ convergents g (Nat.zero + 1) = convergents (squashGCF g Nat.zero) Nat.zero
case inr.intro.mk.succ
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case inr.intro.mk.succ
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: case zero =>
suffices (b * g.h + a) / b = g.h + a / b by
simpa [squashGCF, s_nth_eq, convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq zeroth_continuant_aux_eq_one_zero
first_continuant_aux_eq_h_one]
calc
(b * g.h + a) / b = b * g.h / b + a / b := by ring
_ = g.h + a / b := by rw [mul_div_cancel_left _ b_ne_zero] State Before: case inr.intro.mk.succ
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: no goals Tactic: case succ =>
obtain ⟨⟨pa, pb⟩, s_n'th_eq⟩ : ∃ gp_n', g.s.get? n' = some gp_n' :=
g.s.ge_stable n'.le_succ s_nth_eq
let g' := squashGCF g (n' + 1)
set pred_conts := g.continuantsAux (n' + 1) with succ_n'th_conts_aux_eq
set ppred_conts := g.continuantsAux n' with n'th_conts_aux_eq
let pA := pred_conts.a
let pB := pred_conts.b
let ppA := ppred_conts.a
let ppB := ppred_conts.b
set pred_conts' := g'.continuantsAux (n' + 1) with succ_n'th_conts_aux_eq'
set ppred_conts' := g'.continuantsAux n' with n'th_conts_aux_eq'
let pA' := pred_conts'.a
let pB' := pred_conts'.b
let ppA' := ppred_conts'.a
let ppB' := ppred_conts'.b
have : g'.convergents (n' + 1) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') := by
have : g'.s.get? n' = some ⟨pa, pb + a / b⟩ :=
squashSeq_nth_of_not_terminated s_n'th_eq s_nth_eq
rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux this n'th_conts_aux_eq'.symm succ_n'th_conts_aux_eq'.symm]
rw [this]
have : g.convergents (n' + 2) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) := by
have : g.continuantsAux (n' + 2) = ⟨pb * pA + pa * ppA, pb * pB + pa * ppB⟩ :=
continuantsAux_recurrence s_n'th_eq n'th_conts_aux_eq.symm succ_n'th_conts_aux_eq.symm
rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq succ_n'th_conts_aux_eq.symm this]
rw [this]
suffices
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) by
obtain ⟨eq1, eq2, eq3, eq4⟩ : pA' = pA ∧ pB' = pB ∧ ppA' = ppA ∧ ppB' = ppB := by
simp [*, (continuantsAux_eq_continuantsAux_squashGCF_of_le <| le_refl <| n' + 1).symm,
(continuantsAux_eq_continuantsAux_squashGCF_of_le n'.le_succ).symm]
symm
simpa only [eq1, eq2, eq3, eq4, mul_div_cancel _ b_ne_zero]
field_simp
congr 1 <;> ring State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ convergents g (Nat.zero + 1) = convergents (squashGCF g Nat.zero) Nat.zero State After: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ (b * g.h + a) / b = g.h + a / b Tactic: suffices (b * g.h + a) / b = g.h + a / b by
simpa [squashGCF, s_nth_eq, convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq zeroth_continuant_aux_eq_one_zero
first_continuant_aux_eq_h_one] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ (b * g.h + a) / b = g.h + a / b State After: no goals Tactic: calc
(b * g.h + a) / b = b * g.h / b + a / b := by ring
_ = g.h + a / b := by rw [mul_div_cancel_left _ b_ne_zero] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
this : (b * g.h + a) / b = g.h + a / b
⊢ convergents g (Nat.zero + 1) = convergents (squashGCF g Nat.zero) Nat.zero State After: no goals Tactic: simpa [squashGCF, s_nth_eq, convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq zeroth_continuant_aux_eq_one_zero
first_continuant_aux_eq_h_one] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ (b * g.h + a) / b = b * g.h / b + a / b State After: no goals Tactic: ring State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) Nat.zero = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g Nat.zero
s_nth_eq : Stream'.Seq.get? g.s Nat.zero = some { a := a, b := b }
⊢ b * g.h / b + a / b = g.h + a / b State After: no goals Tactic: rw [mul_div_cancel_left _ b_ne_zero] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: obtain ⟨⟨pa, pb⟩, s_n'th_eq⟩ : ∃ gp_n', g.s.get? n' = some gp_n' :=
g.s.ge_stable n'.le_succ s_nth_eq State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let g' := squashGCF g (n' + 1) State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: set pred_conts := g.continuantsAux (n' + 1) with succ_n'th_conts_aux_eq State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: set ppred_conts := g.continuantsAux n' with n'th_conts_aux_eq State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let pA := pred_conts.a State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let pB := pred_conts.b State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let ppA := ppred_conts.a State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let ppB := ppred_conts.b State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: set pred_conts' := g'.continuantsAux (n' + 1) with succ_n'th_conts_aux_eq' State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: set ppred_conts' := g'.continuantsAux n' with n'th_conts_aux_eq' State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let pA' := pred_conts'.a State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let pB' := pred_conts'.b State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let ppA' := ppred_conts'.a State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: let ppB' := ppred_conts'.b State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') Tactic: have : g'.convergents (n' + 1) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') := by
have : g'.s.get? n' = some ⟨pa, pb + a / b⟩ :=
squashSeq_nth_of_not_terminated s_n'th_eq s_nth_eq
rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux this n'th_conts_aux_eq'.symm succ_n'th_conts_aux_eq'.symm] State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
⊢ convergents g (Nat.succ n' + 1) = convergents (squashGCF g (Nat.succ n')) (Nat.succ n') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
⊢ convergents g (Nat.succ n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') Tactic: rw [this] State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
⊢ convergents g (Nat.succ n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ convergents g (Nat.succ n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') Tactic: have : g.convergents (n' + 2) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) := by
have : g.continuantsAux (n' + 2) = ⟨pb * pA + pa * ppA, pb * pB + pa * ppB⟩ :=
continuantsAux_recurrence s_n'th_eq n'th_conts_aux_eq.symm succ_n'th_conts_aux_eq.symm
rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq succ_n'th_conts_aux_eq.symm this] State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ convergents g (Nat.succ n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') Tactic: rw [this] State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ ((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) Tactic: suffices
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) by
obtain ⟨eq1, eq2, eq3, eq4⟩ : pA' = pA ∧ pB' = pB ∧ ppA' = ppA ∧ ppB' = ppB := by
simp [*, (continuantsAux_eq_continuantsAux_squashGCF_of_le <| le_refl <| n' + 1).symm,
(continuantsAux_eq_continuantsAux_squashGCF_of_le n'.le_succ).symm]
symm
simpa only [eq1, eq2, eq3, eq4, mul_div_cancel _ b_ne_zero] State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ ((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) State After: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ ((pb * b + a) * (continuantsAux g (n' + 1)).a + pa * (continuantsAux g n').a * b) /
((pb * b + a) * (continuantsAux g (n' + 1)).b + pa * (continuantsAux g n').b * b) =
(b * (pb * (continuantsAux g (n' + 1)).a + pa * (continuantsAux g n').a) + a * (continuantsAux g (n' + 1)).a) /
(b * (pb * (continuantsAux g (n' + 1)).b + pa * (continuantsAux g n').b) + a * (continuantsAux g (n' + 1)).b) Tactic: field_simp State Before: case intro.mk
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ ((pb * b + a) * (continuantsAux g (n' + 1)).a + pa * (continuantsAux g n').a * b) /
((pb * b + a) * (continuantsAux g (n' + 1)).b + pa * (continuantsAux g n').b * b) =
(b * (pb * (continuantsAux g (n' + 1)).a + pa * (continuantsAux g n').a) + a * (continuantsAux g (n' + 1)).a) /
(b * (pb * (continuantsAux g (n' + 1)).b + pa * (continuantsAux g n').b) + a * (continuantsAux g (n' + 1)).b) State After: no goals Tactic: congr 1 <;> ring State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
⊢ convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : Stream'.Seq.get? g'.s n' = some { a := pa, b := pb + a / b }
⊢ convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') Tactic: have : g'.s.get? n' = some ⟨pa, pb + a / b⟩ :=
squashSeq_nth_of_not_terminated s_n'th_eq s_nth_eq State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : Stream'.Seq.get? g'.s n' = some { a := pa, b := pb + a / b }
⊢ convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: no goals Tactic: rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux this n'th_conts_aux_eq'.symm succ_n'th_conts_aux_eq'.symm] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
⊢ convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) State After: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : continuantsAux g (n' + 2) = { a := pb * pA + pa * ppA, b := pb * pB + pa * ppB }
⊢ convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) Tactic: have : g.continuantsAux (n' + 2) = ⟨pb * pA + pa * ppA, pb * pB + pa * ppB⟩ :=
continuantsAux_recurrence s_n'th_eq n'th_conts_aux_eq.symm succ_n'th_conts_aux_eq.symm State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this : continuantsAux g (n' + 2) = { a := pb * pA + pa * ppA, b := pb * pB + pa * ppB }
⊢ convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) State After: no goals Tactic: rw [convergent_eq_conts_a_div_conts_b,
continuants_recurrenceAux s_nth_eq succ_n'th_conts_aux_eq.symm this] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: case intro.intro.intro
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
eq1 : pA' = pA
eq2 : pB' = pB
eq3 : ppA' = ppA
eq4 : ppB' = ppB
⊢ (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') Tactic: obtain ⟨eq1, eq2, eq3, eq4⟩ : pA' = pA ∧ pB' = pB ∧ ppA' = ppA ∧ ppB' = ppB := by
simp [*, (continuantsAux_eq_continuantsAux_squashGCF_of_le <| le_refl <| n' + 1).symm,
(continuantsAux_eq_continuantsAux_squashGCF_of_le n'.le_succ).symm] State Before: case intro.intro.intro
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
eq1 : pA' = pA
eq2 : pB' = pB
eq3 : ppA' = ppA
eq4 : ppB' = ppB
⊢ (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) =
((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') State After: case intro.intro.intro
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
eq1 : pA' = pA
eq2 : pB' = pB
eq3 : ppA' = ppA
eq4 : ppB' = ppB
⊢ ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) Tactic: symm State Before: case intro.intro.intro
K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
eq1 : pA' = pA
eq2 : pB' = pB
eq3 : ppA' = ppA
eq4 : ppB' = ppB
⊢ ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB') =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB) State After: no goals Tactic: simpa only [eq1, eq2, eq3, eq4, mul_div_cancel _ b_ne_zero] State Before: K : Type u_1
g : GeneralizedContinuedFraction K
s : Stream'.Seq (Pair K)
inst✝ : Field K
a b : K
b_ne_zero : b ≠ 0
n' : ℕ
nth_part_denom_ne_zero : ∀ {b : K}, Stream'.Seq.get? (partialDenominators g) (Nat.succ n') = some b → b ≠ 0
not_terminated_at_n : ¬TerminatedAt g (Nat.succ n')
s_nth_eq : Stream'.Seq.get? g.s (Nat.succ n') = some { a := a, b := b }
pa pb : K
s_n'th_eq : Stream'.Seq.get? g.s n' = some { a := pa, b := pb }
g' : GeneralizedContinuedFraction K := squashGCF g (n' + 1)
pred_conts : Pair K := continuantsAux g (n' + 1)
succ_n'th_conts_aux_eq : pred_conts = continuantsAux g (n' + 1)
ppred_conts : Pair K := continuantsAux g n'
n'th_conts_aux_eq : ppred_conts = continuantsAux g n'
pA : K := pred_conts.a
pB : K := pred_conts.b
ppA : K := ppred_conts.a
ppB : K := ppred_conts.b
pred_conts' : Pair K := continuantsAux g' (n' + 1)
succ_n'th_conts_aux_eq' : pred_conts' = continuantsAux g' (n' + 1)
ppred_conts' : Pair K := continuantsAux g' n'
n'th_conts_aux_eq' : ppred_conts' = continuantsAux g' n'
pA' : K := pred_conts'.a
pB' : K := pred_conts'.b
ppA' : K := ppred_conts'.a
ppB' : K := ppred_conts'.b
this✝¹ : convergents g' (n' + 1) = ((pb + a / b) * pA' + pa * ppA') / ((pb + a / b) * pB' + pa * ppB')
this✝ : convergents g (n' + 2) = (b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
this :
((pb + a / b) * pA + pa * ppA) / ((pb + a / b) * pB + pa * ppB) =
(b * (pb * pA + pa * ppA) + a * pA) / (b * (pb * pB + pa * ppB) + a * pB)
⊢ pA' = pA ∧ pB' = pB ∧ ppA' = ppA ∧ ppB' = ppB State After: no goals Tactic: simp [*, (continuantsAux_eq_continuantsAux_squashGCF_of_le <| le_refl <| n' + 1).symm,
(continuantsAux_eq_continuantsAux_squashGCF_of_le n'.le_succ).symm] |
theory AddPredicateTransformers
imports MonoPredicateTransformers
begin
definition additive :: "('a :: bbi \<Rightarrow> 'b :: bbi) \<Rightarrow> bool" where
"additive f \<equiv> \<forall>x y. f (x + y) = f x + f y"
definition comp_additive :: "('a :: bbi \<Rightarrow> 'b :: bbi) \<Rightarrow> bool" where
"comp_additive f \<equiv> \<forall>X. f (\<Squnion>X) = \<Squnion>{f x | x. x \<in> X}"
lemma comp_add_addI: "comp_additive f \<Longrightarrow> additive f"
proof (auto simp: comp_additive_def additive_def)
fix x y assume "\<forall>X. f (\<Squnion>X) = \<Squnion>{f x |x. x \<in> X}"
hence "f (\<Squnion>{x, y}) = \<Squnion>{f z |z. z \<in> {x, y}}" by metis
thus "f (x + y) = f x + f y"
by (auto intro!: Sup_eqI) metis+
qed
lemma add_mono[dest?]: "additive F \<Longrightarrow> mono F"
by (auto simp add: mono_def additive_def) (metis le_iff_sup)
lemma comp_add_mono[dest?]: "comp_additive F \<Longrightarrow> mono F"
by (metis add_mono comp_add_addI)
text {* Closed operations *}
lemma comp_additive_skip [simp,intro]: "comp_additive id"
by (simp add: comp_additive_def)
lemma comp_additive_comp_clos [intro]: "\<lbrakk>comp_additive (F :: 'a :: bbi \<Rightarrow> 'a); comp_additive (G :: 'a :: bbi \<Rightarrow> 'a)\<rbrakk> \<Longrightarrow> comp_additive (F o G)"
by (auto simp: comp_additive_def intro!: Sup_eqI Sup_upper Sup_least) force
lemma comp_additive_abort [simp,intro]: "comp_additive \<bottom>"
by (simp add: comp_additive_def)
lemma comp_additive_sup_clos [intro]: "\<lbrakk>comp_additive F; comp_additive G\<rbrakk> \<Longrightarrow> comp_additive (F + G)"
apply (auto simp: comp_additive_def intro!: Sup_eqI[symmetric])
apply (rule le_supI1, auto intro!: Sup_least Sup_upper)
by (auto intro!: le_supI2 Sup_upper)
text {* Meet and top are not closed under complete additivity *}
lemma comp_additive_inf_clos [intro]: "\<lbrakk>comp_additive F; comp_additive G\<rbrakk> \<Longrightarrow> comp_additive (F \<sqinter> G)"
(* nitpick *) oops
lemma comp_additive_magic [simp,intro]: "comp_additive \<top>"
(* nitpick *) oops
lemma comp_additive_Sup_clos [intro]: "\<forall>f \<in> F. comp_additive f \<Longrightarrow> comp_additive (\<Squnion>F)"
proof (simp add: comp_additive_def, rule allI)
fix X
assume assm: "\<forall>f \<in> F. \<forall>X. f (\<Squnion>X) = \<Squnion>{f x |x. x \<in> X}"
hence "(SUP f:F. f (\<Squnion>X)) = (SUP f:F. \<Squnion> {f x| x. x \<in> X})"
by (auto intro: SUP_cong)
also have "... = \<Squnion> {(SUP f:F. f x) | x. x \<in> X}"
apply (auto intro!: Sup_eqI[symmetric])
apply (rule SUP_mono)
apply (rule_tac x=f in bexI)
apply (rule Sup_upper)
apply force
apply simp
apply (rule SUP_least)
apply (rule Sup_least)
apply auto
by (metis SUP_upper dual_order.trans)
finally show "(SUP f:F. f (\<Squnion>X)) = \<Squnion>{SUP f:F. f x |x. x \<in> X}"
by auto
qed
text {* Type of additive predicate transformers *}
typedef 'a atran = "{F:: 'a :: bbi \<Rightarrow> 'a. comp_additive F}"
by (rule_tac x=id in exI) (auto simp: comp_additive_def)
setup_lifting type_definition_atran
text {* Every additive predicate transformer is a monotonic one *}
lemma "\<forall>(F :: 'a :: bbi atran). \<exists>(G :: 'a mtran). Rep_mtran G = Rep_atran F"
by transfer (auto intro: comp_add_mono)
text {* Morphism from additive to monotonic predicate transformers *}
lift_definition mptran :: "'a :: bbi atran \<Rightarrow> 'a mtran" is "\<lambda>F. F"
by (metis comp_add_mono)
instantiation atran :: (bbi) bounded_semilattice_sup_bot
begin
lift_definition bot_atran :: "'a atran" is \<bottom> ..
lift_definition less_eq_atran:: "'a atran \<Rightarrow> 'a atran \<Rightarrow> bool" is "op \<le>" .
lift_definition less_atran :: "'a atran \<Rightarrow> 'a atran \<Rightarrow> bool" is "op <" .
lift_definition sup_atran :: "'a atran \<Rightarrow> 'a atran \<Rightarrow> 'a atran" is sup ..
instance
by default (transfer, auto)+
end
text {* Complete additive predicate transformers form a complete join-semilattice *}
instantiation atran :: (bbi) Sup
begin
lift_definition Sup_atran :: "'a atran set \<Rightarrow> 'a atran" is Sup by auto
instance ..
end
text {*
  Any complete join-semilattice forms a complete lattice.
  Note that meet and top are different from those of the complete lattice formed by ptran.
*}
instantiation atran :: (bbi) complete_lattice
begin
definition "\<Sqinter>(F :: 'a atran set) \<equiv> \<Squnion>{g. \<forall>f \<in> F. g \<le> f}"
definition "\<top> :: 'a atran \<equiv> \<Sqinter>{}"
definition "(F :: 'a atran) \<sqinter> G \<equiv> \<Sqinter>{F, G}"
instance
apply (default, simp_all add: inf_atran_def Inf_atran_def top_atran_def)
by (transfer, auto intro: Sup_least Sup_upper Inf_greatest Inf_lower)+
end
text {*
It forms a monoid
*}
instantiation atran :: (bbi) monoid_mult
begin
lift_definition one_atran :: "'a atran" is "id" ..
lift_definition times_atran :: "'a atran \<Rightarrow> 'a atran \<Rightarrow> 'a atran" is "op o" ..
instance
by default (transfer, auto)+
end
text {*
It forms a Quantale
*}
instance atran :: (bbi) near_quantale
by default (transfer, auto)
instance atran :: (bbi) near_quantale_unital ..
instance atran :: (bbi) pre_quantale
apply (default, transfer, drule comp_add_mono)
apply (rule mono_qSup_subdistl)
by simp
instance atran :: (bbi) quantale
apply default
apply transfer
apply (simp add: comp_additive_def)
apply (rule le_funI)
apply simp
apply (unfold SUP_def)
apply (erule_tac x="((\<lambda>f. f xa) ` Y)" in allE)
apply simp
apply (rule Sup_least)
apply (unfold SUP_def)
apply (rule Sup_upper)
by auto
instance atran :: (bbi) quantale_unital ..
end
|
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Justus Springer
-/
import order.complete_lattice
import category_theory.limits.shapes.pullbacks
import category_theory.category.preorder
import category_theory.limits.shapes.products
import category_theory.limits.shapes.finite_limits
/-!
# Limits in lattice categories are given by infimums and supremums.
-/
universes w u
open category_theory
open category_theory.limits
namespace category_theory.limits.complete_lattice
section semilattice
variables {α : Type u}
variables {J : Type w} [small_category J] [fin_category J]
/--
The limit cone over any functor from a finite diagram into a `semilattice_inf` with `order_top`.
-/
def finite_limit_cone [semilattice_inf α] [order_top α] (F : J ⥤ α) : limit_cone F :=
{ cone :=
{ X := finset.univ.inf F.obj,
π := { app := λ j, hom_of_le (finset.inf_le (fintype.complete _)) } },
is_limit := { lift := λ s, hom_of_le (finset.le_inf (λ j _, (s.π.app j).down.down)) } }
/--
The colimit cocone over any functor from a finite diagram into a `semilattice_sup` with `order_bot`.
-/
def finite_colimit_cocone [semilattice_sup α] [order_bot α] (F : J ⥤ α) : colimit_cocone F :=
{ cocone :=
{ X := finset.univ.sup F.obj,
ι := { app := λ i, hom_of_le (finset.le_sup (fintype.complete _)) } },
is_colimit := { desc := λ s, hom_of_le (finset.sup_le (λ j _, (s.ι.app j).down.down)) } }
@[priority 100] -- see Note [lower instance priority]
instance has_finite_limits_of_semilattice_inf_order_top [semilattice_inf α] [order_top α] :
has_finite_limits α :=
⟨λ J 𝒥₁ 𝒥₂, by exactI { has_limit := λ F, has_limit.mk (finite_limit_cone F) }⟩
@[priority 100] -- see Note [lower instance priority]
instance has_finite_colimits_of_semilattice_sup_order_bot [semilattice_sup α] [order_bot α] :
has_finite_colimits α :=
⟨λ J 𝒥₁ 𝒥₂, by exactI { has_colimit := λ F, has_colimit.mk (finite_colimit_cocone F) }⟩
/--
The limit of a functor from a finite diagram into a `semilattice_inf` with `order_top` is the
infimum of the objects in the image.
-/
lemma finite_limit_eq_finset_univ_inf [semilattice_inf α] [order_top α] (F : J ⥤ α) :
limit F = finset.univ.inf F.obj :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit F)
(finite_limit_cone F).is_limit).to_eq
/--
The colimit of a functor from a finite diagram into a `semilattice_sup` with `order_bot`
is the supremum of the objects in the image.
-/
lemma finite_colimit_eq_finset_univ_sup [semilattice_sup α] [order_bot α] (F : J ⥤ α) :
colimit F = finset.univ.sup F.obj :=
(is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit F)
(finite_colimit_cocone F).is_colimit).to_eq
/--
A finite product in the category of a `semilattice_inf` with `order_top` is the same as the infimum.
-/
lemma finite_product_eq_finset_inf [semilattice_inf α] [order_top α] {ι : Type u}
[fintype ι] (f : ι → α) : (∏ f) = (fintype.elems ι).inf f :=
begin
transitivity,
exact (is_limit.cone_point_unique_up_to_iso (limit.is_limit _)
(finite_limit_cone (discrete.functor f)).is_limit).to_eq,
change finset.univ.inf (f ∘ discrete_equiv.to_embedding) = (fintype.elems ι).inf f,
simp only [←finset.inf_map, finset.univ_map_equiv_to_embedding],
refl,
end
/--
A finite coproduct in the category of a `semilattice_sup` with `order_bot` is the same as the
supremum.
-/
lemma finite_coproduct_eq_finset_sup [semilattice_sup α] [order_bot α] {ι : Type u}
[fintype ι] (f : ι → α) : (∐ f) = (fintype.elems ι).sup f :=
begin
transitivity,
exact (is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit _)
(finite_colimit_cocone (discrete.functor f)).is_colimit).to_eq,
change finset.univ.sup (f ∘ discrete_equiv.to_embedding) = (fintype.elems ι).sup f,
simp only [←finset.sup_map, finset.univ_map_equiv_to_embedding],
refl,
end
@[priority 100] -- see Note [lower instance priority]
instance [semilattice_inf α] [order_top α] : has_binary_products α :=
begin
haveI : ∀ (x y : α), has_limit (pair x y),
{ letI := has_finite_limits_of_has_finite_limits_of_size.{u} α, apply_instance },
apply has_binary_products_of_has_limit_pair
end
/--
The binary product in the category of a `semilattice_inf` with `order_top` is the same as the
infimum.
-/
@[simp]
lemma prod_eq_inf [semilattice_inf α] [order_top α] (x y : α) : limits.prod x y = x ⊓ y :=
calc limits.prod x y = limit (pair x y) : rfl
... = finset.univ.inf (pair x y).obj : by rw finite_limit_eq_finset_univ_inf (pair.{u} x y)
... = x ⊓ (y ⊓ ⊤) : rfl -- Note: finset.inf is realized as a fold, hence the definitional equality
... = x ⊓ y : by rw inf_top_eq
@[priority 100] -- see Note [lower instance priority]
instance [semilattice_sup α] [order_bot α] : has_binary_coproducts α :=
begin
haveI : ∀ (x y : α), has_colimit (pair x y),
{ letI := has_finite_colimits_of_has_finite_colimits_of_size.{u} α, apply_instance },
apply has_binary_coproducts_of_has_colimit_pair
end
/--
The binary coproduct in the category of a `semilattice_sup` with `order_bot` is the same as the
supremum.
-/
@[simp]
lemma coprod_eq_sup [semilattice_sup α] [order_bot α] (x y : α) : limits.coprod x y = x ⊔ y :=
calc limits.coprod x y = colimit (pair x y) : rfl
... = finset.univ.sup (pair x y).obj : by rw finite_colimit_eq_finset_univ_sup (pair x y)
... = x ⊔ (y ⊔ ⊥) : rfl -- Note: finset.sup is realized as a fold, hence the definitional equality
... = x ⊔ y : by rw sup_bot_eq
/--
The pullback in the category of a `semilattice_inf` with `order_top` is the same as the infimum
over the objects.
-/
@[simp]
lemma pullback_eq_inf [semilattice_inf α] [order_top α] {x y z : α} (f : x ⟶ z) (g : y ⟶ z) :
pullback f g = x ⊓ y :=
calc pullback f g = limit (cospan f g) : rfl
... = finset.univ.inf (cospan f g).obj : by rw finite_limit_eq_finset_univ_inf
... = z ⊓ (x ⊓ (y ⊓ ⊤)) : rfl
... = z ⊓ (x ⊓ y) : by rw inf_top_eq
... = x ⊓ y : inf_eq_right.mpr (inf_le_of_left_le f.le)
/--
The pushout in the category of a `semilattice_sup` with `order_bot` is the same as the supremum
over the objects.
-/
@[simp]
lemma pushout_eq_sup [semilattice_sup α] [order_bot α] (x y z : α) (f : z ⟶ x) (g : z ⟶ y) :
pushout f g = x ⊔ y :=
calc pushout f g = colimit (span f g) : rfl
... = finset.univ.sup (span f g).obj : by rw finite_colimit_eq_finset_univ_sup
... = z ⊔ (x ⊔ (y ⊔ ⊥)) : rfl
... = z ⊔ (x ⊔ y) : by rw sup_bot_eq
... = x ⊔ y : sup_eq_right.mpr (le_sup_of_le_left f.le)
end semilattice
variables {α : Type u} [complete_lattice α]
variables {J : Type u} [small_category J]
/--
The limit cone over any functor into a complete lattice.
-/
def limit_cone (F : J ⥤ α) : limit_cone F :=
{ cone :=
{ X := infi F.obj,
π :=
{ app := λ j, hom_of_le (complete_lattice.Inf_le _ _ (set.mem_range_self _)) } },
is_limit :=
{ lift := λ s, hom_of_le (complete_lattice.le_Inf _ _
begin rintros _ ⟨j, rfl⟩, exact (s.π.app j).le, end) } }
/--
The colimit cocone over any functor into a complete lattice.
-/
def colimit_cocone (F : J ⥤ α) : colimit_cocone F :=
{ cocone :=
{ X := supr F.obj,
ι :=
{ app := λ j, hom_of_le (complete_lattice.le_Sup _ _ (set.mem_range_self _)) } },
is_colimit :=
{ desc := λ s, hom_of_le (complete_lattice.Sup_le _ _
begin rintros _ ⟨j, rfl⟩, exact (s.ι.app j).le, end) } }
-- It would be nice to only use the `Inf` half of the complete lattice, but
-- this seems not to have been described separately.
@[priority 100] -- see Note [lower instance priority]
instance has_limits_of_complete_lattice : has_limits α :=
{ has_limits_of_shape := λ J 𝒥, by exactI
{ has_limit := λ F, has_limit.mk (limit_cone F) } }
@[priority 100] -- see Note [lower instance priority]
instance has_colimits_of_complete_lattice : has_colimits α :=
{ has_colimits_of_shape := λ J 𝒥, by exactI
{ has_colimit := λ F, has_colimit.mk (colimit_cocone F) } }
/--
The limit of a functor into a complete lattice is the infimum of the objects in the image.
-/
lemma limit_eq_infi (F : J ⥤ α) : limit F = infi F.obj :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit F)
(limit_cone F).is_limit).to_eq
/--
The colimit of a functor into a complete lattice is the supremum of the objects in the image.
-/
lemma colimit_eq_supr (F : J ⥤ α) : colimit F = supr F.obj :=
(is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit F)
(colimit_cocone F).is_colimit).to_eq
end category_theory.limits.complete_lattice
|
State Before: α : Type u_1
N : α → Type u_2
inst✝² : DecidableEq α
inst✝¹ : (a : α) → DecidableEq (N a)
inst✝ : (a : α) → Zero (N a)
f✝ g✝ f g : Π₀ (a : α), N a
a : α
⊢ a ∈ neLocus f g ↔ ↑f a ≠ ↑g a State After: no goals Tactic: simpa only [neLocus, Finset.mem_filter, Finset.mem_union, mem_support_iff,
and_iff_right_iff_imp] using Ne.ne_or_ne _ |
{-
Pointed structure: X ↦ X
-}
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Structures.Pointed where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Univalence
open import Cubical.Foundations.SIP
open import Cubical.Foundations.Pointed.Base
private
variable
ℓ : Level
-- Structured isomorphisms
PointedStructure : Type ℓ → Type ℓ
PointedStructure X = X
PointedEquivStr : StrEquiv PointedStructure ℓ
PointedEquivStr A B f = equivFun f (pt A) ≡ pt B
pointedUnivalentStr : UnivalentStr {ℓ} PointedStructure PointedEquivStr
pointedUnivalentStr f = invEquiv (ua-ungluePath-Equiv f)
pointedSIP : (A B : Pointed ℓ) → A ≃[ PointedEquivStr ] B ≃ (A ≡ B)
pointedSIP = SIP pointedUnivalentStr
pointed-sip : (A B : Pointed ℓ) → A ≃[ PointedEquivStr ] B → (A ≡ B)
pointed-sip A B = equivFun (pointedSIP A B) -- ≡ λ (e , p) i → ua e i , ua-gluePath e p i
pointedEquivAction : EquivAction {ℓ} PointedStructure
pointedEquivAction e = e
pointedTransportStr : TransportStr {ℓ} pointedEquivAction
pointedTransportStr e s = sym (transportRefl _)
|
% forcecol.m
% function to force a vector to be a single column
%
% Brian Birge
% Rev 1.0
% 7/1/98
function[out]=forcecol(in)
len=prod(size(in));
out=reshape(in,[len,1]);
|
\chapter{Alternative Perspective on SIP Solutions in Example}\label{app:pde-example}
We solve our SIP for twenty different realizations of the noise polluting our hundred measurements, and show the resulting solutions when the first twenty and then all hundred measurements are used to construct the vector--valued map.
The functions induced by these MUD points are shown in Fig.~\ref{fig:pde-highd-2d-vector-mud}; unlike the solutions in Fig.~\ref{fig:pde-highd-2d-scalar-mud}, they no longer place the minimum value of $g$ at the wrong knot point, even when only twenty data points are used to construct $Q$.
By the time all hundred measurements are incorporated, the MUD solutions appear to be tracing out curves between the projection function and the interpolant function, a far more accurate set of predictions than those from the scalar--valued map (see Fig.~\ref{fig:pde-highd-2d-scalar-mud}).
\begin{figure}[htbp]
\centering
\includegraphics[width=0.6\linewidth]{figures/pde-highd/pde-highd_pair_D2-2_m20.png}
\includegraphics[width=0.6\linewidth]{figures/pde-highd/pde-highd_pair_D2-2_m100.png}
\caption{
    (Top): Vectorizing our QoI map achieves more accuracy with fewer measurements; the MUD solutions vary far less than in the bottom of Fig.~\ref{fig:pde-highd-2d-scalar-mud}.
  (Bottom): When all 100 measurements are incorporated, the MUD solutions trace out curves between the projection and interpolant functions.
}
\label{fig:pde-highd-2d-vector-mud}
\end{figure}
How we incorporate the available data has a dramatic impact on our ability to reduce uncertainty in the parameter space.
We illustrate this by asking the reader to compare the initial samples shown in Fig.~\ref{fig:pde-highd-initial-2d} with the solutions in Figs.~\ref{fig:pde-highd-2d-scalar-mud} and \ref{fig:pde-highd-2d-vector-mud}.
To quantify the differences between the two types of maps more rigorously, we study how close each solution is to $g$ in function space.
Since the approximation $\hat{g}$ arising from a MUD point is piecewise-linear and $g$ is continuous, we use the knot points and the trapezoidal rule (via {\tt scipy}) to approximate the $L^2$ norm of $\abs{g - \hat{g}}$. We then plot histograms of this error for the scalar- and vector-valued $\param$s whose relative ratios exceeded a threshold, compare them against samples from the initial density, and show the result in Figure~\ref{fig:pde-highd-2d-hist}.
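Concretely, with knot points $t_0 < t_1 < \dots < t_n$ the trapezoidal rule approximates the squared error as
\[
  \left\| g - \hat{g} \right\|_{L^2}^2 \approx \sum_{i=0}^{n-1} \frac{t_{i+1} - t_i}{2} \left[ \left(g - \hat{g}\right)^2(t_i) + \left(g - \hat{g}\right)^2(t_{i+1}) \right].
\]
The computation can be sketched as follows; the array names, the synthetic stand-in for $g$, and the call to {\tt scipy.integrate.trapezoid} are illustrative assumptions rather than the exact code used in our study:
\begin{verbatim}
import numpy as np
from scipy.integrate import trapezoid  # older SciPy releases expose this as trapz

# hypothetical knot points and function values; in practice g_hat comes from
# evaluating the piecewise-linear approximation induced by a MUD point
knots  = np.linspace(0.0, 1.0, 101)                    # knot points t_i
g_true = np.sin(2.0 * np.pi * knots)                   # stand-in for the true g
g_hat  = np.interp(knots, knots[::10], g_true[::10])   # piecewise-linear interpolant

# trapezoidal approximation of the squared L^2 error, then its square root
l2_err = np.sqrt(trapezoid((g_true - g_hat) ** 2, knots))
\end{verbatim}
Any standard quadrature over the knot points would serve equally well; the trapezoidal rule is simply the natural choice for a piecewise-linear $\hat{g}$.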
\begin{figure}[htbp]
\centering
\includegraphics[width=0.675\linewidth]{figures/pde-highd/pde-highd_hist_D2_t5-0E-01}
\caption{
Histograms comparing initial samples with the highest probabilities (relative ratio $> 0.5$).
}
\label{fig:pde-highd-2d-hist}
\end{figure}
In Fig.~\ref{fig:pde-highd-2d-hist}, the histograms are normalized for comparison; while the scalar--valued map still reduces the uncertainty present in our initial density, the vector--valued map is considerably more accurate.
The multi-modal nature of the scalar--valued histogram plot shows a lack of resolution that is not experienced at all by the vector--valued solution.
Both QoI maps solve a stochastic inverse problem, but the latter better respects the geometry of the response surface and therefore learns significantly more, bringing us far closer to the true function $g$.
The multi-modal density of probable samples corresponds to the two types of solutions seen in Figure~\ref{fig:pde-highd-2d-scalar-mud}, whereas we are after a single parameter estimate of the truth.
|
State Before: α : Type u
β : Type v
γ : Type w
ι : Sort x
inst✝² : Preorder α
inst✝¹ : Preorder β
s✝ t : Set α
a✝ b✝ : α
inst✝ : SemilatticeSup γ
a b : γ
s : Set γ
hs : IsLUB s b
⊢ IsLUB (Insert.insert a s) (a ⊔ b) State After: α : Type u
β : Type v
γ : Type w
ι : Sort x
inst✝² : Preorder α
inst✝¹ : Preorder β
s✝ t : Set α
a✝ b✝ : α
inst✝ : SemilatticeSup γ
a b : γ
s : Set γ
hs : IsLUB s b
⊢ IsLUB ({a} ∪ s) (a ⊔ b) Tactic: rw [insert_eq] State Before: α : Type u
β : Type v
γ : Type w
ι : Sort x
inst✝² : Preorder α
inst✝¹ : Preorder β
s✝ t : Set α
a✝ b✝ : α
inst✝ : SemilatticeSup γ
a b : γ
s : Set γ
hs : IsLUB s b
⊢ IsLUB ({a} ∪ s) (a ⊔ b) State After: no goals Tactic: exact isLUB_singleton.union hs |
lemma limitin_topspace: "limitin X f l F \<Longrightarrow> l \<in> topspace X" |
{-# OPTIONS --without-K #-}
module function where
open import function.core public
open import function.overloading public
open import function.isomorphism public
open import function.extensionality public
open import function.fibration public
|
(* Title: HOL/Induct/Comb.thy
Author: Lawrence C Paulson
Copyright 1996 University of Cambridge
*)
section \<open>Combinatory Logic example: the Church-Rosser Theorem\<close>
theory Comb
imports Main
begin
text \<open>
Curiously, combinators do not include free variables.
Example taken from @{cite camilleri92}.
\<close>
subsection \<open>Definitions\<close>
text \<open>Datatype definition of combinators \<open>S\<close> and \<open>K\<close>.\<close>
datatype comb = K
| S
| Ap comb comb (infixl "\<bullet>" 90)
text \<open>
Inductive definition of contractions, \<open>\<rightarrow>\<^sup>1\<close> and
(multi-step) reductions, \<open>\<rightarrow>\<close>.
\<close>
inductive_set contract :: "(comb*comb) set"
and contract_rel1 :: "[comb,comb] \<Rightarrow> bool" (infixl "\<rightarrow>\<^sup>1" 50)
where
"x \<rightarrow>\<^sup>1 y == (x,y) \<in> contract"
| K: "K\<bullet>x\<bullet>y \<rightarrow>\<^sup>1 x"
| S: "S\<bullet>x\<bullet>y\<bullet>z \<rightarrow>\<^sup>1 (x\<bullet>z)\<bullet>(y\<bullet>z)"
| Ap1: "x\<rightarrow>\<^sup>1y \<Longrightarrow> x\<bullet>z \<rightarrow>\<^sup>1 y\<bullet>z"
| Ap2: "x\<rightarrow>\<^sup>1y \<Longrightarrow> z\<bullet>x \<rightarrow>\<^sup>1 z\<bullet>y"
abbreviation
contract_rel :: "[comb,comb] \<Rightarrow> bool" (infixl "\<rightarrow>" 50) where
"x \<rightarrow> y == (x,y) \<in> contract\<^sup>*"
text \<open>
Inductive definition of parallel contractions, \<open>\<Rrightarrow>\<^sup>1\<close> and
(multi-step) parallel reductions, \<open>\<Rrightarrow>\<close>.
\<close>
inductive_set parcontract :: "(comb*comb) set"
and parcontract_rel1 :: "[comb,comb] \<Rightarrow> bool" (infixl "\<Rrightarrow>\<^sup>1" 50)
where
"x \<Rrightarrow>\<^sup>1 y == (x,y) \<in> parcontract"
| refl: "x \<Rrightarrow>\<^sup>1 x"
| K: "K\<bullet>x\<bullet>y \<Rrightarrow>\<^sup>1 x"
| S: "S\<bullet>x\<bullet>y\<bullet>z \<Rrightarrow>\<^sup>1 (x\<bullet>z)\<bullet>(y\<bullet>z)"
| Ap: "[| x\<Rrightarrow>\<^sup>1y; z\<Rrightarrow>\<^sup>1w |] ==> x\<bullet>z \<Rrightarrow>\<^sup>1 y\<bullet>w"
abbreviation
parcontract_rel :: "[comb,comb] \<Rightarrow> bool" (infixl "\<Rrightarrow>" 50) where
"x \<Rrightarrow> y == (x,y) \<in> parcontract\<^sup>*"
text \<open>
Misc definitions.
\<close>
definition
I :: comb where
"I = S\<bullet>K\<bullet>K"
definition
diamond :: "('a * 'a)set \<Rightarrow> bool" where
\<comment>\<open>confluence; Lambda/Commutation treats this more abstractly\<close>
"diamond(r) = (\<forall>x y. (x,y) \<in> r -->
(\<forall>y'. (x,y') \<in> r -->
(\<exists>z. (y,z) \<in> r & (y',z) \<in> r)))"
subsection \<open>Reflexive/Transitive closure preserves Church-Rosser property\<close>
text\<open>So does the Transitive closure, with a similar proof\<close>
text\<open>Strip lemma.
The induction hypothesis covers all but the last diamond of the strip.\<close>
lemma diamond_strip_lemmaE [rule_format]:
"[| diamond(r); (x,y) \<in> r\<^sup>* |] ==>
\<forall>y'. (x,y') \<in> r --> (\<exists>z. (y',z) \<in> r\<^sup>* & (y,z) \<in> r)"
apply (unfold diamond_def)
apply (erule rtrancl_induct)
apply (meson rtrancl_refl)
apply (meson rtrancl_trans r_into_rtrancl)
done
lemma diamond_rtrancl: "diamond(r) \<Longrightarrow> diamond(r\<^sup>*)"
apply (simp (no_asm_simp) add: diamond_def)
apply (rule impI [THEN allI, THEN allI])
apply (erule rtrancl_induct, blast)
apply (meson rtrancl_trans r_into_rtrancl diamond_strip_lemmaE)
done
subsection \<open>Non-contraction results\<close>
text \<open>Derive a case for each combinator constructor.\<close>
inductive_cases
K_contractE [elim!]: "K \<rightarrow>\<^sup>1 r"
and S_contractE [elim!]: "S \<rightarrow>\<^sup>1 r"
and Ap_contractE [elim!]: "p\<bullet>q \<rightarrow>\<^sup>1 r"
declare contract.K [intro!] contract.S [intro!]
declare contract.Ap1 [intro] contract.Ap2 [intro]
lemma I_contract_E [elim!]: "I \<rightarrow>\<^sup>1 z \<Longrightarrow> P"
by (unfold I_def, blast)
lemma K1_contractD [elim!]: "K\<bullet>x \<rightarrow>\<^sup>1 z \<Longrightarrow> (\<exists>x'. z = K\<bullet>x' & x \<rightarrow>\<^sup>1 x')"
by blast
lemma Ap_reduce1 [intro]: "x \<rightarrow> y \<Longrightarrow> x\<bullet>z \<rightarrow> y\<bullet>z"
apply (erule rtrancl_induct)
apply (blast intro: rtrancl_trans)+
done
lemma Ap_reduce2 [intro]: "x \<rightarrow> y \<Longrightarrow> z\<bullet>x \<rightarrow> z\<bullet>y"
apply (erule rtrancl_induct)
apply (blast intro: rtrancl_trans)+
done
text \<open>Counterexample to the diamond property for @{term "x \<rightarrow>\<^sup>1 y"}\<close>
lemma not_diamond_contract: "~ diamond(contract)"
by (unfold diamond_def, metis S_contractE contract.K)
subsection \<open>Results about Parallel Contraction\<close>
text \<open>Derive a case for each combinator constructor.\<close>
inductive_cases
K_parcontractE [elim!]: "K \<Rrightarrow>\<^sup>1 r"
and S_parcontractE [elim!]: "S \<Rrightarrow>\<^sup>1 r"
and Ap_parcontractE [elim!]: "p\<bullet>q \<Rrightarrow>\<^sup>1 r"
declare parcontract.intros [intro]
(*** Basic properties of parallel contraction ***)
subsection \<open>Basic properties of parallel contraction\<close>
lemma K1_parcontractD [dest!]: "K\<bullet>x \<Rrightarrow>\<^sup>1 z \<Longrightarrow> (\<exists>x'. z = K\<bullet>x' & x \<Rrightarrow>\<^sup>1 x')"
by blast
lemma S1_parcontractD [dest!]: "S\<bullet>x \<Rrightarrow>\<^sup>1 z \<Longrightarrow> (\<exists>x'. z = S\<bullet>x' & x \<Rrightarrow>\<^sup>1 x')"
by blast
lemma S2_parcontractD [dest!]:
"S\<bullet>x\<bullet>y \<Rrightarrow>\<^sup>1 z \<Longrightarrow> (\<exists>x' y'. z = S\<bullet>x'\<bullet>y' & x \<Rrightarrow>\<^sup>1 x' & y \<Rrightarrow>\<^sup>1 y')"
by blast
text\<open>The rules above are not essential but make proofs much faster\<close>
text\<open>Church-Rosser property for parallel contraction\<close>
lemma diamond_parcontract: "diamond parcontract"
apply (unfold diamond_def)
apply (rule impI [THEN allI, THEN allI])
apply (erule parcontract.induct, fast+)
done
text \<open>
\<^medskip>
Equivalence of @{prop "p \<rightarrow> q"} and @{prop "p \<Rrightarrow> q"}.
\<close>
lemma contract_subset_parcontract: "contract \<subseteq> parcontract"
by (auto, erule contract.induct, blast+)
text\<open>Reductions: simply throw together reflexivity, transitivity and
the one-step reductions\<close>
declare r_into_rtrancl [intro] rtrancl_trans [intro]
(*Example only: not used*)
lemma reduce_I: "I\<bullet>x \<rightarrow> x"
by (unfold I_def, blast)
lemma parcontract_subset_reduce: "parcontract \<subseteq> contract\<^sup>*"
by (auto, erule parcontract.induct, blast+)
lemma reduce_eq_parreduce: "contract\<^sup>* = parcontract\<^sup>*"
by (metis contract_subset_parcontract parcontract_subset_reduce rtrancl_subset)
theorem diamond_reduce: "diamond(contract\<^sup>*)"
by (simp add: reduce_eq_parreduce diamond_rtrancl diamond_parcontract)
end
|
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.filter.basic
/-!
# N-ary maps of filter
This file defines the binary and ternary maps of filters. This is mostly useful to define pointwise
operations on filters.
## Main declarations
* `filter.map₂`: Binary map of filters.
* `filter.map₃`: Ternary map of filters.
## Notes
This file is very similar to the n-ary section of `data.set.basic` and to `data.finset.n_ary`.
Please keep them in sync.
-/
open function set
open_locale filter
namespace filter
variables {α α' β β' γ γ' δ δ' ε ε' : Type*} {m : α → β → γ} {f f₁ f₂ : filter α}
{g g₁ g₂ : filter β} {h h₁ h₂ : filter γ} {s s₁ s₂ : set α} {t t₁ t₂ : set β} {u : set γ}
{v : set δ} {a : α} {b : β} {c : γ}
/-- The image of a binary function `m : α → β → γ` as a function `filter α → filter β → filter γ`.
Mathematically this should be thought of as the image of the corresponding function `α × β → γ`. -/
def map₂ (m : α → β → γ) (f : filter α) (g : filter β) : filter γ :=
{ sets := {s | ∃ u v, u ∈ f ∧ v ∈ g ∧ image2 m u v ⊆ s},
univ_sets := ⟨univ, univ, univ_sets _, univ_sets _, subset_univ _⟩,
sets_of_superset := λ s t hs hst,
Exists₂.imp (λ u v, and.imp_right $ and.imp_right $ λ h, subset.trans h hst) hs,
inter_sets := λ s t,
begin
simp only [exists_prop, mem_set_of_eq, subset_inter_iff],
rintro ⟨s₁, s₂, hs₁, hs₂, hs⟩ ⟨t₁, t₂, ht₁, ht₂, ht⟩,
exact ⟨s₁ ∩ t₁, s₂ ∩ t₂, inter_sets f hs₁ ht₁, inter_sets g hs₂ ht₂,
(image2_subset (inter_subset_left _ _) $ inter_subset_left _ _).trans hs,
(image2_subset (inter_subset_right _ _) $ inter_subset_right _ _).trans ht⟩,
end }
@[simp] lemma mem_map₂_iff : u ∈ map₂ m f g ↔ ∃ s t, s ∈ f ∧ t ∈ g ∧ image2 m s t ⊆ u := iff.rfl
lemma image2_mem_map₂ (hs : s ∈ f) (ht : t ∈ g) : image2 m s t ∈ map₂ m f g :=
⟨_, _, hs, ht, subset.rfl⟩
lemma map_prod_eq_map₂ (m : α → β → γ) (f : filter α) (g : filter β) :
filter.map (λ p : α × β, m p.1 p.2) (f ×ᶠ g) = map₂ m f g :=
begin
ext s,
split,
{ intro hmem,
rw filter.mem_map_iff_exists_image at hmem,
obtain ⟨s', hs', hsub⟩ := hmem,
rw filter.mem_prod_iff at hs',
obtain ⟨t, ht, t', ht', hsub'⟩ := hs',
refine ⟨t, t', ht, ht', _⟩,
rw ← set.image_prod,
exact subset_trans (set.image_subset (λ (p : α × β), m p.fst p.snd) hsub') hsub },
{ intro hmem,
rw mem_map₂_iff at hmem,
obtain ⟨t, t', ht, ht', hsub⟩ := hmem,
rw ← set.image_prod at hsub,
rw filter.mem_map_iff_exists_image,
exact ⟨t ×ˢ t', filter.prod_mem_prod ht ht', hsub⟩ },
end
lemma map_prod_eq_map₂' (m : α × β → γ) (f : filter α) (g : filter β) :
filter.map m (f ×ᶠ g) = map₂ (λ a b, m (a, b)) f g :=
by { refine eq.trans _ (map_prod_eq_map₂ (curry m) f g), ext, simp }
-- lemma image2_mem_map₂_iff (hm : injective2 m) : image2 m s t ∈ map₂ m f g ↔ s ∈ f ∧ t ∈ g :=
-- ⟨by { rintro ⟨u, v, hu, hv, h⟩, rw image2_subset_image2_iff hm at h,
-- exact ⟨mem_of_superset hu h.1, mem_of_superset hv h.2⟩ }, λ h, image2_mem_map₂ h.1 h.2⟩
lemma map₂_mono (hf : f₁ ≤ f₂) (hg : g₁ ≤ g₂) : map₂ m f₁ g₁ ≤ map₂ m f₂ g₂ :=
λ _ ⟨s, t, hs, ht, hst⟩, ⟨s, t, hf hs, hg ht, hst⟩
lemma map₂_mono_left (h : g₁ ≤ g₂) : map₂ m f g₁ ≤ map₂ m f g₂ := map₂_mono subset.rfl h
lemma map₂_mono_right (h : f₁ ≤ f₂) : map₂ m f₁ g ≤ map₂ m f₂ g := map₂_mono h subset.rfl
@[simp] lemma le_map₂_iff {h : filter γ} :
h ≤ map₂ m f g ↔ ∀ ⦃s⦄, s ∈ f → ∀ ⦃t⦄, t ∈ g → image2 m s t ∈ h :=
⟨λ H s hs t ht, H $ image2_mem_map₂ hs ht, λ H u ⟨s, t, hs, ht, hu⟩, mem_of_superset (H hs ht) hu⟩
@[simp] lemma map₂_bot_left : map₂ m ⊥ g = ⊥ :=
empty_mem_iff_bot.1 ⟨∅, univ, trivial, univ_mem, (image2_empty_left).subset⟩
@[simp] lemma map₂_bot_right : map₂ m f ⊥ = ⊥ :=
empty_mem_iff_bot.1 ⟨univ, ∅, univ_mem, trivial, (image2_empty_right).subset⟩
@[simp] lemma map₂_eq_bot_iff : map₂ m f g = ⊥ ↔ f = ⊥ ∨ g = ⊥ :=
begin
simp only [←empty_mem_iff_bot, mem_map₂_iff, subset_empty_iff, image2_eq_empty_iff],
split,
{ rintro ⟨s, t, hs, ht, rfl | rfl⟩,
{ exact or.inl hs },
{ exact or.inr ht } },
{ rintro (h | h),
{ exact ⟨_, _, h, univ_mem, or.inl rfl⟩ },
{ exact ⟨_, _, univ_mem, h, or.inr rfl⟩ } }
end
@[simp] lemma map₂_ne_bot_iff : (map₂ m f g).ne_bot ↔ f.ne_bot ∧ g.ne_bot :=
by { simp_rw ne_bot_iff, exact map₂_eq_bot_iff.not.trans not_or_distrib }
lemma ne_bot.map₂ (hf : f.ne_bot) (hg : g.ne_bot) : (map₂ m f g).ne_bot :=
map₂_ne_bot_iff.2 ⟨hf, hg⟩
lemma ne_bot.of_map₂_left (h : (map₂ m f g).ne_bot) : f.ne_bot := (map₂_ne_bot_iff.1 h).1
lemma ne_bot.of_map₂_right (h : (map₂ m f g).ne_bot) : g.ne_bot := (map₂_ne_bot_iff.1 h).2
lemma map₂_sup_left : map₂ m (f₁ ⊔ f₂) g = map₂ m f₁ g ⊔ map₂ m f₂ g :=
begin
ext u,
split,
{ rintro ⟨s, t, ⟨h₁, h₂⟩, ht, hu⟩,
exact ⟨mem_of_superset (image2_mem_map₂ h₁ ht) hu,
mem_of_superset (image2_mem_map₂ h₂ ht) hu⟩ },
{ rintro ⟨⟨s₁, t₁, hs₁, ht₁, hu₁⟩, s₂, t₂, hs₂, ht₂, hu₂⟩,
refine ⟨s₁ ∪ s₂, t₁ ∩ t₂, union_mem_sup hs₁ hs₂, inter_mem ht₁ ht₂, _⟩,
rw image2_union_left,
exact union_subset ((image2_subset_left $ inter_subset_left _ _).trans hu₁)
((image2_subset_left $ inter_subset_right _ _).trans hu₂) }
end
lemma map₂_sup_right : map₂ m f (g₁ ⊔ g₂) = map₂ m f g₁ ⊔ map₂ m f g₂ :=
begin
ext u,
split,
{ rintro ⟨s, t, hs, ⟨h₁, h₂⟩, hu⟩,
exact ⟨mem_of_superset (image2_mem_map₂ hs h₁) hu,
mem_of_superset (image2_mem_map₂ hs h₂) hu⟩ },
{ rintro ⟨⟨s₁, t₁, hs₁, ht₁, hu₁⟩, s₂, t₂, hs₂, ht₂, hu₂⟩,
refine ⟨s₁ ∩ s₂, t₁ ∪ t₂, inter_mem hs₁ hs₂, union_mem_sup ht₁ ht₂, _⟩,
rw image2_union_right,
exact union_subset ((image2_subset_right $ inter_subset_left _ _).trans hu₁)
((image2_subset_right $ inter_subset_right _ _).trans hu₂) }
end
lemma map₂_inf_subset_left : map₂ m (f₁ ⊓ f₂) g ≤ map₂ m f₁ g ⊓ map₂ m f₂ g :=
le_inf (map₂_mono_right inf_le_left) (map₂_mono_right inf_le_right)
lemma map₂_inf_subset_right : map₂ m f (g₁ ⊓ g₂) ≤ map₂ m f g₁ ⊓ map₂ m f g₂ :=
le_inf (map₂_mono_left inf_le_left) (map₂_mono_left inf_le_right)
@[simp] lemma map₂_pure_left : map₂ m (pure a) g = g.map (λ b, m a b) :=
filter.ext $ λ u, ⟨λ ⟨s, t, hs, ht, hu⟩,
mem_of_superset (image_mem_map ht) ((image_subset_image2_right $ mem_pure.1 hs).trans hu),
λ h, ⟨{a}, _, singleton_mem_pure, h, by rw [image2_singleton_left, image_subset_iff]⟩⟩
@[simp] lemma map₂_pure_right : map₂ m f (pure b) = f.map (λ a, m a b) :=
filter.ext $ λ u, ⟨λ ⟨s, t, hs, ht, hu⟩,
mem_of_superset (image_mem_map hs) ((image_subset_image2_left $ mem_pure.1 ht).trans hu),
λ h, ⟨_, {b}, h, singleton_mem_pure, by rw [image2_singleton_right, image_subset_iff]⟩⟩
lemma map₂_pure : map₂ m (pure a) (pure b) = pure (m a b) := by rw [map₂_pure_right, map_pure]
lemma map₂_swap (m : α → β → γ) (f : filter α) (g : filter β) :
map₂ m f g = map₂ (λ a b, m b a) g f :=
by { ext u, split; rintro ⟨s, t, hs, ht, hu⟩; refine ⟨t, s, ht, hs, by rwa image2_swap⟩ }
@[simp] lemma map₂_left (h : g.ne_bot) : map₂ (λ x y, x) f g = f :=
begin
ext u,
refine ⟨_, λ hu, ⟨_, _, hu, univ_mem, (image2_left $ h.nonempty_of_mem univ_mem).subset⟩⟩,
rintro ⟨s, t, hs, ht, hu⟩,
rw image2_left (h.nonempty_of_mem ht) at hu,
exact mem_of_superset hs hu,
end
@[simp] lemma map₂_right (h : f.ne_bot) : map₂ (λ x y, y) f g = g := by rw [map₂_swap, map₂_left h]
/-- The image of a ternary function `m : α → β → γ → δ` as a function
`filter α → filter β → filter γ → filter δ`. Mathematically this should be thought of as the image
of the corresponding function `α × β × γ → δ`. -/
def map₃ (m : α → β → γ → δ) (f : filter α) (g : filter β) (h : filter γ) : filter δ :=
{ sets := {s | ∃ u v w, u ∈ f ∧ v ∈ g ∧ w ∈ h ∧ image3 m u v w ⊆ s},
univ_sets := ⟨univ, univ, univ, univ_sets _, univ_sets _, univ_sets _, subset_univ _⟩,
sets_of_superset := λ s t hs hst, Exists₃.imp
(λ u v w, and.imp_right $ and.imp_right $ and.imp_right $ λ h, subset.trans h hst) hs,
inter_sets := λ s t,
begin
simp only [exists_prop, mem_set_of_eq, subset_inter_iff],
rintro ⟨s₁, s₂, s₃, hs₁, hs₂, hs₃, hs⟩ ⟨t₁, t₂, t₃, ht₁, ht₂, ht₃, ht⟩,
exact ⟨s₁ ∩ t₁, s₂ ∩ t₂, s₃ ∩ t₃, inter_mem hs₁ ht₁, inter_mem hs₂ ht₂, inter_mem hs₃ ht₃,
(image3_mono (inter_subset_left _ _) (inter_subset_left _ _) $ inter_subset_left _ _).trans
hs,
(image3_mono (inter_subset_right _ _) (inter_subset_right _ _) $ inter_subset_right _ _).trans
ht⟩,
end }
lemma map₂_map₂_left (m : δ → γ → ε) (n : α → β → δ) :
map₂ m (map₂ n f g) h = map₃ (λ a b c, m (n a b) c) f g h :=
begin
ext w,
split,
{ rintro ⟨s, t, ⟨u, v, hu, hv, hs⟩, ht, hw⟩,
refine ⟨u, v, t, hu, hv, ht, _⟩,
rw ←image2_image2_left,
exact (image2_subset_right hs).trans hw },
{ rintro ⟨s, t, u, hs, ht, hu, hw⟩,
exact ⟨_, u, image2_mem_map₂ hs ht, hu, by rwa image2_image2_left⟩ }
end
lemma map₂_map₂_right (m : α → δ → ε) (n : β → γ → δ) :
map₂ m f (map₂ n g h) = map₃ (λ a b c, m a (n b c)) f g h :=
begin
ext w,
split,
{ rintro ⟨s, t, hs, ⟨u, v, hu, hv, ht⟩, hw⟩,
refine ⟨s, u, v, hs, hu, hv, _⟩,
rw ←image2_image2_right,
exact (image2_subset_left ht).trans hw },
{ rintro ⟨s, t, u, hs, ht, hu, hw⟩,
exact ⟨s, _, hs, image2_mem_map₂ ht hu, by rwa image2_image2_right⟩ }
end
lemma map_map₂ (m : α → β → γ) (n : γ → δ) : (map₂ m f g).map n = map₂ (λ a b, n (m a b)) f g :=
filter.ext $ λ u, exists₂_congr $ λ s t, by rw [←image_subset_iff, image_image2]
lemma map₂_map_left (m : γ → β → δ) (n : α → γ) :
map₂ m (f.map n) g = map₂ (λ a b, m (n a) b) f g :=
begin
ext u,
split,
{ rintro ⟨s, t, hs, ht, hu⟩,
refine ⟨_, t, hs, ht, _⟩,
rw ←image2_image_left,
exact (image2_subset_right $ image_preimage_subset _ _).trans hu },
{ rintro ⟨s, t, hs, ht, hu⟩,
exact ⟨_, t, image_mem_map hs, ht, by rwa image2_image_left⟩ }
end
lemma map₂_map_right (m : α → γ → δ) (n : β → γ) :
map₂ m f (g.map n) = map₂ (λ a b, m a (n b)) f g :=
by rw [map₂_swap, map₂_map_left, map₂_swap]
/-!
### Algebraic replacement rules
A collection of lemmas to transfer associativity, commutativity, distributivity, ... of operations
to the associativity, commutativity, distributivity, ... of `filter.map₂` of those operations.
The proof pattern is `map₂_lemma operation_lemma`. For example, `map₂_comm mul_comm` proves that
`map₂ (*) f g = map₂ (*) g f` in a `comm_semigroup`.
-/
lemma map₂_assoc {m : δ → γ → ε} {n : α → β → δ} {m' : α → ε' → ε} {n' : β → γ → ε'}
{h : filter γ} (h_assoc : ∀ a b c, m (n a b) c = m' a (n' b c)) :
map₂ m (map₂ n f g) h = map₂ m' f (map₂ n' g h) :=
by simp only [map₂_map₂_left, map₂_map₂_right, h_assoc]
lemma map₂_comm {n : β → α → γ} (h_comm : ∀ a b, m a b = n b a) : map₂ m f g = map₂ n g f :=
(map₂_swap _ _ _).trans $ by simp_rw h_comm
lemma map₂_left_comm {m : α → δ → ε} {n : β → γ → δ} {m' : α → γ → δ'} {n' : β → δ' → ε}
(h_left_comm : ∀ a b c, m a (n b c) = n' b (m' a c)) :
map₂ m f (map₂ n g h) = map₂ n' g (map₂ m' f h) :=
by { rw [map₂_swap m', map₂_swap m], exact map₂_assoc (λ _ _ _, h_left_comm _ _ _) }
lemma map₂_right_comm {m : δ → γ → ε} {n : α → β → δ} {m' : α → γ → δ'} {n' : δ' → β → ε}
(h_right_comm : ∀ a b c, m (n a b) c = n' (m' a c) b) :
map₂ m (map₂ n f g) h = map₂ n' (map₂ m' f h) g :=
by { rw [map₂_swap n, map₂_swap n'], exact map₂_assoc (λ _ _ _, h_right_comm _ _ _) }
lemma map_map₂_distrib {n : γ → δ} {m' : α' → β' → δ} {n₁ : α → α'} {n₂ : β → β'}
(h_distrib : ∀ a b, n (m a b) = m' (n₁ a) (n₂ b)) :
(map₂ m f g).map n = map₂ m' (f.map n₁) (g.map n₂) :=
by simp_rw [map_map₂, map₂_map_left, map₂_map_right, h_distrib]
/-- Symmetric of `filter.map₂_map_left_comm`. -/
lemma map_map₂_distrib_left {n : γ → δ} {m' : α' → β → δ} {n' : α → α'}
(h_distrib : ∀ a b, n (m a b) = m' (n' a) b) :
(map₂ m f g).map n = map₂ m' (f.map n') g :=
map_map₂_distrib h_distrib
/-- Symmetric of `filter.map_map₂_right_comm`. -/
lemma map_map₂_distrib_right {n : γ → δ} {m' : α → β' → δ} {n' : β → β'}
(h_distrib : ∀ a b, n (m a b) = m' a (n' b)) :
(map₂ m f g).map n = map₂ m' f (g.map n') :=
map_map₂_distrib h_distrib
/-- Symmetric of `filter.map_map₂_distrib_left`. -/
lemma map₂_map_left_comm {m : α' → β → γ} {n : α → α'} {m' : α → β → δ} {n' : δ → γ}
(h_left_comm : ∀ a b, m (n a) b = n' (m' a b)) :
map₂ m (f.map n) g = (map₂ m' f g).map n' :=
(map_map₂_distrib_left $ λ a b, (h_left_comm a b).symm).symm
/-- Symmetric of `filter.map_map₂_distrib_right`. -/
lemma map_map₂_right_comm {m : α → β' → γ} {n : β → β'} {m' : α → β → δ} {n' : δ → γ}
  (h_right_comm : ∀ a b, m a (n b) = n' (m' a b)) :
  map₂ m f (g.map n) = (map₂ m' f g).map n' :=
(map_map₂_distrib_right $ λ a b, (h_right_comm a b).symm).symm
/-- The other direction does not hold because of the `f`-`f` cross terms on the RHS. -/
lemma map₂_distrib_le_left {m : α → δ → ε} {n : β → γ → δ} {m₁ : α → β → β'} {m₂ : α → γ → γ'}
{n' : β' → γ' → ε} (h_distrib : ∀ a b c, m a (n b c) = n' (m₁ a b) (m₂ a c)) :
map₂ m f (map₂ n g h) ≤ map₂ n' (map₂ m₁ f g) (map₂ m₂ f h) :=
begin
rintro s ⟨t₁, t₂, ⟨u₁, v, hu₁, hv, ht₁⟩, ⟨u₂, w, hu₂, hw, ht₂⟩, hs⟩,
refine ⟨u₁ ∩ u₂, _, inter_mem hu₁ hu₂, image2_mem_map₂ hv hw, _⟩,
refine (image2_distrib_subset_left h_distrib).trans ((image2_subset _ _).trans hs),
{ exact (image2_subset_right $ inter_subset_left _ _).trans ht₁ },
{ exact (image2_subset_right $ inter_subset_right _ _).trans ht₂ }
end
/-- The other direction does not hold because of the `h`-`h` cross terms on the RHS. -/
lemma map₂_distrib_le_right {m : δ → γ → ε} {n : α → β → δ} {m₁ : α → γ → α'}
{m₂ : β → γ → β'} {n' : α' → β' → ε} (h_distrib : ∀ a b c, m (n a b) c = n' (m₁ a c) (m₂ b c)) :
map₂ m (map₂ n f g) h ≤ map₂ n' (map₂ m₁ f h) (map₂ m₂ g h) :=
begin
rintro s ⟨t₁, t₂, ⟨u, w₁, hu, hw₁, ht₁⟩, ⟨v, w₂, hv, hw₂, ht₂⟩, hs⟩,
refine ⟨_, w₁ ∩ w₂, image2_mem_map₂ hu hv, inter_mem hw₁ hw₂, _⟩,
refine (image2_distrib_subset_right h_distrib).trans ((image2_subset _ _).trans hs),
{ exact (image2_subset_left $ inter_subset_left _ _).trans ht₁ },
{ exact (image2_subset_left $ inter_subset_right _ _).trans ht₂ }
end
lemma map_map₂_antidistrib {n : γ → δ} {m' : β' → α' → δ} {n₁ : β → β'} {n₂ : α → α'}
(h_antidistrib : ∀ a b, n (m a b) = m' (n₁ b) (n₂ a)) :
(map₂ m f g).map n = map₂ m' (g.map n₁) (f.map n₂) :=
by { rw map₂_swap m, exact map_map₂_distrib (λ _ _, h_antidistrib _ _) }
/-- Symmetric of `filter.map₂_map_left_anticomm`. -/
lemma map_map₂_antidistrib_left {n : γ → δ} {m' : β' → α → δ} {n' : β → β'}
(h_antidistrib : ∀ a b, n (m a b) = m' (n' b) a) :
(map₂ m f g).map n = map₂ m' (g.map n') f :=
map_map₂_antidistrib h_antidistrib
/-- Symmetric of `filter.map_map₂_right_anticomm`. -/
lemma map_map₂_antidistrib_right {n : γ → δ} {m' : β → α' → δ} {n' : α → α'}
(h_antidistrib : ∀ a b, n (m a b) = m' b (n' a)) :
(map₂ m f g).map n = map₂ m' g (f.map n') :=
map_map₂_antidistrib h_antidistrib
/-- Symmetric of `filter.map_map₂_antidistrib_left`. -/
lemma map₂_map_left_anticomm {m : α' → β → γ} {n : α → α'} {m' : β → α → δ} {n' : δ → γ}
(h_left_anticomm : ∀ a b, m (n a) b = n' (m' b a)) :
map₂ m (f.map n) g = (map₂ m' g f).map n' :=
(map_map₂_antidistrib_left $ λ a b, (h_left_anticomm b a).symm).symm
/-- Symmetric of `filter.map_map₂_antidistrib_right`. -/
lemma map_map₂_right_anticomm {m : α → β' → γ} {n : β → β'} {m' : β → α → δ} {n' : δ → γ}
(h_right_anticomm : ∀ a b, m a (n b) = n' (m' b a)) :
map₂ m f (g.map n) = (map₂ m' g f).map n' :=
(map_map₂_antidistrib_right $ λ a b, (h_right_anticomm b a).symm).symm
end filter
|
[STATEMENT]
lemma inorder_update:
"sorted1(inorder t) \<Longrightarrow> inorder(update a b t) = upd_list a b (inorder t)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sorted1 (inorder t) \<Longrightarrow> inorder (update a b t) = upd_list a b (inorder t)
[PROOF STEP]
by(induction t) (auto simp: upd_list_simps) |
function get_circle(obj::Edge)
return jcall(obj, "getCircle", Circle, ())
end
function get_end(obj::Edge)
return jcall(obj, "getEnd", Vertex, ())
end
function get_length(obj::Edge)
return jcall(obj, "getLength", jdouble, ())
end
function get_point_at(obj::Edge, arg0::jdouble)
return jcall(obj, "getPointAt", Vector3D, (jdouble,), arg0)
end
function get_start(obj::Edge)
return jcall(obj, "getStart", Vertex, ())
end
|
open import Level
open import Ordinals
module BAlgbra {n : Level } (O : Ordinals {n}) where
open import zf
open import logic
import OrdUtil
import OD
import ODUtil
import ODC
open import Relation.Nullary
open import Relation.Binary
open import Data.Empty
open import Relation.Binary
open import Relation.Binary.Core
open import Relation.Binary.PropositionalEquality
open import Data.Nat renaming ( zero to Zero ; suc to Suc ; ℕ to Nat ; _⊔_ to _n⊔_ ; _+_ to _n+_ )
open inOrdinal O
open Ordinals.Ordinals O
open Ordinals.IsOrdinals isOrdinal
open Ordinals.IsNext isNext
open OrdUtil O
open ODUtil O
open OD O
open OD.OD
open ODAxiom odAxiom
open HOD
open _∧_
open _∨_
open Bool
--_∩_ : ( A B : HOD ) → HOD
--A ∩ B = record { od = record { def = λ x → odef A x ∧ odef B x } ;
-- odmax = omin (odmax A) (odmax B) ; <odmax = λ y → min1 (<odmax A (proj1 y)) (<odmax B (proj2 y)) }
_∪_ : ( A B : HOD ) → HOD
A ∪ B = record { od = record { def = λ x → odef A x ∨ odef B x } ;
odmax = omax (odmax A) (odmax B) ; <odmax = lemma } where
lemma : {y : Ordinal} → odef A y ∨ odef B y → y o< omax (odmax A) (odmax B)
lemma {y} (case1 a) = ordtrans (<odmax A a) (omax-x _ _)
lemma {y} (case2 b) = ordtrans (<odmax B b) (omax-y _ _)
_\_ : ( A B : HOD ) → HOD
A \ B = record { od = record { def = λ x → odef A x ∧ ( ¬ ( odef B x ) ) }; odmax = odmax A ; <odmax = λ y → <odmax A (proj1 y) }
∪-Union : { A B : HOD } → Union (A , B) ≡ ( A ∪ B )
∪-Union {A} {B} = ==→o≡ ( record { eq→ = lemma1 ; eq← = lemma2 } ) where
lemma1 : {x : Ordinal} → odef (Union (A , B)) x → odef (A ∪ B) x
lemma1 {x} lt = lemma3 lt where
lemma4 : {y : Ordinal} → odef (A , B) y ∧ odef (* y) x → ¬ (¬ ( odef A x ∨ odef B x) )
lemma4 {y} z with proj1 z
lemma4 {y} z | case1 refl = double-neg (case1 ( subst (λ k → odef k x ) *iso (proj2 z)) )
lemma4 {y} z | case2 refl = double-neg (case2 ( subst (λ k → odef k x ) *iso (proj2 z)) )
lemma3 : (((u : Ordinal ) → ¬ odef (A , B) u ∧ odef (* u) x) → ⊥) → odef (A ∪ B) x
lemma3 not = ODC.double-neg-eilm O (FExists _ lemma4 not) -- choice
lemma2 : {x : Ordinal} → odef (A ∪ B) x → odef (Union (A , B)) x
lemma2 {x} (case1 A∋x) = subst (λ k → odef (Union (A , B)) k) &iso ( IsZF.union→ isZF (A , B) (* x) A
⟪ case1 refl , d→∋ A A∋x ⟫ )
lemma2 {x} (case2 B∋x) = subst (λ k → odef (Union (A , B)) k) &iso ( IsZF.union→ isZF (A , B) (* x) B
⟪ case2 refl , d→∋ B B∋x ⟫ )
∩-Select : { A B : HOD } → Select A ( λ x → ( A ∋ x ) ∧ ( B ∋ x ) ) ≡ ( A ∩ B )
∩-Select {A} {B} = ==→o≡ ( record { eq→ = lemma1 ; eq← = lemma2 } ) where
lemma1 : {x : Ordinal} → odef (Select A (λ x₁ → (A ∋ x₁) ∧ (B ∋ x₁))) x → odef (A ∩ B) x
lemma1 {x} lt = ⟪ proj1 lt , subst (λ k → odef B k ) &iso (proj2 (proj2 lt)) ⟫
lemma2 : {x : Ordinal} → odef (A ∩ B) x → odef (Select A (λ x₁ → (A ∋ x₁) ∧ (B ∋ x₁))) x
lemma2 {x} lt = ⟪ proj1 lt , ⟪ d→∋ A (proj1 lt) , d→∋ B (proj2 lt) ⟫ ⟫
dist-ord : {p q r : HOD } → p ∩ ( q ∪ r ) ≡ ( p ∩ q ) ∪ ( p ∩ r )
dist-ord {p} {q} {r} = ==→o≡ ( record { eq→ = lemma1 ; eq← = lemma2 } ) where
lemma1 : {x : Ordinal} → odef (p ∩ (q ∪ r)) x → odef ((p ∩ q) ∪ (p ∩ r)) x
lemma1 {x} lt with proj2 lt
lemma1 {x} lt | case1 q∋x = case1 ⟪ proj1 lt , q∋x ⟫
lemma1 {x} lt | case2 r∋x = case2 ⟪ proj1 lt , r∋x ⟫
lemma2 : {x : Ordinal} → odef ((p ∩ q) ∪ (p ∩ r)) x → odef (p ∩ (q ∪ r)) x
lemma2 {x} (case1 p∩q) = ⟪ proj1 p∩q , case1 (proj2 p∩q ) ⟫
lemma2 {x} (case2 p∩r) = ⟪ proj1 p∩r , case2 (proj2 p∩r ) ⟫
dist-ord2 : {p q r : HOD } → p ∪ ( q ∩ r ) ≡ ( p ∪ q ) ∩ ( p ∪ r )
dist-ord2 {p} {q} {r} = ==→o≡ ( record { eq→ = lemma1 ; eq← = lemma2 } ) where
lemma1 : {x : Ordinal} → odef (p ∪ (q ∩ r)) x → odef ((p ∪ q) ∩ (p ∪ r)) x
lemma1 {x} (case1 cp) = ⟪ case1 cp , case1 cp ⟫
lemma1 {x} (case2 cqr) = ⟪ case2 (proj1 cqr) , case2 (proj2 cqr) ⟫
lemma2 : {x : Ordinal} → odef ((p ∪ q) ∩ (p ∪ r)) x → odef (p ∪ (q ∩ r)) x
lemma2 {x} lt with proj1 lt | proj2 lt
lemma2 {x} lt | case1 cp | _ = case1 cp
lemma2 {x} lt | _ | case1 cp = case1 cp
lemma2 {x} lt | case2 cq | case2 cr = case2 ⟪ cq , cr ⟫
record IsBooleanAlgebra ( L : Set n)
( b1 : L )
( b0 : L )
( -_ : L → L )
( _+_ : L → L → L )
( _x_ : L → L → L ) : Set (suc n) where
field
+-assoc : {a b c : L } → a + ( b + c ) ≡ (a + b) + c
x-assoc : {a b c : L } → a x ( b x c ) ≡ (a x b) x c
+-sym : {a b : L } → a + b ≡ b + a
       x-sym : {a b : L } → a x b ≡ b x a
+-aab : {a b : L } → a + ( a x b ) ≡ a
x-aab : {a b : L } → a x ( a + b ) ≡ a
+-dist : {a b c : L } → a + ( b x c ) ≡ ( a x b ) + ( a x c )
x-dist : {a b c : L } → a x ( b + c ) ≡ ( a + b ) x ( a + c )
a+0 : {a : L } → a + b0 ≡ a
ax1 : {a : L } → a x b1 ≡ a
a+-a1 : {a : L } → a + ( - a ) ≡ b1
ax-a0 : {a : L } → a x ( - a ) ≡ b0
record BooleanAlgebra ( L : Set n) : Set (suc n) where
field
b1 : L
b0 : L
-_ : L → L
_+_ : L → L → L
_x_ : L → L → L
isBooleanAlgebra : IsBooleanAlgebra L b1 b0 -_ _+_ _x_
|
c
c this file contains the basic subroutines for
c forming and evaluating multipole expansions.
c
c remarks on scaling conventions.
c
c 1) far field and local expansions are consistently rscaled as
c
c
c m_n^m (scaled) = m_n^m / rscale^(n) so that upon evaluation
c
c the field is sum m_n^m (scaled) * rscale^(n) / r^{n+1}.
c
c l_n^m (scaled) = l_n^m * rscale^(n) so that upon evaluation
c
c the field is sum l_n^m (scaled) / rscale^(n) * r^{n}.
c
c
c 2) there are many definitions of the spherical harmonics,
c which differ in terms of normalization constants. we
c adopt the following convention:
c
c for m>0, we define y_n^m according to
c
c y_n^m = \sqrt{2n+1} \sqrt{\frac{ (n-m)!}{(n+m)!}} \cdot
c p_n^m(\cos \theta) e^{i m phi}
c and
c
c y_n^-m = dconjg( y_n^m )
c
c we omit the condon-shortley phase factor (-1)^m in the
c definition of y_n^m for m<0. (this is standard in several
c communities.)
c
c we also omit the factor \sqrt{\frac{1}{4 \pi}}, so that
c the y_n^m are orthogonal on the unit sphere but not
c orthonormal. (this is also standard in several communities.)
c more precisely,
c
c \int_s y_n^m y_n^m d\omega = 4 \pi.
c
c using our standard definition, the addition theorem takes
c the simple form
c
c 1/r =
c \sum_n 1/(2n+1) \sum_m |s|^n ylm*(s) ylm(t)/ (|t|^(n+1))
c
c 1/r =
c \sum_n \sum_m |s|^n ylm*(s) ylm(t) / (|t|^(n+1))
c ------- ------
c sqrt(2n+1) sqrt(2n+1)
c
c in the laplace library (this library), we incorporate the
c sqrt(2n+1) factor in both forming and evaluating multipole
c expansions.
c
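c
c      worked instance (illustrative, not part of the library): for a
c      single unit charge at s with |s| < |t|, l3dformmpc produces the
c      scaled coefficients
c
c          m_n^m (scaled) = (|s|/rscale)^n Y_n^m*(s) / sqrt(2n+1),
c
c      and l3dmpevalp evaluates
c
c          sum_n sum_m  m_n^m (scaled) * rscale^n Y_n^m(t)
c                       / ( sqrt(2n+1) |t|^(n+1) ),
c
c      which reproduces the addition theorem above, with the two
c      sqrt(2n+1) factors split between forming and evaluation.
c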
c-----------------------------------------------------------------------
c
c l3dmpevalp: computes potentials due to a multipole expansion
c at a collection of targets (done,tested)
c
c l3dmpevalg: computes potentials and gradients
c due to a multipole expansion
c at a collection of targets (done,tested)
c
c l3dmpevalh: computes potentials, gradients, and hessians
c due to a multipole expansion
c at a collection of targets (done,tested)
c
c l3dformmpc: creates multipole expansion (outgoing) due to
c a collection of charges (done,tested)
c
c l3dformmpd: creates multipole expansion (outgoing) due to
c a collection of dipoles (done,tested)
c
c l3dformmpcd: creates multipole expansion (outgoing) due to
c a collection of charges and dipoles (done,tested)
c
c l3dtaevalp: computes potentials
c due to local expansion at a collection of targets
c
c l3dtaevalg: computes potentials and gradients
c due to local expansion at a collection of targets
c
c l3dtaevalh: computes potentials, gradients, and hessians
c due to local expansion at a collection of targets
c
c l3dtaevalhessdini: initialization routine for l3dtaevalh
c
c l3dformtac: creates local expansion due to
c a collection of charges.
c
c l3dformtad: creates local expansion due to
c a collection of dipoles
c
c l3dformtacd: creates local expansion due to
c a collection of charges and dipoles
c
c
c l3dmpevalhessdini: initialization routine for l3dmpevalhessd
c
c l3dmpevalh: computes potentials, gradients and Hessians
c due to a multipole expansion
c at a collection of targets (done,tested)
c
c**********************************************************************
subroutine l3dmpevalp(nd,rscale,center,mpole,nterms,
1 ztarg,ntarg,pot,wlege,nlege,thresh)
c**********************************************************************
c
c this subroutine evaluates the potentials due to an
c outgoing multipole expansion and increments inputs accordingly:
c
c pot = pot + sum sum mpole(n,m) Y_nm(theta,phi) / r^{n+1}
c n m
c
c
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of multipole expansions
c rscale : scaling parameter
c center : expansion center
c mpole : multipole expansion
c nterms : order of the multipole expansion
c ztarg : target locations
c ntarg : number of target locations
c wlege : precomputed array of scaling coeffs for Pnm
c nlege : dimension parameter for wlege
c thresh : threshold for computing outgoing expansion,
c potential at target location
c won't be updated if |t-c| <= thresh, where
c t is the target location and c is the expansion
c center location
c-----------------------------------------------------------------------
c OUTPUT:
c
c pot : updated potentials at all targets
c
c----------------------------------------------------------------------
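c
c     illustrative call (hypothetical driver snippet; assumes mpole has
c     been formed, e.g. by l3dformmpc, and that pot and the wlege table
c     were initialized elsewhere):
c
c        thresh = 1.0d-16
c        call l3dmpevalp(nd,rscale,center,mpole,nterms,
c       1     ztarg,ntarg,pot,wlege,nlege,thresh)
c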
implicit none
c
cc calling sequence variables
c
integer nterms,nlege,ntarg,nd
real *8 rscale,center(3),ztarg(3,ntarg)
real *8 pot(nd,ntarg)
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 wlege(0:nlege,0:nlege), thresh
c
cc temporary variables
c
integer idim
real *8, allocatable :: ynm(:,:),fr(:)
complex *16, allocatable :: ephi(:)
integer i,j,k,l,m,n,itarg
real *8 done,r,theta,phi,zdiff(3)
real *8 ctheta,stheta,cphi,sphi
real *8 d,rs,rtmp1,rtmp2
complex *16 ephi1
c
complex *16 eye
c
data eye/(0.0d0,1.0d0)/
c
done=1.0d0
allocate(ephi(0:nterms+1))
allocate(fr(0:nterms+1))
allocate(ynm(0:nterms,0:nterms))
do itarg=1,ntarg
zdiff(1)=ztarg(1,itarg)-center(1)
zdiff(2)=ztarg(2,itarg)-center(2)
zdiff(3)=ztarg(3,itarg)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
if(abs(r).lt.thresh) goto 1000
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array
c
ephi(0)=done
ephi(1)=ephi1
d = 1.0d0/r
fr(0) = d
d = d*rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
enddo
c
c get the associated Legendre functions:
c
call ylgndrfw(nterms,ctheta,ynm,wlege,nlege)
do l = 0,nterms
rs = sqrt(1.0d0/(2*l+1))
do m=0,l
ynm(l,m) = ynm(l,m)*rs
enddo
enddo
do idim=1,nd
pot(idim,itarg) = pot(idim,itarg) +
1 real(mpole(idim,0,0))*fr(0)
enddo
do n=1,nterms
rtmp1 = fr(n)*ynm(n,0)
do idim=1,nd
pot(idim,itarg)=pot(idim,itarg)+real(mpole(idim,n,0))*rtmp1
enddo
do m=1,n
rtmp1 = fr(n)*ynm(n,m)
do idim=1,nd
rtmp2 = 2*real(mpole(idim,n,m)*ephi(m))
pot(idim,itarg)=pot(idim,itarg)+rtmp1*rtmp2
enddo
enddo
enddo
1000 continue
enddo
return
end
c
c
c
c**********************************************************************
subroutine l3dmpevalg(nd,rscale,center,mpole,nterms,
1 ztarg,ntarg,pot,grad,wlege,nlege,thresh)
c**********************************************************************
c
c
c this subroutine evaluates the potentials and gradients due to
c an outgoing multipole expansion and increments inputs accordingly:
c
c
c pot = pot + sum sum mpole(n,m) Y_nm(theta,phi) / r^{n+1}
c n m
c
c grad = grad + Gradient( sum sum mpole(n,m) Y_nm(theta,phi)/r^{n+1})
c n m
c
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of multipole expansions
c rscale : scaling parameter
c center : expansion center
c mpole : multipole expansion
c nterms : order of the multipole expansion
c ztarg : target location
c ntarg : number of target locations
c wlege : precomputed array of scaling coeffs for Pnm
c nlege : dimension parameter for wlege
c thresh : threshold for computing outgoing expansion,
c potential and gradient at target location
c won't be updated if |t-c| <= thresh, where
c t is the target location and c is the expansion
c center location
c-----------------------------------------------------------------------
c OUTPUT:
c
c pot : updated potentials at targets
c grad : updated gradients at targets
c
c----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,nlege,ntarg,nd
real *8 rscale,center(3),ztarg(3,ntarg)
real *8 pot(nd,ntarg),grad(nd,3,ntarg)
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 wlege(0:nlege,0:nlege), thresh
c
cc temporary variables
c
integer idim
real *8, allocatable :: ynm(:,:),ynmd(:,:),fr(:),frder(:)
complex *16, allocatable :: ephi(:)
integer i,j,k,l,m,n,itarg
real *8 done,r,theta,phi,zdiff(3)
real *8 ctheta,stheta,cphi,sphi
real *8 d,rx,ry,rz,thetax,thetay,thetaz,phix,phiy,phiz,rs
real *8 rtmp1,rtmp2,rtmp3,rtmp4,rtmp5,rtmp6
complex *16 ephi1
real *8 ur(nd),utheta(nd),uphi(nd)
c
complex *16 eye
c
data eye/(0.0d0,1.0d0)/
c
done=1.0d0
allocate(ephi(0:nterms+1))
allocate(fr(0:nterms+1),frder(0:nterms))
allocate(ynm(0:nterms,0:nterms))
allocate(ynmd(0:nterms,0:nterms))
do itarg=1,ntarg
zdiff(1)=ztarg(1,itarg)-center(1)
zdiff(2)=ztarg(2,itarg)-center(2)
zdiff(3)=ztarg(3,itarg)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
if(abs(r).lt.thresh) goto 1000
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array
c
ephi(0)=done
ephi(1)=ephi1
cphi = dreal(ephi1)
sphi = dimag(ephi1)
d = 1.0d0/r
fr(0) = d
d = d*rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
enddo
do i=0,nterms
frder(i) = -(i+1.0d0)*fr(i+1)/rscale
enddo
c
c get the associated Legendre functions:
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do l = 0,nterms
rs = sqrt(1.0d0/(2*l+1))
do m=0,l
ynm(l,m) = ynm(l,m)*rs
ynmd(l,m) = ynmd(l,m)*rs
enddo
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
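c
c     for reference (illustrative), the chain rule being applied is
c
c        du/dx = (du/dr) dr/dx + (du/dtheta) dtheta/dx
c                + (du/dphi) dphi/dx
c
c     with dr/dx = sin(theta)cos(phi), dtheta/dx = cos(theta)cos(phi)/r,
c     dphi/dx = -sin(phi)/(r sin(theta)) (and similarly for y and z);
c     the 1/sin(theta) in dphi/dx and dphi/dy is carried by the scaled
c     Ynm values rather than by phix, phiy below.
c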
rx = stheta*cphi
thetax = ctheta*cphi/r
phix = -sphi/r
ry = stheta*sphi
thetay = ctheta*sphi/r
phiy = cphi/r
rz = ctheta
thetaz = -stheta/r
phiz = 0.0d0
do idim=1,nd
ur(idim) = real(mpole(idim,0,0))*frder(0)
utheta(idim) = 0.0d0
uphi(idim) = 0.0d0
pot(idim,itarg) = pot(idim,itarg) +
1 real(mpole(idim,0,0))*fr(0)
enddo
do n=1,nterms
rtmp1 = fr(n)*ynm(n,0)
rtmp2 = frder(n)*ynm(n,0)
rtmp3 = -fr(n)*ynmd(n,0)*stheta
do idim=1,nd
pot(idim,itarg)=pot(idim,itarg)+real(mpole(idim,n,0))*rtmp1
ur(idim)=ur(idim)+real(mpole(idim,n,0))*rtmp2
utheta(idim)=utheta(idim)+real(mpole(idim,n,0))*rtmp3
enddo
do m=1,n
rtmp1 = fr(n)*ynm(n,m)*stheta
rtmp4 = frder(n)*ynm(n,m)*stheta
rtmp5 = -fr(n)*ynmd(n,m)
rtmp6 = -m*fr(n)*ynm(n,m)
do idim=1,nd
rtmp2 = 2*real(mpole(idim,n,m)*ephi(m))
pot(idim,itarg)=pot(idim,itarg)+rtmp1*rtmp2
ur(idim) = ur(idim) + rtmp4*rtmp2
utheta(idim) = utheta(idim)+rtmp5*rtmp2
rtmp2 = 2*imag(mpole(idim,n,m)*ephi(m))
uphi(idim) = uphi(idim) + rtmp6*rtmp2
enddo
enddo
enddo
do idim=1,nd
grad(idim,1,itarg)=grad(idim,1,itarg)+ur(idim)*rx+
1 utheta(idim)*thetax+uphi(idim)*phix
grad(idim,2,itarg)=grad(idim,2,itarg)+ur(idim)*ry+
1 utheta(idim)*thetay+uphi(idim)*phiy
grad(idim,3,itarg)=grad(idim,3,itarg)+ur(idim)*rz+
1 utheta(idim)*thetaz+uphi(idim)*phiz
enddo
1000 continue
enddo
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformmpc(nd,rscale,sources,charge,ns,center,
1 nterms,mpole,wlege,nlege)
C***********************************************************************
C
C Constructs multipole expansion about CENTER due to NS charges
C located at SOURCES(3,*) and add to existing expansions
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C charge(nd,ns) : charge strengths
C ns : number of sources
C     center(3)     : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c mpole : coeffs of the multipole expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 charge(nd,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:)
real *8 theta,stheta,ctheta,phi,sphi,cphi,dtmp,d,r
complex *16, allocatable :: ephi(:)
complex *16 ephi1
complex *16 eye
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
fr(0) = 1.0d0
d = r/rscale
fr(1) = d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
c
c get the associated Legendre functions and rescale
c by 1/sqrt(2*l+1)
c
call ylgndrfw(nterms,ctheta,ynm,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c 1/r =
c \sum_n 1/(2n+1) \sum_m |S|^n Ylm*(S) Ylm(T) / (|T|)^{n+1}
c
c so contribution is |S|^n times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
do idim=1,nd
mpole(idim,0,0)= mpole(idim,0,0) + fr(0)*charge(idim,isrc)
enddo
do n=1,nterms
dtmp=ynm(n,0)*fr(n)
do idim=1,nd
mpole(idim,n,0)= mpole(idim,n,0) + dtmp*charge(idim,isrc)
enddo
do m=1,n
dtmp=ynm(n,m)*fr(n)
do idim=1,nd
mpole(idim,n,m) = mpole(idim,n,m) +
1 dtmp*ephi(-m)*charge(idim,isrc)
mpole(idim,n,-m) = mpole(idim,n,-m) +
1 dtmp*ephi(m)*charge(idim,isrc)
enddo
enddo
enddo
enddo
c
c
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformmpd(nd,rscale,sources,dipvec,ns,center,
1 nterms,mpole,wlege,nlege)
C***********************************************************************
C
C Constructs multipole expansion about CENTER due to NS dipoles
C located at SOURCES(3,*) and adds to existing expansion
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C     dipvec(nd,3,ns) : dipole orientation vectors
C ns : number of sources
C     center(3)     : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c mpole : coeffs of the multipole expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 dipvec(nd,3,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:),frder(:),ynmd(:,:)
real *8 thetaz,thetay,thetax, theta
real *8 stheta,sphi,rx,ry,rz,r
real *8 ctheta,cphi
real *8 phix,phiy,phiz,phi,fruse,d
complex *16 ur,utheta,uphi,ux,uy,uz,zzz
complex *16, allocatable :: ephi(:)
complex *16 eye,ephi1
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(frder(0:nterms),ynmd(0:nterms,0:nterms))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
fr(0) = 1.0d0
d = r/rscale
fr(1) = d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
frder(0) = 0.0d0
do i=1,nterms
frder(i) = i*fr(i-1)/rscale
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
c     In thetax, thetay, phix, phiy we leave out the 1/r factors in the
c change of variables to avoid blow-up at the origin.
c For the n=0 mode, it is not relevant. For n>0 modes,
c the variable fruse is set to fr(n)/r:
c
c
c
rx = stheta*cphi
thetax = ctheta*cphi
phix = -sphi
ry = stheta*sphi
thetay = ctheta*sphi
phiy = cphi
rz = ctheta
thetaz = -stheta
phiz = 0.0d0
c
c get the associated Legendre functions and rescale by
c 1/sqrt(2*l+1)
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
ynmd(j,i) = ynmd(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c 1/r =
c \sum_n 1/(2n+1) \sum_m |S|^n Ylm*(S) Ylm(T)/ (|T|^(n+1))
c
c so contribution is |S|^n times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
ur = ynm(0,0)*frder(0)
ux = ur*rx
uy = ur*ry
uz = ur*rz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,0,0)= mpole(idim,0,0) + zzz
enddo
do n=1,nterms
fruse = fr(n-1)/rscale
ur = ynm(n,0)*frder(n)
utheta = -fruse*ynmd(n,0)*stheta
ux = ur*rx + utheta*thetax
uy = ur*ry + utheta*thetay
uz = ur*rz + utheta*thetaz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,0)= mpole(idim,n,0) + zzz
enddo
do m=1,n
ur = frder(n)*ynm(n,m)*stheta*ephi(-m)
utheta = -ephi(-m)*fruse*ynmd(n,m)
uphi = -eye*m*ephi(-m)*fruse*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,m)= mpole(idim,n,m) + zzz
enddo
c
ur = frder(n)*ynm(n,m)*stheta*ephi(m)
utheta = -ephi(m)*fruse*ynmd(n,m)
uphi = eye*m*ephi(m)*fruse*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,-m)= mpole(idim,n,-m) + zzz
enddo
enddo
enddo
enddo
c
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformmpcd(nd,rscale,sources,charge,dipvec,ns,
1 center,nterms,mpole,wlege,nlege)
C***********************************************************************
C
C Constructs multipole expansion about CENTER due to NS charges+dipoles
C located at SOURCES(3,*) and adds to existing expansion
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C charge(nd,ns) : charge strengths
C     dipvec(nd,3,ns) : dipole orientation vectors
C ns : number of sources
C     center(3)     : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c mpole : coeffs of the multipole expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 charge(nd,ns)
real *8 dipvec(nd,3,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:),frder(:),ynmd(:,:)
real *8 thetaz,thetay,thetax, theta
real *8 stheta,sphi,rx,ry,rz,r
real *8 ctheta,cphi
real *8 phix,phiy,phiz,phi,fruse,d,dtmp
complex *16 ur,utheta,uphi,ux,uy,uz,zzz
complex *16, allocatable :: ephi(:)
complex *16 eye,ephi1
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(frder(0:nterms),ynmd(0:nterms,0:nterms))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
fr(0) = 1.0d0
d = r/rscale
fr(1) = d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
frder(0) = 0.0d0
do i=1,nterms
frder(i) = i*fr(i-1)/rscale
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
c     In thetax, thetay, phix, phiy we leave out the 1/r factors in the
c change of variables to avoid blow-up at the origin.
c For the n=0 mode, it is not relevant. For n>0 modes,
c the variable fruse is set to fr(n)/r:
c
c
c
rx = stheta*cphi
thetax = ctheta*cphi
phix = -sphi
ry = stheta*sphi
thetay = ctheta*sphi
phiy = cphi
rz = ctheta
thetaz = -stheta
phiz = 0.0d0
c
c get the associated Legendre functions and rescale by
c 1/sqrt(2*l+1)
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
ynmd(j,i) = ynmd(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c 1/r =
c \sum_n 1/(2n+1) \sum_m |S|^n Ylm*(S) Ylm(T)/ (|T|^(n+1))
c
c so contribution is |S|^n times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
ur = ynm(0,0)*frder(0)
ux = ur*rx
uy = ur*ry
uz = ur*rz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,0,0)= mpole(idim,0,0) + zzz +
1 fr(0)*charge(idim,isrc)
enddo
do n=1,nterms
fruse = fr(n-1)/rscale
ur = ynm(n,0)*frder(n)
utheta = -fruse*ynmd(n,0)*stheta
ux = ur*rx + utheta*thetax
uy = ur*ry + utheta*thetay
uz = ur*rz + utheta*thetaz
dtmp = fr(n)*ynm(n,0)
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,0)= mpole(idim,n,0) + zzz +
1 charge(idim,isrc)*dtmp
enddo
do m=1,n
ur = frder(n)*ynm(n,m)*stheta*ephi(-m)
utheta = -ephi(-m)*fruse*ynmd(n,m)
uphi = -eye*m*ephi(-m)*fruse*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
dtmp = ynm(n,m)*fr(n)*stheta
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,m)= mpole(idim,n,m) + zzz +
1 charge(idim,isrc)*dtmp*ephi(-m)
enddo
c
ur = frder(n)*ynm(n,m)*stheta*ephi(m)
utheta = -ephi(m)*fruse*ynmd(n,m)
uphi = eye*m*ephi(m)*fruse*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,-m)= mpole(idim,n,-m)+zzz+
1 charge(idim,isrc)*dtmp*ephi(m)
enddo
enddo
enddo
enddo
c
return
end
c
c
c
c
c
c
c**********************************************************************
subroutine l3dtaevalp(nd,rscale,center,mpole,nterms,
1 ztarg,ntarg,pot,wlege,nlege)
c**********************************************************************
c
c
c this subroutine evaluates the potentials due to an
c incoming local expansion and increments accordingly:
c
c pot = pot + sum sum r^{n} mpole(n,m) Y_nm(theta,phi) /sqrt(2n+1)
c n m
c
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of multipole expansions
c rscale : scaling parameter
c center : expansion center
c mpole : local expansion
c nterms : order of the multipole expansion
c ztarg : target location
c ntarg : number of target locations
c wlege : precomputed array of scaling coeffs for Pnm
c nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
c OUTPUT:
c
c pot : updated potentials at targets
c
c----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,nlege,ntarg,nd
real *8 rscale,center(3),ztarg(3,ntarg)
real *8 pot(nd,ntarg)
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 wlege(0:nlege,0:nlege), thresh
c
cc temporary variables
c
integer idim
real *8, allocatable :: ynm(:,:),fr(:)
complex *16, allocatable :: ephi(:)
integer i,j,k,l,m,n,itarg
real *8 done,r,theta,phi,zdiff(3)
real *8 ctheta,stheta,cphi,sphi
real *8 d,rs,rtmp1,rtmp2
complex *16 ephi1
c
complex *16 eye
c
data eye/(0.0d0,1.0d0)/
c
done=1.0d0
allocate(ephi(0:nterms+1))
allocate(fr(0:nterms+1))
allocate(ynm(0:nterms,0:nterms))
do itarg=1,ntarg
zdiff(1)=ztarg(1,itarg)-center(1)
zdiff(2)=ztarg(2,itarg)-center(2)
zdiff(3)=ztarg(3,itarg)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array
c
ephi(0)=done
ephi(1)=ephi1
cphi = dreal(ephi1)
sphi = dimag(ephi1)
fr(0) = 1.0d0
d = r/rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
enddo
c
c get the associated Legendre functions:
c
call ylgndrfw(nterms,ctheta,ynm,wlege,nlege)
do l = 0,nterms
rs = sqrt(1.0d0/(2*l+1))
do m=0,l
ynm(l,m) = ynm(l,m)*rs
enddo
enddo
do idim=1,nd
pot(idim,itarg) = pot(idim,itarg) +
1 real(mpole(idim,0,0))*fr(0)
enddo
do n=1,nterms
rtmp1 = fr(n)*ynm(n,0)
do idim=1,nd
pot(idim,itarg)=pot(idim,itarg)+real(mpole(idim,n,0))*rtmp1
enddo
do m=1,n
rtmp1 = fr(n)*ynm(n,m)
do idim=1,nd
rtmp2 = 2*real(mpole(idim,n,m)*ephi(m))
pot(idim,itarg)=pot(idim,itarg)+rtmp1*rtmp2
enddo
enddo
enddo
enddo
return
end
c
c
c
c**********************************************************************
subroutine l3dtaevalg(nd,rscale,center,mpole,nterms,
1 ztarg,ntarg,pot,grad,wlege,nlege)
c**********************************************************************
c
c
c this subroutine evaluates the potentials and gradients due to
c an incoming local expansion and increments inputs accordingly:
c
c pot = pot + sum sum mpole(n,m) r^{n}Y_nm(theta,phi) / sqrt(2n+1)
c n m
c
c grad = grad +
c Gradient( sum sum mpole(n,m)r^{n}Y_nm(theta,phi)/sqrt(2n+1))
c n m
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of multipole expansions
c rscale : scaling parameter
c center : expansion center
c mpole : local expansion
c nterms : order of the multipole expansion
c ztarg : target location
c ntarg : number of target locations
c wlege : precomputed array of scaling coeffs for Pnm
c nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
c OUTPUT:
c
c pot : updated potentials at targets
c grad : updated gradients at targets
c----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,nlege,ntarg,nd
real *8 rscale,center(3),ztarg(3,ntarg)
real *8 pot(nd,ntarg),grad(nd,3,ntarg)
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 wlege(0:nlege,0:nlege)
c
cc temporary variables
c
integer idim
real *8, allocatable :: ynm(:,:),ynmd(:,:),fr(:),frder(:)
complex *16, allocatable :: ephi(:)
integer i,j,k,l,m,n,itarg
real *8 done,r,theta,phi,zdiff(3)
real *8 ctheta,stheta,cphi,sphi
real *8 d,rx,ry,rz,thetax,thetay,thetaz,phix,phiy,phiz,rs
real *8 rtmp1,rtmp2,rtmp3,rtmp4,rtmp5,rtmp6
complex *16 ephi1
real *8 ur(nd),utheta(nd),uphi(nd)
c
complex *16 eye
complex *16 ztmp1,ztmp2,ztmp3,ztmpsum,z
real *8 rscaleinv
c
data eye/(0.0d0,1.0d0)/
c
done=1.0d0
allocate(ephi(0:nterms+1))
allocate(fr(0:nterms+1),frder(0:nterms))
allocate(ynm(0:nterms,0:nterms))
allocate(ynmd(0:nterms,0:nterms))
c
do itarg=1,ntarg
zdiff(1)=ztarg(1,itarg)-center(1)
zdiff(2)=ztarg(2,itarg)-center(2)
zdiff(3)=ztarg(3,itarg)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array
c
ephi(0)=done
ephi(1)=ephi1
d = r/rscale
fr(0) = 1.0d0
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
enddo
frder(0) = 0
do i=1,nterms
frder(i) = i*fr(i-1)/rscale
enddo
c
c get the associated Legendre functions:
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do l = 0,nterms
rs = sqrt(1.0d0/(2*l+1))
do m=0,l
ynm(l,m) = ynm(l,m)*rs
ynmd(l,m) = ynmd(l,m)*rs
enddo
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
c
c     NOTE: the spherical derivative needs to be fixed for r=0
c
rscaleinv = 1.0d0/rscale
rx = stheta*cphi
thetax = ctheta*cphi*rscaleinv
phix = -sphi*rscaleinv
ry = stheta*sphi
thetay = ctheta*sphi*rscaleinv
phiy = cphi*rscaleinv
rz = ctheta
thetaz = -stheta*rscaleinv
phiz = 0.0d0
c
do idim=1,nd
ur(idim) = real(mpole(idim,0,0))*frder(0)
utheta(idim) = 0.0d0
uphi(idim) = 0.0d0
pot(idim,itarg) = pot(idim,itarg)+real(mpole(idim,0,0))*fr(0)
enddo
c
do n=1,nterms
rtmp1 = fr(n)*ynm(n,0)
rtmp2 = frder(n)*ynm(n,0)
rtmp3 = -fr(n-1)*ynmd(n,0)*stheta
do idim=1,nd
pot(idim,itarg)=pot(idim,itarg)+real(mpole(idim,n,0))*rtmp1
ur(idim)=ur(idim)+real(mpole(idim,n,0))*rtmp2
utheta(idim)=utheta(idim)+real(mpole(idim,n,0))*rtmp3
enddo
c
do m=1,n
rtmp1 = fr(n)*ynm(n,m)*stheta
rtmp4 = frder(n)*ynm(n,m)*stheta
rtmp5 = -fr(n-1)*ynmd(n,m)
rtmp6 = -m*fr(n-1)*ynm(n,m)
do idim=1,nd
rtmp2 = 2*real(mpole(idim,n,m)*ephi(m))
pot(idim,itarg)=pot(idim,itarg)+rtmp1*rtmp2
ur(idim) = ur(idim) + rtmp4*rtmp2
utheta(idim) = utheta(idim)+rtmp5*rtmp2
rtmp2 = 2*imag(mpole(idim,n,m)*ephi(m))
uphi(idim) = uphi(idim) + rtmp6*rtmp2
enddo
enddo
enddo
c
do idim=1,nd
grad(idim,1,itarg)=grad(idim,1,itarg)+ur(idim)*rx+
1 utheta(idim)*thetax+uphi(idim)*phix
grad(idim,2,itarg)=grad(idim,2,itarg)+ur(idim)*ry+
1 utheta(idim)*thetay+uphi(idim)*phiy
grad(idim,3,itarg)=grad(idim,3,itarg)+ur(idim)*rz+
1 utheta(idim)*thetaz+uphi(idim)*phiz
enddo
1000 continue
enddo
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformtac(nd,rscale,sources,charge,ns,center,
1 nterms,mpole,wlege,nlege)
C***********************************************************************
C
C Constructs local expansion about CENTER due to NS charges
C     located at SOURCES(3,*) and adds to existing expansions
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C charge(nd,ns) : charge strengths
C ns : number of sources
C     center(3)       : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c mpole : coeffs of the local expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 charge(nd,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:)
real *8 theta,stheta,ctheta,phi,sphi,cphi,dtmp,d,r
complex *16, allocatable :: ephi(:)
complex *16 ephi1
complex *16 eye
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
d = 1.0d0/r
fr(0) = d
d = d*rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
c
c get the associated Legendre functions and rescale
c by 1/sqrt(2*l+1)
c
call ylgndrfw(nterms,ctheta,ynm,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c     1/|T-S| =
c         \sum_n 1/(2n+1) \sum_m  |T|^n Ylm(T) Ylm*(S) / (|S|)^{n+1}
c
c     (valid for |T| < |S|), so the contribution to the local expansion
c     coefficient is 1/|S|^{n+1} times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
do idim=1,nd
mpole(idim,0,0)= mpole(idim,0,0) + fr(0)*charge(idim,isrc)
enddo
do n=1,nterms
dtmp=ynm(n,0)*fr(n)
do idim=1,nd
mpole(idim,n,0)= mpole(idim,n,0) + dtmp*charge(idim,isrc)
enddo
do m=1,n
dtmp=ynm(n,m)*fr(n)
do idim=1,nd
mpole(idim,n,m) = mpole(idim,n,m) +
1 dtmp*ephi(-m)*charge(idim,isrc)
mpole(idim,n,-m) = mpole(idim,n,-m) +
1 dtmp*ephi(m)*charge(idim,isrc)
enddo
enddo
enddo
enddo
c
c
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformtad(nd,rscale,sources,dipvec,ns,center,
1 nterms,mpole,wlege,nlege)
C***********************************************************************
C
C     Constructs local expansion about CENTER due to NS dipoles
C located at SOURCES(3,*) and adds to existing expansion
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C     dipvec(nd,3,ns) : dipole orientation vectors
C ns : number of sources
C     center(3)       : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c     mpole           : coeffs of the local expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 dipvec(nd,3,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:),frder(:),ynmd(:,:)
real *8 thetaz,thetay,thetax, theta
real *8 stheta,sphi,rx,ry,rz,r
real *8 ctheta,cphi
real *8 phix,phiy,phiz,phi,d
complex *16 ur,utheta,uphi,ux,uy,uz,zzz
complex *16, allocatable :: ephi(:)
complex *16 eye,ephi1
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(frder(0:nterms),ynmd(0:nterms,0:nterms))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
d = 1.0d0/r
fr(0) = d
d = d*rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
do i=0,nterms
frder(i) = -(i+1.0d0)*fr(i+1)/rscale
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
c
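c     For reference (note added here), writing st = sin(theta),
c     ct = cos(theta), sp = sin(phi), cp = cos(phi), the chain rule
c     behind the coefficients below is
c
c       d/dx = st*cp d/dr + ct*cp/r d/dtheta - sp/(r*st) d/dphi
c       d/dy = st*sp d/dr + ct*sp/r d/dtheta + cp/(r*st) d/dphi
c       d/dz = ct    d/dr - st/r    d/dtheta
c
c     with the 1/st factors in the phi terms dropped as explained above.
c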
rx = stheta*cphi
thetax = ctheta*cphi/r
phix = -sphi/r
ry = stheta*sphi
thetay = ctheta*sphi/r
phiy = cphi/r
rz = ctheta
thetaz = -stheta/r
phiz = 0.0d0
c
c get the associated Legendre functions and rescale by
c 1/sqrt(2*l+1)
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
ynmd(j,i) = ynmd(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c     1/|T-S| =
c         \sum_n 1/(2n+1) \sum_m  |T|^n Ylm(T) Ylm*(S) / (|S|)^{n+1}
c
c     (valid for |T| < |S|), so the contribution to the local expansion
c     coefficient is 1/|S|^{n+1} times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
ur = ynm(0,0)*frder(0)
ux = ur*rx
uy = ur*ry
uz = ur*rz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,0,0)= mpole(idim,0,0) + zzz
enddo
do n=1,nterms
ur = ynm(n,0)*frder(n)
utheta = -fr(n)*ynmd(n,0)*stheta
ux = ur*rx + utheta*thetax
uy = ur*ry + utheta*thetay
uz = ur*rz + utheta*thetaz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,0)= mpole(idim,n,0) + zzz
enddo
do m=1,n
ur = frder(n)*ynm(n,m)*stheta*ephi(-m)
utheta = -ephi(-m)*fr(n)*ynmd(n,m)
uphi = -eye*m*ephi(-m)*fr(n)*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,m)= mpole(idim,n,m) + zzz
enddo
c
ur = frder(n)*ynm(n,m)*stheta*ephi(m)
utheta = -ephi(m)*fr(n)*ynmd(n,m)
uphi = eye*m*ephi(m)*fr(n)*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,-m)= mpole(idim,n,-m) + zzz
enddo
enddo
enddo
enddo
c
return
end
c
c
c
c
c
c
c
C***********************************************************************
subroutine l3dformtacd(nd,rscale,sources,charge,dipvec,ns,
1 center,nterms,mpole,wlege,nlege)
C***********************************************************************
C
C     Constructs local expansion about CENTER due to NS charges+dipoles
C located at SOURCES(3,*) and adds to existing expansion
C
c-----------------------------------------------------------------------
C INPUT:
c
c nd : number of multipole expansions
C rscale : the scaling factor.
C sources(3,ns) : coordinates of sources
C charge(nd,ns) : charge strengths
C     dipvec(nd,3,ns) : dipole orientation vectors
C ns : number of sources
C     center(3)       : expansion center
C nterms : order of multipole expansion
C wlege : precomputed array of scaling coeffs for pnm
C nlege : dimension parameter for wlege
c-----------------------------------------------------------------------
C OUTPUT:
C
c     mpole           : coeffs of the local expansion
c-----------------------------------------------------------------------
implicit none
c
cc calling sequence variables
c
integer nterms,ns,nd, nlege
real *8 center(3),sources(3,ns)
real *8 wlege(0:nlege,0:nlege)
real *8 rscale
complex *16 mpole(nd,0:nterms,-nterms:nterms)
real *8 charge(nd,ns)
real *8 dipvec(nd,3,ns)
c
cc temporary variables
c
integer i,j,k,l,m,n,isrc,idim
real *8 zdiff(3)
real *8, allocatable :: ynm(:,:),fr(:),rfac(:),frder(:),ynmd(:,:)
real *8 thetaz,thetay,thetax, theta
real *8 stheta,sphi,rx,ry,rz,r
real *8 ctheta,cphi
real *8 phix,phiy,phiz,phi,fruse,d,dtmp
complex *16 ur,utheta,uphi,ux,uy,uz,zzz
complex *16, allocatable :: ephi(:)
complex *16 eye,ephi1
data eye/(0.0d0,1.0d0)/
allocate(ynm(0:nterms,0:nterms),fr(0:nterms+1))
allocate(frder(0:nterms),ynmd(0:nterms,0:nterms))
allocate(ephi(-nterms-1:nterms+1))
allocate(rfac(0:nterms))
do i=0,nterms
rfac(i) = 1/sqrt(2.0d0*i + 1.0d0)
enddo
do isrc = 1,ns
zdiff(1)=sources(1,isrc)-center(1)
zdiff(2)=sources(2,isrc)-center(2)
zdiff(3)=sources(3,isrc)-center(3)
c
call cart2polar(zdiff,r,theta,phi)
ctheta = dcos(theta)
stheta = dsin(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
c
c compute exp(eye*m*phi) array and fr array
c
ephi(0)=1.0d0
ephi(1)=ephi1
ephi(-1)=dconjg(ephi1)
d = 1.0d0/r
fr(0) = d
d = d*rscale
fr(1) = fr(0)*d
do i=2,nterms+1
fr(i) = fr(i-1)*d
ephi(i)=ephi(i-1)*ephi1
ephi(-i)=ephi(-i+1)*ephi(-1)
enddo
do i=0,nterms
frder(i) = -(i+1.0d0)*fr(i+1)/rscale
enddo
c
c compute coefficients in change of variables from spherical
c to Cartesian gradients. In phix, phiy, we leave out the
c 1/sin(theta) contribution, since we use values of Ynm (which
c multiplies phix and phiy) that are scaled by
c 1/sin(theta).
c
c     Note: in this routine the 1/r factors are kept in thetax, thetay,
c     phix, phiy below (the variable fruse declared above is unused here).
c
c
c
rx = stheta*cphi
thetax = ctheta*cphi/r
phix = -sphi/r
ry = stheta*sphi
thetay = ctheta*sphi/r
phiy = cphi/r
rz = ctheta
thetaz = -stheta/r
phiz = 0.0d0
c
c get the associated Legendre functions and rescale by
c 1/sqrt(2*l+1)
c
call ylgndr2sfw(nterms,ctheta,ynm,ynmd,wlege,nlege)
do i=0,nterms
do j=0,nterms
ynm(j,i) = ynm(j,i)*rfac(j)
ynmd(j,i) = ynmd(j,i)*rfac(j)
enddo
enddo
c
c
c Compute contribution to mpole coefficients.
c
c Recall that there are multiple definitions of scaling for
c Ylm. Using our standard definition,
c the addition theorem takes the simple form
c
c     1/|T-S| =
c         \sum_n 1/(2n+1) \sum_m  |T|^n Ylm(T) Ylm*(S) / (|S|)^{n+1}
c
c     (valid for |T| < |S|), so the contribution to the local expansion
c     coefficient is 1/|S|^{n+1} times
c
c Ylm*(S) = P_l,m * dconjg(ephi(m)) for m > 0
c Yl,m*(S) = P_l,|m| * dconjg(ephi(m)) for m < 0
c
c where P_l,m is the scaled associated Legendre function.
c
c
ur = ynm(0,0)*frder(0)
ux = ur*rx
uy = ur*ry
uz = ur*rz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,0,0)= mpole(idim,0,0) + zzz +
1 fr(0)*charge(idim,isrc)
enddo
do n=1,nterms
ur = ynm(n,0)*frder(n)
utheta = -fr(n)*ynmd(n,0)*stheta
ux = ur*rx + utheta*thetax
uy = ur*ry + utheta*thetay
uz = ur*rz + utheta*thetaz
dtmp = fr(n)*ynm(n,0)
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,0)= mpole(idim,n,0) + zzz +
1 charge(idim,isrc)*dtmp
enddo
do m=1,n
ur = frder(n)*ynm(n,m)*stheta*ephi(-m)
utheta = -ephi(-m)*fr(n)*ynmd(n,m)
uphi = -eye*m*ephi(-m)*fr(n)*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
dtmp = ynm(n,m)*fr(n)*stheta
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,m)= mpole(idim,n,m) + zzz+
1 charge(idim,isrc)*dtmp*ephi(-m)
enddo
c
ur = frder(n)*ynm(n,m)*stheta*ephi(m)
utheta = -ephi(m)*fr(n)*ynmd(n,m)
uphi = eye*m*ephi(m)*fr(n)*ynm(n,m)
ux = ur*rx + utheta*thetax + uphi*phix
uy = ur*ry + utheta*thetay + uphi*phiy
uz = ur*rz + utheta*thetaz + uphi*phiz
do idim=1,nd
zzz = dipvec(idim,1,isrc)*ux + dipvec(idim,2,isrc)*uy +
1 dipvec(idim,3,isrc)*uz
mpole(idim,n,-m)= mpole(idim,n,-m)+zzz+
1 charge(idim,isrc)*dtmp*ephi(m)
enddo
enddo
enddo
enddo
c
return
end
c
C***********************************************************************
subroutine l3dmpevalhessdini(nterms,scarray)
C***********************************************************************
C
c Precomputes array used in
c mpole-local translation operator from an nterms expansion to an
c order 2 expansion (sufficient to compute pot/fld/hessian).
C
c-----------------------------------------------------------------------
C INPUT:
c
C nterms : order of multipole expansion
c-----------------------------------------------------------------------
C OUTPUT:
C
c scarray : work array of size > 10*(nterms+2)**2
c-----------------------------------------------------------------------
implicit none
integer nterms,l,j,k,m,ll,mm,iuse,lnew,mnew
real *8 scarray(1),cscale
real *8 d
real *8, allocatable :: c(:,:)
real *8, allocatable :: sqc(:,:)
c
allocate(c(0:2*nterms+4,0:2*nterms+4))
allocate(sqc(0:2*nterms+4,0:2*nterms+4))
c
do l = 0,2*nterms+4
c(l,0) = 1.0d0
sqc(l,0) = 1.0d0
enddo
do m = 1,2*nterms+4
c(m,m) = 1.0d0
sqc(m,m) = 1.0d0
do l = m+1,2*nterms+4
c(l,m) = c(l-1,m)+c(l-1,m-1)
sqc(l,m) = dsqrt(c(l,m))
enddo
enddo
c
iuse = 1
do lnew= 0,2
do l = 0,nterms
do m = -l,l
do mnew = -lnew,lnew
ll = l+lnew
mm = mnew-m
cscale = sqc(ll+mm,lnew+mnew)*sqc(ll-mm,lnew-mnew)
cscale = cscale*(-1)**l
cscale = cscale/dsqrt(2*ll+1.0d0)
if ( (m .lt. 0) .and. (mnew .lt. 0) ) then
if (-mnew .lt. -m) cscale = cscale*(-1)**mnew
if (-mnew .ge. -m) cscale = cscale*(-1)**m
endif
if ( (m .gt. 0) .and. (mnew .gt. 0) ) then
if (mnew .lt. m) cscale = cscale*(-1)**mnew
if (mnew .ge. m) cscale = cscale*(-1)**m
endif
scarray(iuse) = cscale
iuse = iuse+1
enddo
enddo
enddo
enddo
return
end
c
c
c
c
C***********************************************************************
subroutine l3dmpevalh(nd,rscale,center,mpole,nterms,
1 ztarg,ntarg,pot,grad,hess,thresh,scarray)
C***********************************************************************
c
c This subroutine evaluates the potential, gradient and
c Hessian of the potential due to a multipole expansion and adds
c to existing quantities
c
c pot = pot + sum sum mpole(n,m) Y_nm(theta,phi) / r^{n+1}
c n m
c
c grad = grad + Gradient( sum sum mpole(n,m) Y_nm(theta,phi)/r^{n+1})
c n m
c
c hess = hess + Hessian( sum sum mpole(n,m) Y_nm(theta,phi)/r^{n+1})
c n m
c
c The method is based on translation of mpole to
c second order expansion at target location.
c It is reasonably optimized, precomputing the array of
c binomial/factorial terms that appear in the shift operator.
c
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of multipole expansions
c rscale : scaling parameter (see formmp1l3d)
c center : expansion center
c mpole : multipole expansion in 2d matrix format
c nterms : order of the multipole expansion
c ztarg : target locations
c ntarg : number of target location
c     thresh : threshold parameter; the potential, gradient and
c              Hessian at a target won't be updated if |t-c| <= thresh, where
c t is the target location and c is the expansion
c center location
c scarray : precomputed array (MUST BE PRECEDED BY CALL TO
c L3DMPEVALHESSDINI(nterms,scarray))
c with dimension of scarray at least 10*(nterms+2)**2
c If nterms is changed,
c               l3dmpevalhessdini must be called again.
c
c OUTPUT:
c
c pot : updated potential at ztarg
c grad : updated gradient at ztarg
c hess : updated Hessian at ztarg
c ordered as dxx,dyy,dzz,dxy,dxz,dyz.
c--------------------------------------------------------------------
implicit none
integer nterms,ntarg,nd,itarg
integer l,m,lnew,mnew,ll,mm,iuse,j,k,lsum,idim
real *8 center(3),ztarg(3,ntarg)
real *8 zdiff(3)
real *8 scarray(*),rscale,thresh
real *8 cphi,sphi,phi,theta,ctheta,d,dd,pi,rfac
complex *16 mpole(nd,0:nterms,-nterms:nterms)
ccc complex *16 local2(0:2,-2:2)
complex *16 z0,ima,ephi1
real *8 pot(nd,ntarg),grad(nd,3,ntarg)
real *8 hess(nd,6,ntarg)
c
real *8, allocatable :: pp(:,:)
real *8, allocatable :: powers(:)
complex *16, allocatable :: local2(:,:)
complex *16, allocatable :: ppc(:,:)
complex *16, allocatable :: ephi(:)
c
data ima/(0.0d0,1.0d0)/
c
allocate(pp(0:nterms+2,0:nterms+2))
allocate(ppc(0:nterms+2,-nterms-2:nterms+2))
allocate(powers(0:nterms+3))
allocate(ephi(-nterms-3:nterms+3))
allocate(local2(nd,9))
c
c determine order of shifted expansion
c
c
do itarg = 1,ntarg
c
do ll = 1,nd
do l = 1,9
local2(ll,l) = 0.0d0
enddo
enddo
c
zdiff(1) = center(1) - ztarg(1,itarg)
zdiff(2) = center(2) - ztarg(2,itarg)
zdiff(3) = center(3) - ztarg(3,itarg)
call cart2polar(zdiff,d,theta,phi)
c
if (abs(d).lt.thresh) goto 1000
c
ctheta = dcos(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
C
C----- create array of powers of R and e^(i*m*phi).
c
dd = 1.0d0/d
dd = dd*rscale
powers(0) = 1.0d0
powers(1) = dd
ephi(0) = 1.0d0
ephi(1) = ephi1
ephi(-1) = dconjg(ephi1)
do l = 2,nterms+3
powers(l) = dd*powers(l-1)
ephi(l) = ephi(l-1)*ephi(1)
ephi(-l) = dconjg(ephi(l))
enddo
c
call ylgndr(nterms+2,ctheta,pp)
do l = 0,nterms+2
do k = -l,l
ppc(l,k) = pp(l,abs(k))*powers(l+1)*ephi(-k)
enddo
enddo
c
c shift to local expansion of order ll about target point
c
iuse = 1
do l = 0,nterms
do m = -l,l
do idim=1,nd
local2(idim,1) = local2(idim,1) +
1 ppc(l,-m)*scarray(iuse)*mpole(idim,l,m)
enddo
iuse = iuse+1
enddo
enddo
do l = 0,nterms
lsum = l+1
do m = -l,l
do idim=1,nd
local2(idim,2) = local2(idim,2) +
1 ppc(lsum,-1-m)*scarray(iuse)*mpole(idim,l,m)
local2(idim,3) = local2(idim,3) +
1 ppc(lsum, -m)*scarray(iuse+1)*mpole(idim,l,m)
local2(idim,4) = local2(idim,4) +
1 ppc(lsum, 1-m)*scarray(iuse+2)*mpole(idim,l,m)
enddo
iuse = iuse+3
enddo
enddo
do l = 0,nterms
lsum = l+2
do m = -l,l
do idim=1,nd
local2(idim,5) = local2(idim,5) +
1 ppc(lsum,-2-m)*scarray(iuse )*mpole(idim,l,m)
local2(idim,6) = local2(idim,6) +
1 ppc(lsum,-1-m)*scarray(iuse+1)*mpole(idim,l,m)
local2(idim,7) = local2(idim,7) +
1 ppc(lsum,-m)*scarray(iuse+2)*mpole(idim,l,m)
local2(idim,8) = local2(idim,8) +
1 ppc(lsum,1-m)*scarray(iuse+3)*mpole(idim,l,m)
local2(idim,9) = local2(idim,9) +
1 ppc(lsum,2-m)*scarray(iuse+4)*mpole(idim,l,m)
enddo
iuse = iuse+5
enddo
enddo
c
ccc pi = 4.0d0*datan(1.0d0)
c
c pot comes from 0,0 mode
c
do idim=1,nd
pot(idim,itarg) = pot(idim,itarg)+local2(idim,1)/rscale
c
c fld comes from l=1 modes
c
rfac = 1.0d0/(rscale*rscale*sqrt(2.0d0))
grad(idim,1,itarg)= grad(idim,1,itarg)+dreal(
1 -rfac*(local2(idim,4)+local2(idim,2)))
grad(idim,2,itarg)= grad(idim,2,itarg)+dreal(
1 -rfac*ima*(local2(idim,4)-local2(idim,2)))
grad(idim,3,itarg)=grad(idim,3,itarg)+
1 dreal(local2(idim,3))/(rscale*rscale)
c
c hess comes from l=2 modes
c
rfac = sqrt(3.0d0)/(sqrt(2.0d0)*rscale*rscale*rscale)
z0 = local2(idim,7)/(rscale*rscale*rscale)
hess(idim,1,itarg)=hess(idim,1,itarg)+dreal(
1 rfac*(local2(idim,9)+local2(idim,5))-z0)
hess(idim,2,itarg)=hess(idim,2,itarg)+dreal(
1 -rfac*(local2(idim,9)+local2(idim,5))-z0)
hess(idim,3,itarg)=hess(idim,3,itarg)+dreal(2*z0)
hess(idim,4,itarg)=hess(idim,4,itarg)+dreal(
1 rfac*ima*(local2(idim,9)-local2(idim,5)))
hess(idim,5,itarg)=hess(idim,5,itarg)+dreal(
1 -rfac*(local2(idim,8)+local2(idim,6)))
hess(idim,6,itarg)=hess(idim,6,itarg)+dreal(
1 -rfac*ima*(local2(idim,8)-local2(idim,6)))
enddo
1000 continue
enddo
return
end
c
c
c
c
C***********************************************************************
subroutine l3dtaevalhessdini(nterms,scarray)
C***********************************************************************
C
c     Precomputes array used in
c local-local translation operator from an nterms expansion to an
c order 2 expansion (sufficient to compute pot/fld/hessian).
C
c-----------------------------------------------------------------------
C INPUT:
c
C nterms : order of multipole expansion
c-----------------------------------------------------------------------
C OUTPUT:
C
c scarray : work array of size > 10*(nterms+2)**2
c-----------------------------------------------------------------------
implicit none
integer nterms,l,j,k,m,ll,mm,iuse
real *8 scarray(1)
real *8 d
real *8, allocatable :: cs(:,:)
real *8, allocatable :: fact(:)
c
allocate(cs(0:nterms,-nterms:nterms))
allocate(fact(0:2*nterms))
c
d = 1.0d0
fact(0) = d
do l = 1,2*nterms
d = d*dsqrt(l+0.0D0)
fact(l) = d
enddo
cs(0,0) = 1.0d0
do l = 1,nterms
do m = 0,l
cs(l,m) = ((-1)**l)/( fact(l-m)*fact(l+m) )
cs(l,-m) = cs(l,m)
enddo
enddo
c
iuse = 1
do j = 0,2
ccc do k = -j,j
do l = j,nterms
do k = -j,j
do m = -l,l
ll = l-j
mm = m-k
scarray(iuse) = 0.0d0
if (abs(mm).gt.ll) goto 111
scarray(iuse) = cs(j,k)*cs(ll,mm)/cs(l,m)
scarray(iuse) = scarray(iuse)/dsqrt(2*LL+1.0D0)
scarray(iuse) = scarray(iuse)*(-1)**ll
if ( m*mm .lt. 0) then
scarray(iuse) = scarray(iuse)*(-1)**mm
endif
if (m*mm .ge. 0) then
if ( abs(m) .le. abs(mm) )
1 scarray(iuse) = scarray(iuse)*(-1)**k
endif
iuse = iuse+1
111 continue
enddo
enddo
enddo
enddo
return
end
c
c
C***********************************************************************
subroutine l3dtaevalh(nd,rscale,center,local,nterms,
1 ztarg,ntarg,pot,grad,hess,scarray)
C***********************************************************************
cc
c this subroutine evaluates the potentials, gradients and Hessians
c of a local expansion and increments inputs accordingly:
c
c pot=pot + sum sum mpole(n,m) r^{n}Y_nm(theta,phi) / sqrt(2n+1)
c n m
c
c grad=grad +
c Gradient(sum sum mpole(n,m)r^{n}Y_nm(theta,phi)/sqrt(2n+1))
c n m
c
c hess=hess +
c Hessian(sum sum mpole(n,m)r^{n}Y_nm(theta,phi)/sqrt(2n+1))
c n m
c
c     The method uses direct translation (not rotation/zshift) of the
c local expansion to one of order 2. It is reasonably optimized,
c precomputing the array of binomial/factorial terms that appear
c in the shift operator.
c
c-----------------------------------------------------------------------
c INPUT:
c
c nd : number of local expansions
c rscale : scaling parameter (see formmp1l3d)
c center : expansion center
c     local    : local expansion in 2d matrix format
c nterms : order of the multipole expansion
c ztarg : target locations
c ntarg : number of target location
c scarray : precomputed array (MUST BE PRECEDED BY CALL TO
c L3DTAEVALHESSDINI(nterms,scarray))
c with dimension of scarray at least 10*(nterms+2)**2
c If nterms is changed,
c l3dtaevalhessdini must be called again.
c
c OUTPUT:
c
c pot : updated potential at ztarg
c grad : updated gradient at ztarg
c hess : updated Hessian at ztarg
c ordered as dxx,dyy,dzz,dxy,dxz,dyz.
c--------------------------------------------------------------------
implicit none
integer nterms,ntarg,nd,itarg
integer l,m,lnew,mnew,ll,mm,iuse,j,k,ldiff,idim
real *8 center(3),ztarg(3,ntarg)
real *8 zdiff(3)
real *8 scarray(*),rscale
real *8 cphi,sphi,phi,theta,ctheta,d,dd,pi,rfac
complex *16 local(nd,0:nterms,-nterms:nterms)
complex *16 z0,ima,ephi1
real *8 pot(nd,ntarg),grad(nd,3,ntarg)
real *8 hess(nd,6,ntarg)
c
real *8, allocatable :: pp(:,:)
real *8, allocatable :: powers(:)
complex *16, allocatable :: local2(:,:)
complex *16, allocatable :: ppc(:,:)
complex *16, allocatable :: ephi(:)
c
data ima/(0.0d0,1.0d0)/
c
allocate(pp(0:nterms,0:nterms))
allocate(ppc(0:nterms,-nterms:nterms))
allocate(powers(0:nterms+1))
allocate(ephi(-nterms-1:nterms+1))
allocate(local2(nd,9))
c determine order of shifted expansion
c
c
do itarg = 1,ntarg
do ll = 1,nd
do l = 1,9
local2(ll,l) = 0.0d0
enddo
enddo
c
zdiff(1) = center(1) - ztarg(1,itarg)
zdiff(2) = center(2) - ztarg(2,itarg)
zdiff(3) = center(3) - ztarg(3,itarg)
call cart2polar(zdiff,d,theta,phi)
c
ctheta = dcos(theta)
cphi = dcos(phi)
sphi = dsin(phi)
ephi1 = dcmplx(cphi,sphi)
C
C----- create array of powers of R and e^(i*m*phi).
c
dd = d/rscale
powers(0) = 1.0d0
powers(1) = dd
ephi(0) = 1.0d0
ephi(1) = ephi1
ephi(-1) = dconjg(ephi1)
do l = 2,nterms+1
powers(l) = dd*powers(l-1)
ephi(l) = ephi(l-1)*ephi(1)
ephi(-l) = dconjg(ephi(l))
enddo
c
call ylgndr(nterms,ctheta,pp)
do l = 0,nterms
do k = -l,l
ppc(l,k) = pp(l,abs(k))*powers(l)*ephi(k)
enddo
enddo
c
iuse = 1
do l = 0,nterms
do m = -l,l
do idim=1,nd
local2(idim,1) = local2(idim,1) +
1 ppc(l,m)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
enddo
do l = 1,nterms
ldiff = l-1
do m = -l,l-2
mm = m+1
do idim=1,nd
local2(idim,2) = local2(idim,2) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -ldiff,ldiff
do idim=1,nd
local2(idim,3) = local2(idim,3) +
1 ppc(ldiff,m)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -l+2,l
mm = m-1
do idim=1,nd
local2(idim,4) = local2(idim,4) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
enddo
do l = 2,nterms
ldiff = l-2
do m = -l,l-4
mm = m+2
do idim=1,nd
local2(idim,5) = local2(idim,5) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -l+1,l-3
mm = m+1
do idim=1,nd
local2(idim,6) = local2(idim,6) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -ldiff,ldiff
mm = m
do idim=1,nd
local2(idim,7) = local2(idim,7) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -l+3,l-1
mm = m-1
do idim=1,nd
local2(idim,8) = local2(idim,8) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
do m = -l+4,l
mm = m-2
do idim=1,nd
local2(idim,9) = local2(idim,9) +
1 ppc(ldiff,mm)*scarray(iuse)*local(idim,l,m)
enddo
iuse = iuse+1
enddo
enddo
c
ccc pi = 4.0d0*datan(1.0d0)
c
c pot comes from 0,0 mode
c
do idim=1,nd
pot(idim,itarg) = pot(idim,itarg)+local2(idim,1)
c
c fld comes from l=1 modes
c
rfac = 1.0d0/(sqrt(2.0d0)*rscale)
grad(idim,3,itarg)= grad(idim,3,itarg)+dreal(
1 local2(idim,3)/rscale)
grad(idim,1,itarg)= grad(idim,1,itarg)+dreal(
1 -rfac*(local2(idim,4) + local2(idim,2)))
grad(idim,2,itarg)= grad(idim,2,itarg)+dreal(
1 -rfac*ima*(local2(idim,4) - local2(idim,2)))
c
c hess comes from l=2 modes
c
ccc rfac = rscale*rscale*sqrt(3.0d0)/sqrt(2.0d0)
rfac = rfac*sqrt(3.0d0)/rscale
z0 = local2(idim,7)/(rscale*rscale)
hess(idim,1,itarg)=hess(idim,1,itarg)+dreal(
1 rfac*(local2(idim,9)+local2(idim,5))-z0)
hess(idim,2,itarg)=hess(idim,2,itarg)+dreal(
1 -rfac*(local2(idim,9)+local2(idim,5))-z0)
hess(idim,3,itarg)=hess(idim,3,itarg)+dreal(2*z0)
hess(idim,4,itarg)=hess(idim,4,itarg)+dreal(
1 rfac*ima*(local2(idim,9)-local2(idim,5)))
hess(idim,5,itarg)=hess(idim,5,itarg)+dreal(
1 -rfac*(local2(idim,8)+local2(idim,6)))
hess(idim,6,itarg)=hess(idim,6,itarg)+dreal(
1 -rfac*ima*(local2(idim,8)-local2(idim,6)))
enddo
1000 continue
enddo
return
end
c
c
c
c
|
(* Title: HOL/Auth/n_germanSimp_lemma_inv__16_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_inv__16_on_rules imports n_germanSimp_lemma_on_inv__16
begin
section{*All lemmas on causal relation between inv__16*}
lemma lemma_inv__16_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__16 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__0Vsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__1Vsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__16) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__16) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
import order.filter.basic
/-
# tendsto
If `X` and `Y` are types, `φ : X → Y` is a function,
and `F : filter X` and `G : filter Y` are filters, then
`filter.tendsto φ F G`
is a true-false statement, which is pronounced something like
"`F` tends to `G` along `φ`". Of course we will `open filter`
in this file, so you can just write `tendsto φ F G`, or if
you like the dot notation you can even write `F.tendsto φ G`.
## Geometric meaning of `tendsto`.
Let's start by thinking about the easy case where `F` and `G`
are actually subsets of `X` and `Y` (that is, principal filters,
associated to sets which we will also call `F` and `G`). In this case,
`tendsto φ F G` simply means "`φ` restricts to a function
from `F` to `G`", or in other words `∀ x ∈ F, φ(x) ∈ G`.
There are two other ways of writing this predicate. The first
involves pushing a set forward along a map. If `F` is a subset of `X`
then let `φ(F)` denote the image of `F` under `φ`, that
is, the subset `{y : Y | ∃ x : X, x ∈ F ∧ φ x = y}` of `Y`.
Then `tendsto φ F G` simply means `φ(F) ⊆ G`.
The second involves pulling a set back along a map. If `G` is a subset
of `Y` then let `φ⁻¹(G)` denote the preimage of `G` under `φ`,
that is, the subset `{x : X | φ x ∈ G}` of `X`. Then `tendsto φ F G`
simply means `F ⊆ φ⁻¹(G)`.
This is how it all works in the case of sets. What we need to
do today is to figure out how to push forward and pull back
filters along a map `φ`. Once we have done this, then we can
prove `φ(F) ≤ G ↔ F ≤ φ⁻¹(G)` and use either one of these
as our definition of `tendsto φ F G` -- it doesn't matter which.
## Digression : adjoint functors.
The discussion below is not needed to be able to do this week's
problems, but it might provide some helpful background for some.
Also note that anyone who still doesn't like the word "type" can
literally just change it for the word "set" (and change "term of
type" to "element of set"), which is how arguments
of the below kind would appear in the traditional mathematical
literature.
Partially ordered types, such as the type of subsets of a fixed
type `X` or the type of filters on `X`, are actually very simple
examples of categories. In general if `P` is a partially ordered type
and `x,y` are terms of type `P` then the idea is that we can
define `Hom(x,y)` to have exactly one element if `x ≤ y` is true,
and no elements at all if `x ≤ y` is false. The structure/axioms for
a category are that `Hom(x,x)` is supposed to have an identity
element, which follows from reflexivity of `≤`, and that one can
compose morphisms, which follows from transitivity of `≤`.
Antisymmetry states that if two objects are isomorphic (i.e.,
in this case, if `Hom(x,y)` and `Hom(y,x)` are both nonempty),
then they are equal. If `φ : X → Y` is a map of types, then
pushing forward subsets and pulling back subsets are both
functors from `set X` to `set Y`, because `S ⊆ T → φ(S) ⊆ φ(T)`
and `U ⊆ V → φ⁻¹(U) ⊆ φ⁻¹(V)`. The statement that
`φ(S) ≤ U ↔ S ≤ φ⁻¹(U)` is simply the statement that these functors
are adjoint to each other. Today we will define pushforward and
pullback of filters, and show that they are also a pair of
adjoint functors, but we will not use this language. In fact there
is a special language for adjoint functors in this simple situation:
we will say that pushforward and pullback form a Galois connection.
-/
/-
## Warm-up: pushing forward and pulling back subsets.
Say `X` and `Y` are types, and `f : X → Y`.
-/
variables (X Y : Type) (f : X → Y)
/-
### images
In Lean, the image `f(S)` of a subset `S : set X` cannot
be denoted `f S`, because `f` expects an _element_ of `X` as
an input, not a subset of `X`, so we need new notation.
Notation : `f '' S` is the image of `S` under `f`. Let's
check this.
-/
example (S : set X) : f '' S = {y : Y | ∃ x : X, x ∈ S ∧ f x = y} :=
begin
-- true by definition
refl
end
/-
### preimages
In Lean, the preimage `f⁻¹(T)` of a subset `T : set Y` cannot
be denoted `f⁻¹ T` because `⁻¹` is the inverse notation in group
theory, so if anything would be a function from `Y` to `X`,
not a function on subsets of `Y`.
Notation : `f ⁻¹' T` is the preimage of `T` under `f`. Let's
check this.
Pro shortcut: `\-'` for `⁻¹'`
-/
example (T : set Y) : f ⁻¹' T = {x : X | f x ∈ T} :=
begin
-- true by definition
refl
end
/-
I claim that the following conditions on `S : set X` and `T : set Y`
are equivalent:
1) `f '' S ⊆ T`
2) `S ⊆ f⁻¹' T`
Indeed, they both say that `f` restricts to a function from `S` to `T`.
Let's check this. You might find
`mem_preimage : a ∈ f ⁻¹' s ↔ f a ∈ s`
and `subset_def` useful.
-/
open set
example (S : set X) (T : set Y) : f '' S ⊆ T ↔ S ⊆ f⁻¹' T :=
begin
split,
{ intros h x hxS,
-- rw subset_def at h,
-- rw mem_preimage,
apply h,
use [x, hxS, rfl] },
{ rintros h - ⟨x, hxS, rfl⟩,
exact h hxS }
end
/-
## Pushing forward filters.
Pushing forward is easy, so let's do that first.
It's called `filter.map` in Lean.
We define the pushforward filter `map f F` on `Y` to be the
obvious thing: a subset of `Y` is in the filter iff `f⁻¹(Y)`
is in `F`. Let's check this is a filter.
Reminder of some helpful lemmas:
In `set`:
`mem_set_of_eq : a ∈ {x : α | p x} = p a` -- definitional
In `filter`:
`univ_mem_sets : univ ∈ F`
`mem_sets_of_superset : S ∈ F → S ⊆ T → T ∈ F`
`inter_mem_sets : S ∈ F → T ∈ F → S ∩ T ∈ F`
-/
open filter
-- this is called `F.map f` or `filter.map f F`
-- or just `map f F` if `filter` is open.
example (F : filter X) : filter Y :=
{ sets := {T : set Y | f ⁻¹' T ∈ F },
univ_sets := begin
-- rw mem_set_of_eq,
exact univ_mem_sets,
end,
sets_of_superset := begin
intros S T hS hST,
--rw mem_set_of_eq at *,
refine mem_sets_of_superset hS _,
intros x hx,
exact hST hx,
end,
inter_sets := begin
intros S T,
-- I am abusing definitional equality
exact inter_mem_sets,
end, }
-- this is `filter.mem_map` and it's true by definition.
example (F : filter X) (T : set Y) : T ∈ F.map f ↔ f ⁻¹' T ∈ F :=
begin
-- true by definition
refl
end
-- Let's check that map satisfies some basic functorialities.
-- Recall that if your goal is to check two filters are
-- equal then you can use the `ext` tactic.
-- pushing along the identity map id : X → X doesn't change the filter.
-- this is `filter.map_id` but see if you can prove it yourself.
example (F : filter X) : F.map id = F :=
begin
ext S,
refl,
end
-- pushing along g ∘ f is the same as pushing along f and then g
-- for some reason this isn't in mathlib, instead they have `map_map` which
-- has the equality the other way.
variables (Z : Type) (g : Y → Z)
-- this isn't in mathlib, but `filter.map_map` is the equality the other
-- way around. See if you can prove it yourself.
example (F : filter X) : F.map (g ∘ f) = (F.map f).map g :=
begin
ext S,
refl,
end
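-- Extra example (added for illustration): pushing forward also respects the
-- ordering of filters, another basic functoriality check. In mathlib this
-- fact is available as `filter.map_mono`.
example (F G : filter X) (h : F ≤ G) : F.map f ≤ G.map f :=
begin
  intros T hT,
  exact h hT,
end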
open_locale filter -- for 𝓟 notation
-- pushing the principal filter `𝓟 S` along `f` gives `𝓟 (f '' S)`
-- this is `filter.map_principal` but see if you can prove it yourself.
example (S : set X) : (𝓟 S).map f = 𝓟 (f '' S) :=
begin
ext T,
-- rw mem_map,
-- rw mem_principal_sets,
-- rw mem_principal_sets,
split,
{ rintro h y ⟨x, hx, rfl⟩,
exact h hx },
{ rintro h x hx,
apply h,
exact ⟨x, hx, rfl⟩ }
end
/-
## tendsto
The definition: if `f : X → Y` and `F : filter X` and `G : filter Y`
then `tendsto f F G : Prop := map f F ≤ G`. This is a definition (it
has type `Prop`), not the proof of a theorem. It is a true-false statement
attached to `f`, `F` and `G`, it's a bit like saying "f is continuous at x"
or something like that, it might be true and it might be false.
The mental model you might want to have of the definition is that
`tendsto f F G` means that the function `f` restricts to a function
from the generalized set `F` to the generalized set `G`.
-/
-- this is `filter.tendsto_def`
example (F : filter X) (G : filter Y) :
tendsto f F G ↔ ∀ T : set Y, T ∈ G → f ⁻¹' T ∈ F :=
begin
-- true by definition
refl
end
-- Let's make a basic API for `tendsto`
-- this is `tendsto_id` but see if you can prove it yourself.
example (F : filter X) : tendsto id F F :=
begin
intro S,
exact id,
end
-- this is `tendsto.comp` but see if you can prove it yourself
example (F : filter X) (G : filter Y) (H : filter Z)
(f : X → Y) (g : Y → Z)
(hf : tendsto f F G) (hg : tendsto g G H) : tendsto (g ∘ f) F H :=
begin
rintro S hS,
specialize hg hS,
specialize hf hg,
exact hf,
end
-- I would recommend looking at the model answer to this one if
-- you get stuck.
lemma tendsto_comp_map (g : Y → Z) (F : filter X) (G : filter Z) :
tendsto (g ∘ f) F G ↔ tendsto g (F.map f) G :=
begin
refl, -- Both sides are the same, by definition. Think about it on paper!
end
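-- Extra example (added for illustration): for principal filters, `tendsto`
-- recovers the naive statement "`f` maps `S` into `T`" from the discussion
-- at the top of the file. (mathlib has this as `tendsto_principal_principal`.)
example (S : set X) (T : set Y) :
  tendsto f (𝓟 S) (𝓟 T) ↔ ∀ x ∈ S, f x ∈ T :=
begin
  split,
  { intros h x hxS,
    exact h (mem_principal_self T) hxS },
  { intros h U hU x hxS,
    exact hU (h x hxS) },
end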
/-
## Appendix : Pulling back filters
We don't use this in the next part.
Say `f : X → Y` and `G : filter Y`, and we want a filter on `X`. Let's make a
naive definition. We want a collection of subsets of `X` corresponding to the
filter obtained by pulling back `G` along `f`. When should `S : set X` be
in this filter? Perhaps it is when `f '' S ∈ G`. However, there is no reason
that the collection of `S` satisfying this property should be a filter
on `X`. For example, there is no reason to expect that `f '' univ ∈ G`
if `f` is not surjective.
Here's a way of fixing this. Remember that our model of a filter `G` is some
kind of generalised notion of a set. If `T : set Y` then `T ∈ G` is supposed to
mean that the "set" `G` is a subset of `T`. So this should imply
that `f⁻¹(G) ⊆ f⁻¹(T)`. In particular, if `T ∈ G` and `f⁻¹(T) ⊆ S` then this
should mean `f⁻¹(G) ⊆ S` and hence `S ∈ f⁻¹(G)`. Let's try this and see if
it works.
Random useful lemmas (you might be getting to the point where you can
guess the names of the lemmas):
`subset_univ S : S ⊆ univ`
`subset.trans : A ⊆ B → B ⊆ C → A ⊆ C`
-/
-- this is called filter.comap
example (G : filter Y) : filter X :=
{ sets := {S : set X | ∃ T ∈ G, f ⁻¹' T ⊆ S},
univ_sets := begin
use univ,
split,
{ exact univ_mem_sets },
{ exact subset_univ _ }
end,
sets_of_superset := begin
rintros S T ⟨U, hUG, hUS⟩ hST,
use [U, hUG],
exact subset.trans hUS hST
end,
inter_sets := begin
rintro S T ⟨U, hUG, hUS⟩ ⟨V, hVG, hVT⟩,
use [U ∩ V, inter_mem_sets hUG hVG],
rintro x ⟨hxU, hxV⟩,
exact ⟨hUS hxU, hVT hxV⟩,
end }
-- Let's call this mem_comap
lemma mem_comap (f : X → Y) (G : filter Y) (S : set X) :
S ∈ comap f G ↔ ∃ T ∈ G, f ⁻¹' T ⊆ S :=
begin
-- true by definition
refl
end
-- If you want to, you can check some preliminary properties of `comap`.
-- this is comap_id
example (G : filter Y) : comap id G = G :=
begin
ext S,
rw mem_comap,
split,
{ rintro ⟨T, hT, h⟩,
exact mem_sets_of_superset hT h,
},
{ intro hS,
use [S, hS],
refl }
end
-- this is comap_comap but the other way around
lemma comap_comp (H : filter Z) : comap (g ∘ f) H = comap f (comap g H) :=
begin
ext S,
simp only [mem_comap],
split,
{ rintro ⟨U, hU, h⟩,
use g ⁻¹' U,
refine ⟨_, h⟩,
rw mem_comap,
use [U, hU] },
{ rintro ⟨T, ⟨U, hU, h2⟩, h⟩,
use [U, hU],
refine subset.trans _ h,
intros x hx,
exact h2 hx }
end
-- this is comap_principal. Remember `mem_principal_sets`!
example (T : set Y) : comap f (𝓟 T) = 𝓟 (f ⁻¹' T) :=
begin
ext S,
-- rw mem_comap,
-- rw mem_principal_sets,
split,
{ rintro ⟨U, hU, h⟩,
refine subset.trans (λ x, _) h,
apply hU },
{ intro h,
exact ⟨T, mem_principal_self T, h⟩ }
end
-- This is the proof that `map f` and `comap f` are adjoint functors,
-- or in other words form a Galois connection. It is the "generalised set"
-- analogue of the assertion that if S is a subset of X and T is a subset of Y
-- then f(S) ⊆ T ↔ S ⊆ f⁻¹(T), these both being ways to say that `f` restricts
-- to a function from `S` to `T`.
lemma filter.galois_connection (F : filter X) (G : filter Y) :
map f F ≤ G ↔ F ≤ comap f G :=
begin
split,
{ rintro h S ⟨T, hT, hTS⟩,
rw le_def at h,
exact mem_sets_of_superset (h T hT) hTS },
{ rintro h T hT,
rw le_def at h,
exact h (f ⁻¹' T) ⟨T, hT, subset.refl _⟩ },
end
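-- Extra example (added for illustration): since `tendsto f F G` was *defined*
-- as `map f F ≤ G`, the Galois connection says we could equally well have
-- defined it as `F ≤ comap f G`, as promised in the `tendsto` section above.
example (F : filter X) (G : filter Y) :
  tendsto f F G ↔ F ≤ comap f G :=
filter.galois_connection X Y f F G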
-- indeed, `map f` and `comap f` form a Galois connection.
example : galois_connection (map f) (comap f) :=
filter.galois_connection X Y f |
------------------------------------------------------------------------------
-- PA properties
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module PA.Axiomatic.Mendelson.PropertiesATP where
open import PA.Axiomatic.Mendelson.Base
------------------------------------------------------------------------------
+-leftIdentity : ∀ n → zero + n ≈ n
+-leftIdentity = S₅
-- See Issue https://github.com/asr/apia/issues/81 .
+-rightIdentityA : ℕ → Set
+-rightIdentityA i = i + zero ≈ i
{-# ATP definition +-rightIdentityA #-}
+-rightIdentity : ∀ n → n + zero ≈ n
+-rightIdentity = S₉ +-rightIdentityA A0 is
where
A0 : +-rightIdentityA zero
A0 = +-leftIdentity zero
postulate is : ∀ i → +-rightIdentityA i → +-rightIdentityA (succ i)
{-# ATP prove is #-}
-- See Issue https://github.com/asr/apia/issues/81 .
x+Sy≈S[x+y]A : ℕ → ℕ → Set
x+Sy≈S[x+y]A n i = i + succ n ≈ succ (i + n)
{-# ATP definition x+Sy≈S[x+y]A #-}
x+Sy≈S[x+y] : ∀ m n → m + succ n ≈ succ (m + n)
x+Sy≈S[x+y] m n = S₉ (x+Sy≈S[x+y]A n) A0 is m
where
postulate A0 : x+Sy≈S[x+y]A n zero
{-# ATP prove A0 #-}
postulate is : ∀ i → x+Sy≈S[x+y]A n i → x+Sy≈S[x+y]A n (succ i)
{-# ATP prove is #-}
-- See Issue https://github.com/asr/apia/issues/81 .
+-leftCongA : ℕ → ℕ → ℕ → Set
+-leftCongA m n i = m + i ≈ n + i
{-# ATP definition +-leftCongA #-}
+-leftCong : ∀ {m n o} → m ≈ n → m + o ≈ n + o
+-leftCong {m} {n} {o} h = S₉ (+-leftCongA m n) A0 is o
where
postulate A0 : +-leftCongA m n zero
{-# ATP prove A0 +-rightIdentity #-}
postulate is : ∀ i → +-leftCongA m n i → +-leftCongA m n (succ i)
{-# ATP prove is x+Sy≈S[x+y] #-}
-- See Issue https://github.com/asr/apia/issues/81 .
+-commA : ℕ → ℕ → Set
+-commA n i = i + n ≈ n + i
{-# ATP definition +-commA #-}
+-comm : ∀ m n → m + n ≈ n + m
+-comm m n = S₉ (+-commA n) A0 is m
where
postulate A0 : +-commA n zero
{-# ATP prove A0 +-rightIdentity #-}
postulate is : ∀ i → +-commA n i → +-commA n (succ i)
{-# ATP prove is x+Sy≈S[x+y] #-}
+-asocc : ∀ m n o → m + n + o ≈ m + (n + o)
+-asocc m n o = S₉ A A0 is m
where
A : ℕ → Set
A i = i + n + o ≈ i + (n + o)
{-# ATP definition A #-}
postulate A0 : A zero
{-# ATP prove A0 +-leftCong #-}
postulate is : ∀ i → A i → A (succ i)
{-# ATP prove is +-leftCong #-}
|
/-
Copyright (c) 2020 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison
-/
import algebra.ring.ulift
import data.equiv.module
/-!
# `ulift` instances for module and multiplicative actions
This file defines instances for module, mul_action and related structures on `ulift` types.
(Recall `ulift α` is just a "copy" of a type `α` in a higher universe.)
We also provide `ulift.module_equiv : ulift M ≃ₗ[R] M`.
-/
namespace ulift
universes u v w
variable {R : Type u}
variable {M : Type v}
variable {N : Type w}
instance has_scalar_left [has_scalar R M] :
has_scalar (ulift R) M :=
⟨λ s x, s.down • x⟩
@[simp] lemma smul_down [has_scalar R M] (s : ulift R) (x : M) : (s • x) = s.down • x := rfl
@[simp]
lemma smul_down' [has_scalar R M] (s : R) (x : ulift M) :
(s • x).down = s • x.down :=
rfl
instance is_scalar_tower [has_scalar R M] [has_scalar M N] [has_scalar R N]
[is_scalar_tower R M N] : is_scalar_tower (ulift R) M N :=
⟨λ x y z, show (x.down • y) • z = x.down • y • z, from smul_assoc _ _ _⟩
instance is_scalar_tower' [has_scalar R M] [has_scalar M N] [has_scalar R N]
[is_scalar_tower R M N] : is_scalar_tower R (ulift M) N :=
⟨λ x y z, show (x • y.down) • z = x • y.down • z, from smul_assoc _ _ _⟩
instance is_scalar_tower'' [has_scalar R M] [has_scalar M N] [has_scalar R N]
[is_scalar_tower R M N] : is_scalar_tower R M (ulift N) :=
⟨λ x y z, show up ((x • y) • z.down) = ⟨x • y • z.down⟩, by rw smul_assoc⟩
instance [has_scalar R M] [has_scalar Rᵐᵒᵖ M] [is_central_scalar R M] :
is_central_scalar R (ulift M) :=
⟨λ r m, congr_arg up $ op_smul_eq_smul r m.down⟩
instance mul_action [monoid R] [mul_action R M] :
mul_action (ulift R) M :=
{ smul := (•),
mul_smul := λ r s f, by { cases r, cases s, simp [mul_smul], },
one_smul := λ f, by { simp [one_smul], } }
instance mul_action' [monoid R] [mul_action R M] :
mul_action R (ulift M) :=
{ smul := (•),
mul_smul := λ r s f, by { cases f, ext, simp [mul_smul], },
one_smul := λ f, by { ext, simp [one_smul], } }
instance distrib_mul_action [monoid R] [add_monoid M] [distrib_mul_action R M] :
distrib_mul_action (ulift R) M :=
{ smul_zero := λ c, by { cases c, simp [smul_zero], },
smul_add := λ c f g, by { cases c, simp [smul_add], },
..ulift.mul_action }
instance distrib_mul_action' [monoid R] [add_monoid M] [distrib_mul_action R M] :
distrib_mul_action R (ulift M) :=
{ smul_zero := λ c, by { ext, simp [smul_zero], },
smul_add := λ c f g, by { ext, simp [smul_add], },
..ulift.mul_action' }
instance mul_distrib_mul_action [monoid R] [monoid M] [mul_distrib_mul_action R M] :
mul_distrib_mul_action (ulift R) M :=
{ smul_one := λ c, by { cases c, simp [smul_one], },
smul_mul := λ c f g, by { cases c, simp [smul_mul'], },
..ulift.mul_action }
instance mul_distrib_mul_action' [monoid R] [monoid M] [mul_distrib_mul_action R M] :
mul_distrib_mul_action R (ulift M) :=
{ smul_one := λ c, by { ext, simp [smul_one], },
smul_mul := λ c f g, by { ext, simp [smul_mul'], },
..ulift.mul_action' }
instance module [semiring R] [add_comm_monoid M] [module R M] :
module (ulift R) M :=
{ add_smul := λ c f g, by { cases c, simp [add_smul], },
zero_smul := λ f, by { simp [zero_smul], },
..ulift.distrib_mul_action }
instance module' [semiring R] [add_comm_monoid M] [module R M] :
module R (ulift M) :=
{ add_smul := by { intros, ext1, apply add_smul },
zero_smul := by { intros, ext1, apply zero_smul } }
/--
The `R`-linear equivalence between `ulift M` and `M`.
-/
def module_equiv [semiring R] [add_comm_monoid M] [module R M] : ulift M ≃ₗ[R] M :=
{ to_fun := ulift.down,
inv_fun := ulift.up,
map_smul' := λ r x, rfl,
map_add' := λ x y, rfl,
left_inv := by tidy,
right_inv := by tidy, }
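/- A quick sanity check, added here as an illustration: the underlying
function of `module_equiv` is literally `ulift.down`, so it computes by
`rfl`. -/
example [semiring R] [add_comm_monoid M] [module R M] (x : ulift M) :
  (module_equiv : ulift M ≃ₗ[R] M) x = x.down :=
rfl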
end ulift
|
[STATEMENT]
lemma list_all2_append':
"length us = length vs \<Longrightarrow> list_all2 P (xs @ us) (ys @ vs) \<longleftrightarrow> list_all2 P xs ys \<and> list_all2 P us vs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. length us = length vs \<Longrightarrow> list_all2 P (xs @ us) (ys @ vs) = (list_all2 P xs ys \<and> list_all2 P us vs)
[PROOF STEP]
by(auto simp add: list_all2_append1 list_all2_append2 dest: list_all2_lengthD) |
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import data.set.pointwise
import group_theory.submonoid.operations
/-! # Pointwise instances on `submonoid`s and `add_submonoid`s
This file provides:
* `submonoid.has_inv`
* `add_submonoid.has_neg`
and the actions
* `submonoid.pointwise_mul_action`
* `add_submonoid.pointwise_mul_action`
which matches the action of `mul_action_set`.
These are all available in the `pointwise` locale.
Additionally, it provides `add_submonoid.has_mul`, which is available globally to match
`submodule.has_mul`.
## Implementation notes
Most of the lemmas in this file are direct copies of lemmas from `algebra/pointwise.lean`.
While the statements of these lemmas are defeq, we repeat them here due to them not being
syntactically equal. Before adding new lemmas here, consider if they would also apply to the action
on `set`s.
-/
open set
variables {α : Type*} {G : Type*} {M : Type*} {R : Type*} {A : Type*}
variables [monoid M] [add_monoid A]
namespace submonoid
variables [group G]
open_locale pointwise
/-- The submonoid with every element inverted. -/
@[to_additive /-" The additive submonoid with every element negated. "-/]
protected def has_inv : has_inv (submonoid G):=
{ inv := λ S,
{ carrier := (S : set G)⁻¹,
one_mem' := show (1 : G)⁻¹ ∈ S, by { rw inv_one, exact S.one_mem },
mul_mem' := λ a b (ha : a⁻¹ ∈ S) (hb : b⁻¹ ∈ S), show (a * b)⁻¹ ∈ S,
by { rw mul_inv_rev, exact S.mul_mem hb ha } } }
localized "attribute [instance] submonoid.has_inv" in pointwise
open_locale pointwise
@[simp, to_additive] lemma coe_inv (S : submonoid G) : ↑(S⁻¹) = (S : set G)⁻¹ := rfl
@[simp, to_additive] lemma mem_inv {g : G} {S : submonoid G} : g ∈ S⁻¹ ↔ g⁻¹ ∈ S := iff.rfl
@[to_additive] instance : has_involutive_inv (submonoid G) :=
set_like.coe_injective.has_involutive_inv _ $ λ _, rfl
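-- Example use of the instance above (added for illustration): inverting a
-- submonoid twice gives it back, via the generic `inv_inv` lemma for
-- `has_involutive_inv`.
example (S : submonoid G) : S⁻¹⁻¹ = S := inv_inv S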
@[simp, to_additive] lemma inv_le_inv (S T : submonoid G) : S⁻¹ ≤ T⁻¹ ↔ S ≤ T :=
set_like.coe_subset_coe.symm.trans set.inv_subset_inv
@[to_additive] lemma inv_le (S T : submonoid G) : S⁻¹ ≤ T ↔ S ≤ T⁻¹ :=
set_like.coe_subset_coe.symm.trans set.inv_subset
/-- `submonoid.has_inv` as an order isomorphism. -/
@[to_additive /-" `add_submonoid.has_neg` as an order isomorphism "-/, simps]
def inv_order_iso : submonoid G ≃o submonoid G :=
{ to_equiv := equiv.inv _,
map_rel_iff' := inv_le_inv }
@[to_additive] lemma closure_inv (s : set G) : closure s⁻¹ = (closure s)⁻¹ :=
begin
apply le_antisymm,
{ rw [closure_le, coe_inv, ←set.inv_subset, inv_inv],
exact subset_closure },
{ rw [inv_le, closure_le, coe_inv, ←set.inv_subset],
exact subset_closure }
end
@[simp, to_additive]
lemma inv_inf (S T : submonoid G) : (S ⊓ T)⁻¹ = S⁻¹ ⊓ T⁻¹ :=
set_like.coe_injective set.inter_inv
@[simp, to_additive]
lemma inv_sup (S T : submonoid G) : (S ⊔ T)⁻¹ = S⁻¹ ⊔ T⁻¹ :=
(inv_order_iso : submonoid G ≃o submonoid G).map_sup S T
@[simp, to_additive]
lemma inv_bot : (⊥ : submonoid G)⁻¹ = ⊥ :=
set_like.coe_injective $ (set.inv_singleton 1).trans $ congr_arg _ inv_one
@[simp, to_additive]
lemma inv_top : (⊤ : submonoid G)⁻¹ = ⊤ :=
set_like.coe_injective $ set.inv_univ
@[simp, to_additive]
lemma inv_infi {ι : Sort*} (S : ι → submonoid G) : (⨅ i, S i)⁻¹ = ⨅ i, (S i)⁻¹ :=
(inv_order_iso : submonoid G ≃o submonoid G).map_infi _
@[simp, to_additive]
lemma inv_supr {ι : Sort*} (S : ι → submonoid G) : (⨆ i, S i)⁻¹ = ⨆ i, (S i)⁻¹ :=
(inv_order_iso : submonoid G ≃o submonoid G).map_supr _
end submonoid
namespace submonoid
section monoid
variables [monoid α] [mul_distrib_mul_action α M]
/-- The action on a submonoid corresponding to applying the action to every element.
This is available as an instance in the `pointwise` locale. -/
protected def pointwise_mul_action : mul_action α (submonoid M) :=
{ smul := λ a S, S.map (mul_distrib_mul_action.to_monoid_End _ _ a),
one_smul := λ S, (congr_arg (λ f, S.map f) (monoid_hom.map_one _)).trans S.map_id,
mul_smul := λ a₁ a₂ S,
(congr_arg (λ f, S.map f) (monoid_hom.map_mul _ _ _)).trans (S.map_map _ _).symm,}
localized "attribute [instance] submonoid.pointwise_mul_action" in pointwise
open_locale pointwise
@[simp] lemma coe_pointwise_smul (a : α) (S : submonoid M) : ↑(a • S) = a • (S : set M) := rfl
lemma smul_mem_pointwise_smul (m : M) (a : α) (S : submonoid M) : m ∈ S → a • m ∈ a • S :=
(set.smul_mem_smul_set : _ → _ ∈ a • (S : set M))
lemma mem_smul_pointwise_iff_exists (m : M) (a : α) (S : submonoid M) :
m ∈ a • S ↔ ∃ (s : M), s ∈ S ∧ a • s = m :=
(set.mem_smul_set : m ∈ a • (S : set M) ↔ _)
instance pointwise_central_scalar [mul_distrib_mul_action αᵐᵒᵖ M] [is_central_scalar α M] :
is_central_scalar α (submonoid M) :=
⟨λ a S, congr_arg (λ f, S.map f) $ monoid_hom.ext $ by exact op_smul_eq_smul _⟩
end monoid
section group
variables [group α] [mul_distrib_mul_action α M]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff {a : α} {S : submonoid M} {x : M} :
a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff
lemma mem_pointwise_smul_iff_inv_smul_mem {a : α} {S : submonoid M} {x : M} :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem
lemma mem_inv_pointwise_smul_iff {a : α} {S : submonoid M} {x : M} : x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff
@[simp] lemma pointwise_smul_le_pointwise_smul_iff {a : α} {S T : submonoid M} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff
lemma pointwise_smul_subset_iff {a : α} {S T : submonoid M} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff
lemma subset_pointwise_smul_iff {a : α} {S T : submonoid M} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff
end group
section group_with_zero
variables [group_with_zero α] [mul_distrib_mul_action α M]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : submonoid M)
(x : M) : a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff₀ ha (S : set M) x
lemma mem_inv_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : submonoid M) (x : M) :
x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff₀ ha (S : set M) x
@[simp] lemma pointwise_smul_le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : submonoid M} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff₀ ha
lemma pointwise_smul_le_iff₀ {a : α} (ha : a ≠ 0) {S T : submonoid M} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff₀ ha
lemma le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : submonoid M} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff₀ ha
end group_with_zero
open_locale pointwise
@[to_additive]
lemma mem_closure_inv {G : Type*} [group G] (S : set G) (x : G) :
x ∈ submonoid.closure S⁻¹ ↔ x⁻¹ ∈ submonoid.closure S :=
by rw [closure_inv, mem_inv]
end submonoid
namespace add_submonoid
section monoid
variables [monoid α] [distrib_mul_action α A]
/-- The action on an additive submonoid corresponding to applying the action to every element.
This is available as an instance in the `pointwise` locale. -/
protected def pointwise_mul_action : mul_action α (add_submonoid A) :=
{ smul := λ a S, S.map (distrib_mul_action.to_add_monoid_End _ _ a),
one_smul := λ S, (congr_arg (λ f, S.map f) (monoid_hom.map_one _)).trans S.map_id,
mul_smul := λ a₁ a₂ S,
(congr_arg (λ f, S.map f) (monoid_hom.map_mul _ _ _)).trans (S.map_map _ _).symm,}
localized "attribute [instance] add_submonoid.pointwise_mul_action" in pointwise
open_locale pointwise
@[simp] lemma coe_pointwise_smul (a : α) (S : add_submonoid A) : ↑(a • S) = a • (S : set A) := rfl
lemma smul_mem_pointwise_smul (m : A) (a : α) (S : add_submonoid A) : m ∈ S → a • m ∈ a • S :=
(set.smul_mem_smul_set : _ → _ ∈ a • (S : set A))
instance pointwise_central_scalar [distrib_mul_action αᵐᵒᵖ A] [is_central_scalar α A] :
is_central_scalar α (add_submonoid A) :=
⟨λ a S, congr_arg (λ f, S.map f) $ add_monoid_hom.ext $ by exact op_smul_eq_smul _⟩
end monoid
section group
variables [group α] [distrib_mul_action α A]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff {a : α} {S : add_submonoid A} {x : A} :
a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff
lemma mem_pointwise_smul_iff_inv_smul_mem {a : α} {S : add_submonoid A} {x : A} :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem
lemma mem_smul_pointwise_iff_exists (m : A) (a : α) (S : add_submonoid A) :
m ∈ a • S ↔ ∃ (s : A), s ∈ S ∧ a • s = m :=
(set.mem_smul_set : m ∈ a • (S : set A) ↔ _)
lemma mem_inv_pointwise_smul_iff {a : α} {S : add_submonoid A} {x : A} : x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff
@[simp] lemma pointwise_smul_le_pointwise_smul_iff {a : α} {S T : add_submonoid A} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff
lemma pointwise_smul_le_iff {a : α} {S T : add_submonoid A} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff
lemma le_pointwise_smul_iff {a : α} {S T : add_submonoid A} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff
end group
section group_with_zero
variables [group_with_zero α] [distrib_mul_action α A]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : add_submonoid A)
(x : A) : a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff₀ ha (S : set A) x
lemma mem_pointwise_smul_iff_inv_smul_mem₀ {a : α} (ha : a ≠ 0) (S : add_submonoid A) (x : A) :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem₀ ha (S : set A) x
lemma mem_inv_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) (S : add_submonoid A) (x : A) :
x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff₀ ha (S : set A) x
@[simp] lemma pointwise_smul_le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : add_submonoid A} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff₀ ha
lemma pointwise_smul_le_iff₀ {a : α} (ha : a ≠ 0) {S T : add_submonoid A} :
a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff₀ ha
lemma le_pointwise_smul_iff₀ {a : α} (ha : a ≠ 0) {S T : add_submonoid A} :
S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff₀ ha
end group_with_zero
open_locale pointwise
end add_submonoid
/-! ### Elementwise multiplication of two additive submonoids
These definitions are cut-down versions of the ones around `submodule.has_mul`, as that API is
usually more useful. -/
namespace add_submonoid
variables [non_unital_non_assoc_semiring R]
/-- Multiplication of additive submonoids of a semiring `R`. The additive submonoid `S * T` is the
smallest additive submonoid of `R` containing the elements `s * t` for `s ∈ S` and `t ∈ T`. -/
instance : has_mul (add_submonoid R) :=
⟨λ M N, ⨆ s : M, N.map $ add_monoid_hom.mul s.1⟩
theorem mul_mem_mul {M N : add_submonoid R} {m n : R} (hm : m ∈ M) (hn : n ∈ N) : m * n ∈ M * N :=
(le_supr _ ⟨m, hm⟩ : _ ≤ M * N) ⟨n, hn, rfl⟩
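-- A minimal usage sketch (added for illustration; it merely restates
-- `mul_mem_mul`, so it is not part of the original API):
example {M N : add_submonoid R} {m n : R} (hm : m ∈ M) (hn : n ∈ N) :
  m * n ∈ M * N :=
mul_mem_mul hm hn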
theorem mul_le {M N P : add_submonoid R} : M * N ≤ P ↔ ∀ (m ∈ M) (n ∈ N), m * n ∈ P :=
⟨λ H m hm n hn, H $ mul_mem_mul hm hn,
λ H, supr_le $ λ ⟨m, hm⟩, map_le_iff_le_comap.2 $ λ n hn, H m hm n hn⟩
@[elab_as_eliminator] protected theorem mul_induction_on
{M N : add_submonoid R}
{C : R → Prop} {r : R} (hr : r ∈ M * N)
(hm : ∀ (m ∈ M) (n ∈ N), C (m * n))
(ha : ∀ x y, C x → C y → C (x + y)) : C r :=
(@mul_le _ _ _ _ ⟨C, ha, by simpa only [zero_mul] using hm _ (zero_mem _) _ (zero_mem _)⟩).2 hm hr
open_locale pointwise
variables R
-- this proof is copied directly from `submodule.span_mul_span`
theorem closure_mul_closure (S T : set R) : closure S * closure T = closure (S * T) :=
begin
apply le_antisymm,
{ rw mul_le, intros a ha b hb,
apply closure_induction ha,
work_on_goal 1 { intros, apply closure_induction hb,
work_on_goal 1 { intros, exact subset_closure ⟨_, _, ‹_›, ‹_›, rfl⟩ } },
all_goals { intros, simp only [mul_zero, zero_mul, zero_mem,
left_distrib, right_distrib, mul_smul_comm, smul_mul_assoc],
solve_by_elim [add_mem _ _, zero_mem _]
{ max_depth := 4, discharger := tactic.interactive.apply_instance } } },
{ rw closure_le, rintros _ ⟨a, b, ha, hb, rfl⟩,
exact mul_mem_mul (subset_closure ha) (subset_closure hb) }
end
variables {R}
@[simp] theorem mul_bot (S : add_submonoid R) : S * ⊥ = ⊥ :=
eq_bot_iff.2 $ mul_le.2 $ λ m hm n hn, by rw [add_submonoid.mem_bot] at hn ⊢; rw [hn, mul_zero]
@[simp] theorem bot_mul (S : add_submonoid R) : ⊥ * S = ⊥ :=
eq_bot_iff.2 $ mul_le.2 $ λ m hm n hn, by rw [add_submonoid.mem_bot] at hm ⊢; rw [hm, zero_mul]
@[mono] theorem mul_le_mul {M N P Q : add_submonoid R} (hmp : M ≤ P) (hnq : N ≤ Q) :
M * N ≤ P * Q :=
mul_le.2 $ λ m hm n hn, mul_mem_mul (hmp hm) (hnq hn)
theorem mul_le_mul_left {M N P : add_submonoid R} (h : M ≤ N) : M * P ≤ N * P :=
mul_le_mul h (le_refl P)
theorem mul_le_mul_right {M N P : add_submonoid R} (h : N ≤ P) : M * N ≤ M * P :=
mul_le_mul (le_refl M) h
lemma mul_subset_mul {M N : add_submonoid R} : (↑M : set R) * (↑N : set R) ⊆ (↑(M * N) : set R) :=
by { rintros _ ⟨i, j, hi, hj, rfl⟩, exact mul_mem_mul hi hj }
end add_submonoid
|
proposition bounded_closed_chain: fixes \<F> :: "'a::heine_borel set set" assumes "B \<in> \<F>" "bounded B" and \<F>: "\<And>S. S \<in> \<F> \<Longrightarrow> closed S" and "{} \<notin> \<F>" and chain: "\<And>S T. S \<in> \<F> \<and> T \<in> \<F> \<Longrightarrow> S \<subseteq> T \<or> T \<subseteq> S" shows "\<Inter>\<F> \<noteq> {}" |
import utilities
open list
open multiset
open set
open nat
set_option trace.simplify.rewrite true
variables {α : Type*} {κ : Type*}
variable r : κ → κ → Prop
variables (x : α) (k : κ) (xs : list α)
variables (f : α → κ) (P : α → Prop)
/-
# Insertion Sort w.r.t. Keys and Stability
-/
def insort_key [decidable_rel r] [is_linear_order κ r] : list α → list α
| [] := [x]
| (y :: ys) := if r (f x) (f y) then x :: y :: ys else y :: insort_key ys
def isort_key [decidable_rel r] [is_linear_order κ r]: list α → list α
| [] := []
| (x :: xs) := insort_key r x f (isort_key xs)
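/-
Added worked trace (comments only): on [(2, "b"), (1, "a")] with key `prod.fst`,
`isort_key` first sorts the tail to [(1, "a")] and then inserts (2, "b") after it,
giving [(1, "a"), (2, "b")]. Elements with equal keys keep their relative order,
which is the stability property proved in `sort_key_stable` below.
-/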
/-
## Functional Correctness
-/
lemma mset_insort_key [decidable_rel r] [is_linear_order κ r]:
((insort_key r x f xs): multiset α) = {x} + ↑ xs :=
begin
induction' xs,
{ refl},
simp [insort_key],
split_ifs,
{ refl},
simp [← multiset.cons_coe, ih],
end
lemma mset_isort_key [decidable_rel r] [is_linear_order κ r]: (↑ (isort_key r f xs): multiset α) = ↑ xs :=
begin
induction' xs,
{ refl},
simp [mset_insort_key, isort_key, ih],
refl,
end
lemma set_insort_key [decidable_rel r] [is_linear_order κ r]: (insort_key r x f xs).to_set = {x} ∪ xs.to_set:=
begin
simp [← set_mset_mset, mset_insort_key, multiset.to_set],
refl
end
lemma set_isort_key [decidable_rel r] [is_linear_order κ r]: (isort_key r f xs).to_set = xs.to_set :=
begin
simp [← set_mset_mset, mset_isort_key],
end
lemma sorted_insort_key [decidable_rel r] [is_linear_order κ r]:
sorted' r ((insort_key r x f xs).map f) = sorted' r (xs.map f) :=
begin
induction' xs fixing *,
{ simp [insort_key, sorted'],
intros,
exact false.elim H},
simp [insort_key],
split_ifs,
{ simp [sorted', list.to_set],
intros h1 h2,
apply and.intro h,
intros k h3,
exact trans h (h1 k h3) },
simp [sorted', ih],
intros h1,
simp [← set_mset_mset, ← multiset.coe_map, mset_insort_key, multiset.to_set],
intros h2,
exact or.resolve_left (total_of r (f x) (f hd)) h
end
lemma sorted_isort_key [decidable_rel r] [is_linear_order κ r] :
sorted' r (map f (isort_key r f xs)) :=
begin
induction' xs,
repeat { simp [isort_key, sorted_insort_key, *] },
end
/-
## Stability
-/
lemma insort_is_Cons [decidable_rel r] [is_linear_order κ r]:
(∀ a ∈ xs.to_set, r (f x) (f a)) → insort_key r x f xs = (x:: xs):=
begin
cases xs,
repeat { simp [insort_key, list.to_set] },
intros h h1 h2,
cc,
end
lemma filter_insort_key_neg [decidable_rel r] [is_linear_order κ r] [decidable_pred P]:
¬ P x → (insort_key r x f xs).filter P = xs.filter P :=
begin
induction xs,
{ intro h,
simp [insort_key, *] },
simp [insort_key],
split_ifs,
{ intro h1,
simp * },
intro h1,
simp [list.filter, xs_ih h1],
end
lemma filter_insort_key_pos [decidable_rel r] [is_linear_order κ r] [decidable_pred P]:
sorted' r (xs.map f) ∧ P x → (insort_key r x f xs).filter P = insort_key r x f (xs.filter P) :=
begin
induction xs,
{ intro,
simp [insort_key, *] },
simp [sorted', list.filter, insort_key],
split_ifs,
{ intros,
simp [insort_key, *] },
{ have h5: (∀ a ∈ (list.filter P xs_tl).to_set, r (f x) (f a)) → insort_key r x f (filter P xs_tl) = (x:: (filter P xs_tl)), from insort_is_Cons r x (filter P xs_tl) f,
simp [ ← member_list_set] at h5 |-,
intros h2 h3 h4,
have h6: ∀ (a : α), a ∈ xs_tl → P a → r (f x) (f a), from begin
intros a h7 h8,
exact trans_of r h (h2 a h7),
end,
simp [*, h5 h6] },
{ intros,
simp [list.filter, *, insort_key] },
intros,
simp [list.filter, *],
end
/-
Lemma 2.9 from __Functional Algorithms Verified!__
-/
lemma sort_key_stable [decidable_rel r] [is_linear_order κ r] [decidable_pred (λ y, f y = k)]:
(isort_key r f xs).filter (λ y, f y = k) = xs.filter (λ y, f y = k):=
begin
induction xs,
repeat { simp [isort_key, list.filter] },
split_ifs,
{ simp [isort_key, *, filter_insort_key_pos, sorted_isort_key, ← member_list_set],
have h1: (∀ a ∈ (list.filter (λ (y : α), f y = k) xs_tl).to_set, r (f _) (f a)) → insort_key r _ f _ = (_:: _) , from insort_is_Cons r xs_hd (filter (λ (y : α), f y = k) xs_tl) f,
have h3: ∀ (a : α), a ∈ (list.filter (λ (y : α), f y = k) xs_tl).to_set → r (f xs_hd) (f a), from begin
intros,
simp [← member_list_set, *] at *,
exact refl_of r k,
end,
exact h1 h3 },
simp [isort_key, filter_insort_key_neg, *],
end
|
\name{default_smooth_fun}
\alias{default_smooth_fun}
\title{
Default Smoothing function
}
\description{
Default Smoothing function
}
\usage{
default_smooth_fun(x)
}
\arguments{
\item{x}{Input numeric vector.}
}
\details{
The smoothing function is applied to every row of the normalized matrix. For this default smoothing function,
\code{\link[locfit]{locfit}} is tried on the vector first. If it fails, \code{\link[stats]{loess}} smoothing is tried instead.
If both smoothers fail, an error is thrown.
}
\author{
Zuguang Gu <[email protected]>
}
\examples{
# There is no example
NULL
}
|
Formal statement is: lemma closed_Un_complement_component: fixes S :: "'a::real_normed_vector set" assumes S: "closed S" and c: " c \<in> components(-S)" shows "closed (S \<union> c)" Informal statement is: If $S$ is a closed set and $c$ is a component of the complement of $S$, then $S \cup c$ is closed. |
export TabularRandomPolicy
"""
TabularRandomPolicy(prob::Array{Float64, 2})
`prob` describes the distribution of actions for each state.
"""
struct TabularRandomPolicy <: AbstractPolicy
prob::Array{Float64,2}
end
(π::TabularRandomPolicy)(s) = sample(Weights(π.prob[s, :]))
(π::TabularRandomPolicy)(obs::Observation) = π(get_state(obs))
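# Usage sketch (added; assumes `sample` and `Weights` from StatsBase are in scope,
# as they are where this file is included):
#   p = TabularRandomPolicy([0.3 0.7; 0.5 0.5])
#   p(1)   # draws action 1 or 2 with probabilities 0.3 / 0.7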
get_prob(π::TabularRandomPolicy, s) = @view π.prob[s, :]
get_prob(π::TabularRandomPolicy, s, a) = π.prob[s, a] |
import Aoc
import Data.List
import Data.List1
import Data.SortedMap as M
%default total
-- Return pairwise deltas between elements of a list.
deltas : List1 Integer -> List Integer
deltas (x:::xs) = zipWith (-) xs (x::xs)
-- Index into a map, returning 0 for a missing key.
(!.) : M.SortedMap k Integer -> k -> Integer
(!.) m k = maybe 0 id (M.lookup k m)
infixr 10 !.
-- Given the sorted "init" adapters and the "final" adapter,
-- count possible paths from 0 to final.
countPaths : List1 Integer -> Integer -> Integer
countPaths initAdapters final =
-- let's do some ~dynamic programming~ with foldr and a map
let m0 = M.singleton final 1
f = (\k, m => M.insert k (m!.(k+1) + m!.(k+2) + m!.(k+3)) m)
in foldr f m0 initAdapters !. 0
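-- Worked example (added): with sorted adapters [1,2,3] the final adapter is 6,
-- and countPaths (0:::[1,2,3]) 6 = 4, matching the four chains
-- 0-1-2-3-6, 0-1-3-6, 0-2-3-6 and 0-3-6.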
main : IO ()
main = do
ns <- readIntegerLines {a=Integer}
let sorted = sort ns
let initAdapters = 0 ::: sorted
let final = last initAdapters + 3
let adapters = 0 ::: (sorted ++ [final])
let ds = deltas adapters
putStr "* "; printLn $ count (==1) ds * count (==3) ds
putStr "** "; printLn $ countPaths initAdapters final
|
[STATEMENT]
lemma set_ofD: "(x, y) \<in> set_of P \<Longrightarrow> P x y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x, y) \<in> set_of P \<Longrightarrow> P x y
[PROOF STEP]
unfolding set_of_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x, y) \<in> {(x, y). P x y} \<Longrightarrow> P x y
[PROOF STEP]
by simp |
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝¹ : NonUnitalNonAssocSemiring α
inst✝ : NonUnitalNonAssocSemiring β
f g : α →ₙ+* β
h : (fun f => f.toFun) f = (fun f => f.toFun) g
⊢ f = g
[PROOFSTEP]
cases f
[GOAL]
case mk
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝¹ : NonUnitalNonAssocSemiring α
inst✝ : NonUnitalNonAssocSemiring β
g : α →ₙ+* β
toMulHom✝ : α →ₙ* β
map_zero'✝ : MulHom.toFun toMulHom✝ 0 = 0
map_add'✝ : ∀ (x y : α), MulHom.toFun toMulHom✝ (x + y) = MulHom.toFun toMulHom✝ x + MulHom.toFun toMulHom✝ y
h : (fun f => f.toFun) { toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ } = (fun f => f.toFun) g
⊢ { toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ } = g
[PROOFSTEP]
cases g
[GOAL]
case mk.mk
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝¹ : NonUnitalNonAssocSemiring α
inst✝ : NonUnitalNonAssocSemiring β
toMulHom✝¹ : α →ₙ* β
map_zero'✝¹ : MulHom.toFun toMulHom✝¹ 0 = 0
map_add'✝¹ : ∀ (x y : α), MulHom.toFun toMulHom✝¹ (x + y) = MulHom.toFun toMulHom✝¹ x + MulHom.toFun toMulHom✝¹ y
toMulHom✝ : α →ₙ* β
map_zero'✝ : MulHom.toFun toMulHom✝ 0 = 0
map_add'✝ : ∀ (x y : α), MulHom.toFun toMulHom✝ (x + y) = MulHom.toFun toMulHom✝ x + MulHom.toFun toMulHom✝ y
h :
(fun f => f.toFun) { toMulHom := toMulHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ { toMulHom := toMulHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
{ toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
[PROOFSTEP]
congr
[GOAL]
case mk.mk.e_toMulHom
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝¹ : NonUnitalNonAssocSemiring α
inst✝ : NonUnitalNonAssocSemiring β
toMulHom✝¹ : α →ₙ* β
map_zero'✝¹ : MulHom.toFun toMulHom✝¹ 0 = 0
map_add'✝¹ : ∀ (x y : α), MulHom.toFun toMulHom✝¹ (x + y) = MulHom.toFun toMulHom✝¹ x + MulHom.toFun toMulHom✝¹ y
toMulHom✝ : α →ₙ* β
map_zero'✝ : MulHom.toFun toMulHom✝ 0 = 0
map_add'✝ : ∀ (x y : α), MulHom.toFun toMulHom✝ (x + y) = MulHom.toFun toMulHom✝ x + MulHom.toFun toMulHom✝ y
h :
(fun f => f.toFun) { toMulHom := toMulHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ toMulHom✝¹ = toMulHom✝
[PROOFSTEP]
apply FunLike.coe_injective'
[GOAL]
case mk.mk.e_toMulHom.a
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝¹ : NonUnitalNonAssocSemiring α
inst✝ : NonUnitalNonAssocSemiring β
toMulHom✝¹ : α →ₙ* β
map_zero'✝¹ : MulHom.toFun toMulHom✝¹ 0 = 0
map_add'✝¹ : ∀ (x y : α), MulHom.toFun toMulHom✝¹ (x + y) = MulHom.toFun toMulHom✝¹ x + MulHom.toFun toMulHom✝¹ y
toMulHom✝ : α →ₙ* β
map_zero'✝ : MulHom.toFun toMulHom✝ 0 = 0
map_add'✝ : ∀ (x y : α), MulHom.toFun toMulHom✝ (x + y) = MulHom.toFun toMulHom✝ x + MulHom.toFun toMulHom✝ y
h :
(fun f => f.toFun) { toMulHom := toMulHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMulHom := toMulHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ ↑toMulHom✝¹ = ↑toMulHom✝
[PROOFSTEP]
exact h
[GOAL]
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
⊢ α →ₙ+* α
[PROOFSTEP]
refine' { toFun := id .. }
[GOAL]
case refine'_1
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
⊢ ∀ (x y : α), id (x * y) = id x * id y
[PROOFSTEP]
intros
[GOAL]
case refine'_2
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
⊢ MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id x * id y) } 0 = 0
[PROOFSTEP]
intros
[GOAL]
case refine'_3
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
⊢ ∀ (x y : α),
MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id x * id y) } (x + y) =
MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id x * id y) } x +
MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id x * id y) } y
[PROOFSTEP]
intros
[GOAL]
case refine'_1
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
x✝ y✝ : α
⊢ id (x✝ * y✝) = id x✝ * id y✝
[PROOFSTEP]
rfl
[GOAL]
case refine'_2
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
⊢ MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id (x * y)) } 0 = 0
[PROOFSTEP]
rfl
[GOAL]
case refine'_3
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α✝
inst✝¹ : NonUnitalNonAssocSemiring β
α : Type u_5
inst✝ : NonUnitalNonAssocSemiring α
x✝ y✝ : α
⊢ MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id (x * y)) } (x✝ + y✝) =
MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id (x * y)) } x✝ +
MulHom.toFun { toFun := id, map_mul' := (_ : ∀ (x y : α), id (x * y) = id (x * y)) } y✝
[PROOFSTEP]
rfl
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α
inst✝¹ : NonUnitalNonAssocSemiring β
inst✝ : NonUnitalNonAssocSemiring γ
g✝ : β →ₙ+* γ
f : α →ₙ+* β
g : β →ₙ+* γ
⊢ comp g 0 = 0
[PROOFSTEP]
ext
[GOAL]
case a
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α
inst✝¹ : NonUnitalNonAssocSemiring β
inst✝ : NonUnitalNonAssocSemiring γ
g✝ : β →ₙ+* γ
f : α →ₙ+* β
g : β →ₙ+* γ
x✝ : α
⊢ ↑(comp g 0) x✝ = ↑0 x✝
[PROOFSTEP]
simp
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α
inst✝¹ : NonUnitalNonAssocSemiring β
inst✝ : NonUnitalNonAssocSemiring γ
g : β →ₙ+* γ
f✝ f : α →ₙ+* β
⊢ comp 0 f = 0
[PROOFSTEP]
ext
[GOAL]
case a
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α
inst✝¹ : NonUnitalNonAssocSemiring β
inst✝ : NonUnitalNonAssocSemiring γ
g : β →ₙ+* γ
f✝ f : α →ₙ+* β
x✝ : α
⊢ ↑(comp 0 f) x✝ = ↑0 x✝
[PROOFSTEP]
rfl
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonUnitalNonAssocSemiring α
inst✝¹ : NonUnitalNonAssocSemiring β
inst✝ : NonUnitalNonAssocSemiring γ
g✝ : β →ₙ+* γ
f : α →ₙ+* β
g : β →ₙ+* γ
f₁ f₂ : α →ₙ+* β
hg : Injective ↑g
h : comp g f₁ = comp g f₂
x : α
⊢ ↑g (↑f₁ x) = ↑g (↑f₂ x)
[PROOFSTEP]
rw [← comp_apply, h, comp_apply]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : NonAssocSemiring α
inst✝¹ : NonAssocSemiring β
inst✝ : RingHomClass F α β
f : F
a : α
⊢ ↑f (bit1 a) = bit1 (↑f a)
[PROOFSTEP]
simp [bit1]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f g : α →+* β
h : (fun f => f.toFun) f = (fun f => f.toFun) g
⊢ f = g
[PROOFSTEP]
cases f
[GOAL]
case mk
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
g : α →+* β
toMonoidHom✝ : α →* β
map_zero'✝ : OneHom.toFun (↑toMonoidHom✝) 0 = 0
map_add'✝ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝) (x + y) = OneHom.toFun (↑toMonoidHom✝) x + OneHom.toFun (↑toMonoidHom✝) y
h :
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ } =
(fun f => f.toFun) g
⊢ { toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ } = g
[PROOFSTEP]
cases g
[GOAL]
case mk.mk
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
toMonoidHom✝¹ : α →* β
map_zero'✝¹ : OneHom.toFun (↑toMonoidHom✝¹) 0 = 0
map_add'✝¹ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝¹) (x + y) = OneHom.toFun (↑toMonoidHom✝¹) x + OneHom.toFun (↑toMonoidHom✝¹) y
toMonoidHom✝ : α →* β
map_zero'✝ : OneHom.toFun (↑toMonoidHom✝) 0 = 0
map_add'✝ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝) (x + y) = OneHom.toFun (↑toMonoidHom✝) x + OneHom.toFun (↑toMonoidHom✝) y
h :
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ { toMonoidHom := toMonoidHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
{ toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
[PROOFSTEP]
congr
[GOAL]
case mk.mk.e_toMonoidHom
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
toMonoidHom✝¹ : α →* β
map_zero'✝¹ : OneHom.toFun (↑toMonoidHom✝¹) 0 = 0
map_add'✝¹ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝¹) (x + y) = OneHom.toFun (↑toMonoidHom✝¹) x + OneHom.toFun (↑toMonoidHom✝¹) y
toMonoidHom✝ : α →* β
map_zero'✝ : OneHom.toFun (↑toMonoidHom✝) 0 = 0
map_add'✝ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝) (x + y) = OneHom.toFun (↑toMonoidHom✝) x + OneHom.toFun (↑toMonoidHom✝) y
h :
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ toMonoidHom✝¹ = toMonoidHom✝
[PROOFSTEP]
apply FunLike.coe_injective'
[GOAL]
case mk.mk.e_toMonoidHom.a
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
toMonoidHom✝¹ : α →* β
map_zero'✝¹ : OneHom.toFun (↑toMonoidHom✝¹) 0 = 0
map_add'✝¹ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝¹) (x + y) = OneHom.toFun (↑toMonoidHom✝¹) x + OneHom.toFun (↑toMonoidHom✝¹) y
toMonoidHom✝ : α →* β
map_zero'✝ : OneHom.toFun (↑toMonoidHom✝) 0 = 0
map_add'✝ :
∀ (x y : α), OneHom.toFun (↑toMonoidHom✝) (x + y) = OneHom.toFun (↑toMonoidHom✝) x + OneHom.toFun (↑toMonoidHom✝) y
h :
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝¹, map_zero' := map_zero'✝¹, map_add' := map_add'✝¹ } =
(fun f => f.toFun) { toMonoidHom := toMonoidHom✝, map_zero' := map_zero'✝, map_add' := map_add'✝ }
⊢ ↑toMonoidHom✝¹ = ↑toMonoidHom✝
[PROOFSTEP]
exact h
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f : α →+* β
⊢ ↑(toMonoidWithZeroHom f) = ↑f
[PROOFSTEP]
rfl
[GOAL]
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
⊢ ↑f (if p then 0 else 1) = if p then 0 else 1
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
h : p
⊢ ↑f (if p then 0 else 1) = 0
[PROOFSTEP]
simp [h]
[GOAL]
case neg
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
h : ¬p
⊢ ↑f (if p then 0 else 1) = 1
[PROOFSTEP]
simp [h]
[GOAL]
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
⊢ ↑f (if p then 1 else 0) = if p then 1 else 0
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
h : p
⊢ ↑f (if p then 1 else 0) = 1
[PROOFSTEP]
simp [h]
[GOAL]
case neg
F✝ : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f✝ : α →+* β
x y : α
F : Type u_5
inst✝¹ : RingHomClass F α β
f : F
p : Prop
inst✝ : Decidable p
h : ¬p
⊢ ↑f (if p then 1 else 0) = 0
[PROOFSTEP]
simp [h]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f : α →+* β
x y : α
⊢ 0 = 1 ↔ ↑f 1 = 0
[PROOFSTEP]
rw [map_one, eq_comm]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝² : NonAssocSemiring α
x✝¹ : NonAssocSemiring β
f : α →+* β
x✝ y : α
h : ↑f 1 = 0
x : α
⊢ ↑f x = 0
[PROOFSTEP]
rw [← mul_one x, map_mul, h, mul_zero]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝³ : NonAssocSemiring α
x✝² : NonAssocSemiring β
f : α →+* β
x✝¹ y✝ : α
h : ∀ (x : α), ↑f x = 0
y : β
x✝ : y ∈ Set.range ↑f
x : α
hx : ↑f x = y
⊢ y ∈ {0}
[PROOFSTEP]
simp [← hx, h x]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f : α →+* β
x y✝ : α
h : ∀ (x : α), ↑f x = 0
y : β
hy : y ∈ {0}
⊢ ↑f 0 = y
[PROOFSTEP]
simpa using hy.symm
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α
x✝ : NonAssocSemiring β
f : α →+* β
x y : α
inst✝ : Nontrivial β
h : 1 = 0
⊢ ↑f 1 = 0
[PROOFSTEP]
rw [h, map_zero]
[GOAL]
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ α →+* α
[PROOFSTEP]
refine' { toFun := _root_.id .. }
[GOAL]
case refine'_1
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ _root_.id 1 = 1
[PROOFSTEP]
intros
[GOAL]
case refine'_2
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ ∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } x *
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } y
[PROOFSTEP]
intros
[GOAL]
case refine'_3
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := ?refine'_1 },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } x *
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } y) })
0 =
0
[PROOFSTEP]
intros
[GOAL]
case refine'_4
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ ∀ (x y : α),
OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := ?refine'_1 },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } x *
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } y) })
(x + y) =
OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := ?refine'_1 },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } x *
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } y) })
x +
OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := ?refine'_1 },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } x *
OneHom.toFun { toFun := _root_.id, map_one' := ?refine'_1 } y) })
y
[PROOFSTEP]
intros
[GOAL]
case refine'_1
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ _root_.id 1 = 1
[PROOFSTEP]
rfl
[GOAL]
case refine'_2
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝² : NonAssocSemiring α✝
x✝¹ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
x✝ y✝ : α
⊢ OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x✝ * y✝) =
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } x✝ *
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } y✝
[PROOFSTEP]
rfl
[GOAL]
case refine'_3
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝¹ : NonAssocSemiring α✝
x✝ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
⊢ OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y)) })
0 =
0
[PROOFSTEP]
rfl
[GOAL]
case refine'_4
F : Type u_1
α✝ : Type u_2
β : Type u_3
γ : Type u_4
x✝² : NonAssocSemiring α✝
x✝¹ : NonAssocSemiring β
α : Type u_5
inst✝ : NonAssocSemiring α
x✝ y✝ : α
⊢ OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y)) })
(x✝ + y✝) =
OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y)) })
x✝ +
OneHom.toFun
(↑{ toOneHom := { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) },
map_mul' :=
(_ :
∀ (x y : α),
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y) =
OneHom.toFun { toFun := _root_.id, map_one' := (_ : _root_.id 1 = _root_.id 1) } (x * y)) })
y✝
[PROOFSTEP]
rfl
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝² : NonAssocSemiring α
x✝¹ : NonAssocSemiring β
x✝ : NonAssocSemiring γ
g : β →+* γ
f : α →+* β
src✝ : α →ₙ+* γ := NonUnitalRingHom.comp (toNonUnitalRingHom g) (toNonUnitalRingHom f)
⊢ (↑g ∘ ↑f) 1 = 1
[PROOFSTEP]
simp
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
x✝² : NonAssocSemiring α
x✝¹ : NonAssocSemiring β
x✝ : NonAssocSemiring γ
g : β →+* γ
f₁ f₂ : α →+* β
hg : Injective ↑g
h : comp g f₁ = comp g f₂
x : α
⊢ ↑g (↑f₁ x) = ↑g (↑f₂ x)
[PROOFSTEP]
rw [← comp_apply, h, comp_apply]
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : Ring α
inst✝¹ : IsDomain α
inst✝ : Ring β
f : β →+* α
hf : Injective ↑f
⊢ IsDomain β
[PROOFSTEP]
haveI := pullback_nonzero f f.map_zero f.map_one
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : Ring α
inst✝¹ : IsDomain α
inst✝ : Ring β
f : β →+* α
hf : Injective ↑f
this : Nontrivial β
⊢ IsDomain β
[PROOFSTEP]
haveI := IsRightCancelMulZero.to_noZeroDivisors α
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : Ring α
inst✝¹ : IsDomain α
inst✝ : Ring β
f : β →+* α
hf : Injective ↑f
this✝ : Nontrivial β
this : NoZeroDivisors α
⊢ IsDomain β
[PROOFSTEP]
haveI := hf.noZeroDivisors f f.map_zero f.map_mul
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : Ring α
inst✝¹ : IsDomain α
inst✝ : Ring β
f : β →+* α
hf : Injective ↑f
this✝¹ : Nontrivial β
this✝ : NoZeroDivisors α
this : NoZeroDivisors β
⊢ IsDomain β
[PROOFSTEP]
exact NoZeroDivisors.to_isDomain β
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x y : β
⊢ OneHom.toFun { toFun := f.toFun, map_one' := h_one } (x * y) =
OneHom.toFun { toFun := f.toFun, map_one' := h_one } x * OneHom.toFun { toFun := f.toFun, map_one' := h_one } y
[PROOFSTEP]
have hxy := h (x + y)
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x y : β
hxy : ↑f ((x + y) * (x + y)) = ↑f (x + y) * ↑f (x + y)
⊢ OneHom.toFun { toFun := f.toFun, map_one' := h_one } (x * y) =
OneHom.toFun { toFun := f.toFun, map_one' := h_one } x * OneHom.toFun { toFun := f.toFun, map_one' := h_one } y
[PROOFSTEP]
rw [mul_add, add_mul, add_mul, f.map_add, f.map_add, f.map_add, f.map_add, h x, h y, add_mul, mul_add, mul_add, ←
sub_eq_zero, add_comm (f x * f x + f (y * x)), ← sub_sub, ← sub_sub, ← sub_sub, mul_comm y x, mul_comm (f y) (f x)] at
hxy
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x y : β
hxy✝ : ↑f x * ↑f x + ↑f (y * x) + (↑f (x * y) + ↑f y * ↑f y) = ↑f x * ↑f x + ↑f x * ↑f y + (↑f y * ↑f x + ↑f y * ↑f y)
hxy : ↑f (x * y) + ↑f y * ↑f y + (↑f x * ↑f x + ↑f (x * y)) - ↑f x * ↑f x - ↑f x * ↑f y - ↑f x * ↑f y - ↑f y * ↑f y = 0
⊢ OneHom.toFun { toFun := f.toFun, map_one' := h_one } (x * y) =
OneHom.toFun { toFun := f.toFun, map_one' := h_one } x * OneHom.toFun { toFun := f.toFun, map_one' := h_one } y
[PROOFSTEP]
simp only [add_assoc, add_sub_assoc, add_sub_cancel'_right] at hxy
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x y : β
hxy✝ : ↑f x * ↑f x + ↑f (y * x) + (↑f (x * y) + ↑f y * ↑f y) = ↑f x * ↑f x + ↑f x * ↑f y + (↑f y * ↑f x + ↑f y * ↑f y)
hxy : ↑f (x * y) + (↑f (x * y) - ↑f x * ↑f y - ↑f x * ↑f y) = 0
⊢ OneHom.toFun { toFun := f.toFun, map_one' := h_one } (x * y) =
OneHom.toFun { toFun := f.toFun, map_one' := h_one } x * OneHom.toFun { toFun := f.toFun, map_one' := h_one } y
[PROOFSTEP]
rw [sub_sub, ← two_mul, ← add_sub_assoc, ← two_mul, ← mul_sub, mul_eq_zero (M₀ := α), sub_eq_zero,
or_iff_not_imp_left] at hxy
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x y : β
hxy✝ : ↑f x * ↑f x + ↑f (y * x) + (↑f (x * y) + ↑f y * ↑f y) = ↑f x * ↑f x + ↑f x * ↑f y + (↑f y * ↑f x + ↑f y * ↑f y)
hxy : ¬2 = 0 → ↑f (x * y) = ↑f x * ↑f y
⊢ OneHom.toFun { toFun := f.toFun, map_one' := h_one } (x * y) =
OneHom.toFun { toFun := f.toFun, map_one' := h_one } x * OneHom.toFun { toFun := f.toFun, map_one' := h_one } y
[PROOFSTEP]
exact hxy h_two
[GOAL]
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
⊢ ↑(mkRingHomOfMulSelfOfTwoNeZero f h h_two h_one) = f
[PROOFSTEP]
ext
[GOAL]
case h
F : Type u_1
α : Type u_2
β : Type u_3
γ : Type u_4
inst✝² : CommRing α
inst✝¹ : IsDomain α
inst✝ : CommRing β
f : β →+ α
h : ∀ (x : β), ↑f (x * x) = ↑f x * ↑f x
h_two : 2 ≠ 0
h_one : ↑f 1 = 1
x✝ : β
⊢ ↑↑(mkRingHomOfMulSelfOfTwoNeZero f h h_two h_one) x✝ = ↑f x✝
[PROOFSTEP]
rfl
|
(* Title: HOL/Auth/n_germanSimp_lemma_on_inv__30.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_on_inv__30 imports n_germanSimp_base
begin
section{*All lemmas on causal relation between inv__30 and some rule r*}
lemma n_SendInv__part__0Vsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan3'') p__Inv4) ''Cmd'')) (Const InvAck)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Ident ''ExGntd'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__30:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__30 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__30:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__0Vsinv__30:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqE__part__1Vsinv__30:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__30:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__30 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
Formal statement is: lemma deformation_retract_of_space: "S \<subseteq> topspace X \<and> (\<exists>r. homotopic_with (\<lambda>x. True) X X id r \<and> retraction_maps X (subtopology X S) r id) \<longleftrightarrow> S retract_of_space X \<and> (\<exists>f. homotopic_with (\<lambda>x. True) X X id f \<and> f ` (topspace X) \<subseteq> S)" Informal statement is: A subset $S$ of a topological space $X$ is a deformation retract of $X$ if and only if $S$ is a retract of $X$ and there exists a continuous map $f: X \to S$ such that $f$ is homotopic to the identity map on $X$. |
theory T83
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
# INF-510, v0.31, Claudio Torres, [email protected]. DI-UTFSM
## Textbook: Lloyd N. Trefethen, Spectral Methods in MATLAB, SIAM, Philadelphia, 2000
# More on Spectral Matrices
```python
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import scipy.sparse.linalg as sp
from scipy import interpolate
import scipy as spf
from sympy import *
import sympy as sym
from scipy.linalg import toeplitz
from ipywidgets import interact
from ipywidgets import IntSlider
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# The variable M is used for changing the default size of the figures
M=5
import ipywidgets as widgets
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
sym.init_printing()
```
### Chebyshev differentiation matrix
```python
def cheb(N):
if N==0:
D=0
x=1
return D,x
x = np.cos(np.pi*np.arange(N+1)/N)
c=np.hstack((2,np.ones(N-1),2))*((-1.)**np.arange(N+1))
X=np.tile(x,(N+1,1)).T
dX=X-X.T
D = np.outer(c,1./c)/(dX+np.eye(N+1))
D = D - np.diag(np.sum(D.T,axis=0))
return D,x
```
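A quick sanity check (added as a sketch, not part of the original notebook): the Chebyshev differentiation matrix is exact for polynomials of degree at most $N$, so applying it to $x^3$ sampled at the Chebyshev points should reproduce $3x^2$ up to rounding error.

```python
# Added check: differentiate x**3 with the N=6 Chebyshev matrix defined above.
D, x = cheb(6)
err = np.max(np.abs(D @ x**3 - 3 * x**2))
print("max error:", err)  # expected to be on the order of 1e-14
```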
## Understanding how `np.fft` computes the FFT
```python
def show_spectral_derivative_example(N):
x=np.linspace(2*np.pi/N,2*np.pi,N)
u = lambda x: np.sin(x)
up = lambda x: np.cos(x)
#u = lambda x: np.sin(x)*np.cos(x)
#up = lambda x: np.cos(x)*np.cos(x)-np.sin(x)*np.sin(x)
v=u(x)
K=np.fft.fftfreq(N)*N
iK=1j*K
vhat=np.fft.fft(v)
W=iK*vhat
W[int(N/2)]=0
vp=np.real(np.fft.ifft(W))
plt.figure(figsize=(10,10))
plt.plot(x,v,'ks-',markersize=12,markeredgewidth=3,label='$\sin(x)$',linewidth=3)
plt.plot(x,up(x),'b.-',markersize=24,markeredgewidth=3,label='Exact derivative: $\cos(x)$',linewidth=3)
plt.plot(x,np.real(vp),'rx-',markersize=10,markeredgewidth=3,label='spectral derivative',linewidth=3)
plt.grid(True)
plt.legend(loc='best')
plt.xlabel('$x$')
plt.show()
print('v :',v)
print('vhat :',vhat)
print('K :',K)
print('W :',W)
print('vprime: ',vp)
widgets.interact(show_spectral_derivative_example,N=(2,40,2))
```
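A small added aside: `np.fft.fftfreq(N)*N` recovers the integer wavenumbers in the order NumPy stores FFT output (non-negative frequencies first, then the negative ones), which is why `K` is built this way above and the Nyquist mode is zeroed before differentiating.

```python
# Added illustration of the wavenumber ordering used above.
print(np.fft.fftfreq(8) * 8)  # [ 0.  1.  2.  3. -4. -3. -2. -1.]
```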
```python
def spectralDerivativeByFFT(v,nu=1):
if not np.all(np.isreal(v)):
raise ValueError('The input vector must be real')
N=v.shape[0]
K=np.fft.fftfreq(N)*N
iK=(1j*K)**nu
v_hat=np.fft.fft(v)
w_hat=iK*v_hat
if np.mod(nu,2)!=0:
w_hat[int(N/2)]=0
return np.real(np.fft.ifft(w_hat))
def my_D2_spec_2pi(N):
h=(2*np.pi/N)
c=np.zeros(N)
j=np.arange(1,N)
c[0]=-np.pi**2/(3.*h**2)-1./6.
c[1:]=-0.5*((-1)**j)/(np.sin(j*h/2.)**2)
D2=toeplitz(c)
return D2
```
# Fractional derivative application
```python
def fractional_derivative(N=10,nu=1):
x=np.linspace(2*np.pi/N,2*np.pi,N)
u = lambda x: np.sin(x)
up = lambda x: np.cos(x)
v = u(x)
vp=spectralDerivativeByFFT(v,nu)
plt.figure(figsize=(10,10))
plt.plot(x,v,'ks-',markersize=12,markeredgewidth=3,label='$\sin(x)$',linewidth=3)
plt.plot(x,up(x),'b.-',markersize=24,markeredgewidth=3,label='Exact derivative: $\cos(x)$',linewidth=3)
plt.plot(x,np.real(vp),'rx-',markersize=10,markeredgewidth=3,label=r'$\frac{d^{\nu}u}{dx^{\nu}}$',linewidth=3)
plt.grid(True)
plt.legend(loc='best')
plt.xlabel('$x$')
plt.show()
d_nu=0.1
widgets.interact(fractional_derivative,N=(4,100),nu=(d_nu,1,d_nu))
```
# Example 1: Computing Eigenvalues
We are solving: $-u''(x)+x^2\,u(x)=\lambda\, u(x)$ on $\mathbb{R}$
```python
L=8.0
def show_example_1(N=6):
h=2*np.pi/N
x=np.linspace(h,2*np.pi,N)
x=L*(x-np.pi)/np.pi
D2=(np.pi/L)**2*my_D2_spec_2pi(N)
w, v = np.linalg.eig(-D2+np.diag(x**2))
# eigenvalues = np.sort(np.linalg.eigvals(-D2+np.diag(x**2)))
ii = np.argsort(w)
w=w[ii]
v=v[:,ii]
plt.figure(figsize=(2*M,2*M))
for i in np.arange(1,5):
plt.subplot(2,2,i)
plt.title(r'$u_{:d}(x),\, \lambda_{:d}={:f}$'.format(i,i,w[i-1]))
plt.plot(x,v[:,i],'kx',markersize=16,markeredgewidth=3)
plt.grid(True)
plt.show()
widgets.interact(show_example_1,N=(6,100,1))
```
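For reference (an added sketch, not in the original notebook): the exact eigenvalues of $-u''(x)+x^2\,u(x)=\lambda\,u(x)$ on $\mathbb{R}$ are the odd integers $1, 3, 5, \dots$, so the values reported above can be checked directly.

```python
# Added check against the exact spectrum 1, 3, 5, 7 (reuses L and
# my_D2_spec_2pi from the cells above).
N = 36
h = 2 * np.pi / N
x = L * (np.linspace(h, 2 * np.pi, N) - np.pi) / np.pi
D2 = (np.pi / L) ** 2 * my_D2_spec_2pi(N)
lam = np.sort(np.linalg.eigvals(-D2 + np.diag(x**2)))
print(np.real(lam[:4]))  # approximately [1. 3. 5. 7.]
```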
# Example 2: Solving ODE
Solving the following BVP $u_{xx}=\exp(4\,x)$ with $u(-1)=u(1)=0$
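For reference (added note): integrating twice gives $u(x) = \tfrac{1}{16}e^{4x} + c_1 x + c_2$, and the boundary conditions $u(\pm 1) = 0$ give $c_1 = -\tfrac{1}{16}\sinh 4$ and $c_2 = -\tfrac{1}{16}\cosh 4$; this is the `exact` expression used for the error estimate in the cell below.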
```python
def example_2(N=16):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
f = np.exp(4*x[1:-1])
u = np.linalg.solve(D2,f)
u = np.concatenate(([0],u,[0]),axis=0)
plt.figure(figsize=(M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
exact = (np.exp(4*xx)-np.sinh(4.)*xx-np.cosh(4.))/16.
plt.title('max error= '+str(np.linalg.norm(exact-uu,np.inf)))
plt.ylim([-2.5,0.5])
plt.show()
interact(example_2,N=(2,35))
```
# Example 3: Solving ODE
Solving the following BVP $u_{xx}=\exp(u)$ with $u(-1)=u(1)=0$
```python
def example_3(N=16,IT=20):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
u = np.zeros(N-1)
for i in np.arange(IT):
u_new = np.linalg.solve(D2,np.exp(u))
change = np.linalg.norm(u_new-u,np.inf)
u = u_new
u = np.concatenate(([0],u,[0]),axis=0)
plt.figure(figsize=(M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
plt.title('IT= '+str(IT)+' u(0)= '+str(u[int(N/2)]))
plt.ylim([-0.5,0.])
plt.show()
interact(example_3,N=(2,30),IT=(0,100))
```
# Example 4: Eigenvalue BVP
Solve $u_{xx}=\lambda\,u$ with $u(-1)=u(1)=0$
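For reference (added note): the exact eigenpairs are $\lambda_j = -\left(\tfrac{j\pi}{2}\right)^2$ with eigenfunctions $\sin\!\big(j\pi(x+1)/2\big)$, which is why the plot title below reports the computed eigenvalue divided by $\pi^2/4$ and a points-per-wavelength estimate $4N/(\pi j)$.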
```python
N_widget = IntSlider(min=2, max=50, step=1, value=10)
j_widget = IntSlider(min=1, max=49, step=1, value=5)
def update_j_range(*args):
j_widget.max = N_widget.value-1
j_widget.observe(update_j_range, 'value')
def example_4(N=36,j=5):
D,x = cheb(N)
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
lam, V = np.linalg.eig(D2)
ii=np.argsort(-np.real(lam))
lam=lam[ii]
V=V[:,ii]
u = np.concatenate(([0],V[:,j-1],[0]),axis=0)
plt.figure(figsize=(2*M,M))
plt.plot(x,u,'k.')
xx = np.linspace(-1,1,1000)
P = np.polyfit(x, u, N)
uu = np.polyval(P, xx)
plt.plot(xx,uu,'b-')
plt.grid(True)
plt.title('eig '+str(j)+' = '+str(lam[j-1]*4./(np.pi**2))+' pi**2/4'+' ppw '+str(4*N/(np.pi*j)))
plt.show()
interact(example_4,N=N_widget,j=j_widget)
```
# Example 5: (2D) Poisson equation $u_{xx}+u_{yy}=f$ with u=0 on $\partial\Gamma$
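As in the earlier examples, the interior unknowns are stacked into a single vector, and the discrete Laplacian is assembled (added note) as the Kronecker sum $L = I \otimes D^2 + D^2 \otimes I$ in the cell below.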
```python
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
def example_5(N=10,elev=40,azim=230):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
f = 10*np.sin(8*xx*(yy-1))
I = np.eye(N-1)
# The Laplacian
L = np.kron(I,D2)+np.kron(D2,I)
u = np.linalg.solve(L,f)
fig = plt.figure(figsize=(2*M,2*M))
# The spy of the Laplacian
plt.subplot(221)
plt.spy(L)
# Plotting the approximation and its interpolation
# The numerical approximation
uu = np.zeros((N+1,N+1))
uu[1:-1,1:-1]=np.reshape(u,(N-1,N-1))
xx,yy=np.meshgrid(x,y)
value = uu[int(N/4),int(N/4)]
plt.subplot(222,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, uu)
ax.view_init(elev,azim)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
uuu = spf.interpolate.interp2d(xx, yy, uu, kind='linear')
uuu_n=np.reshape(uuu(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(224,projection='3d')
ax = fig.gca()
surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
#ax.plot_wireframe(xxx, yyy, uuu_n)
fig.colorbar(surf)
ax.view_init(elev,azim)
plt.subplot(223)
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
extent = [x[0], x[-1], y[0], y[-1]]
plt.imshow(uu, extent=extent)
plt.ylabel('$y$')
plt.xlabel('$x$')
plt.colorbar()
plt.show()
interact(example_5,N=(3,20),elev=elev_widget,azim=azim_widget)
```
# Example 6: (2D) Helmholtz equation $u_{xx}+u_{yy}+k^2\,u=f$ with u=0 on $\partial\Gamma$
```python
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
def example_6(N=10,elev=40,azim=230,k=9,n_contours=8):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
f = np.exp(-10.*((yy-1.)**2+(xx-.5)**2))
I = np.eye(N-1)
# The Laplacian
L = np.kron(I,D2)+np.kron(D2,I)+k**2*np.eye((N-1)**2)
u = np.linalg.solve(L,f)
fig = plt.figure(figsize=(2*M,2*M))
# Plotting the approximation and its interpolation
# The numerical approximation
uu = np.zeros((N+1,N+1))
uu[1:-1,1:-1]=np.reshape(u,(N-1,N-1))
xx,yy=np.meshgrid(x,y)
value = uu[int(N/4),int(N/4)]
plt.subplot(221,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, uu)
ax.view_init(elev,azim)
plt.subplot(222)
plt.contour(xx, yy, uu, n_contours,
colors='k', # negative contours will be dashed by default
)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
uuu = spf.interpolate.interp2d(xx, yy, uu, kind='linear')
uuu_n=np.reshape(uuu(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(223,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xxx, yyy, uuu_n)
ax.view_init(elev,azim)
plt.subplot(224)
plt.contour(xxx, yyy, uuu_n, n_contours,
colors='k', # negative contours will be dashed by default
)
plt.show()
interact(example_6,N=(3,30),elev=elev_widget,azim=azim_widget,k=(1,20),n_contours=(5,12))
```
# Example 7: (2D) $-(u_{xx}+u_{yy})=\lambda\,u$ with $u=0$ on $\partial\Gamma$
```python
elev_widget = IntSlider(min=0, max=180, step=10, value=40)
azim_widget = IntSlider(min=0, max=360, step=10, value=230)
N_widget = IntSlider(min=2, max=30, step=1, value=10)
j_widget = IntSlider(min=1, max=20, step=1, value=1)
def update_j_range(*args):
j_widget.max = (N_widget.value-1)**2
j_widget.observe(update_j_range, 'value')
def example_7(N=10,elev=40,azim=230,n_contours=8,j=1):
D,x = cheb(N)
y=x
D2 = np.dot(D,D)
D2 = D2[1:-1,1:-1]
xx,yy=np.meshgrid(x[1:-1],y[1:-1])
xx = xx.flatten()
yy = yy.flatten()
I = np.eye(N-1)
# The Laplacian
L = (np.kron(I,-D2)+np.kron(-D2,I))
lam, V = np.linalg.eig(L)
ii=np.argsort(np.real(lam))
lam=lam[ii]
V=V[:,ii]
fig = plt.figure(figsize=(2*M,M))
# Plotting the approximation and its interpolation
# The numerical approximation
vv = np.zeros((N+1,N+1))
vv[1:-1,1:-1]=np.reshape(np.real(V[:,j-1]),(N-1,N-1))
xx,yy=np.meshgrid(x,y)
plt.subplot(221,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xx, yy, vv)
plt.title('eig '+str(j)+'/ (pi/2)**2= '+str(lam[j-1]/((np.pi/2)**2)))
ax.view_init(elev,azim)
plt.subplot(222)
plt.contour(xx, yy, vv, n_contours,
colors='k', # negative contours will be dashed by default
)
# The INTERPOLATED approximation
N_fine=4*N
finer_mesh=np.linspace(-1,1,N_fine)
xxx,yyy=np.meshgrid(finer_mesh,finer_mesh)
vvv = spf.interpolate.interp2d(xx, yy, vv, kind='linear')
vvv_n=np.reshape(vvv(finer_mesh,finer_mesh),(N_fine,N_fine))
plt.subplot(223,projection='3d')
ax = fig.gca()
#surf = ax.plot_surface(xxx, yyy, uuu_n, rstride=1, cstride=1, cmap=cm.coolwarm,
# linewidth=0, antialiased=False)
ax.plot_wireframe(xxx, yyy, vvv_n)
ax.view_init(elev,azim)
plt.subplot(224)
plt.contour(xxx, yyy, vvv_n, n_contours,
colors='k', # negative contours will be dashed by default
)
plt.show()
interact(example_7,N=N_widget,elev=elev_widget,azim=azim_widget,n_contours=(5,12),j=j_widget)
```
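For the square $[-1,1]^2$ with homogeneous Dirichlet data, the exact eigenvalues of $-(u_{xx}+u_{yy})$ are $(\pi/2)^2(m^2+n^2)$, so the ratio printed in the title of Example 7 should approach $2$ for the lowest mode. A short check of that limit is sketched below, again reusing `cheb` and `numpy` from the earlier cells.
```python
# Illustrative check (not part of the original notebook): the smallest eigenvalue of
# the discrete -Laplacian, scaled by (pi/2)^2, should be close to 2 (mode m = n = 1).
def smallest_eig_check(N=16):
    D, x = cheb(N)
    D2 = np.dot(D, D)[1:-1, 1:-1]
    I = np.eye(N - 1)
    L = np.kron(I, -D2) + np.kron(-D2, I)
    lam = np.sort(np.real(np.linalg.eigvals(L)))
    return lam[0] / (np.pi / 2)**2
print(smallest_eig_check())   # expected to be close to 2.0
```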
# In-class work
## [Flashback] Implement Programs 6, 7, and 12.
## [Today] Implement Programs 19, 20, 21, 22, and 23.
```python
```
|
[STATEMENT]
lemma monad_state_altc_writerT' [locale_witness]:
"monad_state_altc return (bind :: ('a \<times> 'w list, 'm) bind) (get :: ('s, 'm) get) put (altc :: ('c, 'm) altc)
\<Longrightarrow> monad_state_altc return (bind :: ('a, ('w, 'a, 'm) writerT) bind) (get :: ('s, ('w, 'a, 'm) writerT) get) put (altc :: ('c, ('w, 'a, 'm) writerT) altc)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monad_state_altc Monad_Overloading.return Monad_Overloading.bind get put altc \<Longrightarrow> monad_state_altc Monad_Overloading.return Monad_Overloading.bind get put altc
[PROOF STEP]
unfolding return_writerT_def bind_writerT_def get_writerT_def put_writerT_def altc_writerT_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monad_state_altc Monad_Overloading.return Monad_Overloading.bind get put altc \<Longrightarrow> monad_state_altc (return_writer Monad_Overloading.return) (bind_writer Monad_Overloading.return Monad_Overloading.bind) (get_writer get) (put_writer put) (altc_writer altc)
[PROOF STEP]
by(rule monad_state_altc_writerT) |
/-
Copyright (c) 2022 Yaël Dillies. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies
-/
import order.filter.prod
/-!
# N-ary maps of filter
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file defines the binary and ternary maps of filters. This is mostly useful to define pointwise
operations on filters.
## Main declarations
* `filter.map₂`: Binary map of filters.
* `filter.map₃`: Ternary map of filters.
## Notes
This file is very similar to `data.set.n_ary`, `data.finset.n_ary` and `data.option.n_ary`. Please
keep them in sync.
-/
open function set
open_locale filter
namespace filter
variables {α α' β β' γ γ' δ δ' ε ε' : Type*} {m : α → β → γ} {f f₁ f₂ : filter α}
{g g₁ g₂ : filter β} {h h₁ h₂ : filter γ} {s s₁ s₂ : set α} {t t₁ t₂ : set β} {u : set γ}
{v : set δ} {a : α} {b : β} {c : γ}
/-- The image of a binary function `m : α → β → γ` as a function `filter α → filter β → filter γ`.
Mathematically this should be thought of as the image of the corresponding function `α × β → γ`. -/
def map₂ (m : α → β → γ) (f : filter α) (g : filter β) : filter γ :=
{ sets := {s | ∃ u v, u ∈ f ∧ v ∈ g ∧ image2 m u v ⊆ s},
univ_sets := ⟨univ, univ, univ_sets _, univ_sets _, subset_univ _⟩,
sets_of_superset := λ s t hs hst,
Exists₂.imp (λ u v, and.imp_right $ and.imp_right $ λ h, subset.trans h hst) hs,
inter_sets := λ s t,
begin
simp only [exists_prop, mem_set_of_eq, subset_inter_iff],
rintro ⟨s₁, s₂, hs₁, hs₂, hs⟩ ⟨t₁, t₂, ht₁, ht₂, ht⟩,
exact ⟨s₁ ∩ t₁, s₂ ∩ t₂, inter_sets f hs₁ ht₁, inter_sets g hs₂ ht₂,
(image2_subset (inter_subset_left _ _) $ inter_subset_left _ _).trans hs,
(image2_subset (inter_subset_right _ _) $ inter_subset_right _ _).trans ht⟩,
end }
@[simp] lemma mem_map₂_iff : u ∈ map₂ m f g ↔ ∃ s t, s ∈ f ∧ t ∈ g ∧ image2 m s t ⊆ u := iff.rfl
lemma image2_mem_map₂ (hs : s ∈ f) (ht : t ∈ g) : image2 m s t ∈ map₂ m f g :=
⟨_, _, hs, ht, subset.rfl⟩
lemma map_prod_eq_map₂ (m : α → β → γ) (f : filter α) (g : filter β) :
filter.map (λ p : α × β, m p.1 p.2) (f ×ᶠ g) = map₂ m f g :=
begin
ext s,
simp [mem_prod_iff, prod_subset_iff]
end
lemma map_prod_eq_map₂' (m : α × β → γ) (f : filter α) (g : filter β) :
filter.map m (f ×ᶠ g) = map₂ (λ a b, m (a, b)) f g :=
(congr_arg2 _ (uncurry_curry m).symm rfl).trans (map_prod_eq_map₂ _ _ _)
@[simp] lemma map₂_mk_eq_prod (f : filter α) (g : filter β) : map₂ prod.mk f g = f ×ᶠ g :=
by simp only [← map_prod_eq_map₂, prod.mk.eta, map_id']
-- lemma image2_mem_map₂_iff (hm : injective2 m) : image2 m s t ∈ map₂ m f g ↔ s ∈ f ∧ t ∈ g :=
-- ⟨by { rintro ⟨u, v, hu, hv, h⟩, rw image2_subset_image2_iff hm at h,
-- exact ⟨mem_of_superset hu h.1, mem_of_superset hv h.2⟩ }, λ h, image2_mem_map₂ h.1 h.2⟩
lemma map₂_mono (hf : f₁ ≤ f₂) (hg : g₁ ≤ g₂) : map₂ m f₁ g₁ ≤ map₂ m f₂ g₂ :=
λ _ ⟨s, t, hs, ht, hst⟩, ⟨s, t, hf hs, hg ht, hst⟩
lemma map₂_mono_left (h : g₁ ≤ g₂) : map₂ m f g₁ ≤ map₂ m f g₂ := map₂_mono subset.rfl h
lemma map₂_mono_right (h : f₁ ≤ f₂) : map₂ m f₁ g ≤ map₂ m f₂ g := map₂_mono h subset.rfl
@[simp] lemma le_map₂_iff {h : filter γ} :
h ≤ map₂ m f g ↔ ∀ ⦃s⦄, s ∈ f → ∀ ⦃t⦄, t ∈ g → image2 m s t ∈ h :=
⟨λ H s hs t ht, H $ image2_mem_map₂ hs ht, λ H u ⟨s, t, hs, ht, hu⟩, mem_of_superset (H hs ht) hu⟩
@[simp] lemma map₂_bot_left : map₂ m ⊥ g = ⊥ :=
empty_mem_iff_bot.1 ⟨∅, univ, trivial, univ_mem, (image2_empty_left).subset⟩
@[simp] lemma map₂_bot_right : map₂ m f ⊥ = ⊥ :=
empty_mem_iff_bot.1 ⟨univ, ∅, univ_mem, trivial, (image2_empty_right).subset⟩
@[simp] lemma map₂_eq_bot_iff : map₂ m f g = ⊥ ↔ f = ⊥ ∨ g = ⊥ :=
begin
simp only [←empty_mem_iff_bot, mem_map₂_iff, subset_empty_iff, image2_eq_empty_iff],
split,
{ rintro ⟨s, t, hs, ht, rfl | rfl⟩,
{ exact or.inl hs },
{ exact or.inr ht } },
{ rintro (h | h),
{ exact ⟨_, _, h, univ_mem, or.inl rfl⟩ },
{ exact ⟨_, _, univ_mem, h, or.inr rfl⟩ } }
end
@[simp] lemma map₂_ne_bot_iff : (map₂ m f g).ne_bot ↔ f.ne_bot ∧ g.ne_bot :=
by { simp_rw ne_bot_iff, exact map₂_eq_bot_iff.not.trans not_or_distrib }
lemma ne_bot.map₂ (hf : f.ne_bot) (hg : g.ne_bot) : (map₂ m f g).ne_bot :=
map₂_ne_bot_iff.2 ⟨hf, hg⟩
lemma ne_bot.of_map₂_left (h : (map₂ m f g).ne_bot) : f.ne_bot := (map₂_ne_bot_iff.1 h).1
lemma ne_bot.of_map₂_right (h : (map₂ m f g).ne_bot) : g.ne_bot := (map₂_ne_bot_iff.1 h).2
lemma map₂_sup_left : map₂ m (f₁ ⊔ f₂) g = map₂ m f₁ g ⊔ map₂ m f₂ g :=
begin
ext u,
split,
{ rintro ⟨s, t, ⟨h₁, h₂⟩, ht, hu⟩,
exact ⟨mem_of_superset (image2_mem_map₂ h₁ ht) hu,
mem_of_superset (image2_mem_map₂ h₂ ht) hu⟩ },
{ rintro ⟨⟨s₁, t₁, hs₁, ht₁, hu₁⟩, s₂, t₂, hs₂, ht₂, hu₂⟩,
refine ⟨s₁ ∪ s₂, t₁ ∩ t₂, union_mem_sup hs₁ hs₂, inter_mem ht₁ ht₂, _⟩,
rw image2_union_left,
exact union_subset ((image2_subset_left $ inter_subset_left _ _).trans hu₁)
((image2_subset_left $ inter_subset_right _ _).trans hu₂) }
end
lemma map₂_sup_right : map₂ m f (g₁ ⊔ g₂) = map₂ m f g₁ ⊔ map₂ m f g₂ :=
begin
ext u,
split,
{ rintro ⟨s, t, hs, ⟨h₁, h₂⟩, hu⟩,
exact ⟨mem_of_superset (image2_mem_map₂ hs h₁) hu,
mem_of_superset (image2_mem_map₂ hs h₂) hu⟩ },
{ rintro ⟨⟨s₁, t₁, hs₁, ht₁, hu₁⟩, s₂, t₂, hs₂, ht₂, hu₂⟩,
refine ⟨s₁ ∩ s₂, t₁ ∪ t₂, inter_mem hs₁ hs₂, union_mem_sup ht₁ ht₂, _⟩,
rw image2_union_right,
exact union_subset ((image2_subset_right $ inter_subset_left _ _).trans hu₁)
((image2_subset_right $ inter_subset_right _ _).trans hu₂) }
end
lemma map₂_inf_subset_left : map₂ m (f₁ ⊓ f₂) g ≤ map₂ m f₁ g ⊓ map₂ m f₂ g :=
le_inf (map₂_mono_right inf_le_left) (map₂_mono_right inf_le_right)
lemma map₂_inf_subset_right : map₂ m f (g₁ ⊓ g₂) ≤ map₂ m f g₁ ⊓ map₂ m f g₂ :=
le_inf (map₂_mono_left inf_le_left) (map₂_mono_left inf_le_right)
@[simp] lemma map₂_pure_left : map₂ m (pure a) g = g.map (λ b, m a b) :=
filter.ext $ λ u, ⟨λ ⟨s, t, hs, ht, hu⟩,
mem_of_superset (image_mem_map ht) ((image_subset_image2_right $ mem_pure.1 hs).trans hu),
λ h, ⟨{a}, _, singleton_mem_pure, h, by rw [image2_singleton_left, image_subset_iff]⟩⟩
@[simp] lemma map₂_pure_right : map₂ m f (pure b) = f.map (λ a, m a b) :=
filter.ext $ λ u, ⟨λ ⟨s, t, hs, ht, hu⟩,
mem_of_superset (image_mem_map hs) ((image_subset_image2_left $ mem_pure.1 ht).trans hu),
λ h, ⟨_, {b}, h, singleton_mem_pure, by rw [image2_singleton_right, image_subset_iff]⟩⟩
lemma map₂_pure : map₂ m (pure a) (pure b) = pure (m a b) := by rw [map₂_pure_right, map_pure]
lemma map₂_swap (m : α → β → γ) (f : filter α) (g : filter β) :
map₂ m f g = map₂ (λ a b, m b a) g f :=
by { ext u, split; rintro ⟨s, t, hs, ht, hu⟩; refine ⟨t, s, ht, hs, by rwa image2_swap⟩ }
@[simp] lemma map₂_left (h : g.ne_bot) : map₂ (λ x y, x) f g = f :=
begin
ext u,
refine ⟨_, λ hu, ⟨_, _, hu, univ_mem, (image2_left $ h.nonempty_of_mem univ_mem).subset⟩⟩,
rintro ⟨s, t, hs, ht, hu⟩,
rw image2_left (h.nonempty_of_mem ht) at hu,
exact mem_of_superset hs hu,
end
@[simp] lemma map₂_right (h : f.ne_bot) : map₂ (λ x y, y) f g = g := by rw [map₂_swap, map₂_left h]
/-- The image of a ternary function `m : α → β → γ → δ` as a function
`filter α → filter β → filter γ → filter δ`. Mathematically this should be thought of as the image
of the corresponding function `α × β × γ → δ`. -/
def map₃ (m : α → β → γ → δ) (f : filter α) (g : filter β) (h : filter γ) : filter δ :=
{ sets := {s | ∃ u v w, u ∈ f ∧ v ∈ g ∧ w ∈ h ∧ image3 m u v w ⊆ s},
univ_sets := ⟨univ, univ, univ, univ_sets _, univ_sets _, univ_sets _, subset_univ _⟩,
sets_of_superset := λ s t hs hst, Exists₃.imp
(λ u v w, and.imp_right $ and.imp_right $ and.imp_right $ λ h, subset.trans h hst) hs,
inter_sets := λ s t,
begin
simp only [exists_prop, mem_set_of_eq, subset_inter_iff],
rintro ⟨s₁, s₂, s₃, hs₁, hs₂, hs₃, hs⟩ ⟨t₁, t₂, t₃, ht₁, ht₂, ht₃, ht⟩,
exact ⟨s₁ ∩ t₁, s₂ ∩ t₂, s₃ ∩ t₃, inter_mem hs₁ ht₁, inter_mem hs₂ ht₂, inter_mem hs₃ ht₃,
(image3_mono (inter_subset_left _ _) (inter_subset_left _ _) $ inter_subset_left _ _).trans
hs,
(image3_mono (inter_subset_right _ _) (inter_subset_right _ _) $ inter_subset_right _ _).trans
ht⟩,
end }
lemma map₂_map₂_left (m : δ → γ → ε) (n : α → β → δ) :
map₂ m (map₂ n f g) h = map₃ (λ a b c, m (n a b) c) f g h :=
begin
ext w,
split,
{ rintro ⟨s, t, ⟨u, v, hu, hv, hs⟩, ht, hw⟩,
refine ⟨u, v, t, hu, hv, ht, _⟩,
rw ←image2_image2_left,
exact (image2_subset_right hs).trans hw },
{ rintro ⟨s, t, u, hs, ht, hu, hw⟩,
exact ⟨_, u, image2_mem_map₂ hs ht, hu, by rwa image2_image2_left⟩ }
end
lemma map₂_map₂_right (m : α → δ → ε) (n : β → γ → δ) :
map₂ m f (map₂ n g h) = map₃ (λ a b c, m a (n b c)) f g h :=
begin
ext w,
split,
{ rintro ⟨s, t, hs, ⟨u, v, hu, hv, ht⟩, hw⟩,
refine ⟨s, u, v, hs, hu, hv, _⟩,
rw ←image2_image2_right,
exact (image2_subset_left ht).trans hw },
{ rintro ⟨s, t, u, hs, ht, hu, hw⟩,
exact ⟨s, _, hs, image2_mem_map₂ ht hu, by rwa image2_image2_right⟩ }
end
lemma map_map₂ (m : α → β → γ) (n : γ → δ) : (map₂ m f g).map n = map₂ (λ a b, n (m a b)) f g :=
by rw [← map_prod_eq_map₂, ← map_prod_eq_map₂, map_map]
lemma map₂_map_left (m : γ → β → δ) (n : α → γ) :
map₂ m (f.map n) g = map₂ (λ a b, m (n a) b) f g :=
begin
rw [← map_prod_eq_map₂, ← map_prod_eq_map₂, ← @map_id _ g, prod_map_map_eq, map_map, map_id],
refl
end
lemma map₂_map_right (m : α → γ → δ) (n : β → γ) :
map₂ m f (g.map n) = map₂ (λ a b, m a (n b)) f g :=
by rw [map₂_swap, map₂_map_left, map₂_swap]
@[simp] lemma map₂_curry (m : α × β → γ) (f : filter α) (g : filter β) :
map₂ (curry m) f g = (f ×ᶠ g).map m :=
(map_prod_eq_map₂' _ _ _).symm
@[simp] lemma map_uncurry_prod (m : α → β → γ) (f : filter α) (g : filter β) :
(f ×ᶠ g).map (uncurry m) = map₂ m f g :=
by rw [←map₂_curry, curry_uncurry]
/-!
### Algebraic replacement rules
A collection of lemmas to transfer associativity, commutativity, distributivity, ... of operations
to the associativity, commutativity, distributivity, ... of `filter.map₂` of those operations.
The proof pattern is `map₂_lemma operation_lemma`. For example, `map₂_comm mul_comm` proves that
`map₂ (*) f g = map₂ (*) g f` in a `comm_semigroup`.
-/
lemma map₂_assoc {m : δ → γ → ε} {n : α → β → δ} {m' : α → ε' → ε} {n' : β → γ → ε'}
{h : filter γ} (h_assoc : ∀ a b c, m (n a b) c = m' a (n' b c)) :
map₂ m (map₂ n f g) h = map₂ m' f (map₂ n' g h) :=
by simp only [map₂_map₂_left, map₂_map₂_right, h_assoc]
lemma map₂_comm {n : β → α → γ} (h_comm : ∀ a b, m a b = n b a) : map₂ m f g = map₂ n g f :=
(map₂_swap _ _ _).trans $ by simp_rw h_comm
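-- Illustrative use of the pattern described above (a sketch, not part of the
-- original file): in a `comm_semigroup`, `map₂_comm mul_comm` transfers
-- commutativity of `(*)` to the pointwise `map₂ (*)` on filters.
example {M : Type*} [comm_semigroup M] (l₁ l₂ : filter M) :
  map₂ (*) l₁ l₂ = map₂ (*) l₂ l₁ :=
map₂_comm mul_comm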
lemma map₂_left_comm {m : α → δ → ε} {n : β → γ → δ} {m' : α → γ → δ'} {n' : β → δ' → ε}
(h_left_comm : ∀ a b c, m a (n b c) = n' b (m' a c)) :
map₂ m f (map₂ n g h) = map₂ n' g (map₂ m' f h) :=
by { rw [map₂_swap m', map₂_swap m], exact map₂_assoc (λ _ _ _, h_left_comm _ _ _) }
lemma map₂_right_comm {m : δ → γ → ε} {n : α → β → δ} {m' : α → γ → δ'} {n' : δ' → β → ε}
(h_right_comm : ∀ a b c, m (n a b) c = n' (m' a c) b) :
map₂ m (map₂ n f g) h = map₂ n' (map₂ m' f h) g :=
by { rw [map₂_swap n, map₂_swap n'], exact map₂_assoc (λ _ _ _, h_right_comm _ _ _) }
lemma map_map₂_distrib {n : γ → δ} {m' : α' → β' → δ} {n₁ : α → α'} {n₂ : β → β'}
(h_distrib : ∀ a b, n (m a b) = m' (n₁ a) (n₂ b)) :
(map₂ m f g).map n = map₂ m' (f.map n₁) (g.map n₂) :=
by simp_rw [map_map₂, map₂_map_left, map₂_map_right, h_distrib]
/-- Symmetric statement to `filter.map₂_map_left_comm`. -/
lemma map_map₂_distrib_left {n : γ → δ} {m' : α' → β → δ} {n' : α → α'}
(h_distrib : ∀ a b, n (m a b) = m' (n' a) b) :
(map₂ m f g).map n = map₂ m' (f.map n') g :=
map_map₂_distrib h_distrib
/-- Symmetric statement to `filter.map_map₂_right_comm`. -/
lemma map_map₂_distrib_right {n : γ → δ} {m' : α → β' → δ} {n' : β → β'}
(h_distrib : ∀ a b, n (m a b) = m' a (n' b)) :
(map₂ m f g).map n = map₂ m' f (g.map n') :=
map_map₂_distrib h_distrib
/-- Symmetric statement to `filter.map_map₂_distrib_left`. -/
lemma map₂_map_left_comm {m : α' → β → γ} {n : α → α'} {m' : α → β → δ} {n' : δ → γ}
(h_left_comm : ∀ a b, m (n a) b = n' (m' a b)) :
map₂ m (f.map n) g = (map₂ m' f g).map n' :=
(map_map₂_distrib_left $ λ a b, (h_left_comm a b).symm).symm
/-- Symmetric statement to `filter.map_map₂_distrib_right`. -/
lemma map_map₂_right_comm {m : α → β' → γ} {n : β → β'} {m' : α → β → δ} {n' : δ → γ}
  (h_right_comm : ∀ a b, m a (n b) = n' (m' a b)) :
  map₂ m f (g.map n) = (map₂ m' f g).map n' :=
(map_map₂_distrib_right $ λ a b, (h_right_comm a b).symm).symm
/-- The other direction does not hold because of the `f`-`f` cross terms on the RHS. -/
lemma map₂_distrib_le_left {m : α → δ → ε} {n : β → γ → δ} {m₁ : α → β → β'} {m₂ : α → γ → γ'}
{n' : β' → γ' → ε} (h_distrib : ∀ a b c, m a (n b c) = n' (m₁ a b) (m₂ a c)) :
map₂ m f (map₂ n g h) ≤ map₂ n' (map₂ m₁ f g) (map₂ m₂ f h) :=
begin
rintro s ⟨t₁, t₂, ⟨u₁, v, hu₁, hv, ht₁⟩, ⟨u₂, w, hu₂, hw, ht₂⟩, hs⟩,
refine ⟨u₁ ∩ u₂, _, inter_mem hu₁ hu₂, image2_mem_map₂ hv hw, _⟩,
refine (image2_distrib_subset_left h_distrib).trans ((image2_subset _ _).trans hs),
{ exact (image2_subset_right $ inter_subset_left _ _).trans ht₁ },
{ exact (image2_subset_right $ inter_subset_right _ _).trans ht₂ }
end
/-- The other direction does not hold because of the `h`-`h` cross terms on the RHS. -/
lemma map₂_distrib_le_right {m : δ → γ → ε} {n : α → β → δ} {m₁ : α → γ → α'}
{m₂ : β → γ → β'} {n' : α' → β' → ε} (h_distrib : ∀ a b c, m (n a b) c = n' (m₁ a c) (m₂ b c)) :
map₂ m (map₂ n f g) h ≤ map₂ n' (map₂ m₁ f h) (map₂ m₂ g h) :=
begin
rintro s ⟨t₁, t₂, ⟨u, w₁, hu, hw₁, ht₁⟩, ⟨v, w₂, hv, hw₂, ht₂⟩, hs⟩,
refine ⟨_, w₁ ∩ w₂, image2_mem_map₂ hu hv, inter_mem hw₁ hw₂, _⟩,
refine (image2_distrib_subset_right h_distrib).trans ((image2_subset _ _).trans hs),
{ exact (image2_subset_left $ inter_subset_left _ _).trans ht₁ },
{ exact (image2_subset_left $ inter_subset_right _ _).trans ht₂ }
end
lemma map_map₂_antidistrib {n : γ → δ} {m' : β' → α' → δ} {n₁ : β → β'} {n₂ : α → α'}
(h_antidistrib : ∀ a b, n (m a b) = m' (n₁ b) (n₂ a)) :
(map₂ m f g).map n = map₂ m' (g.map n₁) (f.map n₂) :=
by { rw map₂_swap m, exact map_map₂_distrib (λ _ _, h_antidistrib _ _) }
/-- Symmetric statement to `filter.map₂_map_left_anticomm`. -/
lemma map_map₂_antidistrib_left {n : γ → δ} {m' : β' → α → δ} {n' : β → β'}
(h_antidistrib : ∀ a b, n (m a b) = m' (n' b) a) :
(map₂ m f g).map n = map₂ m' (g.map n') f :=
map_map₂_antidistrib h_antidistrib
/-- Symmetric statement to `filter.map_map₂_right_anticomm`. -/
lemma map_map₂_antidistrib_right {n : γ → δ} {m' : β → α' → δ} {n' : α → α'}
(h_antidistrib : ∀ a b, n (m a b) = m' b (n' a)) :
(map₂ m f g).map n = map₂ m' g (f.map n') :=
map_map₂_antidistrib h_antidistrib
/-- Symmetric statement to `filter.map_map₂_antidistrib_left`. -/
lemma map₂_map_left_anticomm {m : α' → β → γ} {n : α → α'} {m' : β → α → δ} {n' : δ → γ}
(h_left_anticomm : ∀ a b, m (n a) b = n' (m' b a)) :
map₂ m (f.map n) g = (map₂ m' g f).map n' :=
(map_map₂_antidistrib_left $ λ a b, (h_left_anticomm b a).symm).symm
/-- Symmetric statement to `filter.map_map₂_antidistrib_right`. -/
lemma map_map₂_right_anticomm {m : α → β' → γ} {n : β → β'} {m' : β → α → δ} {n' : δ → γ}
(h_right_anticomm : ∀ a b, m a (n b) = n' (m' b a)) :
map₂ m f (g.map n) = (map₂ m' g f).map n' :=
(map_map₂_antidistrib_right $ λ a b, (h_right_anticomm b a).symm).symm
/-- If `a` is a left identity for `f : α → β → β`, then `pure a` is a left identity for
`filter.map₂ f`. -/
lemma map₂_left_identity {f : α → β → β} {a : α} (h : ∀ b, f a b = b) (l : filter β) :
map₂ f (pure a) l = l :=
by rw [map₂_pure_left, show f a = id, from funext h, map_id]
/-- If `b` is a right identity for `f : α → β → α`, then `pure b` is a right identity for
`filter.map₂ f`. -/
lemma map₂_right_identity {f : α → β → α} {b : β} (h : ∀ a, f a b = a) (l : filter α) :
map₂ f l (pure b) = l :=
by rw [map₂_pure_right, funext h, map_id']
end filter
|
% !TEX root = frideswide.tex
\chapter{Sigla and Abbreviations}
\begin{raggedright}
\setkomafont{descriptionlabel}{}
\begin{description}[leftmargin=!,labelwidth=4em, itemsep=0pt, parsep=0pt]
\item[\emph{B}]
Oxford, Balliol College, \textsc{MS} 228
\item[\emph{C}]
Cambridge, Gonville and Caius College, \textsc{MS} 129/67
\item[\emph{D}]
Oxford, Bodleian Library, \textsc{MS} Digby 177
\item[\emph{G}]
Gotha, Forschungsbibliothek, \textsc{MS} Memb.~I~81
\item[\emph{L}]
London, British Library, Lansdowne \textsc{MS} 436
\item[\emph{M}]
Oxford, Bodleian Library, \textsc{MS} Laud misc. 114
\item[\emph{N}]
London, British Library, Cotton \textsc{MS} Nero E.~\textsc{i}/2
\item[\emph{P}]
Paris, Bibliothèque nationale de France, \textsc{MS} Latin 5320
\item[\emph{T}]
Cambridge, Trinity College, \textsc{MS} B.14.37
\item[\emph{W}]
Worcester, Cathedral Library, \textsc{MS} Q.86
\end{description}
\begin{description}[leftmargin=!,labelwidth=4em, itemsep=0pt, parsep=0pt]
\item[\emph{add.}]
\textlatin{\emph{addidit}}
\item[\emph{ante corr.}]
\textlatin{\emph{ante correctionem}}
\item[\emph{\textsc{BHL}}]
\textlatin{\emph{Bibliotheca hagiographica latina}}
\item[\emph{in marg.}]
\textlatin{\emph{in margine}}
\item[\emph{om.}]
\textlatin{\emph{omisit}}
\item[\emph{sup.~l.}]
\textlatin{\emph{supra lineam}}
\item[Vulg.]
Vulgate
\item[⟨\,\ldots{}\,⟩]
\textlatin{\emph{addendum}}
\end{description}
\end{raggedright}
|
[STATEMENT]
lemma derivations_cross_compare:
assumes X: "derivation X" and Y: "derivation Y" and xX: "x \<in> X" and yY: "y \<in> Y"
shows "(x \<sqsubset> y \<and> x \<in> Y) \<or> x = y \<or> (y \<sqsubset> x \<and> y \<in> X)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
fix X Y x y
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
assume X: "derivation X" and Y: "derivation Y" and xX: "x \<in> X" and yY: "y \<in> Y"
[PROOF STATE]
proof (state)
this:
derivation X
derivation Y
x \<in> X
y \<in> Y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
interpret X: well_ordered_set X "(\<sqsubseteq>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. well_ordered_set X (\<sqsubseteq>)
[PROOF STEP]
using derivation_well_ordered[OF X]
[PROOF STATE]
proof (prove)
using this:
well_ordered_set X (\<sqsubseteq>)
goal (1 subgoal):
1. well_ordered_set X (\<sqsubseteq>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
interpret X.asympartp: transitive X "(\<sqsubset>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. transitive X (\<sqsubset>)
[PROOF STEP]
using X.asympartp_transitive
[PROOF STATE]
proof (prove)
using this:
transitive X (\<sqsubset>)
goal (1 subgoal):
1. transitive X (\<sqsubset>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
interpret Y: well_ordered_set Y "(\<sqsubseteq>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. well_ordered_set Y (\<sqsubseteq>)
[PROOF STEP]
using derivation_well_ordered[OF Y]
[PROOF STATE]
proof (prove)
using this:
well_ordered_set Y (\<sqsubseteq>)
goal (1 subgoal):
1. well_ordered_set Y (\<sqsubseteq>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have XA: "X \<subseteq> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X \<subseteq> A
[PROOF STEP]
using derivation_A[OF X]
[PROOF STATE]
proof (prove)
using this:
X \<subseteq> A
goal (1 subgoal):
1. X \<subseteq> A
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
X \<subseteq> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
X \<subseteq> A
[PROOF STEP]
have xA: "x \<in> A"
[PROOF STATE]
proof (prove)
using this:
X \<subseteq> A
goal (1 subgoal):
1. x \<in> A
[PROOF STEP]
using xX
[PROOF STATE]
proof (prove)
using this:
X \<subseteq> A
x \<in> X
goal (1 subgoal):
1. x \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with f
[PROOF STATE]
proof (chain)
picking this:
f ` A \<subseteq> A
x \<in> A
[PROOF STEP]
have fxA: "f x \<in> A"
[PROOF STATE]
proof (prove)
using this:
f ` A \<subseteq> A
x \<in> A
goal (1 subgoal):
1. f x \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f x \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have YA: "Y \<subseteq> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Y \<subseteq> A
[PROOF STEP]
using derivation_A[OF Y]
[PROOF STATE]
proof (prove)
using this:
Y \<subseteq> A
goal (1 subgoal):
1. Y \<subseteq> A
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
Y \<subseteq> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Y \<subseteq> A
[PROOF STEP]
have yA: "y \<in> A"
[PROOF STATE]
proof (prove)
using this:
Y \<subseteq> A
goal (1 subgoal):
1. y \<in> A
[PROOF STEP]
using yY
[PROOF STATE]
proof (prove)
using this:
Y \<subseteq> A
y \<in> Y
goal (1 subgoal):
1. y \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with f
[PROOF STATE]
proof (chain)
picking this:
f ` A \<subseteq> A
y \<in> A
[PROOF STEP]
have fyA: "f y \<in> A"
[PROOF STATE]
proof (prove)
using this:
f ` A \<subseteq> A
y \<in> A
goal (1 subgoal):
1. f y \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
f y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
fix Z
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
assume Z: "Z = {z \<in> X. z \<sqsubset> x}"
and fZ: "f ` Z \<subseteq> Z"
and Zx: "extreme_bound A (\<sqsubseteq>) Z x"
and IHx: "\<forall>z \<in> X. z \<sqsubset> x \<longrightarrow> (z \<sqsubset> y \<and> z \<in> Y) \<or> z = y \<or> (y \<sqsubset> z \<and> y \<in> X)"
[PROOF STATE]
proof (state)
this:
Z = {z \<in> X. z \<sqsubset> x}
f ` Z \<subseteq> Z
extreme_bound A (\<sqsubseteq>) Z x
\<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have "(y \<sqsubset> x \<and> y \<in> X) \<or> x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
proof (cases "\<exists>z \<in> Z. y \<sqsubset> z")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
\<exists>z\<in>Z. y \<sqsubset> z
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<exists>z\<in>Z. y \<sqsubset> z
[PROOF STEP]
obtain z where zZ: "z \<in> Z" and yz: "y \<sqsubset> z"
[PROOF STATE]
proof (prove)
using this:
\<exists>z\<in>Z. y \<sqsubset> z
goal (1 subgoal):
1. (\<And>z. \<lbrakk>z \<in> Z; y \<sqsubset> z\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<in> Z
y \<sqsubset> z
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
from zZ Z
[PROOF STATE]
proof (chain)
picking this:
z \<in> Z
Z = {z \<in> X. z \<sqsubset> x}
[PROOF STEP]
have zX: "z \<in> X" and zx: "z \<sqsubset> x"
[PROOF STATE]
proof (prove)
using this:
z \<in> Z
Z = {z \<in> X. z \<sqsubset> x}
goal (1 subgoal):
1. z \<in> X &&& z \<sqsubset> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<in> X
z \<sqsubset> x
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
from IHx[rule_format, OF zX zx] yz
[PROOF STATE]
proof (chain)
picking this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
y \<sqsubset> z
[PROOF STEP]
have yX: "y \<in> X"
[PROOF STATE]
proof (prove)
using this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
y \<sqsubset> z
goal (1 subgoal):
1. y \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<in> X
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
from X.asympartp.trans[OF yz zx yX zX xX]
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x
[PROOF STEP]
have "y \<sqsubset> x"
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x
goal (2 subgoals):
1. \<exists>z\<in>Z. y \<sqsubset> z \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
with yX
[PROOF STATE]
proof (chain)
picking this:
y \<in> X
y \<sqsubset> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<in> X
y \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> (\<exists>z\<in>Z. y \<sqsubset> z)
goal (1 subgoal):
1. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
have "bound Z (\<sqsubseteq>) y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bound Z (\<sqsubseteq>) y
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> Z \<Longrightarrow> x \<sqsubseteq> y
[PROOF STEP]
fix z
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. x \<in> Z \<Longrightarrow> x \<sqsubseteq> y
[PROOF STEP]
assume "z \<in> Z"
[PROOF STATE]
proof (state)
this:
z \<in> Z
goal (1 subgoal):
1. \<And>x. x \<in> Z \<Longrightarrow> x \<sqsubseteq> y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z \<in> Z
[PROOF STEP]
have zX: "z \<in> X" and zx: "z \<sqsubset> x" and nyz: "\<not> y \<sqsubset> z"
[PROOF STATE]
proof (prove)
using this:
z \<in> Z
goal (1 subgoal):
1. z \<in> X &&& z \<sqsubset> x &&& \<not> y \<sqsubset> z
[PROOF STEP]
using Z False
[PROOF STATE]
proof (prove)
using this:
z \<in> Z
Z = {z \<in> X. z \<sqsubset> x}
\<not> (\<exists>z\<in>Z. y \<sqsubset> z)
goal (1 subgoal):
1. z \<in> X &&& z \<sqsubset> x &&& \<not> y \<sqsubset> z
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<in> X
z \<sqsubset> x
\<not> y \<sqsubset> z
goal (1 subgoal):
1. \<And>x. x \<in> Z \<Longrightarrow> x \<sqsubseteq> y
[PROOF STEP]
with IHx[rule_format, OF zX zx] X
[PROOF STATE]
proof (chain)
picking this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
derivation X
z \<in> X
z \<sqsubset> x
\<not> y \<sqsubset> z
[PROOF STEP]
show "z \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
using this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
derivation X
z \<in> X
z \<sqsubset> x
\<not> y \<sqsubset> z
goal (1 subgoal):
1. z \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<sqsubseteq> y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
bound Z (\<sqsubseteq>) y
goal (1 subgoal):
1. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
with yA Zx
[PROOF STATE]
proof (chain)
picking this:
y \<in> A
extreme_bound A (\<sqsubseteq>) Z x
bound Z (\<sqsubseteq>) y
[PROOF STEP]
have xy: "x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
using this:
y \<in> A
extreme_bound A (\<sqsubseteq>) Z x
bound Z (\<sqsubseteq>) y
goal (1 subgoal):
1. x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubseteq> y
goal (1 subgoal):
1. \<not> (\<exists>z\<in>Z. y \<sqsubset> z) \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<sqsubseteq> y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<sqsubseteq> y
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 x; \<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note lim_any = this
[PROOF STATE]
proof (state)
this:
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 x; \<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 x; \<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
fix z Z
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
assume Z: "Z = {z \<in> X. z \<sqsubset> x}"
and Zz: "extreme Z (\<sqsubseteq>) z"
and xfz: "x = f z"
and IHx: "(z \<sqsubset> y \<and> z \<in> Y) \<or> z = y \<or> (y \<sqsubset> z \<and> y \<in> X)"
[PROOF STATE]
proof (state)
this:
Z = {z \<in> X. z \<sqsubset> x}
extreme Z (\<sqsubseteq>) z
x = f z
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have zX: "z \<in> X" and zx: "z \<sqsubset> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. z \<in> X &&& z \<sqsubset> x
[PROOF STEP]
using Zz Z
[PROOF STATE]
proof (prove)
using this:
extreme Z (\<sqsubseteq>) z
Z = {z \<in> X. z \<sqsubset> x}
goal (1 subgoal):
1. z \<in> X &&& z \<sqsubset> x
[PROOF STEP]
by (auto simp: extreme_def)
[PROOF STATE]
proof (state)
this:
z \<in> X
z \<sqsubset> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z \<in> X
z \<sqsubset> x
[PROOF STEP]
have zA: "z \<in> A"
[PROOF STATE]
proof (prove)
using this:
z \<in> X
z \<sqsubset> x
goal (1 subgoal):
1. z \<in> A
[PROOF STEP]
using XA
[PROOF STATE]
proof (prove)
using this:
z \<in> X
z \<sqsubset> x
X \<subseteq> A
goal (1 subgoal):
1. z \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from IHx
[PROOF STATE]
proof (chain)
picking this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
[PROOF STEP]
have "(y \<sqsubset> x \<and> y \<in> X) \<or> x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
using this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
proof (elim disjE conjE)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<lbrakk>z \<sqsubset> y; z \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
3. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
assume zy: "z \<sqsubset> y" and zY: "z \<in> Y"
[PROOF STATE]
proof (state)
this:
z \<sqsubset> y
z \<in> Y
goal (3 subgoals):
1. \<lbrakk>z \<sqsubset> y; z \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
3. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
from derivation_useful[OF Y zY yY zy] xfz
[PROOF STATE]
proof (chain)
picking this:
f z \<sqsubseteq> y
x = f z
[PROOF STEP]
have xy: "x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
using this:
f z \<sqsubseteq> y
x = f z
goal (1 subgoal):
1. x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubseteq> y
goal (3 subgoals):
1. \<lbrakk>z \<sqsubset> y; z \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
3. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<sqsubseteq> y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<sqsubseteq> y
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (2 subgoals):
1. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
assume zy: "z = y"
[PROOF STATE]
proof (state)
this:
z = y
goal (2 subgoals):
1. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
z = y
[PROOF STEP]
have "y \<sqsubset> x"
[PROOF STATE]
proof (prove)
using this:
z = y
goal (1 subgoal):
1. y \<sqsubset> x
[PROOF STEP]
using zx
[PROOF STATE]
proof (prove)
using this:
z = y
z \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x
goal (2 subgoals):
1. z = y \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
2. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
with zy zX
[PROOF STATE]
proof (chain)
picking this:
z = y
z \<in> X
y \<sqsubset> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
z = y
z \<in> X
y \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
assume yz: "y \<sqsubset> z" and yX: "y \<in> X"
[PROOF STATE]
proof (state)
this:
y \<sqsubset> z
y \<in> X
goal (1 subgoal):
1. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
from X.asympartp.trans[OF yz zx yX zX xX]
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x
[PROOF STEP]
have "y \<sqsubset> x"
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x
goal (1 subgoal):
1. \<lbrakk>y \<sqsubset> z; y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
with yX
[PROOF STATE]
proof (chain)
picking this:
y \<in> X
y \<sqsubset> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<in> X
y \<sqsubset> x
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; extreme ?Z2 (\<sqsubseteq>) ?z2; x = f ?z2; ?z2 \<sqsubset> y \<and> ?z2 \<in> Y \<or> ?z2 = y \<or> y \<sqsubset> ?z2 \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note lim_any this
[PROOF STATE]
proof (state)
this:
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 x; \<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
\<lbrakk>?Z2 = {z \<in> X. z \<sqsubset> x}; extreme ?Z2 (\<sqsubseteq>) ?z2; x = f ?z2; ?z2 \<sqsubset> y \<and> ?z2 \<in> Y \<or> ?z2 = y \<or> y \<sqsubset> ?z2 \<and> y \<in> X\<rbrakk> \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>derivation ?Xa5; derivation ?Ya5; ?xa5 \<in> ?Xa5; ?ya5 \<in> ?Ya5; ?Z2 = {z \<in> ?Xa5. z \<sqsubset> ?xa5}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 ?xa5; \<forall>z\<in>?Xa5. z \<sqsubset> ?xa5 \<longrightarrow> z \<sqsubset> ?ya5 \<and> z \<in> ?Ya5 \<or> z = ?ya5 \<or> ?ya5 \<sqsubset> z \<and> ?ya5 \<in> ?Xa5\<rbrakk> \<Longrightarrow> ?ya5 \<sqsubset> ?xa5 \<and> ?ya5 \<in> ?Xa5 \<or> ?xa5 \<sqsubseteq> ?ya5
\<lbrakk>derivation ?Xa5; derivation ?Ya5; ?xa5 \<in> ?Xa5; ?ya5 \<in> ?Ya5; ?Z2 = {z \<in> ?Xa5. z \<sqsubset> ?xa5}; extreme ?Z2 (\<sqsubseteq>) ?z2; ?xa5 = f ?z2; ?z2 \<sqsubset> ?ya5 \<and> ?z2 \<in> ?Ya5 \<or> ?z2 = ?ya5 \<or> ?ya5 \<sqsubset> ?z2 \<and> ?ya5 \<in> ?Xa5\<rbrakk> \<Longrightarrow> ?ya5 \<sqsubset> ?xa5 \<and> ?ya5 \<in> ?Xa5 \<or> ?xa5 \<sqsubseteq> ?ya5
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note lim_any = this(1) and suc_any = this(2)
[PROOF STATE]
proof (state)
this:
\<lbrakk>derivation ?Xa5; derivation ?Ya5; ?xa5 \<in> ?Xa5; ?ya5 \<in> ?Ya5; ?Z2 = {z \<in> ?Xa5. z \<sqsubset> ?xa5}; f ` ?Z2 \<subseteq> ?Z2; extreme_bound A (\<sqsubseteq>) ?Z2 ?xa5; \<forall>z\<in>?Xa5. z \<sqsubset> ?xa5 \<longrightarrow> z \<sqsubset> ?ya5 \<and> z \<in> ?Ya5 \<or> z = ?ya5 \<or> ?ya5 \<sqsubset> z \<and> ?ya5 \<in> ?Xa5\<rbrakk> \<Longrightarrow> ?ya5 \<sqsubset> ?xa5 \<and> ?ya5 \<in> ?Xa5 \<or> ?xa5 \<sqsubseteq> ?ya5
\<lbrakk>derivation ?Xa5; derivation ?Ya5; ?xa5 \<in> ?Xa5; ?ya5 \<in> ?Ya5; ?Z2 = {z \<in> ?Xa5. z \<sqsubset> ?xa5}; extreme ?Z2 (\<sqsubseteq>) ?z2; ?xa5 = f ?z2; ?z2 \<sqsubset> ?ya5 \<and> ?z2 \<in> ?Ya5 \<or> ?z2 = ?ya5 \<or> ?ya5 \<sqsubset> ?z2 \<and> ?ya5 \<in> ?Xa5\<rbrakk> \<Longrightarrow> ?ya5 \<sqsubset> ?xa5 \<and> ?ya5 \<in> ?Xa5 \<or> ?xa5 \<sqsubseteq> ?ya5
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
interpret X: well_ordered_set X "(\<sqsubseteq>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. well_ordered_set X (\<sqsubseteq>)
[PROOF STEP]
using derivation_well_ordered[OF X]
[PROOF STATE]
proof (prove)
using this:
well_ordered_set X (\<sqsubseteq>)
goal (1 subgoal):
1. well_ordered_set X (\<sqsubseteq>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
interpret Y: well_ordered_set Y "(\<sqsubseteq>)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. well_ordered_set Y (\<sqsubseteq>)
[PROOF STEP]
using derivation_well_ordered[OF Y]
[PROOF STATE]
proof (prove)
using this:
well_ordered_set Y (\<sqsubseteq>)
goal (1 subgoal):
1. well_ordered_set Y (\<sqsubseteq>)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have XA: "X \<subseteq> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. X \<subseteq> A
[PROOF STEP]
using derivation_A[OF X]
[PROOF STATE]
proof (prove)
using this:
X \<subseteq> A
goal (1 subgoal):
1. X \<subseteq> A
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
X \<subseteq> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have YA: "Y \<subseteq> A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Y \<subseteq> A
[PROOF STEP]
using derivation_A[OF Y]
[PROOF STATE]
proof (prove)
using this:
Y \<subseteq> A
goal (1 subgoal):
1. Y \<subseteq> A
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
Y \<subseteq> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from xX yY
[PROOF STATE]
proof (chain)
picking this:
x \<in> X
y \<in> Y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
x \<in> X
y \<in> Y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof (induct x arbitrary: y)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> X; \<And>y ya. \<lbrakk>y \<in> X; y \<sqsubset> x; ya \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> ya \<and> y \<in> Y \<or> y = ya \<or> ya \<sqsubset> y \<and> ya \<in> X; y \<in> Y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (less x)
[PROOF STATE]
proof (state)
this:
x \<in> X
\<lbrakk>?y \<in> X; ?y \<sqsubset> x; ?ya \<in> Y\<rbrakk> \<Longrightarrow> ?y \<sqsubset> ?ya \<and> ?y \<in> Y \<or> ?y = ?ya \<or> ?ya \<sqsubset> ?y \<and> ?ya \<in> X
y \<in> Y
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> X; \<And>y ya. \<lbrakk>y \<in> X; y \<sqsubset> x; ya \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> ya \<and> y \<in> Y \<or> y = ya \<or> ya \<sqsubset> y \<and> ya \<in> X; y \<in> Y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note xX = \<open>x \<in> X\<close> and IHx = this(2)
[PROOF STATE]
proof (state)
this:
x \<in> X
\<lbrakk>?y \<in> X; ?y \<sqsubset> x; ?ya \<in> Y\<rbrakk> \<Longrightarrow> ?y \<sqsubset> ?ya \<and> ?y \<in> Y \<or> ?y = ?ya \<or> ?ya \<sqsubset> ?y \<and> ?ya \<in> X
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> X; \<And>y ya. \<lbrakk>y \<in> X; y \<sqsubset> x; ya \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> ya \<and> y \<in> Y \<or> y = ya \<or> ya \<sqsubset> y \<and> ya \<in> X; y \<in> Y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from xX XA f
[PROOF STATE]
proof (chain)
picking this:
x \<in> X
X \<subseteq> A
f ` A \<subseteq> A
[PROOF STEP]
have xA: "x \<in> A" and fxA: "f x \<in> A"
[PROOF STATE]
proof (prove)
using this:
x \<in> X
X \<subseteq> A
f ` A \<subseteq> A
goal (1 subgoal):
1. x \<in> A &&& f x \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<in> A
f x \<in> A
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> X; \<And>y ya. \<lbrakk>y \<in> X; y \<sqsubset> x; ya \<in> Y\<rbrakk> \<Longrightarrow> y \<sqsubset> ya \<and> y \<in> Y \<or> y = ya \<or> ya \<sqsubset> y \<and> ya \<in> X; y \<in> Y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from \<open>y \<in> Y\<close>
[PROOF STATE]
proof (chain)
picking this:
y \<in> Y
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
y \<in> Y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof (induct y)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> Y; \<And>y. \<lbrakk>y \<in> Y; y \<sqsubset> x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X\<rbrakk> \<Longrightarrow> x \<sqsubset> x \<and> x \<in> Y \<or> x = x \<or> x \<sqsubset> x \<and> x \<in> X
[PROOF STEP]
case (less y)
[PROOF STATE]
proof (state)
this:
y \<in> Y
\<lbrakk>?y \<in> Y; ?y \<sqsubset> y\<rbrakk> \<Longrightarrow> x \<sqsubset> ?y \<and> x \<in> Y \<or> x = ?y \<or> ?y \<sqsubset> x \<and> ?y \<in> X
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> Y; \<And>y. \<lbrakk>y \<in> Y; y \<sqsubset> x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X\<rbrakk> \<Longrightarrow> x \<sqsubset> x \<and> x \<in> Y \<or> x = x \<or> x \<sqsubset> x \<and> x \<in> X
[PROOF STEP]
note yY = \<open>y \<in> Y\<close> and IHy = less(2)
[PROOF STATE]
proof (state)
this:
y \<in> Y
\<lbrakk>?y \<in> Y; ?y \<sqsubset> y\<rbrakk> \<Longrightarrow> x \<sqsubset> ?y \<and> x \<in> Y \<or> x = ?y \<or> ?y \<sqsubset> x \<and> ?y \<in> X
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> Y; \<And>y. \<lbrakk>y \<in> Y; y \<sqsubset> x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X\<rbrakk> \<Longrightarrow> x \<sqsubset> x \<and> x \<in> Y \<or> x = x \<or> x \<sqsubset> x \<and> x \<in> X
[PROOF STEP]
from yY YA f
[PROOF STATE]
proof (chain)
picking this:
y \<in> Y
Y \<subseteq> A
f ` A \<subseteq> A
[PROOF STEP]
have yA: "y \<in> A" and fyA: "f y \<in> A"
[PROOF STATE]
proof (prove)
using this:
y \<in> Y
Y \<subseteq> A
f ` A \<subseteq> A
goal (1 subgoal):
1. y \<in> A &&& f y \<in> A
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<in> A
f y \<in> A
goal (1 subgoal):
1. \<And>x. \<lbrakk>x \<in> Y; \<And>y. \<lbrakk>y \<in> Y; y \<sqsubset> x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X\<rbrakk> \<Longrightarrow> x \<sqsubset> x \<and> x \<in> Y \<or> x = x \<or> x \<sqsubset> x \<and> x \<in> X
[PROOF STEP]
from X xX
[PROOF STATE]
proof (chain)
picking this:
derivation X
x \<in> X
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
using this:
derivation X
x \<in> X
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof (cases rule: derivation_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (suc Z z)
[PROOF STATE]
proof (state)
this:
Z = {y \<in> X. y \<sqsubset> x}
extreme Z (\<sqsubseteq>) z
x = f z
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note Z = \<open>Z = {z \<in> X. z \<sqsubset> x}\<close> and Zz = \<open>extreme Z (\<sqsubseteq>) z\<close> and xfz = \<open>x = f z\<close>
[PROOF STATE]
proof (state)
this:
Z = {z \<in> X. z \<sqsubset> x}
extreme Z (\<sqsubseteq>) z
x = f z
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Z = {z \<in> X. z \<sqsubset> x}
extreme Z (\<sqsubseteq>) z
x = f z
[PROOF STEP]
have zx: "z \<sqsubset> x" and zX: "z \<in> X"
[PROOF STATE]
proof (prove)
using this:
Z = {z \<in> X. z \<sqsubset> x}
extreme Z (\<sqsubseteq>) z
x = f z
goal (1 subgoal):
1. z \<sqsubset> x &&& z \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
z \<sqsubset> x
z \<in> X
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note IHz = IHx[OF zX zx yY]
[PROOF STATE]
proof (state)
this:
z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have 1: "y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
using suc_any[OF X Y xX yY Z Zz xfz IHz] IHy
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
\<lbrakk>?y \<in> Y; ?y \<sqsubset> y\<rbrakk> \<Longrightarrow> x \<sqsubset> ?y \<and> x \<in> Y \<or> x = ?y \<or> ?y \<sqsubset> x \<and> ?y \<in> X
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; extreme Ya (\<sqsubseteq>) y; x = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from Y yY
[PROOF STATE]
proof (chain)
picking this:
derivation Y
y \<in> Y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
derivation Y
y \<in> Y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof (cases rule: derivation_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (suc W w)
[PROOF STATE]
proof (state)
this:
W = {y \<in> Y. y \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note W = \<open>W = {w \<in> Y. w \<sqsubset> y}\<close> and Ww = \<open>extreme W (\<sqsubseteq>) w\<close> and yfw = \<open>y = f w\<close>
[PROOF STATE]
proof (state)
this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
[PROOF STEP]
have wY: "w \<in> Y" and wy: "w \<sqsubset> y"
[PROOF STATE]
proof (prove)
using this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (1 subgoal):
1. w \<in> Y &&& w \<sqsubset> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
w \<in> Y
w \<sqsubset> y
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have IHw: "w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
[PROOF STEP]
using IHy[OF wY wy]
[PROOF STATE]
proof (prove)
using this:
x \<sqsubset> w \<and> x \<in> Y \<or> x = w \<or> w \<sqsubset> x \<and> w \<in> X
goal (1 subgoal):
1. w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have "x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
using suc_any[OF Y X yY xX W Ww yfw IHw]
[PROOF STATE]
proof (prove)
using this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with 1
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
using antisym xA yA
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?x \<sqsubseteq> ?y; ?y \<sqsubseteq> ?x; ?x \<in> A; ?y \<in> A\<rbrakk> \<Longrightarrow> ?x = ?y
x \<in> A
y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (lim W)
[PROOF STATE]
proof (state)
this:
W = {y \<in> Y. y \<sqsubset> y}
f ` W \<subseteq> W
extreme_bound A (\<sqsubseteq>) W y
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note W = \<open>W = {w \<in> Y. w \<sqsubset> y}\<close> and fW = \<open>f ` W \<subseteq> W\<close> and Wy = \<open>extreme_bound A (\<sqsubseteq>) W y\<close>
[PROOF STATE]
proof (state)
this:
W = {w \<in> Y. w \<sqsubset> y}
f ` W \<subseteq> W
extreme_bound A (\<sqsubseteq>) W y
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have "x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
using lim_any[OF Y X yY xX W fW Wy] IHy
[PROOF STATE]
proof (prove)
using this:
\<forall>z\<in>Y. z \<sqsubset> y \<longrightarrow> z \<sqsubset> x \<and> z \<in> X \<or> z = x \<or> x \<sqsubset> z \<and> x \<in> Y \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?y \<in> Y; ?y \<sqsubset> y\<rbrakk> \<Longrightarrow> x \<sqsubset> ?y \<and> x \<in> Y \<or> x = ?y \<or> ?y \<sqsubset> x \<and> ?y \<in> X
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with 1
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
using antisym xA yA
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?x \<sqsubseteq> ?y; ?y \<sqsubseteq> ?x; ?x \<in> A; ?y \<in> A\<rbrakk> \<Longrightarrow> ?x = ?y
x \<in> A
y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (lim Z)
[PROOF STATE]
proof (state)
this:
Z = {y \<in> X. y \<sqsubset> x}
f ` Z \<subseteq> Z
extreme_bound A (\<sqsubseteq>) Z x
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note Z = \<open>Z = {z \<in> X. z \<sqsubset> x}\<close> and fZ = \<open>f ` Z \<subseteq> Z\<close> and Zx = \<open>extreme_bound A (\<sqsubseteq>) Z x\<close>
[PROOF STATE]
proof (state)
this:
Z = {z \<in> X. z \<sqsubset> x}
f ` Z \<subseteq> Z
extreme_bound A (\<sqsubseteq>) Z x
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have 1: "y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
using lim_any[OF X Y xX yY Z fZ Zx] IHx[OF _ _ yY]
[PROOF STATE]
proof (prove)
using this:
\<forall>z\<in>X. z \<sqsubset> x \<longrightarrow> z \<sqsubset> y \<and> z \<in> Y \<or> z = y \<or> y \<sqsubset> z \<and> y \<in> X \<Longrightarrow> y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
\<lbrakk>?y \<in> X; ?y \<sqsubset> x\<rbrakk> \<Longrightarrow> ?y \<sqsubset> y \<and> ?y \<in> Y \<or> ?y = y \<or> y \<sqsubset> ?y \<and> y \<in> X
goal (1 subgoal):
1. y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> X. y \<sqsubset> x}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya x\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
from Y yY
[PROOF STATE]
proof (chain)
picking this:
derivation Y
y \<in> Y
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
derivation Y
y \<in> Y
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
proof (cases rule: derivation_cases)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (suc W w)
[PROOF STATE]
proof (state)
this:
W = {y \<in> Y. y \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note W = \<open>W = {w \<in> Y. w \<sqsubset> y}\<close> and Ww = \<open>extreme W (\<sqsubseteq>) w\<close> and yfw = \<open>y = f w\<close>
[PROOF STATE]
proof (state)
this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
[PROOF STEP]
have wY: "w \<in> Y" and wy: "w \<sqsubset> y"
[PROOF STATE]
proof (prove)
using this:
W = {w \<in> Y. w \<sqsubset> y}
extreme W (\<sqsubseteq>) w
y = f w
goal (1 subgoal):
1. w \<in> Y &&& w \<sqsubset> y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
w \<in> Y
w \<sqsubset> y
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have IHw: "w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
[PROOF STEP]
using IHy[OF wY wy]
[PROOF STATE]
proof (prove)
using this:
x \<sqsubset> w \<and> x \<in> Y \<or> x = w \<or> w \<sqsubset> x \<and> w \<in> X
goal (1 subgoal):
1. w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
w \<sqsubset> x \<and> w \<in> X \<or> w = x \<or> x \<sqsubset> w \<and> x \<in> Y
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have "x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
using suc_any[OF Y X yY xX W Ww yfw IHw]
[PROOF STATE]
proof (prove)
using this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (2 subgoals):
1. \<And>Ya y. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; extreme Ya (\<sqsubseteq>) y; y = f y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
2. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with 1
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
using antisym xA yA
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?x \<sqsubseteq> ?y; ?y \<sqsubseteq> ?x; ?x \<in> A; ?y \<in> A\<rbrakk> \<Longrightarrow> ?x = ?y
x \<in> A
y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
case (lim W)
[PROOF STATE]
proof (state)
this:
W = {y \<in> Y. y \<sqsubset> y}
f ` W \<subseteq> W
extreme_bound A (\<sqsubseteq>) W y
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
note W = \<open>W = {w \<in> Y. w \<sqsubset> y}\<close> and fW = \<open>f ` W \<subseteq> W\<close> and Wy = \<open>extreme_bound A (\<sqsubseteq>) W y\<close>
[PROOF STATE]
proof (state)
this:
W = {w \<in> Y. w \<sqsubset> y}
f ` W \<subseteq> W
extreme_bound A (\<sqsubseteq>) W y
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
have "x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
using lim_any[OF Y X yY xX W fW Wy] IHy
[PROOF STATE]
proof (prove)
using this:
\<forall>z\<in>Y. z \<sqsubset> y \<longrightarrow> z \<sqsubset> x \<and> z \<in> X \<or> z = x \<or> x \<sqsubset> z \<and> x \<in> Y \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?y \<in> Y; ?y \<sqsubset> y\<rbrakk> \<Longrightarrow> x \<sqsubset> ?y \<and> x \<in> Y \<or> x = ?y \<or> ?y \<sqsubset> x \<and> ?y \<in> X
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. \<And>Ya. \<lbrakk>Ya = {y \<in> Y. y \<sqsubset> y}; f ` Ya \<subseteq> Ya; extreme_bound A (\<sqsubseteq>) Ya y\<rbrakk> \<Longrightarrow> x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
with 1
[PROOF STATE]
proof (chain)
picking this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
using antisym xA yA
[PROOF STATE]
proof (prove)
using this:
y \<sqsubset> x \<and> y \<in> X \<or> x \<sqsubseteq> y
x \<sqsubset> y \<and> x \<in> Y \<or> y \<sqsubseteq> x
\<lbrakk>?x \<sqsubseteq> ?y; ?y \<sqsubseteq> ?x; ?x \<in> A; ?y \<in> A\<rbrakk> \<Longrightarrow> ?x = ?y
x \<in> A
y \<in> A
goal (1 subgoal):
1. x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
x \<sqsubset> y \<and> x \<in> Y \<or> x = y \<or> y \<sqsubset> x \<and> y \<in> X
goal:
No subgoals!
[PROOF STEP]
qed |
lemma residue_rmul: assumes "open s" "z \<in> s" and f_holo: "f holomorphic_on s - {z}" shows "residue (\<lambda>z. (f z) * c) z= residue f z * c" |
#pragma once
#include <src/common/Span.h>
#include <gsl/gsl>
#include <type_traits>
#include <utility>
#include <cstdint>
namespace miner {
//simple replacement for std::vector<char> in situations where huge
//allocations happen, since the standard doesn't define how
//much more memory std::vector allocates on reserve or resize.
//basically an owning gsl::span
template<class T = uint8_t>
class DynamicBuffer {
gsl::owner<T*> owner = nullptr;
span<T> buffer;
public:
DynamicBuffer() = default;
// only use alignment if a stronger power of two alignment than the natural alignment of T is needed
DynamicBuffer(size_t size, size_t alignment = alignof(T))
: owner(reinterpret_cast<T*>(::operator new(size * sizeof(T) + alignment - 1)))
, buffer(reinterpret_cast<T*>((uintptr_t(owner) + alignment - 1) & ~uintptr_t(alignment - 1))
, ptrdiff_t(size)) {
if (!std::is_trivially_constructible<T>::value) {
new(buffer.data()) T[size];
}
}
~DynamicBuffer() {
if (owner) { //if not moved-from
if (!std::is_trivially_destructible<T>::value) {
for (auto &element : buffer) {
element.~T();
}
}
::operator delete(owner);
}
}
DynamicBuffer(DynamicBuffer &&o) noexcept
: owner(o.owner)
, buffer(o.buffer) {
o.owner = nullptr; //remove ownership
}
        DynamicBuffer &operator=(DynamicBuffer &&o) noexcept {
            //swap instead of plain overwrite, so a buffer we already own is not leaked:
            //it is released when the moved-from object is destroyed
            std::swap(owner, o.owner);
            std::swap(buffer, o.buffer);
            return *this;
        }
//copy
DynamicBuffer(const DynamicBuffer &) = delete;
DynamicBuffer &operator=(const DynamicBuffer &) = delete;
T *data() {
return buffer.data();
}
uint8_t *bytes() {
return reinterpret_cast<uint8_t*>(buffer.data());
}
const T *data() const {
return buffer.data();
}
const uint8_t *bytes() const {
return reinterpret_cast<const uint8_t*>(buffer.data());
}
size_t size_bytes() const {
return static_cast<size_t>(buffer.size_bytes());
}
size_t size() const {
return static_cast<size_t>(buffer.size());
}
span<const T> getSpan() const {
return buffer;
}
span<const uint8_t> getByteSpan() const {
return {bytes(), static_cast<ptrdiff_t>(size_bytes())};
}
operator bool() const {
return owner != nullptr;
}
};
}
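
//A minimal usage sketch (illustrative only, not part of this header):
//allocate a 64-byte aligned buffer of 1024 uint32_t values, fill it and
//expose it as a read-only span without copying. The span alias is the one
//pulled in from src/common/Span.h above.
//
//  miner::DynamicBuffer<uint32_t> buf(1024, 64);
//  for (size_t i = 0; i < buf.size(); ++i) {
//      buf.data()[i] = static_cast<uint32_t>(i);
//  }
//  auto view = buf.getSpan(); //span<const uint32_t>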
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 13:50:47 2020
@author: kench
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df=pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
df.head()
#convert temperature from tenths of degree C to degree C
df['Data_Value']=0.1*df.Data_Value
days=list(map(lambda x: x.split('-')[-2]+'-'+x.split('-')[-1], df.Date))
years=list(map(lambda x: x.split('-')[0], df.Date))
df['Days']=days
df['Years']=years
df_2005_to_2014=df[(df.Days!='02-29')&(df.Years!='2015')]
df_2015=df[(df.Days!='02-29')&(df.Years=='2015')]
df_max=df_2005_to_2014.groupby(['Element','Days']).max()
df_min = df_2005_to_2014.groupby(['Element','Days']).min()
df_2015_max=df_2015.groupby(['Element','Days']).max()
df_2015_min = df_2015.groupby(['Element','Days']).min()
record_max=df_max.loc['TMAX'].Data_Value
record_min=df_min.loc['TMIN'].Data_Value
record_2015_max=df_2015_max.loc['TMAX'].Data_Value
record_2015_min=df_2015_min.loc['TMIN'].Data_Value
plt.figure(figsize=(10,7))
plt.plot(np.arange(len(record_max)),record_max, '--k', label="record high")
plt.plot(np.arange(len(record_max)),record_min, '-k',label="record low")
plt.scatter(np.where(record_2015_min < record_min.values),record_2015_min[record_2015_min < record_min].values,c='b',label='2015 break low')
plt.scatter(np.where(record_2015_max > record_max.values),record_2015_max[record_2015_max > record_max].values,c='r',label='2015 break high')
plt.xlabel('month',size=14)
plt.ylabel('temperature($^\circ C$ )',size=14)
plt.xticks(np.arange(0,365,31), ['Jan','Feb', 'Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'])
ax=plt.gca()
ax.axis([0,365,-40,40])
plt.gca().fill_between(np.arange(0,365),record_min,record_max,facecolor='blue',alpha=0.25)
plt.title('Record temperatures for different months between 2005-2014',size=14)
plt.legend(loc=0)
plt.show() |
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$include "gga_c_zvpbeint.mpl"
$include "gga_c_pbeloc.mpl"
params_a_alpha := 0.5:
params_a_omega := 2:
(* redefine nu of zvpbeint *)
zvpbeint_nu := (rs, z, t) ->
2*(4/(3*Pi^2))^(1/18) * rs^(1/3):
(* Note that f_pbe here is, in fact, pbeloc *)
f := (rs, z, xt, xs0, xs1) ->
zvpbeint_ff(rs, z, 0) * f_pbe(rs, z, xt, xs0, xs1):
|
State Before: α : Type u_1
β : Type ?u.226607
inst✝ : CommMonoid α
n : ℕ
f : Fin n → α
⊢ prod (ofFn f) = ∏ i : Fin n, f i State After: case h.e'_2.h.e'_4
α : Type u_1
β : Type ?u.226607
inst✝ : CommMonoid α
n : ℕ
f : Fin n → α
⊢ ofFn f = take n (ofFn f)
case h.e'_3.h
α : Type u_1
β : Type ?u.226607
inst✝ : CommMonoid α
n : ℕ
f : Fin n → α
⊢ univ = Finset.filter (fun j => ↑j < n) univ Tactic: convert prod_take_ofFn f n State Before: case h.e'_2.h.e'_4
α : Type u_1
β : Type ?u.226607
inst✝ : CommMonoid α
n : ℕ
f : Fin n → α
⊢ ofFn f = take n (ofFn f) State After: no goals Tactic: rw [take_all_of_le (le_of_eq (length_ofFn f))] State Before: case h.e'_3.h
α : Type u_1
β : Type ?u.226607
inst✝ : CommMonoid α
n : ℕ
f : Fin n → α
⊢ univ = Finset.filter (fun j => ↑j < n) univ State After: no goals Tactic: simp |
{-# OPTIONS --cubical-compatible #-}
open import Common.Prelude
open import Common.Equality
open import Common.Product
data _≅_ {A : Set} (a : A) : {B : Set} (b : B) → Set₁ where
refl : a ≅ a
data D : Bool → Set where
x : D true
y : D false
P : Set -> Set₁
P S = Σ S (\s → s ≅ x)
pbool : P (D true)
pbool = _ , refl
¬pfin : P (D false) → ⊥
¬pfin (y , ())
|
%IKINE_SYM Symbolic inverse kinematics
%
% Q = R.IKINE_SYM(K, OPTIONS) is a cell array (Cx1) of inverse kinematic
% solutions of the SerialLink object ROBOT. The cells of Q represent the
% different possible configurations. Each cell of Q is a vector (Nx1), and
% element J is the symbolic expressions for the J'th joint angle. The
% solution is in terms of the desired end-point pose of the robot which is
% represented by the symbolic matrix (3x4) with elements
% nx ox ax tx
% ny oy ay ty
% nz oz az tz
% where the first three columns specify orientation and the last column
% specifies translation.
%
% K <= N can have only specific values:
% - 2 solve for translation tx and ty
% - 3 solve for translation tx, ty and tz
% - 6 solve for translation and orientation
%
% Options::
%
% 'file',F Write the solution to an m-file named F
%
% Example::
%
% mdl_planar2
% sol = p2.ikine_sym(2);
% length(sol)
% ans =
% 2 % there are 2 solutions
% s1 = sol{1} % is one solution
% q1 = s1(1); % the expression for q1
% q2 = s1(2); % the expression for q2
%
% Notes::
% - Requires the Symbolic Toolbox for MATLAB.
% - This code is experimental and has a lot of diagnostic prints.
% - Based on the classical approach using Pieper's method.
% Copyright (C) 1993-2015, by Peter I. Corke
%
% This file is part of The Robotics Toolbox for MATLAB (RTB).
%
% RTB is free software: you can redistribute it and/or modify
% it under the terms of the GNU Lesser General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% RTB is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Lesser General Public License for more details.
%
% You should have received a copy of the GNU Leser General Public License
% along with RTB. If not, see <http://www.gnu.org/licenses/>.
%
% http://www.petercorke.com
function out = ikine_sym(srobot, N, varargin)
%
% Given a robot model the following steps are performed:
% 1. Convert model to symbolic form
% 2. Find relevant trig equations and solve them for joint angles
% 3. Write an M-file to implement the solution
% xikine(T)
% xikine(T, S) where S is a 3 vector with elements 1 or 2 to select
% the first or second solution for the corresponding joint.
%
% TODO:
% - handle the wrist joints, only first 3 joints so far
% - handle base and tool transforms
opt.file = [];
opt = tb_optparse(opt, varargin);
% make a symbolic representation of the passed robot
srobot = sym(srobot);
q = srobot.gencoords();
% test N DOF has an allowable value
switch N
case 2
case 3
case 6
otherwise
error('RTB:ikine_sym:badarg', 'Can only solve for 2,3,6 DOF');
end
% define symbolic elements of the homogeneous transform
syms nx ox ax tx
syms ny oy ay ty
syms nz oz az tz
syms d3
% inits
Q = {};
trigsubOld = [];
trigsubNew = [];
% loop over each joint variable
for j=1:N
fprintf('----- solving for joint %d\n', j);
% create some equations to sift through
[left,right] = pieper(srobot, j, 'left');
% decide which equations to look at
if j <= 3
% for first three joints only focus on translational part
left = left(1:3, 4); left = left(:);
right = right(1:3, 4); right = right(:);
else
% for last three joints only focus on rotational part
left = left(1:3, 1:3); left = left(:);
right = right(1:3, 1:3); right = right(:);
end
% substitute sin/cos for preceding joint as S/C, essentially removes
% the joint variables from the equations and treats them as constants.
if ~isempty(trigsubOld)
left = subs(left, trigsubOld, trigsubNew);
right = subs(right, trigsubOld, trigsubNew);
end
% then simplify the LHS
% do it after the substitution to prevent sum of angle terms being introduced
left = simplify(left);
% search for a solveable equation:
% function of current joint variable on the LHS
% constant element on the RHS
k = NaN;
for i=1:length(left)
if hasonly(left(i), j) && isconstant(right(i))
k = i;
break;
end
end
eq = [];
if ~isnan(k)
% create the equation to solve: LHS-RHS == 0
eq = left(k) - right(k);
else
% ok, we weren't lucky, try another strategy
% find all equations:
% function of current joint variable on the LHS
k = [];
for i=1:length(left)
% has qj on the left and constant on the right
if hasonly(left(i), j)
k = [k i];
end
end
% hopefully we found two of them
if length(k) < 2
continue;
end
% we did, lets see if the sum square RHS is constant
rhs = simplify(right(k(1))^2 + right(k(2))^2); % was simple
if isconstant( rhs )
% it is, let's sum and square the LHS
fprintf('lets square and add %d %d\n', k);
eq = simplify( expand( left(k(1))^2 + left(k(2))^2 ) ) - rhs; % was simple
end
end
        % expand the list of joint variable substitutions
fprintf('subs sin/cos q%d for S/C\n', j);
trigsubOld = [trigsubOld mvar('sin(q%d)', j) mvar('cos(q%d)', j)];
trigsubNew = [trigsubNew mvar('S%d', j) mvar('C%d', j)];
if isempty(eq)
            fprintf('cannot solve this equation\n');
k
left(k)==right(k)
error('cant solve');
end
% now solve the equation
if srobot.links(j).isrevolute()
% for revolute joint it will be a trig equation, do we know how to solve it?
Q{j} = solve_joint(eq, j );
if isempty(Q)
warning('cant solve this kind of equation');
end
else
fprintf('prismatic case\n')
q = sym( sprintf('q%d', j) );
Q{j} = solve( eq == 0, q);
end
end
% final simplification
% get rid of C^2+S^2 and C^4, S^4 terms
fprintf('**final simplification pass\n')
% create a list of simplifications
% substitute S^2 = 1-C^2, S^4=(1-C^2)^2
tsubOld = [];
tsubNew = [];
for j=1:N
tsubOld = [tsubOld mvar('S%d', j)^2 mvar('S%d', j)^4];
tsubNew = [tsubNew 1-mvar('C%d', j)^2 (1-mvar('C%d', j)^2)^2];
end
for j=1:N
for k=1:5
% seem to need to iterate this, not quite sure why
Q{j} = simplify( expand( subs(Q{j}, tsubOld, tsubNew) ) );
end
end
% Q is a cell array of equations for joint variables
if nargout > 0
out = Q;
end
if ~isempty(opt.file)
fprintf('**generate MATLAB code\n')
gencode(Q);
end
end
%PIEPER Return a set of equations using Pieper's method
%
% [L,R] = pieper(robot, n, which)
%
% If robot has link matrix A1 A2 A3 A4 then returns 12 equations from equating the coefficients of
%
% A1' T = A2 A3 A4 n=1, which='left'
% A2' A1' T = A3 A4 n=2, which='left'
% A3' A2' A1' T = A4 n=3, which='left'
%
% T A4' = A1 A2 A3 n=1, which='right'
% T A4' A3' = A1 A2 n=2, which='right'
% T A4' A3' A2' = A1 n=3, which='right'
%
% Judicious choice of the equations can lead to joint solutions
function [L,R] = pieper(robot, n, which)
if nargin < 3
which = 'left';
end
syms nx ox ax tx real
syms ny oy ay ty real
syms nz oz az tz real
T = [nx ox ax tx
ny oy ay ty
        nz oz az tz
0 0 0 1 ];
T = inv(robot.base) * T * inv(robot.tool);
q = robot.gencoords();
% Create the symbolic A matrices
for j=1:robot.n
A{j} = robot.links(j).A(q(j));
end
switch which
case 'left'
left = T;
for j=1:n
left = inv(A{j}) * left ;
end
right = eye(4,4);
for j=n+1:robot.n
right = right * A{j};
end
case 'right'
left = T;
for j=1:n
left = left * inv(A{robot.n-j+1});
end
right = eye(4,4);
for j=1:(robot.n-n)
right = right * A{j};
end
end
% left = simple(left);
% right = simple(right);
if nargout == 0
left == right
elseif nargout == 1
L = left;
elseif nargout == 2
L = left;
R = right;
end
end
%SOLVE_JOINT Solve a trigonometric equation
%
% S = SOLVE_JOINT(EQ, J) solves the equation EQ=0 for the joint variable qJ.
% The result is a cell array of solutions.
%
% The equations must be of the form:
% A cos(qJ) + B sin(qJ) = 0
% A cos(qJ) + B sin(qJ) = C
%
% where A, B, C are arbitrarily complex expressions. qJ can be the only
% joint variable in the expression.
function s = solve_joint(eq, j)
sinj = mvar('sin(q%d)', j);
cosj = mvar('cos(q%d)', j);
A = getcoef(eq, cosj);
B = getcoef(eq, sinj);
if isempty(A) || isempty(B)
warning('don''t know how to solve this kind of equation');
end
C = -simplify(eq - A*cosj - B*sinj); % was simple
if C == 0
% A cos(q) + B sin(q) = 0
s(2) = atan2(A, -B);
s(1) = atan2(-A, B);
else
% A cos(q) + B sin(q) = C
r = sqrt(A^2 + B^2 - C^2);
phi = atan2(A, B);
s(2) = atan2(C, r) - phi;
s(1) = atan2(C, -r) - phi;
end
if nargout == 0
try
eval(s)
catch
s
end
end
end
%MVAR Create a symbolic variable
%
% V = MVAR(FMT, ARGS) is a symbolic variable created using SPRINTF
%
% eg. mvar('q%d', j)
%
% The symbolic is explicitly declared to be real.
function v = mvar(fmt, varargin)
if isempty(strfind(fmt, '('))
% not a function
v = sym( sprintf(fmt, varargin{:}), 'real' );
else
v = sym( sprintf(fmt, varargin{:}) );
end
end
%HASONLY Determine if an expression contains only certain joint variables
%
% S = HASONLY(E, L) is true if the joint variables (q1, q2 etc.) in the expression E
% are listed in the vector L.
%
% Eg. hasonly('sin(q1)*cos(q2)*cos(q4)', [1 2 3]) -> true
% Eg. hasonly('sin(q1)*cos(q2)*cos(q4)', [1]) -> false
function s = hasonly(eq, j)
q = findq(eq);
if isempty(q)
s = false;
else
s = all(ismember(j, findq(eq)));
end
end
%ISCONSTANT Determine if an expression is free of joint variables
%
% S = ISCONSTANT(E) is true if the expression E contains no joint variables such
% q1, q2 etc.
function s = isconstant(eq)
s = isempty(findq(eq));
end
%FINDQ Find the joint variables in expression
%
% Q = FINDQ(E) returns a list of integers indicating the joint variables found
% in the expression E. For instance an instance of 'q1' would cause a 1 to be
% returned and so on.
%
% Eg. findq('sin(q1)*cos(q2)+S3') -> [1 2]
function q = findq(s)
q = [];
for var=symvar(s)
if isempty(var)
break
end
varname = char(var);
if varname(1) == 'q'
q = [q str2num(varname(2:end))];
end
end
end
function coef = getcoef(eq, trig)
z = children( collect(eq, trig) );
z = children( z(1) );
coef = z(1);
end
% Output a joint expression to a file
function s = gencode(Q, filename)
function s = G(s, fmt, varargin)
s = strvcat(s, sprintf(fmt, varargin{:}));
end
s = 'function q = xikine(T, sol)';
s = G(s, ' if nargin < 2; sol = ones(1, %d); end', length(Q));
s = G(s, ' px = T(1,4); py = T(2,4); pz = T(3,4);');
for j=1:3
Qj = Q{j}; % cast it to subclass
if length(Qj) == 1
s = G(s, ' q(%d) = %s', j, matgen2(Qj));
elseif length(Qj) == 2
s = G(s, ' if sol(%d) == 1', j);
s = G(s, ' q(%d) = %s', j, matgen2(Qj(1)));
s = G(s, ' else');
s = G(s, ' q(%d) = %s', j, matgen2(Qj(2)));
s = G(s, ' end');
end
s = G(s, ' S%d = sin(q(%d));', j, j);
s = G(s, ' C%d = cos(q(%d));', j, j);
s = G(s, ' ');
end
s = G(s, 'end');
fp = fopen(filename, 'w');
for i=1:numrows(s)
fprintf(fp, '%s\n', deblank(s(i,:)));
end
fclose(fp);
end
% Generate MATLAB code from an expression
%
% Requires a bit of a hack, a subclass of sym (sym2) to do this
function s = matgen2(e)
s = matgen(sym2(e));
k = strfind(s, '=');
s = deblank( s(k+2:end) );
end
|
open import Coinduction using ( ∞ ; ♯_ ; ♭ )
open import Data.Product using ( _,_ )
open import FRP.LTL.ISet.Core using ( ISet ; M⟦_⟧ ; splitM⟦_⟧ ; ⟦_⟧ ; ⌈_⌉ ; [_] )
open import FRP.LTL.ISet.Product using ( _∧_ )
open import FRP.LTL.ISet.Stateless using ( _⇒_ )
open import FRP.LTL.Time.Bound using ( Time∞ ; fin ; +∞ ; _≼_ ; _≺_ ; ≼-refl ; ≺-impl-≼ ; ≺-impl-⋡ ; _≺-trans_ ; _≼-case_ ; lt ; eq ; gt ; t≺+∞ )
open import FRP.LTL.Time.Interval using ( [_⟩ )
open import FRP.LTL.Util using ( ⊥-elim ; ≡-relevant )
open import Relation.Binary.PropositionalEquality using ( _≡_ ; refl )
module FRP.LTL.ISet.Decoupled where
data _∙_⊷_∙_ (A : ISet) (s : Time∞) (B : ISet) (u : Time∞) : Set where
inp : .(s ≺ u) → .(u ≺ +∞) →
(∀ {t} .(s≺t : s ≺ t) → M⟦ A ⟧ [ s≺t ⟩ → ∞ (A ∙ t ⊷ B ∙ u)) →
(A ∙ s ⊷ B ∙ u)
out : ∀ {v} .(u≺v : u ≺ v) →
M⟦ B ⟧ [ u≺v ⟩ → ∞ (A ∙ s ⊷ B ∙ v) →
(A ∙ s ⊷ B ∙ u)
done : .(u ≡ +∞) →
(A ∙ s ⊷ B ∙ u)
_▹_ : ISet → ISet → ISet
A ▹ B = ⌈ (λ t → A ∙ fin t ⊷ B ∙ fin t) ⌉
_/_/_ : ∀ {A B s t u} → (A ∙ s ⊷ B ∙ u) → .(s≺t : s ≺ t) → M⟦ A ⟧ [ s≺t ⟩ → (A ∙ t ⊷ B ∙ u)
inp s≺u u≺∞ P / s≺t / σ = ♭ (P s≺t σ)
out u≺v τ P / s≺t / σ = out u≺v τ (♯ (♭ P / s≺t / σ))
done u≡∞ / s≺t / σ = done u≡∞
mutual
≻-tr : ∀ {A B C s u} .(s≻u : u ≺ s) → M⟦ B ⟧ [ s≻u ⟩ →
((A ∧ B) ∙ u ⊷ (A ∧ C) ∙ u) → (B ∙ s ⊷ C ∙ u)
≻-tr s≻u σ (inp u≺u u≺∞ P) = ⊥-elim (≺-impl-⋡ u≺u ≼-refl)
≻-tr {A} {B} {C} {s} {u} s≻u σ (out {v} u≺v [ ρ , τ ] P)
with s ≼-case v
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | lt s≺v
with splitM⟦ A ⟧ [ s≻u ⟩ [ s≺v ⟩ refl ρ
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | lt s≺v | (ρ₁ , ρ₂) =
out u≺v τ (♯ ≺-tr s≺v ρ₂ (♭ P / s≻u / [ ρ₁ , σ ]))
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | eq s≡v
with ≡-relevant s≡v
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | eq s≡v | refl =
out u≺v τ (♯ tr (♭ P / s≻u / [ ρ , σ ]))
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | gt s≻v
with splitM⟦ B ⟧ [ u≺v ⟩ [ s≻v ⟩ refl σ
≻-tr {A} {B} {C} s≻u σ (out u≺v [ ρ , τ ] P) | gt s≻v | (σ₁ , σ₂) =
out u≺v τ (♯ ≻-tr s≻v σ₂ (♭ P / u≺v / [ ρ , σ₁ ]))
≻-tr s≻u σ (done u≡∞) = done u≡∞
≺-tr : ∀ {A B C s u} .(s≺u : s ≺ u) → M⟦ A ⟧ [ s≺u ⟩ →
((A ∧ B) ∙ s ⊷ (A ∧ C) ∙ u) → (B ∙ s ⊷ C ∙ u)
≺-tr {A} {B} {C} {s} {+∞} s≺u ρ P = done refl
≺-tr {A} {B} {C} {s} {fin u} s≺u ρ P = inp s≺u t≺+∞ Q where
Q : ∀ {t} .(s≺t : s ≺ t) → M⟦ B ⟧ [ s≺t ⟩ → ∞ (B ∙ t ⊷ C ∙ fin u)
Q {t} s≺t σ with t ≼-case (fin u)
Q s≺t σ | lt t≺u with splitM⟦ _ ⟧ [ s≺t ⟩ [ t≺u ⟩ refl ρ
Q s≺t σ | lt t≺u | (ρ₁ , ρ₂) = ♯ ≺-tr t≺u ρ₂ (P / s≺t / [ ρ₁ , σ ])
Q s≺t σ | eq t≡u with ≡-relevant t≡u
Q s≺t σ | eq t≡u | refl = ♯ tr (P / s≺u / [ ρ , σ ])
Q s≺t σ | gt t≻u with splitM⟦ _ ⟧ [ s≺u ⟩ [ t≻u ⟩ refl σ
Q s≺t σ | gt t≻u | (σ₁ , σ₂) = ♯ (≻-tr t≻u σ₂ (P / s≺u / [ ρ , σ₁ ]))
tr : ∀ {A B C s} → ((A ∧ B) ∙ s ⊷ (A ∧ C) ∙ s) → (B ∙ s ⊷ C ∙ s)
tr (inp s≺s s≺∞ P) = ⊥-elim (≺-impl-⋡ s≺s ≼-refl)
tr (out s≺u [ ρ , τ ] P) = out s≺u τ (♯ ≺-tr s≺u ρ (♭ P))
tr (done s≡∞) = done s≡∞
loop : ∀ {A B C} → ⟦ ((A ∧ B) ▹ (A ∧ C)) ⇒ (B ▹ C) ⟧
loop [ [ f ] ] = [ (λ t t∈i → tr (f t t∈i)) ] |
{- Cubical Agda with K
This file demonstrates the incompatibility of the --cubical
and --with-K flags, relying on the well-known inconsistency of K with
univalence.
The --safe flag can be used to prevent accidentally mixing such
incompatible flags.
-}
{-# OPTIONS --cubical --no-import-sorts --with-K #-}
module Cubical.WithK where
open import Cubical.Data.Equality
open import Cubical.Data.Bool
open import Cubical.Data.Empty
private
variable
ℓ : Level
A : Type ℓ
x y : A
uip : (prf : x ≡p x) → prf ≡c reflp
uip reflp i = reflp
transport-uip : (prf : A ≡p A) → transport (ptoc prf) x ≡c x
transport-uip {x = x} prf =
cong (λ m → transport (ptoc m) x) (uip prf) ∙ transportRefl x
transport-not : transport (ptoc (ctop notEq)) true ≡c false
transport-not = cong (λ a → transport a true) (ptoc-ctop notEq)
false-true : false ≡c true
false-true = sym transport-not ∙ transport-uip (ctop notEq)
absurd : (X : Type) → X
absurd X = transport (cong sel false-true) true
where
sel : Bool → Type
sel false = Bool
sel true = X
inconsistency : ⊥
inconsistency = absurd ⊥
|
[STATEMENT]
lemma emb_step_cases [consumes 1, case_names chop extended_chop remove_arg under_arg]:
assumes emb:"t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b s"
and chop:"chop t = s \<Longrightarrow> P"
and extended_chop:"chop t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b s \<Longrightarrow> P"
and remove_arg:"\<And>i. head t = head s \<Longrightarrow> i<num_args t \<Longrightarrow> args s = take i (args t) @ drop (Suc i) (args t) \<Longrightarrow> P"
and under_arg:"\<And>i. head t = head s \<Longrightarrow> num_args t = num_args s \<Longrightarrow> args t ! i \<rightarrow>\<^sub>e\<^sub>m\<^sub>b args s ! i \<Longrightarrow>
(\<And>j. j<num_args t \<Longrightarrow> i \<noteq> j \<Longrightarrow> args t ! j = args s ! j) \<Longrightarrow> P"
shows P
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. P
[PROOF STEP]
obtain p d where pd_def:"emb_step_at p d t = s" "position_of t (p @ [d])"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>p d. \<lbrakk>emb_step_at p d t = s; position_of t (p @ [d])\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using emb emb_step_equiv' position_if_emb_step_at
[PROOF STATE]
proof (prove)
using this:
t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b s
(?t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b ?s) = ((\<exists>p. p \<noteq> [] \<and> emb_step_at' p ?t = ?s) \<and> ?t \<noteq> ?s)
\<lbrakk>emb_step_at ?p ?d ?t = ?u; ?t \<noteq> ?u\<rbrakk> \<Longrightarrow> position_of ?t (?p @ [?d])
goal (1 subgoal):
1. (\<And>p d. \<lbrakk>emb_step_at p d t = s; position_of t (p @ [d])\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
emb_step_at p d t = s
position_of t (p @ [d])
goal (1 subgoal):
1. P
[PROOF STEP]
have "is_App t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_App t
[PROOF STEP]
by (metis emb emb_step_at_is_App emb_step_equiv)
[PROOF STATE]
proof (state)
this:
is_App t
goal (1 subgoal):
1. P
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P
[PROOF STEP]
proof (cases "list_all (\<lambda>x. x = Left) p")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
2. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
list_all (\<lambda>x. x = dir.Left) p
goal (2 subgoals):
1. list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
2. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P
[PROOF STEP]
proof (cases d)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. d = dir.Left \<Longrightarrow> P
2. d = dir.Right \<Longrightarrow> P
[PROOF STEP]
case Left
[PROOF STATE]
proof (state)
this:
d = dir.Left
goal (2 subgoals):
1. d = dir.Left \<Longrightarrow> P
2. d = dir.Right \<Longrightarrow> P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
d = dir.Left
[PROOF STEP]
show P
[PROOF STATE]
proof (prove)
using this:
d = dir.Left
goal (1 subgoal):
1. P
[PROOF STEP]
using emb_step_at_remove_arg
[PROOF STATE]
proof (prove)
using this:
d = dir.Left
\<lbrakk>emb_step_at ?p dir.Left ?t = ?s; position_of ?t (?p @ [dir.Left]); list_all (\<lambda>x. x = dir.Left) ?p\<rbrakk> \<Longrightarrow> let i = num_args ?t - Suc (length ?p) in head ?t = head ?s \<and> i < num_args ?t \<and> args ?s = take i (args ?t) @ drop (Suc i) (args ?t)
goal (1 subgoal):
1. P
[PROOF STEP]
by (metis True pd_def(1) pd_def(2) remove_arg)
[PROOF STATE]
proof (state)
this:
P
goal (1 subgoal):
1. d = dir.Right \<Longrightarrow> P
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. d = dir.Right \<Longrightarrow> P
[PROOF STEP]
case Right
[PROOF STATE]
proof (state)
this:
d = dir.Right
goal (1 subgoal):
1. d = dir.Right \<Longrightarrow> P
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
d = dir.Right
[PROOF STEP]
show P
[PROOF STATE]
proof (prove)
using this:
d = dir.Right
goal (1 subgoal):
1. P
[PROOF STEP]
using True chop emb_step_at_chop extended_chop pd_def(1) pd_def(2)
[PROOF STATE]
proof (prove)
using this:
d = dir.Right
list_all (\<lambda>x. x = dir.Left) p
chop t = s \<Longrightarrow> P
\<lbrakk>emb_step_at ?p dir.Right ?t = ?s; position_of ?t (?p @ [dir.Right]); list_all (\<lambda>x. x = dir.Left) ?p\<rbrakk> \<Longrightarrow> chop ?t = ?s \<or> chop ?t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b ?s
chop t \<rightarrow>\<^sub>e\<^sub>m\<^sub>b s \<Longrightarrow> P
emb_step_at p d t = s
position_of t (p @ [d])
goal (1 subgoal):
1. P
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
P
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P
goal (1 subgoal):
1. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> list_all (\<lambda>x. x = dir.Left) p
goal (1 subgoal):
1. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
have 1:"num_args t = num_args s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. num_args t = num_args s
[PROOF STEP]
using emb_step_under_args_num_args
[PROOF STATE]
proof (prove)
using this:
\<not> list_all (\<lambda>x. x = dir.Left) ?p \<Longrightarrow> num_args (emb_step_at ?p ?d ?t) = num_args ?t
goal (1 subgoal):
1. num_args t = num_args s
[PROOF STEP]
by (metis False pd_def(1))
[PROOF STATE]
proof (state)
this:
num_args t = num_args s
goal (1 subgoal):
1. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
have 2:"head t = head s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. head t = head s
[PROOF STEP]
using emb_step_under_args_head
[PROOF STATE]
proof (prove)
using this:
\<not> list_all (\<lambda>x. x = dir.Left) ?p \<Longrightarrow> head (emb_step_at ?p ?d ?t) = head ?t
goal (1 subgoal):
1. head t = head s
[PROOF STEP]
by (metis False pd_def(1))
[PROOF STATE]
proof (state)
this:
head t = head s
goal (1 subgoal):
1. \<not> list_all (\<lambda>x. x = dir.Left) p \<Longrightarrow> P
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P
[PROOF STEP]
using 1 2 under_arg emb_step_under_args_emb_step
[PROOF STATE]
proof (prove)
using this:
num_args t = num_args s
head t = head s
\<lbrakk>head t = head s; num_args t = num_args s; args t ! ?i \<rightarrow>\<^sub>e\<^sub>m\<^sub>b args s ! ?i; \<And>j. \<lbrakk>j < num_args t; ?i \<noteq> j\<rbrakk> \<Longrightarrow> args t ! j = args s ! j\<rbrakk> \<Longrightarrow> P
\<lbrakk>\<not> list_all (\<lambda>x. x = dir.Left) ?p; position_of ?t (?p @ [?d]); \<And>i. \<lbrakk>i < num_args ?t; args ?t ! i \<rightarrow>\<^sub>e\<^sub>m\<^sub>b args (emb_step_at ?p ?d ?t) ! i; \<And>j. \<lbrakk>j < num_args ?t; i \<noteq> j\<rbrakk> \<Longrightarrow> args ?t ! j = args (emb_step_at ?p ?d ?t) ! j\<rbrakk> \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis
goal (1 subgoal):
1. P
[PROOF STEP]
by (metis False pd_def(1) pd_def(2))
[PROOF STATE]
proof (state)
this:
P
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
P
goal:
No subgoals!
[PROOF STEP]
qed |
Formal statement is: lemma convex_halfspace_Re_le: "convex {x. Re x \<le> b}" Informal statement is: The set of complex numbers with real part less than or equal to $b$ is convex. |
[STATEMENT]
lemma transymcl_sym: "(x, y) \<in> transymcl r \<Longrightarrow> (y, x) \<in> transymcl r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x, y) \<in> transymcl r \<Longrightarrow> (y, x) \<in> transymcl r
[PROOF STEP]
using equiv_transymcl[THEN equivD(2), THEN symD]
[PROOF STATE]
proof (prove)
using this:
(?b, ?a) \<in> transymcl ?Qeq2 \<Longrightarrow> (?a, ?b) \<in> transymcl ?Qeq2
goal (1 subgoal):
1. (x, y) \<in> transymcl r \<Longrightarrow> (y, x) \<in> transymcl r
[PROOF STEP]
. |
Formal statement is: lemma higher_deriv_mult: fixes z::complex assumes "f holomorphic_on S" "g holomorphic_on S" "open S" and z: "z \<in> S" shows "(deriv ^^ n) (\<lambda>w. f w * g w) z = (\<Sum>i = 0..n. of_nat (n choose i) * (deriv ^^ i) f z * (deriv ^^ (n - i)) g z)" Informal statement is: If $f$ and $g$ are holomorphic functions on a set $S$, then the $n$th derivative of $f \cdot g$ is given by the formula $\sum_{i=0}^n \binom{n}{i} f^{(i)} g^{(n-i)}$. |
# Project 2 - Mc907/Mo651 - Mobile Robotics
### Student:
Luiz Eduardo Cartolano - RA: 183012
### Instructor:
Esther Luna Colombini
### Github Link:
[Project Repository](https://github.com/luizcartolano2/mc907-mobile-robotics)
### Youtube Link:
[Link to Video](https://youtu.be/uqNeEhWo0dA)
### Subject of this Work:
The general objective of this work is to implement and evaluate at least 1 robot control behavior per group member.
### Goals:
1. Implement and evaluate at least 1 robot control behavior per group member (AvoidObstacle, WallFollow, GoToGoal) using models based on PID, Fuzzy, Neural Networks, etc;
2. Propose a behavior coordination strategy (state machine, planner, AR, subsumption, etc.)
# Code Starts Here
Imports of the libraries used in this notebook
```python
from lib import vrep
import sys, time
from src import robot as rb
from src.utils import vrep2array
import math
from time import time
import numpy as np
import cv2
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import skfuzzy
import skfuzzy as fuzz
import skfuzzy.control as ctrl
# from reinforcement_learning.train import *
```
# Defining the kinematic model of the Pioneer P3DX
For this project, the configuration of the mobile robot is characterized by its position (x,y) and its orientation $\theta$ in a Cartesian coordinate frame.
Using the following parameters:
1. $V_R$: linear velocity of the right wheel.
2. $V_L$: linear velocity of the left wheel.
3. $W$: angular velocity of the mobile robot.
4. $X$: abscissa of the robot.
5. $Y$: ordinate of the robot.
6. $X,Y$ : the actual position coordinates.
7. $\theta$: orientation of the robot.
8. $L$: the distance between the driving wheels.
The kinematic model is given by these equations [1](https://www.hindawi.com/journals/cin/2016/9548482/abs/):
<br>
\begin{align}
\frac{dX}{dt} & = \frac{V_L + V_R}{2} \cdot \cos(\theta) \\
\frac{dY}{dt} & = \frac{V_L + V_R}{2} \cdot \sin(\theta) \\
\frac{d \theta}{dt} & = \frac{V_R - V_L}{L} \\
\end{align}
<br>
Where ($X$, $Y$, $\theta$) are the robot's actual position and orientation angle in the world reference frame. In simulation, we use the discrete form to build a model of the robot. The discrete form of the kinematic model is given by the following equations:<br>
<br>
\begin{align}
X_{k+1} & = X_k + T \cdot \frac{V_{lk} + V_{rk}}{2} \cdot \cos(\theta_k + \frac{\Delta \theta_k}{2}) \\
Y_{k+1} & = Y_k + T \cdot \frac{V_{lk} + V_{rk}}{2} \cdot \sin(\theta_k + \frac{\Delta \theta_k}{2}) \\
\theta_{k+1} & = \theta_k + T \cdot \frac{V_{rk} - V_{lk}}{L} \\
\end{align}
<br>
where $X_{k+1}$ and $Y_{k+1}$ represent the position of the center axis of the mobile robot, $T$ is the sampling time and $\Delta \theta_k = T \cdot \frac{V_{rk} - V_{lk}}{L}$ is the orientation change during one sampling period.
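
As a quick sanity check of these update equations, the cell below computes a single discrete step using made-up wheel velocities and an assumed wheel separation $L$ (these numbers are illustrative only; they are not read from the simulator). The `Odometry` class defined below applies exactly the same update, but with velocities and timestamps obtained from V-REP.

```python
import math

# assumed example values (not read from the simulator)
L = 0.381                      # approximate wheel separation of the Pioneer P3DX (m)
T = 0.05                       # sampling time (s)
v_l, v_r = 0.30, 0.35          # left and right wheel linear velocities (m/s)
x, y, theta = 0.0, 0.0, 0.0    # previous pose

delta_theta = (v_r - v_l) * T / L      # orientation change in one step
delta_space = (v_l + v_r) * T / 2.0    # distance traveled by the center axis

x_next = x + delta_space * math.cos(theta + delta_theta / 2.0)
y_next = y + delta_space * math.sin(theta + delta_theta / 2.0)
theta_next = theta + delta_theta

print(x_next, y_next, theta_next)
```
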
```python
class Pose:
"""
A class used to store the robot pose.
...
Attributes
----------
x : double
The x position of the robot on the map
y : double
The y position of the robot on the map
orientation : double
The angle theta of the robot on the map
Methods
-------
The class doesn't have any methods
"""
def __init__(self, x=None, y=None, orientation=None):
self.x = x
self.y = y
self.orientation = orientation
```
```python
class Odometry():
"""
    A class used to implement methods that allow a robot to calculate its own odometry.
...
Attributes
----------
robot : obj
The robot object
lastPose : obj Pose
        Store the robot's pose during its movement
lastTimestamp : time
Store the last timestamp
left_vel : double
Store the velocity of the left robot wheel
right_vel : double
Store the velocity of the right robot wheel
delta_time : double
Store how much time has passed
delta_theta : double
        Store how much the orientation changed
delta_space : double
        Store how much the (x,y) position changed
Methods
-------
ground_truth_updater()
Function to update the ground truth, the real pose of the robot at the simulator
odometry_pose_updater()
Function to estimate the pose of the robot based on the kinematic model
"""
def __init__(self, robot):
self.robot = robot
self.lastPose = None
self.lastTimestamp = time()
self.left_vel = 0
self.right_vel = 0
self.delta_time = 0
self.delta_theta = 0
self.delta_space = 0
def ground_truth_updater(self):
"""
Function to update the ground truth, the real pose of the robot at the simulator
"""
# get the (x,y,z) position of the robot at the simulator
pose = self.robot.get_current_position()
# get the orientation of the robot (euler angles)
orientation = self.robot.get_current_orientation()
# return an pose object (x,y,theta)
return Pose(x=pose[0], y=pose[1], orientation=orientation[2])
def odometry_pose_updater(self):
"""
Function to estimate the pose of the robot based on the kinematic model
"""
if self.lastPose is None:
self.lastPose = self.ground_truth_updater()
return self.lastPose
# get the actual timestamp
time_now = time()
# get the robot linear velocity for the left and right wheel
left_vel, right_vel = self.robot.get_linear_velocity()
# calculate the difference between the actual and last timestamp
delta_time = time_now - self.lastTimestamp
# calculate the change in orientation - based on the kinematic model
delta_theta = (right_vel - left_vel) * (delta_time / self.robot.ROBOT_WIDTH)
# calculate the distance traveled - based on the kinematic model
delta_space = (right_vel + left_vel) * (delta_time / 2)
# auxiliary function to sum angles
add_deltha = lambda start, delta: (((start+delta)%(2*math.pi))-(2*math.pi)) if (((start+delta)%(2*math.pi))>math.pi) else ((start+delta)%(2*math.pi))
# calculate the new X pose
x = self.lastPose.x + (delta_space * math.cos(add_deltha(self.lastPose.orientation, delta_theta/2)))
# calculate the new Y pose
y = self.lastPose.y + (delta_space * math.sin(add_deltha(self.lastPose.orientation, delta_theta/2)))
# calculate the new Orientation pose
theta = add_deltha(self.lastPose.orientation, delta_theta)
# update the state of the class
self.lastPose = Pose(x, y, theta)
self.lastTimestamp = time_now
self.left_vel = left_vel
self.right_vel = right_vel
self.delta_time = delta_time
self.delta_theta = delta_theta
self.delta_space = delta_space
return self.lastPose
```
# Defining the class that controls the robot walker
For this project we use two different controllers to make the robot avoid obstacles in the map. The first is a classical fuzzy-based system; the second is a more modern approach, based on artificial intelligence, called reinforcement learning.
### Controllers:
**1. Fuzzy**
Fuzzy logic is a very common technique in Artificial Intelligence. It introduces the concept of partial truth, in contrast to Boolean logic, in which only binary values are allowed. This allows a more general logic in which it is not necessary to enumerate all possible cases, which is ideal for applications with limited memory and/or time.
A fuzzy control system is a control system based on fuzzy logic—a mathematical system that analyzes analog input values in terms of logical variables that take on continuous values between 0 and 1, in contrast to classical or digital logic, which operates on discrete values of either 1 or 0 (true or false, respectively).
The input variables in a fuzzy control system are in general mapped by sets of membership functions similar to this, known as "fuzzy sets". The process of converting a crisp input value to a fuzzy value is called "fuzzification". A control system may also have various types of switch, or "ON-OFF", inputs along with its analog inputs, and such switch inputs of course will always have a truth value equal to either 1 or 0, but the scheme can deal with them as simplified fuzzy functions that happen to be either one value or another. Given "mappings" of input variables into membership functions and truth values, the microcontroller then makes decisions for what action to take, based on a set of "rules", each of the form:
~~~
IF brake temperature IS warm AND speed IS not very fast
THEN brake pressure IS slightly decreased.
~~~
For this project the implemented fuzzy system is quite simple, aiming only to make the robot able to avoid obstacles in its way. It uses the ultrasonic sensors as its three inputs (front, left and right distance) and outputs the linear velocities of both wheels.
Some fundamental concepts needed to understand fuzzy logic are:
1. **Degree of Relevance:** value in the range $[0,1]$ that determines the degree to which a given element belongs to a set, allowing a gradual transition from falsehood to truth.
2. **Fuzzy Set:** A set A in X is expressed as a set of ordered pairs: $A = \{(x, \mu_A(x)) \mid x \in X\}$.
3. **Fuzzy Rules:** are created to evaluate the antecedent (input) and apply the result to the consequent (output). They are partially activated depending on the antecedent.
4. **Fuzzy Steps:**
1. *Fuzification:* stage in which subjective linguistic variables and pertinence functions are defined.
2. *Inference:* stage at which rules are defined and evaluated.
3. *Defuzzification:* step in which the resulting regions are converted to values for the system output variable. The best-known defuzzification methods are: centroid, bisector, smallest of maximum (SOM), middle of maximum (MOM), and largest of maximum (LOM).
Next, we give a more detailed explanation of the implemented system, showing how we modeled the inputs, outputs, and system rules, and which defuzzification methods we used.
1. **Inputs and Outputs:** This fuzzy system uses three antecedents (all with the same shape as the one shown below) and two consequents. The antecedents map the left, right and front readings of the robot's ultrasonic sensors. As we can see, each input is divided into three sets, low, medium and high distance, which tell the system how far it is from some object in the map. The consequents, on the other hand, map the velocities of both wheels of the robot and are split into four velocity levels.
Fuzzy Antecedent | Fuzzy Consequent
:-------------------------:|:-------------------------:
|
2. **Rules:** The system is implemented using eleven rules, which were enough to make the robot able to escape from obstacles in the map with stable control. The rules can be described as follows:
~~~
left['low'] AND right['low'] AND (front['medium'] OR front['far'])),
output_left['low'], output_right['low']
left['low'] AND right['low'] AND front['low'],
output_left['reverse'], output_right['low']
left['medium'] OR left['far'] AND right['low'] AND front['low'],
output_left['low'], output_right['high']
left['medium'] OR left['far'] AND right['low'] AND front['medium'] OR front['far'],
output_left['low'], output_right['high']
left['far'] AND right['medium'] AND front['low'],
output_left['low'], output_right['high']
left['far'] AND right['far'] AND front['low'],
output_left['high'], output_right['low']
left['medium'] AND right['medium'] AND front['low'],
output_left['high'], output_right['low']
left['medium'] AND right['far'] AND front['low'],
output_left['high'], output_right['low']
left['low'] AND right['medium'] OR right['far'] AND front['low'],
output_left['high'], output_right['low']
left['low'] AND right['medium'] OR right['far'] AND front['medium'] OR front['far'],
output_left['high'], output_right['low']
left['medium'] OR left['far'] AND right['medium'] OR right['far'] AND front['medium'] OR front['far'],
output_left['medium'], output_right['medium']
~~~
3. **Defuzzification:** In order to understand how the chosen defuzzification method affects the controller, we tested two different methods, the smallest of the maxima (SOM) and the largest of the maxima (LOM); the consequences of this choice are commented on in the Results section, and the switch itself is shown in the snippet below.
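As a small illustration of that switch, changing the defuzzification method in scikit-fuzzy is a one-line change per consequent. This assumes `output_left` and `output_right` are the consequent objects created in `create_outputs` below; the class as committed uses only `'lom'`.
```python
# toggle between the two tested defuzzification methods
method = 'som'   # smallest of maxima; use 'lom' for largest of maxima
output_left.defuzzify_method = method
output_right.defuzzify_method = method
```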
```python
class FuzzyControler():
"""
A class used to implement methods that allow a robot to walk, based on a fuzzy logic controller.
...
Attributes
----------
forward: skfuzzy object
Skfuzzy input object
left: skfuzzy object
Skfuzzy input object
right: skfuzzy object
Skfuzzy input object
output_left: skfuzzy object
Skfuzzy output object
output_right: skfuzzy object
Skfuzzy output object
rules: skfuzzy object
List of rules to the fuzzy
control: skfuzzy object
Skfuzzy controller object
simulator: skfuzzy object
Skfuzzy simulator object
Methods:
-------
create_inputs()
Function to create skfuzzy input functions
create_outputs()
Function to create skfuzzy output functions
create_rules()
Function to create skfuzzy rules
create_control()
Function to create skfuzzy controller
show_fuzzy()
Function to show the fuzzy rules as a graph
create_simulator()
Function that controls the fuzzy pipeline
simulate()
Function that give outputs velocity based on input distance
"""
def __init__(self, behavior):
self.front = None
self.left = None
self.right = None
self.output_left = None
self.output_right = None
self.rules = []
self.control = None
self.simulator = None
self.behavior = behavior
def create_inputs(self):
# set the distance universe, split into low, medium and far sets
self.front = ctrl.Antecedent(np.arange(0, 5.01, 0.01), 'front')
self.front['low'] = fuzz.trapmf(self.front.universe, [0, 0, 0.6, 1])
self.front['medium'] = fuzz.trimf(self.front.universe, [0.6, 1, 1.4])
self.front['far'] = fuzz.trapmf(self.front.universe, [1, 1.5, 5, 5])
self.left = ctrl.Antecedent(np.arange(0, 5.01, 0.01), 'left')
self.left['low'] = fuzz.trapmf(self.left.universe, [0, 0, 0.6, 1])
self.left['medium'] = fuzz.trimf(self.left.universe, [0.6, 1, 1.4])
self.left['far'] = fuzz.trapmf(self.left.universe, [1, 1.5, 5, 5])
self.right = ctrl.Antecedent(np.arange(0, 5.01, 0.01), 'right')
self.right['low'] = fuzz.trapmf(self.right.universe, [0, 0, 0.6, 1])
self.right['medium'] = fuzz.trimf(self.right.universe, [0.6, 1, 1.4])
self.right['far'] = fuzz.trapmf(self.right.universe, [1, 1.5, 5, 5])
return
def create_outputs(self):
self.output_left = ctrl.Consequent(np.arange(-1, 2.01, 0.1), 'output_left')
self.output_left['reverse'] = fuzz.trapmf(self.output_left.universe, [-1,-1, 0, 0.2])
self.output_left['low'] = fuzz.trimf(self.output_left.universe, [0,1, 1.3])
self.output_left['medium'] = fuzz.trimf(self.output_left.universe, [1,1.5, 1.75])
self.output_left['high'] = fuzz.trimf(self.output_left.universe, [1.2,1.8, 2])
self.output_left.defuzzify_method = 'lom'
self.output_right = ctrl.Consequent(np.arange(-1, 2.01, 0.1), 'output_right')
self.output_right['reverse'] = fuzz.trapmf(self.output_left.universe, [-1,-1, 0, 0.2])
self.output_right['low'] = fuzz.trimf(self.output_left.universe, [0,1, 1.3])
self.output_right['medium'] = fuzz.trimf(self.output_left.universe, [1,1.5, 1.75])
self.output_right['high'] = fuzz.trimf(self.output_left.universe, [1.2,1.8, 2])
self.output_right.defuzzify_method = 'lom'
return
def create_rules(self, front, left, right, output_left, output_right):
rule1 = ctrl.Rule(antecedent=(left['low'] & right['low'] & (front['medium'] | front['far'])),
consequent=(output_left['low'], output_right['low']))
rule2 = ctrl.Rule(antecedent=(left['low'] & right['low'] & front['low']),
consequent=(output_left['reverse'], output_right['low']))
rule3 = ctrl.Rule(antecedent=((left['medium'] | left['far']) & right['low'] & front['low']),
consequent=(output_left['low'], output_right['high']))
rule4 = ctrl.Rule(antecedent=((left['medium'] | left['far']) & right['low'] & (front['medium'] | front['far'])),
consequent=(output_left['low'], output_right['high']))
rule5 = ctrl.Rule(antecedent=(left['far'] & right['medium'] & front['low']),
consequent=(output_left['low'], output_right['high']))
rule6 = ctrl.Rule(antecedent=(left['far'] & right['far'] & front['low']),
consequent=(output_left['high'], output_right['low']))
rule7 = ctrl.Rule(antecedent=(left['medium'] & right['medium'] & front['low']),
consequent=(output_left['high'], output_right['low']))
rule8 = ctrl.Rule(antecedent=(left['medium'] & right['far'] & front['low']),
consequent=(output_left['high'], output_right['low']))
rule9 = ctrl.Rule(antecedent=(left['low'] & (right['medium'] | right['far']) & front['low']),
consequent=(output_left['high'], output_right['low']))
rule10 = ctrl.Rule(antecedent=(left['low'] & (right['medium'] | right['far']) & (front['medium'] | front['far'])),
consequent=(output_left['high'], output_right['low']))
rule11 = ctrl.Rule(antecedent=((left['medium'] | left['far']) & (right['medium'] | right['far']) & (front['medium'] | front['far'])),
consequent=(output_left['medium'], output_right['medium']))
for i in range(1, 12):
self.rules.append(eval("rule" + str(i)))
return
def create_control(self):
# call function to create robot input
self.create_inputs()
# call function to create robot output
self.create_outputs()
if self.behavior == "avoid_obstacle":
# call function to create rules
self.create_rules(self.front, self.left, self.right, self.output_left, self.output_right)
# create controller
self.control = skfuzzy.control.ControlSystem(self.rules)
return
def show_fuzzy(self):
if self.control is None:
raise Exception("Control not created yet!")
else:
self.control.view()
return
def create_simulator(self):
if self.control is None:
# create controller if it doesn't exist
self.create_control()
# create simulator object
self.simulator = ctrl.ControlSystemSimulation(self.control)
return
def simulate(self, input_foward=None, input_left=None, input_right=None):
if self.simulator is None:
# create simulator if it doesn't exist
self.create_simulator()
# if there is no input raise exception
if input_foward is None or input_left is None or input_right is None:
raise Exception("Inputs can't be none")
# simulate the robot linear velocities based on the given inputs
self.simulator.input['front'] = input_foward
self.simulator.input['left'] = input_left
self.simulator.input['right'] = input_right
self.simulator.compute()
return self.simulator.output['output_left'], self.simulator.output['output_right']
```
**2. Reinforcement Learning**
Reinforcement learning is an area of Machine Learning. It is about taking suitable actions to maximize reward in a particular situation. Reinforcement learning differs from supervised learning: in supervised learning the training data comes with the answer key, so the model is trained on the correct answers, whereas in reinforcement learning there is no answer and the agent decides what to do to perform the given task. In the absence of a training dataset, it is bound to learn from its own experience.
**Main points in Reinforcement learning:**
1. Input: The input should be an initial state from which the model will start
2. Output: There are many possible outputs, as there is a variety of solutions to a particular problem
3. Training: The training is based upon the input; the model will return a state and the user will decide whether to reward or punish the model based on its output.
4. The model continues to learn.
5. The best solution is decided based on the maximum reward.
**Types of Reinforcement:**
1. Positive - occurs when an event, happening due to a particular behavior, increases the strength and frequency of that behavior.
2. Negative - the strengthening of a behavior because a negative condition is stopped or avoided.
One of the most common RL algorithms is **Q-Learning**, a basic form which uses Q-values (also called action values) to iteratively improve the behavior of the learning agent. A brief introduction can be given with the following concepts:
1. **Q-Values:** Q-values are defined for states and actions. $Q(S, A)$ is an estimate of how good it is to take action A in state S.
2. **Rewards and Episodes:** an agent over the course of its lifetime starts from a start state, makes a number of transitions from its current state to a next state based on its choice of action and also the environment the agent is interacting in. At every step of transition, the agent from a state takes an action, observes a reward from the environment, and then transits to another state.
3. **TD-Update:** the Temporal Difference or TD-Update rule can be written as: $Q(S,A) \leftarrow Q(S,A) + \alpha \cdot \left(R + \gamma \cdot Q(S',A') - Q(S,A)\right)$. The variables can be described as follows:
1. S: current state
2. A: current action
3. S': next state
4. A': next action
5. R: current reward
6. $\gamma$: discounting factor for future rewards
7. $\alpha$: learning rate
4. **Choosing the Action:** the policy for choosing an action is very simple. It goes as follows:
1. with probability $(1-\epsilon)$ choose the action which has the highest Q-value
2. with probability $(\epsilon)$ choose any action at random
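To make the update and the policy concrete, here is a minimal sketch over a dictionary-based Q table. It is only illustrative and not the exact project code, which is linked below.
```python
import random
from collections import defaultdict

# Q maps a (hashable) state to an array with one value per action
Q = defaultdict(lambda: np.zeros(3))

def epsilon_greedy(Q, state, num_actions, epsilon=0.1):
    """With probability epsilon explore at random, otherwise exploit the best known action."""
    if random.random() < epsilon:
        return random.randrange(num_actions)
    return int(np.argmax(Q[state]))

def td_update(Q, state, action, reward, next_state, alpha=0.5, gamma=0.99):
    """Q-learning TD update using the greedy next-state value."""
    best_next = np.max(Q[next_state])
    Q[state][action] += alpha * (reward + gamma * best_next - Q[state][action])
```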
In order to implement this behavior we need two main components: the environment (which communicates with the V-REP interface) and the training function that implements the policy and the Q-Learning algorithm; the latter was based on [this](https://www.geeksforgeeks.org/q-learning-in-python/) implementation.
Now, we explain the environment and training implementations in more detail; both codes can be found [here](https://github.com/luizcartolano2/mc907-mobile-robotics/blob/project2/reinforcement_learning/environment.py) and [here](https://github.com/luizcartolano2/mc907-mobile-robotics/blob/project2/reinforcement_learning/train.py), and a function call can be seen below.
**Environment Implementation:**
In a Q-Learning setting, the environment must have the ability to start and restart the simulation, to report states and, more importantly, to take actions and identify the consequences (rewards) of those actions.
1. **State:**
Since we aim to create a reinforcement behavior that "teaches" the robot how to avoid obstacles in the scene, we simply choose as the state the ultrasonic sensor observations, that is, the distances read by all of the robot's sensors, which was implemented as follows:
~~~
observations = {}
observations['proxy_sensor'] = [np.array(self.read_ultrassonic_sensors())]
~~~
2. **Actions:**
The robot actions were limited to three options:
1. Walk straight: $[1.5,1.5]$.
2. Turn Left: $[0.5,1.5]$.
3. Turn Right: $[1.5,0.5]$.
3. **Reset Function:**
In order to train the model, the code has to be able to restart the simulation at the start of every episode. To do this with the V-REP simulator, it was necessary to implement a reset function that stops the simulation and starts it again; between both actions a delay is required to make sure the old simulation was completely killed. Also, the reset function has to return the initial state of the simulation to the training function.
This was done with the following lines of code:
~~~
stop = vrep.simxStopSimulation(self.clientID,vrep.simx_opmode_blocking)
time.sleep(5)
start = vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_blocking)
observations = {}
observations['proxy_sensor'] = [np.array(self.read_ultrassonic_sensors())]
~~~
4. **Rewards model:**
The robot rewards were given based on the following situations:
1. Punishment to be close to objects:
``` (np.array(observations['proxy_sensor']) < 0.7).sum() * -2 ```
2. Punishment to be very close to objects:
``` (np.array(observations['proxy_sensor']) < 0.2).sum() * -10 ```
3. Rewarded for movement:
``` np.clip(np.sum(np.absolute(action)) * 2, 0, 2) ```
4. Reward for walking:
``` if dist > 0.1: reward['proxy_sensor'] += 50 ```
5. Punishment for dying:
``` if np.any(np.array(observations['proxy_sensor']) < 0.1): reward['proxy_sensor'] -= 100000 ```
5. **Step Update Function:**
The step/update function receives an action as input, performs and evaluates it. At the end, it checks whether the episode is done (the robot collided with something). Due to V-REP implementation issues, we consider the robot "dead" if any of the sensors reads ten centimeters or less from an object.
**Q-Learning Implementation:**
We can split the Q-Learning part into three main functionalities, as we explain below:
1. **Action Policy:**
The action policy is the function created to update the action probabilities; it works as follows:
~~~
action_probabilities = np.ones(num_actions,dtype=float) * epsilon / num_actions
best_action = np.argmax(Q[state])
action_probabilities[best_action] += (1.0 - epsilon)
~~~
2. **Q-Learning Loop:**
The Q-Learning function iterates over the episodes, choosing actions based on their probabilities and updating the probabilities based on the TD rule and the state reward.
3. **Save/Load model:**
In order to save the model across many runs and keep improving the quality of the controller, at the end of every simulation we save the state/action-value dictionary to a text file and, at the start of every simulation, we load those values back into the dictionary.
#### Observation
Since the reinforcement behavior was experimental, we developed it in a separate branch using .py files, not a Jupyter Notebook, so here we just show how to call the Q-Learning function and present the obtained results in a following section. Links to the implemented files are given above.
```python
import json

# create the simulation environment
env = Robot()
# calls the Q-Learning function
Q, stats = qLearning(env, 500)
# save the learned model
with open('model.txt', 'w') as f:
json.dump({str(k): str(tuple(v)) for k, v in Q.items()}, f)
# plot few results charts
plotting.plot_episode_stats(stats)
```
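For completeness, here is a minimal sketch of how the saved dictionary could be read back at the start of a later run; it assumes the same string format used by the save above, and keeps the keys in their string form just for illustration.
```python
import json
from ast import literal_eval
from collections import defaultdict

# reload the Q-values saved by a previous run ("state" -> "(v0, v1, v2)")
Q = defaultdict(lambda: np.zeros(3))
try:
    with open('model.txt') as f:
        for state, values in json.load(f).items():
            Q[state] = np.array(literal_eval(values))
except FileNotFoundError:
    pass  # no previous model: start learning from scratch
```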
# Controlling the robot actions
A state machine is a mathematical model of computation. It is an abstract machine that can be in exactly one of a finite number of states at any given time. The state machine can change from one state to another in response to external inputs and/or when a condition is satisfied; the change from one state to another is called a transition.
For this project, we implemented two behaviors with the same goal, avoiding obstacles, but the one using reinforcement learning was tested in a separate setting, so it is not part of the state machine. The one that is implemented, the fuzzy controller, has two simple states: one before it is initialized, and one while it is working. In the first stage we create all the required objects and transition to the next stage. In the next stage, we read the sensor inputs, call the fuzzy simulator to get the outputs, and update the robot velocities.
The fuzzy controller class has its own internal state handling to create everything it needs, that is, it checks whether all components exist and, if not, creates the antecedents, consequents and rules, puts them together in a controller object, and then runs the simulations.
```python
def state_machine(behavior="avoid_obstacle"):
# stage
stage = 0
if behavior == "follow_wall":
raise Exception("Not implemented!")
elif behavior == "avoid_obstacle":
while True:
if stage == 0:
# first we create the robot and the walker object
robot = rb.Robot()
fuzzy = FuzzyControler(behavior=behavior)
# instantiate the odometry calculator
odometry_calculator = Odometry(robot=robot)
stage = 1
if stage == 1:
sensors = robot.read_ultrassonic_sensors()
front_sensors = min(sensors[3], sensors[4])
left_sensors = min(sensors[0], sensors[1], sensors[2])
right_sensors = min(sensors[5], sensors[6], sensors[7])
left_vel, right_vel = fuzzy.simulate(input_foward=front_sensors, input_left=left_sensors, input_right=right_sensors)
robot.set_left_velocity(left_vel)
robot.set_right_velocity(right_vel)
else:
raise Exception("Not implemented!")
```
# Main function - Execute the code here!
Here is a simple signal handler implemented to make the simulator execution last for a given time period.
```python
import signal
from contextlib import contextmanager
class TimeoutException(Exception): pass
@contextmanager
def time_limit(seconds):
def signal_handler(signum, frame):
raise TimeoutException("Timed out!")
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(seconds)
try:
yield
finally:
signal.alarm(0)
try:
ground_truth = []
odometry = []
lines = []
corners = []
points_kmeans = []
with time_limit(90):
state_machine()
except TimeoutException as e:
print("Timed out!")
```
Connected to remoteApi server.
Pioneer_p3dx_ultrasonicSensor1 connected.
Pioneer_p3dx_ultrasonicSensor2 connected.
Pioneer_p3dx_ultrasonicSensor3 connected.
Pioneer_p3dx_ultrasonicSensor4 connected.
Pioneer_p3dx_ultrasonicSensor5 connected.
Pioneer_p3dx_ultrasonicSensor6 connected.
Pioneer_p3dx_ultrasonicSensor7 connected.
Pioneer_p3dx_ultrasonicSensor8 connected.
Pioneer_p3dx_ultrasonicSensor9 connected.
Pioneer_p3dx_ultrasonicSensor10 connected.
Pioneer_p3dx_ultrasonicSensor11 connected.
Pioneer_p3dx_ultrasonicSensor12 connected.
Pioneer_p3dx_ultrasonicSensor13 connected.
Pioneer_p3dx_ultrasonicSensor14 connected.
Pioneer_p3dx_ultrasonicSensor15 connected.
Pioneer_p3dx_ultrasonicSensor16 connected.
Vision sensor connected.
Laser connected.
Left motor connected.
Right motor connected.
Robot connected.
Timed out!
# Results
In order to show the obtained results we present a video of the running simulation for both behaviors. The fuzzy system is evaluated over several different start poses and with changes to the input/output universes. The reinforcement learning controller, on the other hand, is only varied through the environment; we also show how it performed as the number of episodes grew.
An important observation concerns the request to evaluate the model using both the ground truth position and the odometry estimate: since both controllers aim to avoid obstacles in the scene and both are reactive behaviors, neither uses the pose as input, so it makes no difference to the obtained results.
The robot can be seen in action on [YouTube](https://youtu.be/uqNeEhWo0dA), and the observed behavior is commented on below. Before going into the obtained results, we first explain how the tests were done for both the fuzzy controller and the reinforcement learning.
### Fuzzy Controller
In order to test and understand the fuzzy behavior in a range of different situations, we created five different experiment configurations and tested each of them from three different start poses in the scene. The start poses are shown in the following images:
First Pose | Second Pose | Third Pose
:-------------------------:|:-------------------------:|:-------------------------:
| |
In addition to the different starting poses, we change, for each experiment, either the shape/range of the fuzzy antecedents or the defuzzification method. Each experiment can be described as follows:
1. **Experiment 1:**
For the first experiment the fuzzy sets were the default ones, and the defuzzification method was the smallest of the maxima (SOM).
2. **Experiment 2:**
For the second one, we expanded the distance considered "close" and still used the smallest of the maxima (SOM).
3. **Experiment 3:**
For the third one, we expanded the distance considered "medium" and still used the smallest of the maxima (SOM).
4. **Experiment 4:**
For the fourth one, we expanded the distance considered "medium", reduced the one considered "far", and still used the smallest of the maxima (SOM).
5. **Experiment 5:**
For the last one, we kept the default antecedents and changed the defuzzification method to the largest of the maxima (LOM).
The antecedent sets are shown here:
Experiment 1 | Experiment 2 | Experiment 3 | Experiment 4 | Experiment 5
:-------------------------:|:-------------------------:|:-------------------------:|:-------------------------:|:-------------------------:
| | | |
As we can observe in the [video](https://youtu.be/uqNeEhWo0dA), there is no significant difference between experiments 1, 3 and 4. The bigger differences appear when we change the "close" range of the input and, mainly, when we change the defuzzification method.
Experiment 1 shows a robust and stable controller, where the robot escapes from obstacles consistently and without taking major risks; besides that, it also moves at a good speed and with smooth movements.
The second experiment gave the worst fuzzy results. The robot achieves its goal of not colliding with obstacles, but it makes sudden movements and, once it considers that many obstacles are close to it, it takes many bad decisions, including moments when it basically spins on its own axis.
The third experiment is close to the first one, except for one aspect: the robot passes too close to the scene objects in many situations.
The fourth and fifth experiments exhibit similar behaviors: they are consistent in their actions and don't take big risks, but they move too slowly.
### Reinforcement Learning
The reinforcement learning controller was tested on only one scenario, the same as Experiment 1. We achieved fairly good results, considering the limited training time. After about 2500 episodes the robot was able, as can be observed in the video, to avoid the first obstacle. However, better results are still desirable, since it does not survive very long in the environment.
The scenario and a few charts describing the results are displayed below:
Enviroment | Episode Length | Episode Reward
:-------------------------:|:-------------------------:|:-------------------------:
| |
As expected, episode length and reward grow as time passes, showing that we are on the right path.
# Discussion
Both controllers achieved satisfactory results, as expected at the start of the development.
### Fuzzy Controller
The fuzzy controller turned out to be a very good option for a reactive behavior: the implementation was quite easy and the results were satisfactory. Analysing the experiments we could also extract interesting insights. For example, we realized that increasing the ```close``` or ```medium``` intervals of the antecedent sets makes the robot's movements less smooth, and it starts to take more risks and less consistent actions.
On the other hand, considering fewer objects to be at a ```far``` distance and taking the largest of the maxima as the defuzzification method both made the robot's movements more conservative, at a much lower speed. This behavior can be easily explained in both cases. In the first one, the robot starts to "imagine" that objects are closer than they really are, so it acts more carefully. In the second one, once we defuzzify the output set by taking the maximum value, any sensor producing a ```low``` or ```medium``` reading will dominate the others, and the robot will, again, act carefully.
Lastly, we decided that the first configuration (Experiment 1) of antecedents and defuzzification was the best one, and set it as the 'default'. The choice was based on the robot's consistency and robustness. That result makes a lot of sense when we analyse how the controller is structured in the antecedent/defuzzification mix. That is, the antecedents were created in a way that their values tend to overlap, so decisions aren't abrupt. Besides that, the defuzzification is a bit more confident, so it tends to higher speeds.
### Reinforcement Learning
The reinforcement approach proved to be a powerful way to tackle problems that are hard to model explicitly. As we saw in the charts of episode length and reward, the idea of maximizing the obtained reward makes the robot learn from its mistakes, that is, the bad actions taken in some states. We did not have time to train the robot over different scenarios, so it is not yet able to stay alive for a very long time in the environment, but it can avoid some obstacles.
The rewards were modeled so that being close to an object yields a negative return, a punishment, while moving through the scene yields a positive reward, a prize; finally, the worst punishment is dying. Nevertheless, we believe the way the state was defined was not the best possible and is possibly one of the reasons the results were not spectacular. An additional improvement that could yield better results is changing the state so that it maps only the front, left and right ultrasonic sensors.
Another problem was determining when the robot had hit an object in the scene. The decision was to check all the ultrasonic sensors and, if any of them read ten centimeters or less from an object, to consider it a hit. This fulfilled what was expected, but there were situations where it was not clear whether the robot had really hit something.
# Conclusions And Final Observations
In general, the work presented satisfactory results, especially considering that its objective was to introduce the students to the concepts of fuzzy logic applied to robotics, understanding its operation and gaining the ability to evaluate solutions and modify them for optimal results. It is also noteworthy that the results obtained by applying the solution to the proposed problem were significantly positive. This was also our first time working with reinforcement learning, which also presented very good results and was really fun to work with.
Weaknesses of the work, which need to be improved in future iterations, are mostly related to how the fuzzification criteria were defined. One improvement considered for the project is the use of genetic algorithms to choose the best ways to build the membership functions and to select the defuzzification method. For the reinforcement learning approach, we need a better model of the ```state-reward``` relation.
|
lemma distr_bij_count_space: assumes f: "bij_betw f A B" shows "distr (count_space A) (count_space B) f = count_space B" |
function [l,d,perm] = mchol(A,mu)
% [l,d,perm] = mchol(A,mu)
% Compute the Gill-Murray modified LDL factorization of A,
if nargin < 2
mu = 1e-12;
end
n = size(A,1);
l = eye(n);
d = zeros(n,1);
perm = 1:n;
for i = 1:n
c(i,i) = A(i,i);
end
% Compute modification parameters
gamma = max(abs(diag(A)));
xi = max(max(abs(setdiag(A,0))));
delta = mu*max(gamma+xi,1);
if n > 1
beta = sqrt(max([gamma xi/sqrt(n^2-1) mu]));
else
beta = sqrt(max([gamma mu]));
end
for j = 1:n
% Find q that results in Best Permutation with j
[maxVal maxPos] = max(abs(diag(c(j:end,j:end))));
q = maxPos+j-1;
% Permute d,c,l,a
d([j q]) = d([q j]);
perm([j q]) = perm([q j]);
c([j q],:) = c([q j],:);
c(:,[j q]) = c(:,[q j]);
l([j q],:) = l([q j],:);
l(:,[j q]) = l(:,[q j]);
A([j q],:) = A([q j],:);
A(:,[j q]) = A(:,[q j]);
for s = 1:j-1
l(j,s) = c(j,s)/d(s);
end
for i = j+1:n
c(i,j) = A(i,j) - sum(l(j,1:j-1).*c(i,1:j-1));
end
theta = 0;
if j < n
theta = max(abs(c(j+1:n,j)));
end
d(j) = max([abs(c(j,j)) (theta/beta)^2 delta]);
if j < n
for i = j+1:n
c(i,i) = c(i,i) - (c(i,j)^2)/d(j);
end
end
end |
lemma AE_not_in: "N \<in> null_sets M \<Longrightarrow> AE x in M. x \<notin> N" |
lemma linear_continuous_on_compose: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" and g :: "'b \<Rightarrow> 'c::real_normed_vector" assumes "continuous_on S f" "linear g" shows "continuous_on S (\<lambda>x. g(f x))" |
subroutine pmidpp(p1,p2,p)
C.......computes the coordinates of the point located midway
C.......between two given points
dimension p1(2),p2(2),p(2)
p(1)=(p1(1)+p2(1))/2.
p(2)=(p1(2)+p2(2))/2.
return
end
|
subroutine sslcs (a,model)
c implicit double precision (a-h,o-z) dp
c
common/bk00/k01,k02,k03,k04,k05,k06,k07,k08,k09,k10,k11,k12
common/bk02/ioofc,iphase,imass,lpar(9)
common/bk06/nprnt,mprint,itmpop,numelt,jprint,idump,locstr
common/bk14/lfna(15),lfnt(6)
common/bk16/maxint,hgc
common/bk17/dn1,dn2,nwebuf,ntime,numnp,neq,ibar,mthsol
common/bk18/nummat,ityp2d,ako(31)
common/intgp/d(4,4),ipt,nel,nelsub
dimension a(*)
c
ln = maxint*lpar(9)
k08 = igtpnt(8)
mtp = locdbl(a(k08),nelsub-1)
nm = igtpnt(12)+48*(mtp-1)
ii = igtpnt(14)+ln*(nel-1)
kk = igtpnt(6) +nel-1
nn = ii+lpar(9)*(ipt-1)
ne = nn+lpar(9)-1
nt = ne-idump
nu = igtpnt(11)+nummat+mtp-1
nray = igtpnt(11)+3*nummat+(mtp-1)*3
mm = igtpnt(2) +4*(nel-1)
k63 = igtpnt(63)
k64 = igtpnt(64)
k81 = igtpnt(81)
k82 = igtpnt(82)
mtb = igtpnt(12)+96*nummat
mtb1 = locdbl(a(mtb),mtp-1)
mtb2 = mtb+nummat+mtb1
if(iphase.eq.2)then
ktm = k81+1
else
ktm = k82+1
endif
c
if (model.eq.0) then
call s0mn(a(nn),a(ne),a(nt),ln)
return
endif
c
ck----Elasticity :
if(model.eq.1)then
call s1mn (a(nm),a(nn),a(ne),a(nt),ln)
ck
ck----Orthotropic Elasticity :
elseif(model.eq.2)then
call s2mn (a(nm),a(nm+29),a(nn),a(ne),a(nt),ln)
ck
ck----Elastoplasticity :
elseif(model.eq.3)then
if (ityp2d.le.1) then
call s3mn(a(nm),a(nm+29),a(nn),a(nn+4),a(ne),ln)
else
call s3mnp(a(nm),a(nm+29),a(nn),a(nn+4),a(ne),a(nt),ln)
endif
c---------------------------------
c.....add up matslip model by Waeil Ashamwi
c---------------------------------
*-----Single Crystal (Double-Slip) :
elseif (model.eq.4)then
if (ityp2d.le.1) then
call matpoly (a(k08),a(nn),a(k12),ln)
endif
*
* ... Polycrystal with dislocation eveol. (Double-Slip) :
elseif(model.eq.5)then
if (ityp2d.le.1) then
* call matpoly_dislocation_2s (a(k08),a(nn),a(k12),ln)
endif
*
* ... Polycrystal with dislocation eveol. (4-Slip) :
elseif(model.eq.6)then
if (ityp2d.le.1) then
* call matpoly_dislocation_4s (a(k08),a(nn),a(k12),ln)
endif
*
*-----Bicrystal (Double-slip) :
elseif(model.eq.7) then
if (ityp2d.le.1) then
* call matslipbxds (a(nn),a(k12),ln)
endif
*
* ... Polycrystal (Double-Slip) :
elseif(model.eq.8)then
if (ityp2d.le.1) then
* call matpoly_ds (a(k08),a(nn),a(k12),ln)
endif
c.... no material specified
else
write(lfnt(2),250)model
call bye(2)
endif
c
c.... stiffness proportional damping
if(imass.eq.1)then
call raydmp(a(nray))
endif
c
231 format(' **fatal error** '//
1 ' model',i5,'is not implemented for plane stress'//)
250 format(' **fatal error** mat model',i5,' not coded (sslcs)')
return
end
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Structures.Semigroup where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Data.Sigma
open import Cubical.Foundations.SIP renaming (SNS-PathP to SNS)
open import Cubical.Structures.NAryOp
private
variable
ℓ ℓ' : Level
raw-semigroup-structure : Type ℓ → Type ℓ
raw-semigroup-structure X = X → X → X
raw-semigroup-is-SNS : SNS {ℓ} raw-semigroup-structure _
raw-semigroup-is-SNS = nAryFunSNS 2
semigroup-axioms : (X : Type ℓ) → raw-semigroup-structure X → Type ℓ
semigroup-axioms X _·_ = isSet X ×
((x y z : X) → (x · (y · z)) ≡ ((x · y) · z))
semigroup-structure : Type ℓ → Type ℓ
semigroup-structure = add-to-structure (raw-semigroup-structure) semigroup-axioms
Semigroup : Type (ℓ-suc ℓ)
Semigroup {ℓ} = TypeWithStr ℓ semigroup-structure
-- Operations for extracting components
⟨_⟩ : Semigroup → Type ℓ
⟨ G , _ ⟩ = G
semigroup-operation : (G : Semigroup {ℓ}) → ⟨ G ⟩ → ⟨ G ⟩ → ⟨ G ⟩
semigroup-operation (_ , f , _) = f
module semigroup-operation-syntax where
semigroup-operation-syntax : (G : Semigroup {ℓ}) → ⟨ G ⟩ → ⟨ G ⟩ → ⟨ G ⟩
semigroup-operation-syntax = semigroup-operation
infixl 20 semigroup-operation-syntax
syntax semigroup-operation-syntax G x y = x ·⟨ G ⟩ y
open semigroup-operation-syntax
semigroup-isSet : (G : Semigroup {ℓ}) → isSet ⟨ G ⟩
semigroup-isSet (_ , _ , P , _) = P
semigroup-assoc : (G : Semigroup {ℓ})
→ (x y z : ⟨ G ⟩) → (x ·⟨ G ⟩ (y ·⟨ G ⟩ z)) ≡ ((x ·⟨ G ⟩ y) ·⟨ G ⟩ z)
semigroup-assoc (_ , _ , _ , P) = P
-- Semigroup equivalences
semigroup-iso : StrIso semigroup-structure ℓ
semigroup-iso = add-to-iso (nAryFunIso 2) semigroup-axioms
semigroup-axiom-isProp : (X : Type ℓ)
→ (s : raw-semigroup-structure X)
→ isProp (semigroup-axioms X s)
semigroup-axiom-isProp X _·_ = isPropΣ isPropIsSet
λ isSetX → isPropΠ (λ x → isPropΠ (λ y → isPropΠ (λ z → isSetX _ _)))
semigroup-is-SNS : SNS {ℓ} semigroup-structure semigroup-iso
semigroup-is-SNS = add-axioms-SNS _ semigroup-axiom-isProp (nAryFunSNS 2)
SemigroupPath : (M N : Semigroup {ℓ}) → (M ≃[ semigroup-iso ] N) ≃ (M ≡ N)
SemigroupPath = SIP semigroup-is-SNS
-- Semigroup ·syntax
module semigroup-·syntax (G : Semigroup {ℓ}) where
infixr 18 _·_
_·_ : ⟨ G ⟩ → ⟨ G ⟩ → ⟨ G ⟩
_·_ = semigroup-operation G
|
theory Datatype_Selectors
imports Main
begin
text\<open>
Running Example: \<open>datatype_new iptrule_match = is_Src: Src (src_range: ipt_iprange)\<close>
A discriminator \<open>disc\<close> tells whether a value is of a certain constructor.
Example: \<open>is_Src\<close>
A selector \<open>sel\<close> select the inner value.
Example: \<open>src_range\<close>
A constructor \<open>C\<close> constructs a value
Example: \<open>Src\<close>
The are well-formed if the belong together.
\<close>
fun wf_disc_sel :: "(('a \<Rightarrow> bool) \<times> ('a \<Rightarrow> 'b)) \<Rightarrow> ('b \<Rightarrow> 'a) \<Rightarrow> bool" where
"wf_disc_sel (disc, sel) C \<longleftrightarrow> (\<forall>a. disc a \<longrightarrow> C (sel a) = a) \<and> (\<forall>a. \<^cancel>\<open>disc (C a) \<longrightarrow>\<close> sel (C a) = a)"
(* should the following be added to the definition?
the discriminator is true for all C independent of the a
for example: is_Src_IP is true for all Src_IPs, independent of the numberic value of the ip.
lemma "wf_disc_sel (disc, sel) C \<Longrightarrow> (\<exists>a. disc (C a)) \<longrightarrow> (\<forall>a. disc (C a))"
*)
declare wf_disc_sel.simps[simp del]
end
|
= = History and Purpose = =
|
module Functional where
import Lvl
open import Type
infixl 10000 _∘_
infixl 10000 _⩺_
infixl 10000 _⩹_
infixl 30 _→ᶠ_ _←_ _←ᶠ_
infixr 0 _$_
private variable ℓ ℓ₁ ℓ₂ : Lvl.Level
private variable T X X₁ X₂ X₃ X₄ Y Y₁ Y₂ Y₃ Y₄ Z : Type{ℓ}
-- Converse of a function type
_←_ : Type{ℓ₁} → Type{ℓ₂} → Type{ℓ₁ Lvl.⊔ ℓ₂}
Y ← X = X → Y
-- Function type as a function
_→ᶠ_ : Type{ℓ₁} → Type{ℓ₂} → Type{ℓ₁ Lvl.⊔ ℓ₂}
X →ᶠ Y = X → Y
-- Converse function type as a function
_←ᶠ_ : Type{ℓ₁} → Type{ℓ₂} → Type{ℓ₁ Lvl.⊔ ℓ₂}
Y ←ᶠ X = Y ← X
-- The identity function.
-- Returns the applied argument.
id : T → T
id(x) = x
{-# INLINE id #-}
-- The constant function.
-- Returns the first argument independent of the second.
const : let _ = X in Y → (X → Y)
const(x)(_) = x
-- Function application as a function.
-- Applies the first argument on the function on the second argument.
apply : X → (X → Y) → Y
apply(x)(f) = f(x)
{-# INLINE apply #-}
-- Function application as an operator
_$_ : (X → Y) → X → Y
_$_ = id
{-# INLINE _$_ #-}
_$ᵢₘₚₗ_ : ({ _ : X } → Y) → (X → Y)
f $ᵢₘₚₗ x = f{x}
{-# INLINE _$ᵢₘₚₗ_ #-}
_$ᵢₙₛₜ_ : (⦃ _ : X ⦄ → Y) → (X → Y)
f $ᵢₙₛₜ x = f ⦃ x ⦄
{-# INLINE _$ᵢₙₛₜ_ #-}
-- Function application as an operator. Function to the left, value to the right.
_⩹_ : (X → Y) → X → Y
f ⩹ x = f(x)
{-# INLINE _⩹_ #-}
-- Function application as an operator. Value to the left, function to the right.
_⩺_ : X → (X → Y) → Y
x ⩺ f = f(x)
{-# INLINE _⩺_ #-}
-- Swapping the arguments of a binary operation
swap : (X → Y → Z) → (Y → X → Z)
swap f(y)(x) = f(x)(y)
{-# INLINE swap #-}
-- Function composition
_∘_ : let _ = X in (Y → Z) → (X → Y) → (X → Z)
(f ∘ g)(x) = f(g(x))
-- Function composition on implicit argument
_∘ᵢₘₚₗ_ : let _ = X in ({Y} → Z) → ({X} → Y) → ({X} → Z)
(f ∘ᵢₘₚₗ g){x} = f{g{x}}
-- Function composition on instance argument
_∘ᵢₙₛₜ_ : let _ = X in (⦃ Y ⦄ → Z) → (⦃ X ⦄ → Y) → (⦃ X ⦄ → Z)
(f ∘ᵢₙₛₜ g) ⦃ x ⦄ = f ⦃ g ⦃ x ⦄ ⦄
-- The S-combinator from combinatory logic.
-- It is sometimes described as a generalized version of the application operator or the composition operator.
-- Note: TODO: Applicative instance
_∘ₛ_ : (X → Y → Z) → (X → Y) → (X → Z)
(f ∘ₛ g)(x) = (f x) (g x)
_on₀_ : let _ = X in Z → (X → Y) → Z
((▫) on₀ f) = ▫ -- const
_on₁_ : let _ = X in (Y → Z) → (X → Y) → (X → Z)
((_▫) on₁ f)(y₁) = (f(y₁) ▫) on₀ f -- f(y₁) ▫
-- Function composition on a binary operator
-- A function is composed on every argument of the binary operator.
_on₂_ : let _ = X in (Y → Y → Z) → (X → Y) → (X → X → Z)
((_▫_) on₂ f)(y₁) = (f(y₁) ▫_) on₁ f -- f(y₁) ▫ f(y₂)
_on₃_ : let _ = X in (Y → Y → Y → Z) → (X → Y) → (X → X → X → Z)
((_▫_▫_) on₃ f)(y₁) = (f(y₁) ▫_▫_) on₂ f -- f(y₁) ▫ f(y₂) ▫ f(y₃)
-- TODO: Move these to Function.Multi
_∘₀_ : (Y → Z) → Y → Z
_∘₀_ = id
_∘₁_ : let _ = X₁ in (Y → Z) → (X₁ → Y) → (X₁ → Z)
_∘₁_ f = (f ∘₀_) ∘_
-- (f ∘₂ g)(x)(y) = f(g(x)(y))
_∘₂_ : let _ = X₁ ; _ = X₂ in (Y → Z) → (X₁ → X₂ → Y) → (X₁ → X₂ → Z)
_∘₂_ f = (f ∘₁_) ∘_
-- (f ∘₃ g)(x)(y)(z) = f(g(x)(y)(z))
_∘₃_ : let _ = X₁ ; _ = X₂ ; _ = X₃ in (Y → Z) → (X₁ → X₂ → X₃ → Y) → (X₁ → X₂ → X₃ → Z)
_∘₃_ f = (f ∘₂_) ∘_
-- (f ∘₄ g)(x)(y)(z)(w) = f(g(x)(y)(z)(w))
_∘₄_ : let _ = X₁ ; _ = X₂ ; _ = X₃ ; _ = X₄ in (Y → Z) → (X₁ → X₂ → X₃ → X₄ → Y) → (X₁ → X₂ → X₃ → X₄ → Z)
_∘₄_ f = (f ∘₃_) ∘_
-- map₂Arg₁ : let _ = X in (Y₁ → Y₂ → Z) → (X → Y₁) → (X → Y₂) → (X → Z)
-- map₂Arg₁ f g₁ g₂ x = f(g₁ x)(g₂ x)
-- map₂Arg₂ : let _ = X₁ ; _ = X₂ in (Y₁ → Y₂ → Z) → (X₁ → Y₁) → (X₂ → Y₂) → (X₁ → X₂ → Z)
-- map₂Arg₂ f g₁ g₂ x₁ x₂ = f(g₁ x₁)(g₂ x₂)
-- Function lifting //TODO: Consider removing because it is the same as _∘_
liftₗ : (X → Y) → ((Z → X) → (Z → Y))
liftₗ = _∘_ -- liftₗ(f) = f ∘_
liftᵣ : (X → Y) → ((Y → Z) → (X → Z))
liftᵣ = swap(_∘_) -- liftᵣ(f) = _∘ f
-- Applies an argument to two arguments of a binary function.
_$₂_ : (X → X → Y) → (X → Y)
f $₂ x = f x x
apply₂ : X → (X → X → Y) → Y
apply₂ x f = f x x
proj₂ₗ : X → Y → X
proj₂ₗ = const
proj₂ᵣ : X → Y → Y
proj₂ᵣ = const id
open import Syntax.Function public
|
FAQ on Brussels Ghost Tour - Find your answer here !
What should I bring for the tour?
Nothing special, just be ready to walk for a bit, and perhaps bring something to protect you from poltergeists and rain. Any kind of amulet or protection stone is also welcome.
Can kids join too ?
Yes, we accept kids from 6 years old and on! We also have a family discount, if you book for 2 adults and 2 kids!
When does the tour run ?
Where is the meeting point ?
Brussels has many paved roads and those are hardly accessible with a wheelchair, for that reason we have to unfortunately label our tour as not wheelchair accessible. |
```python
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12, 9)
plt.rcParams["font.size"] = 18
```
# In Core Fuel Management
In-core fuel management focuses on the study of requirements and operational considerations impacting fuel performance in the reactor core, power history, core loading patterns, and refuelling activities.
## Learning Objectives
At the end of this lesson, you will be equipped to:
- List safety constraints driving in core fuel management decisions.
- Calculate capacity and availability factors.
- Calculate the mass required for each reactor year of operation.
- Calculate core and assembly discharge burnup based on power output.
- Analyze the reactivity evolution of a core based on burnup.
- Apply burnup calculations to multiple batch cores.
- Recognize the relationship between the number of batches and the final burnup.
- Understand the goals driving choices in various fuel loading patterns.
- Apply these lessons to pebble-fuelled and liquid-fueled advanced reactor designs.
- Recognize the impact of extended burnup on fuel utilization, SWU utilization, and fuel cycle cost.
- Understand how isotopic activities can be used to determine fuel burnup.
- Calculate burnup based on key activity ratios.
## Safety Constraints
- $\frac{P_{peak}}{P_{avg}}$, peak to average power ratio.
- $T_{max}$, maximimum core temperature.
- Departure from Nucleate Boiling Ratio (DNBR)
- $\rho$, reactivity in the core.
- $\alpha_T$, temperature coefficient of reactivity
Primarily, there is a loss of coolant accident (LOCA) peak clad temp (PCT) limit of 1205 $^\circ C$, which limits the maximum pellet linear power density to approx 48 kW/m at Hot Full Power(HFP).
- Critical Heat Flux (CHF), which denotes departure from nucleate boiling (DNB) for a PWR and dryout for a BWR, not being exceeded during anticipated transients, which limits the maximum average fuel pin linear power density to approximately 29 kW/m at HFP.
- Fuel cladding strain limit not exceeded during anticipated transients
### Safety Variables
- Fuel enrichment
- Re-load batch size & number of assemblies
- Fuel loading pattern of fresh and partially spent fuel assemblies
- Control mechanisms
## Mass Required
The simplest possible representation of the mass of fuel that must be added into a reactor is:
\begin{align}
M(t) &= \frac{Q}{BU}
\end{align}
where
\begin{align}
M &= \mbox{mass of heavy metal (e.g., uranium) in the core }[MTHM/yr]\\
Q &= \mbox{annual thermal energy output }[GWd/yr]\\
BU &= \mbox{burnup }[GWd/MTIHM]
\end{align}
But, Q itself typically needs to be back-calculated from energy produced.
\begin{align}
Q &= \frac{P_0\cdot CF\cdot T}{\eta_{th}}
\end{align}
where
\begin{align}
P_0 &= \mbox{installed electric capacity }[GWe]\\
CF &= \mbox{capacity factor }[-]\\
T &= \mbox{time in core } [days]\\
\eta_{th} &= \mbox{thermal efficiency }[GWe/GWth]\\
\end{align}
```python
def m(q, bu):
return q/bu
def q(p0, cf, t, eta_th):
return p0*cf*t/eta_th
p0 = 1.500 # installed electric capacity GWe
cf = 0.9 # capacity factor
t = 365 # days per year
eta_th = 0.33 # thermal efficiency GWe/GWth
bu = 50 # burnup GWd/MTIHM
print(m(q(p0, cf, t, eta_th), bu))
```
29.863636363636363
## Capacity and Availability Factors
The capacity factor is representative of the plant's tendency to achieve its rated power capacity.
\begin{align}
CF &= \frac{\mbox{actual power generated over time T}}{\mbox{rated power potential over time T}}\\
&=\frac{\int_0^T P(t)dt}{P_0T}\\
P(t) &= \mbox{ thermal power at time t during period T}
\end{align}
The capacity factor, integrated over time, gives Effective Full Power Days (EFPD), the equivalent number of days at full power.
\begin{align}
EFPD &= \int_0^T CF(t)\,dt\\
&= \int_0^T \frac{P(t)}{P_0}\,dt\\
\end{align}
The availability factor is always greater than the capacity factor.
\begin{align}
AF &= \frac{\mbox{time during which the reactor was operational during time period T}}{T}
\end{align}
```python
# The reactor shuts down:
# for a few days during the 10th month
# for one month during month 18
shutdowns = {10:10.1,
18.5:19.5}
import numpy as np
def A(t, shutdowns):
to_ret = 1.0*(t > 0)
for start,stop in shutdowns.items():
if start < t and t < stop:
to_ret = 0
return to_ret
times = np.arange(0.0, 20.0, 0.01)
hist = np.arange(0.0, 20.0, 0.01)
cf = np.arange(0.0, 20.0, 0.01)
for i in range(0, times.size):
hist[i] = A(times[i], shutdowns)
cf[i] = A(times[i], shutdowns)*(1.-0.01*np.random.random())
plt.plot(times, hist, label='Availability')
plt.plot(times, cf, label='Capacity')
plt.ylim([-0.5, 1.5])
plt.title('Capacity and Availabilty')
plt.xlabel('Time (months)')
plt.ylabel('Factor [-]')
plt.legend()
```
We can do a quick numeric integral to get each factor as an integral over the 20 month cycle.
\begin{align}
AF &= \frac{\int_0^{20}A(t)dt}{T}\\
CF &= \frac{\int_0^{20}P(t)dt}{P_0T}\\
\end{align}
```python
print("Availability Factor = ", hist.sum()/hist.shape[0])
print("Capacity Factor = ", cf.sum()/cf.shape[0])
```
Availability Factor = 0.9455
Capacity Factor = 0.9408645280541623
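The same capacity trace can be integrated to estimate the effective full power days over this cycle; the sketch below assumes the 20-month period defined above and an average of 30.4 days per month.
```python
# EFPD estimate: integrate CF(t) over the cycle (in months) and convert to days
dt_months = times[1] - times[0]
efpd = cf.sum() * dt_months * 30.4
print("EFPD over the 20-month cycle =", round(efpd, 1), "days")
```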
## Simple Reactivity Model
- On each cycle (1/n)th of the fuel is replaced
- Each fuel batch experiences a discharge burnup of Bd
- Each fuel batch on each cycle experiences a burnup of Bd/n
- $k_{reactor}$ is the uncontrolled multiplication factor (excess reactivity)
- $k_i$ is the infinite multiplication factor of a fuel batch (excess reactivity)
Each batch of fuel will have a different burn-up and $k_i(B)$ since each batch has been in the reactor a different length of time. The reactivity of the reactor is found by summing over the reactivities of all the batches of fuel, for n batches:
\begin{align}
k_{reactor} = \frac{1}{n}\sum_{i=1}^{n}k_i(B)
\end{align}
\begin{align}
k_i(B) = k_0 - \alpha B_n
\end{align}
- $k_0$ is the uncontrolled infinite multiplication factor of the fuel batch when it is fresh.
- $B_n$ is the burnup of the batch in a single cycle. The n refers to the number of batches that the reload scheme includes.
- $\alpha$ is a constant of proportionality with units of 1/Bn. Uniform linear depletion.
- $k_F$ is the uncontrolled infinite multiplication factor necessary to sustain a chain reaction at the end of an operating cycle
```python
def ki(k0, alpha, b):
return k0 - alpha*b
def k(ki, n):
return (1/n)*np.sum(ki)
n=0
k0 =4.5
alpha = (k0 - 1)/20000
bu = np.arange(0, 50000., 1000.)
plt.plot(bu, ki(k0, alpha, bu))
plt.plot(bu, np.zeros(bu.shape), color='r')
plt.ylabel(r'$k_i(B)$')
plt.xlabel(r'$B$')
plt.title('Excess Reactivity Using Linear Depletion Model')
```
This approximation is somewhat accurate and gives an intuition for the impact of reloading on excess reactivity in the core.
## Single Cycle Refuelling
\begin{align}
k_{reactor} = k_1(B_1)
\end{align}
\begin{align}
k_1(B_1) = k_0 - \alpha B_1
\end{align}
Therefore the fuel burnup capability is:
\begin{align}
B_1 &= \frac{k_0-k_F}{\alpha}
\end{align}
## Two Cycle Refuelling
At the end of each cycle one batch of fuel has been burned for one cycle and the other batch has been burned for two cycles. Thus:
\begin{align}
k_F &= \frac{k_0 - \alpha B_2}{2} + \frac{k_0 - 2\alpha B_2}{2}\\
&= k_0 - \frac{3\alpha B_2}{2}\\
B_2 &= \frac{2(k_0 - k_F)}{3\alpha}\\
&= \frac{2}{3}B_1
\end{align}
- Each batch in the two cycle reload scheme is burned for $2B_2$.
So, in terms of the single cycle reload burnup:
\begin{align}
2B_2 &= 2\left(\frac{2}{3}B_1\right)\\
&= \frac{4}{3}B_1\\
\end{align}
**This means there is 1/3 more burnup in the two cycle reload, for the same initial and final multiplication factors $k_0$ and $k_F$ (exactly the same fuel.)**
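A quick numeric check of this result, reusing the linear depletion constants from the plot above (`k0`, `alpha`) and assuming an end-of-cycle multiplication factor $k_F = 1$:
```python
# two-batch discharge burnup should come out to 4/3 of the single-batch value
kF = 1.0
b1_check = (k0 - kF) / alpha            # single-cycle burnup capability B1
b2_check = 2 * (k0 - kF) / (3 * alpha)  # per-cycle burnup B2 with two batches
print("B1 =", b1_check, " 2*B2 =", 2 * b2_check, " ratio =", (2 * b2_check) / b1_check)
```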
## N Cycle Reload Scheme
The relation between end-of-cycle core multiplication factor kF and the fresh fuel batch infinite multiplication factor k0 and the batch burnup in general is
\begin{align}
k_F &= k_0 - \frac{1}{n}\sum_{i=1}^{n}i\alpha B_n\\
\end{align}
Recall the standard arithmetic series identity:
\begin{align}
\sum_{i=1}^{n}i &= \frac{n(n + 1)}{2}\\
\end{align}
Therefore:
\begin{align}
k_F &= k_0 - \left(\frac{n + 1}{2}\right)\alpha B_n\\
\end{align}
The batch burnup in a single cycle is then the result of solving for $B_n$:
\begin{align}
B_n &= \frac{2(k_0 - k_F)}{\alpha(n + 1)}
\end{align}
The discharge burnup of batch n, is the batch burnup in a cycle times the number of cycles:
\begin{align}
B_n^d &= nB_n\\
&= \frac{2n(k_0 - k_F)}{\alpha(n + 1)}\\
&= \left(\frac{2n}{n + 1}\right)\frac{k_0 - k_F}{\alpha} \\
&= \left(\frac{2n}{n + 1}\right)B_1 \\
\end{align}
```python
def bd(n, b1):
num = 2*n*b1
denom = n+1
return num/denom
b1 = 12000
n = np.arange(1,50)
plt.plot(n, bd(n, b1))
```
### Discussion: What is the primary drawback of many batches per core?
## Fuel Loading Patterns
Various fuel loading patterns are used to achieve improved fuel utilization (higher burnup), better core control, and lower neutron leakage to the pressure vessel.
## Many and $\infty$ Batch Reactor Designs
Infinite batch refuelling (a.k.a. online refuelling) is possible in liquid fuelled cores with online reprocessing.
What exactly is a pebble core, then, in terms of batches?
<center>Aufiero, 2016</center>
## Determining Burnup
- Direct methods occur while the fuel is still in the core (using ion chambers and in-core flux probes)
- Indirect methods use measurements of activity after the fuel has been removed.
\begin{align}
BU &= a + bA(^{137}Cs)\\
BU &= c(e, r) + d(e, r) \left[A(^{134}Cs)/A(^{137}Cs)\right]\\
BU &= a\cdot \exp{\left[b\cdot \ln\left(\frac{A(^{106}Ru)\,A(^{137}Cs)}{A(^{134}Cs)^2}\right)\right]}\\
a, b, c, d &= \mbox{calibration constants}\\
e &= \mbox{enrichment}\\
r &= \mbox{power rating}
\end{align}
```python
```
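As a worked illustration of the second correlation above, the sketch below evaluates $BU = c + d\,[A(^{134}Cs)/A(^{137}Cs)]$ with made-up calibration constants; real values of $c$ and $d$ depend on the enrichment and power rating and would come from calibration measurements.
```python
# illustrative only: hypothetical calibration constants for a given enrichment and power rating
c_cal = 5.0    # GWd/MTIHM, assumed intercept
d_cal = 55.0   # GWd/MTIHM per unit activity ratio, assumed slope

def burnup_from_cs_ratio(a_cs134, a_cs137, c=c_cal, d=d_cal):
    """Estimate burnup from the Cs-134/Cs-137 activity ratio."""
    return c + d * (a_cs134 / a_cs137)

print(burnup_from_cs_ratio(a_cs134=0.6, a_cs137=1.0), "GWd/MTIHM")
```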
|
theory T117
imports Main
begin
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(meet(x, y), z) = join(undr(x, z), undr(y, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(join(x, y), z) = join(over(x, z), over(y, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end |
module Heatwave
using ..Ahorn, Maple
placements = Maple.Heatwave
function Ahorn.canFgBg(effect::Maple.Heatwave)
return true, true
end
end |
module Util
import Data.Config
import Data.Fuel
import Data.List
import Data.String
import Text.PrettyPrint.Prettyprinter
import Text.PrettyPrint.Prettyprinter.Render.Terminal
%default total
||| Render with or without color based on configuration
export
renderString : Config => Doc AnsiStyle -> String
renderString @{config} =
renderString . layoutPretty defaultLayoutOptions . if config.colors then id else unAnnotate
export
getManyLines : HasIO io => Fuel -> io (List String)
getManyLines = getMoreLines []
where
getMoreLines : (acc : List String) -> Fuel -> io (List String)
getMoreLines acc Dry = pure (reverse acc)
getMoreLines acc (More fuel) =
do line <- trim <$> getLine
-- stop collecting lines on second blank line.
case (acc, line) of
("" :: rest, "") => pure (reverse rest)
_ => getMoreLines (line :: acc) fuel
export
parseJiraPrefix : String -> Maybe String
parseJiraPrefix = map (pack . reverse) . guardSuccess . foldl go startOver . unpack
where
data Part = Start | Proj | Dash | Num | End
startOver : (Part, List Char)
startOver = (Start, [])
guardSuccess : (Part, List Char) -> Maybe (List Char)
guardSuccess (Num, y) = Just y
guardSuccess (End, y) = Just y
guardSuccess _ = Nothing
go : (Part, List Char) -> Char -> (Part, List Char)
-- start off looking for alpha characters that are a Jira Project slug.
go (Start, cs) c = if isAlpha c then (Proj, c :: cs) else startOver
-- if you've found alpha characters, keep an eye out for a dash.
go (Proj , cs) '-' = (Dash, '-' :: cs)
-- continue parsing alpha until you find the aforementioned dash.
-- start over if you find something else.
go (Proj , cs) c = if isAlpha c then (Proj, c :: cs) else startOver
-- we expect a number after a dash or else we start over.
go (Dash , cs) c = if isDigit c then (Num, c :: cs) else startOver
-- now we expect numbers until we reach the end of the prefix.
go (Num , cs) c = if isDigit c then (Num, c :: cs) else (End, cs)
-- once we are done, we just ignore the remaining characters.
go (End , cs) c = (End, cs)
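    -- A sketch of the intended behaviour (illustrative examples, not part of the
    -- original test suite):
    --   parseJiraPrefix "ABC-123: fix the bug"  -- Just "ABC-123"
    --   parseJiraPrefix "no ticket here"        -- Nothing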
|
(* Title: JinjaThreads/Framework/FWCondAction.thy
Author: Andreas Lochbihler
*)
section \<open>Semantics of the thread actions for purely conditional purpose such as Join\<close>
theory FWCondAction
imports
FWState
begin
locale final_thread =
fixes final :: "'x \<Rightarrow> bool"
begin
primrec cond_action_ok :: "('l,'t,'x,'m,'w) state \<Rightarrow> 't \<Rightarrow> 't conditional_action \<Rightarrow> bool" where
"\<And>ln. cond_action_ok s t (Join T) =
(case thr s T of None \<Rightarrow> True | \<lfloor>(x, ln)\<rfloor> \<Rightarrow> t \<noteq> T \<and> final x \<and> ln = no_wait_locks \<and> wset s T = None)"
| "cond_action_ok s t Yield = True"
primrec cond_action_oks :: "('l,'t,'x,'m,'w) state \<Rightarrow> 't \<Rightarrow> 't conditional_action list \<Rightarrow> bool" where
"cond_action_oks s t [] = True"
| "cond_action_oks s t (ct#cts) = (cond_action_ok s t ct \<and> cond_action_oks s t cts)"
lemma cond_action_oks_append [simp]:
"cond_action_oks s t (cts @ cts') \<longleftrightarrow> cond_action_oks s t cts \<and> cond_action_oks s t cts'"
by(induct cts, auto)
lemma cond_action_oks_conv_set:
"cond_action_oks s t cts \<longleftrightarrow> (\<forall>ct \<in> set cts. cond_action_ok s t ct)"
by(induct cts) simp_all
lemma cond_action_ok_Join:
"\<And>ln. \<lbrakk> cond_action_ok s t (Join T); thr s T = \<lfloor>(x, ln)\<rfloor> \<rbrakk> \<Longrightarrow> final x \<and> ln = no_wait_locks \<and> wset s T = None"
by(auto)
lemma cond_action_oks_Join:
"\<And>ln. \<lbrakk> cond_action_oks s t cas; Join T \<in> set cas; thr s T = \<lfloor>(x, ln)\<rfloor> \<rbrakk>
\<Longrightarrow> final x \<and> ln = no_wait_locks \<and> wset s T = None \<and> t \<noteq> T"
by(induct cas)(auto)
lemma cond_action_oks_upd:
assumes tst: "thr s t = \<lfloor>xln\<rfloor>"
shows "cond_action_oks (locks s, ((thr s)(t \<mapsto> xln'), shr s), wset s, interrupts s) t cas = cond_action_oks s t cas"
proof(induct cas)
case Nil thus ?case by simp
next
case (Cons ca cas)
from tst have eq: "cond_action_ok (locks s, ((thr s)(t \<mapsto> xln'), shr s), wset s, interrupts s) t ca = cond_action_ok s t ca"
by(cases ca) auto
with Cons show ?case by(auto simp del: fun_upd_apply)
qed
lemma cond_action_ok_shr_change:
"cond_action_ok (ls, (ts, m), ws, is) t ct \<Longrightarrow> cond_action_ok (ls, (ts, m'), ws, is) t ct"
by(cases ct) auto
primrec cond_action_ok' :: "('l,'t,'x,'m,'w) state \<Rightarrow> 't \<Rightarrow> 't conditional_action \<Rightarrow> bool"
where
"cond_action_ok' _ _ (Join t) = True"
| "cond_action_ok' _ _ Yield = True"
primrec cond_action_oks' :: "('l,'t,'x,'m,'w) state \<Rightarrow> 't \<Rightarrow> 't conditional_action list \<Rightarrow> bool" where
"cond_action_oks' s t [] = True"
| "cond_action_oks' s t (ct#cts) = (cond_action_ok' s t ct \<and> cond_action_oks' s t cts)"
lemma cond_action_oks'_append [simp]:
"cond_action_oks' s t (cts @ cts') \<longleftrightarrow> cond_action_oks' s t cts \<and> cond_action_oks' s t cts'"
by(induct cts, auto)
lemma cond_action_oks'_subset_Join:
"set cts \<subseteq> insert Yield (range Join) \<Longrightarrow> cond_action_oks' s t cts"
apply(induct cts)
apply(auto)
done
end
definition collect_cond_actions :: "'t conditional_action list \<Rightarrow> 't set" where
"collect_cond_actions cts = {t. Join t \<in> set cts}"
declare collect_cond_actions_def [simp]
lemma cond_action_ok_final_change:
"\<lbrakk> final_thread.cond_action_ok final1 s1 t ca;
\<And>t. thr s1 t = None \<longleftrightarrow> thr s2 t = None;
\<And>t x1. \<lbrakk> thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>; final1 x1; wset s1 t = None \<rbrakk>
\<Longrightarrow> \<exists>x2. thr s2 t = \<lfloor>(x2, no_wait_locks)\<rfloor> \<and> final2 x2 \<and> ln2 = no_wait_locks \<and> wset s2 t = None \<rbrakk>
\<Longrightarrow> final_thread.cond_action_ok final2 s2 t ca"
apply(cases ca)
apply(fastforce simp add: final_thread.cond_action_ok.simps)+
done
lemma cond_action_oks_final_change:
assumes major: "final_thread.cond_action_oks final1 s1 t cas"
and minor: "\<And>t. thr s1 t = None \<longleftrightarrow> thr s2 t = None"
"\<And>t x1. \<lbrakk> thr s1 t = \<lfloor>(x1, no_wait_locks)\<rfloor>; final1 x1; wset s1 t = None \<rbrakk>
\<Longrightarrow> \<exists>x2. thr s2 t = \<lfloor>(x2, no_wait_locks)\<rfloor> \<and> final2 x2 \<and> ln2 = no_wait_locks \<and> wset s2 t = None"
shows "final_thread.cond_action_oks final2 s2 t cas"
using major
by(induct cas)(auto simp add: final_thread.cond_action_oks.simps intro: cond_action_ok_final_change[OF _ minor])
end
|
function WriteLDRStack(stack, name, format)
%
%       WriteLDRStack(stack, name, format)
%
%       This function writes an LDR stack into a set of LDR images
%
%       Input:
%           -stack: the stack to be written
%           -name: the output file name prefix (it may include a folder path)
%           -format: an LDR format for writing LDR images (e.g., 'png')
%
%       Output:
%           (none; the images are written to disk)
%
% Copyright (C) 2019 Francesco Banterle
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%
n = size(stack, 4);
for i=1:n
imwrite(stack(:,:,:,i), [name, '_', num2str(100000 + i), '.', format]);
end
end |
I’ve never applied for a passport, because until now, I didn’t really feel I needed one. With no plans to travel outside my home country, it seemed just a hollow gesture to apply. In this way, I am not unlike millions of people who go through life without a passport, since they feel the same way I do. This is a perfect analogy of life in general, and life after death specifically. Some people are not interested in having another life after this one, they don’t wonder about the future, they see no need in believing it’s true, because they don’t intend to go there. Ignorance is not bliss.
Believing in the “hereafter” in many ways requires a spiritual passport. A person has to first become convinced there is a life in the hereafter, and that they want to go there. This step alone is a tiny act of faith, since none of the current world religious systems offer any clear evidence of another life after this one, except Christianity. The Case for Christ movie begins next week, and in my opinion is a must-see for every person wanting more information about why Christianity’s claims are true. Here’s my shortened version.
God offers every person a passport to the life He designed for us to have, with a place for us to live in constant community with Him. The passport fees have already been paid by His Son, Jesus. The passport is issued with a NEW NAME, because we are going to a NEW COUNTRY.
He who has an ear, let him hear what the Spirit says to the churches. To him who overcomes, to him I will give some of the hidden manna, and I will give him a white stone, and a new name written on the stone which no one knows but he who receives it.
He who overcomes, I will make him a pillar in the temple of My God, and he will not go out from it anymore; and I will write on him the name of My God, and the name of the city of My God, the new Jerusalem, which comes down out of heaven from My God, and My new name.
Old Testament patriarchs, kings, and prophets all knew the truth of this place and this passport found in a person called Messiah. David and Isaiah wrote about God’s provision for His people.
But now, thus says the LORD, your Creator, O Jacob, and He who formed you, O Israel, “Do not fear, for I have redeemed you; I have called you by name; you are Mine! When you pass through the waters, I will be with you; and through the rivers, they will not overflow you. When you walk through the fire, you will not be scorched, nor will the flame burn you.
These Old & New Testament verses help us understand that God has made a provision to take every believer to a newly created place that was very much like Eden, yet entrance is attainable only through a specific faith statement. Jesus gave us the specific criteria for our application on this spiritual passport.
This kingdom of God, the next life after this one, is described in at least a few details, in the last book of the Bible, the Book of Revelation. Revelation 21 describes a new heaven, a new earth, and a new Jerusalem. Revelation 22 describes two attributes of this new earth, that sustain life for all who dwell there: the River of Life, and the Tree of Life. In the last few verses of the last book in the Bible, Jesus gives a very interesting command to John, who is recording all this information for us.
And he said to me, “Do not seal up the words of the prophecy of this book, for the time is near. Let the one who does wrong, still do wrong; and the one who is filthy, still be filthy; and let the one who is righteous, still practice righteousness; and the one who is holy, still keep himself holy. Behold, I am coming quickly, and My reward is with Me, to render to every man according to what he has done. I am the Alpha and the Omega, the first and the last, the beginning and the end.
This one phrase “do not seal up” means that the main message and theme of the Book of Revelation can be read, and understood, by those who seek truth regarding the consummation of human history and what happens when this life is over. The spiritual passport that God makes available to every human is clearly seen in this book. The hope of an afterlife that is real and desired is clearly seen in this book. If you would like to know more, there is a bible study book available by clicking here. Want hope for the afterlife? Find it in Jesus! |
State Before: S : Type ?u.171562
R : Type u_1
R₁ : Type ?u.171568
M : Type u_2
inst✝³ : Ring R
inst✝² : CommRing R₁
inst✝¹ : AddCommGroup M
inst✝ : Module R M
Q : QuadraticForm R M
x y y' : M
⊢ polar (↑Q) x (y - y') = polar (↑Q) x y - polar (↑Q) x y' State After: no goals Tactic: rw [sub_eq_add_neg, sub_eq_add_neg, polar_add_right, polar_neg_right] |
/**
Copyright © 2018 ebio lab. SNU. All Rights Reserved.
@file sssampledriver.cpp
@date 2018-03-22, JoonYong
@author Kim, JoonYong <[email protected]>
This file is for server-side sample driver.
refer from: https://github.com/ebio-snu/cvtdriver
*/
#include <iostream>
#include <sstream>
#include <string>
#include <queue>
#include <boost/config.hpp>
#include <glog/logging.h>
#include <mysql_connection.h>
#include <cppconn/driver.h>
#include <cppconn/exception.h>
#include <cppconn/resultset.h>
#include <cppconn/statement.h>
#include <cppconn/prepared_statement.h>
#include "../spec/cvtdevice.h"
#include "../spec/cvtdriver.h"
using namespace std;
using namespace stdcvt;
namespace ebiodriver {
// Example of how extra functionality could be added to a sensor.
// CvtSensor handles the id as a string, whereas SSSensor handles it as a number.
// Not actually used.
class SSSensor : public CvtSensor {
public:
SSSensor(int devid, stdcvt::devtype_t devtype, stdcvt::devsec_t section,
stdcvt::devtarget_t target, stdcvt::devstat_t devstatus, stdcvt::obsunit_t unit)
: stdcvt::CvtSensor (to_string(devid), devtype, section, target, devstatus, unit) {
}
};
// Example of how extra functionality could be added to a motor.
// CvtMotor handles the id as a string, whereas SSMotor handles it as a number.
// Not actually used.
class SSMotor : public CvtMotor{
public:
SSMotor(int devid, stdcvt::devtype_t devtype, stdcvt::devsec_t section,
stdcvt::devtarget_t target, stdcvt::devstat_t devstatus)
: stdcvt::CvtMotor(to_string(devid), devtype, section, target, devstatus) {
}
};
// Example of how extra functionality could be added to a switch.
// CvtActuator handles the id as a string, whereas SSSwitch handles it as a number.
// Not actually used.
class SSSwitch : public CvtActuator {
public:
SSSwitch(int devid, stdcvt::devtype_t devtype, stdcvt::devsec_t section,
stdcvt::devtarget_t target, stdcvt::devstat_t devstatus)
: stdcvt::CvtActuator(to_string(devid), devtype, section, target, devstatus) {
}
};
class SSSampleDriver : public CvtDriver {
private:
    int _lastcmdid;                 ///< command id sequence
    queue<CvtCommand *> _cmdq;      ///< command queue
    vector<CvtDevice *> _devvec;    ///< device vector
    string _host;                   ///< DB host
    string _user;                   ///< DB user
    string _pass;                   ///< DB user password
    string _db;                     ///< DB name
    sql::Driver *_driver;           ///< DB driver
    sql::Connection *_con;          ///< DB connection
public:
/**
    Creates a new SS driver.
*/
SSSampleDriver() : stdcvt::CvtDriver (2001, 100) {
_lastcmdid = 0;
_host = _user = _pass = _db = "";
        updated();  // The sample SS driver does not perform any real communication, so this call is included just to pass the tests.
}
~SSSampleDriver () {
}
/**
    Returns the version number assigned by the driver author.
    @return version number as a string
*/
string getversion () {
return "V0.1.0";
}
/**
    Returns the model number assigned by the driver author.
    @return model number as a string
*/
string getmodel () {
return "ebioss_v1";
}
/**
    Returns the driver manufacturer name.
    The converter uses the manufacturer name for logging purposes only.
    @return manufacturer name as a string
*/
string getcompany () {
return "EBIO lab. SNU.";
}
/**
    Initializes the driver. The options needed for driver operation are passed in key-value form.
    @param option options for driver operation
    @return whether initialization succeeded
*/
bool initialize (CvtOption option) {
LOG(INFO) << "SSSampleDriver initialized.";
_host = option.get("host");
_user = option.get("user");
_pass = option.get("pass");
_db = option.get("db");
try {
_driver = get_driver_instance();
_con = _driver->connect(_host, _user, _pass);
_con->setSchema(_db);
} catch (sql::SQLException &e) {
LOG(ERROR) << "# open ERR: SQLException " << e.what()
<< " (MySQL error code: " << e.getErrorCode()
<< ", SQLState: " << e.getSQLState() << " )";
return false;
}
try {
sql::Statement *stmt;
stmt = _con->createStatement();
stmt->execute ("TRUNCATE TABLE commands");
delete stmt;
} catch (sql::SQLException &e) {
LOG(ERROR) << "# Truncate commands ERR: SQLException " << e.what()
<< " (MySQL error code: " << e.getErrorCode()
<< ", SQLState: " << e.getSQLState() << " )";
return false;
}
return true;
}
/**
    Shuts the driver down.
    @return whether shutdown succeeded
*/
bool finalize () {
delete _con;
LOG(INFO) << "SSSampleDriver finalized.";
return true;
}
/**
    Performs preprocessing; called before the state exchange between drivers.
    @return whether preprocessing succeeded
*/
bool preprocess () {
//read command from DB
try {
sql::PreparedStatement *prepstmt;
sql::ResultSet *res;
CvtCommand *pcmd;
prepstmt = _con->prepareStatement("SELECT id, devtype, section, target, onoff, ratio from commands where id > ?");
prepstmt->setInt(1, _lastcmdid);
res = prepstmt->executeQuery();
while (res->next()) {
if (CvtDevice::getgroup((devtype_t)res->getInt(2)) == DG_MOTOR) {
CvtDeviceSpec tmpspec((devtype_t)res->getInt(2),
(devsec_t)res->getInt64(3), (devtarget_t)res->getInt(4));
pcmd = new CvtRatioCommand (res->getInt(1), &tmpspec,
(res->getInt(5) > 0) ? true: false, res->getDouble(6));
} else if (res->getInt(2) / 10000 == DG_SWITCH) {
CvtDeviceSpec tmpspec((devtype_t)res->getInt(2),
(devsec_t)res->getInt64(3), (devtarget_t)res->getInt(4));
pcmd = new CvtCommand (res->getInt(1), &tmpspec,
res->getInt(5)>0? true: false);
} else {
continue;
}
_lastcmdid = pcmd->getid ();
_cmdq.push (pcmd);
}
delete res;
delete prepstmt;
} catch (sql::SQLException &e) {
LOG(ERROR) << "# command select ERR: SQLException " << e.what()
<< " (MySQL error code: " << e.getErrorCode()
<< ", SQLState: " << e.getSQLState() << " )";
return false;
}
        updated();  // The sample SS driver does not perform any real communication, so this call is included just to pass the tests.
return true;
}
/**
    Performs postprocessing; called after the state exchange between drivers has taken place.
    @return whether postprocessing succeeded
*/
bool postprocess () {
//write observation to DB
try {
sql::Statement *stmt;
stmt = _con->createStatement();
stmt->execute ("TRUNCATE TABLE devices");
delete stmt;
} catch (sql::SQLException &e) {
LOG(ERROR) << "# Truncate devices ERR: SQLException " << e.what()
<< " (MySQL error code: " << e.getErrorCode()
<< ", SQLState: " << e.getSQLState() << " )";
return false;
}
try {
sql::PreparedStatement *prepstmt;
prepstmt = _con->prepareStatement(
"INSERT INTO devices(id, devtype, section, target, status, value, unit)"
" VALUES (?, ?, ?, ?, ?, ?, ?)");
for (vector<CvtDevice *>::size_type i = 0; i < _devvec.size(); ++i) {
prepstmt->setString(1, _devvec[i]->getid());
prepstmt->setInt(2, (_devvec[i]->getspec())->gettype());
prepstmt->setInt64(3, (_devvec[i]->getspec())->getsection());
prepstmt->setInt(4, (_devvec[i]->getspec())->gettarget());
prepstmt->setInt(5, _devvec[i]->getstatus());
if (CvtMotor *pmotor= dynamic_cast<CvtMotor *>(_devvec[i])) { // younger first
LOG(INFO) << "motor : " << pmotor->tostring();
LOG(INFO) << "motor current : " << pmotor->getcurrent();
prepstmt->setDouble(6, pmotor->getcurrent ());
prepstmt->setInt(7, OU_NONE);
} else if (CvtActuator *pactuator = dynamic_cast<CvtActuator *>(_devvec[i])) {
prepstmt->setDouble(6, 0);
prepstmt->setInt(7, OU_NONE);
} else if (CvtSensor *psensor = dynamic_cast<CvtSensor *>(_devvec[i])) {
prepstmt->setDouble(6, psensor->readobservation ());
prepstmt->setInt(7, psensor->getunit ());
}
prepstmt->execute ();
delete _devvec[i];
}
_con->commit ();
delete prepstmt;
_devvec.clear ();
} catch (sql::SQLException &e) {
LOG(ERROR) << "# Insert devices ERR: SQLException " << e.what()
<< " (MySQL error code: " << e.getErrorCode()
<< ", SQLState: " << e.getSQLState() << " )";
return false;
}
return true;
}
/**
    Hands out a pointer to a device managed by this driver.
    @param index index of the device to retrieve, starting at 0
    @return pointer to the device at the given index; NULL means there are no more devices
*/
CvtDevice *getdevice(int index) {
return nullptr;
}
/**
    Receives information about the given device.
    Called by the converter to pass in device information from other drivers.
    @param pdevice pointer to another driver's device
    @return success flag; returns true even for devices of no interest, as long as nothing went wrong
*/
bool sharedevice(CvtDevice *pdevice) {
CvtDevice *newdev = pdevice->clone ();
_devvec.push_back (newdev);
return true;
}
/**
    Hands over a command when this driver wants to control a device managed by another driver.
    A driver that does not issue commands can simply return NULL.
    @return pointer to the next command; NULL means there are no more commands
*/
CvtCommand *getcommand() {
if (_cmdq.empty ())
return nullptr;
CvtCommand *pcmd = _cmdq.front();
_cmdq.pop ();
return pcmd;
}
/**
    Receives and processes a command from another driver.
    @param pcmd pointer to the command
    @return whether the command was received, not whether it was actually executed; false if there is no device that can execute it
*/
bool control(CvtCommand *pcmd) {
return false;
}
};
extern "C" BOOST_SYMBOL_EXPORT SSSampleDriver plugin;
SSSampleDriver plugin;
} // namespace ebiodriver
|
(** * Sorted types. *)
(** Gianluca Amato, Marco Maggesi, Cosimo Perini Brogi 2019-2021 *)
(*
This file contains a formalization of _sorted types_, i.e. types indexed by elements of another
type, called _index type_. Notation and terminology are inspired by Wolfgang Wechler,
_Universal Algebra for Computer Scientists_, Springer.
*)
Require Import UniMath.Foundations.All.
Require Export UniMath.Combinatorics.MoreLists.
Require Export UniMath.Algebra.Universal.HVectors.
Declare Scope sorted_scope.
Delimit Scope sorted_scope with sorted.
Local Open Scope sorted_scope.
(** An element of [sUU S] is an [S]-sorted type, i.e., an [S]-indexed family of types. *)
Definition sUU (S: UU): UU := S → UU.
(** If [X] and [Y] are [S]-sorted types, then [sfun X Y] is an [S]-sorted mapping, i.e.,
a [S]-indexed family of functions [X s → Y s]. *)
Definition sfun {S: UU} (X Y: sUU S): UU := ∏ s: S, X s → Y s.
Notation "x s→ y" := (sfun x y) (at level 99, y at level 200, right associativity): type_scope.
Bind Scope sorted_scope with sUU.
Bind Scope sorted_scope with sfun.
Definition idsfun {S: UU} (X: sUU S): X s→ X := λ s: S, idfun (X s).
Definition scomp {S: UU} {X Y Z: sUU S} (f: Y s→ Z) (g: X s→ Y): sfun X Z
:= λ s: S, (f s) ∘ (g s).
Infix "s∘" := scomp (at level 40, left associativity): sorted_scope.
Definition sunit (S: UU): sUU S := λ σ: S, unit.
Definition tosunit {S: UU} {X: sUU S}: X s→ sunit S := λ σ: S, tounit.
Lemma iscontr_sfuntosunit {S: UU} {X: sUU S}: iscontr (X s→ sunit S).
Proof.
apply impred_iscontr.
intros.
apply iscontrfuntounit.
Defined.
(** An element of [shSet S] is an [S]-sorted set, i.e., an [S]-indexed family of sets. It can be
immediately coerced to an [S]-sorted type. *)
Definition shSet (S: UU): UU := S → hSet.
Definition sunitset (S: UU): shSet S := λ _, unitset.
Lemma isaset_set_sfun_space {S: UU} {X: sUU S} {Y: shSet S}: isaset (X s→ Y).
Proof.
change (isaset (X s→ Y)).
apply impred_isaset.
intros.
apply isaset_forall_hSet.
Defined.
(** If [X: sUU S], then [star X] is the lifting of [X] to the index type [list S], given
by [star X] [s1; s2; ...; sn] = [X s1 ; X s2 ; ... ; X sn]. *)
Definition star {S: UU} (X: sUU S): sUU (list S) := λ l: list S, hvec (vec_map X (pr2 l)).
Bind Scope hvec_scope with star.
Notation "A ⋆" := (star A) (at level 3, format "'[ ' A '⋆' ']'"): sorted_scope.
(** If [f] is an indexed mapping between [S]-indexed types [X] and [Y], then [starfun X] is the lifting of
[f] to a [list S]-indexed mapping between [list S]-indexed sets [star X] and [star Y].
*)
Definition starfun {S: UU} {X Y: sUU S} (f: sfun X Y) : sfun X⋆ Y⋆ := λ s: list S, h1map f.
Notation "f ⋆⋆" := (starfun f) (at level 3, format "'[ ' f '⋆⋆' ']'"): sorted_scope.
(** Here follows the proof that [starfun] is functorial. Compositionality w.r.t. [s∘] is presented as
[(f s∘ g)⋆⋆ _ x = f⋆⋆ _ (g⋆⋆ _ x)] instead of [(f s∘ g)⋆⋆ = (f⋆⋆) s∘ (g⋆⋆ )] since the former
does not require function extensionality. *)
Lemma staridfun {S: UU} {X: sUU S} (l: list S) (x: X⋆ l): (idsfun X)⋆⋆ _ x = idsfun X⋆ _ x.
Proof.
apply h1map_idfun.
Defined.
Lemma starcomp {S: UU} {X Y Z: sUU S} (f: Y s→ Z) (g: X s→ Y) (l: list S) (x: X⋆ l)
: (f s∘ g)⋆⋆ _ x = f⋆⋆ _ (g⋆⋆ _ x).
Proof.
unfold starfun.
apply pathsinv0.
apply h1map_compose.
Defined.
|
"""Fluid properties, geometric and kinematic parameters."""
import numpy
# Parameters.
D = 1.0 # sphere diameter
# Kinematic parameters.
Am = 0.125 * D # oscillation amplitude
Um = 1.0 # maximum translation velocity
# Fluid properties.
Re = 78.54 # Reynolds number
nu = Um * D / Re # kinematic viscosity
rho = 1.0 # density
# Temporal parameters.
St = 1.2732 # Strouhal number
f = St * Um / D # oscillation frequency
T = 1 / f # time period
# Simulation parameters.
dt = 0.00157 * D / Um # time-step size
n_periods = 5 # number of periods
tf = n_periods * T # final time
nt_period = 500 # number of time steps per period
nt = n_periods * nt_period # number of time steps
dt2 = tf / nt # time-step size
if __name__ == '__main__':
print(locals())
|
State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: let Fu : Type max uF uG := ULift.{uG, uF} F State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: let Gu : Type max uF uG := ULift.{uF, uG} G State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have isoF : Fu ≃ₗᵢ[𝕜] F := LinearIsometryEquiv.ulift 𝕜 F State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have isoG : Gu ≃ₗᵢ[𝕜] G := LinearIsometryEquiv.ulift 𝕜 G State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: let fu : E → Fu := isoF.symm ∘ f State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: let gu : Fu → Gu := isoG.symm ∘ g ∘ isoF State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: let tu := isoF ⁻¹' t State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have htu : UniqueDiffOn 𝕜 tu := isoF.toContinuousLinearEquiv.uniqueDiffOn_preimage_iff.2 ht State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have hstu : MapsTo fu s tu := fun y hy ↦ by
simpa only [mem_preimage, comp_apply, LinearIsometryEquiv.apply_symm_apply] using hst hy State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have Ffu : isoF (fu x) = f x := by simp only [comp_apply, LinearIsometryEquiv.apply_symm_apply] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have hfu : ContDiffOn 𝕜 n fu s := isoF.symm.contDiff.comp_contDiffOn (hf.of_le hn) State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have hgu : ContDiffOn 𝕜 n gu tu :=
isoG.symm.contDiff.comp_contDiffOn
((hg.of_le hn).comp_continuousLinearMap (isoF : Fu →L[𝕜] F)) State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have Nfu : ∀ i, ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖ := fun i ↦ by
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: simp_rw [← Nfu] at hD State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have Ngu : ∀ i,
‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ := fun i ↦ by
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ htu (hstu hx)]
rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ ht, Ffu]
rw [Ffu]
exact hst hx State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: simp_rw [← Ngu] at hC State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
Nfgu : ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n Tactic: have Nfgu :
‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ := by
have : gu ∘ fu = isoG.symm ∘ g ∘ f := by
ext x
simp only [comp_apply, LinearIsometryEquiv.map_eq_iff, LinearIsometryEquiv.apply_symm_apply]
rw [this, LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
Nfgu : ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ ≤ ↑n ! * C * D ^ n State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
Nfgu : ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ ≤ ↑n ! * C * D ^ n Tactic: rw [Nfgu] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
Nfgu : ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖
⊢ ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ ≤ ↑n ! * C * D ^ n State After: no goals Tactic: exact norm_iteratedFDerivWithin_comp_le_aux hgu hfu htu hs hstu hx hC hD State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
y : E
hy : y ∈ s
⊢ fu y ∈ tu State After: no goals Tactic: simpa only [mem_preimage, comp_apply, LinearIsometryEquiv.apply_symm_apply] using hst hy State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
⊢ ↑isoF (fu x) = f x State After: no goals Tactic: simp only [comp_apply, LinearIsometryEquiv.apply_symm_apply] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i f s x‖ ≤ D ^ i
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
i : ℕ
⊢ ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖ State After: no goals Tactic: rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ ‖iteratedFDerivWithin 𝕜 i (g ∘ ↑isoF) tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ Tactic: rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ htu (hstu hx)] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ ‖iteratedFDerivWithin 𝕜 i (g ∘ ↑isoF) tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ State After: case hx
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ ↑isoF (fu x) ∈ t Tactic: rw [LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_right _ _ ht, Ffu] State Before: case hx
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ ↑isoF (fu x) ∈ t State After: case hx
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ f x ∈ t Tactic: rw [Ffu] State Before: case hx
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
hC : ∀ (i : ℕ), i ≤ n → ‖iteratedFDerivWithin 𝕜 i g t (f x)‖ ≤ C
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
i : ℕ
⊢ f x ∈ t State After: no goals Tactic: exact hst hx State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ State After: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
this : gu ∘ fu = ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ f
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ Tactic: have : gu ∘ fu = isoG.symm ∘ g ∘ f := by
ext x
simp only [comp_apply, LinearIsometryEquiv.map_eq_iff, LinearIsometryEquiv.apply_symm_apply] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
this : gu ∘ fu = ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ f
⊢ ‖iteratedFDerivWithin 𝕜 n (g ∘ f) s x‖ = ‖iteratedFDerivWithin 𝕜 n (gu ∘ fu) s x‖ State After: no goals Tactic: rw [this, LinearIsometryEquiv.norm_iteratedFDerivWithin_comp_left _ _ hs hx] State Before: 𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x) = f x
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x‖ = ‖iteratedFDerivWithin 𝕜 i f s x‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x)‖ ≤
C
⊢ gu ∘ fu = ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ f State After: case h.h
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝¹ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x✝ : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x✝ ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x✝) = f x✝
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x✝‖ = ‖iteratedFDerivWithin 𝕜 i f s x✝‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x✝‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x✝)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x✝)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x✝)‖ ≤
C
x : E
⊢ ((gu ∘ fu) x).down = ((↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ f) x).down Tactic: ext x State Before: case h.h
𝕜 : Type u_1
inst✝¹⁰ : NontriviallyNormedField 𝕜
D✝ : Type uD
inst✝⁹ : NormedAddCommGroup D✝
inst✝⁸ : NormedSpace 𝕜 D✝
E : Type uE
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type uF
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type uG
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
X : Type ?u.4322453
inst✝¹ : NormedAddCommGroup X
inst✝ : NormedSpace 𝕜 X
s✝ s₁ t✝ u : Set E
f✝ f₁ : E → F
g✝ : F → G
x✝¹ x₀ : E
c : F
b : E × F → G
m n✝ : ℕ∞
p : E → FormalMultilinearSeries 𝕜 E F
g : F → G
f : E → F
n : ℕ
s : Set E
t : Set F
x✝ : E
N : ℕ∞
hg : ContDiffOn 𝕜 N g t
hf : ContDiffOn 𝕜 N f s
hn : ↑n ≤ N
ht : UniqueDiffOn 𝕜 t
hs : UniqueDiffOn 𝕜 s
hst : MapsTo f s t
hx : x✝ ∈ s
C D : ℝ
Fu : Type (max uF uG) := ULift F
Gu : Type (max uF uG) := ULift G
isoF : Fu ≃ₗᵢ[𝕜] F
isoG : Gu ≃ₗᵢ[𝕜] G
fu : E → Fu := ↑(LinearIsometryEquiv.symm isoF) ∘ f
gu : Fu → Gu := ↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF
tu : Set Fu := ↑isoF ⁻¹' t
htu : UniqueDiffOn 𝕜 tu
hstu : MapsTo fu s tu
Ffu : ↑isoF (fu x✝) = f x✝
hfu : ContDiffOn 𝕜 (↑n) fu s
hgu : ContDiffOn 𝕜 (↑n) gu tu
Nfu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i fu s x✝‖ = ‖iteratedFDerivWithin 𝕜 i f s x✝‖
hD : ∀ (i : ℕ), 1 ≤ i → i ≤ n → ‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoF) ∘ f) s x✝‖ ≤ D ^ i
Ngu : ∀ (i : ℕ), ‖iteratedFDerivWithin 𝕜 i gu tu (fu x✝)‖ = ‖iteratedFDerivWithin 𝕜 i g t (f x✝)‖
hC :
∀ (i : ℕ),
i ≤ n →
‖iteratedFDerivWithin 𝕜 i (↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ ↑isoF) (↑isoF ⁻¹' t)
((↑(LinearIsometryEquiv.symm isoF) ∘ f) x✝)‖ ≤
C
x : E
⊢ ((gu ∘ fu) x).down = ((↑(LinearIsometryEquiv.symm isoG) ∘ g ∘ f) x).down State After: no goals Tactic: simp only [comp_apply, LinearIsometryEquiv.map_eq_iff, LinearIsometryEquiv.apply_symm_apply] |
Formal statement is: lemma eventually_at_to_0: "eventually P (at a) \<longleftrightarrow> eventually (\<lambda>x. P (x + a)) (at 0)" for a :: "'a::real_normed_vector" Informal statement is: For a predicate $P$ on a real normed vector space and a point $a$, $P(x)$ holds eventually at $a$ (that is, for all $x \neq a$ sufficiently close to $a$) if and only if $P(x + a)$ holds eventually at $0$. |
section {* \isaheader{Set Implementation by Arrays} *}
theory ArraySetImpl
imports
"../spec/SetSpec"
"ArrayMapImpl"
"../gen_algo/SetByMap"
"../gen_algo/SetGA"
begin
text_raw {*\label{thy:ArraySetImpl}*}
(*@impl Set
@type ias
@abbrv ias,is
Sets of natural numbers implemented by arrays.
*)
subsection "Definitions"
type_synonym ias = "(unit) iam"
setup Locale_Code.open_block
interpretation ias_sbm: OSetByOMap iam_basic_ops by unfold_locales
setup Locale_Code.close_block
definition ias_ops :: "(nat,ias) oset_ops"
where [icf_rec_def]:
"ias_ops \<equiv> ias_sbm.obasic.dflt_oops"
setup Locale_Code.open_block
interpretation ias: StdOSet ias_ops
unfolding ias_ops_def by (rule ias_sbm.obasic.dflt_oops_impl)
interpretation ias: StdSet_no_invar ias_ops
by unfold_locales (simp add: icf_rec_unf SetByMapDefs.invar_def)
setup Locale_Code.close_block
setup {* ICF_Tools.revert_abbrevs "ias"*}
lemmas ias_it_to_it_map_code_unfold[code_unfold] =
it_to_it_map_fold'[OF pi_iam]
it_to_it_map_fold'[OF pi_iam_rev]
lemma pi_ias[proper_it]:
"proper_it' ias.iteratei ias.iteratei"
"proper_it' ias.iterateoi ias.iterateoi"
"proper_it' ias.rev_iterateoi ias.rev_iterateoi"
unfolding ias.iteratei_def[abs_def] ias.iterateoi_def[abs_def]
ias.rev_iterateoi_def[abs_def]
apply (rule proper_it'I icf_proper_iteratorI)+
done
interpretation
pi_ias: proper_it_loc ias.iteratei ias.iteratei +
pi_ias_o: proper_it_loc ias.iterateoi ias.iterateoi +
pi_ias_ro: proper_it_loc ias.rev_iterateoi ias.rev_iterateoi
apply unfold_locales by (rule pi_ias)+
definition test_codegen where "test_codegen \<equiv> (
ias.empty,
ias.memb,
ias.ins,
ias.delete,
ias.list_it,
ias.sng,
ias.isEmpty,
ias.isSng,
ias.ball,
ias.bex,
ias.size,
ias.size_abort,
ias.union,
ias.union_dj,
ias.diff,
ias.filter,
ias.inter,
ias.subset,
ias.equal,
ias.disjoint,
ias.disjoint_witness,
ias.sel,
ias.to_list,
ias.from_list,
ias.ordered_list_it,
ias.rev_list_it,
ias.min,
ias.max,
ias.to_sorted_list,
ias.to_rev_list
)"
export_code test_codegen in SML
end
|
function [yes] = isfilextension(fname, extension)
% See also CHECK_FILE_EXTENSION, CLEAR_FILE_EXTENSION.
%
% File: isfilextension.m
% Author: Ioannis Filippidis, [email protected]
% Date: 2012.06.22
% Language: MATLAB R2012a
% Purpose: check if file has the desired extension
% Copyright: Ioannis Filippidis, 2012-
% dot in extension ?
if ~strcmp(extension(1), '.')
extension = ['.', extension];
end
[~, ~, ext] = fileparts(fname);
% does the file's extension match the desired one ?
if strcmp(ext, extension)
yes = 1;
else
yes = 0;
end
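% Hedged usage sketch (the file names below are made-up examples, not part of
% the original distribution); a leading dot is prepended automatically:
%   isfilextension('results.mat', 'mat')    % -> 1
%   isfilextension('results.mat', '.txt')   % -> 0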
|
Every natural number is a real number. |
module Data.Num.Unary where
open import Data.Unit
open import Data.Empty
open import Data.List
open import Data.Nat renaming (_+_ to _⊹_)
data Digit : Set where
[1] : Digit
Unary : Set
Unary = List Digit
_⊕_ : Digit → Digit → Digit
[1] ⊕ [1] = [1]
_⊚_ : Digit → Digit → Digit
[1] ⊚ [1] = [1]
add : Digit → Unary → Unary
add [1] [] = [1] ∷ []
add [1] ([1] ∷ xs) = [1] ∷ add [1] xs
_+_ : Unary → Unary → Unary
[] + ys = ys
xs + [] = xs
(x ∷ xs) + (y ∷ ys) = x ⊕ y ∷ add (x ⊚ y) (xs + ys)
_≈_ : Unary → Unary → Set
[] ≈ [] = ⊤
[] ≈ (x ∷ ys) = ⊥
(x ∷ xs) ≈ [] = ⊥
(x ∷ xs) ≈ (y ∷ ys) = xs ≈ ys
fromℕ : ℕ → Unary
fromℕ zero = []
fromℕ (suc n) = [1] ∷ fromℕ n
toℕ : Unary → ℕ
toℕ [] = zero
toℕ (_ ∷ xs) = suc (toℕ xs)
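-- Quick sanity check (a sketch one might normalise with C-c C-n in agda-mode):
--   fromℕ 3        evaluates to  [1] ∷ [1] ∷ [1] ∷ []
--   toℕ (fromℕ 3)  evaluates to  3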
|
If $S$ is locally connected and $c$ is a component of $S$, then $c$ is open in $S$. |
Formal statement is: proposition maximum_modulus_principle: assumes holf: "f holomorphic_on S" and S: "open S" and "connected S" and "open U" and "U \<subseteq> S" and "\<xi> \<in> U" and no: "\<And>z. z \<in> U \<Longrightarrow> norm(f z) \<le> norm(f \<xi>)" shows "f constant_on S" Informal statement is: If $f$ is a holomorphic function on an open connected set $S$ and $U$ is an open subset of $S$ such that $f$ attains its maximum modulus on $U$, then $f$ is constant on $S$. |
(* Title: AWN_Cterms.thy
License: BSD 2-Clause. See LICENSE.
Author: Timothy Bourke
*)
header "Control terms and well-definedness of sequential processes"
theory AWN_Cterms
imports AWN
begin
subsection "Microsteps "
text \<open>
We distinguish microsteps from `external' transitions (observable or not). Here, they are
a kind of `hypothetical computation', since, unlike @{text \<tau>}-transitions, they do not make
choices but rather `compute' which choices are possible.
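For example, a choice term has microsteps to each of its branches, and a call term a single
microstep to the body of the named process: no alternative is resolved, the possibilities are
merely exposed.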
\<close>
inductive
microstep :: "('s, 'm, 'p, 'l) seqp_env
\<Rightarrow> ('s, 'm, 'p, 'l) seqp
\<Rightarrow> ('s, 'm, 'p, 'l) seqp
\<Rightarrow> bool"
for \<Gamma> :: "('s, 'm, 'p, 'l) seqp_env"
where
microstep_choiceI1 [intro, simp]: "microstep \<Gamma> (p1 \<oplus> p2) p1"
| microstep_choiceI2 [intro, simp]: "microstep \<Gamma> (p1 \<oplus> p2) p2"
| microstep_callI [intro, simp]: "microstep \<Gamma> (call(pn)) (\<Gamma> pn)"
abbreviation microstep_rtcl
where "microstep_rtcl \<Gamma> p q \<equiv> (microstep \<Gamma>)\<^sup>*\<^sup>* p q"
abbreviation microstep_tcl
where "microstep_tcl \<Gamma> p q \<equiv> (microstep \<Gamma>)\<^sup>+\<^sup>+ p q"
syntax
"_microstep"
:: "[('s, 'm, 'p, 'l) seqp, ('s, 'm, 'p, 'l) seqp_env, ('s, 'm, 'p, 'l) seqp] \<Rightarrow> bool"
("(_) \<leadsto>\<^bsub>_\<^esub> (_)" [61, 0, 61] 50)
"_microstep_rtcl"
:: "[('s, 'm, 'p, 'l) seqp, ('s, 'm, 'p, 'l) seqp_env, ('s, 'm, 'p, 'l) seqp] \<Rightarrow> bool"
("(_) \<leadsto>\<^bsub>_\<^esub>\<^sup>* (_)" [61, 0, 61] 50)
"_microstep_tcl"
:: "[('s, 'm, 'p, 'l) seqp, ('s, 'm, 'p, 'l) seqp_env, ('s, 'm, 'p, 'l) seqp] \<Rightarrow> bool"
("(_) \<leadsto>\<^bsub>_\<^esub>\<^sup>+ (_)" [61, 0, 61] 50)
translations
"p1 \<leadsto>\<^bsub>\<Gamma>\<^esub> p2" \<rightleftharpoons> "CONST microstep \<Gamma> p1 p2"
"p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p2" \<rightleftharpoons> "CONST microstep_rtcl \<Gamma> p1 p2"
"p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>+ p2" \<rightleftharpoons> "CONST microstep_tcl \<Gamma> p1 p2"
lemma microstep_choiceD [dest]:
"(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p \<Longrightarrow> p = p1 \<or> p = p2"
by (ind_cases "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p") auto
lemma microstep_choiceE [elim]:
"\<lbrakk> (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p;
(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p1 \<Longrightarrow> P;
(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p2 \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P"
by (blast)
lemma microstep_callD [dest]:
"(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> p \<Longrightarrow> p = \<Gamma> pn"
by (ind_cases "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> p")
lemma microstep_callE [elim]:
"\<lbrakk> (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> p; p = \<Gamma>(pn) \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P"
by auto
lemma no_microstep_guard: "\<not> (({l}\<langle>g\<rangle> p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}\<langle>g\<rangle> p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_assign: "\<not> ({l}\<lbrakk>f\<rbrakk> p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q"
by (rule notI) (ind_cases "({l}\<lbrakk>f\<rbrakk> p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_unicast: "\<not> (({l}unicast(s\<^sub>i\<^sub>p, s\<^sub>m\<^sub>s\<^sub>g).p \<triangleright> q) \<leadsto>\<^bsub>\<Gamma>\<^esub> r)"
by (rule notI) (ind_cases "({l}unicast(s\<^sub>i\<^sub>p, s\<^sub>m\<^sub>s\<^sub>g).p \<triangleright> q) \<leadsto>\<^bsub>\<Gamma>\<^esub> r")
lemma no_microstep_broadcast: "\<not> (({l}broadcast(s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}broadcast(s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_groupcast: "\<not> (({l}groupcast(s\<^sub>i\<^sub>p\<^sub>s, s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}groupcast(s\<^sub>i\<^sub>p\<^sub>s, s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_send: "\<not> (({l}send(s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}send(s\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_deliver: "\<not> (({l}deliver(s\<^sub>d\<^sub>a\<^sub>t\<^sub>a).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}deliver(s\<^sub>d\<^sub>a\<^sub>t\<^sub>a).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma no_microstep_receive: "\<not> (({l}receive(u\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
by (rule notI) (ind_cases "({l}receive(u\<^sub>m\<^sub>s\<^sub>g).p) \<leadsto>\<^bsub>\<Gamma>\<^esub> q")
lemma microstep_call_or_choice [dest]:
assumes "p \<leadsto>\<^bsub>\<Gamma>\<^esub> q"
shows "(\<exists>pn. p = call(pn)) \<or> (\<exists>p1 p2. p = p1 \<oplus> p2)"
using assms by clarsimp (metis microstep.simps)
lemmas no_microstep [intro,simp] =
no_microstep_guard
no_microstep_assign
no_microstep_unicast
no_microstep_broadcast
no_microstep_groupcast
no_microstep_send
no_microstep_deliver
no_microstep_receive
subsection "Wellformed process specifications "
text \<open>
A process specification @{text \<Gamma>} is wellformed if its @{term "microstep \<Gamma>"} relation is
free of loops and infinite chains.
For example, these specifications are not wellformed:
@{term "\<Gamma>\<^sub>1(p1) = call(p1)"}
@{term "\<Gamma>\<^sub>2(p1) = send(msg).call(p1) \<oplus> call(p1)"}
@{term "\<Gamma>\<^sub>3(p1) = send(msg).call(p2)"}
@{term "\<Gamma>\<^sub>3(p2) = call(p3)"}
@{term "\<Gamma>\<^sub>3(p3) = call(p4)"}
@{term "\<Gamma>\<^sub>3(p4) = call(p5)"}
\ldots
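By contrast, a specification with only @{term "\<Gamma>\<^sub>4(p1) = send(msg).call(p1)"} is wellformed:
the recursive call is guarded by an action, so no infinite chain of microsteps arises.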
\<close>
definition
wellformed :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> bool"
where
"wellformed \<Gamma> = wf {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}"
lemma wellformed_defP: "wellformed \<Gamma> = wfP (\<lambda>q p. p \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
unfolding wellformed_def wfP_def by simp
text \<open>
The induction rule for @{term "wellformed \<Gamma>"} is stronger than @{thm seqp.induct} because
the case for @{term "call(pn)"} can be shown with the assumption on @{term "\<Gamma> pn"}.
\<close>
lemma wellformed_induct
[consumes 1, case_names ASSIGN CHOICE CALL GUARD UCAST BCAST GCAST SEND DELIVER RECEIVE,
induct set: wellformed]:
assumes "wellformed \<Gamma>"
and ASSIGN: "\<And>l f p. wellformed \<Gamma> \<Longrightarrow> P ({l}\<lbrakk>f\<rbrakk> p)"
and GUARD: "\<And>l f p. wellformed \<Gamma> \<Longrightarrow> P ({l}\<langle>f\<rangle> p)"
and UCAST: "\<And>l fip fmsg p q. wellformed \<Gamma> \<Longrightarrow> P ({l}unicast(fip, fmsg). p \<triangleright> q)"
and BCAST: "\<And>l fmsg p. wellformed \<Gamma> \<Longrightarrow> P ({l}broadcast(fmsg). p)"
and GCAST: "\<And>l fips fmsg p. wellformed \<Gamma> \<Longrightarrow> P ({l}groupcast(fips, fmsg). p)"
and SEND: "\<And>l fmsg p. wellformed \<Gamma> \<Longrightarrow> P ({l}send(fmsg). p)"
and DELIVER: "\<And>l fdata p. wellformed \<Gamma> \<Longrightarrow> P ({l}deliver(fdata). p)"
and RECEIVE: "\<And>l fmsg p. wellformed \<Gamma> \<Longrightarrow> P ({l}receive(fmsg). p)"
and CHOICE: "\<And>p1 p2. \<lbrakk> wellformed \<Gamma>; P p1; P p2 \<rbrakk> \<Longrightarrow> P (p1 \<oplus> p2)"
and CALL: "\<And>pn. \<lbrakk> wellformed \<Gamma>; P (\<Gamma> pn) \<rbrakk> \<Longrightarrow> P (call(pn))"
shows "P a"
using assms(1) unfolding wellformed_defP
proof (rule wfP_induct_rule, case_tac x, simp_all)
fix p1 p2
assume "\<And>q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> q \<Longrightarrow> P q"
then obtain "P p1" and "P p2" by (auto intro!: microstep.intros)
thus "P (p1 \<oplus> p2)" by (rule CHOICE [OF \<open>wellformed \<Gamma>\<close>])
next
fix pn
assume "\<And>q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> q \<Longrightarrow> P q"
hence "P (\<Gamma> pn)" by (auto intro!: microstep.intros)
thus "P (call(pn))" by (rule CALL [OF \<open>wellformed \<Gamma>\<close>])
qed (auto intro: assms)
subsection "Start terms (sterms) "
text \<open>
Formulate sets of local subterms from which an action is directly possible. Since the
process specification @{term "\<Gamma>"} is not considered, only choice terms @{term "p1 \<oplus> p2"}
are traversed, and not @{term "call(p)"} terms.
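For instance, applied to @{term "(call(pn) \<oplus> p) \<oplus> q"} it keeps @{term "call(pn)"} as a leaf
and otherwise collects the local subterms of @{term p} and @{term q}.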
\<close>
fun stermsl :: "('s, 'm, 'p, 'l) seqp \<Rightarrow> ('s, 'm, 'p , 'l) seqp set"
where
"stermsl (p1 \<oplus> p2) = stermsl p1 \<union> stermsl p2"
| "stermsl p = {p}"
lemma stermsl_nobigger: "q \<in> stermsl p \<Longrightarrow> size q \<le> size p"
by (induct p) auto
lemma stermsl_no_choice[simp]: "p1 \<oplus> p2 \<notin> stermsl p"
by (induct p) simp_all
lemma stermsl_choice_disj[simp]:
"p \<in> stermsl (p1 \<oplus> p2) = (p \<in> stermsl p1 \<or> p \<in> stermsl p2)"
by simp
lemma stermsl_in_branch[elim]:
"\<lbrakk>p \<in> stermsl (p1 \<oplus> p2); p \<in> stermsl p1 \<Longrightarrow> P; p \<in> stermsl p2 \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
by auto
lemma stermsl_commute:
"stermsl (p1 \<oplus> p2) = stermsl (p2 \<oplus> p1)"
by simp (rule Un_commute)
lemma stermsl_not_empty:
"stermsl p \<noteq> {}"
by (induct p) auto
lemma stermsl_idem [simp]:
"(\<Union>q\<in>stermsl p. stermsl q) = stermsl p"
by (induct p) simp_all
lemma stermsl_in_wfpf:
assumes AA: "A \<subseteq> {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q} `` A"
and *: "p \<in> A"
shows "\<exists>r\<in>stermsl p. r \<in> A"
using *
proof (induction p)
fix p1 p2
assume IH1: "p1 \<in> A \<Longrightarrow> \<exists>r\<in>stermsl p1. r \<in> A"
and IH2: "p2 \<in> A \<Longrightarrow> \<exists>r\<in>stermsl p2. r \<in> A"
and *: "p1 \<oplus> p2 \<in> A"
from * and AA have "p1 \<oplus> p2 \<in> {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q} `` A" by auto
hence "p1 \<in> A \<or> p2 \<in> A" by auto
hence "(\<exists>r\<in>stermsl p1. r \<in> A) \<or> (\<exists>r\<in>stermsl p2. r \<in> A)"
proof
assume "p1 \<in> A" hence "\<exists>r\<in>stermsl p1. r \<in> A" by (rule IH1) thus ?thesis ..
next
assume "p2 \<in> A" hence "\<exists>r\<in>stermsl p2. r \<in> A" by (rule IH2) thus ?thesis ..
qed
hence "\<exists>r\<in>stermsl p1 \<union> stermsl p2. r \<in> A" by blast
thus "\<exists>r\<in>stermsl (p1 \<oplus> p2). r \<in> A" by simp
next case UCAST from UCAST.prems show ?case by auto
qed auto
lemma nocall_stermsl_max:
assumes "r \<in> stermsl p"
and "not_call r"
shows "\<not> (r \<leadsto>\<^bsub>\<Gamma>\<^esub> q)"
using assms
by (induction p) auto
theorem wf_no_direct_calls[intro]:
fixes \<Gamma> :: "('s, 'm, 'p, 'l) seqp_env"
assumes no_calls: "\<And>pn. \<forall>pn'. call(pn') \<notin> stermsl(\<Gamma>(pn))"
shows "wellformed \<Gamma>"
unfolding wellformed_def wfP_def
proof (rule wfI_pf)
fix A
assume ARA: "A \<subseteq> {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q} `` A"
hence hasnext: "\<And>p. p \<in> A \<Longrightarrow> \<exists>q. p \<leadsto>\<^bsub>\<Gamma>\<^esub> q \<and> q \<in> A" by auto
show "A = {}"
proof (rule Set.equals0I)
fix p assume "p \<in> A" thus "False"
proof (induction p)
fix l f p'
assume *: "{l}\<langle>f\<rangle> p' \<in> A"
from hasnext [OF *] have "\<exists>q. ({l}\<langle>f\<rangle> p') \<leadsto>\<^bsub>\<Gamma>\<^esub> q" by simp
thus "False" by simp
next
fix p1 p2
assume *: "p1 \<oplus> p2 \<in> A"
and IH1: "p1 \<in> A \<Longrightarrow> False"
and IH2: "p2 \<in> A \<Longrightarrow> False"
have "\<exists>q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> q \<and> q \<in> A" by (rule hasnext [OF *])
hence "p1 \<in> A \<or> p2 \<in> A" by auto
thus "False" by (auto dest: IH1 IH2)
next
fix pn
assume "call(pn) \<in> A"
hence "\<exists>q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> q \<and> q \<in> A" by (rule hasnext)
hence "\<Gamma>(pn) \<in> A" by auto
with ARA [THEN stermsl_in_wfpf] obtain q where "q\<in>stermsl (\<Gamma> pn)" and "q \<in> A" by metis
hence "not_call q" using no_calls [of pn]
unfolding not_call_def by auto
from hasnext [OF \<open>q \<in> A\<close>] obtain q' where "q \<leadsto>\<^bsub>\<Gamma>\<^esub> q'" by auto
moreover from \<open>q \<in> stermsl (\<Gamma> pn)\<close> \<open>not_call q\<close> have "\<not> (q \<leadsto>\<^bsub>\<Gamma>\<^esub> q')"
by (rule nocall_stermsl_max)
ultimately show "False" by simp
qed (auto dest: hasnext)
qed
qed
subsection "Start terms"
text \<open>
The start terms are those terms, relative to a wellformed process specification @{text \<Gamma>},
from which transitions can occur directly.
\<close>
function (domintros, sequential) sterms
:: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> ('s, 'm, 'p, 'l) seqp \<Rightarrow> ('s, 'm, 'p, 'l) seqp set"
where
sterms_choice: "sterms \<Gamma> (p1 \<oplus> p2) = sterms \<Gamma> p1 \<union> sterms \<Gamma> p2"
| sterms_call: "sterms \<Gamma> (call(pn)) = sterms \<Gamma> (\<Gamma> pn)"
| sterms_other: "sterms \<Gamma> p = {p}"
by pat_completeness auto
lemma sterms_dom_basic[simp]:
assumes "not_call p"
and "not_choice p"
shows "sterms_dom (\<Gamma>, p)"
proof (rule accpI)
fix y
assume "sterms_rel y (\<Gamma>, p)"
with assms show "sterms_dom y"
by (cases p) (auto simp: sterms_rel.simps)
qed
lemma sterms_termination:
assumes "wellformed \<Gamma>"
shows "sterms_dom (\<Gamma>, p)"
proof -
have sterms_rel':
"sterms_rel = (\<lambda>gq gp. (gq, gp) \<in> {((\<Gamma>, q), (\<Gamma>', p)). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q})"
by (rule ext)+ (auto simp: sterms_rel.simps elim: microstep.cases)
from assms have "\<forall>x. x \<in> Wellfounded.acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}"
unfolding wellformed_def by (simp add: wf_acc_iff)
hence "p \<in> Wellfounded.acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}" ..
hence "(\<Gamma>, p) \<in> Wellfounded.acc {((\<Gamma>, q), (\<Gamma>', p)). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}"
by (rule acc_induct) (auto intro: accI)
thus "sterms_dom (\<Gamma>, p)" unfolding sterms_rel' accp_acc_eq .
qed
declare sterms.psimps [simp]
lemmas sterms_psimps[simp] = sterms.psimps [OF sterms_termination]
and sterms_pinduct = sterms.pinduct [OF sterms_termination]
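text \<open>
  As an illustration of these rules (with pn and q arbitrary placeholders): under a
  wellformed specification, the start terms of a choice whose left branch is a call are
  obtained by unfolding the call.
\<close>
lemma
  assumes "wellformed \<Gamma>"
  shows "sterms \<Gamma> ((call(pn)) \<oplus> q) = sterms \<Gamma> (\<Gamma> pn) \<union> sterms \<Gamma> q"
  using assms by simp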
lemma sterms_reflD [dest]:
assumes "q \<in> sterms \<Gamma> p"
and "not_choice p" "not_call p"
shows "q = p"
using assms by (cases p) auto
lemma sterms_choice_disj [simp]:
assumes "wellformed \<Gamma>"
shows "p \<in> sterms \<Gamma> (p1 \<oplus> p2) = (p \<in> sterms \<Gamma> p1 \<or> p \<in> sterms \<Gamma> p2)"
using assms by (simp)
lemma sterms_no_choice [simp]:
assumes "wellformed \<Gamma>"
shows "p1 \<oplus> p2 \<notin> sterms \<Gamma> p"
using assms by induction auto
lemma sterms_not_choice [simp]:
assumes "wellformed \<Gamma>"
and "q \<in> sterms \<Gamma> p"
shows "not_choice q"
using assms unfolding not_choice_def
by (auto dest: sterms_no_choice)
lemma sterms_no_call [simp]:
assumes "wellformed \<Gamma>"
shows "call(pn) \<notin> sterms \<Gamma> p"
using assms by induction auto
lemma sterms_not_call [simp]:
assumes "wellformed \<Gamma>"
and "q \<in> sterms \<Gamma> p"
shows "not_call q"
using assms unfolding not_call_def
by (auto dest: sterms_no_call)
lemma sterms_in_branch:
assumes "wellformed \<Gamma>"
and "p \<in> sterms \<Gamma> (p1 \<oplus> p2)"
and "p \<in> sterms \<Gamma> p1 \<Longrightarrow> P"
and "p \<in> sterms \<Gamma> p2 \<Longrightarrow> P"
shows "P"
using assms by auto
lemma sterms_commute:
assumes "wellformed \<Gamma>"
shows "sterms \<Gamma> (p1 \<oplus> p2) = sterms \<Gamma> (p2 \<oplus> p1)"
using assms by simp (rule Un_commute)
lemma sterms_not_empty:
assumes "wellformed \<Gamma>"
shows "sterms \<Gamma> p \<noteq> {}"
using assms
by (induct p rule: sterms_pinduct [OF \<open>wellformed \<Gamma>\<close>]) simp_all
lemma sterms_sterms [simp]:
assumes "wellformed \<Gamma>"
shows "(\<Union>x\<in>sterms \<Gamma> p. sterms \<Gamma> x) = sterms \<Gamma> p"
using assms by induction simp_all
lemma sterms_stermsl:
assumes "ps \<in> sterms \<Gamma> p"
and "wellformed \<Gamma>"
shows "ps \<in> stermsl p \<or> (\<exists>pn. ps \<in> stermsl (\<Gamma> pn))"
using assms by (induction p rule: sterms_pinduct [OF \<open>wellformed \<Gamma>\<close>]) auto
lemma stermsl_sterms [elim]:
assumes "q \<in> stermsl p"
and "not_call q"
and "wellformed \<Gamma>"
shows "q \<in> sterms \<Gamma> p"
using assms by (induct p) auto
lemma sterms_stermsl_heads:
assumes "ps \<in> sterms \<Gamma> (\<Gamma> pn)"
and "wellformed \<Gamma>"
shows "\<exists>pn. ps \<in> stermsl (\<Gamma> pn)"
proof -
from assms have "ps \<in> stermsl (\<Gamma> pn) \<or> (\<exists>pn'. ps \<in> stermsl (\<Gamma> pn'))"
by (rule sterms_stermsl)
thus ?thesis by auto
qed
lemma sterms_subterms [dest]:
assumes "wellformed \<Gamma>"
and "\<exists>pn. p \<in> subterms (\<Gamma> pn)"
and "q \<in> sterms \<Gamma> p"
shows "\<exists>pn. q \<in> subterms (\<Gamma> pn)"
using assms by (induct p) auto
lemma no_microsteps_sterms_refl:
assumes "wellformed \<Gamma>"
shows "(\<not>(\<exists>q. p \<leadsto>\<^bsub>\<Gamma>\<^esub> q)) = (sterms \<Gamma> p = {p})"
proof (cases p)
fix p1 p2
assume "p = p1 \<oplus> p2"
from \<open>wellformed \<Gamma>\<close> have "p1 \<oplus> p2 \<notin> sterms \<Gamma> (p1 \<oplus> p2)" by simp
hence "sterms \<Gamma> (p1 \<oplus> p2) \<noteq> {p1 \<oplus> p2}" by auto
moreover have "\<exists>q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> q" by auto
ultimately show ?thesis
using \<open>p = p1 \<oplus> p2\<close> by simp
next
fix pn
assume "p = call(pn)"
from \<open>wellformed \<Gamma>\<close> have "call(pn) \<notin> sterms \<Gamma> (call(pn))" by simp
hence "sterms \<Gamma> (call(pn)) \<noteq> {call(pn)}" by auto
moreover have "\<exists>q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> q" by auto
ultimately show ?thesis
using \<open>p = call(pn)\<close> by simp
qed simp_all
lemma sterms_maximal [elim]:
assumes "wellformed \<Gamma>"
and "q \<in> sterms \<Gamma> p"
shows "sterms \<Gamma> q = {q}"
using assms by (cases q) auto
lemma microstep_rtranscl_equal:
assumes "not_call p"
and "not_choice p"
and "p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q"
shows "q = p"
using assms(3) proof (rule converse_rtranclpE)
fix p'
assume "p \<leadsto>\<^bsub>\<Gamma>\<^esub> p'"
with assms(1-2) show "q = p"
by (cases p) simp_all
qed simp
lemma microstep_rtranscl_singleton [simp]:
assumes "not_call p"
and "not_choice p"
shows "{q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} = {p}"
proof (rule set_eqI)
fix p'
show "(p' \<in> {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}) = (p' \<in> {p})"
proof
assume "p' \<in> {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
hence "(microstep \<Gamma>)\<^sup>*\<^sup>* p p'" and "sterms \<Gamma> p' = {p'}" by auto
from this(1) have "p' = p"
proof (rule converse_rtranclpE)
fix q assume "p \<leadsto>\<^bsub>\<Gamma>\<^esub> q"
with \<open>not_call p\<close> and \<open>not_choice p\<close> have False
by (cases p) auto
thus "p' = p" ..
qed simp
thus "p' \<in> {p}" by simp
next
assume "p' \<in> {p}"
hence "p' = p" ..
with \<open>not_call p\<close> and \<open>not_choice p\<close> show "p' \<in> {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
by (cases p) simp_all
qed
qed
theorem sterms_maximal_microstep:
assumes "wellformed \<Gamma>"
shows "sterms \<Gamma> p = {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> \<not>(\<exists>q'. q \<leadsto>\<^bsub>\<Gamma>\<^esub> q')}"
proof
from \<open>wellformed \<Gamma>\<close> have "sterms \<Gamma> p \<subseteq> {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
proof induction
fix p1 p2
assume IH1: "sterms \<Gamma> p1 \<subseteq> {q. p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
and IH2: "sterms \<Gamma> p2 \<subseteq> {q. p2 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
have "sterms \<Gamma> p1 \<subseteq> {q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
proof
fix p'
assume "p' \<in> sterms \<Gamma> p1"
with IH1 have "p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'" by auto
moreover have "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p1" ..
ultimately have "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'"
by - (rule converse_rtranclp_into_rtranclp)
moreover from \<open>wellformed \<Gamma>\<close> and \<open>p' \<in> sterms \<Gamma> p1\<close> have "sterms \<Gamma> p' = {p'}" ..
ultimately show "p' \<in> {q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
by simp
qed
moreover have "sterms \<Gamma> p2 \<subseteq> {q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
proof
fix p'
assume "p' \<in> sterms \<Gamma> p2"
with IH2 have "p2 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'" and "sterms \<Gamma> p' = {p'}" by auto
moreover have "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub> p2" ..
ultimately have "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'"
by - (rule converse_rtranclp_into_rtranclp)
with \<open>sterms \<Gamma> p' = {p'}\<close> show "p' \<in> {q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
by simp
qed
ultimately show "sterms \<Gamma> (p1 \<oplus> p2) \<subseteq> {q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
using \<open>wellformed \<Gamma>\<close> by simp
next
fix pn
assume IH: "sterms \<Gamma> (\<Gamma> pn) \<subseteq> {q. \<Gamma> pn \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
show "sterms \<Gamma> (call(pn)) \<subseteq> {q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
proof
fix p'
assume "p' \<in> sterms \<Gamma> (call(pn))"
with \<open>wellformed \<Gamma>\<close> have "p' \<in> sterms \<Gamma> (\<Gamma> pn)" by simp
with IH have "\<Gamma> pn \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'" and "sterms \<Gamma> p' = {p'}" by auto
note this(1)
moreover have "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> \<Gamma> pn" by simp
ultimately have "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* p'"
by - (rule converse_rtranclp_into_rtranclp)
with \<open>sterms \<Gamma> p' = {p'}\<close> show "p' \<in> {q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}}"
by simp
qed
qed simp_all
with \<open>wellformed \<Gamma>\<close> show "sterms \<Gamma> p \<subseteq> {q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> \<not>(\<exists>q'. q \<leadsto>\<^bsub>\<Gamma>\<^esub> q')}"
by (simp only: no_microsteps_sterms_refl)
next
from \<open>wellformed \<Gamma>\<close> have "{q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> p"
proof (induction)
fix p1 p2
assume IH1: "{q. p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> p1"
and IH2: "{q. p2 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> p2"
show "{q. (p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> (p1 \<oplus> p2)"
proof (rule, drule CollectD, erule conjE)
fix q'
assume "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
and "sterms \<Gamma> q' = {q'}"
with \<open>wellformed \<Gamma>\<close> have "(p1 \<oplus> p2) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>+ q'"
by (auto dest!: rtranclpD sterms_no_choice)
hence "p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q' \<or> p2 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
by (auto dest: tranclpD)
thus "q' \<in> sterms \<Gamma> (p1 \<oplus> p2)"
proof
assume "p1 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
with IH1 and \<open>sterms \<Gamma> q' = {q'}\<close> have "q' \<in> sterms \<Gamma> p1" by auto
with \<open>wellformed \<Gamma>\<close> show ?thesis by auto
next
assume "p2 \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
with IH2 and \<open>sterms \<Gamma> q' = {q'}\<close> have "q' \<in> sterms \<Gamma> p2" by auto
with \<open>wellformed \<Gamma>\<close> show ?thesis by auto
qed
qed
next
fix pn
assume IH: "{q. \<Gamma> pn \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> (\<Gamma> pn)"
show "{q. (call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> sterms \<Gamma> q = {q}} \<subseteq> sterms \<Gamma> (call(pn))"
proof (rule, drule CollectD, erule conjE)
fix q'
assume "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
and "sterms \<Gamma> q' = {q'}"
with \<open>wellformed \<Gamma>\<close> have "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>+ q'"
by (auto dest!: rtranclpD sterms_no_call)
moreover have "(call(pn)) \<leadsto>\<^bsub>\<Gamma>\<^esub> \<Gamma> pn" ..
ultimately have "\<Gamma> pn \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q'"
by (auto dest!: tranclpD)
with \<open>sterms \<Gamma> q' = {q'}\<close> and IH have "q' \<in> sterms \<Gamma> (\<Gamma> pn)" by auto
with \<open>wellformed \<Gamma>\<close> show "q' \<in> sterms \<Gamma> (call(pn))" by simp
qed
qed simp_all
with \<open>wellformed \<Gamma>\<close> show "{q. p \<leadsto>\<^bsub>\<Gamma>\<^esub>\<^sup>* q \<and> \<not>(\<exists>q'. q \<leadsto>\<^bsub>\<Gamma>\<^esub> q')} \<subseteq> sterms \<Gamma> p"
by (simp only: no_microsteps_sterms_refl)
qed
subsection "Derivative terms "
text \<open>
The derivatives of a term are those @{term sterm}s potentially reachable by taking a
transition, relative to a wellformed process specification @{text \<Gamma>}. These terms
overapproximate the reachable sterms, since the truth of guards is not considered.
\<close>
function (domintros) dterms
:: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> ('s, 'm, 'p, 'l) seqp \<Rightarrow> ('s, 'm, 'p, 'l) seqp set"
where
"dterms \<Gamma> ({l}\<langle>g\<rangle> p) = sterms \<Gamma> p"
| "dterms \<Gamma> ({l}\<lbrakk>u\<rbrakk> p) = sterms \<Gamma> p"
| "dterms \<Gamma> (p1 \<oplus> p2) = dterms \<Gamma> p1 \<union> dterms \<Gamma> p2"
| "dterms \<Gamma> ({l}unicast(s\<^sub>i\<^sub>p, s\<^sub>m\<^sub>s\<^sub>g).p \<triangleright> q) = sterms \<Gamma> p \<union> sterms \<Gamma> q"
| "dterms \<Gamma> ({l}broadcast(s\<^sub>m\<^sub>s\<^sub>g). p) = sterms \<Gamma> p"
| "dterms \<Gamma> ({l}groupcast(s\<^sub>i\<^sub>p\<^sub>s, s\<^sub>m\<^sub>s\<^sub>g). p) = sterms \<Gamma> p"
| "dterms \<Gamma> ({l}send(s\<^sub>m\<^sub>s\<^sub>g).p) = sterms \<Gamma> p"
| "dterms \<Gamma> ({l}deliver(s\<^sub>d\<^sub>a\<^sub>t\<^sub>a).p) = sterms \<Gamma> p"
| "dterms \<Gamma> ({l}receive(u\<^sub>m\<^sub>s\<^sub>g).p) = sterms \<Gamma> p"
| "dterms \<Gamma> (call(pn)) = dterms \<Gamma> (\<Gamma> pn)"
by pat_completeness auto
lemma dterms_dom_basic [simp]:
assumes "not_call p"
and "not_choice p"
shows "dterms_dom (\<Gamma>, p)"
proof (rule accpI)
fix y
assume "dterms_rel y (\<Gamma>, p)"
with assms show "dterms_dom y"
by (cases p) (auto simp: dterms_rel.simps)
qed
lemma dterms_termination:
assumes "wellformed \<Gamma>"
shows "dterms_dom (\<Gamma>, p)"
proof -
have dterms_rel': "dterms_rel = (\<lambda>gq gp. (gq, gp) \<in> {((\<Gamma>, q), (\<Gamma>', p)). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q})"
by (rule ext)+ (auto simp: dterms_rel.simps elim: microstep.cases)
from \<open>wellformed(\<Gamma>)\<close> have "\<forall>x. x \<in> Wellfounded.acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}"
unfolding wellformed_def by (simp add: wf_acc_iff)
hence "p \<in> Wellfounded.acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}" ..
hence "(\<Gamma>, p) \<in> Wellfounded.acc {((\<Gamma>, q), \<Gamma>', p). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}"
by (rule acc_induct) (auto intro: accI)
thus "dterms_dom (\<Gamma>, p)"
unfolding dterms_rel' by (subst accp_acc_eq)
qed
lemmas dterms_psimps [simp] = dterms.psimps [OF dterms_termination]
and dterms_pinduct = dterms.pinduct [OF dterms_termination]
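text \<open>
  As an illustration (with l, fmsg, and pn arbitrary placeholders): after a receive prefix
  the derivative terms are the start terms of the continuation, with any call unfolded.
\<close>
lemma
  assumes "wellformed \<Gamma>"
  shows "dterms \<Gamma> ({l}receive(fmsg). call(pn)) = sterms \<Gamma> (\<Gamma> pn)"
  using assms by simp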
lemma sterms_after_dterms [simp]:
assumes "wellformed \<Gamma>"
shows "(\<Union>x\<in>dterms \<Gamma> p. sterms \<Gamma> x) = dterms \<Gamma> p"
using assms by (induction p) simp_all
lemma sterms_before_dterms [simp]:
assumes "wellformed \<Gamma>"
shows "(\<Union>x\<in>sterms \<Gamma> p. dterms \<Gamma> x) = dterms \<Gamma> p"
using assms by (induction p) simp_all
lemma dterms_choice_disj [simp]:
assumes "wellformed \<Gamma>"
shows "p \<in> dterms \<Gamma> (p1 \<oplus> p2) = (p \<in> dterms \<Gamma> p1 \<or> p \<in> dterms \<Gamma> p2)"
using assms by (simp)
lemma dterms_in_branch:
assumes "wellformed \<Gamma>"
and "p \<in> dterms \<Gamma> (p1 \<oplus> p2)"
and "p \<in> dterms \<Gamma> p1 \<Longrightarrow> P"
and "p \<in> dterms \<Gamma> p2 \<Longrightarrow> P"
shows "P"
using assms by auto
lemma dterms_no_choice:
assumes "wellformed \<Gamma>"
shows "p1 \<oplus> p2 \<notin> dterms \<Gamma> p"
using assms by induction simp_all
lemma dterms_not_choice [simp]:
assumes "wellformed \<Gamma>"
and "q \<in> dterms \<Gamma> p"
shows "not_choice q"
using assms unfolding not_choice_def
by (auto dest: dterms_no_choice)
lemma dterms_no_call:
assumes "wellformed \<Gamma>"
shows "call(pn) \<notin> dterms \<Gamma> p"
using assms by induction simp_all
lemma dterms_not_call [simp]:
assumes "wellformed \<Gamma>"
and "q \<in> dterms \<Gamma> p"
shows "not_call q"
using assms unfolding not_call_def
by (auto dest: dterms_no_call)
lemma dterms_subterms:
assumes wf: "wellformed \<Gamma>"
and "\<exists>pn. p \<in> subterms (\<Gamma> pn)"
and "q \<in> dterms \<Gamma> p"
shows "\<exists>pn. q \<in> subterms (\<Gamma> pn)"
using assms
proof (induct p)
fix p1 p2
assume IH1: "\<exists>pn. p1 \<in> subterms (\<Gamma> pn) \<Longrightarrow> q \<in> dterms \<Gamma> p1 \<Longrightarrow> \<exists>pn. q \<in> subterms (\<Gamma> pn)"
and IH2: "\<exists>pn. p2 \<in> subterms (\<Gamma> pn) \<Longrightarrow> q \<in> dterms \<Gamma> p2 \<Longrightarrow> \<exists>pn. q \<in> subterms (\<Gamma> pn)"
and *: "\<exists>pn. p1 \<oplus> p2 \<in> subterms (\<Gamma> pn)"
and "q \<in> dterms \<Gamma> (p1 \<oplus> p2)"
from * obtain pn where "p1 \<oplus> p2 \<in> subterms (\<Gamma> pn)"
by auto
hence "p1 \<in> subterms (\<Gamma> pn)" and "p2 \<in> subterms (\<Gamma> pn)"
by auto
from \<open>q \<in> dterms \<Gamma> (p1 \<oplus> p2)\<close> wf have "q \<in> dterms \<Gamma> p1 \<or> q \<in> dterms \<Gamma> p2"
by auto
thus "\<exists>pn. q \<in> subterms (\<Gamma> pn)"
proof
assume "q \<in> dterms \<Gamma> p1"
with \<open>p1 \<in> subterms (\<Gamma> pn)\<close> show ?thesis
by (auto intro: IH1)
next
assume "q \<in> dterms \<Gamma> p2"
with \<open>p2 \<in> subterms (\<Gamma> pn)\<close> show ?thesis
by (auto intro: IH2)
qed
qed auto
text \<open>
Note that the converse of @{thm dterms_subterms} is not true because @{term dterm}s are an
over-approximation; i.e., we cannot show, in general, that guards return a non-empty set
of post-states.
\<close>
subsection "Control terms "
text \<open>
The control terms of a process specification @{term \<Gamma>} are those subterms from which
transitions are directly possible. We can omit @{term "call(pn)"} terms, since
the root terms of all processes are considered, and also @{term "p1 \<oplus> p2"} terms
since they effectively combine the transitions of the subterms @{term p1} and
@{term p2}.
It will be shown that only the control terms, rather than all subterms, need be
considered in invariant proofs.
\<close>
inductive_set
cterms :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> ('s, 'm, 'p, 'l) seqp set"
for \<Gamma> :: "('s, 'm, 'p, 'l) seqp_env"
where
ctermsSI[intro]: "p \<in> sterms \<Gamma> (\<Gamma> pn) \<Longrightarrow> p \<in> cterms \<Gamma>"
| ctermsDI[intro]: "\<lbrakk> pp \<in> cterms \<Gamma>; p \<in> dterms \<Gamma> pp \<rbrakk> \<Longrightarrow> p \<in> cterms \<Gamma>"
lemma cterms_not_choice [simp]:
assumes "wellformed \<Gamma>"
and "p \<in> cterms \<Gamma>"
shows "not_choice p"
using assms
proof (cases p)
case CHOICE from \<open>p \<in> cterms \<Gamma>\<close> show ?thesis
using \<open>wellformed \<Gamma>\<close> by cases simp_all
qed simp_all
lemma cterms_no_choice [simp]:
assumes "wellformed \<Gamma>"
shows "p1 \<oplus> p2 \<notin> cterms \<Gamma>"
using assms by (auto dest: cterms_not_choice)
lemma cterms_not_call [simp]:
assumes "wellformed \<Gamma>"
and "p \<in> cterms \<Gamma>"
shows "not_call p"
using assms
proof (cases p)
case CALL from \<open>p \<in> cterms \<Gamma>\<close> show ?thesis
using \<open>wellformed \<Gamma>\<close> by cases simp_all
qed simp_all
lemma cterms_no_call [simp]:
assumes "wellformed \<Gamma>"
shows "call(pn) \<notin> cterms \<Gamma>"
using assms by (auto dest: cterms_not_call)
lemma sterms_cterms [elim]:
assumes "p \<in> cterms \<Gamma>"
and "q \<in> sterms \<Gamma> p"
and "wellformed \<Gamma>"
shows "q \<in> cterms \<Gamma>"
using assms by - (cases p, auto)
lemma dterms_cterms [elim]:
assumes "p \<in> cterms \<Gamma>"
and "q \<in> dterms \<Gamma> p"
and "wellformed \<Gamma>"
shows "q \<in> cterms \<Gamma>"
using assms by (cases p) auto
lemma derivs_in_cterms [simp]:
"\<And>l f p. {l}\<langle>f\<rangle> p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l f p. {l}\<lbrakk>f\<rbrakk> p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l fip fmsg q p. {l}unicast(fip, fmsg). p \<triangleright> q \<in> cterms \<Gamma>
\<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma> \<and> sterms \<Gamma> q \<subseteq> cterms \<Gamma>"
"\<And>l fmsg p. {l}broadcast(fmsg).p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l fips fmsg p. {l}groupcast(fips, fmsg).p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l fmsg p. {l}send(fmsg).p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l fdata p. {l}deliver(fdata).p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
"\<And>l fmsg p. {l}receive(fmsg).p \<in> cterms \<Gamma> \<Longrightarrow> sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
by (auto simp: dterms.psimps)
subsection "Local control terms"
text \<open>
  We introduce a `local' version of @{term cterms} that does not step through calls and is
  thus defined independently of a process specification @{term \<Gamma>}.
  This allows an alternative, terminating characterisation of cterms as a set of
  subterms. Including @{term "call(pn)"}s in the set makes for a simpler relation with
  @{term "stermsl"}, even if they must be filtered out for the desired characterisation.
\<close>
function
ctermsl :: "('s, 'm, 'p, 'l) seqp \<Rightarrow> ('s, 'm, 'p , 'l) seqp set"
where
"ctermsl ({l}\<langle>g\<rangle> p) = insert ({l}\<langle>g\<rangle> p) (ctermsl p)"
| "ctermsl ({l}\<lbrakk>u\<rbrakk> p) = insert ({l}\<lbrakk>u\<rbrakk> p) (ctermsl p)"
| "ctermsl ({l}unicast(s\<^sub>i\<^sub>p, s\<^sub>m\<^sub>s\<^sub>g). p \<triangleright> q) = insert ({l}unicast(s\<^sub>i\<^sub>p, s\<^sub>m\<^sub>s\<^sub>g). p \<triangleright> q)
(ctermsl p \<union> ctermsl q)"
| "ctermsl ({l}broadcast(s\<^sub>m\<^sub>s\<^sub>g). p) = insert ({l}broadcast(s\<^sub>m\<^sub>s\<^sub>g). p) (ctermsl p)"
| "ctermsl ({l}groupcast(s\<^sub>i\<^sub>p\<^sub>s, s\<^sub>m\<^sub>s\<^sub>g). p) = insert ({l}groupcast(s\<^sub>i\<^sub>p\<^sub>s, s\<^sub>m\<^sub>s\<^sub>g). p) (ctermsl p)"
| "ctermsl ({l}send(s\<^sub>m\<^sub>s\<^sub>g). p) = insert ({l}send(s\<^sub>m\<^sub>s\<^sub>g). p) (ctermsl p)"
| "ctermsl ({l}deliver(s\<^sub>d\<^sub>a\<^sub>t\<^sub>a). p) = insert ({l}deliver(s\<^sub>d\<^sub>a\<^sub>t\<^sub>a). p) (ctermsl p)"
| "ctermsl ({l}receive(u\<^sub>m\<^sub>s\<^sub>g). p) = insert ({l}receive(u\<^sub>m\<^sub>s\<^sub>g). p) (ctermsl p)"
| "ctermsl (p1 \<oplus> p2) = ctermsl p1 \<union> ctermsl p2"
| "ctermsl (call(pn)) = {call(pn)}"
by pat_completeness auto
termination by (relation "measure(size)") (auto dest: stermsl_nobigger)
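text \<open>
  As a small example (with l, fmsg, pn, and pn' arbitrary placeholders): local control
  terms collect the non-choice subterms, including call terms.
\<close>
lemma "ctermsl (({l}send(fmsg). call(pn)) \<oplus> call(pn'))
         = {({l}send(fmsg). call(pn)), call(pn), call(pn')}"
  by simp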
lemmas ctermsl_induct =
ctermsl.induct [case_names GUARD ASSIGN UCAST BCAST GCAST
SEND DELIVER RECEIVE CHOICE CALL]
lemma ctermsl_refl [intro]: "not_choice p \<Longrightarrow> p \<in> ctermsl p"
by (cases p) auto
lemma ctermsl_subterms:
"ctermsl p = {q. q \<in> subterms p \<and> not_choice q }" (is "?lhs = ?rhs")
proof
show "?lhs \<subseteq> ?rhs" by (induct p, auto) next
show "?rhs \<subseteq> ?lhs" by (induct p, auto)
qed
lemma ctermsl_trans [elim]:
assumes "q \<in> ctermsl p"
and "r \<in> ctermsl q"
shows "r \<in> ctermsl p"
using assms
proof (induction p rule: ctermsl_induct)
case (CHOICE p1 p2)
have "(q \<in> ctermsl p1) \<or> (q \<in> ctermsl p2)"
using CHOICE.prems(1) by simp
hence "r \<in> ctermsl p1 \<or> r \<in> ctermsl p2"
proof (rule disj_forward)
assume "q \<in> ctermsl p1"
thus "r \<in> ctermsl p1" using \<open>r \<in> ctermsl q\<close> by (rule CHOICE.IH)
next
assume "q \<in> ctermsl p2"
thus "r \<in> ctermsl p2" using \<open>r \<in> ctermsl q\<close> by (rule CHOICE.IH)
qed
thus "r \<in> ctermsl (p1 \<oplus> p2)" by simp
qed auto
lemma ctermsl_ex_trans [elim]:
assumes "\<exists>q \<in> ctermsl p. r \<in> ctermsl q"
shows "r \<in> ctermsl p"
using assms by auto
lemma call_ctermsl_empty [elim]:
"\<lbrakk> p \<in> ctermsl p'; not_call p \<rbrakk> \<Longrightarrow> not_call p'"
unfolding not_call_def by (cases p) auto
lemma stermsl_ctermsl_choice1 [simp]:
assumes "q \<in> stermsl p1"
shows "q \<in> ctermsl (p1 \<oplus> p2)"
using assms by (induction p1) auto
lemma stermsl_ctermsl_choice2 [simp]:
assumes "q \<in> stermsl p2"
shows "q \<in> ctermsl (p1 \<oplus> p2)"
using assms by (induction p2) auto
lemma stermsl_ctermsl [elim]:
assumes "q \<in> stermsl p"
shows "q \<in> ctermsl p"
using assms
proof (cases p)
case (CHOICE p1 p2)
hence "q \<in> stermsl (p1 \<oplus> p2)" using assms by simp
hence "q \<in> stermsl p1 \<or> q \<in> stermsl p2" by simp
hence "q \<in> ctermsl (p1 \<oplus> p2)" by (rule) (simp_all del: ctermsl.simps)
thus "q \<in> ctermsl p" using CHOICE by simp
qed simp_all
lemma stermsl_after_ctermsl [simp]:
"(\<Union>x\<in>ctermsl p. stermsl x) = ctermsl p"
  by (induction p) auto
lemma stermsl_before_ctermsl [simp]:
"(\<Union>x\<in>stermsl p. ctermsl x) = ctermsl p"
  by (induction p) simp_all
lemma ctermsl_no_choice: "p1 \<oplus> p2 \<notin> ctermsl p"
by (induct p) simp_all
lemma ctermsl_ex_stermsl: "q \<in> ctermsl p \<Longrightarrow> \<exists>ps\<in>stermsl p. q \<in> ctermsl ps"
by (induct p) auto
lemma dterms_ctermsl [intro]:
assumes "q \<in> dterms \<Gamma> p"
and "wellformed \<Gamma>"
shows "q \<in> ctermsl p \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
using assms(1-2)
proof (induction p rule: dterms_pinduct [OF \<open>wellformed \<Gamma>\<close>])
fix \<Gamma> l fg p
assume "q \<in> dterms \<Gamma> ({l}\<langle>fg\<rangle> p)"
and "wellformed \<Gamma>"
hence "q \<in> sterms \<Gamma> p" by simp
hence "q \<in> stermsl p \<or> (\<exists>pn. q \<in> stermsl (\<Gamma> pn))"
using \<open>wellformed \<Gamma>\<close> by (rule sterms_stermsl)
thus "q \<in> ctermsl ({l}\<langle>fg\<rangle> p) \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
proof
assume "q \<in> stermsl p"
hence "q \<in> ctermsl p" by (rule stermsl_ctermsl)
hence "q \<in> ctermsl ({l}\<langle>fg\<rangle> p)" by simp
thus ?thesis ..
next
assume "\<exists>pn. q \<in> stermsl (\<Gamma> pn)"
then obtain pn where "q \<in> stermsl (\<Gamma> pn)" by auto
hence "q \<in> ctermsl (\<Gamma> pn)" by (rule stermsl_ctermsl)
hence "\<exists>pn. q \<in> ctermsl (\<Gamma> pn)" ..
thus ?thesis ..
qed
next
fix \<Gamma> p1 p2
assume "q \<in> dterms \<Gamma> (p1 \<oplus> p2)"
and IH1: "\<lbrakk> q \<in> dterms \<Gamma> p1; wellformed \<Gamma> \<rbrakk> \<Longrightarrow> q \<in> ctermsl p1 \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
and IH2: "\<lbrakk> q \<in> dterms \<Gamma> p2; wellformed \<Gamma> \<rbrakk> \<Longrightarrow> q \<in> ctermsl p2 \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
and "wellformed \<Gamma>"
thus "q \<in> ctermsl (p1 \<oplus> p2) \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
by auto
next
fix \<Gamma> pn
assume "q \<in> dterms \<Gamma> (call(pn))"
and "wellformed \<Gamma>"
and "\<lbrakk> q \<in> dterms \<Gamma> (\<Gamma> pn); wellformed \<Gamma> \<rbrakk> \<Longrightarrow> q \<in> ctermsl (\<Gamma> pn) \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
thus "q \<in> ctermsl (call(pn)) \<or> (\<exists>pn. q \<in> ctermsl (\<Gamma> pn))"
by auto
qed (simp_all, (metis sterms_stermsl stermsl_ctermsl)+)
lemma ctermsl_cterms [elim]:
assumes "q \<in> ctermsl p"
and "not_call q"
and "sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
and "wellformed \<Gamma>"
shows "q \<in> cterms \<Gamma>"
using assms by (induct p rule: ctermsl.induct) auto
subsection "Local deriviative terms"
text \<open>
We define local @{term "dterm"}s for use in the theorem that relates @{term "cterms"}
and sets of @{term "ctermsl"}.
\<close>
function dtermsl
:: "('s, 'm, 'p, 'l) seqp \<Rightarrow> ('s, 'm, 'p, 'l) seqp set"
where
"dtermsl ({l}\<langle>fg\<rangle> p) = stermsl p"
| "dtermsl ({l}\<lbrakk>fa\<rbrakk> p) = stermsl p"
| "dtermsl (p1 \<oplus> p2) = dtermsl p1 \<union> dtermsl p2"
| "dtermsl ({l}unicast(fip, fmsg).p \<triangleright> q) = stermsl p \<union> stermsl q"
| "dtermsl ({l}broadcast(fmsg). p) = stermsl p"
| "dtermsl ({l}groupcast(fips, fmsg). p) = stermsl p"
| "dtermsl ({l}send(fmsg).p) = stermsl p"
| "dtermsl ({l}deliver(fdata).p) = stermsl p"
| "dtermsl ({l}receive(fmsg).p) = stermsl p"
| "dtermsl (call(pn)) = {}"
by pat_completeness auto
termination by (relation "measure(size)") (auto dest: stermsl_nobigger)
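text \<open>
  As a small example (with l, fmsg, p1, and p2 arbitrary placeholders): a send prefix
  contributes the local start terms of its continuation, with choices flattened.
\<close>
lemma "dtermsl ({l}send(fmsg). (p1 \<oplus> p2)) = stermsl p1 \<union> stermsl p2"
  by simp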
lemma stermsl_after_dtermsl [simp]:
shows "(\<Union>x\<in>dtermsl p. stermsl x) = dtermsl p"
  by (induct p) simp_all
lemma stermsl_before_dtermsl [simp]:
"(\<Union>x\<in>stermsl p. dtermsl x) = dtermsl p"
  by (induct p) simp_all
lemma dtermsl_no_choice [simp]: "p1 \<oplus> p2 \<notin> dtermsl p"
by (induct p) simp_all
lemma ctermsl_dtermsl [elim]:
assumes "q \<in> dtermsl p"
shows "q \<in> ctermsl p"
using assms by (induct p) (simp_all, (metis stermsl_ctermsl)+)
lemma dtermsl_dterms [elim]:
assumes "q \<in> dtermsl p"
and "not_call q"
and "wellformed \<Gamma>"
shows "q \<in> dterms \<Gamma> p"
  using assms by (induct p) (simp_all, (metis stermsl_sterms)+)
lemma ctermsl_stermsl_or_dtermsl:
assumes "q \<in> ctermsl p"
shows "q \<in> stermsl p \<or> (\<exists>p'\<in>dtermsl p. q \<in> ctermsl p')"
using assms by (induct p) (auto dest: ctermsl_ex_stermsl)
lemma dtermsl_add_stermsl_beforeD:
assumes "q \<in> dtermsl p"
shows "\<exists>ps\<in>stermsl p. q \<in> dtermsl ps"
proof -
from assms have "q \<in> (\<Union>x\<in>stermsl p. dtermsl x)" by auto
thus ?thesis
by (rule UN_E) auto
qed
lemma call_dtermsl_empty [elim]:
"q \<in> dtermsl p \<Longrightarrow> not_call p"
by (cases p) simp_all
subsection "More properties of control terms"
text \<open>
We now show an alternative definition of @{term "cterms"} based on sets of local control
terms. While the original definition has convenient induction and simplification rules,
useful for proving properties like cterms\_includes\_sterms\_of\_seq\_reachable, this
definition makes it easier to systematically generate the set of control terms of a
process specification.
\<close>
theorem cterms_def':
assumes wfg: "wellformed \<Gamma>"
shows "cterms \<Gamma> = { p |p pn. p \<in> ctermsl (\<Gamma> pn) \<and> not_call p }"
(is "_ = ?ctermsl_set")
proof (rule iffI [THEN set_eqI])
fix p
assume "p \<in> cterms \<Gamma>"
thus "p \<in> ?ctermsl_set"
proof (induction p)
fix p pn
assume "p \<in> sterms \<Gamma> (\<Gamma> pn)"
then obtain pn' where "p \<in> stermsl (\<Gamma> pn')" using wfg
by (blast dest: sterms_stermsl_heads)
hence "p \<in> ctermsl (\<Gamma> pn')" ..
moreover from \<open>p \<in> sterms \<Gamma> (\<Gamma> pn)\<close> wfg have "not_call p" by simp
ultimately show "p \<in> ?ctermsl_set" by auto
next
fix pp p
assume "pp \<in> cterms \<Gamma>"
and IH: "pp \<in> ?ctermsl_set"
and *: "p \<in> dterms \<Gamma> pp"
from * have "p \<in> ctermsl pp \<or> (\<exists>pn. p \<in> ctermsl (\<Gamma> pn))"
using wfg by (rule dterms_ctermsl)
hence "\<exists>pn. p \<in> ctermsl (\<Gamma> pn)"
proof
assume "p \<in> ctermsl pp"
from \<open>pp \<in> cterms \<Gamma>\<close> and IH obtain pn' where "pp \<in> ctermsl (\<Gamma> pn')"
by auto
with \<open>p \<in> ctermsl pp\<close> have "p \<in> ctermsl (\<Gamma> pn')" by auto
thus "\<exists>pn. p \<in> ctermsl (\<Gamma> pn)" ..
qed -
moreover from \<open>p \<in> dterms \<Gamma> pp\<close> wfg have "not_call p" by simp
ultimately show "p \<in> ?ctermsl_set" by auto
qed
next
fix p
assume "p \<in> ?ctermsl_set"
then obtain pn where *: "p \<in> ctermsl (\<Gamma> pn)" and "not_call p" by auto
from * have "p \<in> stermsl (\<Gamma> pn) \<or> (\<exists>p'\<in>dtermsl (\<Gamma> pn). p \<in> ctermsl p')"
by (rule ctermsl_stermsl_or_dtermsl)
thus "p \<in> cterms \<Gamma>"
proof
assume "p \<in> stermsl (\<Gamma> pn)"
hence "p \<in> sterms \<Gamma> (\<Gamma> pn)" using \<open>not_call p\<close> wfg ..
thus "p \<in> cterms \<Gamma>" ..
next
assume "\<exists>p'\<in>dtermsl (\<Gamma> pn). p \<in> ctermsl p'"
then obtain p' where p'1: "p' \<in> dtermsl (\<Gamma> pn)"
and p'2: "p \<in> ctermsl p'" ..
from p'2 and \<open>not_call p\<close> have "not_call p'" ..
from p'1 obtain ps where ps1: "ps \<in> stermsl (\<Gamma> pn)"
and ps2: "p' \<in> dtermsl ps"
by (blast dest: dtermsl_add_stermsl_beforeD)
from ps2 have "not_call ps" ..
with ps1 have "ps \<in> cterms \<Gamma>" using wfg by auto
with \<open>p' \<in> dtermsl ps\<close> and \<open>not_call p'\<close> have "p' \<in> cterms \<Gamma>" using wfg by auto
hence "sterms \<Gamma> p' \<subseteq> cterms \<Gamma>" using wfg by auto
with \<open>p \<in> ctermsl p'\<close> \<open>not_call p\<close> show "p \<in> cterms \<Gamma>" using wfg ..
qed
qed
lemma ctermsE [elim]:
assumes "wellformed \<Gamma>"
and "p \<in> cterms \<Gamma>"
obtains pn where "p \<in> ctermsl (\<Gamma> pn)"
and "not_call p"
using assms(2) unfolding cterms_def' [OF assms(1)] by auto
corollary cterms_subterms:
assumes "wellformed \<Gamma>"
shows "cterms \<Gamma> = {p|p pn. p\<in>subterms (\<Gamma> pn) \<and> not_call p \<and> not_choice p}"
by (subst cterms_def' [OF assms(1)], subst ctermsl_subterms) auto
lemma subterms_in_cterms [elim]:
assumes "wellformed \<Gamma>"
and "p\<in>subterms (\<Gamma> pn)"
and "not_call p"
and "not_choice p"
shows "p \<in> cterms \<Gamma>"
using assms unfolding cterms_subterms [OF \<open>wellformed \<Gamma>\<close>] by auto
lemma subterms_stermsl_ctermsl:
assumes "q \<in> subterms p"
and "r \<in> stermsl q"
shows "r \<in> ctermsl p"
using assms
proof (induct p)
fix p1 p2
assume IH1: "q \<in> subterms p1 \<Longrightarrow> r \<in> stermsl q \<Longrightarrow> r \<in> ctermsl p1"
and IH2: "q \<in> subterms p2 \<Longrightarrow> r \<in> stermsl q \<Longrightarrow> r \<in> ctermsl p2"
and *: "q \<in> subterms (p1 \<oplus> p2)"
and "r \<in> stermsl q"
from * have "q \<in> {p1 \<oplus> p2} \<union> subterms p1 \<union> subterms p2" by simp
thus "r \<in> ctermsl (p1 \<oplus> p2)"
proof (elim UnE)
assume "q \<in> {p1 \<oplus> p2}" with \<open>r \<in> stermsl q\<close> show ?thesis
by simp (metis stermsl_ctermsl)
next
assume "q \<in> subterms p1" hence "r \<in> ctermsl p1" using \<open>r \<in> stermsl q\<close> by (rule IH1)
thus ?thesis by simp
next
assume "q \<in> subterms p2" hence "r \<in> ctermsl p2" using \<open>r \<in> stermsl q\<close> by (rule IH2)
thus ?thesis by simp
qed
qed auto
lemma subterms_sterms_cterms:
assumes wf: "wellformed \<Gamma>"
and "p \<in> subterms (\<Gamma> pn)"
shows "sterms \<Gamma> p \<subseteq> cterms \<Gamma>"
using assms(2)
proof (induct p)
fix p
assume "call(p) \<in> subterms (\<Gamma> pn)"
from wf have "sterms \<Gamma> (call(p)) = sterms \<Gamma> (\<Gamma> p)" by simp
thus "sterms \<Gamma> (call(p)) \<subseteq> cterms \<Gamma>" by auto
next
fix p1 p2
assume IH1: "p1 \<in> subterms (\<Gamma> pn) \<Longrightarrow> sterms \<Gamma> p1 \<subseteq> cterms \<Gamma>"
and IH2: "p2 \<in> subterms (\<Gamma> pn) \<Longrightarrow> sterms \<Gamma> p2 \<subseteq> cterms \<Gamma>"
and *: "p1 \<oplus> p2 \<in> subterms (\<Gamma> pn)"
from * have "p1 \<in> subterms (\<Gamma> pn)" by auto
hence "sterms \<Gamma> p1 \<subseteq> cterms \<Gamma>" by (rule IH1)
moreover from * have "p2 \<in> subterms (\<Gamma> pn)" by auto
hence "sterms \<Gamma> p2 \<subseteq> cterms \<Gamma>" by (rule IH2)
ultimately show "sterms \<Gamma> (p1 \<oplus> p2 ) \<subseteq> cterms \<Gamma>" using wf by simp
qed (auto elim!: subterms_in_cterms [OF \<open>wellformed \<Gamma>\<close>])
lemma subterms_sterms_in_cterms:
assumes "wellformed \<Gamma>"
and "p \<in> subterms (\<Gamma> pn)"
and "q \<in> sterms \<Gamma> p"
shows "q \<in> cterms \<Gamma>"
using assms
by (auto dest!: subterms_sterms_cterms [OF \<open>wellformed \<Gamma>\<close>])
end
|
I’m very pleased to announce that the David Adams Wealth Group has been awarded the elite distinction of membership in the 2015 Raymond James Chairman’s Council*.
For more than four decades, the associates at Raymond James have remained true to our commitment of always doing what is in each client’s best interest. David and his team embody that ideal.
Raymond James Chairman’s Council is our firm’s highest honor with membership reserved for advisors who have demonstrated the utmost in dedication to client service, as well as a continued desire for professional development. This accolade is reserved for the top Independent financial advisors across the entire firm, placing David’s office in the top 75 (or top 1%) of financial advisors across the firm. The David Adams Wealth Group is a shining example of our 40-year tradition of professionalism, integrity and hard work. This marks the ninth consecutive year that David has qualified for one of the firm’s top recognitions.
In addition to Chairman’s Council, Raymond James also announced that David was named to REP. Magazine’s annual list of the Top 50 NextGen Independent Advisors under the age of 40 in the United States**.
Thank you for the trust and confidence you have placed in our firm, and please join me in congratulating the David Adams Wealth Group on this accomplishment.
*Membership is based mainly on assets under management, education, credentials and fiscal-year production. Re-qualification is required annually.
** Advisors on the Top NextGen Independent Broker/Dealer Advisors list are ranked exclusively by total assets under management. To be eligible, advisors had to be under the age of 40 as of Jan. 1, 2015. We solicited nominations from the top 30 IBDs by headcount, sourced from Cerulli Associates. REP. magazine and WealthManagement.com do not receive any compensation from financial advisors, participating firms and affiliates, or the media in exchange for rankings. |
State Before: 𝕜 : Type u_2
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_1
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type ?u.658148
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type ?u.658243
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type ?u.658338
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x✝ : E
s t : Set E
L✝ L₁ L₂ : Filter E
x : E
L : Filter E
⊢ ∀ (x_1 : E), 0 = _root_.id x_1 - _root_.id x - ↑(ContinuousLinearMap.id 𝕜 E) (x_1 - x) State After: no goals Tactic: simp |
#!/usr/bin/python3.7
import numpy as np
from utils.dataset import Dataset
from utils.network import Trainer, Forwarder
from utils.viterbi import Viterbi
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
### read label2index mapping and index2label mapping ###########################
label2index = dict()
index2label = dict()
with open('data/mapping.txt', 'r') as f:
content = f.read().split('\n')[0:-1]
for line in content:
label2index[line.split()[1]] = int(line.split()[0])
index2label[int(line.split()[0])] = line.split()[1]
### read training data #########################################################
with open('data/split1.train', 'r') as f:
video_list = f.read().split('\n')[0:-1]
dataset = Dataset('data', video_list, label2index, shuffle = True)
### generate path grammar for inference ########################################
paths = set()
for _, transcript in dataset:
paths.add( ' '.join([index2label[index] for index in transcript]) )
with open('results/grammar.txt', 'w') as f:
f.write('\n'.join(paths) + '\n')
### actual nn-viterbi training #################################################
decoder = Viterbi(None, None, frame_sampling = 30) # (None, None): transcript-grammar and length-model are set for each training sequence separately, see trainer.train(...)
trainer = Trainer(decoder, dataset.input_dimension, dataset.n_classes, buffer_size = len(dataset), buffered_frame_ratio = 25)
learning_rate = 0.01
window = 10   # window size forwarded to trainer.train for the windowed Viterbi decoding
step = 5      # step by which that window is advanced
# train for 100000 iterations
for i in range(100000):
sequence, transcript = dataset.get()
loss1, loss2 = trainer.train(sequence, transcript, batch_size=512, learning_rate=learning_rate, window=window, step=step)
# print some progress information
if (i+1) % 100 == 0:
print('Iteration %d, loss1: %f, loss2: %f, loss: %f' % (i+1, loss1, loss2, loss1 - loss2))
# save model every 1000 iterations
if (i+1) % 1000 == 0:
network_file = 'results/network.iter-' + str(i+1) + '.net'
length_file = 'results/lengths.iter-' + str(i+1) + '.txt'
prior_file = 'results/prior.iter-' + str(i+1) + '.txt'
trainer.save_model(network_file, length_file, prior_file)
# adjust learning rate after 60000 iterations
if (i+1) == 60000:
learning_rate = learning_rate * 0.1
|
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx : ¬x ∈ l
⊢ Set.InjOn (fun k => insertNth k x l) {n | n ≤ length l}
[PROOFSTEP]
induction' l with hd tl IH
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hx : ¬x ∈ []
⊢ Set.InjOn (fun k => insertNth k x []) {n | n ≤ length []}
[PROOFSTEP]
intro n hn m hm _
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hx : ¬x ∈ []
n : ℕ
hn : n ∈ {n | n ≤ length []}
m : ℕ
hm : m ∈ {n | n ≤ length []}
a✝ : (fun k => insertNth k x []) n = (fun k => insertNth k x []) m
⊢ n = m
[PROOFSTEP]
simp only [Set.mem_singleton_iff, Set.setOf_eq_eq_singleton, length, nonpos_iff_eq_zero] at hn hm
[GOAL]
case nil
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hx : ¬x ∈ []
n m : ℕ
a✝ : (fun k => insertNth k x []) n = (fun k => insertNth k x []) m
hn : n = 0
hm : m = 0
⊢ n = m
[PROOFSTEP]
simp [hn, hm]
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x ∈ hd :: tl
⊢ Set.InjOn (fun k => insertNth k x (hd :: tl)) {n | n ≤ length (hd :: tl)}
[PROOFSTEP]
intro n hn m hm h
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x ∈ hd :: tl
n : ℕ
hn : n ∈ {n | n ≤ length (hd :: tl)}
m : ℕ
hm : m ∈ {n | n ≤ length (hd :: tl)}
h : (fun k => insertNth k x (hd :: tl)) n = (fun k => insertNth k x (hd :: tl)) m
⊢ n = m
[PROOFSTEP]
simp only [length, Set.mem_setOf_eq] at hn hm
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x ∈ hd :: tl
n : ℕ
hn : n ≤ length tl + 1
m : ℕ
hm : m ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) n = (fun k => insertNth k x (hd :: tl)) m
⊢ n = m
[PROOFSTEP]
simp only [mem_cons, not_or] at hx
[GOAL]
case cons
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
n : ℕ
hn : n ≤ length tl + 1
m : ℕ
hm : m ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) n = (fun k => insertNth k x (hd :: tl)) m
hx : ¬x = hd ∧ ¬x ∈ tl
⊢ n = m
[PROOFSTEP]
cases n
[GOAL]
case cons.zero
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
m : ℕ
hm : m ≤ length tl + 1
hx : ¬x = hd ∧ ¬x ∈ tl
hn : Nat.zero ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) Nat.zero = (fun k => insertNth k x (hd :: tl)) m
⊢ Nat.zero = m
[PROOFSTEP]
cases m
[GOAL]
case cons.succ
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
m : ℕ
hm : m ≤ length tl + 1
hx : ¬x = hd ∧ ¬x ∈ tl
n✝ : ℕ
hn : Nat.succ n✝ ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) (Nat.succ n✝) = (fun k => insertNth k x (hd :: tl)) m
⊢ Nat.succ n✝ = m
[PROOFSTEP]
cases m
[GOAL]
case cons.zero.zero
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
hn hm : Nat.zero ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) Nat.zero = (fun k => insertNth k x (hd :: tl)) Nat.zero
⊢ Nat.zero = Nat.zero
[PROOFSTEP]
rfl
[GOAL]
case cons.zero.succ
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
hn : Nat.zero ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) Nat.zero = (fun k => insertNth k x (hd :: tl)) (Nat.succ n✝)
⊢ Nat.zero = Nat.succ n✝
[PROOFSTEP]
simp [hx.left] at h
[GOAL]
case cons.succ.zero
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝ : ℕ
hn : Nat.succ n✝ ≤ length tl + 1
hm : Nat.zero ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) (Nat.succ n✝) = (fun k => insertNth k x (hd :: tl)) Nat.zero
⊢ Nat.succ n✝ = Nat.zero
[PROOFSTEP]
simp [Ne.symm hx.left] at h
[GOAL]
case cons.succ.succ
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : (fun k => insertNth k x (hd :: tl)) (Nat.succ n✝¹) = (fun k => insertNth k x (hd :: tl)) (Nat.succ n✝)
⊢ Nat.succ n✝¹ = Nat.succ n✝
[PROOFSTEP]
simp only [true_and_iff, eq_self_iff_true, insertNth_succ_cons] at h
[GOAL]
case cons.succ.succ
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : hd :: insertNth n✝¹ x tl = hd :: insertNth n✝ x tl
⊢ Nat.succ n✝¹ = Nat.succ n✝
[PROOFSTEP]
rw [Nat.succ_inj']
[GOAL]
case cons.succ.succ
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : hd :: insertNth n✝¹ x tl = hd :: insertNth n✝ x tl
⊢ n✝¹ = n✝
[PROOFSTEP]
refine' IH hx.right _ _ (by injection h)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : hd :: insertNth n✝¹ x tl = hd :: insertNth n✝ x tl
⊢ (fun k => insertNth k x tl) n✝¹ = (fun k => insertNth k x tl) n✝
[PROOFSTEP]
injection h
[GOAL]
case cons.succ.succ.refine'_1
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : hd :: insertNth n✝¹ x tl = hd :: insertNth n✝ x tl
⊢ n✝¹ ∈ {n | n ≤ length tl}
[PROOFSTEP]
simpa [Nat.succ_le_succ_iff] using hn
[GOAL]
case cons.succ.succ.refine'_2
α : Type u_1
β : Type u_2
γ : Type u_3
l : List α
x : α
hx✝ : ¬x ∈ l
hd : α
tl : List α
IH : ¬x ∈ tl → Set.InjOn (fun k => insertNth k x tl) {n | n ≤ length tl}
hx : ¬x = hd ∧ ¬x ∈ tl
n✝¹ : ℕ
hn : Nat.succ n✝¹ ≤ length tl + 1
n✝ : ℕ
hm : Nat.succ n✝ ≤ length tl + 1
h : hd :: insertNth n✝¹ x tl = hd :: insertNth n✝ x tl
⊢ n✝ ∈ {n | n ≤ length tl}
[PROOFSTEP]
simpa [Nat.succ_le_succ_iff] using hm
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
⊢ Set.range (foldr f a) ⊆ Set.range (foldr g a)
[PROOFSTEP]
rintro _ ⟨l, rfl⟩
[GOAL]
case intro
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
l : List β
⊢ foldr f a l ∈ Set.range (foldr g a)
[PROOFSTEP]
induction' l with b l H
[GOAL]
case intro.nil
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
⊢ foldr f a [] ∈ Set.range (foldr g a)
[PROOFSTEP]
exact ⟨[], rfl⟩
[GOAL]
case intro.cons
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
b : β
l : List β
H : foldr f a l ∈ Set.range (foldr g a)
⊢ foldr f a (b :: l) ∈ Set.range (foldr g a)
[PROOFSTEP]
cases' hfg (Set.mem_range_self b) with c hgf
[GOAL]
case intro.cons.intro
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
b : β
l : List β
H : foldr f a l ∈ Set.range (foldr g a)
c : γ
hgf : g c = f b
⊢ foldr f a (b :: l) ∈ Set.range (foldr g a)
[PROOFSTEP]
cases' H with m hgf'
[GOAL]
case intro.cons.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
b : β
l : List β
c : γ
hgf : g c = f b
m : List γ
hgf' : foldr g a m = foldr f a l
⊢ foldr f a (b :: l) ∈ Set.range (foldr g a)
[PROOFSTEP]
rw [foldr_cons, ← hgf, ← hgf']
[GOAL]
case intro.cons.intro.intro
α : Type u_1
β : Type u_2
γ : Type u_3
f : β → α → α
g : γ → α → α
hfg : Set.range f ⊆ Set.range g
a : α
b : β
l : List β
c : γ
hgf : g c = f b
m : List γ
hgf' : foldr g a m = foldr f a l
⊢ g c (foldr g a m) ∈ Set.range (foldr g a)
[PROOFSTEP]
exact ⟨c :: m, rfl⟩
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β → α
g : α → γ → α
hfg : (Set.range fun a c => f c a) ⊆ Set.range fun b c => g c b
a : α
⊢ Set.range (foldl f a) ⊆ Set.range (foldl g a)
[PROOFSTEP]
change
(Set.range fun l => _) ⊆
Set.range fun l =>
_
-- Porting note: This was simply `simp_rw [← foldr_reverse]`
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β → α
g : α → γ → α
hfg : (Set.range fun a c => f c a) ⊆ Set.range fun b c => g c b
a : α
⊢ (Set.range fun l => foldl f a l) ⊆ Set.range fun l => foldl g a l
[PROOFSTEP]
simp_rw [← foldr_reverse _ (fun z w => g w z), ← foldr_reverse _ (fun z w => f w z)]
-- Porting note: This `change` was not necessary in mathlib3
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β → α
g : α → γ → α
hfg : (Set.range fun a c => f c a) ⊆ Set.range fun b c => g c b
a : α
⊢ (Set.range fun l => foldr (fun z w => f w z) a (reverse l)) ⊆
Set.range fun l => foldr (fun z w => g w z) a (reverse l)
[PROOFSTEP]
change (Set.range (foldr (fun z w => f w z) a ∘ reverse)) ⊆ Set.range (foldr (fun z w => g w z) a ∘ reverse)
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β → α
g : α → γ → α
hfg : (Set.range fun a c => f c a) ⊆ Set.range fun b c => g c b
a : α
⊢ Set.range (foldr (fun z w => f w z) a ∘ reverse) ⊆ Set.range (foldr (fun z w => g w z) a ∘ reverse)
[PROOFSTEP]
simp_rw [Set.range_comp _ reverse, reverse_involutive.bijective.surjective.range_eq, Set.image_univ]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
f : α → β → α
g : α → γ → α
hfg : (Set.range fun a c => f c a) ⊆ Set.range fun b c => g c b
a : α
⊢ Set.range (foldr (fun z w => f w z) a) ⊆ Set.range (foldr (fun z w => g w z) a)
[PROOFSTEP]
exact foldr_range_subset_of_range_subset hfg a
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
σ : Type u_4
f : α → σ → σ × β
a : α
as : List α
s : σ
⊢ mapAccumr f (a :: as) s =
foldr
(fun a s =>
let r := f a s.fst;
(r.fst, r.snd :: s.snd))
(s, []) (a :: as)
[PROOFSTEP]
simp only [mapAccumr, foldr, mapAccumr_eq_foldr f as]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
σ : Type u_4
φ : Type u_5
f : α → β → σ → σ × φ
a : α
as : List α
b : β
bs : List β
s : σ
⊢ mapAccumr₂ f (a :: as) (b :: bs) s =
foldr
(fun ab s =>
let r := f ab.fst ab.snd s.fst;
(r.fst, r.snd :: s.snd))
(s, []) (zip (a :: as) (b :: bs))
[PROOFSTEP]
simp only [mapAccumr₂, foldr, mapAccumr₂_eq_foldr f as]
[GOAL]
α : Type u_1
β : Type u_2
γ : Type u_3
σ : Type u_4
φ : Type u_5
f : α → β → σ → σ × φ
a : α
as : List α
b : β
bs : List β
s : σ
⊢ ((f a b
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zip as bs)).fst).fst,
(f a b
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zip as bs)).fst).snd ::
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zip as bs)).snd) =
((f a b
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zipWith Prod.mk as bs)).fst).fst,
(f a b
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zipWith Prod.mk as bs)).fst).snd ::
(foldr (fun ab s => ((f ab.fst ab.snd s.fst).fst, (f ab.fst ab.snd s.fst).snd :: s.snd)) (s, [])
(zipWith Prod.mk as bs)).snd)
[PROOFSTEP]
rfl
|
(* Title: HOL/ex/Tarski.thy
Author: Florian Kammüller, Cambridge University Computer Laboratory
*)
section \<open>The Full Theorem of Tarski\<close>
theory Tarski
imports MainRLT "HOL-Library.FuncSet"
begin
text \<open>
  A minimal version of lattice theory plus the full theorem of Tarski:
  the fixed points of a complete lattice themselves form a complete
  lattice.
  Illustrates first-class theories, using the Sigma representation of
  structures. Tidied and converted to Isar by lcp.
\<close>
record 'a potype =
pset :: "'a set"
order :: "('a \<times> 'a) set"
definition monotone :: "['a \<Rightarrow> 'a, 'a set, ('a \<times> 'a) set] \<Rightarrow> bool"
where "monotone f A r \<longleftrightarrow> (\<forall>x\<in>A. \<forall>y\<in>A. (x, y) \<in> r \<longrightarrow> (f x, f y) \<in> r)"
definition least :: "['a \<Rightarrow> bool, 'a potype] \<Rightarrow> 'a"
where "least P po = (SOME x. x \<in> pset po \<and> P x \<and> (\<forall>y \<in> pset po. P y \<longrightarrow> (x, y) \<in> order po))"
definition greatest :: "['a \<Rightarrow> bool, 'a potype] \<Rightarrow> 'a"
where "greatest P po = (SOME x. x \<in> pset po \<and> P x \<and> (\<forall>y \<in> pset po. P y \<longrightarrow> (y, x) \<in> order po))"
definition lub :: "['a set, 'a potype] \<Rightarrow> 'a"
where "lub S po = least (\<lambda>x. \<forall>y\<in>S. (y, x) \<in> order po) po"
definition glb :: "['a set, 'a potype] \<Rightarrow> 'a"
where "glb S po = greatest (\<lambda>x. \<forall>y\<in>S. (x, y) \<in> order po) po"
definition isLub :: "['a set, 'a potype, 'a] \<Rightarrow> bool"
where "isLub S po =
(\<lambda>L. L \<in> pset po \<and> (\<forall>y\<in>S. (y, L) \<in> order po) \<and>
(\<forall>z\<in>pset po. (\<forall>y\<in>S. (y, z) \<in> order po) \<longrightarrow> (L, z) \<in> order po))"
definition isGlb :: "['a set, 'a potype, 'a] \<Rightarrow> bool"
where "isGlb S po =
(\<lambda>G. (G \<in> pset po \<and> (\<forall>y\<in>S. (G, y) \<in> order po) \<and>
(\<forall>z \<in> pset po. (\<forall>y\<in>S. (z, y) \<in> order po) \<longrightarrow> (z, G) \<in> order po)))"
definition "fix" :: "['a \<Rightarrow> 'a, 'a set] \<Rightarrow> 'a set"
where "fix f A = {x. x \<in> A \<and> f x = x}"
definition interval :: "[('a \<times> 'a) set, 'a, 'a] \<Rightarrow> 'a set"
where "interval r a b = {x. (a, x) \<in> r \<and> (x, b) \<in> r}"
definition Bot :: "'a potype \<Rightarrow> 'a"
where "Bot po = least (\<lambda>x. True) po"
definition Top :: "'a potype \<Rightarrow> 'a"
where "Top po = greatest (\<lambda>x. True) po"
definition PartialOrder :: "'a potype set"
where "PartialOrder = {P. refl_on (pset P) (order P) \<and> antisym (order P) \<and> trans (order P)}"
definition CompleteLattice :: "'a potype set"
where "CompleteLattice =
{cl. cl \<in> PartialOrder \<and>
(\<forall>S. S \<subseteq> pset cl \<longrightarrow> (\<exists>L. isLub S cl L)) \<and>
(\<forall>S. S \<subseteq> pset cl \<longrightarrow> (\<exists>G. isGlb S cl G))}"
definition CLF_set :: "('a potype \<times> ('a \<Rightarrow> 'a)) set"
where "CLF_set =
(SIGMA cl : CompleteLattice.
{f. f \<in> pset cl \<rightarrow> pset cl \<and> monotone f (pset cl) (order cl)})"
definition induced :: "['a set, ('a \<times> 'a) set] \<Rightarrow> ('a \<times> 'a) set"
where "induced A r = {(a, b). a \<in> A \<and> b \<in> A \<and> (a, b) \<in> r}"
definition sublattice :: "('a potype \<times> 'a set) set"
where "sublattice =
(SIGMA cl : CompleteLattice.
{S. S \<subseteq> pset cl \<and> \<lparr>pset = S, order = induced S (order cl)\<rparr> \<in> CompleteLattice})"
abbreviation sublat :: "['a set, 'a potype] \<Rightarrow> bool" ("_ <<= _" [51, 50] 50)
where "S <<= cl \<equiv> S \<in> sublattice `` {cl}"
definition dual :: "'a potype \<Rightarrow> 'a potype"
where "dual po = \<lparr>pset = pset po, order = converse (order po)\<rparr>"
locale S =
fixes cl :: "'a potype"
and A :: "'a set"
and r :: "('a \<times> 'a) set"
defines A_def: "A \<equiv> pset cl"
and r_def: "r \<equiv> order cl"
locale PO = S +
assumes cl_po: "cl \<in> PartialOrder"
locale CL = S +
assumes cl_co: "cl \<in> CompleteLattice"
sublocale CL < po?: PO
unfolding A_def r_def
using CompleteLattice_def PO.intro cl_co by fastforce
locale CLF = S +
fixes f :: "'a \<Rightarrow> 'a"
and P :: "'a set"
assumes f_cl: "(cl, f) \<in> CLF_set"
defines P_def: "P \<equiv> fix f A"
sublocale CLF < cl?: CL
unfolding A_def r_def CL_def
using CLF_set_def f_cl by blast
locale Tarski = CLF +
fixes Y :: "'a set"
and intY1 :: "'a set"
and v :: "'a"
assumes Y_ss: "Y \<subseteq> P"
defines intY1_def: "intY1 \<equiv> interval r (lub Y cl) (Top cl)"
and v_def: "v \<equiv>
glb {x. ((\<lambda>x \<in> intY1. f x) x, x) \<in> induced intY1 r \<and> x \<in> intY1}
\<lparr>pset = intY1, order = induced intY1 r\<rparr>"
subsection \<open>Partial Order\<close>
context PO
begin
lemma dual: "PO (dual cl)"
proof
show "dual cl \<in> PartialOrder"
using cl_po unfolding PartialOrder_def dual_def by auto
qed
lemma PO_imp_refl_on [simp]: "refl_on A r"
using cl_po by (simp add: PartialOrder_def A_def r_def)
lemma PO_imp_sym [simp]: "antisym r"
using cl_po by (simp add: PartialOrder_def r_def)
lemma PO_imp_trans [simp]: "trans r"
using cl_po by (simp add: PartialOrder_def r_def)
lemma reflE: "x \<in> A \<Longrightarrow> (x, x) \<in> r"
using cl_po by (simp add: PartialOrder_def refl_on_def A_def r_def)
lemma antisymE: "\<lbrakk>(a, b) \<in> r; (b, a) \<in> r\<rbrakk> \<Longrightarrow> a = b"
using cl_po by (simp add: PartialOrder_def antisym_def r_def)
lemma transE: "\<lbrakk>(a, b) \<in> r; (b, c) \<in> r\<rbrakk> \<Longrightarrow> (a, c) \<in> r"
using cl_po by (simp add: PartialOrder_def r_def) (unfold trans_def, fast)
lemma monotoneE: "\<lbrakk>monotone f A r; x \<in> A; y \<in> A; (x, y) \<in> r\<rbrakk> \<Longrightarrow> (f x, f y) \<in> r"
by (simp add: monotone_def)
lemma po_subset_po:
assumes "S \<subseteq> A" shows "\<lparr>pset = S, order = induced S r\<rparr> \<in> PartialOrder"
proof -
have "refl_on S (induced S r)"
using \<open>S \<subseteq> A\<close> by (auto simp: refl_on_def induced_def intro: reflE)
moreover
have "antisym (induced S r)"
by (auto simp add: antisym_def induced_def intro: antisymE)
moreover
have "trans (induced S r)"
by (auto simp add: trans_def induced_def intro: transE)
ultimately show ?thesis
by (simp add: PartialOrder_def)
qed
lemma indE: "\<lbrakk>(x, y) \<in> induced S r; S \<subseteq> A\<rbrakk> \<Longrightarrow> (x, y) \<in> r"
by (simp add: induced_def)
lemma indI: "\<lbrakk>(x, y) \<in> r; x \<in> S; y \<in> S\<rbrakk> \<Longrightarrow> (x, y) \<in> induced S r"
by (simp add: induced_def)
end
lemma (in CL) CL_imp_ex_isLub: "S \<subseteq> A \<Longrightarrow> \<exists>L. isLub S cl L"
using cl_co by (simp add: CompleteLattice_def A_def)
declare (in CL) cl_co [simp]
lemma isLub_lub: "(\<exists>L. isLub S cl L) \<longleftrightarrow> isLub S cl (lub S cl)"
by (simp add: lub_def least_def isLub_def some_eq_ex [symmetric])
lemma isGlb_glb: "(\<exists>G. isGlb S cl G) \<longleftrightarrow> isGlb S cl (glb S cl)"
by (simp add: glb_def greatest_def isGlb_def some_eq_ex [symmetric])
lemma isGlb_dual_isLub: "isGlb S cl = isLub S (dual cl)"
by (simp add: isLub_def isGlb_def dual_def converse_unfold)
lemma isLub_dual_isGlb: "isLub S cl = isGlb S (dual cl)"
by (simp add: isLub_def isGlb_def dual_def converse_unfold)
lemma (in PO) dualPO: "dual cl \<in> PartialOrder"
using cl_po by (simp add: PartialOrder_def dual_def)
lemma Rdual:
assumes major: "\<And>S. S \<subseteq> A \<Longrightarrow> \<exists>L. isLub S po L" and "S \<subseteq> A" and "A = pset po"
shows "\<exists>G. isGlb S po G"
proof
show "isGlb S po (lub {y \<in> A. \<forall>k\<in>S. (y, k) \<in> order po} po)"
using major [of "{y. y \<in> A \<and> (\<forall>k \<in> S. (y, k) \<in> order po)}"] \<open>S \<subseteq> A\<close> \<open>A = pset po\<close>
apply (simp add: isLub_lub isGlb_def)
apply (auto simp add: isLub_def)
done
qed
lemma lub_dual_glb: "lub S cl = glb S (dual cl)"
by (simp add: lub_def glb_def least_def greatest_def dual_def converse_unfold)
lemma glb_dual_lub: "glb S cl = lub S (dual cl)"
by (simp add: lub_def glb_def least_def greatest_def dual_def converse_unfold)
lemma CL_subset_PO: "CompleteLattice \<subseteq> PartialOrder"
by (auto simp: PartialOrder_def CompleteLattice_def)
lemmas CL_imp_PO = CL_subset_PO [THEN subsetD]
context CL
begin
lemma CO_refl_on: "refl_on A r"
by (rule PO_imp_refl_on)
lemma CO_antisym: "antisym r"
by (rule PO_imp_sym)
lemma CO_trans: "trans r"
by (rule PO_imp_trans)
end
lemma CompleteLatticeI:
"\<lbrakk>po \<in> PartialOrder; \<forall>S. S \<subseteq> pset po \<longrightarrow> (\<exists>L. isLub S po L);
\<forall>S. S \<subseteq> pset po \<longrightarrow> (\<exists>G. isGlb S po G)\<rbrakk>
\<Longrightarrow> po \<in> CompleteLattice"
unfolding CompleteLattice_def by blast
lemma (in CL) CL_dualCL: "dual cl \<in> CompleteLattice"
using cl_co
apply (simp add: CompleteLattice_def dual_def)
apply (simp add: dualPO flip: dual_def isLub_dual_isGlb isGlb_dual_isLub)
done
context PO
begin
lemma dualA_iff [simp]: "pset (dual cl) = pset cl"
by (simp add: dual_def)
lemma dualr_iff [simp]: "(x, y) \<in> (order (dual cl)) \<longleftrightarrow> (y, x) \<in> order cl"
by (simp add: dual_def)
lemma monotone_dual:
"monotone f (pset cl) (order cl) \<Longrightarrow> monotone f (pset (dual cl)) (order(dual cl))"
by (simp add: monotone_def)
lemma interval_dual: "\<lbrakk>x \<in> A; y \<in> A\<rbrakk> \<Longrightarrow> interval r x y = interval (order(dual cl)) y x"
unfolding interval_def dualr_iff by (auto simp flip: r_def)
lemma interval_not_empty: "interval r a b \<noteq> {} \<Longrightarrow> (a, b) \<in> r"
by (simp add: interval_def) (use transE in blast)
lemma interval_imp_mem: "x \<in> interval r a b \<Longrightarrow> (a, x) \<in> r"
by (simp add: interval_def)
lemma left_in_interval: "\<lbrakk>a \<in> A; b \<in> A; interval r a b \<noteq> {}\<rbrakk> \<Longrightarrow> a \<in> interval r a b"
using interval_def interval_not_empty reflE by fastforce
lemma right_in_interval: "\<lbrakk>a \<in> A; b \<in> A; interval r a b \<noteq> {}\<rbrakk> \<Longrightarrow> b \<in> interval r a b"
by (simp add: A_def PO.dual PO.left_in_interval PO_axioms interval_dual)
end
subsection \<open>sublattice\<close>
lemma (in PO) sublattice_imp_CL:
"S <<= cl \<Longrightarrow> \<lparr>pset = S, order = induced S r\<rparr> \<in> CompleteLattice"
by (simp add: sublattice_def CompleteLattice_def r_def)
lemma (in CL) sublatticeI:
"\<lbrakk>S \<subseteq> A; \<lparr>pset = S, order = induced S r\<rparr> \<in> CompleteLattice\<rbrakk> \<Longrightarrow> S <<= cl"
by (simp add: sublattice_def A_def r_def)
lemma (in CL) dual: "CL (dual cl)"
proof
show "dual cl \<in> CompleteLattice"
using cl_co
by (simp add: CompleteLattice_def dualPO flip: isGlb_dual_isLub isLub_dual_isGlb)
qed
subsection \<open>lub\<close>
context CL
begin
lemma lub_unique: "\<lbrakk>S \<subseteq> A; isLub S cl x; isLub S cl L\<rbrakk> \<Longrightarrow> x = L"
by (rule antisymE) (auto simp add: isLub_def r_def)
lemma lub_upper:
assumes "S \<subseteq> A" "x \<in> S" shows "(x, lub S cl) \<in> r"
proof -
obtain L where "isLub S cl L"
using CL_imp_ex_isLub \<open>S \<subseteq> A\<close> by auto
then show ?thesis
by (metis assms(2) isLub_def isLub_lub r_def)
qed
lemma lub_least:
assumes "S \<subseteq> A" and L: "L \<in> A" "\<forall>x \<in> S. (x, L) \<in> r" shows "(lub S cl, L) \<in> r"
proof -
obtain L' where "isLub S cl L'"
using CL_imp_ex_isLub \<open>S \<subseteq> A\<close> by auto
then show ?thesis
by (metis A_def L isLub_def isLub_lub r_def)
qed
lemma lub_in_lattice:
assumes "S \<subseteq> A" shows "lub S cl \<in> A"
proof -
obtain L where "isLub S cl L"
using CL_imp_ex_isLub \<open>S \<subseteq> A\<close> by auto
then show ?thesis
by (metis A_def isLub_def isLub_lub)
qed
lemma lubI:
assumes A: "S \<subseteq> A" "L \<in> A" and r: "\<forall>x \<in> S. (x, L) \<in> r"
and clo: "\<And>z. \<lbrakk>z \<in> A; (\<forall>y \<in> S. (y, z) \<in> r)\<rbrakk> \<Longrightarrow> (L, z) \<in> r"
shows "L = lub S cl"
proof -
obtain L where "isLub S cl L"
using CL_imp_ex_isLub assms(1) by auto
then show ?thesis
by (simp add: antisymE A clo lub_in_lattice lub_least lub_upper r)
qed
lemma lubIa: "\<lbrakk>S \<subseteq> A; isLub S cl L\<rbrakk> \<Longrightarrow> L = lub S cl"
by (meson isLub_lub lub_unique)
lemma isLub_in_lattice: "isLub S cl L \<Longrightarrow> L \<in> A"
by (simp add: isLub_def A_def)
lemma isLub_upper: "\<lbrakk>isLub S cl L; y \<in> S\<rbrakk> \<Longrightarrow> (y, L) \<in> r"
by (simp add: isLub_def r_def)
lemma isLub_least: "\<lbrakk>isLub S cl L; z \<in> A; \<forall>y \<in> S. (y, z) \<in> r\<rbrakk> \<Longrightarrow> (L, z) \<in> r"
by (simp add: isLub_def A_def r_def)
lemma isLubI:
"\<lbrakk>L \<in> A; \<forall>y \<in> S. (y, L) \<in> r; (\<forall>z \<in> A. (\<forall>y \<in> S. (y, z)\<in>r) \<longrightarrow> (L, z) \<in> r)\<rbrakk> \<Longrightarrow> isLub S cl L"
by (simp add: isLub_def A_def r_def)
end
subsection \<open>glb\<close>
context CL
begin
lemma glb_in_lattice: "S \<subseteq> A \<Longrightarrow> glb S cl \<in> A"
by (metis A_def CL.lub_in_lattice dualA_iff glb_dual_lub local.dual)
lemma glb_lower: "\<lbrakk>S \<subseteq> A; x \<in> S\<rbrakk> \<Longrightarrow> (glb S cl, x) \<in> r"
by (metis A_def CL.lub_upper dualA_iff dualr_iff glb_dual_lub local.dual r_def)
end
text \<open>
Reduce the sublattice property by using substructural properties;
abandoned; see \<open>Tarski_4.ML\<close>.
\<close>
context CLF
begin
declare f_cl [simp]
lemma f_in_funcset: "f \<in> A \<rightarrow> A"
by (simp add: A_def)
lemma monotone_f: "monotone f A r"
by (simp add: A_def r_def)
lemma CLF_dual: "(dual cl, f) \<in> CLF_set"
proof -
have "Tarski.monotone f A (order (dual cl))"
by (metis (no_types) A_def PO.monotone_dual PO_axioms dualA_iff monotone_f r_def)
then show ?thesis
by (simp add: A_def CLF_set_def CL_dualCL)
qed
lemma dual: "CLF (dual cl) f"
by (rule CLF.intro) (rule CLF_dual)
end
subsection \<open>fixed points\<close>
lemma fix_subset: "fix f A \<subseteq> A"
by (auto simp: fix_def)
lemma fix_imp_eq: "x \<in> fix f A \<Longrightarrow> f x = x"
by (simp add: fix_def)
lemma fixf_subset: "\<lbrakk>A \<subseteq> B; x \<in> fix (\<lambda>y \<in> A. f y) A\<rbrakk> \<Longrightarrow> x \<in> fix f B"
by (auto simp: fix_def)
subsection \<open>lemmas for Tarski, lub\<close>
context CLF
begin
lemma lubH_le_flubH:
assumes "H = {x \<in> A. (x, f x) \<in> r}"
shows "(lub H cl, f (lub H cl)) \<in> r"
proof (intro lub_least ballI)
show "H \<subseteq> A"
using assms
by auto
show "f (lub H cl) \<in> A"
using \<open>H \<subseteq> A\<close> f_in_funcset lub_in_lattice by auto
show "(x, f (lub H cl)) \<in> r" if "x \<in> H" for x
proof -
have "(f x, f (lub H cl)) \<in> r"
by (meson \<open>H \<subseteq> A\<close> in_mono lub_in_lattice lub_upper monotoneE monotone_f that)
moreover have "(x, f x) \<in> r"
using assms that by blast
ultimately show ?thesis
using po.transE by blast
qed
qed
lemma lubH_is_fixp:
assumes "H = {x \<in> A. (x, f x) \<in> r}"
shows "lub H cl \<in> fix f A"
proof -
have "(f (lub H cl), lub H cl) \<in> r"
proof -
have "(lub H cl, f (lub H cl)) \<in> r"
using assms lubH_le_flubH by blast
then have "(f (lub H cl), f (f (lub H cl))) \<in> r"
by (meson PO_imp_refl_on monotoneE monotone_f refl_on_domain)
then have "f (lub H cl) \<in> H"
by (metis (no_types, lifting) PO_imp_refl_on assms mem_Collect_eq refl_on_domain)
then show ?thesis
by (simp add: assms lub_upper)
qed
with assms show ?thesis
by (simp add: fix_def antisymE lubH_le_flubH lub_in_lattice)
qed
lemma fixf_le_lubH:
assumes "H = {x \<in> A. (x, f x) \<in> r}" "x \<in> fix f A"
shows "(x, lub H cl) \<in> r"
proof -
have "x \<in> P \<Longrightarrow> x \<in> H"
by (simp add: assms P_def fix_imp_eq [of _ f A] reflE fix_subset [of f A, THEN subsetD])
with assms show ?thesis
by (metis (no_types, lifting) P_def lub_upper mem_Collect_eq subset_eq)
qed
subsection \<open>Tarski fixpoint theorem 1, first part\<close>
lemma T_thm_1_lub: "lub P cl = lub {x \<in> A. (x, f x) \<in> r} cl"
proof -
have "lub {x \<in> A. (x, f x) \<in> r} cl = lub (fix f A) cl"
proof (rule antisymE)
show "(lub {x \<in> A. (x, f x) \<in> r} cl, lub (fix f A) cl) \<in> r"
by (simp add: fix_subset lubH_is_fixp lub_upper)
have "\<And>a. a \<in> fix f A \<Longrightarrow> a \<in> A"
by (meson fix_subset subset_iff)
then show "(lub (fix f A) cl, lub {x \<in> A. (x, f x) \<in> r} cl) \<in> r"
by (simp add: fix_subset fixf_le_lubH lubH_is_fixp lub_least)
qed
then show ?thesis
using P_def by auto
qed
lemma glbH_is_fixp:
assumes "H = {x \<in> A. (f x, x) \<in> r}" shows "glb H cl \<in> P"
\<comment> \<open>Tarski for glb\<close>
proof -
have "glb H cl \<in> fix f (pset (dual cl))"
using assms CLF.lubH_is_fixp [OF dual] PO.dualr_iff PO_axioms
by (fastforce simp add: A_def r_def glb_dual_lub)
then show ?thesis
by (simp add: A_def P_def)
qed
lemma T_thm_1_glb: "glb P cl = glb {x \<in> A. (f x, x) \<in> r} cl"
unfolding glb_dual_lub P_def A_def r_def
using CLF.T_thm_1_lub dualA_iff dualr_iff local.dual by force
subsection \<open>interval\<close>
lemma rel_imp_elem: "(x, y) \<in> r \<Longrightarrow> x \<in> A"
using CO_refl_on by (auto simp: refl_on_def)
lemma interval_subset: "\<lbrakk>a \<in> A; b \<in> A\<rbrakk> \<Longrightarrow> interval r a b \<subseteq> A"
by (simp add: interval_def) (blast intro: rel_imp_elem)
lemma intervalI: "\<lbrakk>(a, x) \<in> r; (x, b) \<in> r\<rbrakk> \<Longrightarrow> x \<in> interval r a b"
by (simp add: interval_def)
lemma interval_lemma1: "\<lbrakk>S \<subseteq> interval r a b; x \<in> S\<rbrakk> \<Longrightarrow> (a, x) \<in> r"
unfolding interval_def by fast
lemma interval_lemma2: "\<lbrakk>S \<subseteq> interval r a b; x \<in> S\<rbrakk> \<Longrightarrow> (x, b) \<in> r"
unfolding interval_def by fast
lemma a_less_lub: "\<lbrakk>S \<subseteq> A; S \<noteq> {}; \<forall>x \<in> S. (a,x) \<in> r; \<forall>y \<in> S. (y, L) \<in> r\<rbrakk> \<Longrightarrow> (a, L) \<in> r"
by (blast intro: transE)
lemma S_intv_cl: "\<lbrakk>a \<in> A; b \<in> A; S \<subseteq> interval r a b\<rbrakk> \<Longrightarrow> S \<subseteq> A"
by (simp add: subset_trans [OF _ interval_subset])
lemma L_in_interval:
assumes "b \<in> A" and S: "S \<subseteq> interval r a b" "isLub S cl L" "S \<noteq> {}"
shows "L \<in> interval r a b"
proof (rule intervalI)
show "(a, L) \<in> r"
by (meson PO_imp_trans all_not_in_conv S interval_lemma1 isLub_upper transD)
show "(L, b) \<in> r"
using \<open>b \<in> A\<close> assms interval_lemma2 isLub_least by auto
qed
lemma G_in_interval:
assumes "b \<in> A" and S: "S \<subseteq> interval r a b" "isGlb S cl G" "S \<noteq> {}"
shows "G \<in> interval r a b"
proof -
have "a \<in> A"
using S(1) \<open>S \<noteq> {}\<close> interval_lemma1 rel_imp_elem by blast
with assms show ?thesis
by (metis (no_types) A_def CLF.L_in_interval dualA_iff interval_dual isGlb_dual_isLub local.dual)
qed
lemma intervalPO:
"\<lbrakk>a \<in> A; b \<in> A; interval r a b \<noteq> {}\<rbrakk>
\<Longrightarrow> \<lparr>pset = interval r a b, order = induced (interval r a b) r\<rparr> \<in> PartialOrder"
by (rule po_subset_po) (simp add: interval_subset)
lemma intv_CL_lub:
assumes "a \<in> A" "b \<in> A" "interval r a b \<noteq> {}" and S: "S \<subseteq> interval r a b"
shows "\<exists>L. isLub S \<lparr>pset = interval r a b, order = induced (interval r a b) r\<rparr> L"
proof -
obtain L where L: "isLub S cl L"
by (meson CL_imp_ex_isLub S_intv_cl assms(1) assms(2) assms(4))
show ?thesis
unfolding isLub_def potype.simps
proof (intro exI impI conjI ballI)
let ?L = "(if S = {} then a else L)"
show Lin: "?L \<in> interval r a b"
using L L_in_interval assms left_in_interval by auto
show "(y, ?L) \<in> induced (interval r a b) r" if "y \<in> S" for y
proof -
have "S \<noteq> {}"
using that by blast
then show ?thesis
using L Lin S indI isLub_upper that by auto
qed
show "(?L, z) \<in> induced (interval r a b) r"
if "z \<in> interval r a b" and "\<forall>y\<in>S. (y, z) \<in> induced (interval r a b) r" for z
using that L
apply (simp add: isLub_def induced_def interval_imp_mem)
by (metis (full_types) A_def Lin \<open>a \<in> A\<close> \<open>b \<in> A\<close> interval_subset r_def subset_eq)
qed
qed
lemmas intv_CL_glb = intv_CL_lub [THEN Rdual]
lemma interval_is_sublattice: "\<lbrakk>a \<in> A; b \<in> A; interval r a b \<noteq> {}\<rbrakk> \<Longrightarrow> interval r a b <<= cl"
apply (rule sublatticeI)
apply (simp add: interval_subset)
by (simp add: CompleteLatticeI intervalPO intv_CL_glb intv_CL_lub)
lemmas interv_is_compl_latt = interval_is_sublattice [THEN sublattice_imp_CL]
subsection \<open>Top and Bottom\<close>
lemma Top_dual_Bot: "Top cl = Bot (dual cl)"
by (simp add: Top_def Bot_def least_def greatest_def)
lemma Bot_dual_Top: "Bot cl = Top (dual cl)"
by (simp add: Top_def Bot_def least_def greatest_def)
lemma Bot_in_lattice: "Bot cl \<in> A"
unfolding Bot_def least_def
apply (rule_tac a = "glb A cl" in someI2)
using glb_in_lattice glb_lower by (auto simp: A_def r_def)
lemma Top_in_lattice: "Top cl \<in> A"
using A_def CLF.Bot_in_lattice Top_dual_Bot local.dual by force
lemma Top_prop: "x \<in> A \<Longrightarrow> (x, Top cl) \<in> r"
unfolding Top_def greatest_def
apply (rule_tac a = "lub A cl" in someI2)
using lub_in_lattice lub_upper by (auto simp: A_def r_def)
lemma Bot_prop: "x \<in> A \<Longrightarrow> (Bot cl, x) \<in> r"
using A_def Bot_dual_Top CLF.Top_prop dualA_iff dualr_iff local.dual r_def by fastforce
lemma Top_intv_not_empty: "x \<in> A \<Longrightarrow> interval r x (Top cl) \<noteq> {}"
using Top_prop intervalI reflE by force
lemma Bot_intv_not_empty: "x \<in> A \<Longrightarrow> interval r (Bot cl) x \<noteq> {}"
using Bot_dual_Top Bot_prop intervalI reflE by fastforce
text \<open>the set of fixed points form a partial order\<close>
proposition fixf_po: "\<lparr>pset = P, order = induced P r\<rparr> \<in> PartialOrder"
by (simp add: P_def fix_subset po_subset_po)
end
context Tarski
begin
lemma Y_subset_A: "Y \<subseteq> A"
by (rule subset_trans [OF _ fix_subset]) (rule Y_ss [simplified P_def])
lemma lubY_in_A: "lub Y cl \<in> A"
by (rule Y_subset_A [THEN lub_in_lattice])
lemma lubY_le_flubY: "(lub Y cl, f (lub Y cl)) \<in> r"
proof (intro lub_least Y_subset_A ballI)
show "f (lub Y cl) \<in> A"
by (meson Tarski.monotone_def lubY_in_A monotone_f reflE rel_imp_elem)
show "(x, f (lub Y cl)) \<in> r" if "x \<in> Y" for x
proof
have "\<And>A. Y \<subseteq> A \<Longrightarrow> x \<in> A"
using that by blast
moreover have "(x, lub Y cl) \<in> r"
using that by (simp add: Y_subset_A lub_upper)
ultimately show "(x, f (lub Y cl)) \<in> r"
by (metis (no_types) Tarski.Y_ss Tarski_axioms Y_subset_A fix_imp_eq lubY_in_A monotoneE monotone_f)
qed auto
qed
lemma intY1_subset: "intY1 \<subseteq> A"
unfolding intY1_def using Top_in_lattice interval_subset lubY_in_A by auto
lemmas intY1_elem = intY1_subset [THEN subsetD]
lemma intY1_f_closed:
assumes "x \<in> intY1" shows "f x \<in> intY1"
proof (simp add: intY1_def interval_def, rule conjI)
show "(lub Y cl, f x) \<in> r"
using assms intY1_elem interval_imp_mem lubY_in_A unfolding intY1_def
using lubY_le_flubY monotoneE monotone_f po.transE by blast
then show "(f x, Top cl) \<in> r"
by (meson PO_imp_refl_on Top_prop refl_onD2)
qed
lemma intY1_mono: "monotone (\<lambda> x \<in> intY1. f x) intY1 (induced intY1 r)"
apply (auto simp add: monotone_def induced_def intY1_f_closed)
apply (blast intro: intY1_elem monotone_f [THEN monotoneE])
done
lemma intY1_is_cl: "\<lparr>pset = intY1, order = induced intY1 r\<rparr> \<in> CompleteLattice"
unfolding intY1_def
by (simp add: Top_in_lattice Top_intv_not_empty interv_is_compl_latt lubY_in_A)
lemma v_in_P: "v \<in> P"
proof -
have "v \<in> fix (restrict f intY1) intY1"
unfolding v_def
apply (rule CLF.glbH_is_fixp
[OF CLF.intro, unfolded CLF_set_def, of "\<lparr>pset = intY1, order = induced intY1 r\<rparr>", simplified])
using intY1_f_closed intY1_is_cl intY1_mono apply blast+
done
then show ?thesis
unfolding P_def
by (meson fixf_subset intY1_subset)
qed
lemma z_in_interval: "\<lbrakk>z \<in> P; \<forall>y\<in>Y. (y, z) \<in> induced P r\<rbrakk> \<Longrightarrow> z \<in> intY1"
unfolding intY1_def P_def
by (meson Top_prop Y_subset_A fix_subset in_mono indE intervalI lub_least)
lemma tarski_full_lemma: "\<exists>L. isLub Y \<lparr>pset = P, order = induced P r\<rparr> L"
proof
have "(y, v) \<in> induced P r" if "y \<in> Y" for y
proof -
have "(y, lub Y cl) \<in> r"
by (simp add: Y_subset_A lub_upper that)
moreover have "(lub Y cl, v) \<in> r"
by (metis (no_types, lifting) CL.glb_in_lattice CL.intro intY1_def intY1_is_cl interval_imp_mem lub_dual_glb mem_Collect_eq select_convs(1) subsetI v_def)
ultimately have "(y, v) \<in> r"
using po.transE by blast
then show ?thesis
using Y_ss indI that v_in_P by auto
qed
moreover have "(v, z) \<in> induced P r" if "z \<in> P" "\<forall>y\<in>Y. (y, z) \<in> induced P r" for z
proof (rule indI)
have "((\<lambda>x \<in> intY1. f x) z, z) \<in> induced intY1 r"
by (metis P_def fix_imp_eq in_mono indI intY1_subset reflE restrict_apply' that z_in_interval)
then show "(v, z) \<in> r"
by (metis (no_types, lifting) CL.glb_lower CL_def indE intY1_is_cl intY1_subset mem_Collect_eq select_convs(1,2) subsetI that v_def z_in_interval)
qed (auto simp: that v_in_P)
ultimately
show "isLub Y \<lparr>pset = P, order = induced P r\<rparr> v"
by (simp add: isLub_def v_in_P)
qed
end
lemma CompleteLatticeI_simp:
"\<lbrakk>po \<in> PartialOrder; \<And>S. S \<subseteq> A \<Longrightarrow> \<exists>L. isLub S po L; A = pset po\<rbrakk> \<Longrightarrow> po \<in> CompleteLattice"
by (metis CompleteLatticeI Rdual)
theorem (in CLF) Tarski_full: "\<lparr>pset = P, order = induced P r\<rparr> \<in> CompleteLattice"
proof (intro CompleteLatticeI_simp allI impI)
show "\<lparr>pset = P, order = induced P r\<rparr> \<in> PartialOrder"
by (simp add: fixf_po)
show "\<And>S. S \<subseteq> P \<Longrightarrow> \<exists>L. isLub S \<lparr>pset = P, order = induced P r\<rparr> L"
unfolding P_def A_def r_def
proof (rule Tarski.tarski_full_lemma [OF Tarski.intro [OF _ Tarski_axioms.intro]])
show "CLF cl f" ..
qed
qed auto
end
|
Formal statement is: lemma LIM_equal2: fixes f g :: "'a::real_normed_vector \<Rightarrow> 'b::topological_space" assumes "0 < R" and "\<And>x. x \<noteq> a \<Longrightarrow> norm (x - a) < R \<Longrightarrow> f x = g x" shows "g \<midarrow>a\<rightarrow> l \<Longrightarrow> f \<midarrow>a\<rightarrow> l" Informal statement is: Suppose $f$ and $g$ are functions from a real normed vector space to a topological space $X$ that agree at every point $x \neq a$ with $\|x - a\| < R$, for some $R > 0$. If $g$ converges to $l$ at $a$, then $f$ converges to $l$ at $a$. |
(** Cut Elimination for Focused Linear Logic
This file proves cut-elimination for the triadic system of linear
logic. The proof uses five cut rules dealing with the negative and
positive phases of proofs (see [CutElimBase]).
It is assumed that the theory only produces well-formed LL formulas
(see [TheoryIsFormula]).
*)
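(** As a reading aid (a sketch only, phrased with the sequent notations introduced
below), the principal cut rule proved in this file has the shape: from
[i |--- B; M ++ [C]; (> L1)] and [j |--- D; N; (> C^ :: L2)], where the context [BD]
shares the classical (unbounded) part of [B] and [D] and splits their linear parts,
one obtains [|-- BD; M ++ N; (> L1 ++ L2)]. This is the first conjunct of
[CutElimBase], [CutH] and [CutW]; the remaining conjuncts cover the positive phase
and the exponential cuts on [a ! A^]. *)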
Require Export MMLL.Misc.Hybrid.
Require Export MMLL.SL.FLLTactics.
Require Import Lia.
Require Import MMLL.Misc.Permutations.
Require Import FunInd.
Require Import Coq.Program.Equality.
Require Export MMLL.SL.InvPositivePhase.
Export ListNotations.
Export LLNotations.
Set Implicit Arguments.
Section CutElimination.
Context `{SI : Signature}.
Context `{OLS: OLSig}.
Hint Constructors isFormula Remove seqN IsPositiveAtom : core .
Variable theory : oo -> Prop .
Notation " n '|---' B ';' L ';' X " := (seqN theory n B L X) (at level 80).
Notation " '|--' B ';' L ';' X " := (seq theory B L X) (at level 80).
Ltac simplSet :=
repeat
match goal with
| [H1: SetK ?i ?K, H2: SetU ?K |- _] =>
rewrite cxtDestruct in H1; rewrite (SetU_then_empty H2) in H1
| [H1: SetK4 ?i ?K, H2: SetU ?K |- _] =>
rewrite cxtDestruct in H1; rewrite (SetU_then_empty H2) in H1
end;CleanContext.
(** |-- B; []; (> [perp A]) *)
Lemma substContext n BD M1 M2 M X :
Permutation (getU BD) M1 ->
Permutation (getL BD) M2 ->
n |--- BD; M; X -> n |--- M1++M2; M; X.
Proof.
intros.
rewrite cxtDestruct in H1.
rewrite H in H1.
rewrite H0 in H1.
assumption.
Qed.
Lemma substContext' BD M1 M2 M X :
Permutation (getU BD) M1 ->
Permutation (getL BD) M2 ->
|-- M1++M2; M; X -> |-- BD ; M; X.
Proof.
intros.
rewrite cxtDestruct.
rewrite H.
rewrite H0.
assumption.
Qed.
Lemma substContext1 n B M X C:
n |--- B++C; M; X -> n |--- (getU B ++ getL B) ++ C ; M; X.
Proof.
intros.
rewrite <- cxtDestruct;auto.
Qed.
Lemma substContext1' B M X C:
|-- B++C; M; X -> |-- (getU B ++ getL B) ++ C ; M; X.
Proof.
intros.
rewrite <- cxtDestruct;auto.
Qed.
Ltac substCon :=
repeat rewrite app_assoc_reverse;
match goal with
| [ |- context[getU ?B ++ ?C ++ getL ?B] ] => LLPerm ((getU B ++ getL B) ++ C)
| [ |- context[getU ?B ++ ?C1 ++ getL ?B ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C1 ++ getU ?B ++ ?C2 ++ getL ?B] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C ++ getU ?B ++ getL ?B] ] => LLPerm ((getU B ++ getL B) ++ C)
| [ |- context[?C1 ++ getU ?B ++ getL ?B ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C1 ++ getU ?B ++ ?C2 ++ getL ?B ++ ?C3] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2 ++ C3)
| [ |- context[?C1 ++ (getU ?B ++ getL ?B) ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[getL ?B ++ ?C ++ getU ?B] ] => LLPerm ((getU B ++ getL B) ++ C)
| [ |- context[getL ?B ++ ?C1 ++ getU ?B ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C1 ++ getL ?B ++ ?C2 ++ getU ?B] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C ++ getL ?B ++ getU ?B] ] => LLPerm ((getU B ++ getL B) ++ C)
| [ |- context[?C1 ++ getL ?B ++ getU ?B ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
| [ |- context[?C1 ++ getL ?B ++ ?C2 ++ getU ?B ++ ?C3] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2 ++ C3)
| [ |- context[?C1 ++ (getL ?B ++ getU ?B) ++ ?C2] ] => LLPerm ((getU B ++ getL B) ++ C1 ++ C2)
end;try apply substContext1;try apply substContext1'.
Ltac InSet :=
intuition; try
match goal with
| [H: In ?F ?X |- In ?F ?Y] =>
apply in_or_app;right;InSet
end.
Lemma CutK4BaseInit n C P a : a <> loc -> SetK4 a C -> n >= length C + 1 ->
n - length C - 1 |--- PlusT C; []; (> [P]) ->
S n |--- PlusT C; []; (>> (plust a) ! P).
Proof with sauto.
intros.
createWorld.
apply plust_loc_diff...
eapply @GenK4Rel with (C4:=PlusT C) (CK:=[]) (CN:=[])...
apply plust_loc_diff...
apply SetK4PlusT...
autounfold.
rewrite map_length...
CleanContext...
autounfold.
rewrite map_length...
Qed.
Hypothesis TheoryIsFormula: forall P, theory P -> isFormula P.
Theorem CutElimBase a C dualC A dualA B D BD L L1 L2 L3 S1 S2 M N P:
dualC = dual C ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(L = L1++L2 -> 0 |--- B; M ++ [C]; (> L1) -> 0 |--- D; N; (> dualC::L2) -> |-- BD; M ++ N; (> L)) /\
(0 |--- B; M; (> C :: L) -> 0 |--- D; N; (>> dualC) -> |-- BD; M ++ N; (> L)) /\
(L = (S1++S2)++L3 -> 0 |--- B; M; (> S1++[C]++S2) -> 0 |--- D; N; (> dual C::L3) -> |-- BD; M ++ N; (> L) ) /\
(dualA = A ^ ->
dualC = a ! dualA -> L = [P] ->
0 |--- B ++ [(a,A)] ; M; (>> P) -> 0 |--- D; []; (>> a ! dualA) -> |-- BD; M; (> [P])) /\
(dualA = A ^ ->
dualC = a ! dualA ->
0 |--- B ++ [(a,A)] ; M; (> L) -> 0 |--- D; []; (>> a ! dualA) -> |-- BD; M; (> L)).
Proof with sauto;solveLL.
intros CDual.
split;[intros
|split;[intros
|split;[intros
|split;intros]]].
* inversion H3...
rewrite app_normalize_2...
* inversion H2...
inversion H3.
* inversion H3...
apply ListConsApp' in H9...
inversion H4...
do 2 rewrite app_normalize_2...
* inversion H6...
* inversion H5...
Qed.
Definition CutW (w: nat) :=
forall a m i j C dualC A dualA P M N L L1 L2 L3 S1 S2 BD B D,
m <= w ->
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL N ->
isFormulaL L ->
isFormula C ->
isFormula dualC ->
dualC = C ^ ->
complexity C = m ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(L = L1++L2 -> i |--- B; M ++ [C]; (> L1) -> j |--- D; N; (> dualC::L2) -> |-- BD; M ++ N; (> L)) /\
(i |--- B; M; (> C :: L) -> j |--- D; N; (>> dualC) -> |-- BD; M ++ N; (> L)) /\
(L = (S1++S2)++L3 -> i |--- B; M; (> S1++[C]++S2) -> j |--- D; N; (> dual C::L3) -> |-- BD; M ++ N; (> L) ) /\
(dualA = A ^ ->
dualC = a ! dualA -> L = [P] ->
i |--- B ++ [(a,A)] ; M; (>> P) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> [P])) /\
(dualA = A ^ ->
dualC = a ! dualA ->
i |--- B ++ [(a,A)] ; M; (> L) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> L)).
Definition CutH (w h: nat) :=
forall a m i j C dualC A dualA P M N L L1 L2 L3 S1 S2 BD B D,
m <= h ->
m = i + j ->
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL N ->
isFormulaL L ->
isFormula C ->
isFormula dualC ->
dualC = C ^ ->
complexity C = S w ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(L = L1++L2 -> i |--- B; M ++ [C]; (> L1) -> j |--- D; N; (> dualC::L2) -> |-- BD; M ++ N; (> L)) /\
(i |--- B; M; (> C :: L) -> j |--- D; N; (>> dualC) -> |-- BD; M ++ N; (> L)) /\
(L = (S1++S2)++L3 -> i |--- B; M; (> S1++[C]++S2) -> j |--- D; N; (> dual C::L3) -> |-- BD; M ++ N; (> L) ) /\
(dualA = A ^ ->
dualC = a ! dualA -> L = [P] ->
i |--- B ++ [(a,A)] ; M; (>> P) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> [P])) /\
(dualA = A ^ ->
dualC = a ! dualA ->
i |--- B ++ [(a,A)] ; M; (> L) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> L)).
Theorem CutUPLStar i j w h C L L1 L2 M N BD B D : CutH w h -> complexity C = S w -> S h = i + j ->
isFormulaL (second BD) -> isFormulaL M -> isFormulaL N -> isFormulaL L -> isFormula C -> isFormula (C^) -> L = L1 ++ L2 ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
i |--- B; M ++ [C]; (> L1) ->
j |--- D; N; (> C ^ :: L2) ->
|-- BD; M ++ N; (> L).
Proof with sauto;solveLL.
intros CH compC hH isFBD isFM isFN isFL isFC isFDC eqH HP1 HP2 HP3 Hi Hj.
subst.
assert(isFormulaL L1 /\ isFormulaL L2).
split;SLSolve.
assert(isFormulaL (second D)) by SLSolve.
assert(isFormulaL (second B)) by SLSolve.
CleanContext.
clear isFL.
rename H0 into isFD.
rename H1 into isFB.
rename H2 into isFL1.
rename H3 into isFL2.
inversion Hi...
* rewrite <- app_comm_cons...
* rewrite <- app_comm_cons...
assert(n |--- B; M ++ [C]; (> M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> M0++L2)) as Cut.
eapply CH...
SLSolve.
inversion isFL1...
apply Cut...
* rewrite <- app_comm_cons...
assert(n |--- B; M ++ [C]; (> F :: G :: M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> F :: G :: M0++L2)) as Cut.
eapply CH...
inversion isFL1...
SLSolve.
apply Cut...
* rewrite <- app_comm_cons...
assert(n |--- B; M ++ [C]; (> F :: M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> F :: M0++L2)) as CutF.
eapply CH...
inversion isFL1...
SLSolve.
apply CutF...
assert(n |--- B; M ++ [C]; (> G :: M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> G :: M0++L2)) as CutG.
eapply CH...
inversion isFL1...
SLSolve.
apply CutG...
* rewrite <- app_comm_cons...
destruct (uDec i0).
- assert(n |--- B ++ [(i0, F)]; M ++ [C]; (> M0) ->
j |--- D ++ [(i0, F)]; N; (> (dual C)::L2) ->
|-- BD ++ [(i0, F)]; M ++ N; (> M0++L2)) as Cut.
eapply CH...
inversion isFL1...
SLSolve.
inversion H1...
SLSolve.
inversion isFL1...
CleanContext.
rewrite HP1...
CleanContext.
rewrite HP2...
CleanContext.
rewrite Permutation_cons_append.
apply Cut...
LLExact H3.
apply weakeningGenN_rev...
- assert(n |--- B ++ [(i0, F)]; M ++ [C]; (> M0) ->
j |--- D ; N; (> (dual C)::L2) ->
|-- BD ++ [(i0, F)]; M ++ N; (> M0++L2)) as Cut.
eapply CH...
inversion isFL1...
SLSolve.
inversion H1...
SLSolve.
inversion isFL1...
SLSolve.
CleanContext.
SLSolve.
CleanContext.
SLSolve.
CleanContext.
rewrite HP3...
rewrite Permutation_cons_append.
apply Cut...
LLExact H3.
* rewrite <- app_comm_cons...
assert(n |--- B; (M ++ [F]) ++ [C]; (> M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M ++ [F]) ++ N; (> M0++L2)) as Cut.
eapply CH...
inversion isFL1...
SLSolve.
inversion isFL1...
SLSolve.
LLPerm((M ++ [F]) ++ N).
apply Cut...
LLExact H4.
* apply Remove_Permute in H1...
checkPermutationCases H1.
2:{ inversion H1...
rewrite H2.
assert(j |--- D; N; (> (dual C)::L2) ->
n |--- B; L'; (>> C) ->
|-- BD; N++L'; (> L2)) as Cut.
eapply CH with (m:=n + j) ...
lia.
SLSolve.
rewrite <- ng_involutive...
rewrite DualComplexity in compC...
rewrite HP3...
LLPerm(N ++ L')... }
destruct(PositiveOrRelease F).
2:{ inversion H5;CleanContext...
rewrite H1.
rewrite <- app_comm_cons.
LLPerm((x++N)++[F]).
eapply UpExtensionInv'...
eapply EquivUpArrow2 with (L:=[F] ++ L2)...
SLSolve. rewrite H1 in isFM. inversion isFM...
assert(S n0 |--- B; x++ [C]; (> [F]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; x ++ N; (> [F]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
rewrite H1 in isFM. inversion isFM...
SLSolve.
rewrite H1 in isFM. inversion isFM...
apply Cut...
rewrite <- H2 in H9.
LLExact H9. }
inversion H5...
{
rewrite (simplUnb _ D HP1 HP2 HP3 H8).
inversion Hj...
apply seqNtoSeq in H9... }
{
rewrite H4 in H2.
checkPermutationCases H2.
-
destruct(PositiveOrRelease F0).
{ (* first *)
assert(S n0 |--- B0; (F0::x0)++[C]; (> [])).
decide1 F0...
inversion H2...
rewrite <- H3...
rewrite H1.
rewrite <- app_comm_cons.
rewrite Permutation_cons_append.
apply TensorComm'.
rewrite <- H10.
LLPerm(G**F0::N0++(x0++N)).
change L2 with ([]++L2).
eapply @InvTensor with (B:=D0) (D:=getU D ++ getL D ++ getL B0)...
CleanContext.
CleanContext.
rewrite HP3...
rewrite H8...
apply Derivation1.
apply seqNtoSeq in H13...
assert(isFormulaL (second B)) by SLSolve.
assert(isFormulaL (second B0)) by SLSolve.
assert(S n0 |--- B0; (F0::x0) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; (F0::x0) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
SLSolve;SLSolve.
rewrite H1 in isFM.
inversion isFM...
SLSolve.
SLSolve.
CleanContext.
apply UpExtension'.
inversion H2...
LLPerm((F0 :: x0) ++ N)... }
{ (* second *)
inversion H9;CleanContext...
rewrite H1.
rewrite <- app_comm_cons.
rewrite Permutation_cons_append.
apply TensorComm'.
rewrite <- H10.
LLPerm(G**F0::N0++(x0++N)).
change L2 with ([]++L2).
eapply @InvTensor with (B:=D0) (D:=getU D ++ getL D ++ getL B0)...
CleanContext.
CleanContext.
rewrite HP3...
rewrite H8...
apply Derivation1.
apply seqNtoSeq in H13...
assert(isFormulaL (second B0)) by SLSolve.
assert(n |--- B0; x0 ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; x0 ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve;SLSolve.
rewrite H1 in isFM.
inversion isFM...
rewrite <- H10 in H15.
inversion H15...
SLSolve.
SLSolve.
rewrite H1 in isFM.
inversion isFM...
inversion H14...
CleanContext.
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
srewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H14...
apply Cut...
rewrite <- H3... }
-
destruct(PositiveOrRelease G).
{ (* first *)
assert(S n0 |--- D0; (G::x0)++[C]; (> [])).
decide1 G...
inversion H2...
rewrite <- H3...
rewrite H1.
rewrite <- H10.
LLPerm(F0**G::M0++(x0++N)).
change L2 with ([]++L2).
eapply @InvTensor with (B:=B0) (D:=getU D ++ getL D ++ getL D0)...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H8...
apply Derivation1.
apply seqNtoSeq in H9...
assert(S n0 |--- D0; (G::x0) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; (G::x0) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
assert(isFormulaL (second D0)) by SLSolve.
rewrite app_assoc.
SLSolve;SLSolve.
rewrite H1 in isFM.
inversion isFM...
SLSolve.
rewrite <- H10 in H16.
inversion H16...
SLSolve.
CleanContext.
apply UpExtension'.
inversion H2...
LLPerm((G :: x0) ++ N)... }
{ (* second *)
inversion H13;CleanContext...
rewrite H1.
rewrite <- H10.
LLPerm(F0**G::M0++(x0++N)).
change L2 with ([]++L2).
eapply @InvTensor with (B:=B0) (D:=getU D ++ getL D ++ getL D0)...
CleanContext.
CleanContext.
CleanContext.
rewrite HP3...
rewrite H8...
apply Derivation1.
apply seqNtoSeq in H9...
assert(n |--- D0; x0++ [C]; (> [G ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; x0 ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n + j)...
assert(isFormulaL (second D0)) by SLSolve.
rewrite app_assoc.
SLSolve;SLSolve.
rewrite H1 in isFM.
inversion isFM...
rewrite <- H10 in H14.
inversion H14...
SLSolve.
SLSolve.
rewrite H1 in isFM.
inversion isFM...
inversion H11...
CleanContext.
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H11...
apply Cut...
rewrite <- H3... }
}
- destruct(PositiveOrRelease F0).
{ assert ( (S n0) |--- B;(x ++ [F0]) ++ [C]; (> [])).
LLPerm (F0::(x++[C])).
decide1 F0. inversion H3...
rewrite H2...
rewrite H1.
rewrite <- app_comm_cons.
apply InvPlus.
assert((S n0) |--- B; (x ++ [F0]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (x ++ [F0]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=(S n0) + j)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H9...
apply UpExtension'.
inversion H3...
LLPerm ( (x ++ [F0]) ++ N)... }
{ inversion H8;CleanContext...
rewrite H1.
rewrite <- app_comm_cons.
apply InvPlus.
assert(n |--- B; x ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; x ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:= n + j)...
rewrite H1 in isFM.
inversion isFM...
rewrite H1 in isFM.
inversion isFM...
SLSolve.
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H4...
apply Cut...
rewrite H2...
}
- destruct(PositiveOrRelease G).
{ assert ( (S n0) |--- B;(x ++ [G]) ++ [C]; (> [])).
LLPerm (G::(x++[C])).
decide1 G. inversion H3...
rewrite H2...
rewrite H1.
rewrite <- app_comm_cons.
apply InvPlusComm.
assert((S n0) |--- B; (x ++ [G]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (x ++ [G]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=(S n0) + j)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H9...
apply UpExtension'.
inversion H3...
LLPerm ( (x ++ [G]) ++ N)... }
{ inversion H8;CleanContext...
rewrite H1.
rewrite <- app_comm_cons.
apply InvPlusComm.
assert(n |--- B; x ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; x ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:= n + j)...
rewrite H1 in isFM.
inversion isFM...
rewrite H1 in isFM.
inversion isFM...
SLSolve.
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H4...
apply Cut...
rewrite H2...
}
- apply PositiveNotRelease in H. contradiction.
- destruct(PositiveOrRelease (FX t)).
{ assert ( (S n0) |--- B;(x ++ [FX t]) ++ [C]; (> [])).
LLPerm ((FX t)::(x++[C])).
decide1 (FX t). inversion H3...
rewrite H2...
rewrite H1.
rewrite <- app_comm_cons.
apply @InvEx with (t:=t)...
assert((S n0) |--- B; (x ++ [FX t]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (x ++ [FX t]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=(S n0) + j)...
rewrite H1 in isFM.
inversion isFM...
SLSolve.
inversion H11...
apply UpExtension'.
inversion H3...
LLPerm ( (x ++ [FX t]) ++ N)... }
{
inversion H10;subst;auto;
try match goal with
[ H1: _ = FX t, H2: release (FX t) |- _] => rewrite <- H1 in H2;inversion H2
end.
rewrite H1.
rewrite <- app_comm_cons.
apply @InvEx with (t:=t)...
assert(n |--- B; x ++ [C]; (> [FX t]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; x ++ N; (> [FX t]++L2)) as Cut.
eapply CH with (m:= n + j)...
rewrite H1 in isFM.
inversion isFM...
rewrite H1 in isFM.
inversion isFM...
SLSolve.
eapply EquivUpArrow2 with (L:=[FX t] ++ L2)...
rewrite H1 in isFM.
inversion isFM...
SLSolve. inversion H9...
apply Cut...
rewrite H2... }
* destruct(PositiveOrRelease F).
2:{ inversion H7;CleanContext...
apply @AbsorptionClassic' with (i:=i0) (F:=F)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
eapply EquivUpArrow2 with (L:=[F] ++ L2)...
SLSolve.
SLSolve.
assert(n0 |--- B; M ++ [C]; (> [F]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [F]++L2)) as Cut.
eapply CH with (m:=n0 + j)...
SLSolve.
apply Cut... }
inversion H7...
- apply @AbsorptionClassic' with (i:=i0) (F:=perp A)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
rewrite cxtDestruct.
rewrite HP2. rewrite HP3.
rewrite (SetU_then_empty H9)...
rewrite <- cxtDestruct.
apply UpExtension'...
inversion Hj...
apply seqNtoSeq in H12...
LLExact H12.
- checkPermutationCases H5.
{ destruct(PositiveOrRelease F0).
{ (* first *)
rewrite <- H11.
assert (S n0 |--- B0; (F0 :: x) ++ [C]; (> [])).
LLPerm (F0 :: x ++ [C]).
decide1 F0. inversion H4...
rewrite <- H5...
LLPerm((x ++ N) ++ N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorC with (F:=F0) (G:=G) (i:=i0) (B:=getU D ++ getL D ++ getL B0) (D:=D0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H9...
apply UpExtension'.
inversion H4...
assert(S n0 |--- B0; (F0::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; (F0::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
assert(isFormulaL (second B0)) by SLSolve.
SLSolve;SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
LLPerm((F0 :: x) ++ N)...
apply Derivation1.
apply seqNtoSeq in H14... }
{ (* second *)
rewrite <- H11.
inversion H10;CleanContext...
LLPerm((x ++ N) ++ N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorC with (F:=F0) (G:=G) (i:=i0) (B:=getU D ++ getL D ++ getL B0) (D:=D0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H9...
assert(n |--- B0; x ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; x ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n+j)...
assert(isFormulaL (second B0)) by SLSolve.
SLSolve;SLSolve.
SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
CleanContext...
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve;SLSolve.
apply Cut...
rewrite <- H5...
apply Derivation1.
apply seqNtoSeq in H14... } }
{ destruct(PositiveOrRelease G).
{ (* first *)
rewrite <- H11.
assert (S n0 |--- D0; (G :: x) ++ [C]; (> [])).
LLPerm (G :: x ++ [C]).
decide1 G. inversion H4...
rewrite <- H5...
LLPerm(M0++(x ++ N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorC with (F:=F0) (G:=G) (i:=i0) (D:=getU D ++ getL D ++ getL D0) (B:=B0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H9...
apply Derivation1.
apply seqNtoSeq in H10...
apply UpExtension'.
inversion H4...
assert(S n0 |--- D0; (G::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; (G::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
assert(isFormulaL (second D0)) by SLSolve.
SLSolve;SLSolve.
rewrite <- H11 in isFM.
SLSolve. SLSolve.
CleanContext...
LLPerm((G :: x) ++ N)... }
{ (* second *)
rewrite <- H11.
inversion H14;CleanContext...
LLPerm(M0++(x ++ N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorC with (F:=F0) (G:=G) (i:=i0) (D:=getU D ++ getL D ++ getL D0) (B:=B0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H9...
apply Derivation1.
apply seqNtoSeq in H10...
assert(n |--- D0; x ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; x ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n+j)...
assert(isFormulaL (second D0)) by SLSolve.
SLSolve;SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve.
apply Cut...
rewrite <- H5... } }
- destruct(PositiveOrRelease F0).
{ assert ( (S n0) |--- B;(M ++ [F0]) ++ [C]; (> [])).
LLPerm (F0::(M++[C])).
decide1 F0. inversion H4...
eapply @InvPlusC with (F:=F0) (G:=G) (i:=i0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
apply UpExtension'.
inversion H4...
assert(S n0 |--- B; (M ++ [F0]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[F0]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve.
SLSolve.
LLPerm( (M ++ [F0]) ++ N)... }
{ inversion H9;CleanContext...
eapply @InvPlusC with (F:=F0) (G:=G) (i:=i0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
assert( n |--- B; M ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve. SLSolve.
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve. SLSolve. }
- destruct(PositiveOrRelease G).
{ assert ( (S n0) |--- B;(M ++ [G]) ++ [C]; (> [])).
LLPerm (G::(M++[C])).
decide1 G. inversion H4...
eapply @InvPlusCComm with (F:=F0) (G:=G) (i:=i0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
apply UpExtension'.
inversion H4...
assert(S n0 |--- B; (M ++ [G]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[G]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve. SLSolve.
LLPerm( (M ++ [G]) ++ N)... }
{ inversion H9;CleanContext...
eapply @InvPlusCComm with (F:=F0) (G:=G) (i:=i0)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
assert( n |--- B; M ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve. SLSolve.
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve. }
- apply PositiveNotRelease in H. contradiction.
- destruct(PositiveOrRelease (FX t)).
{ assert ( (S n0) |--- B;(M ++ [FX t]) ++ [C]; (> [])).
LLPerm ((FX t)::(M++[C])).
decide1 (FX t). inversion H4...
eapply @InvExC with (i:=i0) (t:=t) (FX:=FX)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
apply UpExtension'.
inversion H4...
assert(S n0 |--- B; (M ++ [FX t]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[FX t]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve. SLSolve.
LLPerm( (M ++ [FX t]) ++ N)... }
{ inversion H11;subst;auto;
try match goal with
[ H1: _ = FX t, H2: release (FX t) |- _] => rewrite <- H1 in H2;inversion H2
end.
eapply @InvExC with (i:=i0) (t:=t) (FX:=FX)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
assert( n |--- B; M ++ [C]; (> [FX t]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [FX t]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve.
eapply EquivUpArrow2 with (L:=[FX t] ++ L2)...
SLSolve. SLSolve. }
* destruct(PositiveOrRelease F).
2:{ inversion H7;CleanContext...
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, F):: (getU B' ++ getL B') ++ getL D).
rewrite <- cxtDestruct.
eapply @AbsorptionLinear with (i:=i0) (F:=F) (B':=B'++getL D)...
eapply EquivUpArrow2 with (L:=[F] ++ L2)...
SLSolve.
SLSolve.
assert(n0 |--- B'; M ++ [C]; (> [F]) ->
j |--- getU B' ++ getL D; N; (> (dual C)::L2) ->
|-- B' ++ getL D; M ++ N; (> [F]++L2)) as Cut.
eapply CH with (m:=n0 + j)...
SLSolve.
srewrite H3 in isFB.
simpl in isFB.
SLSolve.
SLSolve.
SLSolve.
apply Cut...
rewrite <- HP1.
rewrite HP2.
rewrite <- cxtDestruct... }
inversion H7...
-
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, perp A):: (getU B' ++ getL D)).
eapply @AbsorptionLinear with (i:=i0) (F:=perp A) (B':=getU B'++getL D)...
rewrite <- HP1.
rewrite HP2.
rewrite <- cxtDestruct...
apply UpExtension'...
inversion Hj...
apply seqNtoSeq in H11...
LLExact H11.
-
checkPermutationCases H5.
{ destruct(PositiveOrRelease F0).
{ apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite <- H11.
LLPerm((i0, F0 ** G):: (getU B' ++ getL B') ++ getL D).
LLPerm ((x++N)++N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorL1 with (i:=i0) (F:=F0) (G:=G) (D:=D0)
(B:=(i0, F0 ** G):: (getU B' ++ getL B0++getL D)) (B':=(getU B' ++ getL B0++getL D));auto.
CleanContext...
CleanContext...
CleanContext...
rewrite H9...
apply UpExtension'... inversion H4...
assert(S n0 |--- B0; (F0 :: x) ++ [C]; (> [])).
decide1 F0. inversion H4...
rewrite <- H5...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(isFormulaL (second B0)) by SLSolve.
assert(S n0 |--- B0; (F0::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU B0 ++ getL B0 ++ getL D; (F0::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
SLSolve;SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
rewrite <- H6...
rewrite <- HP2...
CleanContext...
LLPerm((F0 :: x) ++ N)...
rewrite H6...
apply Derivation1.
apply seqNtoSeq in H14... }
{ inversion H10;CleanContext...
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite <- H11.
LLPerm((i0, F0 ** G):: (getU B' ++ getL B') ++ getL D).
LLPerm ((x++N)++N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorL1 with (i:=i0) (F:=F0) (G:=G) (D:=D0)
(B:=(i0, F0 ** G):: (getU B' ++ getL B0++getL D)) (B':=(getU B' ++ getL B0++getL D));auto.
CleanContext...
CleanContext...
CleanContext...
rewrite H9...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(isFormulaL (second B0)) by SLSolve.
assert( n |--- B0; x ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU B0 ++ getL B0 ++ getL D; x ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n+j)...
SLSolve;SLSolve.
SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
rewrite <- H6...
rewrite <- HP2...
rewrite H6...
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve. SLSolve.
apply Cut...
rewrite <- H5...
apply Derivation1.
apply seqNtoSeq in H14... } }
{ destruct(PositiveOrRelease G).
{ apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite <- H11.
LLPerm((i0, F0 ** G):: (getU B' ++ getL B') ++ getL D).
LLPerm (M0++(x++N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorL2 with (i:=i0) (F:=F0) (G:=G) (B:=B0)
(D:=(i0, F0 ** G):: (getU B' ++ getL D0++getL D)) (D':=(getU B' ++ getL D0++getL D));auto.
CleanContext...
CleanContext...
CleanContext...
rewrite H9...
apply Derivation1.
apply seqNtoSeq in H10...
apply UpExtension'... inversion H4...
assert(S n0 |--- D0; (G :: x) ++ [C]; (> [])).
decide1 G. inversion H4...
rewrite <- H5...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(isFormulaL (second D0)) by SLSolve.
assert(S n0 |--- D0; (G::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D0 ++ getL D0 ++ getL D; (G::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
SLSolve;SLSolve.
rewrite <- H11 in isFM.
SLSolve. SLSolve.
CleanContext...
rewrite <- H8...
rewrite <- HP2...
CleanContext...
rewrite H8...
LLPerm((G :: x) ++ N)... }
{ inversion H14;CleanContext...
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite <- H11.
LLPerm((i0, F0 ** G):: (getU B' ++ getL B') ++ getL D).
LLPerm (M0++(x++N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorL2 with (i:=i0) (F:=F0) (G:=G) (B:=B0)
(D:=(i0, F0 ** G):: (getU B' ++ getL D0++getL D)) (D':=(getU B' ++ getL D0++getL D));auto.
CleanContext...
CleanContext...
CleanContext...
rewrite H9...
apply Derivation1.
apply seqNtoSeq in H10...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(isFormulaL (second D0)) by SLSolve.
assert( n |--- D0; x ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D0 ++ getL D0 ++ getL D; x ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n+j)...
SLSolve;SLSolve.
SLSolve.
rewrite <- H11 in isFM.
SLSolve.
SLSolve.
CleanContext...
rewrite <- H8...
rewrite <- HP2...
CleanContext...
rewrite H8...
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve.
apply Cut...
rewrite <- H5... } }
- destruct(PositiveOrRelease F0).
{ apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, F0 op G):: (getU B' ++ getL B') ++ getL D).
eapply @InvPlusL;eauto.
apply UpExtension'... inversion H4...
assert ( (S n0) |--- B';(M ++ [F0]) ++ [C]; (> [])).
LLPerm (F0::(M++[C])).
decide1 F0. inversion H4...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(S n0 |--- B'; (M ++ [F0]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; (M++[F0]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve;SLSolve.
SLSolve. SLSolve.
CleanContext...
LLPerm( (M ++ [F0]) ++ N)... }
{ inversion H9;CleanContext...
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, F0 op G):: (getU B' ++ getL B') ++ getL D).
eapply @InvPlusL;eauto.
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert( n |--- B'; M ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; M ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve;SLSolve.
SLSolve.
SLSolve.
CleanContext.
CleanContext.
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve. SLSolve. }
- destruct(PositiveOrRelease G).
{ apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, F0 op G):: (getU B' ++ getL B') ++ getL D).
eapply @InvPlusLComm;eauto.
apply UpExtension'... inversion H4...
assert ( (S n0) |--- B';(M ++ [G]) ++ [C]; (> [])).
LLPerm (G::(M++[C])).
decide1 G. inversion H4...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(S n0 |--- B'; (M ++ [G]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; (M++[G]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve;SLSolve.
SLSolve.
SLSolve. CleanContext.
CleanContext.
LLPerm( (M ++ [G]) ++ N)... }
{ inversion H9;CleanContext...
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, F0 op G):: (getU B' ++ getL B') ++ getL D).
eapply @InvPlusLComm;eauto.
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert( n |--- B'; M ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; M ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve;SLSolve.
SLSolve.
CleanContext.
CleanContext.
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve. }
- apply PositiveNotRelease in H. contradiction.
- destruct(PositiveOrRelease (FX t)).
{ apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, E{ FX}):: (getU B' ++ getL B') ++ getL D).
eapply @InvExL;eauto.
apply UpExtension'... inversion H4...
assert ( (S n0) |--- B';(M ++ [FX t]) ++ [C]; (> [])).
LLPerm ((FX t)::(M++[C])).
decide1 (FX t). inversion H4...
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert(S n0 |--- B'; (M ++ [FX t]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; (M++[FX t]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve;SLSolve.
SLSolve. SLSolve.
CleanContext.
CleanContext.
LLPerm( (M ++ [FX t]) ++ N)... }
{ inversion H11;subst;auto;
try match goal with
[ H1: _ = FX t, H2: release (FX t) |- _] => rewrite <- H1 in H2;inversion H2
end.
apply Remove_Permute in H3...
rewrite H3 in *.
CleanContext.
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
LLPerm((i0, E{ FX}):: (getU B' ++ getL B') ++ getL D).
eapply @InvExL;eauto.
assert(isFormulaL (second B')).
srewrite H3 in isFB.
SLSolve.
assert( n |--- B'; M ++ [C]; (> [FX t]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- (getU B' ++ getL B') ++ getL D; M ++ N; (> [FX t]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve;SLSolve.
SLSolve.
CleanContext.
CleanContext.
eapply EquivUpArrow2 with (L:=[FX t] ++ L2)... SLSolve. SLSolve. }
* destruct(PositiveOrRelease F).
2:{ inversion H5;CleanContext...
destruct (NegativeAtomDec F).
assert(False).
inversion H;subst...
contradiction.
apply @AbsorptionTheory with (F:=F)...
eapply EquivUpArrow2 with (L:=[F] ++ L2)...
SLSolve.
assert(n0 |--- B; M ++ [C]; (> [F]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [F]++L2)) as Cut.
eapply CH with (m:=n0 + j)...
apply Cut... }
inversion H5...
- eapply @AbsorptionPerp' with (A:=A)...
rewrite cxtDestruct.
rewrite HP2.
rewrite HP3.
rewrite (SetU_then_empty H7)...
rewrite <- cxtDestruct.
apply UpExtension'...
inversion Hj...
apply seqNtoSeq in H10...
LLExact H10.
- checkPermutationCases H3.
{ destruct(PositiveOrRelease F0).
{ (* first *)
rewrite <- H9.
assert (S n0 |--- B0; (F0 :: x) ++ [C]; (> [])).
LLPerm (F0 :: x ++ [C]).
decide1 F0. inversion H2...
rewrite <- H3...
LLPerm((x ++ N) ++ N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorT with (F:=F0) (G:=G) (B:=getU D ++ getL D ++ getL B0) (D:=D0)...
CleanContext...
rewrite HP3...
rewrite H7...
CleanContext...
apply UpExtension'.
inversion H2...
assert(isFormulaL (second B0)) by SLSolve.
assert(S n0 |--- B0; (F0::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; (F0::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
SLSolve;SLSolve.
rewrite <- H9 in isFM.
SLSolve.
SLSolve.
CleanContext...
LLPerm((F0 :: x) ++ N)...
apply Derivation1.
apply seqNtoSeq in H12... }
{ (* first *)
rewrite <- H9.
inversion H8;CleanContext...
LLPerm((x ++ N) ++ N0).
rewrite <- (app_nil_r L2).
eapply @InvTensorT with (F:=F0) (G:=G) (B:=getU D ++ getL D ++ getL B0) (D:=D0)...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H7...
assert(isFormulaL (second B0)) by SLSolve.
assert(n |--- B0; x ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL B0; x ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n+j)...
rewrite <- H9 in isFM.
SLSolve;SLSolve.
rewrite <- H9 in isFM.
SLSolve.
SLSolve.
CleanContext...
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve. SLSolve.
apply Cut...
rewrite <- H3...
apply Derivation1.
apply seqNtoSeq in H12... } }
{ destruct(PositiveOrRelease G).
{ (* first *)
rewrite <- H9.
assert (S n0 |--- D0; (G :: x) ++ [C]; (> [])).
LLPerm (G :: x ++ [C]).
decide1 G. inversion H2...
rewrite <- H3...
LLPerm(M0++(x ++ N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorT with (F:=F0) (G:=G) (D:=getU D ++ getL D ++ getL D0) (B:=B0)...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H7...
apply Derivation1.
apply seqNtoSeq in H8...
apply UpExtension'.
inversion H2...
assert(isFormulaL (second D0)) by SLSolve.
assert(S n0 |--- D0; (G::x) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; (G::x) ++ N; (> [ ]++L2)) as Cut.
eapply CH...
rewrite <- H9 in isFM.
SLSolve;SLSolve.
rewrite <- H9 in isFM.
SLSolve.
SLSolve.
CleanContext...
LLPerm((G :: x) ++ N)... }
{ (* first *)
rewrite <- H9.
inversion H12;CleanContext...
LLPerm(M0++(x ++ N)).
rewrite <- (app_nil_l L2).
eapply @InvTensorT with (F:=F0) (G:=G) (D:=getU D ++ getL D ++ getL D0) (B:=B0)...
CleanContext...
CleanContext...
rewrite HP3...
rewrite H7...
apply Derivation1.
apply seqNtoSeq in H8...
assert(isFormulaL (second D0)) by SLSolve.
assert(n |--- D0; x ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- getU D ++ getL D ++ getL D0; x ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n+j)...
rewrite <- H9 in isFM.
SLSolve;SLSolve.
rewrite <- H9 in isFM.
SLSolve.
SLSolve.
CleanContext...
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve.
apply Cut...
rewrite <- H3... } }
- destruct(PositiveOrRelease F0).
{ assert ( (S n0) |--- B;(M ++ [F0]) ++ [C]; (> [])).
LLPerm (F0::(M++[C])).
decide1 F0. inversion H2...
eapply @InvPlusT with (F:=F0) (G:=G)...
apply UpExtension'.
inversion H2...
assert(S n0 |--- B; (M ++ [F0]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[F0]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve. SLSolve.
LLPerm( (M ++ [F0]) ++ N)... }
{ inversion H7;CleanContext...
eapply @InvPlusT with (F:=F0) (G:=G)...
assert( n |--- B; M ++ [C]; (> [F0]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [F0]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve. SLSolve.
eapply EquivUpArrow2 with (L:=[F0] ++ L2)...
SLSolve. SLSolve. }
- destruct(PositiveOrRelease G).
{ assert ( (S n0) |--- B;(M ++ [G]) ++ [C]; (> [])).
LLPerm (G::(M++[C])).
decide1 G. inversion H2...
eapply @InvPlusTComm with (F:=F0) (G:=G)...
apply UpExtension'.
inversion H2...
assert(S n0 |--- B; (M ++ [G]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[G]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve. SLSolve.
LLPerm( (M ++ [G]) ++ N)... }
{ inversion H7;CleanContext...
eapply @InvPlusTComm with (F:=F0) (G:=G)...
assert( n |--- B; M ++ [C]; (> [G]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [G]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve.
eapply EquivUpArrow2 with (L:=[G] ++ L2)...
SLSolve. SLSolve. }
- apply PositiveNotRelease in H. contradiction.
- destruct(PositiveOrRelease (FX t)).
{ assert ( (S n0) |--- B;(M ++ [FX t]) ++ [C]; (> [])).
LLPerm ((FX t)::(M++[C])).
decide1 (FX t). inversion H2...
eapply @InvExT;eauto...
apply UpExtension'.
inversion H2...
assert(S n0 |--- B; (M ++ [FX t]) ++ [C]; (> [ ]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; (M++[FX t]) ++ N; (> [ ]++L2)) as Cut.
eapply CH with (m:=S n0 + j)...
SLSolve. SLSolve.
LLPerm( (M ++ [FX t]) ++ N)... }
{ inversion H9;subst;auto;
try match goal with
[ H1: _ = FX t, H2: release (FX t) |- _] => rewrite <- H1 in H2;inversion H2
end.
eapply @InvExT;eauto...
assert( n |--- B; M ++ [C]; (> [FX t]) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> [FX t]++L2)) as Cut.
eapply CH with (m:=n + j)...
SLSolve.
eapply EquivUpArrow2 with (L:=[FX t] ++ L2)...
SLSolve. SLSolve. }
* rewrite <- app_comm_cons...
assert(n |--- B; M ++ [C]; (> FX x :: M0) ->
j |--- D; N; (> (dual C)::L2) ->
|-- BD; M ++ N; (> FX x :: M0++L2)) as Cut.
eapply CH...
SLSolve. inversion H2... SLSolve.
apply H4 in properX...
Qed.
Theorem CutUPStar i j w h C L S1 S2 L3 M N BD B D : CutH w h -> complexity C = S w -> S h = i + j ->
isFormulaL (second BD) -> isFormulaL L -> isFormulaL M -> isFormulaL N -> isFormula C -> isFormula (C^) -> L = (S1++S2)++L3 ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
i |--- B; M; (> S1++[C]++S2) ->
j |--- D; N; (> dual C::L3) ->
|-- BD; M ++ N; (> L).
Proof with sauto;solveLL;try SLSolve.
intros CH compC hH isFBD isFL isFM isFN isFC isFDC eqL HP1 HP2 HP3 Hi Hj.
subst.
assert(isFormulaL (S1++S2) /\ isFormulaL L3).
split;SLSolve.
CleanContext.
assert(isFormulaL S1 /\ isFormulaL S2).
split;SLSolve.
assert(isFormulaL (second D))...
assert(isFormulaL (second B))...
clear isFL H0.
rename H2 into isFD.
rename H into isFB.
rename H3 into isFS1.
rename H4 into isFS2.
rename H1 into isFL3.
destruct S1.
* CleanContext.
destruct (PositiveOrRelease C).
- simpl in Hi.
assert(exists n, n < i /\ n |--- B; C::M; (> S2)).
inversion Hi;subst;inversion H;
exists n...
CleanContext.
assert(x |--- B; M ++ [C]; (> S2) ->
j |--- D; N; (> (dual C)::L3) ->
|-- BD; M ++ N; (> S2++L3)) as Cut.
eapply CH with (m:=x + j)...
apply Cut...
LLExact H2.
- assert(exists n, n < j /\ n |--- D; N++[dual C]; (> L3)).
apply ReleaseDualPositive in H.
inversion Hj; match goal with
[ H: _= dual ?C , H2: positiveFormula (dual ?C) |- _ ] => rewrite <- H in H2
end;try solve [inversion H];exists n...
CleanContext. LLExact H6. CleanContext.
assert(x |--- D; N ++ [dual C]; (> L3) ->
i |--- B; M; (> C::S2) ->
|-- BD; N++M; (> L3++S2)) as Cut.
eapply CH with (m:=x + i)...
rewrite <- ng_involutive...
rewrite DualComplexity.
rewrite <- ng_involutive...
rewrite HP3...
LLPerm(N++M).
eapply @EquivUpArrow2 with (L:=L3 ++ S2)...
* repeat rewrite <- app_comm_cons in Hi.
CleanContext.
repeat rewrite <- app_comm_cons.
inversion Hi...
- assert(n |--- B; M; (> S1 ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; M ++ N; (> (S1 ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j) (C:=C) (dualC:=C ^)...
SLSolve.
apply Cut...
- assert(n |--- B; M; (> (F :: G :: S1) ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; M ++ N; (> ((F :: G :: S1) ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j) (C:=C) (dualC:=C ^)...
inversion H1...
inversion H1...
SLSolve.
apply Cut...
- assert(n |--- B; M; (> (F :: S1) ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; M ++ N; (> ((F :: S1) ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion H1...
SLSolve.
apply Cut...
- assert(n |--- B; M; (> (G :: S1) ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; M ++ N; (> ((G :: S1) ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion H1...
SLSolve.
apply Cut...
- destruct (uDec i0) .
-- assert(n |--- (i0, F)::B; M; (> S1 ++ [C] ++ S2) ->
j |--- (i0, F)::D; N; (> C ^ :: L3) ->
|-- (i0, F)::BD; M ++ N; (> (S1++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion H1...
SLSolve.
rewrite HP1...
rewrite HP2...
rewrite HP3...
apply Cut...
apply weakeningN...
-- assert(n |--- (i0, F)::B; M; (> S1 ++ [C] ++ S2) ->
j |--- D ; N; (> C ^ :: L3) ->
|-- (i0, F)::BD; M ++ N; (> (S1++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion H1...
SLSolve.
rewrite HP1...
rewrite HP2...
rewrite HP3...
apply Cut...
- assert(n |--- B; (o::M) ; (> S1 ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; (o::M) ++ N; (> (S1 ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion isFS1...
SLSolve.
rewrite app_comm_cons...
- assert(n |--- B; M ; (> (FX x :: S1) ++ [C] ++ S2) ->
j |--- D; N; (> C ^ :: L3) ->
|-- BD; M ++ N; (> ((FX x :: S1) ++ S2) ++ L3)) as Cut.
eapply CH with (m:=n + j)(C:=C) (dualC:=C ^)...
inversion H1...
SLSolve.
apply Cut...
apply H5...
Qed.
Theorem CutUP i j w h C L M N BD B D : CutH w h -> CutW w -> complexity C = S w -> S h = i + j ->
isFormulaL (second BD) -> isFormulaL L -> isFormulaL M -> isFormulaL N -> isFormula C -> isFormula (C^) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
i |--- B; M; (> C::L) ->
j |--- D; N; (>> dual C) ->
|-- BD; M ++ N; (> L).
Proof with sauto;solveLL.
intros CH CW compC hH isFBD isFL isFM isFN isFC isFDC HP1 HP2 HP3 Hi Hj.
assert(isFormulaL (second D)) by SLSolve.
assert(isFormulaL (second B)) by SLSolve.
rename H into isFD.
rename H0 into isFB.
inversion Hi;subst.
* inversion Hj...
CleanContext.
* inversion Hj; CleanContext...
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite <- cxtDestruct.
apply seqNtoSeq in H3;auto.
* inversion Hj; CleanContext...
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite H5.
assert(isFormulaL (second D0)) by SLSolve.
assert(isFormulaL (second B0)) by SLSolve.
assert( n |--- B; M; (> F :: G :: L) ->
n0 |--- B0; M0; (>> F ^) ->
|-- getU D0 ++ getL B ++ getL B0; M ++ M0; (> G :: L)) as HcutF.
eapply CW with (m:=complexity F)...
inversion compC...
SLSolve;SLSolve.
SLSolve. SLSolve.
inversion isFC...
inversion isFDC...
CleanContext...
rewrite <- H4...
rewrite <- HP2...
CleanContext...
CleanContext...
apply HcutF in H9;auto.
apply seqtoSeqN in H9.
destruct H9.
assert(isFormulaL (second B0)) by SLSolve.
assert(isFormulaL (second D0)) by SLSolve.
assert( x |--- getU D0 ++ getL B ++ getL B0; M ++ M0; (> G :: L) ->
n0 |--- D0; N0; (>> G ^) ->
|-- getU B ++ getL B ++ getL B0 ++ getL D0; (M ++ M0) ++ N0; > L) as HcutG.
eapply CW with (m:=complexity G);sauto.
inversion compC...
SLSolve;SLSolve.
SLSolve;SLSolve.
SLSolve. inversion isFC...
inversion isFDC...
CleanContext...
CleanContext...
CleanContext...
rewrite H1.
LLPerm((M ++ M0) ++ N0)...
* inversion Hj; CleanContext...
assert( n |--- B; M; (> F :: L) ->
n0 |--- D; N; (>> F ^) ->
|-- BD; M ++ N; (> L)) as HcutF.
eapply CW with (m:=complexity F)...
inversion compC...
inversion isFC...
inversion isFDC...
apply HcutF ...
assert( n |--- B; M; (> G :: L) ->
n0 |--- D; N; (>> G ^) ->
|-- BD; M ++ N; (> L)) as HcutG.
eapply CW with (m:=complexity G)...
inversion compC...
inversion isFC...
inversion isFDC...
apply HcutG...
* assert(N=[]).
inversion Hj...
subst.
assert( n |--- B ++ [(i0,F)]; M ; (> L) ->
j |--- D; []; (>> i0 ! F ^) ->
|-- BD; M ; (> L)) as UCCut.
eapply CH with (m:=h) (C:=i0 ? F) (dualC:=i0 ! F^);eauto.
rewrite app_nil_r...
apply UCCut...
LLExact H3.
* apply NotAsynchronousPosAtoms in H4...
apply PositiveDualRelease in H.
inversion Hj;subst; try match goal with
[ H: _= dual ?C , H2: release (dual ?C) |- _ ] => rewrite <- H in H2
end;CleanContext.
assert( n |--- B; M ++ [C]; (> L) ->
n0 |--- D; N; (> [dual C]) ->
|-- BD; M ++ N; (> L++[])) as ULCut.
eapply CH with (m:=n+n0)...
CleanContext.
apply ULCut...
LLExact H5.
inversion H...
simpl in Hj.
inversion Hj...
rewrite (simplUnb BD _ HP2 HP1)...
apply seqNtoSeq in Hi.
inversion Hi...
LLExact H7.
rewrite HP3...
destruct (uDec i).
rewrite <- H7 in HP2.
rewrite <- H7 in HP3...
CleanContext.
rewrite cxtDestruct.
rewrite HP3.
rewrite HP2.
rewrite cxtDestruct in H5.
rewrite <- HP1 in H5.
rewrite HP2 in H5.
rewrite <- app_comm_cons in H5.
assert(n |--- (i, atom A) :: getU C ++ getL B;
(atom A)::M; (> L)).
LLExact H5.
eapply AbsorptionC in H0...
apply seqNtoSeq in H0.
LLExact H0.
rewrite <- H7 in HP2.
rewrite <- H7 in HP3...
CleanContext.
rewrite cxtDestruct.
rewrite HP3.
rewrite HP2.
rewrite cxtDestruct in H5.
rewrite <- HP1 in H5.
rewrite HP2 in H5.
assert(n |--- getU C ++ getL B;
(atom A)::M; (> L)).
LLExact H5.
eapply @AbsorptionL with (i:=i) in H0...
apply seqNtoSeq in H0.
LLExact H0.
inversion H1.
* inversion Hj;CleanContext...
assert( n |--- B; M; (> (FX t :: L)) ->
n0 |--- D; N; (>> (FX t) ^) ->
|-- BD; M++N; (> L)) as HCut.
eapply CW with (m:=complexity (FX (VAR con 0)));eauto...
inversion compC...
inversion isFC...
inversion isFDC...
inversion compC...
remember (VAR con 0).
assert(proper e).
rewrite Heqe.
constructor.
eapply ComplexityUniformEq...
apply HCut...
Qed.
Theorem CutK4SubCase (h n j w:nat) i a L B D BD P: CutH w h -> CutW w -> complexity P = w -> S h = S n + j -> i <> loc ->
isFormulaL (second BD) -> isFormulaL L -> isFormula P -> isFormula (dual P) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
tri_bangK4 theory n (B ++ [(a, P)]) i [] [] (> L) ->
j |--- D; []; (>> a ! P ^) -> tri_bangK4' theory BD i [] [] (> L).
Proof with sauto;solveF.
intros HC WC comP hH Hd isFBD isFL isFP isFPD UB UD LBD Hyp Hj.
assert(isFormulaL (second D)) by SLSolve.
assert(isFormulaL (second B)) by SLSolve.
rename H into isFD.
rename H0 into isFB.
apply InvSubExpPhase in Hyp;auto.
destruct Hyp as [C4 Hyp];
destruct Hyp as [CK Hyp];
destruct Hyp as [CN Hyp].
CleanContext.
assert(isFormulaL (second C4) /\ isFormulaL (second CK) /\ isFormulaL (second CN)).
assert(isFormulaL (second (C4++CK++CN))).
srewrite_reverse H.
change (Forall isFormula (second (B ++ [(a, P)]))) with (isFormulaL (second (B ++ [(a, P)]))).
SLSolve.
CleanContext;SLSolve.
CleanContext.
rename H6 into isFC4.
rename H8 into isF_K.
rename H9 into isFCN.
checkPermutationCases H.
{ (* P in C4 *)
rewrite <- Permutation_cons_append in H4.
inversion Hj...
{ rewrite H4 in H0...
assert(False).
apply locAlone in Hd.
apply Hd... left.
inversion H0...
contradiction. }
assert(lt i a /\ m4 a = true /\ SetK4 i x).
{ rewrite H4 in H0.
inversion H0... }
CleanContext.
finishExponential.
assert(isFormulaL (second CK4) /\ isFormulaL (second CN0)).
assert(isFormulaL (second (CK4++CN0))).
apply Permutation_map with (f:=snd) in H.
rewrite <- H;auto.
split;SLSolve.
CleanContext.
rename H15 into isF_K4.
rename H17 into isFCN0.
assert(SetK4 i CK4).
{ eapply @SetK4Trans with (i:=a)... }
assert(Permutation (getU B) (getU D)).
rewrite <- UB.
rewrite <- UD...
rewrite <- H6 in H15.
rewrite H in H15.
CleanContext.
change (getU CK4 ++ getU CN0) with (getU CK4 ++ [] ++ getU CN0) in H15.
eapply @destructClassicSet' with (a:=i) in H15;auto;SLSolve.
destruct H15 as [K_1 H15].
destruct H15 as [K_2 H15].
destruct H15 as [K_3 H15].
destruct H15 as [K4_1 H15].
destruct H15 as [K4_2 H15].
destruct H15 as [K4_3 H15].
destruct H15 as [N H15].
simpl in *.
CleanContext.
assert(Hd': S n0 |--- PlusT CK4; []; (>> (plust a) ! P^)).
{ apply CutK4BaseInit... }
rewrite cxtDestruct.
rewrite UD.
rewrite LBD.
rewrite H...
rewrite <- H6...
CleanContext.
eapply @GenK4Rel' with (C4:=getL x++CK4++K4_2) (CK:=CK) (CN:=N);sauto;SLSolve.
SLSolve.
apply SetK4Destruct in H12;sauto.
rewrite H18 in H15.
SLSolve.
assert(SetU (K4_3 ++ N)).
rewrite <- H21;SLSolve.
SLSolve.
assert(Hp: Permutation ((getU CK4 ++ getU CN0) ++ (getL x ++ getL CK) ++ getL CK4)
((getU CK4 ++ getL CK4) ++ (getL x ++ getL CK) ++ getU CN0)) by perm.
rewrite Hp. clear Hp.
rewrite <- cxtDestruct.
rewrite H23...
rewrite (cxtDestruct CK).
CleanContext.
rewrite H17...
assert(isFormulaL (second x)).
srewrite H4 in isFC4.
simpl in isFC4.
SLSolve.
assert(Hp:
(n - length (C4 ++ CK) - 1) |--- (PlusT (getL x ++ getU CK4 ++ K4_2) ++ Loc (getU CK)) ++ [(plust a,P)]; []; (> L ++ second (getL CK)) ->
S n0 |--- PlusT (CK4 ++ getU K4_2) ++ Loc (getU CK); []; (>> (plust a) ! P ^) ->
|-- PlusT (getL x ++ CK4 ++ K4_2) ++ Loc (getU CK) ; []; (> L ++ second (getL CK))).
eapply HC with (m:=n - length (C4 ++ CK) - 1 + S n0) (C:=(plust a) ? P) (dualC:=(plust a) ! P^);sauto.
lia. CleanContext.
SLSolve;
SLSolve;
SLSolve.
apply isFormulaL_getU in H15 .
srewrite H18 in H15.
rewrite secondApp in H15.
SLSolve.
apply isFormulaL_PlusT;auto.
SLSolve. SLSolve.
CleanContext.
SLSolve.
CleanContext.
rewrite PlusTgetU...
CleanContext.
rewrite PlusTgetU...
CleanContext.
CleanContext.
apply Hp... all: try clear Hp.
CleanContext.
rewrite H20...
CleanContext.
LLPerm((PlusT (getL x) ++ (PlusT K4_1 ++ PlusT K4_2) ++ [(plust a, P)] ++ Loc (getU CK)) ++ PlusT K4_3).
apply weakeningGenN_rev...
eapply exchangeCCN.
2:{ exact H5. }
rewrite H4.
CleanContext.
rewrite (cxtDestruct x).
CleanContext.
rewrite H18...
assert(SetU (K4_1 ++ K4_3)).
rewrite <- H20...
SLSolve.
apply SetUPlusT.
SLSolve.
CleanContext.
LLPerm(PlusT CK4++(PlusT (getU K4_2) ++ Loc (getU CK)) ).
apply weakeningGenN_rev...
SLSolve.
}
{ rewrite <- Permutation_cons_append in H4.
checkPermutationCases H4.
{ (* P in CK *)
inversion Hj...
{ rewrite H4 in H2.
assert(False).
apply locAlone in Hd.
apply Hd...
inversion H2... contradiction. }
destruct (uDec a).
{
apply InvSubExpPhaseU in H12;auto.
destruct H12 as [C4' H12].
destruct H12 as [CK' H12].
destruct H12 as [CN' H12].
CleanContext.
assert(isFormulaL (second C4') /\ isFormulaL (second CK') /\ isFormulaL (second CN')).
assert(isFormulaL (second (C4'++CK'++CN'))).
apply Permutation_map with (f:=snd) in H.
rewrite <- H;auto.
CleanContext;SLSolve.
CleanContext.
rename H16 into isFC4'.
rename H19 into isF_K'.
rename H20 into isFCN'.
assert(lt i a /\ m4 a = false /\ SetK i x0).
{ rewrite H4 in H2.
inversion H2...
} CleanContext.
assert(SetK4 i C4').
{ eapply @SetK4Trans with (i:=a)... }
assert(SetK i CK').
{ eapply @SetKTrans with (i:=a)... }
apply simplUnb' in LBD...
2:{ rewrite H;SLSolve. }
rewrite LBD in UD.
rewrite <- H7 in H6.
rewrite <- H6 in UD.
rewrite H in UD.
assert(HCK: Permutation CK ((a, P) :: x0)) by auto.
apply Permutation_vs_cons_inv in H4...
rewrite Permutation_midle in HCK.
apply Permutation_cons_inv in HCK.
assert(isFormulaL (second x0)).
symmetry in H6.
srewrite H6 in isFB.
SLSolve.
rename H4 into isFx0.
CleanContext.
eapply @destructClassicSet' with (a:=i) in UD;auto;try SLSolve.
destruct UD as [K_1 UD].
destruct UD as [K_2 UD].
destruct UD as [K_3 UD].
destruct UD as [K4_1 UD].
destruct UD as [K4_2 UD].
destruct UD as [K4_3 UD].
destruct UD as [N UD]...
assert(Hd': S n0 |--- PlusT (getU C4') ++ Loc (getU CK'); []; (>> (loc) ! P^)).
{ solveLL;rewrite setUPlusTgetU;auto.
HProof. }
rewrite cxtDestruct.
rewrite UB.
rewrite LBD.
rewrite <- H6.
CleanContext.
rewrite <- HCK.
assert(SetU (K_3 ++ K4_3 ++ N)).
rewrite <- H24.
SLSolve.
assert(SetU (K_2 ++ K4_2 ++ N)).
rewrite <- H26.
SLSolve.
eapply @GenK4Rel' with (C4:=C4++K4_3) (CK:=x1++x2++K_3) (CN:=N);sauto...
SLSolve.
apply SetK4Destruct in H12;sauto.
rewrite H23 in H28...
SLSolve.
rewrite app_assoc.
SLSolve.
apply SetKDestruct in H18;sauto.
rewrite H22 in H28...
SLSolve.
SLSolve.
CleanContext.
rewrite H24...
rewrite (cxtDestruct x1).
rewrite (cxtDestruct x2).
rewrite (cxtDestruct C4)...
CleanContext.
assert(Hp:
(n - length (C4 ++ x1 ++ x2)- 1) |--- ( (PlusT C4 ++ PlusT K4_3) ++ Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3)) ++ [(loc,P)]; []; (> L ++ second (getL x1) ++ second (getL x2)) ->
S n0 |--- (PlusT (getU C4) ++ PlusT K4_3) ++ Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3); []; (>> loc ! P ^) ->
|-- (PlusT C4 ++ PlusT K4_3) ++ Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3) ; []; (> L ++ second (getL x1) ++ second (getL x2))).
eapply HC with (m:=n - length (C4 ++ x1 ++ x2) - 1 + S n0) (C:=loc ? P) (dualC:=loc ! P^);sauto.
lia.
CleanContext.
SLSolve;SLSolve;SLSolve.
assert(isFormulaL ( second (K4_1 ++ K4_3))).
symmetry in H23.
srewrite H23.
SLSolve.
apply isFormulaL_PlusT.
SLSolve.
apply isFormulaL_Loc;auto.
apply isFormulaL_getU;auto.
SLSolve.
apply isFormulaL_Loc;auto.
apply isFormulaL_getU;auto.
SLSolve.
assert(isFormulaL ( second (K_1 ++ K_3))).
symmetry in H22.
srewrite H22.
SLSolve.
apply isFormulaL_Loc;auto.
apply isFormulaL_getU;auto.
SLSolve.
SLSolve;SLSolve.
apply isFormulaL_getL;auto.
SLSolve.
apply isFormulaL_getL;auto.
SLSolve.
CleanContext.
rewrite PlusTgetU...
CleanContext.
assert(SetU K4_3). SLSolve.
rewrite (getLgetUPlusT' H28)...
assert(SetU K_3). SLSolve.
rewrite (SetU_then_empty H28)...
sauto.
apply Hp... all: try clear Hp.
LLPerm((Loc (getU K_3)++ PlusT K4_3) ++ ((PlusT C4 ++ Loc (getU x1) ++ Loc (getU x2) ++ [(loc, P)]))).
apply weakeningGenN...
eapply HeightGeqCEx.
2:{ exact H5. }
CleanContext.
assert(Hp: Permutation (C4 ++ x1 ++ (a, P) :: x2) ((a, P) :: C4 ++ x1 ++ x2)) by perm.
rewrite Hp. simpl. lia.
SLSolve.
apply SetUPlusT.
SLSolve.
assert(Hp: Permutation ( Loc (getU x1) ++ Loc (getU x2)) (Loc (getU x0))).
rewrite <- HCK...
LLPerm(PlusT (getU C4) ++ PlusT K4_3 ++ (Loc (getU x1) ++ Loc (getU x2)) ++ Loc (getU K_3)).
rewrite Hp.
rewrite H4.
rewrite H21.
CleanContext.
LLPerm((PlusT K4_2 ++ Loc K_2) ++ (PlusT K4_1 ++ PlusT K4_3 ++ Loc K_1 ++ Loc (getU K_3))).
apply weakeningGenN...
LLExact Hd'.
rewrite (cxtDestruct C4').
rewrite H22.
rewrite H23.
CleanContext.
SLSolve.
rewrite setULocgetU;auto. perm.
SLSolve.
apply SetUPlusT;auto.
SLSolve.
}
{ apply InvSubExpPhase in H12;auto.
destruct H12 as [C4' H12].
destruct H12 as [CK' H12].
destruct H12 as [CN' H12].
CleanContext.
assert(isFormulaL (second C4') /\ isFormulaL (second CK') /\ isFormulaL (second CN')).
assert(isFormulaL (second (C4'++CK'++CN'))).
apply Permutation_map with (f:=snd) in H.
rewrite <- H;auto.
CleanContext;
SLSolve.
CleanContext.
rename H14 into isFC4'.
rename H17 into isF_K'.
rename H18 into isFCN'.
assert(lt i a /\ m4 a = false /\ SetK i x0).
{ rewrite H4 in H2.
inversion H2...
} CleanContext.
assert(SetK4 i C4').
{ eapply @SetK4Trans with (i:=a)... }
assert(SetK i CK').
{ eapply @SetKTrans with (i:=a)... }
rewrite UB in UD.
rewrite <- H7 in H6.
rewrite <- H6 in UD.
rewrite H in UD.
CleanContext.
assert(isFormulaL (second x0)).
srewrite H4 in isF_K.
simpl in isF_K.
SLSolve.
rename H19 into isFx0.
assert(HCK: Permutation CK ((a, P) :: x0)) by auto.
apply Permutation_vs_cons_inv in H4...
rewrite Permutation_midle in HCK.
apply Permutation_cons_inv in HCK.
eapply @destructClassicSet' with (a:=i) in UD;auto;try SLSolve.
destruct UD as [K_1 UD].
destruct UD as [K_2 UD].
destruct UD as [K_3 UD].
destruct UD as [K4_1 UD].
destruct UD as [K4_2 UD].
destruct UD as [K4_3 UD].
destruct UD as [N UD]...
CleanContext.
rewrite cxtDestruct.
rewrite UB.
rewrite LBD.
rewrite <- H6.
rewrite H...
CleanContext.
rewrite <- HCK.
CleanContext.
assert(SetU (K_3 ++ K4_3 ++ N)).
rewrite <- H22.
SLSolve.
assert(SetU (K_2 ++ K4_2 ++ N)).
rewrite <- H24.
SLSolve.
eapply @GenK4Rel' with (C4:=C4++ getL C4' ++ K4_3) (CK:=x1++x2++getL CK'++K_3) (CN:=N);sauto...
SLSolve.
apply SetK4Destruct in H12;sauto.
rewrite H21 in H26...
SLSolve.
rewrite app_assoc.
SLSolve.
apply SetKDestruct in H16;sauto.
rewrite H20 in H26...
SLSolve.
SLSolve.
CleanContext.
rewrite H22...
rewrite (cxtDestruct x1).
rewrite (cxtDestruct x2).
rewrite (cxtDestruct C4)...
CleanContext.
assert(SetU K_3). SLSolve.
assert(SetU K_2). SLSolve.
assert(SetU K4_3). SLSolve.
assert(SetU K4_2). SLSolve.
rewrite (SetU_then_empty H26)...
CleanContext.
assert(Hp:
(n - length (C4 ++ x1 ++ x2) - 1) |---
(PlusT C4 ++ PlusT K4_3) ++
Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3);
[]; (> (L ++ second (getL x1)) ++ [P] ++ second (getL x2)) ->
n0 - length (C4' ++ CK') - 1 |---
(PlusT (getU C4) ++ PlusT (getL C4') ++ PlusT K4_3) ++
Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3);
[]; (> P ^ :: second (getL CK')) ->
|--(PlusT C4 ++ PlusT (getL C4') ++ PlusT K4_3) ++
Loc (getU x1) ++ Loc (getU x2) ++ Loc (getU K_3); []++[];
(> ((L ++ second (getL x1)) ++ second (getL x2)) ++ second (getL CK')) ).
eapply WC with (C:=P) (dualC:=P^);sauto.
{ CleanContext.
SLSolve.
apply isFormulaL_PlusT;auto.
apply isFormulaL_PlusT;auto.
apply isFormulaL_getL;auto.
apply isFormulaL_PlusT.
assert(isFormulaL ( second (K4_1 ++ K4_3))).
symmetry in H21.
srewrite H21.
apply isFormulaL_getU;auto.
SLSolve.
apply isFormulaL_Loc.
apply isFormulaL_getU;auto.
symmetry in HCK.
srewrite HCK in isFx0.
SLSolve.
apply isFormulaL_Loc.
apply isFormulaL_getU;auto.
symmetry in HCK.
srewrite HCK in isFx0.
SLSolve.
assert(isFormulaL ( second (K_1 ++ K_3))).
symmetry in H20.
srewrite H20.
apply isFormulaL_getU;auto.
apply isFormulaL_Loc.
apply isFormulaL_getU;auto.
SLSolve. }
{ SLSolve.
apply isFormulaL_getL;auto.
symmetry in HCK.
srewrite HCK in isFx0.
SLSolve.
apply isFormulaL_getL;auto.
symmetry in HCK.
srewrite HCK in isFx0.
SLSolve.
apply isFormulaL_getL;auto. }
CleanContext.
CleanContext.
rewrite PlusTgetU...
CleanContext.
rewrite (app_assoc L).
rewrite (app_assoc (L ++ second (getL x1))).
apply Hp. all: try clear Hp.
LLPerm((Loc (getU K_3) ++ PlusT K4_3) ++ (PlusT C4 ++ Loc (getU x1) ++ Loc (getU x2))).
apply weakeningGenN...
eapply HeightGeqCEx.
2:{ rewrite app_assoc_reverse. exact H5. }
CleanContext.
assert(Hp: Permutation (C4 ++ x1 ++ (a, P) :: x2) ((a, P) :: C4 ++ x1 ++ x2)) by perm.
rewrite Hp. simpl. lia.
SLSolve.
apply SetUPlusT;auto.
assert(Hp: Permutation ( Loc (getU x1) ++ Loc (getU x2)) (Loc (getU x0))).
rewrite <- HCK...
LLPerm(PlusT (getU C4) ++ PlusT (getL C4') ++ PlusT K4_3 ++ (Loc (getU x1) ++ Loc (getU x2)) ++ Loc (getU K_3)).
rewrite Hp. clear Hp.
rewrite H4.
rewrite H19...
CleanContext.
LLPerm((Loc K_2 ++ PlusT K4_2) ++ (PlusT K4_1 ++ PlusT K4_3) ++
PlusT (getL C4') ++ (Loc K_1 ++ Loc (getU K_3))).
apply weakeningGenN...
LLExact H15.
rewrite (cxtDestruct C4').
CleanContext.
rewrite H21...
rewrite H20...
CleanContext.
rewrite setULocgetU...
SLSolve.
apply SetUPlusT;auto.
} }
assert(SetU x0).
rewrite H4 in H3. SLSolve.
assert(SetU D).
apply BangUnb in Hj...
rewrite H4 in H3. SLSolve.
eapply @GenK4Rel' with (C4:=C4) (CK:=CK) (CN:= x0)...
rewrite cxtDestruct.
rewrite UB.
rewrite LBD.
rewrite H7.
rewrite <- H6.
rewrite (SetU_then_empty H8).
CleanContext.
symmetry.
rewrite (cxtDestruct C4).
rewrite (cxtDestruct x).
CleanContext.
apply seqNtoSeq in H5... }
Qed.
Theorem UCutDwC a j n w h P F L BD B D:
CutH w h -> CutW w -> S h = n + j -> complexity P = w ->
u a = true -> isFormulaL (second BD) -> isFormulaL L -> isFormula F -> isFormula P -> isFormula (dual P) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
j |--- D; []; (>> a ! P ^) ->
n |--- B ++ [(a,P)]; L; (>> F) ->
|-- BD; L; (> [F]).
Proof with sauto;solveF.
intros HC WC Hh Hc Hut isFBD isFL isFF isFP isFDP HP1 HP2 HP3 Hj Hn.
assert(isFormulaL (second B)) by SLSolve.
rename H into isFB.
assert(SetU D).
eapply BangUnb in Hj;auto.
rewrite cxtDestruct in Hj.
rewrite (SetU_then_empty H) in *.
CleanContext.
clear H.
rewrite cxtDestruct.
rewrite HP2.
rewrite HP3.
assert(Permutation (getU D) (getU B)).
rewrite <- HP1.
rewrite <- HP2;auto.
clear HP1 HP2 HP3.
rewrite H.
rewrite H in Hj. clear H isFBD.
rewrite <- cxtDestruct.
inversion Hn...
*
store.
decide1 (perp A).
* checkPermutationCases H5.
{
store.
decide1 (perp A).
}
{ inversion H2...
simpl in Hj.
rewrite H3.
rewrite H3 in Hj.
rewrite cxtDestruct.
rewrite (SetU_then_empty H0).
CleanContext.
apply (InvBangT Hut H1 Hj). }
*
store. decide1 one.
*
CleanContext.
CleanContext.
assert(isFormulaL (second B0)).
eapply @isFormulaSecondSplit1 with (D:=D0) (BD:=B) (X:=[(a,P)]) (Y:=[]);sauto.
SLSolve.
rename H into isFB0.
assert(isFormulaL (second D0)).
eapply @isFormulaSecondSplit2 with (D:=D0) (BD:=B) (X:=[(a,P)]) (Y:=[]);sauto.
SLSolve.
rename H into isFD0.
assert(n0 |--- (getU B ++ getL B0) ++ [(a, P)]; M; (>> F0) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- getU B ++ getL B0 ; M; (> [F0])).
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[F0])...
SLSolve;SLSolve.
SLSolve.
SLSolve.
assert(n0 |--- (getU B ++ getL D0) ++ [(a, P)]; N; (>> G) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- getU B ++ getL D0 ; N; (> [G])).
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[G])...
SLSolve;SLSolve.
SLSolve.
SLSolve.
rewrite H0.
solveLL.
rewrite <- (app_nil_r []).
eapply @InvTensor with (B:=getU B ++ getL B0) (D:=getU B ++ getL D0)...
CleanContext.
apply H;auto.
LLPerm ((getU B ++ [(a, P)]) ++ getL B0 ).
rewrite H1.
rewrite <- cxtDestruct;auto.
apply H5;auto.
LLPerm ((getU B ++ [(a, P)]) ++ getL D0 ).
rewrite H2.
rewrite <- cxtDestruct;auto.
* assert(n0 |--- B ++ [(a, P)]; L; (>> F0) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- B ; L; (> [F0])).
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[F0])...
SLSolve.
solveLL.
rewrite <- (app_nil_r []).
apply InvPlus...
* assert(n0 |--- B ++ [(a, P)]; L; (>> G) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- B ; L; (> [G])).
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[G])...
SLSolve.
solveLL.
rewrite <- (app_nil_r []).
apply InvPlusComm...
* assert(Hc:
n0 |--- B ++ [(a,P)]; L; (> [F]) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- B; L; (> [F])).
eapply HC with (C:=a ? P) (dualC:=a ! P ^) (m:=n0 + j) (L:=[F])...
apply Hc...
*
assert(Hc:
n0 |--- B ++ [(a,P)]; L; (>> (FX t)) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- B; L; (> [FX t])).
eapply HC with (C:=a ? P) (dualC:=a ! P ^) (m:=n0 + j) (L:=[FX t])...
SLSolve.
solveLL.
rewrite <- (app_nil_l []).
eapply InvEx with (t0:=t)...
*
assert(Hc:
n0 |--- B ++ [(a,P)]; []; (> [F0]) ->
j |--- getU B; []; (>> a ! P ^) ->
|-- B; []; (> [F0])).
eapply HC with (C:=a ? P) (dualC:=a ! P ^) (m:=n0 + j) (L:=[F0])...
1-5:exact nil.
SLSolve.
solveLL. decide1.
* solveLL. decide1.
eapply @CutK4SubCase with (n:=n0) (j:=j) (h:=h) (P:=P) (a:=a) (w:=complexity P) (B:=B) (D:=getU B)...
SLSolve.
Qed.
Theorem LCutDwC a j n w h P F L B D BD:
CutH w h -> CutW w -> S h = n + j -> complexity P = w ->
u a = false ->
isFormulaL (second BD) -> isFormulaL L -> isFormula F -> isFormula P -> isFormula (dual P) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
j |--- D; []; (>> a ! P ^) ->
n |--- B ++ [(a,P)]; L; (>> F) ->
|-- BD; L; (> [F]).
Proof with sauto;solveF.
intros HC WC Hh Hc Hut isFBD isFL isFF isFP isFDP HP1 HP2 HP3 Hj Hn.
assert(isFormulaL (second B)) by SLSolve.
assert(isFormulaL (second D)) by SLSolve.
rename H into isFB.
rename H0 into isFD.
inversion Hn...
+ assert(SetU [(a, P)]).
SLSolve...
inversion H...
+
checkPermutationCases H5.
{ assert(SetU [(a, P)]).
rewrite <- H3 in H0.
SLSolve...
inversion H... }
{
inversion H2...
inversion Hj... SLSolve.
apply InvSubExpPhase in H7;auto.
destruct H7 as [C4 H7].
destruct H7 as [CK H7].
destruct H7 as [CN H7].
CleanContext.
assert(SetT C4).
{ eapply (SetTK4Closure H1 H2). }
assert(SetT CK).
{ eapply (SetTKClosure H1 H5). }
rewrite cxtDestruct.
rewrite HP2.
rewrite HP3.
CleanContext.
rewrite SetU_then_empty.
2:{ rewrite H3... }
rewrite H.
CleanContext.
LLPerm(getU CN ++(getL CK ++ (getU C4 ++ getL C4)) ++ getU CK).
apply weakeningGen;SLSolve.
apply Loc_Unb';SLSolve...
rewrite cxtDestruct in H9.
SLSolve.
eapply @AbsorptionLinearSet with (C:=getL CK) (B':=(getU C4 ++ getL C4) ++ Loc (getU CK) );SLSolve...
rewrite cxtDestruct in H9.
SLSolve.
rewrite <- cxtDestruct.
rewrite <- (SetTPlusT H7).
CleanContext.
HProof. }
+
assert(SetU [(a, P)]).
SLSolve...
inversion H...
+
CleanContext.
CleanContext.
checkPermutationCases H3.
-
assert(SetL x).
assert(SetL (x ++ getL D0)).
rewrite H5;SLSolve.
SLSolve...
rewrite (cxtDestruct x) in H5.
rewrite (cxtDestruct x) in H3.
rewrite (SetL_then_empty H) in H5...
rewrite (SetL_then_empty H) in H3...
rewrite cxtDestruct.
rewrite HP3.
rewrite <- H5.
rewrite HP2.
destruct (PositiveOrRelease G).
assert(|-- getU D ++ (getL x ++ getL D0) ++ getL D; F0 ** G::M++N; UP ([]++[])).
eapply @InvTensor with (B:=getL x ++ getL D ++ getU D) (D:=getU D++ getL D0)...
CleanContext.
CleanContext...
CleanContext.
assert(n0 |--- (getU D ++ getL x) ++ [(a, P)]; M; (>> F0) ->
j |--- D; []; (>> a ! P ^) ->
|-- getL x ++ getL D ++ getU D ; M; (> [F0])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[F0])...
SLSolve.
assert(isFormulaL (second (getL B))).
SLSolve.
symmetry in H5.
srewrite H5 in H7.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
}
apply H7...
rewrite <- HP2.
rewrite HP1.
rewrite H1.
rewrite app_assoc_reverse.
rewrite <- H3.
rewrite <- cxtDestruct;auto.
rewrite <- HP2.
rewrite HP1.
rewrite H2.
rewrite <- cxtDestruct;auto.
store. inversion H6...
decide1 G. inversion H6...
apply seqNtoSeq in H8;auto.
store.
rewrite H0.
CleanContext.
assert(|-- getU D ++ (getL x ++ getL D0) ++ getL D; F0 ** G::M++N; UP ([]++[])).
eapply @InvTensor with (B:=getL x ++ getL D ++ getU D) (D:=getU D++ getL D0)...
CleanContext.
CleanContext...
CleanContext.
assert(n0 |--- (getU D ++ getL x) ++ [(a, P)]; M; (>> F0) ->
j |--- D; []; (>> a ! P ^) ->
|-- getL x ++ getL D ++ getU D ; M; (> [F0])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[F0])...
SLSolve.
assert(isFormulaL (second (getL B))).
SLSolve.
symmetry in H5.
srewrite H5 in H7.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
}
apply H7...
rewrite <- HP2.
rewrite HP1.
rewrite H1.
rewrite app_assoc_reverse.
rewrite <- H3.
rewrite <- cxtDestruct;auto.
rewrite <- HP2.
rewrite HP1.
rewrite H2.
rewrite <- cxtDestruct;auto.
inversion H8;CleanContext...
apply seqNtoSeq in H13;auto.
store.
rewrite H0.
CleanContext.
-
assert(SetL x).
assert(SetL (getL B0 ++ x)).
rewrite H5. apply getLtoSetL.
SLSolve...
rewrite (cxtDestruct x) in H5.
rewrite (cxtDestruct x) in H3.
rewrite (SetL_then_empty H) in H5...
rewrite (SetL_then_empty H) in H3...
rewrite cxtDestruct.
rewrite HP3.
rewrite <- H5.
rewrite HP2.
destruct (PositiveOrRelease F0).
assert(|-- getU D ++ (getL x ++ getL B0) ++ getL D; F0 ** G::M++N; UP ([]++[])).
eapply @InvTensor with (D:=getL x ++ getL D ++ getU D) (B:=getU D++ getL B0)...
CleanContext.
CleanContext...
CleanContext.
rewrite <- HP2.
rewrite HP1.
rewrite H1.
rewrite <- cxtDestruct;auto.
store. inversion H6...
decide1 F0. inversion H6...
apply seqNtoSeq in H4;auto.
assert(n0 |--- (getU D ++ getL x) ++ [(a, P)]; N; (>> G) ->
j |--- D; []; (>> a ! P ^) ->
|-- getL x ++ getL D ++ getU D ; N; (> [G])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[G])...
SLSolve.
assert(isFormulaL (second (getL B))).
SLSolve.
symmetry in H5.
srewrite H5 in H7.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
}
apply H7...
rewrite <- HP2.
rewrite HP1.
rewrite H2.
rewrite app_assoc_reverse.
rewrite <- H3.
rewrite <- cxtDestruct;auto.
store.
rewrite H0.
CleanContext.
LLExact H7.
assert(|-- getU D ++ (getL x ++ getL B0) ++ getL D; F0 ** G::M++N; UP ([]++[])).
eapply @InvTensor with (D:=getL x ++ getL D ++ getU D) (B:=getU D++ getL B0)...
CleanContext.
CleanContext...
CleanContext.
rewrite <- HP2.
rewrite HP1.
rewrite H1.
rewrite <- cxtDestruct;auto.
inversion H4;CleanContext...
solveLinearLogic.
assert(n0 |--- (getU D ++ getL x) ++ [(a, P)]; N; (>> G) ->
j |--- D; []; (>> a ! P ^) ->
|-- getL x ++ getL D ++ getU D ; N; (> [G])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[G])...
SLSolve.
assert(isFormulaL (second (getL B))).
SLSolve.
symmetry in H5.
srewrite H5 in H7.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
SLSolve.
}
apply H7...
rewrite <- HP2.
rewrite HP1.
rewrite H2.
rewrite app_assoc_reverse.
rewrite <- H3.
rewrite <- cxtDestruct;auto.
store.
rewrite H0.
CleanContext.
LLExact H7.
+
assert(n0 |--- B ++ [(a, P)]; L; (>> F0) ->
j |--- D; []; (>> a ! P ^) ->
|-- BD ; L; (> [F0])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[F0])... SLSolve. }
assert(|-- BD; F0 op G::L; UP ([]++[])).
apply InvPlus...
store...
+
assert(n0 |--- B ++ [(a, P)]; L; (>> G) ->
j |--- D; []; (>> a ! P ^) ->
|-- BD ; L; (> [G])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[G])... SLSolve. }
assert(|-- BD; F0 op G::L; UP ([]++[])).
apply InvPlusComm...
store...
+
assert(Hc:
n0 |--- B ++ [(a,P)]; L; (> [F]) ->
j |--- D; []; (>> a ! P ^) ->
|-- BD; L; (> [F])).
eapply HC with (C:=a ? P) (dualC:=a ! P ^) (L:=[F])...
apply Hc...
+
assert(n0 |--- B ++ [(a, P)]; L; (>> (FX t)) ->
j |--- D; []; (>> a ! P ^) ->
|-- BD ; L; (> [FX t])).
{
eapply HC with (m:=n0 + j) (dualC:=a ! P ^) (C:=a ? P) (L:=[FX t])... SLSolve. }
assert(|-- BD; E{ FX}::L; UP ([]++[])).
eapply InvEx with (t0:=t)...
store.
+
assert(SetU [(a, P)]).
SLSolve...
inversion H...
+ store. decide1.
eapply @CutK4SubCase with (n:=n0) (j:=j) (h:=h) (P:=P) (a:=a) (w:=complexity P) (B:=B) (D:=D)...
SLSolve.
Qed.
Theorem CutDwC a j n w h P F L B D BD:
CutH w h -> CutW w -> S h = n + j -> complexity P = w ->
isFormulaL (second BD) -> isFormulaL L -> isFormula F -> isFormula P -> isFormula (dual P) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
j |--- D; []; (>> a ! P ^) ->
n |--- B ++ [(a,P)]; L; (>> F) ->
|-- BD; L; (> [F]).
Proof with sauto.
intros.
destruct (uDec a).
- eapply UCutDwC with (w:=w) (h:=h) (n:=n) (j:=j) (a:=a) (P:=P) (D:=D) (B:=B)...
- eapply LCutDwC with (w:=w) (h:=h) (n:=n) (j:=j) (a:=a) (P:=P) (D:=D) (B:=B)...
Qed.
Theorem CutUPC i j w h a A L M BD B D : CutH w h -> CutW w -> complexity A = w -> S h = i + j ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
isFormulaL (second BD) -> isFormulaL M -> isFormulaL L -> isFormula A -> isFormula (dual A) ->
i |--- B++[(a,A)]; M; (> L) ->
j |--- D; []; (>> a ! A ^) ->
|-- BD; M; (> L).
Proof with sauto;solveLL.
intros CH CW compA hH HP1 HP2 HP3 isFBD isFM isFL isFA isFDA Hi Hj.
assert(isFormulaL (second B)) by SLSolve.
assert(isFormulaL (second D)) by SLSolve.
rename H into isFB.
rename H0 into isFD.
inversion Hi...
-- assert( n |--- B ++ [(a,A)]; M; (> M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD; M; (> M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)... SLSolve.
apply Cut...
-- assert( n |--- B ++ [(a,A)]; M; (> F :: G :: M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD; M; (> F :: G ::M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)... SLSolve. inversion H1... inversion H1...
SLSolve.
apply Cut...
-- assert( n |--- B ++ [(a,A)]; M; (> F :: M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD; M; (> F :: M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve. inversion H2...
SLSolve.
apply Cut...
-- assert( n |--- B ++ [(a,A)]; M; (> G :: M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD; M; (> G ::M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve.
inversion H2...
SLSolve.
apply Cut...
-- destruct (uDec i0).
assert( n |--- ((i0, F)::B) ++ [(a,A)]; M; (> M0) ->
j |--- (i0, F)::D; []; (>> a ! A ^) -> |-- (i0, F)::BD; M; (> M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve.
inversion H1...
SLSolve.
rewrite e.
CleanContext...
rewrite e.
CleanContext...
rewrite e.
CleanContext...
apply Cut...
apply weakeningN...
assert( n |--- ((i0, F)::B) ++ [(a,A)]; M; (> M0) ->
j |--- D ; []; (>> a ! A ^) -> |-- (i0, F)::BD; M; (> M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve.
inversion H1...
SLSolve.
rewrite e.
CleanContext...
rewrite e.
CleanContext...
rewrite e.
CleanContext...
apply Cut...
-- assert( n |--- B ++ [(a,A)]; F::M; (> M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD ; F::M; (> M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve.
inversion isFL...
SLSolve.
apply Cut...
-- destruct (PositiveOrRelease F).
assert( n |--- B ++ [(a,A)]; L'; (>> F) ->
j |--- D; []; (>> a ! A ^) -> |-- BD ; L'; (> [F])) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^) (L:=[F])...
apply Remove_Permute in H1...
SLSolve.
apply Remove_Permute in H1...
rewrite H1 in isFM.
inversion isFM...
assert( |-- BD ; L'; (> [F])).
apply Cut...
inversion H2;subst;try solve [inversion H].
apply Remove_Permute in H1...
rewrite H1.
LLExact H9.
inversion H5;CleanContext...
apply Remove_Permute in H1...
rewrite H1.
decide1 F.
assert( n0 |--- B ++ [(a,A)]; L'; (> [F]) ->
j |--- D; []; (>> a ! A ^) -> |-- BD ; L'; (> [F])) as Cut.
eapply CH with (m:=n0+j) (C:=a ? A ) (dualC:=a ! A ^) (L:=[F])...
rewrite H1 in isFM.
inversion isFM...
rewrite H1 in isFM.
inversion isFM...
apply Cut...
-- apply in_app_or in H3...
+ assert( n |--- B ++ [(a,A)]; M; (>> F) ->
j |--- D; []; (>> a ! A ^) -> |-- BD ; M; (> [F])) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^) (L:=[F])...
SLSolve.
eapply @AbsorptionClassic with (i:=i0) (F:=F)...
rewrite cxtDestruct.
rewrite HP1.
apply in_or_app. left.
apply uIngetU...
+ inversion H...
assert(SetU D).
apply BangUnb in Hj...
assert( Permutation BD B).
eapply (simplUnb _ B HP2 HP1 _ H).
rewrite H3 in *. clear H3 HP1 HP3.
assert( n |--- B ++ [(i0,F)]; M; (>> F) ->
j |--- D; []; (>> i0 ! F ^) -> |-- B ; M; (> [F])) as Cut.
eapply CH with (C:=i0 ? F ) (dualC:=i0 ! F ^) (L:=[F])...
assert(Hs: |-- B; M; (> [F]))...
clear Cut.
apply seqtoSeqN in Hs.
destruct Hs as [x Hs].
apply InvBangT in Hj...
apply seqtoSeqN in Hj.
destruct Hj as [y Hj].
destruct(PositiveOrRelease F).
assert(release (F ^)).
apply PositiveDualRelease...
assert( x |--- B; M ; (> [F]) ->
S y |--- D; []; (>> F ^) ->
|-- B; M++[] ; > [ ]) as Cut.
eapply CW with (m:=complexity F)...
CleanContext.
assert( y |--- D; [] ; (> [F^]) ->
S x |--- B; M; (>> F) ->
|-- B; []++M ; > [ ]) as Cut.
eapply CW with (m:=complexity F)...
rewrite <- ng_involutive...
rewrite DualComplexity.
rewrite <- ng_involutive...
CleanContext.
-- apply Remove_Permute in H3.
checkPermutationCases H3... 3:{ exact nil. }
-
rewrite cxtDestruct.
rewrite HP1.
rewrite HP3.
rewrite H3.
CleanContext.
assert(Hs:|--getU x ++ getL x ++ getL D ; M; (> [F])).
eapply CutDwC with (P:=A) (j:=j) (a:=a) (n:= S n) (h:=h) (w:=complexity A) (B:=x) (D:=D)...
CleanContext. SLSolve.
apply isFormulaL_getU.
srewrite H3 in isFB.
SLSolve.
apply isFormulaL_getL.
srewrite H3 in isFB.
SLSolve.
SLSolve.
srewrite H3 in isFB.
SLSolve.
inversion isFB...
CleanContext.
rewrite <- HP2.
rewrite HP1.
rewrite H3.
CleanContext.
CleanContext.
rewrite H4...
eapply @HeightGeq with (n:=n)...
LLPerm ((i0, F) :: (getU x ++ getL x ++ getL D)).
eapply @AbsorptionLinear with (i:=i0) (F:=F)...
-
inversion H3...
rewrite <- H4 in H7.
clear H4.
inversion Hj...
inversion H3.
solveSubExp.
apply InvSubExpPhase in H8;auto.
destruct H8 as [C4 H8].
destruct H8 as [CK H8].
destruct H8 as [CN H8]...
assert(SetT C4).
{ eapply (SetTK4Closure H1 H3). }
assert(SetT CK).
{ eapply (SetTKClosure H1 H5). }
rewrite cxtDestruct.
rewrite HP2.
rewrite HP3.
rewrite H.
CleanContext.
eapply @AbsorptionLinearSet with (C:=getL CK) (B':=(getU C4 ++ getU CK ++ getU CN) ++
getL B ++ getL C4)...
rewrite cxtDestruct in H10.
SLSolve...
LLPerm ((getU CN ++ getU C4 ++ getL C4 ++ getL B )++ getU CK).
apply ContractionL'...
apply getUtoSetU.
rewrite cxtDestruct in H10.
SLSolve...
assert(
n0 - length (C4 ++ CK) - 1 |--- getU CN ++ getU CK ++ getU C4 ++ getL C4 ++ Loc (getU CK); [] ; (> (dual A ::second (getL CK))) ->
n |--- Loc (getU CK) ++ getU CN ++ getU C4 ++ getL B ++ getU CK; M; (>> A) ->
|-- (getU CN ++ getU C4 ++ getL C4 ++ getL B) ++ getU CK ++ Loc (getU CK); []++M ; > second (getL CK)) as DCCut.
eapply CW with (m:=complexity A)...
CleanContext.
SLSolve;SLSolve.
srewrite H in isFD.
apply isFormulaL_getU.
SLSolve.
srewrite H in isFD.
apply isFormulaL_getU.
SLSolve.
srewrite H in isFD.
apply isFormulaL_getL.
SLSolve.
srewrite H in isFD.
apply isFormulaL_getU.
SLSolve.
srewrite H in isFD.
apply isFormulaL_Loc.
apply isFormulaL_getU.
SLSolve.
srewrite H in isFD.
apply isFormulaL_getL.
SLSolve.
rewrite <- ng_involutive...
rewrite DualComplexity .
rewrite <- ng_involutive...
CleanContext.
CleanContext.
CleanContext.
apply DCCut...
apply weakeningGenN.
apply weakeningGenN.
eapply exchangeCCN.
2:{ exact H11. }
rewrite (cxtDestruct C4).
CleanContext.
SLSolve.
SLSolve.
apply weakeningGenN.
eapply exchangeCCN.
2:{ exact H7. }
rewrite (cxtDestruct B).
CleanContext.
rewrite <- HP1.
rewrite HP2.
rewrite H.
CleanContext.
SLSolve.
-- assert(Hs:|-- BD; M; (> [F])).
eapply CutDwC with (P:=A) (B:=B) (D:=D) (BD:=BD) (j:=j) (a:=a) (n:=S n) (h:=h) (w:=complexity A)...
eapply @HeightGeq with (n:=n)...
destruct (NegativeAtomDec F).
2:{ eapply @AbsorptionTheory with (F:=F)... }
inversion H...
eapply @AbsorptionPerp' with (A:=A0)...
-- assert( n |--- B ++ [(a,A)]; M; (> FX x :: M0) ->
j |--- D; []; (>> a ! A ^) -> |-- BD ; M; (> FX x :: M0)) as Cut.
eapply CH with (C:=a ? A ) (dualC:=a ! A ^)...
SLSolve.
inversion H2...
SLSolve.
apply Cut...
-- createWorld i0.
eapply @CutK4SubCase with (n:=n) (j:=j) (h:=h) (P:=A) (a:=a) (w:=complexity A) (B:=B) (D:=D)...
intro... SLSolve.
Unshelve .
rewrite HP3...
Qed.
Theorem CutElimination i j a C dualC A dualA B D BD L L1 L2 L3 S1 S2 M N P:
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL N ->
isFormulaL L ->
isFormula C ->
isFormula dualC ->
dualC = dual C ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(L = L1++L2 -> i |--- B; M ++ [C]; (> L1) -> j |--- D; N; (> dualC::L2) -> |-- BD; M ++ N; (> L)) /\
(i |--- B; M; (> C :: L) -> j |--- D; N; (>> dualC) -> |-- BD; M ++ N; (> L)) /\
(L = (S1++S2)++L3 -> i |--- B; M; (> S1++[C]++S2) -> j |--- D; N; (> dual C::L3) -> |-- BD; M ++ N; (> L) ) /\
(dualA = A ^ ->
dualC = a ! dualA -> L = [P] ->
i |--- B ++ [(a,A)] ; M; (>> P) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> [P])) /\
(dualA = A ^ ->
dualC = a ! dualA ->
i |--- B ++ [(a,A)] ; M; (> L) -> j |--- D; []; (>> a ! dualA) -> |-- BD; M; (> L)).
Proof with sauto;solveF;solveLL.
assert(exists w, complexity C = w).
eexists; auto.
destruct H as [w H].
revert H.
revert a i j C dualC A dualA P BD B D L L1 L2 L3 S1 S2 M N.
induction w using strongind;intros.
- assert(complexity C > 0) by apply Complexity0.
rewrite H in H10. inversion H10.
- remember (plus i j) as h.
revert dependent BD.
revert dependent B.
revert dependent D.
revert dependent L.
revert dependent L1.
revert dependent L2.
revert dependent L3.
revert dependent S1.
revert dependent S2.
revert dependent M.
revert dependent N.
revert dependent P.
revert dependent dualA.
revert A.
revert dependent C.
revert dependent dualC.
revert dependent i.
revert a j.
dependent induction h using strongind; intros.
+
symmetry in Heqh.
apply plus_is_O in Heqh.
destruct Heqh;subst.
eapply CutElimBase...
+ rename H into CutW.
rename H0 into CutH.
rename H1 into compC.
move BD at top.
move B at top.
move D at top.
move M at top.
move N at top.
move L at top.
move L2 at top.
move L1 at top.
move S1 at top.
move S2 at top.
move C at top.
move A at top.
move dualC at top.
move dualA at top.
move P at top.
subst.
split;[intros
|split;[intros
|split;[intros
|split;intros]]].
* eapply (@CutUPLStar i j w h C L L1 L2 M N BD B D)...
unfold CutElimination.CutH; intros.
eapply CutH with (m:=m)...
* eapply (@CutUP i j w h C L M N BD B D)...
unfold CutElimination.CutH; intros.
eapply CutH with (m:=m)...
unfold CutElimination.CutW; intros.
eapply CutW with (m:=m)...
* eapply (@CutUPStar i j w h C L S1 S2 L3 M N BD B D)...
unfold CutElimination.CutH; intros.
eapply CutH with (m:=m)...
* subst. eapply (@CutDwC a j i w h A P M B D BD)...
unfold CutElimination.CutH; intros.
eapply CutH with (m:=m)...
unfold CutElimination.CutW; intros.
eapply CutW with (m:=m)...
rewrite DualComplexity in compC...
rewrite H0 in compC...
inversion compC...
apply DualComplexity.
all: clear CutH CutW.
inversion H4...
assert(C = a ? A).
rewrite (ng_involutive C).
rewrite H0;simpl...
rewrite <- ng_involutive...
rewrite H in H5.
inversion H5...
rewrite H0 in H6.
inversion H6...
*
assert(C = a ? A).
rewrite (ng_involutive C).
rewrite H0;simpl...
rewrite <- ng_involutive...
eapply (@CutUPC i j w h a A L M BD B D)...
unfold CutElimination.CutH; intros.
eapply CutH with (m:=m)...
unfold CutElimination.CutW; intros.
eapply CutW with (m:=m)...
rewrite DualComplexity in compC...
rewrite H0 in compC...
inversion compC...
all: clear CutH CutW.
inversion H5...
inversion H6...
Qed.
Theorem GeneralCut i j C BD B D L M N:
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL N ->
isFormulaL L ->
isFormula C ->
isFormula (C ^) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(i |--- B; M ; UP (C::L) ->
j |--- D; N ; DW (dual C) ->
|-- BD; M++N ; UP L).
Proof with subst;auto.
intros.
assert(exists w, complexity C = w).
eexists; auto.
destruct H10 as [w H10].
specialize CutElimination;intros.
assert((i |--- B; M ; UP (C::L) ->
j |--- D; N ; DW (dual C) ->
|-- BD; M++N ; UP L)) as CUT.
eapply H11;eauto.
clear H11.
apply CUT;auto.
Qed.
Theorem GeneralCutClassic i j a A BD B D L M:
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL L ->
isFormula A ->
isFormula (A ^) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(i |--- B ++ [(a,A)] ; M; (> L) -> j |--- D; []; (>> a ! A^) -> |-- BD; M; (> L)).
Proof with subst;auto.
intros.
assert(exists w, complexity A = w).
eexists; auto.
destruct H9 as [w H9].
specialize CutElimination;intros.
assert((i |--- B ++ [(a,A)]; M ; UP L ->
j |--- D; [] ; DW (a ! A^) ->
|-- BD; M ; UP L)) as CUT.
eapply H10 with (C:=a ? A) (dualC:=(a ? A) ^);eauto.
clear H5.
simpl. constructor;auto.
apply CUT;auto.
Qed.
Theorem GeneralCutClassic' a A BD B D L M:
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL L ->
isFormula A ->
isFormula (A ^) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(|-- B ++ [(a,A)] ; M; (> L) -> |-- D; []; (>> a ! A^) -> |-- BD; M; (> L)).
Proof with subst;auto.
intros.
apply seqtoSeqN in H7.
apply seqtoSeqN in H8.
CleanContext.
eapply GeneralCutClassic with (B:= B) (D:=D) (a:=a) (A:=A);eauto.
Qed.
Theorem GeneralCut' C BD B D L M N:
isFormulaL (second BD) ->
isFormulaL M ->
isFormulaL N ->
isFormulaL L ->
isFormula C ->
isFormula (C ^) ->
Permutation (getU BD) (getU B) ->
Permutation (getU BD) (getU D) ->
Permutation (getL BD) (getL B ++ getL D) ->
(|-- B; M ; UP (C::L) ->
|-- D; N ; DW (dual C) ->
|-- BD; M++N ; UP L).
Proof with subst;auto.
intros.
apply seqtoSeqN in H8.
apply seqtoSeqN in H9.
CleanContext.
eapply GeneralCut with (C:= C) (B:=B);eauto.
Qed.
End CutElimination.
|
If $f$ converges to $l$ in a metric space, then for every $\epsilon > 0$, there exists an $x$ such that $|f(x) - l| < \epsilon$. |
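A minimal LaTeX restatement of the claim above (a sketch, assuming that "converges to $l$" means $f$ is eventually within any given $\epsilon$ of $l$, so in particular some point witnesses each $\epsilon$):

$$f \longrightarrow l \;\Longrightarrow\; \forall \epsilon > 0.\ \exists x.\ \lvert f(x) - l \rvert < \epsilon.$$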
Sightsavers is an international non-governmental organization whose aim is to prevent and eliminate avoidable blindness and to promote the rights of people with disabilities in some of the poorest parts of the world.
People who signed up to receive Sightsavers’ emails would be added to their main active segment. The number and subject of these emails would vary greatly throughout the year, meaning that each person’s initial experience of Sightsavers’ emails could be very different depending on when they signed up. It was also possible that new subscribers could be dropped into a series of emails midway through.
Sightsavers wanted to provide their subscribers with a consistent, engaging and educational welcome program, introducing a balanced view of the different types of work they do and countries they work in. The overall objective of the campaign was to provide new subscribers with a better and more enjoyable email experience – and in turn increase long-term engagement and donations.
The solution was to develop a two-month automated welcome journey, using a combination of new and existing content which had performed well with the main active segment. Sightsavers also utilized behavioral and data filters to tailor how they spoke to their supporters and what they asked of them. After the initial two months, these people were automatically added to the active segment.
Many of the CTAs are engagement-focused; however, they have achieved an overall 3% conversion rate once supporters hit the website.
To further personalize the user experience, subscribers are asked for their name twice – once in the welcome email and then in a follow-up later in the journey. 27% of those who opened the welcome email went on to provide their name, as did 20% of those who opened the follow-up. This constitutes a 17.8% name collection rate across all sign-ups. The campaign hasn’t yet been running long enough for any longer-term analysis, but Sightsavers will be keeping a keen eye on it going forward, optimizing and updating it with fresh content. |
[STATEMENT]
lemma (in cf_parallel_2) cf_parallel_axioms'[cat_parallel_cs_intros]:
assumes "\<alpha>' = \<alpha>"
and "a = \<aa>"
and "b = \<bb>"
and "g = \<gg>"
and "f = \<ff>"
and "a' = \<aa>'"
and "b' = \<bb>'"
and "g' = \<gg>'"
and "f' = \<ff>'"
shows "cf_parallel_2 \<alpha>' a b g f a' b' g' f' \<CC>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cf_parallel_2 \<alpha>' a b g f a' b' g' f' \<CC>
[PROOF STEP]
unfolding assms
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cf_parallel_2 \<alpha> \<aa> \<bb> \<gg> \<ff> \<aa>' \<bb>' \<gg>' \<ff>' \<CC>
[PROOF STEP]
by (rule cf_parallel_2_axioms) |
theory "Traceable-Objects"
imports Main
begin
section \<open>A few properties of foldl\<close>
lemma l1:"foldl (o) a (rev (x#xs)) = (foldl (o) a (rev xs)) o x"
by simp
lemma l2:"foldl (o) a (x#xs) = a o (foldl (o) x xs)"
proof (induct "rev xs" arbitrary:a x xs)
case Nil
then show ?case
by auto
next
case (Cons a xa)
then show ?case
by (metis (no_types, lifting) append_Cons comp_assoc l1 rev.simps(2) rev_swap)
qed
lemma l3:"foldl (o) x xs = x o (foldl (o) id xs)"
by (metis comp_id foldl_Cons l2)
text \<open>TODO: this could be generalized to arbitrary multiplicative monoids:
the lemmas above only use associativity and the identity, and functions with
the composition operation form a (non-commutative) monoid. A sketch follows below.\<close>
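text \<open>A hedged sketch, not part of the original theory: the statement of l3 for an
arbitrary multiplicative monoid. It assumes HOL's class monoid_mult is the right
target and has not been checked here:

  lemma (in monoid_mult) "foldl (*) x xs = x * foldl (*) 1 xs"

Only associativity and the unit laws are needed, mirroring the proofs of l2 and l3.\<close>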
section \<open>Data types and traceability\<close>
locale data_type =
fixes f :: "'b \<Rightarrow> 'a \<Rightarrow> 'a" \<comment> \<open>the transition function of the data type\<close>
and init :: "'a"
begin
definition exec \<comment> \<open>a state transformer corresponding to applying all operations in order\<close>
where "exec ops \<equiv> foldl (o) id (map f ops)" \<comment> \<open>@{term "(o)"} is function composition \<close>
lemma exec_Cons:"exec (x#xs) = (f x) o (exec xs)"
using exec_def l3 by force
lemma exec_Nil:"exec [] = id"
by (simp add: exec_def)
definition is_traceable \<comment> \<open>A state has a unique history from the initial state\<close>
where "is_traceable \<equiv> \<forall> ops\<^sub>1 ops\<^sub>2 . exec ops\<^sub>1 init = exec ops\<^sub>2 init \<longrightarrow> ops\<^sub>1 = ops\<^sub>2"
end
locale traceable_data_type = data_type f init for f init +
assumes traceable:"is_traceable"
begin
text \<open>Prove some facts about traceable datatypes...\<close>
end
section \<open>Append-only lists are traceable\<close>
interpretation list_data_type: data_type "(#)" "[]" .
lemma l4:"list_data_type.exec xs [] = xs"
proof (induct xs)
case Nil
then show ?case
by (simp add: list_data_type.exec_Nil)
next
case (Cons a xs)
then show ?case
by (simp add: data_type.exec_Cons)
qed
interpretation list_traceable:traceable_data_type "(#)" "[]"
using l4 list_data_type.is_traceable_def traceable_data_type_def by fastforce
text \<open>Now the facts proved about traceable datatypes are available for append-only lists.\<close>
end |
module Test
import Salo.Language.Lexer
import Salo.Language.Parser
main : IO ()
main = do Salo.Language.Lexer.Test.test
Salo.Language.Parser.Test.test
|
# This is a temporary module to validate the `AbstractImageFilter` idea
# proposed in https://github.com/JuliaImages/ImagesAPI.jl/pull/3
module ThresholdAPI
using ImageBase
using MappedArrays
"""
AbstractImageAlgorithm
The root of image algorithms type system
"""
abstract type AbstractImageAlgorithm end
"""
AbstractImageFilter <: AbstractImageAlgorithm
Filters are image algorithms whose input and output are both images
"""
abstract type AbstractImageFilter <: AbstractImageAlgorithm end
include("find_threshold.jl")
include("build_histogram.jl")
export build_histogram
end # module ThresholdAPI |
[STATEMENT]
lemma mapping_comparator': "comparator (ccomp :: 'b comparator)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. comparator ccomp
[PROOF STEP]
using ID_ccompare_neq_None'
[PROOF STATE]
proof (prove)
using this:
ID ccompare \<noteq> None
goal (1 subgoal):
1. comparator ccomp
[PROOF STEP]
by(clarsimp)(rule ID_ccompare') |