#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from tests import pyunit_utils
import h2o
from h2o.frame import H2OFrame
import numpy
def test_sw_602_endpoints_equality():
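    # SW-602: fetching a frame via the light endpoint must yield the same
    # cached metadata (id, nrows, ncols, names, preview data) as the full one.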
    data = [numpy.arange(0, 50000).tolist() for x in numpy.arange(0, 99).tolist()]
    fr = h2o.H2OFrame(data)
    full = H2OFrame.get_frame(fr.frame_id)
    light = H2OFrame.get_frame(fr.frame_id, light=True)
    assert full._ex._cache._id == light._ex._cache._id
    assert full._ex._cache._nrows == light._ex._cache._nrows
    assert full._ex._cache._ncols == light._ex._cache._ncols
    assert full._ex._cache._names == light._ex._cache._names
    assert full._ex._cache._data == light._ex._cache._data
    assert full._ex._cache._l == light._ex._cache._l

__TESTS__ = [test_sw_602_endpoints_equality]

if __name__ == "__main__":
    for func in __TESTS__:
        pyunit_utils.standalone_test(func)
else:
    for func in __TESTS__:
        func()
|
section\<open>The Axiom of Pairing in $M[G]$\<close>
theory Pairing_Axiom imports Names begin
context forcing_data
begin
lemma val_Upair :
"one \<in> G \<Longrightarrow> val(G,{\<langle>\<tau>,one\<rangle>,\<langle>\<rho>,one\<rangle>}) = {val(G,\<tau>),val(G,\<rho>)}"
by (insert one_in_P, rule trans, subst def_val, auto simp add: Sep_and_Replace)
lemma pairing_in_MG :
assumes "M_generic(G)"
shows "upair_ax(##M[G])"
proof -
{
fix x y
have "one\<in>G" using assms one_in_G by simp
from assms
have "G\<subseteq>P" unfolding M_generic_def and filter_def by simp
with \<open>one\<in>G\<close>
have "one\<in>P" using subsetD by simp
then
have "one\<in>M" using transitivity[OF _ P_in_M] by simp
assume "x \<in> M[G]" "y \<in> M[G]"
then
obtain \<tau> \<rho> where
0 : "val(G,\<tau>) = x" "val(G,\<rho>) = y" "\<rho> \<in> M" "\<tau> \<in> M"
using GenExtD by blast
with \<open>one\<in>M\<close>
have "\<langle>\<tau>,one\<rangle> \<in> M" "\<langle>\<rho>,one\<rangle>\<in>M" using pair_in_M_iff by auto
then
have 1: "{\<langle>\<tau>,one\<rangle>,\<langle>\<rho>,one\<rangle>} \<in> M" (is "?\<sigma> \<in> _") using upair_in_M_iff by simp
then
have "val(G,?\<sigma>) \<in> M[G]" using GenExtI by simp
with 1
have "{val(G,\<tau>),val(G,\<rho>)} \<in> M[G]" using val_Upair assms one_in_G by simp
with 0
have "{x,y} \<in> M[G]" by simp
}
then show ?thesis unfolding upair_ax_def upair_def by auto
qed
end (* context forcing_data *)
end |
From fae_gtlc_mu.refinements.gradual_static Require Export logical_relation.
From fae_gtlc_mu.backtranslation Require Export alternative_consistency.
From fae_gtlc_mu.backtranslation Require Export cast_help.general_def.
From fae_gtlc_mu.stlc_mu Require Export lang.
(* This file defines what needs to be proven for the compatibility lemma for casts. *)
Section defs.
Context `{!implG Σ,!specG Σ}.
(* Defines relatedness for a list of static values with respect to A, a list of pairs of gradual types. *)
Definition rel_cast_functions A (fs : list stlc_mu.lang.val) : iProp Σ :=
⌜length A = length fs⌝ ∗
[∗ list] a ; f ∈ A ; fs , (
□ (∀ (v : cast_calculus.lang.val) (v' : stlc_mu.lang.val) ,
⟦ a.1 ⟧ (v , v') → ⟦ a.2 ⟧ₑ (Cast (v) a.1 a.2, (stlc_mu.lang.of_val f v'))
)
)%I.
Global Instance rel_cast_functions_persistent A fs :
Persistent (rel_cast_functions A fs).
Proof.
apply bi.sep_persistent; first by apply bi.pure_persistent.
apply big_sepL2_persistent. intros _ (τi , τf) f. simpl.
apply bi.intuitionistically_persistent.
Qed.
(** The (to-be-proven) statement that the -- closed up -- back-translated casts behave appropriately;
it's a slightly adjusted version of the compatibility lemma for casts such that the proof is more ergonomic. *)
Definition back_cast_ar {A} {τi τf} (pC : alternative_consistency A τi τf) :=
∀ ei' K' v v' fs,
( rel_cast_functions A fs ∧
⟦ τi ⟧ (v, v') ∧
initially_inv ei' ∧
currently_half (fill K' (𝓕c pC fs (stlc_mu.lang.of_val v')))
)
⊢ (WP
Cast v τi τf ?{{ w, ∃ w', currently_half (fill K' (stlc_mu.lang.of_val w')) ∧ ⟦ τf ⟧ (w, w') }})%I.
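(* Informal reading of the definition above: given cast functions related at A,
   a pair of values related at τi, and the usual invariants on the
   specification side, the gradual cast `Cast v τi τf` is WP-related to the
   back-translated cast `𝓕c pC fs` applied to `v'`, with results related
   at τf. *)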
End defs.
|
Formal statement is: lemma LIMSEQ_abs_realpow_zero: "\<bar>c\<bar> < 1 \<Longrightarrow> (\<lambda>n. \<bar>c\<bar> ^ n :: real) \<longlonglongrightarrow> 0" Informal statement is: If $|c| < 1$, then $\lim_{n \to \infty} |c|^n = 0$. |
module Language.Ruby.Parser.Builder where
import Data.Ratio (Rational)
import Data.Complex
import Data.List (nub)
import Language.Ruby.AST (Term (..))
import Language.Ruby.Parser.Lexer (Token (..))
lvasgn name = Lvasgn name . Just
ivasgn name = Ivasgn name . Just
cvasgn name = Cvasgn name . Just
gvasgn name = Gvasgn name . Just
casgn parent name = Casgn parent name . Just
type StaticEnv = [String]
mkLogicalOp :: (Term -> Term -> Term) -> Term -> Term -> Term
mkLogicalOp = id
mkExpression :: [Term] -> Term
mkExpression [] = Nil
mkExpression [e] = e
mkExpression xs = Begin xs
mk_multiassign :: Term -> Term -> Term
mk_multiassign = Masgn
mk_postexe :: Term -> Term
mk_postexe = Postexe
mk_accessible :: Term -> StaticEnv -> Term
mk_accessible (Lvar name) env | elem name env = Send Nil name []
mk_accessible term _ = term
mk_alias :: Term -> Term -> Term
mk_alias = Alias
mk_arg :: Token -> Term
mk_arg = Arg . value
mk_args :: [Term] -> Term
mk_args args
| has_duplicate_args args = error "duplicate argument"
| otherwise = Args args
has_duplicate_args :: [Term] -> Bool
has_duplicate_args args = has_non_ignored_duplicates . concatMap arg_names $ args
  where has_non_ignored_duplicates = has_duplicates . filter (not . starts_with_underscore)
        starts_with_underscore ('_' : _) = True
        starts_with_underscore _ = False
        -- a list has duplicates iff removing them changes the list
        has_duplicates a_list = nub a_list /= a_list
        arg_names (Mlhs terms) = map arg_name terms
        arg_names arg = [arg_name arg]
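-- Illustrative examples (not part of the original source): with the
-- definitions above, `has_duplicate_args [Arg "a", Arg "a"]` is True, while
-- `has_duplicate_args [Arg "_x", Arg "_x"]` is False, since names starting
-- with an underscore are excluded from the duplicate check.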
mk_array :: [Term] -> Term
mk_array = RArray
mk_assign :: Term -> Term -> Term
mk_assign = mk_op_assign -- FIXME maybe mk_op_assign should be removed. Maybe also the corresponding grammar rules should be dropped
mk_assignable :: Term -> Term
mk_assignable (Lvar i) = Lvasgn i Nothing
mk_assignable (Ivar i) = Ivasgn i Nothing
mk_assignable (Cvar i) = Cvasgn i Nothing
mk_assignable (Gvar i) = Gvasgn i Nothing
mk_assignable (Const parent i) = Casgn parent i Nothing
mk_associate :: [Term] -> Term
mk_associate = Hash
mk_attr_asgn :: Term -> Token -> Token -> Term
mk_attr_asgn receiver t_dot selector_t = call_type_for_dot t_dot receiver (value selector_t ++ "=") []
mk_back_ref, mk_nth_ref :: Token -> Term
mk_back_ref (TBACK_REF s) = BackRef s
mk_nth_ref (TNTH_REF i) = NthRef i
mk_begin :: Term -> Term
mk_begin Nil = Begin []
mk_begin term@(Mlhs _) = term
--mk_begin term@(Begin _) | (body.loc.begin.nil? && body.loc.end.nil?) = term
mk_begin term = Begin [term]
mk_begin_body' :: Term -> [Term] -> Term
mk_begin_body' compoundStmt [] = compoundStmt
mk_begin_body' compoundStmt rescue_bodies = Rescue compoundStmt $ rescue_bodies ++ [Nil]
mk_begin_body :: Term -> [Term] -> Term -> Term -> Term
mk_begin_body Nil [] els ensure = wrap_in_ensure [] els ensure
mk_begin_body (Begin terms) [] els ensure = wrap_in_ensure terms els ensure
mk_begin_body compoundStmt [] els ensure = wrap_in_ensure [compoundStmt] els ensure
mk_begin_body compoundStmt rescue_bodies els ensure = Ensure (Rescue compoundStmt $ rescue_bodies ++ [els]) ensure
wrap_in_ensure terms els ensure = Ensure (Begin $ terms ++ [Begin [els]]) ensure
mk_begin_keyword :: Term -> Term
mk_begin_keyword Nil = KWBegin []
mk_begin_keyword (Begin ts) = KWBegin ts
mk_begin_keyword t = KWBegin [t]
mk_binary_op :: Term -> String -> Term -> Term
mk_binary_op receiver "&&" arg = And receiver arg
mk_binary_op receiver "||" arg = Or receiver arg
mk_binary_op receiver op arg = Send receiver op [arg]
mk_block = error "mk_block"
mk_block_pass :: Term -> Term
mk_block_pass = BlockPass
mk_blockarg :: Token -> Term
mk_blockarg = BlockArg . value
mk_call_lambda :: Term
mk_call_lambda = Lambda
mk_call_method :: Term -> Token -> Token -> [Term] -> Term
mk_call_method receiver t_dot selector args = call_type_for_dot t_dot receiver (mk_selector selector) args
mk_case :: Term -> [Term] -> Term
mk_case expr bodies = Case $ expr : bodies
mk_character :: Token -> Term
mk_character (TCHARACTER c) = Str [c]
mk_complex = error "mk_complex"
mk_condition :: Term -> Term -> Term -> Term
mk_condition = If . check_condition
check_condition :: Term -> Term
check_condition (Begin [term]) = Begin [check_condition term]
check_condition (And lhs rhs) = And (check_condition lhs) (check_condition rhs)
check_condition (Or lhs rhs) = Or (check_condition lhs) (check_condition rhs)
--check_condition (Regexp) =
check_condition condition = condition
mk_condition_mod :: Term -> Term -> Term -> Term
mk_condition_mod ifTrue ifFalse cond = If (check_condition cond) ifTrue ifFalse
mk_const_fetch :: Term -> Token -> Term
mk_const_fetch first (TCONSTANT second) = Const first second
mk_const_global :: Token -> Term
mk_const_global = Const Cbase . value
mk_const_op_assignable :: Term -> Term
mk_const_op_assignable (Const parent i) = Casgn parent i Nothing
mk_cvar, mk_gvar, mk_ivar, mk_const, mk_ident :: Token -> Term
mk_cvar (TCVAR i) = Cvar i
mk_gvar (TGVAR i) = Gvar i
mk_ivar (TIVAR i) = Ivar i
mk_const (TCONSTANT i) = Const Nil i
mk_ident (TIDENTIFIER i) = Lvar i
mk_def_class = error "mk_def_class"
mk_def_module = error "mk_def_module"
mk_def_sclass = error "mk_def_sclass"
-- NOTE: args and body are ignored for now; an empty argument list and a Nil
-- body are emitted in their place.
mk_def_singleton :: Term -> Token -> args -> body -> Term
mk_def_singleton singleton fname args body = Defs singleton (value fname) (Args []) Nil
mk_def_method :: Token -> args -> body -> Term
mk_def_method fname args body = Def (value fname) (Args []) Nil
mk_float :: Token -> Term
mk_float (TFLOAT f) = RFloat f
mk_for :: Term -> Term -> Term -> Term
mk_for = For
mk_index :: Term -> [Term] -> Term
mk_index = Index
mk_index_asgn :: Term -> [Term] -> Term
mk_index_asgn = IndexAsgn
mk_integer :: Token -> Term
mk_integer (TINTEGER i) = RInt i
mk_keyword_cmd :: ([Term] -> Term) -> [Term] -> Term
mk_keyword_cmd f args = f args -- TODO check for yield with block
mk_kwarg :: Token -> Term
mk_kwarg = KWArg . value
mk_kwoptarg :: Token -> Term -> Term
mk_kwoptarg token term = KWOptArg (value token) term
mk_kwrestarg :: Maybe Token -> Term
mk_kwrestarg = KWRestArg . fmap value
mk_kwsplat :: Term -> Term
mk_kwsplat = KWSplat
mk_loop :: (Term -> Term -> Term) -> Term -> Term -> Term
mk_loop = id
mk_loop_mod :: Term -> Token -> Term -> Term
mk_loop_mod body@(KWBegin _) KWHILE cond = WhilePost (check_condition cond) body
mk_loop_mod body@(KWBegin _) KUNTIL cond = UntilPost (check_condition cond) body
mk_loop_mod body KWHILE cond = While (check_condition cond) body
mk_loop_mod body KUNTIL cond = Until (check_condition cond) body
mk_match_op = error "mk_match_op"
mk_multi_lhs :: [Term] -> Term
mk_multi_lhs = Mlhs
mk_not_op :: Term -> Term
mk_not_op Nil = Send (Begin []) "!" []
mk_not_op expr = Send (check_condition expr) "!" []
mk_op_assign :: Term -> Term -> Term
mk_op_assign (Lvasgn i Nothing) val = lvasgn i val
mk_op_assign (Ivasgn i Nothing) val = ivasgn i val
mk_op_assign (Cvasgn i Nothing) val = cvasgn i val
mk_op_assign (Gvasgn i Nothing) val = gvasgn i val
mk_op_assign (Casgn parent i Nothing) val = casgn parent i val
mk_op_assign t1 t2 = error ("mk_op_assign" ++ show t1 ++ " " ++ show t2)
mk_optarg :: Token -> Term -> Term
mk_optarg token val = OptArg (value token) val
mk_pair :: Term -> Term -> Term
mk_pair = Pair
mk_pair_keyword :: Token -> Term -> Term
mk_pair_keyword token = Pair (Sym $ value token)
mk_pair_quoted = error "mk_pair_quoted"
mk_preexe :: Term -> Term
mk_preexe = Preexe
mk_range_exclusive :: Term -> Term -> Term
mk_range_exclusive = ERange
mk_range_inclusive :: Term -> Term -> Term
mk_range_inclusive = IRange
mk_rational = error "mk_rational"
mk_regexp_compose = error "mk_regexp_compose"
mk_regexp_options = error "mk_regexp_options"
mk_rescue_body :: Term -> Term -> Term -> Term
mk_rescue_body = Resbody
mk_restarg :: Maybe Token -> Term
mk_restarg = RestArg . fmap value
mk_shadowarg :: Token -> Term
mk_shadowarg = ShadowArg . value
mk_splat :: Term -> Term
mk_splat Nil = Splat Nothing
mk_splat term = Splat . Just $ term
mk_string_compose :: [Term] -> Term
mk_string_compose [t] = t
mk_string_compose ts = Dstr ts
mk_string, mk_string_internal :: Token -> Term
mk_string = mk_string_internal
mk_string_internal (TSTRING s) = Str s
mk_symbol_compose :: [Term] -> Term
mk_symbol_compose = error "mk_symbol_compose"
mk_symbol, mk_symbol_internal :: Token -> Term
mk_symbol = mk_symbol_internal
mk_symbol_internal (TSYMBOL s) = Sym s
mk_symbol_internal (TIDENTIFIER s) = Sym s
mk_symbols_compose = error "mk_symbols_compose"
mk_ternary :: Term -> Term -> Term -> Term
mk_ternary = mk_condition
mk_unary_num = error "mk_unary_num"
mk_unary_op :: String -> Term -> Term
mk_unary_op op receiver = Send receiver op []
mk_undef_method :: [Term] -> Term
mk_undef_method = Undef
mk_when :: [Term] -> Term -> Term
mk_when cases body = When $ cases ++ [body]
mk_word = error "mk_word"
mk_words_compose = error "mk_words_compose"
mk_xstring_compose = error "mk_xstring_compose"
-- mk_block' (begin_t, args, body, end_t) = mk_block (mk_call_method $1 $2 $3 $4) begin_t Args body end_t
mk_block' = error "mk_block'"
--- private
call_type_for_dot :: Token -> (Term -> String -> [Term] -> Term)
call_type_for_dot TANDDOT = Csend
call_type_for_dot _ = Send
value :: Token -> String
value (TIDENTIFIER i) = i
value (TCONSTANT i) = i
value other = error (show other)
mk_selector :: Token -> String
mk_selector KNIL = "call"
mk_selector s = value s
arg_name :: Term -> String
arg_name (Arg name) = name
arg_name (OptArg name _) = name
arg_name (RestArg (Just name)) = name
arg_name (RestArg Nothing) = "*"
arg_name (BlockArg name) = name
arg_name (KWArg name) = name
arg_name (KWOptArg name _) = name
arg_name (KWRestArg (Just name)) = name
arg_name (KWRestArg Nothing) = "*"
arg_name (ShadowArg name) = name
|
module TestDoubles
put : Double -> IO ()
put = putStrLn . show
main : IO ()
main = do
  put $ exp 1
  put $ log 1
  put $ sin 1
  put $ cos 1
  put $ tan 1
  put $ asin 1
  put $ acos 1
  put $ atan 1
  put $ sqrt 2
  put $ floor 1.5
  put $ ceiling 1.5
|
data Elem : a -> List a -> Type where
  Here : Elem x (x :: xs)
  There : (later : Elem x xs) -> Elem x (y :: xs)

data TypeWithFunction : Type where
  ABC : (ty_a:Type) -> (ty_b:Type) -> (ty_a -> ty_b) -> TypeWithFunction

isInList : Elem (ABC Bool String (\c => if c then "Foo" else "Bar"))
                [(ABC Bool String (\c => if c then "Foo" else "Bar"))]
isInList = Here

isInListBad : Elem (ABC Bool String (\c => if c then "Foo" else "Bar"))
                   [(ABC Bool String (\c => if c then "Foo" else "Baz"))]
isInListBad = Here
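-- `isInListBad` should be rejected by the type checker: the element in the
-- list carries a lambda returning "Baz" where the sought element's lambda
-- returns "Bar", so `Here` cannot apply.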
|
include("run_mms.jl")
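# Convergence sweep for the MMS study defined in run_mms.jl: for each SBP
# order, characteristic/non-characteristic method, friction parameter α, and
# CFL number, report the error ϵ returned by `main`.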
let
    N0 = 48
    αs = (1, 4, 16, 64, 128)
    @show cfls = 2.0 .^ (0:-1:-5)
    for sbp_order in (2, 4, 6)
        for characteristic_method in (true, false)
            for α in αs
                @show (sbp_order, characteristic_method, α)
                for cfl in cfls
                    ϵ = main(
                        sbp_order,
                        1,
                        N0;
                        characteristic_method = characteristic_method,
                        cfl = cfl,
                        friction = (V) -> α * asinh(V),
                        tspan = (0.0, 0.1),
                        do_output = false,
                    )[1]
                    @show (cfl, ϵ)
                end
                println()
            end
        end
    end
end
|
State Before:
  G : Type u_1
  inst✝ : Group G
  H K : Subgroup G
  S T : Set G
  f : G ⧸ H → G
  hf : ∀ (q : G ⧸ H), ↑(f q) = q
  q : G ⧸ H
  ⊢ ↑(↑(toEquiv (_ : (Set.range fun q => f q) ∈ leftTransversals ↑H)) q) = f q
State After:
  G : Type u_1
  inst✝ : Group G
  H K : Subgroup G
  S T : Set G
  f : G ⧸ H → G
  hf : ∀ (q : G ⧸ H), ↑(f q) = q
  q : G ⧸ H
  ⊢ ↑(toEquiv (_ : (Set.range fun q => f q) ∈ leftTransversals ↑H)) q =
      { val := f q, property := (_ : ∃ y, (fun q => f q) y = f q) }
Tactic: refine' (Subtype.ext_iff.mp _).trans (Subtype.coe_mk (f q) ⟨q, rfl⟩)

State Before:
  G : Type u_1
  inst✝ : Group G
  H K : Subgroup G
  S T : Set G
  f : G ⧸ H → G
  hf : ∀ (q : G ⧸ H), ↑(f q) = q
  q : G ⧸ H
  ⊢ ↑(toEquiv (_ : (Set.range fun q => f q) ∈ leftTransversals ↑H)) q =
      { val := f q, property := (_ : ∃ y, (fun q => f q) y = f q) }
State After: no goals
Tactic: exact (toEquiv (range_mem_leftTransversals hf)).apply_eq_iff_eq_symm_apply.mpr (hf q).symm |
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jan-David Salchow, Sébastien Gouëzel, Jean Lo, Yury Kudryashov, Frédéric Dupuis,
Heather Macbeth
! This file was ported from Lean 3 source module topology.algebra.module.basic
! leanprover-community/mathlib commit f430769b562e0cedef59ee1ed968d67e0e0c86ba
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Topology.Algebra.Ring.Basic
import Mathbin.Topology.Algebra.MulAction
import Mathbin.Topology.Algebra.UniformGroup
import Mathbin.Topology.ContinuousFunction.Basic
import Mathbin.Topology.UniformSpace.UniformEmbedding
import Mathbin.Algebra.Algebra.Basic
import Mathbin.LinearAlgebra.Projection
import Mathbin.LinearAlgebra.Pi
/-!
# Theory of topological modules and continuous linear maps.
We use the class `has_continuous_smul` for topological (semi) modules and topological vector spaces.
In this file we define continuous (semi-)linear maps, as semilinear maps between topological
modules which are continuous. The set of continuous semilinear maps between the topological
`R₁`-module `M` and `R₂`-module `M₂` with respect to the `ring_hom` `σ` is denoted by `M →SL[σ] M₂`.
Plain linear maps are denoted by `M →L[R] M₂` and star-linear maps by `M →L⋆[R] M₂`.
The corresponding notation for equivalences is `M ≃SL[σ] M₂`, `M ≃L[R] M₂` and `M ≃L⋆[R] M₂`.
-/
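-- Illustrative (hypothetical) uses of the notation introduced below:
--   `f : M →L[R] M₂`  -- a continuous `R`-linear map
--   `g : M →SL[σ] M₂` -- a continuous `σ`-semilinear map
--   `e : M ≃L[R] M₂`  -- a continuous `R`-linear equivalence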
open Filter
open LinearMap (ker range)
open Topology BigOperators Filter
universe u v w u'
section
variable {R : Type _} {M : Type _} [Ring R] [TopologicalSpace R] [TopologicalSpace M]
[AddCommGroup M] [Module R M]
theorem ContinuousSMul.of_nhds_zero [TopologicalRing R] [TopologicalAddGroup M]
(hmul : Tendsto (fun p : R × M => p.1 • p.2) (𝓝 0 ×ᶠ 𝓝 0) (𝓝 0))
(hmulleft : ∀ m : M, Tendsto (fun a : R => a • m) (𝓝 0) (𝓝 0))
(hmulright : ∀ a : R, Tendsto (fun m : M => a • m) (𝓝 0) (𝓝 0)) : ContinuousSMul R M :=
⟨by
rw [continuous_iff_continuousAt]
rintro ⟨a₀, m₀⟩
have key :
∀ p : R × M,
p.1 • p.2 = a₀ • m₀ + ((p.1 - a₀) • m₀ + a₀ • (p.2 - m₀) + (p.1 - a₀) • (p.2 - m₀)) :=
by
rintro ⟨a, m⟩
simp [sub_smul, smul_sub]
abel
rw [funext key]
clear key
refine' tendsto_const_nhds.add (tendsto.add (tendsto.add _ _) _)
· rw [sub_self, zero_smul]
apply (hmulleft m₀).comp
rw [show (fun p : R × M => p.1 - a₀) = (fun a => a - a₀) ∘ Prod.fst
by
ext
rfl,
nhds_prod_eq]
have : tendsto (fun a => a - a₀) (𝓝 a₀) (𝓝 0) :=
by
rw [← sub_self a₀]
exact tendsto_id.sub tendsto_const_nhds
exact this.comp tendsto_fst
· rw [sub_self, smul_zero]
apply (hmulright a₀).comp
rw [show (fun p : R × M => p.2 - m₀) = (fun m => m - m₀) ∘ Prod.snd
by
ext
rfl,
nhds_prod_eq]
have : tendsto (fun m => m - m₀) (𝓝 m₀) (𝓝 0) :=
by
rw [← sub_self m₀]
exact tendsto_id.sub tendsto_const_nhds
exact this.comp tendsto_snd
· rw [sub_self, zero_smul, nhds_prod_eq,
show
(fun p : R × M => (p.fst - a₀) • (p.snd - m₀)) =
(fun p : R × M => p.1 • p.2) ∘ Prod.map (fun a => a - a₀) fun m => m - m₀
by
ext
rfl]
apply hmul.comp (tendsto.prod_map _ _) <;>
· rw [← sub_self]
exact tendsto_id.sub tendsto_const_nhds⟩
#align has_continuous_smul.of_nhds_zero ContinuousSMul.of_nhds_zero
end
section
variable {R : Type _} {M : Type _} [Ring R] [TopologicalSpace R] [TopologicalSpace M]
[AddCommGroup M] [ContinuousAdd M] [Module R M] [ContinuousSMul R M]
/-- If `M` is a topological module over `R` and `0` is a limit of invertible elements of `R`, then
`⊤` is the only submodule of `M` with a nonempty interior.
This is the case, e.g., if `R` is a nontrivially normed field. -/
theorem Submodule.eq_top_of_nonempty_interior' [NeBot (𝓝[{ x : R | IsUnit x }] 0)]
(s : Submodule R M) (hs : (interior (s : Set M)).Nonempty) : s = ⊤ :=
by
rcases hs with ⟨y, hy⟩
refine' Submodule.eq_top_iff'.2 fun x => _
rw [mem_interior_iff_mem_nhds] at hy
have : tendsto (fun c : R => y + c • x) (𝓝[{ x : R | IsUnit x }] 0) (𝓝 (y + (0 : R) • x)) :=
tendsto_const_nhds.add ((tendsto_nhdsWithin_of_tendsto_nhds tendsto_id).smul tendsto_const_nhds)
rw [zero_smul, add_zero] at this
obtain ⟨_, hu : y + _ • _ ∈ s, u, rfl⟩ :=
nonempty_of_mem (inter_mem (mem_map.1 (this hy)) self_mem_nhdsWithin)
have hy' : y ∈ ↑s := mem_of_mem_nhds hy
rwa [s.add_mem_iff_right hy', ← Units.smul_def, s.smul_mem_iff' u] at hu
#align submodule.eq_top_of_nonempty_interior' Submodule.eq_top_of_nonempty_interior'
variable (R M)
/-- Let `R` be a topological ring such that zero is not an isolated point (e.g., a nontrivially
normed field, see `normed_field.punctured_nhds_ne_bot`). Let `M` be a nontrivial module over `R`
such that `c • x = 0` implies `c = 0 ∨ x = 0`. Then `M` has no isolated points. We formulate this
using `ne_bot (𝓝[≠] x)`.
This lemma is not an instance because Lean would need to find `[has_continuous_smul ?m_1 M]` with
unknown `?m_1`. We register this as an instance for `R = ℝ` in `real.punctured_nhds_module_ne_bot`.
One can also use `haveI := module.punctured_nhds_ne_bot R M` in a proof.
-/
theorem Module.punctured_nhds_neBot [Nontrivial M] [NeBot (𝓝[≠] (0 : R))] [NoZeroSMulDivisors R M]
(x : M) : NeBot (𝓝[≠] x) :=
by
rcases exists_ne (0 : M) with ⟨y, hy⟩
suffices : tendsto (fun c : R => x + c • y) (𝓝[≠] 0) (𝓝[≠] x); exact this.ne_bot
refine' tendsto.inf _ (tendsto_principal_principal.2 <| _)
· convert tendsto_const_nhds.add ((@tendsto_id R _).smul_const y)
rw [zero_smul, add_zero]
· intro c hc
simpa [hy] using hc
#align module.punctured_nhds_ne_bot Module.punctured_nhds_neBot
end
section LatticeOps
variable {ι R M₁ M₂ : Type _} [Semiring R] [AddCommMonoid M₁] [AddCommMonoid M₂] [Module R M₁]
[Module R M₂] [u : TopologicalSpace R] {t : TopologicalSpace M₂} [ContinuousSMul R M₂]
(f : M₁ →ₗ[R] M₂)
theorem continuousSMul_induced : @ContinuousSMul R M₁ _ u (t.induced f) :=
{
continuous_smul := by
letI : TopologicalSpace M₁ := t.induced f
refine' continuous_induced_rng.2 _
simp_rw [Function.comp, f.map_smul]
refine' continuous_fst.smul (continuous_induced_dom.comp continuous_snd) }
#align has_continuous_smul_induced continuousSMul_induced
end LatticeOps
namespace Submodule
variable {α β : Type _} [TopologicalSpace β]
instance [TopologicalSpace α] [Semiring α] [AddCommMonoid β] [Module α β] [ContinuousSMul α β]
(S : Submodule α β) : ContinuousSMul α S
where continuous_smul :=
by
rw [embedding_subtype_coe.to_inducing.continuous_iff]
exact continuous_fst.smul (continuous_subtype_coe.comp continuous_snd)
instance [Ring α] [AddCommGroup β] [Module α β] [TopologicalAddGroup β] (S : Submodule α β) :
TopologicalAddGroup S :=
S.toAddSubgroup.TopologicalAddGroup
end Submodule
section closure
variable {R : Type u} {M : Type v} [Semiring R] [TopologicalSpace R] [TopologicalSpace M]
[AddCommMonoid M] [Module R M] [ContinuousSMul R M]
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
theorem Submodule.closure_smul_self_subset (s : Submodule R M) :
(fun p : R × M => p.1 • p.2) '' Set.univ ×ˢ closure s ⊆ closure s :=
calc
(fun p : R × M => p.1 • p.2) '' Set.univ ×ˢ closure s =
(fun p : R × M => p.1 • p.2) '' closure (Set.univ ×ˢ s) :=
by simp [closure_prod_eq]
_ ⊆ closure ((fun p : R × M => p.1 • p.2) '' Set.univ ×ˢ s) :=
(image_closure_subset_closure_image continuous_smul)
_ = closure s := by
congr
ext x
refine' ⟨_, fun hx => ⟨⟨1, x⟩, ⟨Set.mem_univ _, hx⟩, one_smul R _⟩⟩
rintro ⟨⟨c, y⟩, ⟨hc, hy⟩, rfl⟩
simp [s.smul_mem c hy]
#align submodule.closure_smul_self_subset Submodule.closure_smul_self_subset
/- ./././Mathport/Syntax/Translate/Expr.lean:177:8: unsupported: ambiguous notation -/
theorem Submodule.closure_smul_self_eq (s : Submodule R M) :
(fun p : R × M => p.1 • p.2) '' Set.univ ×ˢ closure s = closure s :=
s.closure_smul_self_subset.antisymm fun x hx => ⟨⟨1, x⟩, ⟨Set.mem_univ _, hx⟩, one_smul R _⟩
#align submodule.closure_smul_self_eq Submodule.closure_smul_self_eq
variable [ContinuousAdd M]
/-- The (topological-space) closure of a submodule of a topological `R`-module `M` is itself
a submodule. -/
def Submodule.topologicalClosure (s : Submodule R M) : Submodule R M :=
{
s.toAddSubmonoid.topologicalClosure with
carrier := closure (s : Set M)
smul_mem' := fun c x hx => s.closure_smul_self_subset ⟨⟨c, x⟩, ⟨Set.mem_univ _, hx⟩, rfl⟩ }
#align submodule.topological_closure Submodule.topologicalClosure
@[simp]
theorem Submodule.topologicalClosure_coe (s : Submodule R M) :
(s.topologicalClosure : Set M) = closure (s : Set M) :=
rfl
#align submodule.topological_closure_coe Submodule.topologicalClosure_coe
theorem Submodule.le_topologicalClosure (s : Submodule R M) : s ≤ s.topologicalClosure :=
subset_closure
#align submodule.le_topological_closure Submodule.le_topologicalClosure
theorem Submodule.isClosed_topologicalClosure (s : Submodule R M) :
IsClosed (s.topologicalClosure : Set M) := by convert isClosed_closure
#align submodule.is_closed_topological_closure Submodule.isClosed_topologicalClosure
theorem Submodule.topologicalClosure_minimal (s : Submodule R M) {t : Submodule R M} (h : s ≤ t)
(ht : IsClosed (t : Set M)) : s.topologicalClosure ≤ t :=
closure_minimal h ht
#align submodule.topological_closure_minimal Submodule.topologicalClosure_minimal
theorem Submodule.topologicalClosure_mono {s : Submodule R M} {t : Submodule R M} (h : s ≤ t) :
s.topologicalClosure ≤ t.topologicalClosure :=
s.topologicalClosure_minimal (h.trans t.le_topologicalClosure) t.isClosed_topologicalClosure
#align submodule.topological_closure_mono Submodule.topologicalClosure_mono
/-- The topological closure of a closed submodule `s` is equal to `s`. -/
theorem IsClosed.submodule_topologicalClosure_eq {s : Submodule R M} (hs : IsClosed (s : Set M)) :
s.topologicalClosure = s :=
le_antisymm (s.topologicalClosure_minimal rfl.le hs) s.le_topologicalClosure
#align is_closed.submodule_topological_closure_eq IsClosed.submodule_topologicalClosure_eq
/-- A subspace is dense iff its topological closure is the entire space. -/
theorem Submodule.dense_iff_topologicalClosure_eq_top {s : Submodule R M} :
Dense (s : Set M) ↔ s.topologicalClosure = ⊤ :=
by
rw [← SetLike.coe_set_eq, dense_iff_closure_eq]
simp
#align submodule.dense_iff_topological_closure_eq_top Submodule.dense_iff_topologicalClosure_eq_top
instance {M' : Type _} [AddCommMonoid M'] [Module R M'] [UniformSpace M'] [ContinuousAdd M']
[ContinuousSMul R M'] [CompleteSpace M'] (U : Submodule R M') :
CompleteSpace U.topologicalClosure :=
isClosed_closure.completeSpace_coe
/-- A maximal proper subspace of a topological module (i.e. a `submodule` satisfying `is_coatom`)
is either closed or dense. -/
theorem Submodule.isClosed_or_dense_of_isCoatom (s : Submodule R M) (hs : IsCoatom s) :
IsClosed (s : Set M) ∨ Dense (s : Set M) :=
(hs.le_iff.mp s.le_topologicalClosure).symm.imp (isClosed_of_closure_subset ∘ Eq.le)
Submodule.dense_iff_topologicalClosure_eq_top.mpr
#align submodule.is_closed_or_dense_of_is_coatom Submodule.isClosed_or_dense_of_isCoatom
end closure
section Pi
theorem LinearMap.continuous_on_pi {ι : Type _} {R : Type _} {M : Type _} [Finite ι] [Semiring R]
[TopologicalSpace R] [AddCommMonoid M] [Module R M] [TopologicalSpace M] [ContinuousAdd M]
[ContinuousSMul R M] (f : (ι → R) →ₗ[R] M) : Continuous f :=
by
cases nonempty_fintype ι
classical
-- for the proof, write `f` in the standard basis, and use that each coordinate is a continuous
-- function.
have : (f : (ι → R) → M) = fun x => ∑ i : ι, x i • f fun j => if i = j then 1 else 0 :=
by
ext x
exact f.pi_apply_eq_sum_univ x
rw [this]
refine' continuous_finset_sum _ fun i hi => _
exact (continuous_apply i).smul continuous_const
#align linear_map.continuous_on_pi LinearMap.continuous_on_pi
end Pi
/-- Continuous linear maps between modules. We only put the type classes that are necessary for the
definition, although in applications `M` and `M₂` will be topological modules over the topological
ring `R`. -/
structure ContinuousLinearMap {R : Type _} {S : Type _} [Semiring R] [Semiring S] (σ : R →+* S)
(M : Type _) [TopologicalSpace M] [AddCommMonoid M] (M₂ : Type _) [TopologicalSpace M₂]
[AddCommMonoid M₂] [Module R M] [Module S M₂] extends M →ₛₗ[σ] M₂ where
cont : Continuous to_fun := by continuity
#align continuous_linear_map ContinuousLinearMap
-- mathport name: «expr →SL[ ] »
notation:25 M " →SL[" σ "] " M₂ => ContinuousLinearMap σ M M₂
-- mathport name: «expr →L[ ] »
notation:25 M " →L[" R "] " M₂ => ContinuousLinearMap (RingHom.id R) M M₂
-- mathport name: «expr →L⋆[ ] »
notation:25 M " →L⋆[" R "] " M₂ => ContinuousLinearMap (starRingEnd R) M M₂
/-- `continuous_semilinear_map_class F σ M M₂` asserts `F` is a type of bundled continuous
`σ`-semilinear maps `M → M₂`. See also `continuous_linear_map_class F R M M₂` for the case where
`σ` is the identity map on `R`. A map `f` between an `R`-module and an `S`-module over a ring
homomorphism `σ : R →+* S` is semilinear if it satisfies the two properties `f (x + y) = f x + f y`
and `f (c • x) = (σ c) • f x`. -/
class ContinuousSemilinearMapClass (F : Type _) {R S : outParam (Type _)} [Semiring R] [Semiring S]
(σ : outParam <| R →+* S) (M : outParam (Type _)) [TopologicalSpace M] [AddCommMonoid M]
(M₂ : outParam (Type _)) [TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M]
[Module S M₂] extends SemilinearMapClass F σ M M₂, ContinuousMapClass F M M₂
#align continuous_semilinear_map_class ContinuousSemilinearMapClass
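-- For instance (a hypothetical instantiation), taking `σ = starRingEnd ℂ`, a
-- map `f` in such a class satisfies `f (c • x) = conj c • f x`, i.e. it is
-- star-linear in the sense of the `M →L⋆[R] M₂` notation above.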
-- `σ`, `R` and `S` become metavariables, but they are all outparams so it's OK
attribute [nolint dangerous_instance] ContinuousSemilinearMapClass.toContinuousMapClass
/-- `continuous_linear_map_class F R M M₂` asserts `F` is a type of bundled continuous
`R`-linear maps `M → M₂`. This is an abbreviation for
`continuous_semilinear_map_class F (ring_hom.id R) M M₂`. -/
abbrev ContinuousLinearMapClass (F : Type _) (R : outParam (Type _)) [Semiring R]
(M : outParam (Type _)) [TopologicalSpace M] [AddCommMonoid M] (M₂ : outParam (Type _))
[TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M] [Module R M₂] :=
ContinuousSemilinearMapClass F (RingHom.id R) M M₂
#align continuous_linear_map_class ContinuousLinearMapClass
/-- Continuous linear equivalences between modules. We only put the type classes that are necessary
for the definition, although in applications `M` and `M₂` will be topological modules over the
topological semiring `R`. -/
@[nolint has_nonempty_instance]
structure ContinuousLinearEquiv {R : Type _} {S : Type _} [Semiring R] [Semiring S] (σ : R →+* S)
{σ' : S →+* R} [RingHomInvPair σ σ'] [RingHomInvPair σ' σ] (M : Type _) [TopologicalSpace M]
[AddCommMonoid M] (M₂ : Type _) [TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M]
[Module S M₂] extends M ≃ₛₗ[σ] M₂ where
continuous_toFun : Continuous to_fun := by continuity
continuous_invFun : Continuous inv_fun := by continuity
#align continuous_linear_equiv ContinuousLinearEquiv
-- mathport name: «expr ≃SL[ ] »
notation:50 M " ≃SL[" σ "] " M₂ => ContinuousLinearEquiv σ M M₂
-- mathport name: «expr ≃L[ ] »
notation:50 M " ≃L[" R "] " M₂ => ContinuousLinearEquiv (RingHom.id R) M M₂
-- mathport name: «expr ≃L⋆[ ] »
notation:50 M " ≃L⋆[" R "] " M₂ => ContinuousLinearEquiv (starRingEnd R) M M₂
/-- `continuous_semilinear_equiv_class F σ M M₂` asserts `F` is a type of bundled continuous
`σ`-semilinear equivs `M → M₂`. See also `continuous_linear_equiv_class F R M M₂` for the case
where `σ` is the identity map on `R`. A map `f` between an `R`-module and an `S`-module over a ring
homomorphism `σ : R →+* S` is semilinear if it satisfies the two properties `f (x + y) = f x + f y`
and `f (c • x) = (σ c) • f x`. -/
class ContinuousSemilinearEquivClass (F : Type _) {R : outParam (Type _)} {S : outParam (Type _)}
[Semiring R] [Semiring S] (σ : outParam <| R →+* S) {σ' : outParam <| S →+* R}
[RingHomInvPair σ σ'] [RingHomInvPair σ' σ] (M : outParam (Type _)) [TopologicalSpace M]
[AddCommMonoid M] (M₂ : outParam (Type _)) [TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M]
[Module S M₂] extends SemilinearEquivClass F σ M M₂ where
map_continuous : ∀ f : F, Continuous f := by continuity
inv_continuous : ∀ f : F, Continuous (inv f) := by continuity
#align continuous_semilinear_equiv_class ContinuousSemilinearEquivClass
/-- `continuous_linear_equiv_class F σ M M₂` asserts `F` is a type of bundled continuous
`R`-linear equivs `M → M₂`. This is an abbreviation for
`continuous_semilinear_equiv_class F (ring_hom.id) M M₂`. -/
abbrev ContinuousLinearEquivClass (F : Type _) (R : outParam (Type _)) [Semiring R]
(M : outParam (Type _)) [TopologicalSpace M] [AddCommMonoid M] (M₂ : outParam (Type _))
[TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M] [Module R M₂] :=
ContinuousSemilinearEquivClass F (RingHom.id R) M M₂
#align continuous_linear_equiv_class ContinuousLinearEquivClass
namespace ContinuousSemilinearEquivClass
variable (F : Type _) {R : Type _} {S : Type _} [Semiring R] [Semiring S] (σ : R →+* S)
{σ' : S →+* R} [RingHomInvPair σ σ'] [RingHomInvPair σ' σ] (M : Type _) [TopologicalSpace M]
[AddCommMonoid M] (M₂ : Type _) [TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M]
[Module S M₂]
include σ'
-- `σ'` becomes a metavariable, but it's OK since it's an outparam
@[nolint dangerous_instance]
instance (priority := 100) [s : ContinuousSemilinearEquivClass F σ M M₂] :
ContinuousSemilinearMapClass F σ M M₂ :=
{ s with
coe := (coe : F → M → M₂)
coe_injective' := @FunLike.coe_injective F _ _ _ }
omit σ'
end ContinuousSemilinearEquivClass
section PointwiseLimits
variable {M₁ M₂ α R S : Type _} [TopologicalSpace M₂] [T2Space M₂] [Semiring R] [Semiring S]
[AddCommMonoid M₁] [AddCommMonoid M₂] [Module R M₁] [Module S M₂] [ContinuousConstSMul S M₂]
section
variable (M₁ M₂) (σ : R →+* S)
theorem isClosed_setOf_map_smul : IsClosed { f : M₁ → M₂ | ∀ c x, f (c • x) = σ c • f x } :=
by
simp only [Set.setOf_forall]
exact
isClosed_interᵢ fun c =>
isClosed_interᵢ fun x => isClosed_eq (continuous_apply _) ((continuous_apply _).const_smul _)
#align is_closed_set_of_map_smul isClosed_setOf_map_smul
end
variable [ContinuousAdd M₂] {σ : R →+* S} {l : Filter α}
/-- Constructs a bundled linear map from a function and a proof that this function belongs to the
closure of the set of linear maps. -/
@[simps (config := { fullyApplied := false })]
def linearMapOfMemClosureRangeCoe (f : M₁ → M₂)
(hf : f ∈ closure (Set.range (coeFn : (M₁ →ₛₗ[σ] M₂) → M₁ → M₂))) : M₁ →ₛₗ[σ] M₂ :=
{ addMonoidHomOfMemClosureRangeCoe f hf with
toFun := f
map_smul' :=
(isClosed_setOf_map_smul M₁ M₂ σ).closure_subset_iff.2
(Set.range_subset_iff.2 LinearMap.map_smulₛₗ) hf }
#align linear_map_of_mem_closure_range_coe linearMapOfMemClosureRangeCoe
/-- Construct a bundled linear map from a pointwise limit of linear maps -/
@[simps (config := { fullyApplied := false })]
def linearMapOfTendsto (f : M₁ → M₂) (g : α → M₁ →ₛₗ[σ] M₂) [l.ne_bot]
(h : Tendsto (fun a x => g a x) l (𝓝 f)) : M₁ →ₛₗ[σ] M₂ :=
linearMapOfMemClosureRangeCoe f <|
mem_closure_of_tendsto h <| eventually_of_forall fun a => Set.mem_range_self _
#align linear_map_of_tendsto linearMapOfTendsto
variable (M₁ M₂ σ)
theorem LinearMap.isClosed_range_coe : IsClosed (Set.range (coeFn : (M₁ →ₛₗ[σ] M₂) → M₁ → M₂)) :=
isClosed_of_closure_subset fun f hf => ⟨linearMapOfMemClosureRangeCoe f hf, rfl⟩
#align linear_map.is_closed_range_coe LinearMap.isClosed_range_coe
end PointwiseLimits
namespace ContinuousLinearMap
section Semiring
/-!
### Properties that hold for not necessarily commutative semirings.
-/
variable {R₁ : Type _} {R₂ : Type _} {R₃ : Type _} [Semiring R₁] [Semiring R₂] [Semiring R₃]
{σ₁₂ : R₁ →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R₁ →+* R₃} {M₁ : Type _} [TopologicalSpace M₁]
[AddCommMonoid M₁] {M'₁ : Type _} [TopologicalSpace M'₁] [AddCommMonoid M'₁] {M₂ : Type _}
[TopologicalSpace M₂] [AddCommMonoid M₂] {M₃ : Type _} [TopologicalSpace M₃] [AddCommMonoid M₃]
{M₄ : Type _} [TopologicalSpace M₄] [AddCommMonoid M₄] [Module R₁ M₁] [Module R₁ M'₁]
[Module R₂ M₂] [Module R₃ M₃]
/-- Coerce continuous linear maps to linear maps. -/
instance : Coe (M₁ →SL[σ₁₂] M₂) (M₁ →ₛₗ[σ₁₂] M₂) :=
⟨toLinearMap⟩
-- make the coercion the preferred form
@[simp]
theorem toLinearMap_eq_coe (f : M₁ →SL[σ₁₂] M₂) : f.toLinearMap = f :=
rfl
#align continuous_linear_map.to_linear_map_eq_coe ContinuousLinearMap.toLinearMap_eq_coe
theorem coe_injective : Function.Injective (coe : (M₁ →SL[σ₁₂] M₂) → M₁ →ₛₗ[σ₁₂] M₂) :=
by
intro f g H
cases f
cases g
congr
#align continuous_linear_map.coe_injective ContinuousLinearMap.coe_injective
instance : ContinuousSemilinearMapClass (M₁ →SL[σ₁₂] M₂) σ₁₂ M₁ M₂
where
coe f := f.toFun
coe_injective' f g h := coe_injective (FunLike.coe_injective h)
map_add f := map_add f.toLinearMap
map_continuous f := f.2
map_smulₛₗ f := f.toLinearMap.map_smul'
-- see Note [function coercion]
/-- Coerce continuous linear maps to functions. -/
instance toFun : CoeFun (M₁ →SL[σ₁₂] M₂) fun _ => M₁ → M₂ :=
⟨fun f => f.toFun⟩
#align continuous_linear_map.to_fun ContinuousLinearMap.toFun
@[simp]
theorem coe_mk (f : M₁ →ₛₗ[σ₁₂] M₂) (h) : (mk f h : M₁ →ₛₗ[σ₁₂] M₂) = f :=
rfl
#align continuous_linear_map.coe_mk ContinuousLinearMap.coe_mk
@[simp]
theorem coe_mk' (f : M₁ →ₛₗ[σ₁₂] M₂) (h) : (mk f h : M₁ → M₂) = f :=
rfl
#align continuous_linear_map.coe_mk' ContinuousLinearMap.coe_mk'
@[continuity]
protected theorem continuous (f : M₁ →SL[σ₁₂] M₂) : Continuous f :=
f.2
#align continuous_linear_map.continuous ContinuousLinearMap.continuous
protected theorem uniformContinuous {E₁ E₂ : Type _} [UniformSpace E₁] [UniformSpace E₂]
[AddCommGroup E₁] [AddCommGroup E₂] [Module R₁ E₁] [Module R₂ E₂] [UniformAddGroup E₁]
[UniformAddGroup E₂] (f : E₁ →SL[σ₁₂] E₂) : UniformContinuous f :=
uniformContinuous_addMonoidHom_of_continuous f.Continuous
#align continuous_linear_map.uniform_continuous ContinuousLinearMap.uniformContinuous
@[simp, norm_cast]
theorem coe_inj {f g : M₁ →SL[σ₁₂] M₂} : (f : M₁ →ₛₗ[σ₁₂] M₂) = g ↔ f = g :=
coe_injective.eq_iff
#align continuous_linear_map.coe_inj ContinuousLinearMap.coe_inj
theorem coeFn_injective : @Function.Injective (M₁ →SL[σ₁₂] M₂) (M₁ → M₂) coeFn :=
FunLike.coe_injective
#align continuous_linear_map.coe_fn_injective ContinuousLinearMap.coeFn_injective
/-- See Note [custom simps projection]. We need to specify this projection explicitly in this case,
because it is a composition of multiple projections. -/
def Simps.apply (h : M₁ →SL[σ₁₂] M₂) : M₁ → M₂ :=
h
#align continuous_linear_map.simps.apply ContinuousLinearMap.Simps.apply
/-- See Note [custom simps projection]. -/
def Simps.coe (h : M₁ →SL[σ₁₂] M₂) : M₁ →ₛₗ[σ₁₂] M₂ :=
h
#align continuous_linear_map.simps.coe ContinuousLinearMap.Simps.coe
initialize_simps_projections ContinuousLinearMap (to_linear_map_to_fun → apply, toLinearMap → coe)
@[ext]
theorem ext {f g : M₁ →SL[σ₁₂] M₂} (h : ∀ x, f x = g x) : f = g :=
FunLike.ext f g h
#align continuous_linear_map.ext ContinuousLinearMap.ext
theorem ext_iff {f g : M₁ →SL[σ₁₂] M₂} : f = g ↔ ∀ x, f x = g x :=
FunLike.ext_iff
#align continuous_linear_map.ext_iff ContinuousLinearMap.ext_iff
/-- Copy of a `continuous_linear_map` with a new `to_fun` equal to the old one. Useful to fix
definitional equalities. -/
protected def copy (f : M₁ →SL[σ₁₂] M₂) (f' : M₁ → M₂) (h : f' = ⇑f) : M₁ →SL[σ₁₂] M₂
where
toLinearMap := f.toLinearMap.copy f' h
cont := show Continuous f' from h.symm ▸ f.Continuous
#align continuous_linear_map.copy ContinuousLinearMap.copy
@[simp]
theorem coe_copy (f : M₁ →SL[σ₁₂] M₂) (f' : M₁ → M₂) (h : f' = ⇑f) : ⇑(f.copy f' h) = f' :=
rfl
#align continuous_linear_map.coe_copy ContinuousLinearMap.coe_copy
theorem copy_eq (f : M₁ →SL[σ₁₂] M₂) (f' : M₁ → M₂) (h : f' = ⇑f) : f.copy f' h = f :=
FunLike.ext' h
#align continuous_linear_map.copy_eq ContinuousLinearMap.copy_eq
-- make some straightforward lemmas available to `simp`.
protected theorem map_zero (f : M₁ →SL[σ₁₂] M₂) : f (0 : M₁) = 0 :=
map_zero f
#align continuous_linear_map.map_zero ContinuousLinearMap.map_zero
protected theorem map_add (f : M₁ →SL[σ₁₂] M₂) (x y : M₁) : f (x + y) = f x + f y :=
map_add f x y
#align continuous_linear_map.map_add ContinuousLinearMap.map_add
@[simp]
protected theorem map_smulₛₗ (f : M₁ →SL[σ₁₂] M₂) (c : R₁) (x : M₁) : f (c • x) = σ₁₂ c • f x :=
(toLinearMap _).map_smulₛₗ _ _
#align continuous_linear_map.map_smulₛₗ ContinuousLinearMap.map_smulₛₗ
@[simp]
protected theorem map_smul [Module R₁ M₂] (f : M₁ →L[R₁] M₂) (c : R₁) (x : M₁) :
f (c • x) = c • f x := by simp only [RingHom.id_apply, ContinuousLinearMap.map_smulₛₗ]
#align continuous_linear_map.map_smul ContinuousLinearMap.map_smul
@[simp]
theorem map_smul_of_tower {R S : Type _} [Semiring S] [SMul R M₁] [Module S M₁] [SMul R M₂]
[Module S M₂] [LinearMap.CompatibleSMul M₁ M₂ R S] (f : M₁ →L[S] M₂) (c : R) (x : M₁) :
f (c • x) = c • f x :=
LinearMap.CompatibleSMul.map_smul f c x
#align continuous_linear_map.map_smul_of_tower ContinuousLinearMap.map_smul_of_tower
protected theorem map_sum {ι : Type _} (f : M₁ →SL[σ₁₂] M₂) (s : Finset ι) (g : ι → M₁) :
f (∑ i in s, g i) = ∑ i in s, f (g i) :=
f.toLinearMap.map_sum
#align continuous_linear_map.map_sum ContinuousLinearMap.map_sum
@[simp, norm_cast]
theorem coe_coe (f : M₁ →SL[σ₁₂] M₂) : ⇑(f : M₁ →ₛₗ[σ₁₂] M₂) = f :=
rfl
#align continuous_linear_map.coe_coe ContinuousLinearMap.coe_coe
@[ext]
theorem ext_ring [TopologicalSpace R₁] {f g : R₁ →L[R₁] M₁} (h : f 1 = g 1) : f = g :=
coe_inj.1 <| LinearMap.ext_ring h
#align continuous_linear_map.ext_ring ContinuousLinearMap.ext_ring
theorem ext_ring_iff [TopologicalSpace R₁] {f g : R₁ →L[R₁] M₁} : f = g ↔ f 1 = g 1 :=
⟨fun h => h ▸ rfl, ext_ring⟩
#align continuous_linear_map.ext_ring_iff ContinuousLinearMap.ext_ring_iff
/-- If two continuous linear maps are equal on a set `s`, then they are equal on the closure
of the `submodule.span` of this set. -/
theorem eqOn_closure_span [T2Space M₂] {s : Set M₁} {f g : M₁ →SL[σ₁₂] M₂} (h : Set.EqOn f g s) :
Set.EqOn f g (closure (Submodule.span R₁ s : Set M₁)) :=
(LinearMap.eqOn_span' h).closure f.Continuous g.Continuous
#align continuous_linear_map.eq_on_closure_span ContinuousLinearMap.eqOn_closure_span
/-- If the submodule generated by a set `s` is dense in the ambient module, then two continuous
linear maps equal on `s` are equal. -/
theorem ext_on [T2Space M₂] {s : Set M₁} (hs : Dense (Submodule.span R₁ s : Set M₁))
{f g : M₁ →SL[σ₁₂] M₂} (h : Set.EqOn f g s) : f = g :=
ext fun x => eqOn_closure_span h (hs x)
#align continuous_linear_map.ext_on ContinuousLinearMap.ext_on
/-- Under a continuous linear map, the image of the `topological_closure` of a submodule is
contained in the `topological_closure` of its image. -/
theorem Submodule.topologicalClosure_map [RingHomSurjective σ₁₂] [TopologicalSpace R₁]
[TopologicalSpace R₂] [ContinuousSMul R₁ M₁] [ContinuousAdd M₁] [ContinuousSMul R₂ M₂]
[ContinuousAdd M₂] (f : M₁ →SL[σ₁₂] M₂) (s : Submodule R₁ M₁) :
s.topologicalClosure.map (f : M₁ →ₛₗ[σ₁₂] M₂) ≤
(s.map (f : M₁ →ₛₗ[σ₁₂] M₂)).topologicalClosure :=
image_closure_subset_closure_image f.Continuous
#align submodule.topological_closure_map Submodule.topologicalClosure_map
/-- Under a dense continuous linear map, a submodule whose `topological_closure` is `⊤` is sent to
another such submodule. That is, the image of a dense set under a map with dense range is dense.
-/
theorem DenseRange.topologicalClosure_map_submodule [RingHomSurjective σ₁₂] [TopologicalSpace R₁]
[TopologicalSpace R₂] [ContinuousSMul R₁ M₁] [ContinuousAdd M₁] [ContinuousSMul R₂ M₂]
[ContinuousAdd M₂] {f : M₁ →SL[σ₁₂] M₂} (hf' : DenseRange f) {s : Submodule R₁ M₁}
(hs : s.topologicalClosure = ⊤) : (s.map (f : M₁ →ₛₗ[σ₁₂] M₂)).topologicalClosure = ⊤ :=
by
rw [SetLike.ext'_iff] at hs⊢
simp only [Submodule.topologicalClosure_coe, Submodule.top_coe, ← dense_iff_closure_eq] at hs⊢
exact hf'.dense_image f.continuous hs
#align dense_range.topological_closure_map_submodule DenseRange.topologicalClosure_map_submodule
section SmulMonoid
variable {S₂ T₂ : Type _} [Monoid S₂] [Monoid T₂]
variable [DistribMulAction S₂ M₂] [SMulCommClass R₂ S₂ M₂] [ContinuousConstSMul S₂ M₂]
variable [DistribMulAction T₂ M₂] [SMulCommClass R₂ T₂ M₂] [ContinuousConstSMul T₂ M₂]
instance : MulAction S₂ (M₁ →SL[σ₁₂] M₂)
where
smul c f := ⟨c • f, (f.2.const_smul _ : Continuous fun x => c • f x)⟩
one_smul f := ext fun x => one_smul _ _
mul_smul a b f := ext fun x => mul_smul _ _ _
theorem smul_apply (c : S₂) (f : M₁ →SL[σ₁₂] M₂) (x : M₁) : (c • f) x = c • f x :=
rfl
#align continuous_linear_map.smul_apply ContinuousLinearMap.smul_apply
@[simp, norm_cast]
theorem coe_smul (c : S₂) (f : M₁ →SL[σ₁₂] M₂) : (↑(c • f) : M₁ →ₛₗ[σ₁₂] M₂) = c • f :=
rfl
#align continuous_linear_map.coe_smul ContinuousLinearMap.coe_smul
@[simp, norm_cast]
theorem coe_smul' (c : S₂) (f : M₁ →SL[σ₁₂] M₂) : ⇑(c • f) = c • f :=
rfl
#align continuous_linear_map.coe_smul' ContinuousLinearMap.coe_smul'
instance [SMul S₂ T₂] [IsScalarTower S₂ T₂ M₂] : IsScalarTower S₂ T₂ (M₁ →SL[σ₁₂] M₂) :=
⟨fun a b f => ext fun x => smul_assoc a b (f x)⟩
instance [SMulCommClass S₂ T₂ M₂] : SMulCommClass S₂ T₂ (M₁ →SL[σ₁₂] M₂) :=
⟨fun a b f => ext fun x => smul_comm a b (f x)⟩
end SmulMonoid
/-- The continuous map that is constantly zero. -/
instance : Zero (M₁ →SL[σ₁₂] M₂) :=
⟨⟨0, continuous_zero⟩⟩
instance : Inhabited (M₁ →SL[σ₁₂] M₂) :=
⟨0⟩
@[simp]
theorem default_def : (default : M₁ →SL[σ₁₂] M₂) = 0 :=
rfl
#align continuous_linear_map.default_def ContinuousLinearMap.default_def
@[simp]
theorem zero_apply (x : M₁) : (0 : M₁ →SL[σ₁₂] M₂) x = 0 :=
rfl
#align continuous_linear_map.zero_apply ContinuousLinearMap.zero_apply
@[simp, norm_cast]
theorem coe_zero : ((0 : M₁ →SL[σ₁₂] M₂) : M₁ →ₛₗ[σ₁₂] M₂) = 0 :=
rfl
#align continuous_linear_map.coe_zero ContinuousLinearMap.coe_zero
/- no simp attribute on the next line as simp does not always simplify `0 x` to `0`
when `0` is the zero function, while it does for the zero continuous linear map,
and this is the most important property we care about. -/
@[norm_cast]
theorem coe_zero' : ⇑(0 : M₁ →SL[σ₁₂] M₂) = 0 :=
rfl
#align continuous_linear_map.coe_zero' ContinuousLinearMap.coe_zero'
instance uniqueOfLeft [Subsingleton M₁] : Unique (M₁ →SL[σ₁₂] M₂) :=
coe_injective.unique
#align continuous_linear_map.unique_of_left ContinuousLinearMap.uniqueOfLeft
instance uniqueOfRight [Subsingleton M₂] : Unique (M₁ →SL[σ₁₂] M₂) :=
coe_injective.unique
#align continuous_linear_map.unique_of_right ContinuousLinearMap.uniqueOfRight
theorem exists_ne_zero {f : M₁ →SL[σ₁₂] M₂} (hf : f ≠ 0) : ∃ x, f x ≠ 0 :=
by
by_contra' h
exact hf (ContinuousLinearMap.ext h)
#align continuous_linear_map.exists_ne_zero ContinuousLinearMap.exists_ne_zero
section
variable (R₁ M₁)
/-- The identity map as a continuous linear map. -/
def id : M₁ →L[R₁] M₁ :=
⟨LinearMap.id, continuous_id⟩
#align continuous_linear_map.id ContinuousLinearMap.id
end
instance : One (M₁ →L[R₁] M₁) :=
⟨id R₁ M₁⟩
theorem one_def : (1 : M₁ →L[R₁] M₁) = id R₁ M₁ :=
rfl
#align continuous_linear_map.one_def ContinuousLinearMap.one_def
theorem id_apply (x : M₁) : id R₁ M₁ x = x :=
rfl
#align continuous_linear_map.id_apply ContinuousLinearMap.id_apply
@[simp, norm_cast]
theorem coe_id : (id R₁ M₁ : M₁ →ₗ[R₁] M₁) = LinearMap.id :=
rfl
#align continuous_linear_map.coe_id ContinuousLinearMap.coe_id
@[simp, norm_cast]
theorem coe_id' : ⇑(id R₁ M₁) = id :=
rfl
#align continuous_linear_map.coe_id' ContinuousLinearMap.coe_id'
@[simp, norm_cast]
theorem coe_eq_id {f : M₁ →L[R₁] M₁} : (f : M₁ →ₗ[R₁] M₁) = LinearMap.id ↔ f = id _ _ := by
rw [← coe_id, coe_inj]
#align continuous_linear_map.coe_eq_id ContinuousLinearMap.coe_eq_id
@[simp]
theorem one_apply (x : M₁) : (1 : M₁ →L[R₁] M₁) x = x :=
rfl
#align continuous_linear_map.one_apply ContinuousLinearMap.one_apply
section Add
variable [ContinuousAdd M₂]
instance : Add (M₁ →SL[σ₁₂] M₂) :=
⟨fun f g => ⟨f + g, f.2.add g.2⟩⟩
@[simp]
theorem add_apply (f g : M₁ →SL[σ₁₂] M₂) (x : M₁) : (f + g) x = f x + g x :=
rfl
#align continuous_linear_map.add_apply ContinuousLinearMap.add_apply
@[simp, norm_cast]
theorem coe_add (f g : M₁ →SL[σ₁₂] M₂) : (↑(f + g) : M₁ →ₛₗ[σ₁₂] M₂) = f + g :=
rfl
#align continuous_linear_map.coe_add ContinuousLinearMap.coe_add
@[norm_cast]
theorem coe_add' (f g : M₁ →SL[σ₁₂] M₂) : ⇑(f + g) = f + g :=
rfl
#align continuous_linear_map.coe_add' ContinuousLinearMap.coe_add'
instance : AddCommMonoid (M₁ →SL[σ₁₂] M₂)
where
zero := (0 : M₁ →SL[σ₁₂] M₂)
add := (· + ·)
zero_add := by
intros <;> ext <;> apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm]
add_zero := by
intros <;> ext <;> apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm]
add_comm := by
intros <;> ext <;> apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm]
add_assoc := by
intros <;> ext <;> apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm]
nsmul := (· • ·)
nsmul_zero f := by
ext
simp
nsmul_succ n f := by
ext
simp [Nat.succ_eq_one_add, add_smul]
@[simp, norm_cast]
theorem coe_sum {ι : Type _} (t : Finset ι) (f : ι → M₁ →SL[σ₁₂] M₂) :
↑(∑ d in t, f d) = (∑ d in t, f d : M₁ →ₛₗ[σ₁₂] M₂) :=
(AddMonoidHom.mk (coe : (M₁ →SL[σ₁₂] M₂) → M₁ →ₛₗ[σ₁₂] M₂) rfl fun _ _ => rfl).map_sum _ _
#align continuous_linear_map.coe_sum ContinuousLinearMap.coe_sum
@[simp, norm_cast]
theorem coe_sum' {ι : Type _} (t : Finset ι) (f : ι → M₁ →SL[σ₁₂] M₂) :
⇑(∑ d in t, f d) = ∑ d in t, f d := by simp only [← coe_coe, coe_sum, LinearMap.coeFn_sum]
#align continuous_linear_map.coe_sum' ContinuousLinearMap.coe_sum'
theorem sum_apply {ι : Type _} (t : Finset ι) (f : ι → M₁ →SL[σ₁₂] M₂) (b : M₁) :
(∑ d in t, f d) b = ∑ d in t, f d b := by simp only [coe_sum', Finset.sum_apply]
#align continuous_linear_map.sum_apply ContinuousLinearMap.sum_apply
end Add
variable [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃]
/-- Composition of bounded linear maps. -/
def comp (g : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) : M₁ →SL[σ₁₃] M₃ :=
⟨(g : M₂ →ₛₗ[σ₂₃] M₃).comp ↑f, g.2.comp f.2⟩
#align continuous_linear_map.comp ContinuousLinearMap.comp
-- mathport name: «expr ∘L »
infixr:80 " ∘L " =>
@ContinuousLinearMap.comp _ _ _ _ _ _ (RingHom.id _) (RingHom.id _) (RingHom.id _) _ _ _ _ _ _ _ _
_ _ _ _ RingHomCompTriple.ids
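-- Hypothetical usage: for `f g : M₁ →L[R₁] M₁`, the composite `g ∘L f` sends
-- `x` to `g (f x)`; see `comp_apply` below.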
@[simp, norm_cast]
theorem coe_comp (h : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) :
(h.comp f : M₁ →ₛₗ[σ₁₃] M₃) = (h : M₂ →ₛₗ[σ₂₃] M₃).comp (f : M₁ →ₛₗ[σ₁₂] M₂) :=
rfl
#align continuous_linear_map.coe_comp ContinuousLinearMap.coe_comp
include σ₁₃
@[simp, norm_cast]
theorem coe_comp' (h : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) : ⇑(h.comp f) = h ∘ f :=
rfl
#align continuous_linear_map.coe_comp' ContinuousLinearMap.coe_comp'
theorem comp_apply (g : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) (x : M₁) : (g.comp f) x = g (f x) :=
rfl
#align continuous_linear_map.comp_apply ContinuousLinearMap.comp_apply
omit σ₁₃
@[simp]
theorem comp_id (f : M₁ →SL[σ₁₂] M₂) : f.comp (id R₁ M₁) = f :=
ext fun x => rfl
#align continuous_linear_map.comp_id ContinuousLinearMap.comp_id
@[simp]
theorem id_comp (f : M₁ →SL[σ₁₂] M₂) : (id R₂ M₂).comp f = f :=
ext fun x => rfl
#align continuous_linear_map.id_comp ContinuousLinearMap.id_comp
include σ₁₃
@[simp]
theorem comp_zero (g : M₂ →SL[σ₂₃] M₃) : g.comp (0 : M₁ →SL[σ₁₂] M₂) = 0 :=
by
ext
simp
#align continuous_linear_map.comp_zero ContinuousLinearMap.comp_zero
@[simp]
theorem zero_comp (f : M₁ →SL[σ₁₂] M₂) : (0 : M₂ →SL[σ₂₃] M₃).comp f = 0 :=
by
ext
simp
#align continuous_linear_map.zero_comp ContinuousLinearMap.zero_comp
@[simp]
theorem comp_add [ContinuousAdd M₂] [ContinuousAdd M₃] (g : M₂ →SL[σ₂₃] M₃)
(f₁ f₂ : M₁ →SL[σ₁₂] M₂) : g.comp (f₁ + f₂) = g.comp f₁ + g.comp f₂ :=
by
ext
simp
#align continuous_linear_map.comp_add ContinuousLinearMap.comp_add
@[simp]
theorem add_comp [ContinuousAdd M₃] (g₁ g₂ : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) :
(g₁ + g₂).comp f = g₁.comp f + g₂.comp f := by
ext
simp
#align continuous_linear_map.add_comp ContinuousLinearMap.add_comp
omit σ₁₃
theorem comp_assoc {R₄ : Type _} [Semiring R₄] [Module R₄ M₄] {σ₁₄ : R₁ →+* R₄} {σ₂₄ : R₂ →+* R₄}
{σ₃₄ : R₃ →+* R₄} [RingHomCompTriple σ₁₃ σ₃₄ σ₁₄] [RingHomCompTriple σ₂₃ σ₃₄ σ₂₄]
[RingHomCompTriple σ₁₂ σ₂₄ σ₁₄] (h : M₃ →SL[σ₃₄] M₄) (g : M₂ →SL[σ₂₃] M₃) (f : M₁ →SL[σ₁₂] M₂) :
(h.comp g).comp f = h.comp (g.comp f) :=
rfl
#align continuous_linear_map.comp_assoc ContinuousLinearMap.comp_assoc
instance : Mul (M₁ →L[R₁] M₁) :=
⟨comp⟩
theorem mul_def (f g : M₁ →L[R₁] M₁) : f * g = f.comp g :=
rfl
#align continuous_linear_map.mul_def ContinuousLinearMap.mul_def
@[simp]
theorem coe_mul (f g : M₁ →L[R₁] M₁) : ⇑(f * g) = f ∘ g :=
rfl
#align continuous_linear_map.coe_mul ContinuousLinearMap.coe_mul
theorem mul_apply (f g : M₁ →L[R₁] M₁) (x : M₁) : (f * g) x = f (g x) :=
rfl
#align continuous_linear_map.mul_apply ContinuousLinearMap.mul_apply
instance : MonoidWithZero (M₁ →L[R₁] M₁)
where
mul := (· * ·)
one := 1
zero := 0
mul_zero f := ext fun _ => map_zero f
zero_mul _ := ext fun _ => rfl
mul_one _ := ext fun _ => rfl
one_mul _ := ext fun _ => rfl
mul_assoc _ _ _ := ext fun _ => rfl
instance [ContinuousAdd M₁] : Semiring (M₁ →L[R₁] M₁) :=
{ ContinuousLinearMap.monoidWithZero,
ContinuousLinearMap.addCommMonoid with
mul := (· * ·)
one := 1
left_distrib := fun f g h => ext fun x => map_add f (g x) (h x)
right_distrib := fun _ _ _ => ext fun _ => LinearMap.add_apply _ _ _ }
/-- `continuous_linear_map.to_linear_map` as a `ring_hom`. -/
@[simps]
def toLinearMapRingHom [ContinuousAdd M₁] : (M₁ →L[R₁] M₁) →+* M₁ →ₗ[R₁] M₁
where
toFun := toLinearMap
map_zero' := rfl
map_one' := rfl
map_add' _ _ := rfl
map_mul' _ _ := rfl
#align continuous_linear_map.to_linear_map_ring_hom ContinuousLinearMap.toLinearMapRingHom
section ApplyAction
variable [ContinuousAdd M₁]
/-- The tautological action by `M₁ →L[R₁] M₁` on `M₁`.
This generalizes `function.End.apply_mul_action`. -/
instance applyModule : Module (M₁ →L[R₁] M₁) M₁ :=
Module.compHom _ toLinearMapRingHom
#align continuous_linear_map.apply_module ContinuousLinearMap.applyModule
@[simp]
protected theorem smul_def (f : M₁ →L[R₁] M₁) (a : M₁) : f • a = f a :=
rfl
#align continuous_linear_map.smul_def ContinuousLinearMap.smul_def
/-- `continuous_linear_map.apply_module` is faithful. -/
instance apply_faithfulSMul : FaithfulSMul (M₁ →L[R₁] M₁) M₁ :=
⟨fun _ _ => ContinuousLinearMap.ext⟩
#align continuous_linear_map.apply_has_faithful_smul ContinuousLinearMap.apply_faithfulSMul
instance apply_sMulCommClass : SMulCommClass R₁ (M₁ →L[R₁] M₁) M₁
where smul_comm r e m := (e.map_smul r m).symm
#align continuous_linear_map.apply_smul_comm_class ContinuousLinearMap.apply_sMulCommClass
instance apply_smul_comm_class' : SMulCommClass (M₁ →L[R₁] M₁) R₁ M₁
where smul_comm := ContinuousLinearMap.map_smul
#align continuous_linear_map.apply_smul_comm_class' ContinuousLinearMap.apply_smul_comm_class'
instance : ContinuousConstSMul (M₁ →L[R₁] M₁) M₁ :=
⟨ContinuousLinearMap.continuous⟩
end ApplyAction
/-- The cartesian product of two bounded linear maps, as a bounded linear map. -/
protected def prod [Module R₁ M₂] [Module R₁ M₃] (f₁ : M₁ →L[R₁] M₂) (f₂ : M₁ →L[R₁] M₃) :
M₁ →L[R₁] M₂ × M₃ :=
⟨(f₁ : M₁ →ₗ[R₁] M₂).Prod f₂, f₁.2.prod_mk f₂.2⟩
#align continuous_linear_map.prod ContinuousLinearMap.prod
@[simp, norm_cast]
theorem coe_prod [Module R₁ M₂] [Module R₁ M₃] (f₁ : M₁ →L[R₁] M₂) (f₂ : M₁ →L[R₁] M₃) :
(f₁.Prod f₂ : M₁ →ₗ[R₁] M₂ × M₃) = LinearMap.prod f₁ f₂ :=
rfl
#align continuous_linear_map.coe_prod ContinuousLinearMap.coe_prod
@[simp, norm_cast]
theorem prod_apply [Module R₁ M₂] [Module R₁ M₃] (f₁ : M₁ →L[R₁] M₂) (f₂ : M₁ →L[R₁] M₃) (x : M₁) :
f₁.Prod f₂ x = (f₁ x, f₂ x) :=
rfl
#align continuous_linear_map.prod_apply ContinuousLinearMap.prod_apply
section
variable (R₁ M₁ M₂)
/-- The left injection into a product is a continuous linear map. -/
def inl [Module R₁ M₂] : M₁ →L[R₁] M₁ × M₂ :=
(id R₁ M₁).Prod 0
#align continuous_linear_map.inl ContinuousLinearMap.inl
/-- The right injection into a product is a continuous linear map. -/
def inr [Module R₁ M₂] : M₂ →L[R₁] M₁ × M₂ :=
(0 : M₂ →L[R₁] M₁).Prod (id R₁ M₂)
#align continuous_linear_map.inr ContinuousLinearMap.inr
end
variable {F : Type _}
@[simp]
theorem inl_apply [Module R₁ M₂] (x : M₁) : inl R₁ M₁ M₂ x = (x, 0) :=
rfl
#align continuous_linear_map.inl_apply ContinuousLinearMap.inl_apply
@[simp]
theorem inr_apply [Module R₁ M₂] (x : M₂) : inr R₁ M₁ M₂ x = (0, x) :=
rfl
#align continuous_linear_map.inr_apply ContinuousLinearMap.inr_apply
@[simp, norm_cast]
theorem coe_inl [Module R₁ M₂] : (inl R₁ M₁ M₂ : M₁ →ₗ[R₁] M₁ × M₂) = LinearMap.inl R₁ M₁ M₂ :=
rfl
#align continuous_linear_map.coe_inl ContinuousLinearMap.coe_inl
@[simp, norm_cast]
theorem coe_inr [Module R₁ M₂] : (inr R₁ M₁ M₂ : M₂ →ₗ[R₁] M₁ × M₂) = LinearMap.inr R₁ M₁ M₂ :=
rfl
#align continuous_linear_map.coe_inr ContinuousLinearMap.coe_inr
theorem isClosed_ker [T1Space M₂] [ContinuousSemilinearMapClass F σ₁₂ M₁ M₂] (f : F) :
IsClosed (ker f : Set M₁) :=
continuous_iff_isClosed.1 (map_continuous f) _ isClosed_singleton
#align continuous_linear_map.is_closed_ker ContinuousLinearMap.isClosed_ker
theorem isComplete_ker {M' : Type _} [UniformSpace M'] [CompleteSpace M'] [AddCommMonoid M']
[Module R₁ M'] [T1Space M₂] [ContinuousSemilinearMapClass F σ₁₂ M' M₂] (f : F) :
IsComplete (ker f : Set M') :=
(isClosed_ker f).IsComplete
#align continuous_linear_map.is_complete_ker ContinuousLinearMap.isComplete_ker
instance (priority := 100) completeSpace_ker {M' : Type _} [UniformSpace M'] [CompleteSpace M']
[AddCommMonoid M'] [Module R₁ M'] [T1Space M₂] [ContinuousSemilinearMapClass F σ₁₂ M' M₂]
(f : F) : CompleteSpace (ker f) :=
(isClosed_ker f).completeSpace_coe
#align continuous_linear_map.complete_space_ker ContinuousLinearMap.completeSpace_ker
@[simp]
theorem ker_prod [Module R₁ M₂] [Module R₁ M₃] (f : M₁ →L[R₁] M₂) (g : M₁ →L[R₁] M₃) :
ker (f.Prod g) = ker f ⊓ ker g :=
LinearMap.ker_prod f g
#align continuous_linear_map.ker_prod ContinuousLinearMap.ker_prod
/-- Restrict codomain of a continuous linear map. -/
def codRestrict (f : M₁ →SL[σ₁₂] M₂) (p : Submodule R₂ M₂) (h : ∀ x, f x ∈ p) : M₁ →SL[σ₁₂] p
where
cont := f.Continuous.subtype_mk _
toLinearMap := (f : M₁ →ₛₗ[σ₁₂] M₂).codRestrict p h
#align continuous_linear_map.cod_restrict ContinuousLinearMap.codRestrict
@[norm_cast]
theorem coe_codRestrict (f : M₁ →SL[σ₁₂] M₂) (p : Submodule R₂ M₂) (h : ∀ x, f x ∈ p) :
(f.codRestrict p h : M₁ →ₛₗ[σ₁₂] p) = (f : M₁ →ₛₗ[σ₁₂] M₂).codRestrict p h :=
rfl
#align continuous_linear_map.coe_cod_restrict ContinuousLinearMap.coe_codRestrict
@[simp]
theorem coe_codRestrict_apply (f : M₁ →SL[σ₁₂] M₂) (p : Submodule R₂ M₂) (h : ∀ x, f x ∈ p) (x) :
(f.codRestrict p h x : M₂) = f x :=
rfl
#align continuous_linear_map.coe_cod_restrict_apply ContinuousLinearMap.coe_codRestrict_apply
@[simp]
theorem ker_codRestrict (f : M₁ →SL[σ₁₂] M₂) (p : Submodule R₂ M₂) (h : ∀ x, f x ∈ p) :
ker (f.codRestrict p h) = ker f :=
(f : M₁ →ₛₗ[σ₁₂] M₂).ker_codRestrict p h
#align continuous_linear_map.ker_cod_restrict ContinuousLinearMap.ker_codRestrict
/-- `submodule.subtype` as a `continuous_linear_map`. -/
def Submodule.subtypeL (p : Submodule R₁ M₁) : p →L[R₁] M₁
where
cont := continuous_subtype_val
toLinearMap := p.Subtype
#align submodule.subtypeL Submodule.subtypeL
@[simp, norm_cast]
theorem Submodule.coe_subtypeL (p : Submodule R₁ M₁) : (p.subtypeL : p →ₗ[R₁] M₁) = p.Subtype :=
rfl
#align submodule.coe_subtypeL Submodule.coe_subtypeL
@[simp]
theorem Submodule.coe_subtypeL' (p : Submodule R₁ M₁) : ⇑p.subtypeL = p.Subtype :=
rfl
#align submodule.coe_subtypeL' Submodule.coe_subtypeL'
@[simp, norm_cast]
theorem Submodule.subtypeL_apply (p : Submodule R₁ M₁) (x : p) : p.subtypeL x = x :=
rfl
#align submodule.subtypeL_apply Submodule.subtypeL_apply
@[simp]
theorem Submodule.range_subtypeL (p : Submodule R₁ M₁) : range p.subtypeL = p :=
Submodule.range_subtype _
#align submodule.range_subtypeL Submodule.range_subtypeL
@[simp]
theorem Submodule.ker_subtypeL (p : Submodule R₁ M₁) : ker p.subtypeL = ⊥ :=
Submodule.ker_subtype _
#align submodule.ker_subtypeL Submodule.ker_subtypeL
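/- A small sanity-check sketch: the inclusion `subtypeL` lands back in the submodule, with the
membership proof carried by the subtype element itself. -/
example (p : Submodule R₁ M₁) (x : p) : p.subtypeL x ∈ p :=
  x.2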
variable (R₁ M₁ M₂)
/-- `prod.fst` as a `continuous_linear_map`. -/
def fst [Module R₁ M₂] : M₁ × M₂ →L[R₁] M₁
where
cont := continuous_fst
toLinearMap := LinearMap.fst R₁ M₁ M₂
#align continuous_linear_map.fst ContinuousLinearMap.fst
/-- `prod.snd` as a `continuous_linear_map`. -/
def snd [Module R₁ M₂] : M₁ × M₂ →L[R₁] M₂
where
cont := continuous_snd
toLinearMap := LinearMap.snd R₁ M₁ M₂
#align continuous_linear_map.snd ContinuousLinearMap.snd
variable {R₁ M₁ M₂}
@[simp, norm_cast]
theorem coe_fst [Module R₁ M₂] : ↑(fst R₁ M₁ M₂) = LinearMap.fst R₁ M₁ M₂ :=
rfl
#align continuous_linear_map.coe_fst ContinuousLinearMap.coe_fst
@[simp, norm_cast]
theorem coe_fst' [Module R₁ M₂] : ⇑(fst R₁ M₁ M₂) = Prod.fst :=
rfl
#align continuous_linear_map.coe_fst' ContinuousLinearMap.coe_fst'
@[simp, norm_cast]
theorem coe_snd [Module R₁ M₂] : ↑(snd R₁ M₁ M₂) = LinearMap.snd R₁ M₁ M₂ :=
rfl
#align continuous_linear_map.coe_snd ContinuousLinearMap.coe_snd
@[simp, norm_cast]
theorem coe_snd' [Module R₁ M₂] : ⇑(snd R₁ M₁ M₂) = Prod.snd :=
rfl
#align continuous_linear_map.coe_snd' ContinuousLinearMap.coe_snd'
@[simp]
theorem fst_prod_snd [Module R₁ M₂] : (fst R₁ M₁ M₂).Prod (snd R₁ M₁ M₂) = id R₁ (M₁ × M₂) :=
ext fun ⟨x, y⟩ => rfl
#align continuous_linear_map.fst_prod_snd ContinuousLinearMap.fst_prod_snd
@[simp]
theorem fst_comp_prod [Module R₁ M₂] [Module R₁ M₃] (f : M₁ →L[R₁] M₂) (g : M₁ →L[R₁] M₃) :
(fst R₁ M₂ M₃).comp (f.Prod g) = f :=
ext fun x => rfl
#align continuous_linear_map.fst_comp_prod ContinuousLinearMap.fst_comp_prod
@[simp]
theorem snd_comp_prod [Module R₁ M₂] [Module R₁ M₃] (f : M₁ →L[R₁] M₂) (g : M₁ →L[R₁] M₃) :
(snd R₁ M₂ M₃).comp (f.Prod g) = g :=
ext fun x => rfl
#align continuous_linear_map.snd_comp_prod ContinuousLinearMap.snd_comp_prod
/-- `prod.map` of two continuous linear maps. -/
def prodMap [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (f₁ : M₁ →L[R₁] M₂) (f₂ : M₃ →L[R₁] M₄) :
M₁ × M₃ →L[R₁] M₂ × M₄ :=
(f₁.comp (fst R₁ M₁ M₃)).Prod (f₂.comp (snd R₁ M₁ M₃))
#align continuous_linear_map.prod_map ContinuousLinearMap.prodMap
@[simp, norm_cast]
theorem coe_prodMap [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (f₁ : M₁ →L[R₁] M₂)
(f₂ : M₃ →L[R₁] M₄) : ↑(f₁.Prod_map f₂) = (f₁ : M₁ →ₗ[R₁] M₂).Prod_map (f₂ : M₃ →ₗ[R₁] M₄) :=
rfl
#align continuous_linear_map.coe_prod_map ContinuousLinearMap.coe_prodMap
@[simp, norm_cast]
theorem coe_prod_map' [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (f₁ : M₁ →L[R₁] M₂)
(f₂ : M₃ →L[R₁] M₄) : ⇑(f₁.Prod_map f₂) = Prod.map f₁ f₂ :=
rfl
#align continuous_linear_map.coe_prod_map' ContinuousLinearMap.coe_prod_map'
/-- The continuous linear map given by `(x, y) ↦ f₁ x + f₂ y`. -/
def coprod [Module R₁ M₂] [Module R₁ M₃] [ContinuousAdd M₃] (f₁ : M₁ →L[R₁] M₃)
(f₂ : M₂ →L[R₁] M₃) : M₁ × M₂ →L[R₁] M₃ :=
⟨LinearMap.coprod f₁ f₂, (f₁.cont.comp continuous_fst).add (f₂.cont.comp continuous_snd)⟩
#align continuous_linear_map.coprod ContinuousLinearMap.coprod
@[norm_cast, simp]
theorem coe_coprod [Module R₁ M₂] [Module R₁ M₃] [ContinuousAdd M₃] (f₁ : M₁ →L[R₁] M₃)
(f₂ : M₂ →L[R₁] M₃) : (f₁.coprod f₂ : M₁ × M₂ →ₗ[R₁] M₃) = LinearMap.coprod f₁ f₂ :=
rfl
#align continuous_linear_map.coe_coprod ContinuousLinearMap.coe_coprod
@[simp]
theorem coprod_apply [Module R₁ M₂] [Module R₁ M₃] [ContinuousAdd M₃] (f₁ : M₁ →L[R₁] M₃)
(f₂ : M₂ →L[R₁] M₃) (x) : f₁.coprod f₂ x = f₁ x.1 + f₂ x.2 :=
rfl
#align continuous_linear_map.coprod_apply ContinuousLinearMap.coprod_apply
theorem range_coprod [Module R₁ M₂] [Module R₁ M₃] [ContinuousAdd M₃] (f₁ : M₁ →L[R₁] M₃)
(f₂ : M₂ →L[R₁] M₃) : range (f₁.coprod f₂) = range f₁ ⊔ range f₂ :=
LinearMap.range_coprod _ _
#align continuous_linear_map.range_coprod ContinuousLinearMap.range_coprod
section
variable {R S : Type _} [Semiring R] [Semiring S] [Module R M₁] [Module R M₂] [Module R S]
[Module S M₂] [IsScalarTower R S M₂] [TopologicalSpace S] [ContinuousSMul S M₂]
/-- The continuous linear map `λ x, c x • f`. Associates to a scalar-valued continuous linear map
and an element of `M₂` the `M₂`-valued linear map obtained by multiplying the two (a.k.a. tensoring
by `M₂`). See also `continuous_linear_map.smul_rightₗ` and `continuous_linear_map.smul_rightL`. -/
def smulRight (c : M₁ →L[R] S) (f : M₂) : M₁ →L[R] M₂ :=
{ c.toLinearMap.smul_right f with cont := c.2.smul continuous_const }
#align continuous_linear_map.smul_right ContinuousLinearMap.smulRight
@[simp]
theorem smulRight_apply {c : M₁ →L[R] S} {f : M₂} {x : M₁} :
(smulRight c f : M₁ → M₂) x = c x • f :=
rfl
#align continuous_linear_map.smul_right_apply ContinuousLinearMap.smulRight_apply
end
variable [Module R₁ M₂] [TopologicalSpace R₁] [ContinuousSMul R₁ M₂]
@[simp]
theorem smulRight_one_one (c : R₁ →L[R₁] M₂) : smulRight (1 : R₁ →L[R₁] R₁) (c 1) = c := by
ext <;> simp [← ContinuousLinearMap.map_smul_of_tower]
#align continuous_linear_map.smul_right_one_one ContinuousLinearMap.smulRight_one_one
@[simp]
theorem smulRight_one_eq_iff {f f' : M₂} :
smulRight (1 : R₁ →L[R₁] R₁) f = smulRight (1 : R₁ →L[R₁] R₁) f' ↔ f = f' := by
simp only [ext_ring_iff, smul_right_apply, one_apply, one_smul]
#align continuous_linear_map.smul_right_one_eq_iff ContinuousLinearMap.smulRight_one_eq_iff
theorem smulRight_comp [ContinuousMul R₁] {x : M₂} {c : R₁} :
(smulRight (1 : R₁ →L[R₁] R₁) x).comp (smulRight (1 : R₁ →L[R₁] R₁) c) =
smulRight (1 : R₁ →L[R₁] R₁) (c • x) :=
by
ext
simp [mul_smul]
#align continuous_linear_map.smul_right_comp ContinuousLinearMap.smulRight_comp
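/- A computation sketch: specializing `c` to the identity functional, `smulRight` pairs a scalar
with a fixed vector, so evaluation is plain scalar multiplication. -/
example (x : M₂) (c : R₁) : smulRight (1 : R₁ →L[R₁] R₁) x c = c • x :=
  rfl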
end Semiring
section Pi
variable {R : Type _} [Semiring R] {M : Type _} [TopologicalSpace M] [AddCommMonoid M] [Module R M]
{M₂ : Type _} [TopologicalSpace M₂] [AddCommMonoid M₂] [Module R M₂] {ι : Type _} {φ : ι → Type _}
[∀ i, TopologicalSpace (φ i)] [∀ i, AddCommMonoid (φ i)] [∀ i, Module R (φ i)]
/-- `pi` construction for continuous linear functions. From a family of continuous linear functions
it produces a continuous linear function into the product of a family of topological modules. -/
def pi (f : ∀ i, M →L[R] φ i) : M →L[R] ∀ i, φ i :=
⟨LinearMap.pi fun i => f i, continuous_pi fun i => (f i).Continuous⟩
#align continuous_linear_map.pi ContinuousLinearMap.pi
@[simp]
theorem coe_pi' (f : ∀ i, M →L[R] φ i) : ⇑(pi f) = fun c i => f i c :=
rfl
#align continuous_linear_map.coe_pi' ContinuousLinearMap.coe_pi'
@[simp]
theorem coe_pi (f : ∀ i, M →L[R] φ i) : (pi f : M →ₗ[R] ∀ i, φ i) = LinearMap.pi fun i => f i :=
rfl
#align continuous_linear_map.coe_pi ContinuousLinearMap.coe_pi
theorem pi_apply (f : ∀ i, M →L[R] φ i) (c : M) (i : ι) : pi f c i = f i c :=
rfl
#align continuous_linear_map.pi_apply ContinuousLinearMap.pi_apply
theorem pi_eq_zero (f : ∀ i, M →L[R] φ i) : pi f = 0 ↔ ∀ i, f i = 0 :=
by
simp only [ext_iff, pi_apply, Function.funext_iff]
exact forall_swap
#align continuous_linear_map.pi_eq_zero ContinuousLinearMap.pi_eq_zero
theorem pi_zero : pi (fun i => 0 : ∀ i, M →L[R] φ i) = 0 :=
ext fun _ => rfl
#align continuous_linear_map.pi_zero ContinuousLinearMap.pi_zero
theorem pi_comp (f : ∀ i, M →L[R] φ i) (g : M₂ →L[R] M) :
(pi f).comp g = pi fun i => (f i).comp g :=
rfl
#align continuous_linear_map.pi_comp ContinuousLinearMap.pi_comp
/-- The projections from a family of topological modules are continuous linear maps. -/
def proj (i : ι) : (∀ i, φ i) →L[R] φ i :=
⟨LinearMap.proj i, continuous_apply _⟩
#align continuous_linear_map.proj ContinuousLinearMap.proj
@[simp]
theorem proj_apply (i : ι) (b : ∀ i, φ i) : (proj i : (∀ i, φ i) →L[R] φ i) b = b i :=
rfl
#align continuous_linear_map.proj_apply ContinuousLinearMap.proj_apply
theorem proj_pi (f : ∀ i, M₂ →L[R] φ i) (i : ι) : (proj i).comp (pi f) = f i :=
ext fun c => rfl
#align continuous_linear_map.proj_pi ContinuousLinearMap.proj_pi
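/- A pointwise sketch of `proj_pi`: projecting the `pi`-bundle at `i` recovers the `i`-th
component map on every input. -/
example (f : ∀ i, M₂ →L[R] φ i) (i : ι) (x : M₂) : proj i (pi f x) = f i x :=
  rfl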
theorem infᵢ_ker_proj : (⨅ i, ker (proj i : (∀ i, φ i) →L[R] φ i) : Submodule R (∀ i, φ i)) = ⊥ :=
LinearMap.infᵢ_ker_proj
#align continuous_linear_map.infi_ker_proj ContinuousLinearMap.infᵢ_ker_proj
variable (R φ)
/-- If `I` and `J` are complementary index sets, the intersection of the kernels of the projections
of `φ` indexed by `J` is continuously linearly equivalent to the product over `I`. -/
def infiKerProjEquiv {I J : Set ι} [DecidablePred fun i => i ∈ I] (hd : Disjoint I J)
(hu : Set.univ ⊆ I ∪ J) :
(⨅ i ∈ J, ker (proj i : (∀ i, φ i) →L[R] φ i) : Submodule R (∀ i, φ i)) ≃L[R] ∀ i : I, φ i
where
toLinearEquiv := LinearMap.infᵢKerProjEquiv R φ hd hu
continuous_toFun :=
continuous_pi fun i =>
by
have :=
@continuous_subtype_val _ _ fun x =>
x ∈ (⨅ i ∈ J, ker (proj i : (∀ i, φ i) →L[R] φ i) : Submodule R (∀ i, φ i))
have := Continuous.comp (continuous_apply i) this
exact this
continuous_invFun :=
Continuous.subtype_mk
(continuous_pi fun i => by dsimp;
split_ifs <;> [apply continuous_apply, exact continuous_zero])
_
#align continuous_linear_map.infi_ker_proj_equiv ContinuousLinearMap.infiKerProjEquiv
end Pi
section Ring
variable {R : Type _} [Ring R] {R₂ : Type _} [Ring R₂] {R₃ : Type _} [Ring R₃] {M : Type _}
[TopologicalSpace M] [AddCommGroup M] {M₂ : Type _} [TopologicalSpace M₂] [AddCommGroup M₂]
{M₃ : Type _} [TopologicalSpace M₃] [AddCommGroup M₃] {M₄ : Type _} [TopologicalSpace M₄]
[AddCommGroup M₄] [Module R M] [Module R₂ M₂] [Module R₃ M₃] {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃}
{σ₁₃ : R →+* R₃}
section
protected theorem map_neg (f : M →SL[σ₁₂] M₂) (x : M) : f (-x) = -f x :=
map_neg _ _
#align continuous_linear_map.map_neg ContinuousLinearMap.map_neg
protected theorem map_sub (f : M →SL[σ₁₂] M₂) (x y : M) : f (x - y) = f x - f y :=
map_sub _ _ _
#align continuous_linear_map.map_sub ContinuousLinearMap.map_sub
@[simp]
theorem sub_apply' (f g : M →SL[σ₁₂] M₂) (x : M) : ((f : M →ₛₗ[σ₁₂] M₂) - g) x = f x - g x :=
rfl
#align continuous_linear_map.sub_apply' ContinuousLinearMap.sub_apply'
end
section
variable [Module R M₂] [Module R M₃] [Module R M₄]
theorem range_prod_eq {f : M →L[R] M₂} {g : M →L[R] M₃} (h : ker f ⊔ ker g = ⊤) :
range (f.Prod g) = (range f).Prod (range g) :=
LinearMap.range_prod_eq h
#align continuous_linear_map.range_prod_eq ContinuousLinearMap.range_prod_eq
theorem ker_prod_ker_le_ker_coprod [ContinuousAdd M₃] (f : M →L[R] M₃) (g : M₂ →L[R] M₃) :
(LinearMap.ker f).Prod (LinearMap.ker g) ≤ LinearMap.ker (f.coprod g) :=
LinearMap.ker_prod_ker_le_ker_coprod f.toLinearMap g.toLinearMap
#align continuous_linear_map.ker_prod_ker_le_ker_coprod ContinuousLinearMap.ker_prod_ker_le_ker_coprod
theorem ker_coprod_of_disjoint_range [ContinuousAdd M₃] (f : M →L[R] M₃) (g : M₂ →L[R] M₃)
(hd : Disjoint (range f) (range g)) :
LinearMap.ker (f.coprod g) = (LinearMap.ker f).Prod (LinearMap.ker g) :=
LinearMap.ker_coprod_of_disjoint_range f.toLinearMap g.toLinearMap hd
#align continuous_linear_map.ker_coprod_of_disjoint_range ContinuousLinearMap.ker_coprod_of_disjoint_range
end
section
variable [TopologicalAddGroup M₂]
instance : Neg (M →SL[σ₁₂] M₂) :=
⟨fun f => ⟨-f, f.2.neg⟩⟩
@[simp]
theorem neg_apply (f : M →SL[σ₁₂] M₂) (x : M) : (-f) x = -f x :=
rfl
#align continuous_linear_map.neg_apply ContinuousLinearMap.neg_apply
@[simp, norm_cast]
theorem coe_neg (f : M →SL[σ₁₂] M₂) : (↑(-f) : M →ₛₗ[σ₁₂] M₂) = -f :=
rfl
#align continuous_linear_map.coe_neg ContinuousLinearMap.coe_neg
@[norm_cast]
theorem coe_neg' (f : M →SL[σ₁₂] M₂) : ⇑(-f) = -f :=
rfl
#align continuous_linear_map.coe_neg' ContinuousLinearMap.coe_neg'
instance : Sub (M →SL[σ₁₂] M₂) :=
⟨fun f g => ⟨f - g, f.2.sub g.2⟩⟩
instance : AddCommGroup (M →SL[σ₁₂] M₂) := by
refine'
{ ContinuousLinearMap.addCommMonoid with
zero := 0
add := (· + ·)
neg := Neg.neg
sub := Sub.sub
sub_eq_add_neg := _
nsmul := (· • ·)
zsmul := (· • ·)
zsmul_zero' := fun f => by
ext
simp
zsmul_succ' := fun n f => by
ext
simp [add_smul, add_comm]
zsmul_neg' := fun n f => by
ext
simp [Nat.succ_eq_add_one, add_smul].. } <;>
intros <;>
ext <;>
apply_rules [zero_add, add_assoc, add_zero, add_left_neg, add_comm, sub_eq_add_neg]
theorem sub_apply (f g : M →SL[σ₁₂] M₂) (x : M) : (f - g) x = f x - g x :=
rfl
#align continuous_linear_map.sub_apply ContinuousLinearMap.sub_apply
@[simp, norm_cast]
theorem coe_sub (f g : M →SL[σ₁₂] M₂) : (↑(f - g) : M →ₛₗ[σ₁₂] M₂) = f - g :=
rfl
#align continuous_linear_map.coe_sub ContinuousLinearMap.coe_sub
@[simp, norm_cast]
theorem coe_sub' (f g : M →SL[σ₁₂] M₂) : ⇑(f - g) = f - g :=
rfl
#align continuous_linear_map.coe_sub' ContinuousLinearMap.coe_sub'
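/- A usage sketch combining `sub_apply` with group arithmetic in `M₂` (assuming `sub_add_cancel`
is available, as elsewhere in mathlib). -/
example (f g : M →SL[σ₁₂] M₂) (x : M) : (f - g) x + g x = f x := by
  rw [sub_apply, sub_add_cancel]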
end
@[simp]
theorem comp_neg [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] [TopologicalAddGroup M₂] [TopologicalAddGroup M₃]
(g : M₂ →SL[σ₂₃] M₃) (f : M →SL[σ₁₂] M₂) : g.comp (-f) = -g.comp f :=
by
ext
simp
#align continuous_linear_map.comp_neg ContinuousLinearMap.comp_neg
@[simp]
theorem neg_comp [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] [TopologicalAddGroup M₃] (g : M₂ →SL[σ₂₃] M₃)
(f : M →SL[σ₁₂] M₂) : (-g).comp f = -g.comp f :=
by
ext
simp
#align continuous_linear_map.neg_comp ContinuousLinearMap.neg_comp
@[simp]
theorem comp_sub [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] [TopologicalAddGroup M₂] [TopologicalAddGroup M₃]
(g : M₂ →SL[σ₂₃] M₃) (f₁ f₂ : M →SL[σ₁₂] M₂) : g.comp (f₁ - f₂) = g.comp f₁ - g.comp f₂ :=
by
ext
simp
#align continuous_linear_map.comp_sub ContinuousLinearMap.comp_sub
@[simp]
theorem sub_comp [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] [TopologicalAddGroup M₃] (g₁ g₂ : M₂ →SL[σ₂₃] M₃)
(f : M →SL[σ₁₂] M₂) : (g₁ - g₂).comp f = g₁.comp f - g₂.comp f :=
by
ext
simp
#align continuous_linear_map.sub_comp ContinuousLinearMap.sub_comp
instance [TopologicalAddGroup M] : Ring (M →L[R] M) :=
{ ContinuousLinearMap.semiring,
ContinuousLinearMap.addCommGroup with
mul := (· * ·)
one := 1 }
theorem smulRight_one_pow [TopologicalSpace R] [TopologicalRing R] (c : R) (n : ℕ) :
smulRight (1 : R →L[R] R) c ^ n = smulRight (1 : R →L[R] R) (c ^ n) :=
by
induction' n with n ihn
· ext
simp
· rw [pow_succ, ihn, mul_def, smul_right_comp, smul_eq_mul, pow_succ']
#align continuous_linear_map.smul_right_one_pow ContinuousLinearMap.smulRight_one_pow
section
variable {σ₂₁ : R₂ →+* R} [RingHomInvPair σ₁₂ σ₂₁]
/-- Given a right inverse `f₂ : M₂ →L[R] M` to `f₁ : M →L[R] M₂`,
`proj_ker_of_right_inverse f₁ f₂ h` is the projection `M →L[R] f₁.ker` along `f₂.range`. -/
def projKerOfRightInverse [TopologicalAddGroup M] (f₁ : M →SL[σ₁₂] M₂) (f₂ : M₂ →SL[σ₂₁] M)
(h : Function.RightInverse f₂ f₁) : M →L[R] LinearMap.ker f₁ :=
(id R M - f₂.comp f₁).codRestrict (LinearMap.ker f₁) fun x => by simp [h (f₁ x)]
#align continuous_linear_map.proj_ker_of_right_inverse ContinuousLinearMap.projKerOfRightInverse
@[simp]
theorem coe_projKerOfRightInverse_apply [TopologicalAddGroup M] (f₁ : M →SL[σ₁₂] M₂)
(f₂ : M₂ →SL[σ₂₁] M) (h : Function.RightInverse f₂ f₁) (x : M) :
(f₁.projKerOfRightInverse f₂ h x : M) = x - f₂ (f₁ x) :=
rfl
#align continuous_linear_map.coe_proj_ker_of_right_inverse_apply ContinuousLinearMap.coe_projKerOfRightInverse_apply
@[simp]
theorem projKerOfRightInverse_apply_idem [TopologicalAddGroup M] (f₁ : M →SL[σ₁₂] M₂)
(f₂ : M₂ →SL[σ₂₁] M) (h : Function.RightInverse f₂ f₁) (x : LinearMap.ker f₁) :
f₁.projKerOfRightInverse f₂ h x = x :=
Subtype.ext_iff_val.2 <| by simp
#align continuous_linear_map.proj_ker_of_right_inverse_apply_idem ContinuousLinearMap.projKerOfRightInverse_apply_idem
@[simp]
theorem projKerOfRightInverse_comp_inv [TopologicalAddGroup M] (f₁ : M →SL[σ₁₂] M₂)
(f₂ : M₂ →SL[σ₂₁] M) (h : Function.RightInverse f₂ f₁) (y : M₂) :
f₁.projKerOfRightInverse f₂ h (f₂ y) = 0 :=
Subtype.ext_iff_val.2 <| by simp [h y]
#align continuous_linear_map.proj_ker_of_right_inverse_comp_inv ContinuousLinearMap.projKerOfRightInverse_comp_inv
end
end Ring
section DivisionMonoid
variable {R M : Type _}
/-- A nonzero continuous linear functional is an open map. -/
protected theorem isOpenMap_of_ne_zero [TopologicalSpace R] [DivisionRing R] [ContinuousSub R]
[AddCommGroup M] [TopologicalSpace M] [ContinuousAdd M] [Module R M] [ContinuousSMul R M]
(f : M →L[R] R) (hf : f ≠ 0) : IsOpenMap f :=
let ⟨x, hx⟩ := exists_ne_zero hf
IsOpenMap.of_sections fun y =>
⟨fun a => y + (a - f y) • (f x)⁻¹ • x, Continuous.continuousAt <| by continuity, by simp,
fun a => by simp [hx]⟩
#align continuous_linear_map.is_open_map_of_ne_zero ContinuousLinearMap.isOpenMap_of_ne_zero
end DivisionMonoid
section SmulMonoid
-- The M's are used for semilinear maps, and the N's for plain linear maps
variable {R R₂ R₃ S S₃ : Type _} [Semiring R] [Semiring R₂] [Semiring R₃] [Monoid S] [Monoid S₃]
{M : Type _} [TopologicalSpace M] [AddCommMonoid M] [Module R M] {M₂ : Type _}
[TopologicalSpace M₂] [AddCommMonoid M₂] [Module R₂ M₂] {M₃ : Type _} [TopologicalSpace M₃]
[AddCommMonoid M₃] [Module R₃ M₃] {N₂ : Type _} [TopologicalSpace N₂] [AddCommMonoid N₂]
[Module R N₂] {N₃ : Type _} [TopologicalSpace N₃] [AddCommMonoid N₃] [Module R N₃]
[DistribMulAction S₃ M₃] [SMulCommClass R₃ S₃ M₃] [ContinuousConstSMul S₃ M₃]
[DistribMulAction S N₃] [SMulCommClass R S N₃] [ContinuousConstSMul S N₃] {σ₁₂ : R →+* R₂}
{σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃} [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃]
include σ₁₃
@[simp]
theorem smul_comp (c : S₃) (h : M₂ →SL[σ₂₃] M₃) (f : M →SL[σ₁₂] M₂) :
(c • h).comp f = c • h.comp f :=
rfl
#align continuous_linear_map.smul_comp ContinuousLinearMap.smul_comp
omit σ₁₃
variable [DistribMulAction S₃ M₂] [ContinuousConstSMul S₃ M₂] [SMulCommClass R₂ S₃ M₂]
variable [DistribMulAction S N₂] [ContinuousConstSMul S N₂] [SMulCommClass R S N₂]
@[simp]
theorem comp_smul [LinearMap.CompatibleSMul N₂ N₃ S R] (hₗ : N₂ →L[R] N₃) (c : S)
(fₗ : M →L[R] N₂) : hₗ.comp (c • fₗ) = c • hₗ.comp fₗ :=
by
ext x
exact hₗ.map_smul_of_tower c (fₗ x)
#align continuous_linear_map.comp_smul ContinuousLinearMap.comp_smul
include σ₁₃
@[simp]
theorem comp_smulₛₗ [SMulCommClass R₂ R₂ M₂] [SMulCommClass R₃ R₃ M₃] [ContinuousConstSMul R₂ M₂]
[ContinuousConstSMul R₃ M₃] (h : M₂ →SL[σ₂₃] M₃) (c : R₂) (f : M →SL[σ₁₂] M₂) :
h.comp (c • f) = σ₂₃ c • h.comp f := by
ext x
simp only [coe_smul', coe_comp', Function.comp_apply, Pi.smul_apply,
ContinuousLinearMap.map_smulₛₗ]
#align continuous_linear_map.comp_smulₛₗ ContinuousLinearMap.comp_smulₛₗ
omit σ₁₃
instance [ContinuousAdd M₂] : DistribMulAction S₃ (M →SL[σ₁₂] M₂)
where
smul_add a f g := ext fun x => smul_add a (f x) (g x)
smul_zero a := ext fun x => smul_zero _
end SmulMonoid
section Smul
-- The M's are used for semilinear maps, and the N's for plain linear maps
variable {R R₂ R₃ S S₃ : Type _} [Semiring R] [Semiring R₂] [Semiring R₃] [Semiring S] [Semiring S₃]
{M : Type _} [TopologicalSpace M] [AddCommMonoid M] [Module R M] {M₂ : Type _}
[TopologicalSpace M₂] [AddCommMonoid M₂] [Module R₂ M₂] {M₃ : Type _} [TopologicalSpace M₃]
[AddCommMonoid M₃] [Module R₃ M₃] {N₂ : Type _} [TopologicalSpace N₂] [AddCommMonoid N₂]
[Module R N₂] {N₃ : Type _} [TopologicalSpace N₃] [AddCommMonoid N₃] [Module R N₃] [Module S₃ M₃]
[SMulCommClass R₃ S₃ M₃] [ContinuousConstSMul S₃ M₃] [Module S N₂] [ContinuousConstSMul S N₂]
[SMulCommClass R S N₂] [Module S N₃] [SMulCommClass R S N₃] [ContinuousConstSMul S N₃]
{σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃} [RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] (c : S)
(h : M₂ →SL[σ₂₃] M₃) (f g : M →SL[σ₁₂] M₂) (x y z : M)
/-- `continuous_linear_map.prod` as an `equiv`. -/
@[simps apply]
def prodEquiv : (M →L[R] N₂) × (M →L[R] N₃) ≃ (M →L[R] N₂ × N₃)
where
toFun f := f.1.Prod f.2
invFun f := ⟨(fst _ _ _).comp f, (snd _ _ _).comp f⟩
left_inv f := by ext <;> rfl
right_inv f := by ext <;> rfl
#align continuous_linear_map.prod_equiv ContinuousLinearMap.prodEquiv
theorem prod_ext_iff {f g : M × N₂ →L[R] N₃} :
f = g ↔ f.comp (inl _ _ _) = g.comp (inl _ _ _) ∧ f.comp (inr _ _ _) = g.comp (inr _ _ _) :=
by
simp only [← coe_inj, LinearMap.prod_ext_iff]
rfl
#align continuous_linear_map.prod_ext_iff ContinuousLinearMap.prod_ext_iff
@[ext]
theorem prod_ext {f g : M × N₂ →L[R] N₃} (hl : f.comp (inl _ _ _) = g.comp (inl _ _ _))
(hr : f.comp (inr _ _ _) = g.comp (inr _ _ _)) : f = g :=
prod_ext_iff.2 ⟨hl, hr⟩
#align continuous_linear_map.prod_ext ContinuousLinearMap.prod_ext
variable [ContinuousAdd M₂] [ContinuousAdd M₃] [ContinuousAdd N₂]
instance : Module S₃ (M →SL[σ₁₃] M₃)
where
zero_smul _ := ext fun _ => zero_smul _ _
add_smul _ _ _ := ext fun _ => add_smul _ _ _
instance [Module S₃ᵐᵒᵖ M₃] [IsCentralScalar S₃ M₃] : IsCentralScalar S₃ (M →SL[σ₁₃] M₃)
where op_smul_eq_smul _ _ := ext fun _ => op_smul_eq_smul _ _
variable (S) [ContinuousAdd N₃]
/-- `continuous_linear_map.prod` as a `linear_equiv`. -/
@[simps apply]
def prodₗ : ((M →L[R] N₂) × (M →L[R] N₃)) ≃ₗ[S] M →L[R] N₂ × N₃ :=
{ prodEquiv with
map_add' := fun f g => rfl
map_smul' := fun c f => rfl }
#align continuous_linear_map.prodₗ ContinuousLinearMap.prodₗ
/-- The coercion from `M →L[R] M₂` to `M →ₗ[R] M₂`, as a linear map. -/
@[simps]
def coeLm : (M →L[R] N₃) →ₗ[S] M →ₗ[R] N₃
where
toFun := coe
map_add' f g := coe_add f g
map_smul' c f := coe_smul c f
#align continuous_linear_map.coe_lm ContinuousLinearMap.coeLm
variable {S} (σ₁₃)
/-- The coercion from `M →SL[σ] M₂` to `M →ₛₗ[σ] M₂`, as a linear map. -/
@[simps]
def coeLmₛₗ : (M →SL[σ₁₃] M₃) →ₗ[S₃] M →ₛₗ[σ₁₃] M₃
where
toFun := coe
map_add' f g := coe_add f g
map_smul' c f := coe_smul c f
#align continuous_linear_map.coe_lmₛₗ ContinuousLinearMap.coeLmₛₗ
variable {σ₁₃}
end Smul
section SmulRightₗ
variable {R S T M M₂ : Type _} [Semiring R] [Semiring S] [Semiring T] [Module R S]
[AddCommMonoid M₂] [Module R M₂] [Module S M₂] [IsScalarTower R S M₂] [TopologicalSpace S]
[TopologicalSpace M₂] [ContinuousSMul S M₂] [TopologicalSpace M] [AddCommMonoid M] [Module R M]
[ContinuousAdd M₂] [Module T M₂] [ContinuousConstSMul T M₂] [SMulCommClass R T M₂]
[SMulCommClass S T M₂]
/-- Given `c : M →L[R] S`, `c.smul_rightₗ` is the linear map from `M₂` to `M →L[R] M₂`
sending `f` to `λ e, c e • f`. See also `continuous_linear_map.smul_rightL`. -/
def smulRightₗ (c : M →L[R] S) : M₂ →ₗ[T] M →L[R] M₂
where
toFun := c.smul_right
map_add' x y := by
ext e
apply smul_add
map_smul' a x := by
ext e
dsimp
apply smul_comm
#align continuous_linear_map.smul_rightₗ ContinuousLinearMap.smulRightₗ
@[simp]
theorem coe_smulRightₗ (c : M →L[R] S) : ⇑(smulRightₗ c : M₂ →ₗ[T] M →L[R] M₂) = c.smul_right :=
rfl
#align continuous_linear_map.coe_smul_rightₗ ContinuousLinearMap.coe_smulRightₗ
end SmulRightₗ
section CommRing
variable {R : Type _} [CommRing R] {M : Type _} [TopologicalSpace M] [AddCommGroup M] {M₂ : Type _}
[TopologicalSpace M₂] [AddCommGroup M₂] {M₃ : Type _} [TopologicalSpace M₃] [AddCommGroup M₃]
[Module R M] [Module R M₂] [Module R M₃] [ContinuousConstSMul R M₃]
variable [TopologicalAddGroup M₂] [ContinuousConstSMul R M₂]
instance : Algebra R (M₂ →L[R] M₂) :=
Algebra.ofModule smul_comp fun _ _ _ => comp_smul _ _ _
end CommRing
section RestrictScalars
variable {A M M₂ : Type _} [Ring A] [AddCommGroup M] [AddCommGroup M₂] [Module A M] [Module A M₂]
[TopologicalSpace M] [TopologicalSpace M₂] (R : Type _) [Ring R] [Module R M] [Module R M₂]
[LinearMap.CompatibleSMul M M₂ R A]
/-- If `A` is an `R`-algebra, then a continuous `A`-linear map can be interpreted as a continuous
`R`-linear map. We assume `linear_map.compatible_smul M M₂ R A` to match assumptions of
`linear_map.map_smul_of_tower`. -/
def restrictScalars (f : M →L[A] M₂) : M →L[R] M₂ :=
⟨(f : M →ₗ[A] M₂).restrictScalars R, f.Continuous⟩
#align continuous_linear_map.restrict_scalars ContinuousLinearMap.restrictScalars
variable {R}
@[simp, norm_cast]
theorem coe_restrictScalars (f : M →L[A] M₂) :
(f.restrictScalars R : M →ₗ[R] M₂) = (f : M →ₗ[A] M₂).restrictScalars R :=
rfl
#align continuous_linear_map.coe_restrict_scalars ContinuousLinearMap.coe_restrictScalars
@[simp]
theorem coe_restrict_scalars' (f : M →L[A] M₂) : ⇑(f.restrictScalars R) = f :=
rfl
#align continuous_linear_map.coe_restrict_scalars' ContinuousLinearMap.coe_restrict_scalars'
@[simp]
theorem restrictScalars_zero : (0 : M →L[A] M₂).restrictScalars R = 0 :=
rfl
#align continuous_linear_map.restrict_scalars_zero ContinuousLinearMap.restrictScalars_zero
section
variable [TopologicalAddGroup M₂]
@[simp]
theorem restrictScalars_add (f g : M →L[A] M₂) :
(f + g).restrictScalars R = f.restrictScalars R + g.restrictScalars R :=
rfl
#align continuous_linear_map.restrict_scalars_add ContinuousLinearMap.restrictScalars_add
@[simp]
theorem restrictScalars_neg (f : M →L[A] M₂) : (-f).restrictScalars R = -f.restrictScalars R :=
rfl
#align continuous_linear_map.restrict_scalars_neg ContinuousLinearMap.restrictScalars_neg
end
variable {S : Type _} [Ring S] [Module S M₂] [ContinuousConstSMul S M₂] [SMulCommClass A S M₂]
[SMulCommClass R S M₂]
@[simp]
theorem restrictScalars_smul (c : S) (f : M →L[A] M₂) :
(c • f).restrictScalars R = c • f.restrictScalars R :=
rfl
#align continuous_linear_map.restrict_scalars_smul ContinuousLinearMap.restrictScalars_smul
variable (A M M₂ R S) [TopologicalAddGroup M₂]
/-- `continuous_linear_map.restrict_scalars` as a `linear_map`. See also
`continuous_linear_map.restrict_scalarsL`. -/
def restrictScalarsₗ : (M →L[A] M₂) →ₗ[S] M →L[R] M₂
where
toFun := restrictScalars R
map_add' := restrictScalars_add
map_smul' := restrictScalars_smul
#align continuous_linear_map.restrict_scalarsₗ ContinuousLinearMap.restrictScalarsₗ
variable {A M M₂ R S}
@[simp]
theorem coe_restrictScalarsₗ : ⇑(restrictScalarsₗ A M M₂ R S) = restrictScalars R :=
rfl
#align continuous_linear_map.coe_restrict_scalarsₗ ContinuousLinearMap.coe_restrictScalarsₗ
end RestrictScalars
end ContinuousLinearMap
namespace ContinuousLinearEquiv
section AddCommMonoid
variable {R₁ : Type _} {R₂ : Type _} {R₃ : Type _} [Semiring R₁] [Semiring R₂] [Semiring R₃]
{σ₁₂ : R₁ →+* R₂} {σ₂₁ : R₂ →+* R₁} [RingHomInvPair σ₁₂ σ₂₁] [RingHomInvPair σ₂₁ σ₁₂]
{σ₂₃ : R₂ →+* R₃} {σ₃₂ : R₃ →+* R₂} [RingHomInvPair σ₂₃ σ₃₂] [RingHomInvPair σ₃₂ σ₂₃]
{σ₁₃ : R₁ →+* R₃} {σ₃₁ : R₃ →+* R₁} [RingHomInvPair σ₁₃ σ₃₁] [RingHomInvPair σ₃₁ σ₁₃]
[RingHomCompTriple σ₁₂ σ₂₃ σ₁₃] [RingHomCompTriple σ₃₂ σ₂₁ σ₃₁] {M₁ : Type _}
[TopologicalSpace M₁] [AddCommMonoid M₁] {M'₁ : Type _} [TopologicalSpace M'₁] [AddCommMonoid M'₁]
{M₂ : Type _} [TopologicalSpace M₂] [AddCommMonoid M₂] {M₃ : Type _} [TopologicalSpace M₃]
[AddCommMonoid M₃] {M₄ : Type _} [TopologicalSpace M₄] [AddCommMonoid M₄] [Module R₁ M₁]
[Module R₁ M'₁] [Module R₂ M₂] [Module R₃ M₃]
include σ₂₁
/-- A continuous linear equivalence induces a continuous linear map. -/
def toContinuousLinearMap (e : M₁ ≃SL[σ₁₂] M₂) : M₁ →SL[σ₁₂] M₂ :=
{ e.toLinearEquiv.toLinearMap with cont := e.continuous_toFun }
#align continuous_linear_equiv.to_continuous_linear_map ContinuousLinearEquiv.toContinuousLinearMap
/-- Coerce continuous linear equivs to continuous linear maps. -/
instance : Coe (M₁ ≃SL[σ₁₂] M₂) (M₁ →SL[σ₁₂] M₂) :=
⟨toContinuousLinearMap⟩
instance : ContinuousSemilinearEquivClass (M₁ ≃SL[σ₁₂] M₂) σ₁₂ M₁ M₂
where
coe f := f
inv f := f.invFun
coe_injective' f g h₁ h₂ := by
cases' f with f' _
cases' g with g' _
cases f'
cases g'
congr
left_inv f := f.left_inv
right_inv f := f.right_inv
map_add f := f.map_add'
map_smulₛₗ f := f.map_smul'
map_continuous := continuous_toFun
inv_continuous := continuous_invFun
-- see Note [function coercion]
/-- Coerce continuous linear equivs to maps. -/
instance : CoeFun (M₁ ≃SL[σ₁₂] M₂) fun _ => M₁ → M₂ :=
⟨fun f => f⟩
@[simp]
theorem coe_def_rev (e : M₁ ≃SL[σ₁₂] M₂) : e.toContinuousLinearMap = e :=
rfl
#align continuous_linear_equiv.coe_def_rev ContinuousLinearEquiv.coe_def_rev
theorem coe_apply (e : M₁ ≃SL[σ₁₂] M₂) (b : M₁) : (e : M₁ →SL[σ₁₂] M₂) b = e b :=
rfl
#align continuous_linear_equiv.coe_apply ContinuousLinearEquiv.coe_apply
@[simp]
theorem coe_toLinearEquiv (f : M₁ ≃SL[σ₁₂] M₂) : ⇑f.toLinearEquiv = f :=
rfl
#align continuous_linear_equiv.coe_to_linear_equiv ContinuousLinearEquiv.coe_toLinearEquiv
@[simp, norm_cast]
theorem coe_coe (e : M₁ ≃SL[σ₁₂] M₂) : ⇑(e : M₁ →SL[σ₁₂] M₂) = e :=
rfl
#align continuous_linear_equiv.coe_coe ContinuousLinearEquiv.coe_coe
theorem toLinearEquiv_injective :
Function.Injective (toLinearEquiv : (M₁ ≃SL[σ₁₂] M₂) → M₁ ≃ₛₗ[σ₁₂] M₂)
| ⟨e, _, _⟩, ⟨e', _, _⟩, rfl => rfl
#align continuous_linear_equiv.to_linear_equiv_injective ContinuousLinearEquiv.toLinearEquiv_injective
@[ext]
theorem ext {f g : M₁ ≃SL[σ₁₂] M₂} (h : (f : M₁ → M₂) = g) : f = g :=
toLinearEquiv_injective <| LinearEquiv.ext <| congr_fun h
#align continuous_linear_equiv.ext ContinuousLinearEquiv.ext
theorem coe_injective : Function.Injective (coe : (M₁ ≃SL[σ₁₂] M₂) → M₁ →SL[σ₁₂] M₂) :=
fun e e' h => ext <| funext <| ContinuousLinearMap.ext_iff.1 h
#align continuous_linear_equiv.coe_injective ContinuousLinearEquiv.coe_injective
@[simp, norm_cast]
theorem coe_inj {e e' : M₁ ≃SL[σ₁₂] M₂} : (e : M₁ →SL[σ₁₂] M₂) = e' ↔ e = e' :=
coe_injective.eq_iff
#align continuous_linear_equiv.coe_inj ContinuousLinearEquiv.coe_inj
/-- A continuous linear equivalence induces a homeomorphism. -/
def toHomeomorph (e : M₁ ≃SL[σ₁₂] M₂) : M₁ ≃ₜ M₂ :=
{ e with toEquiv := e.toLinearEquiv.toEquiv }
#align continuous_linear_equiv.to_homeomorph ContinuousLinearEquiv.toHomeomorph
@[simp]
theorem coe_toHomeomorph (e : M₁ ≃SL[σ₁₂] M₂) : ⇑e.toHomeomorph = e :=
rfl
#align continuous_linear_equiv.coe_to_homeomorph ContinuousLinearEquiv.coe_toHomeomorph
theorem image_closure (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₁) : e '' closure s = closure (e '' s) :=
e.toHomeomorph.image_closure s
#align continuous_linear_equiv.image_closure ContinuousLinearEquiv.image_closure
theorem preimage_closure (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₂) : e ⁻¹' closure s = closure (e ⁻¹' s) :=
e.toHomeomorph.preimage_closure s
#align continuous_linear_equiv.preimage_closure ContinuousLinearEquiv.preimage_closure
@[simp]
theorem isClosed_image (e : M₁ ≃SL[σ₁₂] M₂) {s : Set M₁} : IsClosed (e '' s) ↔ IsClosed s :=
e.toHomeomorph.isClosed_image
#align continuous_linear_equiv.is_closed_image ContinuousLinearEquiv.isClosed_image
theorem map_nhds_eq (e : M₁ ≃SL[σ₁₂] M₂) (x : M₁) : map e (𝓝 x) = 𝓝 (e x) :=
e.toHomeomorph.map_nhds_eq x
#align continuous_linear_equiv.map_nhds_eq ContinuousLinearEquiv.map_nhds_eq
-- Make some straightforward lemmas available to `simp`.
@[simp]
theorem map_zero (e : M₁ ≃SL[σ₁₂] M₂) : e (0 : M₁) = 0 :=
(e : M₁ →SL[σ₁₂] M₂).map_zero
#align continuous_linear_equiv.map_zero ContinuousLinearEquiv.map_zero
@[simp]
theorem map_add (e : M₁ ≃SL[σ₁₂] M₂) (x y : M₁) : e (x + y) = e x + e y :=
(e : M₁ →SL[σ₁₂] M₂).map_add x y
#align continuous_linear_equiv.map_add ContinuousLinearEquiv.map_add
@[simp]
theorem map_smulₛₗ (e : M₁ ≃SL[σ₁₂] M₂) (c : R₁) (x : M₁) : e (c • x) = σ₁₂ c • e x :=
(e : M₁ →SL[σ₁₂] M₂).map_smulₛₗ c x
#align continuous_linear_equiv.map_smulₛₗ ContinuousLinearEquiv.map_smulₛₗ
omit σ₂₁
@[simp]
theorem map_smul [Module R₁ M₂] (e : M₁ ≃L[R₁] M₂) (c : R₁) (x : M₁) : e (c • x) = c • e x :=
(e : M₁ →L[R₁] M₂).map_smul c x
#align continuous_linear_equiv.map_smul ContinuousLinearEquiv.map_smul
include σ₂₁
@[simp]
theorem map_eq_zero_iff (e : M₁ ≃SL[σ₁₂] M₂) {x : M₁} : e x = 0 ↔ x = 0 :=
e.toLinearEquiv.map_eq_zero_iff
#align continuous_linear_equiv.map_eq_zero_iff ContinuousLinearEquiv.map_eq_zero_iff
attribute [continuity]
ContinuousLinearEquiv.continuous_toFun ContinuousLinearEquiv.continuous_invFun
@[continuity]
protected theorem continuous (e : M₁ ≃SL[σ₁₂] M₂) : Continuous (e : M₁ → M₂) :=
e.continuous_toFun
#align continuous_linear_equiv.continuous ContinuousLinearEquiv.continuous
protected theorem continuousOn (e : M₁ ≃SL[σ₁₂] M₂) {s : Set M₁} : ContinuousOn (e : M₁ → M₂) s :=
e.Continuous.ContinuousOn
#align continuous_linear_equiv.continuous_on ContinuousLinearEquiv.continuousOn
protected theorem continuousAt (e : M₁ ≃SL[σ₁₂] M₂) {x : M₁} : ContinuousAt (e : M₁ → M₂) x :=
e.Continuous.ContinuousAt
#align continuous_linear_equiv.continuous_at ContinuousLinearEquiv.continuousAt
protected theorem continuousWithinAt (e : M₁ ≃SL[σ₁₂] M₂) {s : Set M₁} {x : M₁} :
ContinuousWithinAt (e : M₁ → M₂) s x :=
e.Continuous.ContinuousWithinAt
#align continuous_linear_equiv.continuous_within_at ContinuousLinearEquiv.continuousWithinAt
theorem comp_continuousOn_iff {α : Type _} [TopologicalSpace α] (e : M₁ ≃SL[σ₁₂] M₂) {f : α → M₁}
{s : Set α} : ContinuousOn (e ∘ f) s ↔ ContinuousOn f s :=
e.toHomeomorph.comp_continuousOn_iff _ _
#align continuous_linear_equiv.comp_continuous_on_iff ContinuousLinearEquiv.comp_continuousOn_iff
theorem comp_continuous_iff {α : Type _} [TopologicalSpace α] (e : M₁ ≃SL[σ₁₂] M₂) {f : α → M₁} :
Continuous (e ∘ f) ↔ Continuous f :=
e.toHomeomorph.comp_continuous_iff
#align continuous_linear_equiv.comp_continuous_iff ContinuousLinearEquiv.comp_continuous_iff
omit σ₂₁
/-- An extensionality lemma for `R ≃L[R] M`. -/
theorem ext₁ [TopologicalSpace R₁] {f g : R₁ ≃L[R₁] M₁} (h : f 1 = g 1) : f = g :=
ext <| funext fun x => mul_one x ▸ by rw [← smul_eq_mul, map_smul, h, map_smul]
#align continuous_linear_equiv.ext₁ ContinuousLinearEquiv.ext₁
section
variable (R₁ M₁)
/-- The identity map as a continuous linear equivalence. -/
@[refl]
protected def refl : M₁ ≃L[R₁] M₁ :=
{ LinearEquiv.refl R₁ M₁ with
continuous_toFun := continuous_id
continuous_invFun := continuous_id }
#align continuous_linear_equiv.refl ContinuousLinearEquiv.refl
end
@[simp, norm_cast]
theorem coe_refl : ↑(ContinuousLinearEquiv.refl R₁ M₁) = ContinuousLinearMap.id R₁ M₁ :=
rfl
#align continuous_linear_equiv.coe_refl ContinuousLinearEquiv.coe_refl
@[simp, norm_cast]
theorem coe_refl' : ⇑(ContinuousLinearEquiv.refl R₁ M₁) = id :=
rfl
#align continuous_linear_equiv.coe_refl' ContinuousLinearEquiv.coe_refl'
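/- A sanity-check sketch: the identity equivalence acts as the identity function,
definitionally. -/
example (x : M₁) : ContinuousLinearEquiv.refl R₁ M₁ x = x :=
  rfl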
/-- The inverse of a continuous linear equivalence, as a continuous linear equivalence. -/
@[symm]
protected def symm (e : M₁ ≃SL[σ₁₂] M₂) : M₂ ≃SL[σ₂₁] M₁ :=
{ e.toLinearEquiv.symm with
continuous_toFun := e.continuous_invFun
continuous_invFun := e.continuous_toFun }
#align continuous_linear_equiv.symm ContinuousLinearEquiv.symm
include σ₂₁
@[simp]
theorem symm_toLinearEquiv (e : M₁ ≃SL[σ₁₂] M₂) : e.symm.toLinearEquiv = e.toLinearEquiv.symm :=
by
ext
rfl
#align continuous_linear_equiv.symm_to_linear_equiv ContinuousLinearEquiv.symm_toLinearEquiv
@[simp]
theorem symm_toHomeomorph (e : M₁ ≃SL[σ₁₂] M₂) : e.toHomeomorph.symm = e.symm.toHomeomorph :=
rfl
#align continuous_linear_equiv.symm_to_homeomorph ContinuousLinearEquiv.symm_toHomeomorph
/-- See Note [custom simps projection]. We need to specify this projection explicitly in this case,
because it is a composition of multiple projections. -/
def Simps.apply (h : M₁ ≃SL[σ₁₂] M₂) : M₁ → M₂ :=
h
#align continuous_linear_equiv.simps.apply ContinuousLinearEquiv.Simps.apply
/-- See Note [custom simps projection] -/
def Simps.symmApply (h : M₁ ≃SL[σ₁₂] M₂) : M₂ → M₁ :=
h.symm
#align continuous_linear_equiv.simps.symm_apply ContinuousLinearEquiv.Simps.symmApply
initialize_simps_projections ContinuousLinearEquiv (to_linear_equiv_to_fun → apply,
to_linear_equiv_inv_fun → symm_apply)
theorem symm_map_nhds_eq (e : M₁ ≃SL[σ₁₂] M₂) (x : M₁) : map e.symm (𝓝 (e x)) = 𝓝 x :=
e.toHomeomorph.symm_map_nhds_eq x
#align continuous_linear_equiv.symm_map_nhds_eq ContinuousLinearEquiv.symm_map_nhds_eq
omit σ₂₁
include σ₂₁ σ₃₂ σ₃₁
/-- The composition of two continuous linear equivalences as a continuous linear equivalence. -/
@[trans]
protected def trans (e₁ : M₁ ≃SL[σ₁₂] M₂) (e₂ : M₂ ≃SL[σ₂₃] M₃) : M₁ ≃SL[σ₁₃] M₃ :=
  { e₁.toLinearEquiv.trans e₂.toLinearEquiv with
continuous_toFun := e₂.continuous_toFun.comp e₁.continuous_toFun
continuous_invFun := e₁.continuous_invFun.comp e₂.continuous_invFun }
#align continuous_linear_equiv.trans ContinuousLinearEquiv.trans
include σ₁₃
@[simp]
theorem trans_toLinearEquiv (e₁ : M₁ ≃SL[σ₁₂] M₂) (e₂ : M₂ ≃SL[σ₂₃] M₃) :
(e₁.trans e₂).toLinearEquiv = e₁.toLinearEquiv.trans e₂.toLinearEquiv :=
by
ext
rfl
#align continuous_linear_equiv.trans_to_linear_equiv ContinuousLinearEquiv.trans_toLinearEquiv
omit σ₁₃ σ₂₁ σ₃₂ σ₃₁
/-- Product of two continuous linear equivalences. The map comes from `equiv.prod_congr`. -/
def prod [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (e : M₁ ≃L[R₁] M₂) (e' : M₃ ≃L[R₁] M₄) :
(M₁ × M₃) ≃L[R₁] M₂ × M₄ :=
  { e.toLinearEquiv.Prod e'.toLinearEquiv with
continuous_toFun := e.continuous_toFun.Prod_map e'.continuous_toFun
continuous_invFun := e.continuous_invFun.Prod_map e'.continuous_invFun }
#align continuous_linear_equiv.prod ContinuousLinearEquiv.prod
@[simp, norm_cast]
theorem prod_apply [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (e : M₁ ≃L[R₁] M₂)
(e' : M₃ ≃L[R₁] M₄) (x) : e.Prod e' x = (e x.1, e' x.2) :=
rfl
#align continuous_linear_equiv.prod_apply ContinuousLinearEquiv.prod_apply
@[simp, norm_cast]
theorem coe_prod [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (e : M₁ ≃L[R₁] M₂)
(e' : M₃ ≃L[R₁] M₄) :
(e.Prod e' : M₁ × M₃ →L[R₁] M₂ × M₄) = (e : M₁ →L[R₁] M₂).Prod_map (e' : M₃ →L[R₁] M₄) :=
rfl
#align continuous_linear_equiv.coe_prod ContinuousLinearEquiv.coe_prod
theorem prod_symm [Module R₁ M₂] [Module R₁ M₃] [Module R₁ M₄] (e : M₁ ≃L[R₁] M₂)
(e' : M₃ ≃L[R₁] M₄) : (e.Prod e').symm = e.symm.Prod e'.symm :=
rfl
#align continuous_linear_equiv.prod_symm ContinuousLinearEquiv.prod_symm
include σ₂₁
protected theorem bijective (e : M₁ ≃SL[σ₁₂] M₂) : Function.Bijective e :=
e.toLinearEquiv.toEquiv.Bijective
#align continuous_linear_equiv.bijective ContinuousLinearEquiv.bijective
protected theorem injective (e : M₁ ≃SL[σ₁₂] M₂) : Function.Injective e :=
e.toLinearEquiv.toEquiv.Injective
#align continuous_linear_equiv.injective ContinuousLinearEquiv.injective
protected theorem surjective (e : M₁ ≃SL[σ₁₂] M₂) : Function.Surjective e :=
e.toLinearEquiv.toEquiv.Surjective
#align continuous_linear_equiv.surjective ContinuousLinearEquiv.surjective
include σ₃₂ σ₃₁ σ₁₃
@[simp]
theorem trans_apply (e₁ : M₁ ≃SL[σ₁₂] M₂) (e₂ : M₂ ≃SL[σ₂₃] M₃) (c : M₁) :
(e₁.trans e₂) c = e₂ (e₁ c) :=
rfl
#align continuous_linear_equiv.trans_apply ContinuousLinearEquiv.trans_apply
omit σ₃₂ σ₃₁ σ₁₃
@[simp]
theorem apply_symm_apply (e : M₁ ≃SL[σ₁₂] M₂) (c : M₂) : e (e.symm c) = c :=
e.1.right_inv c
#align continuous_linear_equiv.apply_symm_apply ContinuousLinearEquiv.apply_symm_apply
@[simp]
theorem symm_apply_apply (e : M₁ ≃SL[σ₁₂] M₂) (b : M₁) : e.symm (e b) = b :=
e.1.left_inv b
#align continuous_linear_equiv.symm_apply_apply ContinuousLinearEquiv.symm_apply_apply
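/- A round-trip sketch: `symm_apply_apply` and `apply_symm_apply` together say that an
equivalence and its inverse cancel on both sides. -/
example (e : M₁ ≃SL[σ₁₂] M₂) (x : M₁) (y : M₂) :
    e.symm (e x) = x ∧ e (e.symm y) = y :=
  ⟨e.symm_apply_apply x, e.apply_symm_apply y⟩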
include σ₁₂ σ₂₃ σ₁₃ σ₃₁
@[simp]
theorem symm_trans_apply (e₁ : M₂ ≃SL[σ₂₁] M₁) (e₂ : M₃ ≃SL[σ₃₂] M₂) (c : M₁) :
(e₂.trans e₁).symm c = e₂.symm (e₁.symm c) :=
rfl
#align continuous_linear_equiv.symm_trans_apply ContinuousLinearEquiv.symm_trans_apply
omit σ₁₂ σ₂₃ σ₁₃ σ₃₁
@[simp]
theorem symm_image_image (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₁) : e.symm '' (e '' s) = s :=
e.toLinearEquiv.toEquiv.symm_image_image s
#align continuous_linear_equiv.symm_image_image ContinuousLinearEquiv.symm_image_image
@[simp]
theorem image_symm_image (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₂) : e '' (e.symm '' s) = s :=
e.symm.symm_image_image s
#align continuous_linear_equiv.image_symm_image ContinuousLinearEquiv.image_symm_image
include σ₃₂ σ₃₁
@[simp, norm_cast]
theorem comp_coe (f : M₁ ≃SL[σ₁₂] M₂) (f' : M₂ ≃SL[σ₂₃] M₃) :
(f' : M₂ →SL[σ₂₃] M₃).comp (f : M₁ →SL[σ₁₂] M₂) = (f.trans f' : M₁ →SL[σ₁₃] M₃) :=
rfl
#align continuous_linear_equiv.comp_coe ContinuousLinearEquiv.comp_coe
omit σ₃₂ σ₃₁ σ₂₁
@[simp]
theorem coe_comp_coe_symm (e : M₁ ≃SL[σ₁₂] M₂) :
(e : M₁ →SL[σ₁₂] M₂).comp (e.symm : M₂ →SL[σ₂₁] M₁) = ContinuousLinearMap.id R₂ M₂ :=
ContinuousLinearMap.ext e.apply_symm_apply
#align continuous_linear_equiv.coe_comp_coe_symm ContinuousLinearEquiv.coe_comp_coe_symm
@[simp]
theorem coe_symm_comp_coe (e : M₁ ≃SL[σ₁₂] M₂) :
(e.symm : M₂ →SL[σ₂₁] M₁).comp (e : M₁ →SL[σ₁₂] M₂) = ContinuousLinearMap.id R₁ M₁ :=
ContinuousLinearMap.ext e.symm_apply_apply
#align continuous_linear_equiv.coe_symm_comp_coe ContinuousLinearEquiv.coe_symm_comp_coe
include σ₂₁
@[simp]
theorem symm_comp_self (e : M₁ ≃SL[σ₁₂] M₂) : (e.symm : M₂ → M₁) ∘ (e : M₁ → M₂) = id :=
by
ext x
exact symm_apply_apply e x
#align continuous_linear_equiv.symm_comp_self ContinuousLinearEquiv.symm_comp_self
@[simp]
theorem self_comp_symm (e : M₁ ≃SL[σ₁₂] M₂) : (e : M₁ → M₂) ∘ (e.symm : M₂ → M₁) = id :=
by
ext x
exact apply_symm_apply e x
#align continuous_linear_equiv.self_comp_symm ContinuousLinearEquiv.self_comp_symm
@[simp]
theorem symm_symm (e : M₁ ≃SL[σ₁₂] M₂) : e.symm.symm = e :=
by
ext x
rfl
#align continuous_linear_equiv.symm_symm ContinuousLinearEquiv.symm_symm
omit σ₂₁
@[simp]
theorem refl_symm : (ContinuousLinearEquiv.refl R₁ M₁).symm = ContinuousLinearEquiv.refl R₁ M₁ :=
rfl
#align continuous_linear_equiv.refl_symm ContinuousLinearEquiv.refl_symm
include σ₂₁
theorem symm_symm_apply (e : M₁ ≃SL[σ₁₂] M₂) (x : M₁) : e.symm.symm x = e x :=
rfl
#align continuous_linear_equiv.symm_symm_apply ContinuousLinearEquiv.symm_symm_apply
theorem symm_apply_eq (e : M₁ ≃SL[σ₁₂] M₂) {x y} : e.symm x = y ↔ x = e y :=
e.toLinearEquiv.symm_apply_eq
#align continuous_linear_equiv.symm_apply_eq ContinuousLinearEquiv.symm_apply_eq
theorem eq_symm_apply (e : M₁ ≃SL[σ₁₂] M₂) {x y} : y = e.symm x ↔ e y = x :=
e.toLinearEquiv.eq_symm_apply
#align continuous_linear_equiv.eq_symm_apply ContinuousLinearEquiv.eq_symm_apply
protected theorem image_eq_preimage (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₁) : e '' s = e.symm ⁻¹' s :=
e.toLinearEquiv.toEquiv.image_eq_preimage s
#align continuous_linear_equiv.image_eq_preimage ContinuousLinearEquiv.image_eq_preimage
protected theorem image_symm_eq_preimage (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₂) :
e.symm '' s = e ⁻¹' s := by rw [e.symm.image_eq_preimage, e.symm_symm]
#align continuous_linear_equiv.image_symm_eq_preimage ContinuousLinearEquiv.image_symm_eq_preimage
@[simp]
protected theorem symm_preimage_preimage (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₂) :
e.symm ⁻¹' (e ⁻¹' s) = s :=
e.toLinearEquiv.toEquiv.symm_preimage_preimage s
#align continuous_linear_equiv.symm_preimage_preimage ContinuousLinearEquiv.symm_preimage_preimage
@[simp]
protected theorem preimage_symm_preimage (e : M₁ ≃SL[σ₁₂] M₂) (s : Set M₁) :
e ⁻¹' (e.symm ⁻¹' s) = s :=
e.symm.symm_preimage_preimage s
#align continuous_linear_equiv.preimage_symm_preimage ContinuousLinearEquiv.preimage_symm_preimage
protected theorem uniformEmbedding {E₁ E₂ : Type _} [UniformSpace E₁] [UniformSpace E₂]
[AddCommGroup E₁] [AddCommGroup E₂] [Module R₁ E₁] [Module R₂ E₂] [UniformAddGroup E₁]
[UniformAddGroup E₂] (e : E₁ ≃SL[σ₁₂] E₂) : UniformEmbedding e :=
e.toLinearEquiv.toEquiv.UniformEmbedding e.toContinuousLinearMap.UniformContinuous
e.symm.toContinuousLinearMap.UniformContinuous
#align continuous_linear_equiv.uniform_embedding ContinuousLinearEquiv.uniformEmbedding
protected theorem LinearEquiv.uniformEmbedding {E₁ E₂ : Type _} [UniformSpace E₁] [UniformSpace E₂]
[AddCommGroup E₁] [AddCommGroup E₂] [Module R₁ E₁] [Module R₂ E₂] [UniformAddGroup E₁]
[UniformAddGroup E₂] (e : E₁ ≃ₛₗ[σ₁₂] E₂) (h₁ : Continuous e) (h₂ : Continuous e.symm) :
UniformEmbedding e :=
ContinuousLinearEquiv.uniformEmbedding
({ e with
continuous_toFun := h₁
continuous_invFun := h₂ } :
E₁ ≃SL[σ₁₂] E₂)
#align linear_equiv.uniform_embedding LinearEquiv.uniformEmbedding
omit σ₂₁
/-- Create a `continuous_linear_equiv` from two `continuous_linear_map`s that are
inverses of each other. -/
def equivOfInverse (f₁ : M₁ →SL[σ₁₂] M₂) (f₂ : M₂ →SL[σ₂₁] M₁) (h₁ : Function.LeftInverse f₂ f₁)
(h₂ : Function.RightInverse f₂ f₁) : M₁ ≃SL[σ₁₂] M₂ :=
{ f₁ with
toFun := f₁
continuous_toFun := f₁.Continuous
invFun := f₂
continuous_invFun := f₂.Continuous
left_inv := h₁
right_inv := h₂ }
#align continuous_linear_equiv.equiv_of_inverse ContinuousLinearEquiv.equivOfInverse
include σ₂₁
@[simp]
theorem equivOfInverse_apply (f₁ : M₁ →SL[σ₁₂] M₂) (f₂ h₁ h₂ x) :
equivOfInverse f₁ f₂ h₁ h₂ x = f₁ x :=
rfl
#align continuous_linear_equiv.equiv_of_inverse_apply ContinuousLinearEquiv.equivOfInverse_apply
@[simp]
theorem symm_equivOfInverse (f₁ : M₁ →SL[σ₁₂] M₂) (f₂ h₁ h₂) :
(equivOfInverse f₁ f₂ h₁ h₂).symm = equivOfInverse f₂ f₁ h₂ h₁ :=
rfl
#align continuous_linear_equiv.symm_equiv_of_inverse ContinuousLinearEquiv.symm_equivOfInverse
omit σ₂₁
variable (M₁)
/-- The continuous linear equivalences from `M` to itself form a group under composition. -/
instance automorphismGroup : Group (M₁ ≃L[R₁] M₁)
where
mul f g := g.trans f
one := ContinuousLinearEquiv.refl R₁ M₁
inv f := f.symm
mul_assoc f g h := by
ext
rfl
mul_one f := by
ext
rfl
one_mul f := by
ext
rfl
mul_left_inv f := by
ext
exact f.left_inv x
#align continuous_linear_equiv.automorphism_group ContinuousLinearEquiv.automorphismGroup
variable {M₁} {R₄ : Type _} [Semiring R₄] [Module R₄ M₄] {σ₃₄ : R₃ →+* R₄} {σ₄₃ : R₄ →+* R₃}
[RingHomInvPair σ₃₄ σ₄₃] [RingHomInvPair σ₄₃ σ₃₄] {σ₂₄ : R₂ →+* R₄} {σ₁₄ : R₁ →+* R₄}
[RingHomCompTriple σ₂₁ σ₁₄ σ₂₄] [RingHomCompTriple σ₂₄ σ₄₃ σ₂₃] [RingHomCompTriple σ₁₃ σ₃₄ σ₁₄]
/-- The continuous linear equivalence between `ulift M₁` and `M₁`. -/
def ulift : ULift M₁ ≃L[R₁] M₁ :=
{ Equiv.ulift with
map_add' := fun x y => rfl
map_smul' := fun c x => rfl
continuous_toFun := continuous_uLift_down
continuous_invFun := continuous_uLift_up }
#align continuous_linear_equiv.ulift ContinuousLinearEquiv.ulift
include σ₂₁ σ₃₄ σ₂₃ σ₂₄ σ₁₃
/-- A pair of continuous (semi)linear equivalences generates an equivalence between the spaces of
continuous linear maps. See also `continuous_linear_equiv.arrow_congr`. -/
@[simps]
def arrowCongrEquiv (e₁₂ : M₁ ≃SL[σ₁₂] M₂) (e₄₃ : M₄ ≃SL[σ₄₃] M₃) :
(M₁ →SL[σ₁₄] M₄) ≃ (M₂ →SL[σ₂₃] M₃)
where
toFun f := (e₄₃ : M₄ →SL[σ₄₃] M₃).comp (f.comp (e₁₂.symm : M₂ →SL[σ₂₁] M₁))
invFun f := (e₄₃.symm : M₃ →SL[σ₃₄] M₄).comp (f.comp (e₁₂ : M₁ →SL[σ₁₂] M₂))
left_inv f :=
ContinuousLinearMap.ext fun x => by
simp only [ContinuousLinearMap.comp_apply, symm_apply_apply, coe_coe]
right_inv f :=
ContinuousLinearMap.ext fun x => by
simp only [ContinuousLinearMap.comp_apply, apply_symm_apply, coe_coe]
#align continuous_linear_equiv.arrow_congr_equiv ContinuousLinearEquiv.arrowCongrEquiv
end AddCommMonoid
section AddCommGroup
variable {R : Type _} [Semiring R] {M : Type _} [TopologicalSpace M] [AddCommGroup M] {M₂ : Type _}
[TopologicalSpace M₂] [AddCommGroup M₂] {M₃ : Type _} [TopologicalSpace M₃] [AddCommGroup M₃]
{M₄ : Type _} [TopologicalSpace M₄] [AddCommGroup M₄] [Module R M] [Module R M₂] [Module R M₃]
[Module R M₄]
variable [TopologicalAddGroup M₄]
/-- Equivalence given by a block lower diagonal matrix. `e` and `e'` are diagonal square blocks,
and `f` is a rectangular block below the diagonal. -/
def skewProd (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) : (M × M₃) ≃L[R] M₂ × M₄ :=
  { e.toLinearEquiv.skewProd e'.toLinearEquiv ↑f with
continuous_toFun :=
(e.continuous_toFun.comp continuous_fst).prod_mk
((e'.continuous_toFun.comp continuous_snd).add <| f.Continuous.comp continuous_fst)
continuous_invFun :=
(e.continuous_invFun.comp continuous_fst).prod_mk
(e'.continuous_invFun.comp <|
continuous_snd.sub <| f.Continuous.comp <| e.continuous_invFun.comp continuous_fst) }
#align continuous_linear_equiv.skew_prod ContinuousLinearEquiv.skewProd
@[simp]
theorem skewProd_apply (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) (x) :
e.skewProd e' f x = (e x.1, e' x.2 + f x.1) :=
rfl
#align continuous_linear_equiv.skew_prod_apply ContinuousLinearEquiv.skewProd_apply
@[simp]
theorem skewProd_symm_apply (e : M ≃L[R] M₂) (e' : M₃ ≃L[R] M₄) (f : M →L[R] M₄) (x) :
(e.skewProd e' f).symm x = (e.symm x.1, e'.symm (x.2 - f (e.symm x.1))) :=
rfl
#align continuous_linear_equiv.skew_prod_symm_apply ContinuousLinearEquiv.skewProd_symm_apply
end AddCommGroup
section Ring
variable {R : Type _} [Ring R] {R₂ : Type _} [Ring R₂] {M : Type _} [TopologicalSpace M]
[AddCommGroup M] [Module R M] {M₂ : Type _} [TopologicalSpace M₂] [AddCommGroup M₂] [Module R₂ M₂]
variable {σ₁₂ : R →+* R₂} {σ₂₁ : R₂ →+* R} [RingHomInvPair σ₁₂ σ₂₁] [RingHomInvPair σ₂₁ σ₁₂]
include σ₂₁
@[simp]
theorem map_sub (e : M ≃SL[σ₁₂] M₂) (x y : M) : e (x - y) = e x - e y :=
(e : M →SL[σ₁₂] M₂).map_sub x y
#align continuous_linear_equiv.map_sub ContinuousLinearEquiv.map_sub
@[simp]
theorem map_neg (e : M ≃SL[σ₁₂] M₂) (x : M) : e (-x) = -e x :=
(e : M →SL[σ₁₂] M₂).map_neg x
#align continuous_linear_equiv.map_neg ContinuousLinearEquiv.map_neg
omit σ₂₁
section
/-! The next theorems cover the identification between `M ≃L[R] M` and the group of units of the
ring `M →L[R] M`. -/
variable [TopologicalAddGroup M]
/-- An invertible continuous linear map `f` determines a continuous linear equivalence from `M`
to itself. -/
def ofUnit (f : (M →L[R] M)ˣ) : M ≃L[R] M
where
toLinearEquiv :=
{ toFun := f.val
map_add' := by simp
map_smul' := by simp
invFun := f.inv
left_inv := fun x =>
show (f.inv * f.val) x = x by
rw [f.inv_val]
simp
right_inv := fun x =>
show (f.val * f.inv) x = x by
rw [f.val_inv]
simp }
continuous_toFun := f.val.Continuous
continuous_invFun := f.inv.Continuous
#align continuous_linear_equiv.of_unit ContinuousLinearEquiv.ofUnit
/-- A continuous equivalence from `M` to itself determines an invertible continuous linear map. -/
def toUnit (f : M ≃L[R] M) : (M →L[R] M)ˣ where
val := f
inv := f.symm
val_inv := by
ext
simp
inv_val := by
ext
simp
#align continuous_linear_equiv.to_unit ContinuousLinearEquiv.toUnit
variable (R M)
/-- The units of the algebra of continuous `R`-linear endomorphisms of `M` are multiplicatively
equivalent to the type of continuous linear equivalences between `M` and itself. -/
def unitsEquiv : (M →L[R] M)ˣ ≃* M ≃L[R] M
where
toFun := ofUnit
invFun := toUnit
left_inv f := by
ext
rfl
right_inv f := by
ext
rfl
map_mul' x y := by
ext
rfl
#align continuous_linear_equiv.units_equiv ContinuousLinearEquiv.unitsEquiv
@[simp]
theorem unitsEquiv_apply (f : (M →L[R] M)ˣ) (x : M) : unitsEquiv R M f x = f x :=
rfl
#align continuous_linear_equiv.units_equiv_apply ContinuousLinearEquiv.unitsEquiv_apply
end
section
variable (R) [TopologicalSpace R] [ContinuousMul R]
/-- Continuous linear equivalences `R ≃L[R] R` are enumerated by `Rˣ`. -/
def unitsEquivAut : Rˣ ≃ R ≃L[R] R
where
toFun u :=
equivOfInverse (ContinuousLinearMap.smulRight (1 : R →L[R] R) ↑u)
(ContinuousLinearMap.smulRight (1 : R →L[R] R) ↑u⁻¹) (fun x => by simp) fun x => by simp
invFun e :=
⟨e 1, e.symm 1, by rw [← smul_eq_mul, ← map_smul, smul_eq_mul, mul_one, symm_apply_apply], by
rw [← smul_eq_mul, ← map_smul, smul_eq_mul, mul_one, apply_symm_apply]⟩
left_inv u := Units.ext <| by simp
right_inv e := ext₁ <| by simp
#align continuous_linear_equiv.units_equiv_aut ContinuousLinearEquiv.unitsEquivAut
variable {R}
@[simp]
theorem unitsEquivAut_apply (u : Rˣ) (x : R) : unitsEquivAut R u x = x * u :=
rfl
#align continuous_linear_equiv.units_equiv_aut_apply ContinuousLinearEquiv.unitsEquivAut_apply
@[simp]
theorem unitsEquivAut_apply_symm (u : Rˣ) (x : R) : (unitsEquivAut R u).symm x = x * ↑u⁻¹ :=
rfl
#align continuous_linear_equiv.units_equiv_aut_apply_symm ContinuousLinearEquiv.unitsEquivAut_apply_symm
@[simp]
theorem unitsEquivAut_symm_apply (e : R ≃L[R] R) : ↑((unitsEquivAut R).symm e) = e 1 :=
rfl
#align continuous_linear_equiv.units_equiv_aut_symm_apply ContinuousLinearEquiv.unitsEquivAut_symm_apply
end
variable [Module R M₂] [TopologicalAddGroup M]
open _root_.ContinuousLinearMap (id fst snd)
open _root_.LinearMap (mem_ker)
/-- A pair of continuous linear maps such that `f₁ ∘ f₂ = id` generates a continuous
linear equivalence `e` between `M` and `M₂ × f₁.ker` such that `(e x).2 = x` for `x ∈ f₁.ker`,
`(e x).1 = f₁ x`, and `(e (f₂ y)).2 = 0`. The map is given by `e x = (f₁ x, x - f₂ (f₁ x))`. -/
def equivOfRightInverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : Function.RightInverse f₂ f₁) :
M ≃L[R] M₂ × ker f₁ :=
equivOfInverse (f₁.Prod (f₁.projKerOfRightInverse f₂ h)) (f₂.coprod (ker f₁).subtypeL)
(fun x => by simp) fun ⟨x, y⟩ => by simp [h x]
#align continuous_linear_equiv.equiv_of_right_inverse ContinuousLinearEquiv.equivOfRightInverse
@[simp]
theorem fst_equivOfRightInverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : Function.RightInverse f₂ f₁) (x : M) : (equivOfRightInverse f₁ f₂ h x).1 = f₁ x :=
rfl
#align continuous_linear_equiv.fst_equiv_of_right_inverse ContinuousLinearEquiv.fst_equivOfRightInverse
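/- A usage sketch: on elements of the form `f₂ y`, the first component of
`equivOfRightInverse` recovers `y`, since `f₁` is a left inverse of `f₂`. -/
example (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M) (h : Function.RightInverse f₂ f₁)
    (y : M₂) : (equivOfRightInverse f₁ f₂ h (f₂ y)).1 = y := by simp [h y]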
@[simp]
theorem snd_equivOfRightInverse (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : Function.RightInverse f₂ f₁) (x : M) :
((equivOfRightInverse f₁ f₂ h x).2 : M) = x - f₂ (f₁ x) :=
rfl
#align continuous_linear_equiv.snd_equiv_of_right_inverse ContinuousLinearEquiv.snd_equivOfRightInverse
@[simp]
theorem equivOfRightInverse_symm_apply (f₁ : M →L[R] M₂) (f₂ : M₂ →L[R] M)
(h : Function.RightInverse f₂ f₁) (y : M₂ × ker f₁) :
(equivOfRightInverse f₁ f₂ h).symm y = f₂ y.1 + y.2 :=
rfl
#align continuous_linear_equiv.equiv_of_right_inverse_symm_apply ContinuousLinearEquiv.equivOfRightInverse_symm_apply
end Ring
section
variable (ι R M : Type _) [Unique ι] [Semiring R] [AddCommMonoid M] [Module R M]
[TopologicalSpace M]
/-- If `ι` has a unique element, then `ι → M` is continuously linearly equivalent to `M`. -/
def funUnique : (ι → M) ≃L[R] M :=
{ Homeomorph.funUnique ι M with toLinearEquiv := LinearEquiv.funUnique ι R M }
#align continuous_linear_equiv.fun_unique ContinuousLinearEquiv.funUnique
variable {ι R M}
@[simp]
theorem coe_funUnique : ⇑(funUnique ι R M) = Function.eval default :=
rfl
#align continuous_linear_equiv.coe_fun_unique ContinuousLinearEquiv.coe_funUnique
@[simp]
theorem coe_funUnique_symm : ⇑(funUnique ι R M).symm = Function.const ι :=
rfl
#align continuous_linear_equiv.coe_fun_unique_symm ContinuousLinearEquiv.coe_funUnique_symm
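/- A pointwise sketch: the inverse of `funUnique` is the constant function, so evaluating it
anywhere returns the original element. -/
example (x : M) (i : ι) : (funUnique ι R M).symm x i = x :=
  rfl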
variable (R M)
/-- Continuous linear equivalence between dependent functions `Π i : fin 2, M i` and `M 0 × M 1`. -/
@[simps (config := { fullyApplied := false })]
def piFinTwo (M : Fin 2 → Type _) [∀ i, AddCommMonoid (M i)] [∀ i, Module R (M i)]
[∀ i, TopologicalSpace (M i)] : (∀ i, M i) ≃L[R] M 0 × M 1 :=
{ Homeomorph.piFinTwo M with toLinearEquiv := LinearEquiv.piFinTwo R M }
#align continuous_linear_equiv.pi_fin_two ContinuousLinearEquiv.piFinTwo
/-- Continuous linear equivalence between vectors in `M² = fin 2 → M` and `M × M`. -/
@[simps (config := { fullyApplied := false })]
def finTwoArrow : (Fin 2 → M) ≃L[R] M × M :=
{ piFinTwo R fun _ => M with toLinearEquiv := LinearEquiv.finTwoArrow R M }
#align continuous_linear_equiv.fin_two_arrow ContinuousLinearEquiv.finTwoArrow
end
end ContinuousLinearEquiv
namespace ContinuousLinearMap
open Classical
variable {R : Type _} {M : Type _} {M₂ : Type _} [TopologicalSpace M] [TopologicalSpace M₂]
section
variable [Semiring R]
variable [AddCommMonoid M₂] [Module R M₂]
variable [AddCommMonoid M] [Module R M]
/-- Introduce a function `inverse` from `M →L[R] M₂` to `M₂ →L[R] M`, which sends `f` to `f.symm` if
`f` is a continuous linear equivalence and to `0` otherwise. This definition is somewhat ad hoc,
but one needs a fully (rather than partially) defined inverse function for some purposes, including
for calculus. -/
noncomputable def inverse : (M →L[R] M₂) → M₂ →L[R] M := fun f =>
if h : ∃ e : M ≃L[R] M₂, (e : M →L[R] M₂) = f then ((Classical.choose h).symm : M₂ →L[R] M) else 0
#align continuous_linear_map.inverse ContinuousLinearMap.inverse
/-- By definition, if `f` is invertible then `inverse f = f.symm`. -/
@[simp]
theorem inverse_equiv (e : M ≃L[R] M₂) : inverse (e : M →L[R] M₂) = e.symm :=
by
have h : ∃ e' : M ≃L[R] M₂, (e' : M →L[R] M₂) = ↑e := ⟨e, rfl⟩
simp only [inverse, dif_pos h]
congr
exact_mod_cast Classical.choose_spec h
#align continuous_linear_map.inverse_equiv ContinuousLinearMap.inverse_equiv
/-- By definition, if `f` is not invertible then `inverse f = 0`. -/
@[simp]
theorem inverse_non_equiv (f : M →L[R] M₂) (h : ¬∃ e' : M ≃L[R] M₂, ↑e' = f) : inverse f = 0 :=
dif_neg h
#align continuous_linear_map.inverse_non_equiv ContinuousLinearMap.inverse_non_equiv
end
section
variable [Ring R]
variable [AddCommGroup M] [TopologicalAddGroup M] [Module R M]
variable [AddCommGroup M₂] [Module R M₂]
@[simp]
theorem ring_inverse_equiv (e : M ≃L[R] M) : Ring.inverse ↑e = inverse (e : M →L[R] M) :=
by
suffices Ring.inverse ((ContinuousLinearEquiv.unitsEquiv _ _).symm e : M →L[R] M) = inverse ↑e by
convert this
simp
rfl
#align continuous_linear_map.ring_inverse_equiv ContinuousLinearMap.ring_inverse_equiv
/-- The function `continuous_linear_map.inverse` can be written in terms of `ring.inverse` for the
ring of self-maps of the domain. -/
theorem to_ring_inverse (e : M ≃L[R] M₂) (f : M →L[R] M₂) :
inverse f = Ring.inverse ((e.symm : M₂ →L[R] M).comp f) ∘L ↑e.symm :=
by
by_cases h₁ : ∃ e' : M ≃L[R] M₂, ↑e' = f
· obtain ⟨e', he'⟩ := h₁
rw [← he']
change _ = Ring.inverse ↑(e'.trans e.symm) ∘L ↑e.symm
ext
simp
· suffices ¬IsUnit ((e.symm : M₂ →L[R] M).comp f) by simp [this, h₁]
contrapose! h₁
rcases h₁ with ⟨F, hF⟩
use (ContinuousLinearEquiv.unitsEquiv _ _ F).trans e
ext
dsimp
rw [coeFn_coe_base' F, hF]
simp
#align continuous_linear_map.to_ring_inverse ContinuousLinearMap.to_ring_inverse
theorem ring_inverse_eq_map_inverse : Ring.inverse = @inverse R M M _ _ _ _ _ _ _ :=
by
ext
simp [to_ring_inverse (ContinuousLinearEquiv.refl R M)]
#align continuous_linear_map.ring_inverse_eq_map_inverse ContinuousLinearMap.ring_inverse_eq_map_inverse
end
end ContinuousLinearMap
namespace Submodule
variable {R : Type _} [Ring R] {M : Type _} [TopologicalSpace M] [AddCommGroup M] [Module R M]
{M₂ : Type _} [TopologicalSpace M₂] [AddCommGroup M₂] [Module R M₂]
open ContinuousLinearMap
/-- A submodule `p` is called *complemented* if there exists a continuous projection `M →L[R] p`. -/
def ClosedComplemented (p : Submodule R M) : Prop :=
∃ f : M →L[R] p, ∀ x : p, f x = x
#align submodule.closed_complemented Submodule.ClosedComplemented
theorem ClosedComplemented.has_closed_complement {p : Submodule R M} [T1Space p]
(h : ClosedComplemented p) : ∃ (q : Submodule R M)(hq : IsClosed (q : Set M)), IsCompl p q :=
Exists.elim h fun f hf => ⟨ker f, f.isClosed_ker, LinearMap.isCompl_of_proj hf⟩
#align submodule.closed_complemented.has_closed_complement Submodule.ClosedComplemented.has_closed_complement
protected theorem ClosedComplemented.isClosed [TopologicalAddGroup M] [T1Space M]
{p : Submodule R M} (h : ClosedComplemented p) : IsClosed (p : Set M) :=
by
rcases h with ⟨f, hf⟩
have : ker (id R M - p.subtypeL.comp f) = p := LinearMap.ker_id_sub_eq_of_proj hf
exact this ▸ is_closed_ker _
#align submodule.closed_complemented.is_closed Submodule.ClosedComplemented.isClosed
@[simp]
theorem closedComplemented_bot : ClosedComplemented (⊥ : Submodule R M) :=
⟨0, fun x => by simp only [zero_apply, eq_zero_of_bot_submodule x]⟩
#align submodule.closed_complemented_bot Submodule.closedComplemented_bot
@[simp]
theorem closedComplemented_top : ClosedComplemented (⊤ : Submodule R M) :=
⟨(id R M).codRestrict ⊤ fun x => trivial, fun x => Subtype.ext_iff_val.2 <| by simp⟩
#align submodule.closed_complemented_top Submodule.closedComplemented_top
end Submodule
theorem ContinuousLinearMap.closedComplemented_ker_of_rightInverse {R : Type _} [Ring R]
{M : Type _} [TopologicalSpace M] [AddCommGroup M] {M₂ : Type _} [TopologicalSpace M₂]
[AddCommGroup M₂] [Module R M] [Module R M₂] [TopologicalAddGroup M] (f₁ : M →L[R] M₂)
(f₂ : M₂ →L[R] M) (h : Function.RightInverse f₂ f₁) : (ker f₁).ClosedComplemented :=
⟨f₁.projKerOfRightInverse f₂ h, f₁.projKerOfRightInverse_apply_idem f₂ h⟩
#align continuous_linear_map.closed_complemented_ker_of_right_inverse ContinuousLinearMap.closedComplemented_ker_of_rightInverse
section Quotient
namespace Submodule
variable {R M : Type _} [Ring R] [AddCommGroup M] [Module R M] [TopologicalSpace M]
(S : Submodule R M)
theorem isOpenMap_mkQ [TopologicalAddGroup M] : IsOpenMap S.mkQ :=
QuotientAddGroup.isOpenMap_coe S.toAddSubgroup
#align submodule.is_open_map_mkq Submodule.isOpenMap_mkQ
instance topologicalAddGroup_quotient [TopologicalAddGroup M] : TopologicalAddGroup (M ⧸ S) :=
topologicalAddGroup_quotient S.toAddSubgroup
#align submodule.topological_add_group_quotient Submodule.topologicalAddGroup_quotient
instance continuousSMul_quotient [TopologicalSpace R] [TopologicalAddGroup M] [ContinuousSMul R M] :
ContinuousSMul R (M ⧸ S) := by
constructor
have quot : QuotientMap fun au : R × M => (au.1, S.mkq au.2) :=
IsOpenMap.to_quotientMap (is_open_map.id.prod S.is_open_map_mkq)
(continuous_id.prod_map continuous_quot_mk)
(function.surjective_id.prod_map <| surjective_quot_mk _)
rw [quot.continuous_iff]
exact continuous_quot_mk.comp continuous_smul
#align submodule.has_continuous_smul_quotient Submodule.continuousSMul_quotient
instance t3_quotient_of_isClosed [TopologicalAddGroup M] [IsClosed (S : Set M)] : T3Space (M ⧸ S) :=
letI : IsClosed (S.to_add_subgroup : Set M) := ‹_›
S.to_add_subgroup.t3_quotient_of_is_closed
#align submodule.t3_quotient_of_is_closed Submodule.t3_quotient_of_isClosed
end Submodule
end Quotient
|
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as col
import numpy as np
from sklearn.datasets import make_moons
from sklearn.svm import SVC
# Get colors from a color map
def get_colors(colormap='viridis', n_colors=2, bounds=(0, 1)):
cmap = cm.get_cmap(colormap)
colors_rgb = cmap(np.linspace(bounds[0], bounds[1], num=n_colors))
colors_hex = [col.rgb2hex(c) for c in colors_rgb]
return colors_hex
# Plot a 2D classification data set onto the specified axes
def plot_2d_data(ax, X, y, s=20, alpha=0.95, xlabel=None, ylabel=None, title=None, legend=None, colormap='viridis'):
# Get data set size
n_examples, n_features = X.shape
# Check that the data set is 2D
if n_features != 2:
raise ValueError('Data set is not 2D!')
# Check that the lengths of X and y match
if n_examples != len(y):
raise ValueError('Length of X is not equal to the length of y!')
# Get the unique labels and set up marker styles and colors
unique_labels = np.sort(np.unique(y))
n_classes = len(unique_labels)
markers = ['o', 's', '^', 'v', '<', '>', 'p']
cmap = cm.get_cmap(colormap)
colors = cmap(np.linspace(0, 1, num=n_classes))
# Set marker sizes
if isinstance(s, np.ndarray):
        # If it's an ndarray, make sure it has the same size as the number of examples
if len(s) != n_examples:
raise ValueError('Length of s is not equal to the length of y!')
else:
        # Otherwise, broadcast the scalar into an ndarray
s = np.full_like(y, fill_value=s)
# Plot the data
for i, label in enumerate(unique_labels):
marker_color = col.rgb2hex(colors[i])
marker_shape = markers[i % len(markers)]
ax.scatter(X[y == label, 0], X[y == label, 1], s=s[y == label],
marker=marker_shape, c=marker_color, edgecolors='k', alpha=alpha)
# Add labels, title and bounds
if xlabel is not None:
ax.set_xlabel(xlabel, fontsize=12)
if ylabel is not None:
ax.set_ylabel(ylabel, fontsize=12)
if title is not None:
ax.set_title(title)
# Set the legend
if legend is not None:
ax.legend(legend)
# Plot a 2D classification function and/or corresponding data set onto the specified axes
def plot_2d_classifier(ax, X, y, predict_function, predict_args=None, predict_proba=False, boundary_level=0.5,
s=20, plot_data=True, alpha=0.75,
xlabel=None, ylabel=None, title=None, legend=None, colormap='viridis'):
# Get the bounds of the plot and generate a mesh
xMin, xMax = X[:, 0].min() - 0.25, X[:, 0].max() + 0.25
yMin, yMax = X[:, 1].min() - 0.25, X[:, 1].max() + 0.25
xMesh, yMesh = np.meshgrid(np.arange(xMin, xMax, 0.05),
np.arange(yMin, yMax, 0.05))
# Compute predictions over the mesh
if predict_proba:
zMesh = predict_function(np.c_[xMesh.ravel(), yMesh.ravel()])[:, 1]
elif predict_args is None:
zMesh = predict_function(np.c_[xMesh.ravel(), yMesh.ravel()])
else:
zMesh = predict_function(np.c_[xMesh.ravel(), yMesh.ravel()], predict_args)
zMesh = zMesh.reshape(xMesh.shape)
# Plot the classifier
ax.contourf(xMesh, yMesh, zMesh, cmap=colormap, alpha=alpha, antialiased=True)
if boundary_level is not None:
ax.contour(xMesh, yMesh, zMesh, [boundary_level], linewidths=3, colors='k')
# Plot the data
if plot_data:
plot_2d_data(ax, X, y, s=s, xlabel=xlabel, ylabel=ylabel, title=title, legend=legend, colormap=colormap)
if __name__ == '__main__':
x = get_colors()
X, y = make_moons(n_samples=100, noise=0.15)
plt.ion()
# # Plot data points only
# fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 4))
# plot_2d_data(ax, X, y, xlabel='x', ylabel='y', title='Scatter plot test', legend=['pos', 'neg'])
# fig.tight_layout()
# Plot a classifier and then superimpose data points
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(4, 4))
svm = SVC(kernel='rbf', gamma=2.0, probability=True)
svm.fit(X, y)
# plot_2d_classifier(ax, X, y, predict_function=svm.predict, predict_args=None)
plot_2d_classifier(ax, X, y, predict_function=svm.predict_proba, predict_proba=True,
xlabel='x', ylabel='y', title='Scatter plot test')
fig.tight_layout()
print()
|
library(shiny)
ui <- fluidPage(
titlePanel("Shiny Beer+Breweries Dataset"),
sidebarLayout(
sidebarPanel(
checkboxInput("doHist", "Histogram plots", value = T),
sliderInput(inputId = 'bins',
label = "Number of bins:",
min = 1,
max = 50,
value = 30)
),
mainPanel(
fluidRow(
column(6,plotOutput(outputId="abvHPlot", width="300px",height="300px")),
column(6,plotOutput(outputId="abvBPlot", width="300px",height="300px")),
column(6,plotOutput(outputId="ibuHPlot", width="300px",height="300px")),
column(6,plotOutput(outputId="ibuBPlot", width="300px",height="300px"))
)
) #end main panel
) # end sidebar
)#end fluid page |
{-# OPTIONS --safe #-}
module Definition.Conversion.Universe where
open import Definition.Untyped
open import Definition.Typed
open import Definition.Typed.Properties
open import Definition.Typed.RedSteps
open import Definition.Conversion
open import Definition.Conversion.Reduction
open import Definition.Conversion.Lift
import Tools.PropositionalEquality as PE
-- Terms in WHNF of type U that are algorithmically equal are equal as types.
univConv↓ : ∀ {A B r Γ l}
→ Γ ⊢ A [conv↓] B ∷ Univ r l ^ next l
→ Γ ⊢ A [conv↓] B ^ [ r , ι l ]
univConv↓ X = univ X
-- Terms of type U that are algorithmically equal are equal as types.
univConv↑ : ∀ {A B r Γ l}
→ Γ ⊢ A [conv↑] B ∷ Univ r l ^ next l
→ Γ ⊢ A [conv↑] B ^ [ r , ι l ]
univConv↑ ([↑]ₜ B₁ t′ u′ D d d′ whnfB whnft′ whnfu′ t<>u)
rewrite PE.sym (whnfRed* D Uₙ) =
reductionConv↑ (univ* d) (univ* d′) whnft′ whnfu′ (liftConv (univConv↓ t<>u))
|
function varargout = erfc(varargin)
%ERFC (overloaded)
switch class(varargin{1})
case 'double'
error('Overloaded SDPVAR/ERFC CALLED WITH DOUBLE. Report error')
case 'sdpvar'
varargout{1} = InstantiateElementWise(mfilename,varargin{:});
case 'char'
operator = struct('convexity','none','monotonicity','decreasing','definiteness','positive','model','callback');
operator.bounds = @bounds;
operator.range = [-1 1];
operator.derivative =@(x)-exp(-x.^2)*2/sqrt(pi);
varargout{1} = [];
varargout{2} = operator;
varargout{3} = varargin{3};
otherwise
        error('SDPVAR/ERFC called with CHAR argument?');
end
function [L,U] = bounds(xL,xU)
% erfc is monotonically decreasing, so the interval endpoints swap
L = erfc(xU);
U = erfc(xL); |
{-# LANGUAGE FlexibleContexts #-}
module STC.OrientationScaleAnalysis where
import Control.Monad.Parallel as MP
import Data.Array.Repa as R
import Data.Complex
import Data.List as L
import Data.Vector.Unboxed as VU
import DFT.Plan
import FokkerPlanck.DomainChange (r2z1Tor2s1)
import Graphics.Gnuplot.Simple
import Image.IO (ImageRepa (..), plotImageRepa,
plotImageRepaComplex)
import STC.Convolution
import System.FilePath
import Types
import Utils.Array
import Utils.Parallel
import Text.Printf
-- {-# INLINE analyzeOrientation #-}
-- analyzeOrientation :: Int -> [Double] -> R2T0Array -> (R2T0Array, R2T0Array)
-- analyzeOrientation numOrientation thetaFreqs arr =
-- let arrR2S1 = R.map magnitude . r2z1Tor2s1 numOrientation thetaFreqs $ arr
-- deltaTheta = 2 * pi / fromIntegral numOrientation :: Double
-- (Z :. _ :. cols :. rows) = extent arrR2S1
-- orientationArr =
-- fromListUnboxed (Z :. cols :. rows) .
-- parMap
-- rdeepseq
-- (\(i, j) ->
-- deltaTheta *
-- (fromIntegral .
-- VU.maxIndex . toUnboxed . computeS . R.slice arrR2S1 $
-- (Z :. All :. i :. j))) $
-- [(i, j) | i <- [0 .. cols - 1], j <- [0 .. rows - 1]]
-- freqArr = fromListUnboxed (Z :. (L.length thetaFreqs)) thetaFreqs
-- magArr =
-- R.map sqrt . sumS . R.map (\x -> (magnitude x) ^ 2) . rotate3D $ arr
-- func theta =
-- computeS .
-- R.traverse3 orientationArr freqArr magArr (\_ _ _ -> extent arr) $ \fOri fFreq fMag (Z :. k :. i :. j) ->
-- (fMag (Z :. i :. j) :+ 0) *
-- (exp $ 0 :+ (1) * fFreq (Z :. k) * (fOri (Z :. i :. j) + theta))
-- in (func 0, func 0)
-- {-# INLINE analyzeOrientationR2Z1T0 #-}
-- analyzeOrientationR2Z1T0 ::
-- Int -> [Double] -> [Double] -> R2T0Array -> (R2Z1T0Array, R2Z1T0Array)
-- analyzeOrientationR2Z1T0 numOrientation thetaFreqs theta0Freqs arr =
-- let arrR2S1 = R.map magnitude . r2z1Tor2s1 numOrientation thetaFreqs $ arr
-- deltaTheta = 2 * pi / fromIntegral numOrientation :: Double
-- (Z :. _ :. cols :. rows) = extent arrR2S1
-- orientationArr =
-- fromListUnboxed (Z :. cols :. rows) .
-- parMap
-- rdeepseq
-- (\(i, j) ->
-- deltaTheta *
-- (fromIntegral .
-- VU.maxIndex . toUnboxed . computeS . R.slice arrR2S1 $
-- (Z :. All :. i :. j))) $
-- [(i, j) | i <- [0 .. cols - 1], j <- [0 .. rows - 1]]
-- freqArr = fromListUnboxed (Z :. (L.length thetaFreqs)) thetaFreqs
-- freq0Arr = fromListUnboxed (Z :. (L.length theta0Freqs)) theta0Freqs
-- magArr =
-- R.map sqrt . sumS . R.map (\x -> (magnitude x) ^ 2) . rotate3D $ arr
-- func theta =
-- computeS .
-- R.traverse4
-- orientationArr
-- freqArr
-- freq0Arr
-- magArr
-- (\_ _ _ _ ->
-- (Z :. (L.length thetaFreqs) :. (L.length theta0Freqs) :. cols :.
-- rows)) $ \fOri fFreq fFreq0 fMag (Z :. k :. l :. i :. j) ->
-- (fMag (Z :. i :. j) :+ 0) *
-- (exp $ 0 :+ (-1) * (fFreq (Z :. k) + fFreq0 (Z :. l)) * (fOri (Z :. i :. j) + theta))
-- in (func (pi / 2),func 0)
-- {-# INLINE normalizeList #-}
-- normalizeList :: (Ord e, Fractional e) => [e] -> [e]
-- normalizeList xs = L.map (/ L.maximum xs) xs
-- plotMagnitudeOrientation ::
-- (R.Source s (Complex Double))
-- => FilePath
-- -> Int
-- -> [Double]
-- -> R.Array s DIM3 (Complex Double)
-- -> (Int, Int)
-- -> IO (Int,Int)
-- plotMagnitudeOrientation folderPath numOrientationSample thetaFreqs arr (i', j') = do
-- let orientationSampleRad =
-- [ 2 * pi / fromIntegral numOrientationSample * fromIntegral i
-- | i <- [0 .. numOrientationSample - 1]
-- ]
-- orientationSampleDeg =
-- [ 360 * fromIntegral i / fromIntegral numOrientationSample
-- | i <- [0 .. numOrientationSample - 1]
-- ]
-- freqArr = fromListUnboxed (Z :. (L.length thetaFreqs)) thetaFreqs
-- xsFreqDomain
-- -- normalizeList $
-- =
-- parMap
-- rdeepseq
-- (\theta ->
-- magnitude .
-- R.sumAllS .
-- R.zipWith (\freq x -> x * exp (0 :+ theta * freq)) freqArr .
-- R.slice arr $
-- (Z :. All :. i :. j))
-- orientationSampleRad
-- xs =
-- normalizeList $
-- R.toList .
-- R.map magnitude .
-- r2z1Tor2s1 numOrientationSample thetaFreqs .
-- extend (Z :. All :. (1 :: Int) :. (1 :: Int)) . R.slice arr $
-- (Z :. All :. i :. j)
-- (Z :. _ :. cols :. rows) = extent arr
-- magVec =
-- VU.concat $
-- parMap
-- rdeepseq
-- (\theta ->
-- toUnboxed .
-- computeS .
-- R.map magnitude . R.sumS . rotate3D . R.traverse2 arr freqArr const $ \f1 f2 idx@(Z :. k :. _ :. _) ->
-- f1 idx * exp (0 :+ theta * f2 (Z :. k)))
-- orientationSampleRad
-- (Z :. c :. a :. b) =
-- fromIndex (Z :. (L.length thetaFreqs) :. cols :. rows) . VU.maxIndex $
-- magVec
-- maxMag = VU.maximum magVec
-- (i,j) = (a,b)
-- printf
-- "Max magnitude: %0.5f at (%d,%d) %f degree.\n"
-- maxMag
-- a
-- b
-- ((fromIntegral c :: Double) / (fromIntegral numOrientationSample) * 360)
-- plotPathsStyle
-- [ PNG (folderPath </> "Magnitude.png")
-- , Title ("Magnitude at " L.++ show (i, j))
-- ] $
-- L.zip
-- -- defaultStyle
-- -- { plotType = LinesPoints
-- -- , lineSpec = CustomStyle [LineTitle "Spatial Domain", PointType 1]
-- -- }
-- -- ,
-- [ defaultStyle
-- { plotType = Lines
-- , lineSpec =
-- CustomStyle
-- [ LineTitle "Frequency Domain" -- , PointType 0
-- ]
-- }
-- ]
-- -- L.zip orientationSampleDeg xs,
-- [L.zip orientationSampleDeg xsFreqDomain]
-- return (a,b)
-- plotMagnitudeOrientationSource ::
-- (R.Source s (Complex Double))
-- => DFTPlan
-- -> FilePath
-- -> Int
-- -> Int
-- -> [Double]
-- -> R2Z1T0Array
-- -> R.Array s DIM3 (Complex Double)
-- -> (Int, Int)
-- -> IO ()
-- plotMagnitudeOrientationSource plan folderPath numOrientationSample numOrientation thetaFreqs filter input (i, j) = do
-- let orientationSampleRad =
-- [ 2 * pi / fromIntegral numOrientationSample * fromIntegral i
-- | i <- [0 .. numOrientationSample - 1]
-- ]
-- orientationSampleDeg =
-- [ 360 * fromIntegral i / fromIntegral numOrientationSample
-- | i <- [0 .. numOrientationSample - 1]
-- ]
-- freqArr = fromListUnboxed (Z :. (L.length thetaFreqs)) thetaFreqs
-- (Z :. numThetaFreq :. cols :. rows) = extent input
-- xs <-
-- MP.mapM
-- (\theta -> do
-- initialDistF <-
-- fmap (fromUnboxed (Z :. numThetaFreq :. cols :. rows) . VU.convert) .
-- dftExecute plan (DFTPlanID DFT1DG [numThetaFreq, cols, rows] [1, 2]) .
-- VU.convert . toUnboxed . computeS . R.traverse2 input freqArr const $
-- (\f1 f2 idx@(Z :. k :. i :. j) ->
-- f1 idx * exp (0 :+ theta * (1) * f2 (Z :. k)))
-- sourceArr <- convolveR2T0 plan filter initialDistF
-- let sourceR2Z1 = R.sumS . rotateR2Z1T0Array $ sourceArr
-- mag =
-- magnitude .
-- R.sumAllS .
-- r2z1Tor2s1 numOrientation thetaFreqs .
-- extend (Z :. All :. (1 :: Int) :. (1 :: Int)) .
-- R.slice sourceR2Z1 $
-- (Z :. All :. i :. j)
-- return mag)
-- orientationSampleRad
-- plotPath
-- [ PNG (folderPath </> "SourceMagnitude.png")
-- , Title ("Source Magnitude at " L.++ show (i, j))
-- ] .
-- L.zip orientationSampleDeg . normalizeList $
-- xs
|
I very rarely know what I am going to be presented with when I go out to photograph a landscape. I know what I would like, but we don't always get what we want. Not only are we dealing with nature's finest creations, we are also trying to balance them with whatever the 'greatest lighting man' throws at us. This can please or displease in equal measure.
Take A Different View: 14 Variations of the Same Location, posted on Fujifilm Insider on August 5, 2017, 12:27 pm. |
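# Trim leading and trailing whitespace from each element of a character vector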
trimWhiteSpace <- function (x) gsub("^\\s+|\\s+$", "", x)
|
[Open in Colab](https://colab.research.google.com/github/JesperDramsch/corona/blob/master/Flattening_the_Curve_Interactively.ipynb)
# Infection Modeling
The [Numberphile video](https://www.youtube.com/watch?v=k6nLfCbAzgo) on the corona curve inspired me to try to solve these ODEs in Python and make them interactive.
The original video shows the SIR model, a ["compartmental model"](https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology) for epidemiology, deterministically modeling:
- **S**usceptible to the disease
- **I**nfectious with the disease
- **R**emoved from the disease and immune
This is done by solving these three ordinary differential equations (ODEs), and luckily SciPy has just the tools for this. The change in the susceptible population depends on the transmission coefficient $\beta$, the numbers of infected and susceptible people $(I, S)$, as well as the population size $N$:
$
\frac{dS}{dt} = - \frac{\beta I S}{N},
$
the change in the infected population is given by the influx from the susceptible group above, minus the outflow of infected people who recover at the rate $\gamma$:
$
\frac{dI}{dt} = \frac{\beta I S}{N}- \gamma I,
$
the change in the recovered population is simply the inflow of infected people, scaled by the recovery rate $\gamma$ from before:
$
\frac{dR}{dt} = \gamma I,
$
We can see that eventually everyone will have passed through the infected group into the recovered group. This model simply depends on the assumption that the population $N$ never changes (a closed population).
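This is easy to verify: summing the three equations, the $\beta$ and $\gamma$ terms cancel in pairs, so the total population is conserved:
$
\frac{dS}{dt} + \frac{dI}{dt} + \frac{dR}{dt} = 0
$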
```
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, FloatSlider
import ipywidgets as widgets
%matplotlib inline
```
The following starting values use a simple percentage scale ($N=100$) together with rate estimates for the novel corona virus that causes COVID-19.
Additionally, we'll define the time span we want to analyze and the time sampling for our simulation. Coarser is faster, but finer is more stable.
```
N = 100
transmission_rate = 3.2
recovery_rate = 0.23
waning_rate = 0.00
max_time = 5e1
dt = 1e-2
```
Then we need some initially infected people from our population.
```
# Population stats
def SIR_init(N=1, infected_ratio=0.01):
""" Initialize S, I, R
Initial Susceptible, Infected and Removed Population
N (int): number of people in population
infected_ratio (float): ratio of initially infected people
"""
I = (infected_ratio * N)
S = ((1 - infected_ratio) * N)
R = 0
return S, I, R
S_ini, I_ini, R_ini = SIR_init(N)
print(SIR_init(N))
```
(99.0, 1.0, 0)
# Solve Differential Equations
We use a slightly modified version of the SIR formulation. Influenza and influenza-like viruses (possibly the corona virus?) can cause a loss of immunity. That means the SIRS model, which adds a rate at which recovered people become susceptible again, may be better suited to model this virus. That changes the aforementioned ODEs to:
$
\begin{align}
& \frac{dS}{dt} = - \frac{\beta I S}{N} + \xi R \\[6pt]
& \frac{dI}{dt} = \frac{\beta I S}{N}- \gamma I \\[6pt]
& \frac{dR}{dt} = \gamma I - \xi R
\end{align}
$
adding a term $-\xi R$ to $\frac{dR}{dt}$, where the waning rate $\xi$ controls how quickly recovered people flow back into the susceptible population $S$. As long as we keep $\xi=0$, the SIRS model is equivalent to the SIR model.
```
def dSIR_dt(t, SIR, N, transmission_rate, recovery_rate, waning_rate=0):
#def dSIR_dt(t, S, I, R, N, transmission_rate, recovery_rate, waning_rate=0):
    S, I, R = SIR
    infected = transmission_rate * S * I / N
removed = recovery_rate * I
waned = waning_rate * R
S_new = - infected + waned
I_new = infected - removed
R_new = removed - waned
return (S_new, I_new, R_new)
```
The SciPy package provides us with several ODE solvers for "Initial Value Problems", which is our kind of problem. These solvers are neatly packed in [solve_ivp](https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.solve_ivp.html#scipy.integrate.solve_ivp).
This function can return a dense solution over the entire time span we simulate. However, it can also evaluate the solution at specific points without providing that dense output. Pretty neat:
```
def solve_SIR(N, max_time, dt, transmission_rate, recovery_rate, waning_rate):
t = np.arange(0, max_time, dt)
SIR = solve_ivp(dSIR_dt,
[0, max_time],
SIR_init(N),
                    args=(N, transmission_rate, recovery_rate, waning_rate),
dense_output=True)
return t, SIR.sol(t)
```
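As a quick sanity check (a minimal sketch reusing the functions defined above), we can confirm that the solver conserves the total population, just as the equations promise:
```
# Sanity check: S + I + R should stay (numerically) equal to N at all times
t, SIR = solve_SIR(N, max_time, dt, transmission_rate, recovery_rate, waning_rate)
total = SIR.sum(axis=0)  # S(t) + I(t) + R(t) at every sampled time
print(np.allclose(total, N))  # expect: True, up to solver tolerance
```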
# Interactive Plot
A big chunk of code just to plot three lines, but now we can interactively explore the data and see how we can flatten the curve.
Social distancing reduces the transmission coefficient, so try it out!
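The annotation in the plot shows the basic reproduction number, which for these compartmental models is the ratio of the transmission and recovery rates:
$
R_0 = \frac{\beta}{\gamma}
$
An outbreak grows when $R_0 > 1$ and dies out when $R_0 < 1$.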
```
t, SIR = solve_SIR(N, max_time, dt, transmission_rate, recovery_rate, waning_rate)
fig, ax = plt.subplots(figsize=(10,5))
S_plot, = ax.plot(t, SIR[0], label='Susceptible')
I_plot, = ax.plot(t, SIR[1], label='Infectious')
R_plot, = ax.plot(t, SIR[2], label='Removed')
_ = ax.legend(loc='best')
_ = ax.set_xlabel('Time')
_ = ax.set_ylabel('Population')
_ = ax.set_title('Deterministic SIR(S) model')
r_o = plt.text(40, 0, f'$R_0$={transmission_rate/recovery_rate:.2f}', fontsize=10)
plt.close()
def plot_SIR(transmission=3.2, recovery=.23, wane=.05):
t, SIR = solve_SIR(N, max_time, dt, transmission, recovery, wane)
S_plot.set_ydata(SIR[0])
I_plot.set_ydata(SIR[1])
R_plot.set_ydata(SIR[2])
    r_o.set_text(f'$R_0$={transmission/recovery:.2f}')
fig.canvas.draw()
display(fig)
style = {'description_width': 'initial'}
interactive_plot = interact(plot_SIR,
transmission=FloatSlider(value=3.2, min=0, max=5, step=1e-2, continuous_update=False, description="Transmission Rate", style=style),
recovery=FloatSlider(value=.23, min=0, max=1, step=1e-2, continuous_update=False, description="Recovery Rate", style=style),
wane=FloatSlider(value=0, min=0, max=1, step=1e-2, continuous_update=False, description="Immunity Loss", style=style))
```
interactive(children=(FloatSlider(value=3.2, continuous_update=False, description='Transmission Rate', max=5.0…
|
------------------------------------------------------------------------------
-- Properties related with lists using instances of the induction principle
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module FOT.FOTC.Data.List.Induction.Instances.PropertiesATP where
open import FOTC.Base
open import FOTC.Base.List
open import FOTC.Data.List
open import FOTC.Data.Nat.Type
------------------------------------------------------------------------------
-- Totality properties
lengthList-N-ind-instance :
N (length []) →
(∀ x {xs} → N (length xs) → N (length (x ∷ xs))) →
∀ {xs} → List xs → N (length xs)
lengthList-N-ind-instance = List-ind (λ as → N (length as))
postulate lengthList-N : ∀ {xs} → List xs → N (length xs)
{-# ATP prove lengthList-N lengthList-N-ind-instance #-}
++-List-ind-instance :
∀ {ys} →
List ([] ++ ys) →
(∀ x {xs} → List (xs ++ ys) → List ((x ∷ xs) ++ ys)) →
∀ {xs} → List xs → List (xs ++ ys)
++-List-ind-instance {ys} = List-ind (λ as → List (as ++ ys))
postulate ++-List : ∀ {xs ys} → List xs → List ys → List (xs ++ ys)
{-# ATP prove ++-List ++-List-ind-instance #-}
map-List-ind-instance :
∀ {f} →
List (map f []) →
(∀ x {xs} → List (map f xs) → List (map f (x ∷ xs))) →
∀ {xs} → List xs → List (map f xs)
map-List-ind-instance {f} = List-ind (λ as → List (map f as))
postulate map-List : ∀ f {xs} → List xs → List (map f xs)
{-# ATP prove map-List map-List-ind-instance #-}
------------------------------------------------------------------------------
++-assoc-ind-instance :
∀ {ys zs} →
([] ++ ys) ++ zs ≡ [] ++ ys ++ zs →
(∀ x {xs} →
(xs ++ ys) ++ zs ≡ xs ++ ys ++ zs →
((x ∷ xs) ++ ys) ++ zs ≡ (x ∷ xs) ++ ys ++ zs) →
∀ {xs} → List xs → (xs ++ ys) ++ zs ≡ xs ++ ys ++ zs
++-assoc-ind-instance {ys} {zs} =
List-ind (λ as → (as ++ ys) ++ zs ≡ as ++ ys ++ zs )
postulate
++-assoc : ∀ {xs} → List xs → ∀ ys zs → (xs ++ ys) ++ zs ≡ xs ++ ys ++ zs
{-# ATP prove ++-assoc ++-assoc-ind-instance #-}
map-++-ind-instance :
∀ {f} {ys} →
map f ([] ++ ys) ≡ map f [] ++ map f ys →
(∀ x {xs} → map f (xs ++ ys) ≡ map f xs ++ map f ys →
map f ((x ∷ xs) ++ ys) ≡ map f (x ∷ xs) ++ map f ys) →
∀ {xs} → List xs → map f (xs ++ ys) ≡ map f xs ++ map f ys
map-++-ind-instance {f} {ys} =
List-ind (λ as → map f (as ++ ys) ≡ map f as ++ map f ys)
postulate
map-++ : ∀ f {xs} → List xs → ∀ ys →
map f (xs ++ ys) ≡ map f xs ++ map f ys
{-# ATP prove map-++ map-++-ind-instance #-}
|
import Mathbin
open MvPolynomial
@[simp]
lemma eq_zero_of_zero_eq (R : Type u) [HasZero R] (r : R) : 0 = r ↔ r = 0 :=
by exact eq_comm
@[simp] lemma zero_sub_eq_iff (R : Type u) [AddCommGroup R] (a b : R) : 0 - a = b ↔ a + b = 0 := by
apply Iff.intro
· intro h
rw [←h]
sorry
· sorry
register_simp_attr polynomial_nf
"Attribute for lemmas that are used in the conversion of mv_polynomial expressions to a normal form consisting of adds of sums of muls of mv_polynomials"
attribute [polynomial_nf] Polynomial.eval₂
attribute [polynomial_nf] Polynomial.sum
attribute [polynomial_nf] Finsupp.sum
attribute [polynomial_nf] mul_add
attribute [polynomial_nf] add_mul
attribute [polynomial_nf] Finset.sum_mul
attribute [polynomial_nf] Finset.mul_sum
attribute [polynomial_nf] Finset.sum_add_distrib
attribute [polynomial_nf] mul_assoc
attribute [polynomial_nf] finsupp.smul_sum
attribute [polynomial_nf] mul_smul_comm
attribute [polynomial_nf] smul_add
attribute [polynomial_nf] mul_smul
attribute [polynomial_nf] smul_mul_assoc
register_simp_attr polynomial_nf_2
"Attribute for lemmas that are used in the conversion of mv_polynomial expressions to a normal form consisting of adds of sums of muls of mv_polynomials"
attribute [polynomial_nf_2] mul_add
attribute [polynomial_nf_2] add_mul
attribute [polynomial_nf_2] finset.sum_add_distrib
attribute [polynomial_nf_2] sum_X_mul
attribute [polynomial_nf_2] sum_C_mul
attribute [polynomial_nf_2] rearrange_constants_right
attribute [polynomial_nf_2] rearrange_constants_right_with_extra
attribute [polynomial_nf_2] rearrange_sums_right
attribute [polynomial_nf_2] rearrange_sums_right_with_extra
attribute [polynomial_nf_2] C_mul_C
attribute [polynomial_nf_2] finset.sum_hom
attribute [polynomial_nf_2] mv_polynomial.smul_eq_C_mul
attribute [polynomial_nf_2] mul_assoc
attribute [polynomial_nf_2] finsupp.smul_sum
attribute [polynomial_nf_2] mul_smul_comm
attribute [polynomial_nf_2] smul_add
attribute [polynomial_nf_2] mul_smul
attribute [polynomial_nf_2] smul_mul_assoc
register_simp_attr polynomial_nf_3
"Attribute for lemmas that are used in the conversion of mv_polynomial expressions to a normal form consisting of adds of sums of muls of mv_polynomials"
attribute [polynomial_nf_3] mul_add
attribute [polynomial_nf_3] add_mul
attribute [polynomial_nf_3] finset.sum_add_distrib
attribute [polynomial_nf_3] mul_sum_symm
attribute [polynomial_nf_3] rearrange_constants_right
attribute [polynomial_nf_3] rearrange_constants_right_with_extra
attribute [polynomial_nf_3] rearrange_sums_right
attribute [polynomial_nf_3] rearrange_sums_right_with_extra
attribute [polynomial_nf_3] C_mul_C
attribute [polynomial_nf_3] finset.sum_hom
attribute [polynomial_nf_3] mv_polynomial.smul_eq_C_mul
attribute [polynomial_nf_3] mul_assoc
|
(** This file is self study of coinduction in Coq.
The contents are based on
http://www.cse.chalmers.se/research/group/logic/TypesSS05/Extra/bertot_sl2.pdf
https://www.labri.fr/perso/casteran/CoqArt/Tsinghua/C7.pdf
*)
(*
This is solution for https://www.labri.fr/perso/casteran/CoqArt/Tsinghua/exercises_7.v
*)
Require Import List.
Require Import ArithRing.
Set Implicit Arguments.
(** Let us consider the following co-inductive definition *)
CoInductive LList (A: Type) : Type := (* lazy lists *)
| LNil : LList A
| LCons : A -> LList A -> LList A.
Check (LCons 1 (LCons 2 (LCons 3 (LNil nat)))).
(* builds the infinite list n, 1+n, 2+n, 3+n, etc. *)
CoFixpoint from (n : nat) : LList nat := LCons n (from (S n)).
Definition nat_stream := from 0.
(* exercise 1 :
Build the infinite list true_false_alter which alternates the
boolean values : true,false,true,false, ...
*)
CoFixpoint alter (b : bool) : LList bool := LCons b (alter (negb b)).
(* exercise 2 *)
(* generate the infinite list n_times_n :
1,2,2,3,3,3,4,4,4,4, .... *)
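(* m counts how many more copies of n remain to be emitted; when it reaches 0 we emit S n, which then repeats S n times in total. *)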
CoFixpoint n_times_n (n : nat) (m : nat) : LList nat :=
match m with
| 0 => LCons (S n) (n_times_n (S n) n)
| S m' => LCons n (n_times_n n m')
end.
(* exercise 3 :
   Let A be any type and f : A -> A.
   define "iterates f a" as the infinite list
   a, f a, f (f a), f (f (f a)), etc.
apply this functional for defining the sequence Exp2 of powers of 2 :
1,2,4,8,16, etc.
*)
CoFixpoint iterates (A : Type) (f : A -> A) (a : A) : LList A :=
LCons a (iterates f (f a)).
Definition isEmpty (A:Type) (l:LList A) : Prop :=
match l with
| LNil _ => True
| LCons a l' => False
end.
(* exercise 4: prove the following lemma *)
Lemma nat_stream_not_Empty : ~ isEmpty nat_stream.
Proof.
unfold isEmpty. intro contra.
unfold nat_stream in contra.
simpl in contra. destruct contra.
Qed.
Definition LHead (A:Type) (l:LList A) : option A :=
match l with
| LNil _ => None
| LCons a l' => Some a
end.
Eval compute in (LHead (LCons 1 (LCons 2 (LCons 3 (LNil nat))))).
(* Exercise 5 :
prove the following lemma *)
Lemma Head_of_from : forall n, LHead (from n) = Some n.
Proof.
intros. reflexivity. Qed.
Definition LTail (A:Type) (l:LList A) : LList A :=
match l with
| LNil _ => LNil A
| LCons a l' => l'
end.
(* Exercise 6 :
define a function Nth (A:Type) (n:nat) (l:LList A) : option A
such that (Nth n l) returns
- (Some a) if a is the n-th element of l (0-based)
- None if l has less than n+1 elements
If your solution is good, you can make a simple test :
Eval compute in (LNth 5 Exp2).
Some 32 : option nat
*)
Fixpoint LNth (A : Type) (n : nat) (l : LList A) : option A :=
match l, n with
| LCons a l', S n' => LNth n' l'
| LCons a l', 0 => Some a
| LNil _, _ => None
end.
(* Exercise 7 :
For this exercise (and perhaps another one) , you may use the
following tools (but it's not mandatory) :
Standard library's theorem f_equal
tactic ring on natural numbers (from the ArithRing module) :
   Prove the following theorem :
Lemma LNth_from : forall n p, LNth n (from p) = Some (n+p).
*)
Lemma LNth_from : forall n p, LNth n (from p) = Some (n+p).
Proof.
intros. generalize dependent p. induction n.
- simpl. reflexivity.
- intros. simpl. rewrite -> IHn.
assert (n + S p = S (n + p)) by ring.
rewrite H. reflexivity.
Qed.
(* exercise 8 :
define a function list_inj (A:Type)(l : list A) : LList A
which maps any (finite) list to a lazy list having the same elements
in the same order
*)
Fixpoint LList_inj (A : Type) (l : list A) : LList A :=
match l with
| nil => LNil A
| a :: l' => LCons a (LList_inj l')
end.
(* exercise 9 :
in order to validate your function list_inj,
prove the lemma list_inj_ok (which uses the following nth function on
finite lists).
*)
Fixpoint nth (A:Type) (n:nat) (l:list A) {struct l} : option A :=
match n,l with | _,nil => None
| 0,a::_ => Some a
| S p, _::l' => nth p l'
end.
(*
Lemma list_inj_Ok : forall (A:Type)(l : list A)(n:nat),
nth n l = LNth n (list_inj l) .
*)
Lemma list_inj_Ok : forall (A:Type)(l : list A)(n:nat),
nth n l = LNth n (LList_inj l) .
Proof.
intros. generalize dependent l. induction n.
- intros. simpl. destruct l.
+ reflexivity.
+ simpl. reflexivity.
- intros. simpl. destruct l.
+ simpl. reflexivity.
+ simpl. rewrite -> IHn. reflexivity.
Qed.
Fixpoint firsts (A : Type) (n : nat) (l : LList A) : list A :=
match l, n with
| LCons a l', S n' => a :: (firsts n' l')
| _, _ => nil
end.
(* exercise 10 :
Define a "reciprocal" to list_inj :
firsts (A:Type) n (l:LList A): list A
returns the list of n-ths first elements of l
if l is finite and too short, firsts returns the list of all elements of l
Here is a little test :
*)
Definition Exp2 : LList nat := iterates (fun n => 2 * n) 1.
Eval compute in (firsts 6 Exp2).
Eval compute in (firsts 10 (n_times_n 1 1)).
(* Exercise 11 (not so easy) :
Prove that Exp2 truely contains the sequence of all powers of 2 *)
Inductive Finite(A:Type): LList A -> Prop :=
Finite_LNil : Finite (LNil A)
|Finite_Lcons : forall a l, Finite l -> Finite (LCons a l).
CoInductive Infinite(A:Type): LList A -> Prop :=
Infinite_LCons : forall a l, Infinite l -> Infinite (LCons a l).
CoInductive LList_eq (A:Type): LList A -> LList A -> Prop :=
| LList_eq_LNil : LList_eq (LNil A) (LNil A)
| LList_eq_LCons : forall a l l', LList_eq l l' ->
LList_eq (LCons a l) (LCons a l').
Definition LList_decomp (A:Type) (l:LList A) : LList A :=
match l with
| LNil _ => LNil A
| LCons a l' => LCons a l'
end.
Eval simpl in (LList_decomp (n_times_n 1 1)).
Lemma LList_decompose : forall (A:Type) (l:LList A), l = LList_decomp l.
Proof.
intros A l; case l; trivial.
Qed.
Ltac unwind_i :=
match goal with | |- ?t1= ?t2 =>
apply trans_equal with (1 := LList_decompose t1);auto
end.
Ltac unwind term1 term2 :=
let eg := fresh "eg" in
assert(eg : term1 = term2);
[unwind_i|idtac].
Lemma bool_alternate_Infinite : forall b, Infinite (alter b).
Proof.
cofix H.
intro b.
unwind (alter b) (LCons b (alter (negb b))).
rewrite eg.
constructor.
auto.
Guarded.
Qed.
(* exercise 12 : Prove the following lemmas
Lemma Exp2_Infinite : Infinite Exp2.
Lemma bool_alternate_eqn : forall b, bool_alternate b =
LCons b (bool_alternate (negb b)).
*)
Lemma iterates_Infinite : forall (A : Type) (f : A -> A) (n : A),
Infinite (iterates f n).
Proof.
intro A. cofix H. intros.
unwind (iterates f n) (LCons n (iterates f (f n))).
rewrite eg. apply Infinite_LCons.
apply H. Qed.
Lemma bool_alternate_eqn :
forall b, alter b = LCons b (alter (negb b)).
Proof.
intro.
unwind (alter b) (LCons b (alter (negb b))).
apply eg. Qed.
CoFixpoint LAppend (A:Type) (u v:LList A) : LList A :=
match u with
| LNil _ => v
| LCons a u' => LCons a (LAppend u' v)
end.
Lemma LAppend_LNil : forall (A:Type) (v:LList A), LAppend (LNil A) v = v.
Proof.
intros A v.
destruct v; unwind_i.
Qed.
Lemma LAppend_LCons :
forall (A:Type) (a:A) (u v:LList A),
LAppend (LCons a u) v = LCons a (LAppend u v).
Proof.
intros A a u v.
unwind_i.
Qed.
Hint Rewrite LAppend_LNil LAppend_LCons : llists.
Lemma LAppend_Infinite_1 : forall (A:Type)(u v : LList A),
Infinite u -> Infinite (LAppend u v).
Proof.
intro A;cofix H1.
destruct u.
intros v H;inversion H.
intros v H;rewrite LAppend_LCons.
constructor;auto.
apply H1.
inversion H;auto.
Qed.
(* exercise 13 :
Prove the following lemma :
Lemma LAppend_Infinite_2 : forall (A:Type)(u v : LList A),
Infinite v -> Infinite (LAppend u v).
*)
Lemma LAppend_Infinite_2 : forall (A:Type)(u v : LList A),
Infinite v -> Infinite (LAppend u v).
Proof.
intro A. cofix H.
intros. destruct v.
- inversion H0.
- destruct u.
+ rewrite LAppend_LNil. apply H0.
+ rewrite LAppend_LCons.
constructor. apply H. apply H0.
Qed.
(* exercise 14 :
Prove the following lemma :
Lemma LAppend_Infinite_3 : forall (A:Type)(u v : LList A),
Infinite (LAppend u v) ->
Finite u -> Infinite v.
*)
Lemma LAppend_Infinite_3 : forall (A:Type)(u v : LList A),
Infinite (LAppend u v) ->
Finite u -> Infinite v.
Proof.
intro A. cofix H. intros.
destruct v.
- induction H1.
+ rewrite LAppend_LNil in H0. inversion H0.
+ rewrite LAppend_LCons in H0.
inversion H0; subst. apply IHFinite in H3.
inversion H3.
- induction H1.
+ rewrite LAppend_LNil in H0. apply H0.
+ apply IHFinite.
rewrite LAppend_LCons in H0.
inversion H0; subst. apply H3.
Qed.
(* exercise 15 :
Prove the following lemma :
Lemma LAppend_absorbent : forall (A:Type)( u v: LList A),
Infinite u ->
LList_eq u (LAppend u v).
*)
Lemma LAppend_absorbent : forall (A:Type)( u v: LList A),
Infinite u ->
LList_eq u (LAppend u v).
Proof.
intro A. cofix H.
intros.
destruct u.
- inversion H0.
- rewrite LAppend_LCons. apply LList_eq_LCons.
apply H. inversion H0; subst. apply H2.
Qed.
|
(*
Copyright 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
theory sys_getprio_mem
imports syscall
begin
text \<open>One locale per function in the binary.\<close>
locale sys_getprio_function = syscall_context +
fixes rsp\<^sub>0 rbp\<^sub>0 current_task id' a sys_getprio_ret :: \<open>64 word\<close>
and v\<^sub>0 :: \<open>8 word\<close>
and blocks :: \<open>(nat \<times> 64 word \<times> nat) set\<close>
assumes seps: \<open>seps blocks\<close>
and masters:
\<open>master blocks (a, 1) 0\<close>
\<open>master blocks (rsp\<^sub>0, 8) 1\<close>
\<open>master blocks (rsp\<^sub>0-8, 8) 2\<close>
\<open>master blocks (rsp\<^sub>0-16, 8) 3\<close>
\<open>master blocks (rsp\<^sub>0-32, 8) 4\<close>
\<open>master blocks (current_task, 8) 5\<close>
\<open>master blocks (id', 4) 6\<close>
and ret_address: \<open>outside sys_getprio_ret 23 85\<close> \<comment> \<open>Only works for non-recursive functions.\<close>
and task: \<open>the (label_to_address assembly ''current_task'') = current_task\<close>
begin
text \<open>
The Floyd invariant expresses, for selected program locations, properties that invariably hold there.
Here it simply expresses that a byte in memory remains untouched.
\<close>
definition pp_\<Theta> :: \<open>_ \<Rightarrow> floyd_invar\<close> where
\<open>pp_\<Theta> tid \<equiv> [
\<comment> \<open>precondition\<close>
boffset+23 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0
\<and> regs \<sigma> rbp = rbp\<^sub>0
\<and> regs \<sigma> rdi = tid
\<and> \<sigma> \<turnstile> *[current_task,8] = id'
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+sys_getprio_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
boffset+65 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-8
\<and> regs \<sigma> rbp = rsp\<^sub>0-8
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = tid
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+sys_getprio_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
boffset+83 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0-8
\<and> regs \<sigma> rbp = rsp\<^sub>0-8
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-8,8] = rbp\<^sub>0
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0-32,8] = tid
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+sys_getprio_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
\<comment> \<open>postcondition\<close>
boffset+sys_getprio_ret \<mapsto> \<lambda>\<sigma>. \<sigma> \<turnstile> *[a,1] = v\<^sub>0
\<and> regs \<sigma> rsp = rsp\<^sub>0+8
\<and> regs \<sigma> rbp = rbp\<^sub>0
]\<close>
text \<open>Adding some rules to the simplifier to simplify proofs.\<close>
schematic_goal pp_\<Theta>_zero[simp]:
shows \<open>pp_\<Theta> tid boffset = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_l[simp]:
shows \<open>pp_\<Theta> tid (n + boffset) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_r[simp]:
shows \<open>pp_\<Theta> tid (boffset + n) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
lemma rewrite_sys_getprio_mem:
assumes \<open>master blocks (tid, 4) 7\<close>
shows \<open>is_std_invar sys_getprio_ret (floyd.invar sys_getprio_ret (pp_\<Theta> tid))\<close>
proof -
note masters = masters assms
show ?thesis
text \<open>Boilerplate code to start the VCG\<close>
apply (rule floyd_invarI)
apply (rewrite at \<open>floyd_vcs sys_getprio_ret \<hole> _\<close> pp_\<Theta>_def)
apply (intro floyd_vcsI)
text \<open>Subgoal for rip = boffset+23\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address task)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
apply restart_symbolic_execution
apply (finish_symbolic_execution masters: masters)
done
text \<open>Subgoal for rip = boffset+65\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address task)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
done
text \<open>Subgoal for rip = boffset+83\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address task)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
done
text \<open>Trivial ending subgoal.\<close>
subgoal
by simp
done
qed
end
end
|
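# Test whether the integer n is even or odd (GAP built-ins)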
IsEvenInt(n);
IsOddInt(n);
|
! RUN: %python %S/test_errors.py %s %flang_fc1
! Test extension: RETURN from main program
return !ok
!ERROR: RETURN with expression is only allowed in SUBROUTINE subprogram
return 0
end
|
A : Set
|
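-- Print the number 66, converted to a String with show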
main : JS_IO ()
main = putStr' $ show 66
|
#!/usr/bin/env python
"""Run benchmarks for model synthetic photometry"""
import time
import os
import glob
import argparse
from collections import OrderedDict
import numpy as np
import sncosmo
delim = 61 * "-"
# test data
ndata = 100 # make divisible by 4!
dates = np.linspace(-15., 40., ndata)
bands = np.array((ndata//4) * ['desg', 'desr', 'desi', 'sdssg'])
niter = 100
# models
f99dust = sncosmo.F99Dust(3.1)
models = OrderedDict([
('salt2', sncosmo.Model(source='salt2')),
('hsiao', sncosmo.Model(source='hsiao')),
('salt2+f99dust',
sncosmo.Model(source='salt2', effects=[f99dust],
effect_names=['mw'], effect_frames=['obs'])),
('hsiao+f99dust',
sncosmo.Model(source='hsiao', effects=[f99dust],
effect_names=['mw'], effect_frames=['obs']))
])
print("\nbandflux(band_array, time_array) [4 des bands]:")
print(delim)
print("Model n=1 n=10 n=100")
print(delim)
for name, model in models.items():
print('{:15s}'.format(name), end='')
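    # idx selects a scalar (n=1) or a range (n=10, n=100) to vary the number of points per call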
for idx in [0, range(10), range(100)]:
d = dates[idx]
b = bands[idx]
time1 = time.time()
for i in range(niter): model.bandflux(b, d)
time2 = time.time()
time_sec = (time2 - time1) / niter
print("%10.5f" % (time_sec * 1000.), end='')
print(" ms per call")
|
\documentclass{beamer}
\usepackage[utf8]{inputenc}
\usetheme{#THEME}
\usecolortheme{#COLOR_THEME}
\title[Short Paper Title]{Presentation Title}
\subtitle{Presentation Subtitle}
\author[Author, Another]{F.~Author\inst{1} \and S.~Another\inst{2}}
\institute[Some University]
{
\inst{1}%
Department of Computer Science\\
University of Somewhere
\and
\inst{2}%
Department of Theoretical Philosophy\\
University of Elsewhere}
\date[Short Occasion]{Date / Occasion}
\begin{document}
\begin{frame}
\titlepage
\end{frame}
\section{First Section}
\subsection[Short First Subsection Name]{First Subsection Name}
\begin{frame}{This is the slide title.}{This is the slide subtitle.}
\begin{block}{Remark}
Sample text
\end{block}
\begin{itemize}
\item
An item
\item
Another item
\begin{itemize}
\item Something
\begin{itemize}
\item Something else
\end{itemize}
\end{itemize}
\begin{enumerate}
\item Thing A
\item Thing B
\item Thing C
\end{enumerate}
\end{itemize}
\end{frame}
\end{document}
|
/*
* The MIT License (MIT)
*
* Copyright (c) 2017 Sylko Olzscher
*
*/
#include <cyng/chrono.h>
#include <ctime>
#include <cerrno>
#include <cstring>
#include <iostream>
#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#ifdef _DEBUG
#include <cyng/io/io_chrono.hpp>
#endif
namespace cyng
{
namespace chrono
{
int year(std::tm const& t)
{
return t.tm_year + 1900;
}
int month(std::tm const& t)
{
return t.tm_mon + 1;
}
int day(std::tm const& t)
{
return t.tm_mday;
}
int day_of_year(std::tm const& t)
{
return t.tm_yday;
}
int hour(std::tm const& t)
{
return t.tm_hour;
}
int minute(std::tm const& t)
{
return t.tm_min;
}
int second(std::tm const& t)
{
return t.tm_sec;
}
int time_of_day(std::tm const& t) {
BOOST_ASSERT_MSG(t.tm_mon > 0 && t.tm_mon < 13, "month is out of range");
BOOST_ASSERT_MSG(t.tm_mday >= 0 && t.tm_mday < 32, "day is out of range");
return t.tm_sec + (t.tm_min * 60) + (t.tm_hour * 60 * 60);
}
std::tm init_tm(int year, int month, int day, int hour, int min, int sec)
{
BOOST_ASSERT_MSG(year > 1900, "year is out of range");
BOOST_ASSERT_MSG(month > 0, "month is out of range");
#ifdef _MSC_VER
// MSC does not support aggregate initialisation of struct
std::tm t;
t.tm_sec = sec; // tm_sec - Seconds. [0-60] (1 leap second)
t.tm_min = min; // tm_min - Minutes. [0-59]
t.tm_hour = hour; // tm_hour - Hours. [0-23]
t.tm_mday = day; // tm_mday - Day. [1-31]
t.tm_mon = month - 1; // tm_mon - Month. [0-11]
t.tm_year = year - 1900; // tm_year - Year - 1900
t.tm_wday = 0; // days since Sunday - [0, 6]
t.tm_yday = 0; // days since January 1 - [0, 365]
t.tm_isdst = -1; // daylight savings time flag [-1/0/1]
return t;
#else
return std::tm {
sec, // tm_sec - Seconds. [0-60] (1 leap second)
min, // tm_min - Minutes. [0-59]
hour, // tm_hour - Hours. [0-23]
day, // tm_mday - Day. [1-31]
month - 1, // tm_mon - Month. [0-11]
year - 1900, // tm_year - Year - 1900
// unused
0, // tm_wday - Day of week [0-6]
0, // tm_yday - Days in year [0-365]
-1, // tm_isdst - DST. [-1/0/1]
0, // Seconds east of UTC.
"" // Timezone abbreviation.
};
#endif
}
std::time_t tm_to_tt(std::tm const& t)
{
std::tm tmp = t;
#ifdef BOOST_WINDOWS
return ::_mkgmtime(&tmp);
#else
// nonstandard GNU extension, also present on the BSDs
return ::timegm(&tmp);
#endif
}
std::chrono::system_clock::time_point init_tp(std::uint16_t year, std::uint8_t month, std::uint8_t day, std::uint8_t hour, std::uint8_t min, double sec)
{
const std::time_t tt = tm_to_tt(init_tm(year, month, day, hour, min, 0));
std::chrono::duration<double> d(sec);
return std::chrono::system_clock::from_time_t(tt) + std::chrono::duration_cast<std::chrono::microseconds>(d);
}
std::tm convert_utc(std::time_t tt)
{
std::tm r{ 0 };
#ifdef BOOST_WINDOWS
const errno_t e = ::gmtime_s(&r, &tt);
if (e != 0)
{
char msg[128];
::strerror_s(msg, 128, e);
std::cerr << msg << std::endl;
}
BOOST_ASSERT_MSG (e == 0, "convert_utc");
#else
// POSIX API
const struct tm* ptr = ::gmtime_r(&tt, &r);
BOOST_ASSERT_MSG(ptr != nullptr, "convert_utc");
boost::ignore_unused(ptr);
#endif
return r;
}
std::tm convert_local(std::time_t tt)
{
#ifdef _MSC_VER
struct tm r;
::localtime_s(&r, &tt);
return r;
#else
return *::localtime( &tt );
#endif
            // BOOST_ASSERT_MSG(ptr != nullptr, "convert_local");
}
std::chrono::minutes delta()
{
//
// get a time point
//
const std::chrono::system_clock::time_point now = std::chrono::system_clock::now();
const std::time_t now_tt = std::chrono::system_clock::to_time_t(now);
//
// get UTC and local time
//
const std::tm utc_tt = convert_utc(now_tt);
const std::tm local_tt = convert_local(now_tt);
//
// calculate offset
//
return std::chrono::minutes(60 * (utc_tt.tm_hour - local_tt.tm_hour) + (utc_tt.tm_min - local_tt.tm_min));
}
std::pair<std::time_t, double> to_dbl_time_point(std::chrono::system_clock::time_point const& tp)
{
            // Some precision may be lost in the conversion.
const std::time_t tt = std::chrono::system_clock::to_time_t(tp);
dbl_seconds sec = tp - std::chrono::system_clock::from_time_t(tt);
BOOST_ASSERT_MSG(sec.count() < 1.0, "error in fractional second");
return std::make_pair(tt, sec.count());
}
std::chrono::system_clock::time_point to_time_point(dbl_time_point const& dtp)
{
            // Some precision may be lost in the conversion.
return std::chrono::system_clock::from_time_t(dtp.first)
+ std::chrono::duration_cast<std::chrono::microseconds>(dbl_seconds(dtp.second));
}
std::chrono::system_clock::duration duration_of_day(std::chrono::system_clock::time_point const& tp)
{
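            // Successively strip the sub-day hours, minutes, seconds and milliseconds from d;
            // the difference to the full epoch duration is then the time elapsed since
            // midnight (UTC), at millisecond resolution.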
std::chrono::system_clock::duration d = tp.time_since_epoch();
std::chrono::hours h = std::chrono::duration_cast<std::chrono::hours>(d);
d -= std::chrono::hours(h.count() % 24);
std::chrono::minutes m = std::chrono::duration_cast<std::chrono::minutes>(d);
d -= std::chrono::minutes(m.count() % 60);
std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(d);
d -= std::chrono::seconds(s.count() % 60);
std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(d);
d -= std::chrono::milliseconds(ms.count() % 1000);
return tp.time_since_epoch() - d;
}
std::chrono::system_clock::time_point strip_time(std::chrono::system_clock::time_point const& tp)
{
return (tp - duration_of_day(tp));
}
std::string to_string(std::chrono::system_clock::time_point const& tp)
{
const std::time_t tt = std::chrono::system_clock::to_time_t(tp);
#ifdef _MSC_VER
char str[26];
::ctime_s(str, sizeof str, &tt);
std::string s(str, sizeof str);
#else
std::string s = std::ctime(&tt);
#endif
return s.substr(0, s.size() - 1); // remove NL
}
std::tm make_utc_tm(std::chrono::system_clock::time_point tp)
{
return convert_utc(std::chrono::system_clock::to_time_t(tp));
}
std::chrono::system_clock::time_point add_month(std::chrono::system_clock::time_point const& tp, int month)
{
auto tt = std::chrono::system_clock::to_time_t(tp);
//
            // calculate the accuracy lost in the time_t conversion
//
dbl_seconds sec = tp - std::chrono::system_clock::from_time_t(tt);
auto tm = convert_utc(tt);
//
// add/sub one or multiple months.
//
while (month > 0) {
if (tm.tm_mon == 11) {
tm.tm_mon = 0;
tm.tm_year++;
}
else {
tm.tm_mon++;
}
month--;
}
while (month < 0) {
if (tm.tm_mon == 0) {
tm.tm_mon = 11;
tm.tm_year--;
BOOST_ASSERT_MSG(tm.tm_year > 1900, "year is out of range");
}
else {
tm.tm_mon--;
}
month++;
}
return std::chrono::system_clock::from_time_t(tm_to_tt(tm))
+ std::chrono::duration_cast<std::chrono::microseconds>(dbl_seconds(sec));
}
days days_of_month(std::chrono::system_clock::time_point tp)
{
#ifdef _DEBUG
//std::cout << "tp : " << to_str(tp) << std::endl;
#endif
//
// convert to tm struct and cut out all other information than
// year and month positioned to the first day in this month
//
auto tm = make_utc_tm(tp);
auto begin = cyng::chrono::init_tp(cyng::chrono::year(tm)
, cyng::chrono::month(tm)
, 1 // 1. day
, 0 // hour
, 0 // minute
, 0.0); // this day
#ifdef _DEBUG
//std::cout << "begin: " << to_str(begin) << std::endl;
#endif
//
            // Calculate the begin of the next month.
            // Using "begin" guarantees that we do not
            // skip to the month after next.
//
tp = add_month(begin, 1);
#ifdef _DEBUG
//std::cout << "tp : " << to_str(tp) << std::endl;
#endif
//
// Cut out again all other information than year and month
// and use the first day of the next month.
//
tm = make_utc_tm(tp);
auto end = cyng::chrono::init_tp(cyng::chrono::year(tm)
, cyng::chrono::month(tm)
, 1 // 1. day
, 0 // hour
, 0 // minute
, 0.0); // this day
#ifdef _DEBUG
//std::cout << "end : " << to_str(end) << std::endl;
#endif
//
// Calculate the diff and convert it into days
//
return std::chrono::duration_cast<days>(end - begin);
}
bool same_day(std::chrono::system_clock::time_point tp1, std::chrono::system_clock::time_point tp2)
{
const auto tm1 = make_utc_tm(tp1);
const auto tm2 = make_utc_tm(tp2);
return (year(tm1) == year(tm2)) && (day_of_year(tm1) == day_of_year(tm2));
}
}
}
|
From mathcomp
Require Import ssreflect.
Require Import Coq.Logic.JMeq.
Require Import Coq.Logic.EqdepFacts.
(* Equality of records like foo is easy to prove *)
Record foo := make_foo
{
foo_carrier : Set;
}.
Theorem foo_eq (F F' : foo) :
foo_carrier F = foo_carrier F'
-> F = F'.
Proof.
move=> Hcarrier.
destruct F, F'.
simpl in * |-.
f_equal.
exact Hcarrier.
Qed.
(* Can equality of records like bar not be proven the same way as for foo? *)
Record bar := make_bar
{
bar_carrier : Set;
bar_star : bar_carrier;
}.
Theorem bar_eq (B B' : bar) :
bar_carrier B = bar_carrier B'
-> JMeq (bar_star B) (bar_star B')
-> B = B'.
Proof.
destruct B as [Bcarrier Bstar].
destruct B' as [B'carrier B'star].
simpl.
move=> Hcarrier Hstar.
pose proof (JMeq_eq_dep (U := Set) (fun X => X) Hcarrier Hstar) as Heq.
apply eq_dep_eq_sigT in Heq.
dependent rewrite Heq.
reflexivity.
Qed.
|
{-# OPTIONS --sized-types #-}
open import Relation.Binary.Core
module BubbleSort.Correctness.Permutation {A : Set}
(_≤_ : A → A → Set)
(tot≤ : Total _≤_) where
open import BubbleSort _≤_ tot≤
open import Data.Product
open import Data.List
open import Data.Sum
open import List.Permutation.Base A
open import List.Permutation.Base.Equivalence A
open import Size
open import SList
open import SList.Properties A
open import SList.Concatenation A
lemma-swap*∼ : {ι : Size}(x : A) → (xs : SList A {ι}) → unsize A (x ∙ xs) ∼ unsize A (proj₂ (swap* x xs) ∙ proj₁ (swap* x xs))
lemma-swap*∼ x snil = ∼x /head /head ∼[]
lemma-swap*∼ x (y ∙ ys)
with tot≤ x y
... | inj₁ x≤y = ∼x /head (/tail /head) (lemma-swap*∼ y ys)
... | inj₂ y≤x = ∼x (/tail /head) (/tail /head) (lemma-swap*∼ x ys)
lemma-bubbleSort∼ : {ι : Size}(xs : SList A {ι}) → unsize A xs ∼ unsize A (bubbleSort xs)
lemma-bubbleSort∼ snil = ∼[]
lemma-bubbleSort∼ (x ∙ xs) = trans∼ (lemma-swap*∼ x xs) (trans∼ (lemma-⊕∼ y (lemma-bubbleSort∼ ys)) (lemma-size-unsize y (bubbleSort ys)))
where sxxs = swap* x xs
ys = proj₁ sxxs
y = proj₂ sxxs
theorem-bubbleSort∼ : (xs : List A) → xs ∼ unsize A (bubbleSort (size A xs))
theorem-bubbleSort∼ xs = trans∼ (lemma-unsize-size xs) (lemma-bubbleSort∼ (size A xs))
|
#' Version of Ink engine
#' @family version
#' @export
version_ink <- function() {
# initialize a js context
js <- create_context()
on.exit(remove_context(js))
# current version of the ink story file format (the .ink file itself?)
js$get("inkjs.Story.inkVersionCurrent") # this is from inkjs itself
# the minimum legacy version of ink that can be loaded by the current version of the code.
#js$get("story.inkVersionMinimumCompatible") # this is from the engine but it's private in inkjs.Story
# storyContent.inkVersion -> inkjs.Story.inkVersionCurrent
# to run an ink story correctly we need inkVersionMinimumCompatible <= version_story()
}
#' Version of Ink story
#' @inheritParams load_story
#' @family version
#' @export
version_story <- function(filepath) {
json <- read_inkjson(filepath)
json$inkVersion
}
#' Version of Ink story state
#' @inheritParams load_story
#' @family version
#' @keywords internal
version_storystate <- function(js) {
# to load a json file correctly we need kInkSaveStateVersion >= kMinCompatibleLoadVersion
# save state JSON version. this is from the story
save_version <- js$get("story._state.kInkSaveStateVersion")
# minimum version needed to load JSON. this is from the story
load_version_min <- js$get("story._state.kMinCompatibleLoadVersion")
# combine
list(
"kInkSaveStateVersion" = save_version,
"kMinCompatibleLoadVersion" = load_version_min
)
}
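# Sketch (hypothetical): a rough compatibility check built from the helpers
# above. Strictly we would need the engine's private
# inkVersionMinimumCompatible; as a proxy, check that the story was written
# for at most the current engine version.
# is_compatible <- function(filepath) {
#   version_story(filepath) <= version_ink()
# }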
|
data Vect : Nat -> Type -> Type where
Nil : Vect Z a
(::) : a -> Vect k a -> Vect (S k) a
record MyDPair a (p : a -> Type) where
constructor MkMyDPair
dfst : a
dsnd : p dfst
record DVect a where
constructor MkDVect
test : Int
{n : Nat}
vec : Vect n a
record Person where
constructor MkPerson
name : String
age, shoesize : Int
some_fn : b -> b -- 'b' bound as an argument to MkPerson
testPair : MyDPair Nat (\n => Vect n Int)
testPair = MkMyDPair _ [1,2,3,4,5]
testDVect : DVect Int
testDVect = MkDVect 94 [1,2,3,4,5]
testPerson : Person
testPerson = MkPerson "Wowbagger" 1337 10 (\x => S x)
|
From cap_machine Require Import rules_base.
From iris.base_logic Require Export invariants gen_heap.
From iris.program_logic Require Export weakestpre ectx_lifting.
From iris.proofmode Require Import tactics.
From iris.algebra Require Import frac.
From cap_machine.rules Require Import rules_StoreU.
Section cap_lang_rules.
Context `{memG Σ, regG Σ, MonRef: MonRefG (leibnizO _) CapR_rtc Σ}.
Context `{MachineParameters}.
Implicit Types P Q : iProp Σ.
Implicit Types σ : ExecConf.
Implicit Types c : cap_lang.expr.
Implicit Types a b : Addr.
Implicit Types r : RegName.
Implicit Types v : cap_lang.val.
Implicit Types w : Word.
Implicit Types reg : gmap RegName Word.
Implicit Types ms : gmap Addr Word.
Lemma isU_nonO p p' :
PermFlows p p' → isU p = true → p' ≠ O.
Proof.
intros Hfl' Hra. destruct p'; auto. destruct p; inversion Hfl'. inversion Hra.
Qed.
Lemma wb_implies_verify_access p g:
∀ b e a,
withinBounds ((p, g), b, e, a) = true ->
match (a + 0)%a with
| Some a' =>
if Addr_le_dec b a'
then if Addr_le_dec a' a then if Addr_lt_dec a e then Some a' else None else None
else None
| None => None
end = Some a.
Proof.
intros b e a Hwb.
rewrite /= addr_add_0 /=.
apply withinBounds_le_addr in Hwb as [Hle Hlt].
destruct (Addr_le_dec b a);[|contradiction].
destruct (Addr_le_dec a a);[|solve_addr].
destruct (Addr_lt_dec a e);[|contradiction].
auto.
Qed.
(* store and increment *)
Lemma wp_storeU_success_0_reg E pc_p pc_g pc_b pc_e pc_a pc_a' w dst src w'
p g b e a a' w'' pc_p' p' :
decodeInstrW w = StoreU dst (inl 0%Z) (inr src) →
PermFlows pc_p pc_p' →
PermFlows p p' →
isCorrectPC (inr ((pc_p,pc_g),pc_b,pc_e,pc_a)) →
(pc_a + 1)%a = Some pc_a' →
isU p = true -> canStoreU p w'' = true ->
withinBounds ((p, g), b, e, a) = true ->
(a + 1)%a = Some a' ->
{{{ ▷ PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a)
∗ ▷ pc_a ↦ₐ[pc_p'] w
∗ ▷ src ↦ᵣ w''
∗ ▷ dst ↦ᵣ inr ((p,g),b,e,a)
∗ ▷ a ↦ₐ[p'] w' }}}
Instr Executable @ E
{{{ RET NextIV;
PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a')
∗ pc_a ↦ₐ[pc_p'] w
∗ src ↦ᵣ w''
∗ dst ↦ᵣ inr ((p,g),b,e,a')
∗ a ↦ₐ[p'] w'' }}}.
Proof.
iIntros (Hinstr Hfl Hfl' Hvpc Hpca' HU HstoreU Hwb Ha' φ)
"(>HPC & >Hi & >Hsrc & >Hdst & >Hsrca) Hφ".
iDestruct (map_of_regs_3 with "HPC Hsrc Hdst") as "[Hmap (%&%&%)]".
pose proof (isU_nonO _ _ Hfl' HU) as Hp''.
pose proof (correctPC_nonO _ _ _ _ _ _ Hfl Hvpc) as Hpc_p'.
iDestruct (memMap_resource_2ne_apply with "Hi Hsrca") as "[Hmem %]"; auto.
iApply (wp_storeU _ pc_p with "[$Hmap $Hmem]"); eauto; simplify_map_eq; eauto.
{ by rewrite !dom_insert; set_solver+. }
{ rewrite HU HstoreU. erewrite wb_implies_verify_access; eauto.
by simplify_map_eq. }
iNext. iIntros (regs' mem' retv) "(#Hspec & Hmem & Hmap)".
iDestruct "Hspec" as %Hspec.
destruct Hspec as [ | * Hfail ].
{ (* Success *)
iApply "Hφ".
simplify_map_eq.
erewrite wb_implies_verify_access in H11; eauto. simplify_eq.
rewrite insert_commute // insert_insert.
iDestruct (memMap_resource_2ne with "Hmem") as "[Hpc_a Ha]";auto.
destruct (addr_eq_dec a'0 a'0);[|contradiction].
incrementPC_inv.
simplify_map_eq.
rewrite (insert_commute _ _ PC) // insert_insert.
rewrite (insert_commute _ _ src) // insert_insert.
iDestruct (regs_of_map_3 with "[$Hmap]") as "[HPC [Hsrc Hdst] ]"; eauto. iFrame. }
{ (* Failure (contradiction) *)
destruct Hfail; try incrementPC_inv; simplify_map_eq; eauto.
all: try congruence.
erewrite wb_implies_verify_access in e6; eauto. simplify_eq.
Unshelve. all:auto.
}
Qed.
(* store and increment from and to the same register *)
Lemma wp_storeU_success_0_reg_same E pc_p pc_g pc_b pc_e pc_a pc_a' w dst w'
p g b e a a' pc_p' p' :
decodeInstrW w = StoreU dst (inl 0%Z) (inr dst) →
PermFlows pc_p pc_p' →
PermFlows p p' →
isCorrectPC (inr ((pc_p,pc_g),pc_b,pc_e,pc_a)) →
(pc_a + 1)%a = Some pc_a' →
isU p = true -> canStoreU p (inr (p, g, b, e, a)) = true ->
withinBounds ((p, g), b, e, a) = true ->
(a + 1)%a = Some a' ->
{{{ ▷ PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a)
∗ ▷ pc_a ↦ₐ[pc_p'] w
∗ ▷ dst ↦ᵣ inr ((p,g),b,e,a)
∗ ▷ a ↦ₐ[p'] w' }}}
Instr Executable @ E
{{{ RET NextIV;
PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a')
∗ pc_a ↦ₐ[pc_p'] w
∗ dst ↦ᵣ inr ((p,g),b,e,a')
∗ a ↦ₐ[p'] inr ((p,g),b,e,a)}}}.
Proof.
iIntros (Hinstr Hfl Hfl' Hvpc Hpca' HU HstoreU Hwb Ha' φ)
"(>HPC & >Hi & >Hdst & >Hsrca) Hφ".
iDestruct (map_of_regs_2 with "HPC Hdst") as "[Hmap %]".
pose proof (isU_nonO _ _ Hfl' HU) as Hp''.
pose proof (correctPC_nonO _ _ _ _ _ _ Hfl Hvpc) as Hpc_p'.
iDestruct (memMap_resource_2ne_apply with "Hi Hsrca") as "[Hmem %]"; auto.
iApply (wp_storeU _ pc_p with "[$Hmap $Hmem]"); eauto; simplify_map_eq; eauto.
{ by rewrite !dom_insert; set_solver+. }
{ unfold canStoreU. rewrite HU HstoreU. erewrite wb_implies_verify_access; eauto.
by simplify_map_eq. }
iNext. iIntros (regs' mem' retv) "(#Hspec & Hmem & Hmap)".
iDestruct "Hspec" as %Hspec.
destruct Hspec as [ | * Hfail ].
{ (* Success *)
iApply "Hφ".
simplify_map_eq.
erewrite wb_implies_verify_access in H9; eauto. simplify_eq.
rewrite insert_commute // insert_insert.
iDestruct (memMap_resource_2ne with "Hmem") as "[Hpc_a Ha]";auto.
destruct (addr_eq_dec a'0 a'0);[|contradiction].
incrementPC_inv.
simplify_map_eq.
rewrite (insert_commute _ _ PC) // insert_insert.
rewrite insert_insert.
iDestruct (regs_of_map_2 with "[$Hmap]") as "[HPC [Hsrc Hdst] ]"; eauto. iFrame. }
{ (* Failure (contradiction) *)
destruct Hfail; try incrementPC_inv; simplify_map_eq; eauto.
all: try congruence.
erewrite wb_implies_verify_access in e6; eauto. simplify_eq.
Unshelve. all:auto.
}
Qed.
Lemma wp_storeU_success_0_z E pc_p pc_g pc_b pc_e pc_a pc_a' w dst z w'
p g b e a a' pc_p' p' :
decodeInstrW w = StoreU dst (inl 0%Z) (inl z) →
PermFlows pc_p pc_p' →
PermFlows p p' →
isCorrectPC (inr ((pc_p,pc_g),pc_b,pc_e,pc_a)) →
(pc_a + 1)%a = Some pc_a' →
isU p = true ->
withinBounds ((p, g), b, e, a) = true ->
(a + 1)%a = Some a' ->
{{{ ▷ PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a)
∗ ▷ pc_a ↦ₐ[pc_p'] w
∗ ▷ dst ↦ᵣ inr ((p,g),b,e,a)
∗ ▷ a ↦ₐ[p'] w' }}}
Instr Executable @ E
{{{ RET NextIV;
PC ↦ᵣ inr ((pc_p,pc_g),pc_b,pc_e,pc_a')
∗ pc_a ↦ₐ[pc_p'] w
∗ dst ↦ᵣ inr ((p,g),b,e,a')
∗ a ↦ₐ[p'] (inl z) }}}.
Proof.
iIntros (Hinstr Hfl Hfl' Hvpc Hpca' HU Hwb Ha' φ)
"(>HPC & >Hi & >Hdst & >Hsrca) Hφ".
iDestruct (map_of_regs_2 with "HPC Hdst") as "[Hmap %]".
pose proof (isU_nonO _ _ Hfl' HU) as Hp''.
pose proof (correctPC_nonO _ _ _ _ _ _ Hfl Hvpc) as Hpc_p'.
iDestruct (memMap_resource_2ne_apply with "Hi Hsrca") as "[Hmem %]"; auto.
iApply (wp_storeU _ pc_p with "[$Hmap $Hmem]"); eauto; simplify_map_eq; eauto.
{ by rewrite !dom_insert; set_solver+. }
{ rewrite HU. erewrite wb_implies_verify_access; eauto.
by simplify_map_eq. }
iNext. iIntros (regs' mem' retv) "(#Hspec & Hmem & Hmap)".
iDestruct "Hspec" as %Hspec.
destruct Hspec as [ | * Hfail ].
{ (* Success *)
iApply "Hφ".
simplify_map_eq.
erewrite wb_implies_verify_access in H9; eauto. simplify_eq.
rewrite insert_commute // insert_insert.
iDestruct (memMap_resource_2ne with "Hmem") as "[Hpc_a Ha]";auto.
destruct (addr_eq_dec a'0 a'0);[|contradiction].
incrementPC_inv.
simplify_map_eq.
rewrite (insert_commute _ _ PC) // insert_insert. rewrite insert_insert.
iDestruct (regs_of_map_2 with "[$Hmap]") as "[HPC Hdst]"; eauto. iFrame. }
{ (* Failure (contradiction) *)
destruct Hfail; try incrementPC_inv; simplify_map_eq; eauto.
all: try congruence.
erewrite wb_implies_verify_access in e6; eauto. simplify_eq.
Unshelve. all:auto.
}
Qed.
End cap_lang_rules.
|
Discussion in 'Deck Help and Strategy' started by KingGengar, Nov 5, 2007.
Why drive a TRUK when you can saunter in with a pachyderm?
You can also say this is "Riding the Dragon" but don't read too much into that.
STRATEGY: Set up the elephant as soon as possible for the disruptive 40+Rapid Spin. This causes chaos for Blissey, but also Electrode, Electivire, and Raichu, since Donphan is resistant to Lightning. This Resistance to Lightning also covers Gyarados' Weakness.
Once set up, you can use Donphan's 100hp, Phanpy's Rage, and Memory Berry to good effect. This goes well with Gyarados, which does the same thing, only without the Berry. You run 3 Windstorm to help Gyarados, but Memory Berry can work for the Dragon also.
Switch and Warp Point are both in line with Donphan's disorientation, and Gyarados' huge retreat cost. The single SSU can be used to scoop up that hurting Gyarados, but you might just as easily add another Night Maintenance.
The different Energies won't matter if using only Rage attacks, or Donphan mostly, but it'll be good to have Water in case you try to build Gyarados, or use Dragon Rage. Scramble will probably be necessary since damage is a big part of the plan. In fact, it may be preferable to have 4 Scramble. I'm not sure yet.
The big risk is the Magikarp, but there are 9 ways to get a 2nd Pokemon on T2, not including 7 more Draw/Rotate.
Donphan I understand ... Gyarados going with him, not so much. Seriously, what does Gyarados add to this besides complicating your energy, making you power dependent, and giving you terrible starts?
Edit: Sorry for the negative-sounding tone. Came out a lot worse than intended. What do you do if damage doesn't get spread for Rage?
There isn't much synergy between the cards, and what makes Donphan almost playable is that it uses Rapid Spin and avoids damage. The idea of trying to use Memory Berry and Flail doesn't make much sense with its attack. I can't see you having a T2 Donphan as consistently as you should as opposed to if you included some Quick Ball in the list.
I like playing it with Gyarados because after Rapid Spin you bring up Gyarados to take the hit, then use Flail.
First, you gotta understand how much I love Gyarados as a Pokemon, and want to play the MT version "just because." That said, it's such a fragile situation, with Magikarp being, self-admittedly, the weakest Pokemon; with Gyarados dependent on Poke-Powers; with its far-fetched attacks.
Second, since Gyarados is weak to Lightning, Donphan is one of the few choices which makes sense for Gyarados as a partner, being resistant to Lightning, and being Fighting type, which is Lightning's main problem. Master of Puppets hits the nail on the head - Rapid Spin, Gyarados comes up, gets damaged, hits for big damage.
Right, so I see what you're saying, that you don't think Donphan will be able to use the Memory Berry consistently. I see where it would make sense to play Buffer instead to help the fragile Magikarp from being KO'd early.
What do they add to each other? Besides the similar Rage attacks, and the Weakness coverage, not a whole lot. Gyarados can use *any* energy for Magikarp's attack, however, so Fighting doesn't bother it too much.
I'll have to test it more and see what happens.
I don't have much to comment but I just can't bear to play with a 30 hp pokemon.
It really is too bad that the Carp doesn't have Fighting Resistance -10, since Gyarados has -20. That would at least force the use of a Plus Power or something for the first turn Riolu KO.
T2 Donphan is a good idea because? You hit for 40 and send both basics back to the bench. Unless you team it up with like something that can free retreat or attack for 1 energy, you won't be doing much next turn because Donphan is on the bench.
You run 3 Windstorm to help Gyarados, but Memory Berry can work for the Dragon also.
Never mind that... how about Leek Slap FTW!
KingGengar.. nice idea... look at this card for your target at Rapid spin.. you don't switch and if they have CC on an Active and not a Bench, the Body Kicks in after the CC is benched and before you switch.
But really, you would start with Magikarp 1 in every 8 games, which is pretty meh.
Yeah, the new cards are really cool. That Wormadam is interesting, and so are its mechanics.
I still don't get the idea behind the deck. If you Rapid Spin back to the bench T2, how do you do anything of any good damage on T3 with your only powered Pokemon on the bench?
It helps with what Prime is talking about... you can use the Burmy to get back the F nrg from Mentor/TVR evo up to Wormadam and then Target the Worm with the Spin! You dont switch but your opponent does so you can stack damage on their bench and stack nrg on the Worm then Memory Berry the Phan for Flail.... then bring out the Dma and drop a scramble and go to town while building another Phan! A lot of phun and irritating!! Just what I like!
I can't say I really LIKE the idea of this deck, but it definitely is an interesting take on Donphan. I don't like the idea of Gyarados simply because you're relying on your opponent to damage it for you to do any damage, and with a nasty retreat cost, the only real way to get it back to the bench is with a trainer card... only to bring it back up with another Rapid Spin. Is that a little counter-intuitive or is it just me?
Would that really work? I think someone might want to ask that in Ask the Masters.
BLACK MAMBA: Yeah, I suppose it is counter-intuitive. But I'm looking at it this way: if opponent snipes my Donphan, I can use the Memory Berry (if the damage is better than the attack), and if opponent hits Gyarados, I can dish out mega-damage for one energy.
It's possible you could pair Donphan with Sharpedo. Then, if opponent hits Donphan, piles up for Memory Berry, or if opponent hits Sharpedo, Rough Skin. Then, free retreat, do it again. Not super-powerful, but super-annoying.
MECHES: Neat idea about the Wormadan!
Maybe run fossils with the deck, so you can just discard them if they aren't KO'd? Eh, I don't know. |
{"Models":{"index.md":[{"SourceFilePath":"index.md","FilePath":"dljpitv1.loj"}],"articles/intro.md":[{"SourceFilePath":"articles/intro.md","FilePath":"pnqm4pg4.jfw"}],"api/index.md":[{"SourceFilePath":"api/index.md","FilePath":"5sh140so.rav"}]}} |
Require Export SfLib.
Require Export HelperFunctions.
Inductive bplustree (b: nat) (X:Type) : Type :=
| bptLeaf : list (nat * X) -> bplustree b X
| bptNode : list (nat * (bplustree b X)) -> bplustree b X
.
Notation "[[ b , X | x , .. , y ]]" := (bptLeaf b X (cons x .. (cons y []) ..)) (at level 100, format
"[[ b , X | '[v ' x , .. , y ']' ]]").
Notation "{{ b , X | f , x , .. , y }}" := (bptNode b X f (cons x .. (cons y []) ..)) (at level 99, format
"{{ b , X | '[v ' '//' f , '//' x , .. , y ']' '//' }}").
Example test := bptLeaf 2 bool [(1, true), (2, false)].
Inductive appears_in_kvl {X:Type} (sk: nat) : list (nat * X) -> Prop :=
| aik_here: forall v l, appears_in_kvl sk ((sk, v)::l)
| aik_later: forall k v l, appears_in_kvl sk l -> appears_in_kvl sk ((k, v)::l).
Inductive kv_appears_in_kvl {X:Type} (sk: nat) (sv: X) : list (nat * X) -> Prop :=
| kv_aik_here: forall l, kv_appears_in_kvl sk sv ((sk, sv)::l)
| kv_aik_later: forall k v l, kv_appears_in_kvl sk sv l -> kv_appears_in_kvl sk sv ((k, v)::l).
Lemma kv_appears_in_kvl_impl_appears_in_kvl: forall (X: Type) (k: nat) (v: X) (l: list (nat * X)),
kv_appears_in_kvl k v l -> appears_in_kvl k l.
Proof.
intros.
induction H; constructor; assumption.
Qed.
Inductive appears_in_tree {X:Type} {b: nat} (sk: nat) : bplustree b X -> Prop :=
| ait_leaf: forall l, appears_in_kvl sk l -> appears_in_tree sk (bptLeaf b X l)
| ait_node_last: forall k1 k2 v1 v2,
appears_in_tree sk v2 -> k2 <= sk ->
appears_in_tree sk (bptNode b X [(k1, v1), (k2, v2)])
| ait_node_here: forall k1 k2 v1 v2 l,
appears_in_tree sk v1 -> k1 <= sk /\ sk < k2 ->
appears_in_tree sk (bptNode b X ((k1, v1)::(k2, v2)::l))
| ait_node_later: forall x k1 k2 v1 v2 l,
appears_in_tree sk (bptNode b X ((k1, v1)::(k2, v2)::l)) ->
k1 <= sk ->
appears_in_tree sk (bptNode b X (x::(k1, v1)::(k2, v2)::l)).
Inductive kv_appears_in_tree {X:Type} {b: nat} (sk: nat) (sv: X) : bplustree b X -> Prop :=
| kv_ait_leaf: forall l, kv_appears_in_kvl sk sv l -> kv_appears_in_tree sk sv (bptLeaf b X l)
| kv_ait_node_last: forall k1 k2 v1 v2,
kv_appears_in_tree sk sv v2 -> k2 <= sk ->
kv_appears_in_tree sk sv (bptNode b X [(k1, v1), (k2, v2)])
| kv_ait_node_here: forall k1 k2 v1 v2 l,
kv_appears_in_tree sk sv v1 -> k1 <= sk /\ sk < k2 ->
kv_appears_in_tree sk sv (bptNode b X ((k1, v1)::(k2, v2)::l))
| kv_ait_node_later: forall x k1 k2 v1 v2 l,
kv_appears_in_tree sk sv (bptNode b X ((k1, v1)::(k2, v2)::l)) ->
k1 <= sk ->
kv_appears_in_tree sk sv (bptNode b X (x::(k1, v1)::(k2, v2)::l)).
Inductive kvl_sorted {X: Type}: list (nat * X) -> Prop :=
kvl_sorted_0 : kvl_sorted []
| kvl_sorted_1 : forall (n: nat) (x: X),
kvl_sorted [(n, x)]
| kvl_sorted_cons : forall (n1 n2: nat) (x1 x2: X) (lst: list (nat * X)),
kvl_sorted ((n2,x2)::lst) ->
blt_nat n1 n2 = true ->
kvl_sorted ((n1,x1)::(n2,x2)::lst).
(* Some props for having a prop apply to all elements in a list *)
Inductive all_values (X : Type) (P : X -> Prop) : list (nat * X) -> Prop :=
| av_empty : all_values X P []
| av_next : forall (x:X) (n: nat) (l: list (nat * X)), all_values X P l -> P x -> all_values X P ((n,x)::l)
.
Inductive all_keys (X : Type) (P : nat -> Prop) : list (nat * X) -> Prop :=
| ak_empty : all_keys X P []
| ak_next : forall (x:X) (n: nat) (l: list (nat * X)), all_keys X P l -> P n -> all_keys X P ((n,x)::l)
.
Inductive all (P : nat -> Prop) : list nat -> Prop :=
| a_empty : all P []
| a_next : forall (n: nat) (l: list nat), all P l -> P n -> all P (n::l)
.
Inductive all_values_eq_prop (X: Type)(P: X -> X -> Prop) : list (nat * X) -> Prop :=
| alep_0 : all_values_eq_prop X P []
| alep_1 : forall (x:X) (n: nat), all_values_eq_prop X P [(n, x)]
| alep_next : forall (x1 x2:X) (n1 n2: nat) l,
all_values_eq_prop X P ((n2, x2) :: l) ->
P x1 x2 ->
all_values_eq_prop X P ((n1, x1) :: (n2, x2) :: l).
(* Some helper functions for checking if a number is above or below a given number *)
Definition below (n: nat) : nat -> Prop :=
fun o => blt_nat o n = true.
Definition below_equal (n: nat) : nat -> Prop :=
fun o => ble_nat o n = true.
Definition between (n m: nat) : nat -> Prop :=
fun o => andb (ble_nat n o) (blt_nat o m) = true.
Definition above (m: nat) : nat -> Prop :=
fun o => ble_nat m o = true.
|
From Undecidability.Synthetic Require Import DecidabilityFacts EnumerabilityFacts ListEnumerabilityFacts.
From Undecidability.Shared Require Import Dec.
Require Import List.
Import ListNotations.
#[local] Coercion dec2bool P (d: dec P) := if d then true else false.
Lemma enumerable_enum {X} {p : X -> Prop} :
enumerable p <-> list_enumerable p.
Proof.
split. eapply enumerable_list_enumerable. eapply list_enumerable_enumerable.
Qed.
Lemma enumerable_disj X (p q : X -> Prop) :
enumerable p -> enumerable q -> enumerable (fun x => p x \/ q x).
Proof.
intros [Lp H] % enumerable_enum [Lq H0] % enumerable_enum.
eapply enumerable_enum.
exists (fix f n := match n with 0 => [] | S n => f n ++ (Lp n) ++ (Lq n) end).
intros x. split.
- intros [H1 | H1].
* eapply H in H1 as [m]. exists (1 + m). cbn.
apply in_or_app. right. apply in_or_app. now left.
* eapply H0 in H1 as [m]. exists (1 + m). cbn.
apply in_or_app. right. apply in_or_app. now right.
- intros [m]. induction m.
* inversion H1.
* apply in_app_iff in H1.
destruct H1 as [?|H1]; [now auto|].
apply in_app_iff in H1.
unfold list_enumerator in *; firstorder easy.
Qed.
Lemma enumerable_conj X (p q : X -> Prop) :
discrete X -> enumerable p -> enumerable q -> enumerable (fun x => p x /\ q x).
Proof.
intros [] % discrete_iff [Lp] % enumerable_enum [Lq] % enumerable_enum.
eapply enumerable_enum.
exists (fix f n := match n with 0 => [] | S n => f n ++ (filter (fun x => Dec (In x (cumul Lq n))) (cumul Lp n)) end).
intros x. split.
+ intros []. eapply (list_enumerator_to_cumul H) in H1 as [m1].
eapply (list_enumerator_to_cumul H0) in H2 as [m2].
exists (1 + m1 + m2). cbn. apply in_or_app. right.
apply filter_In. split.
* eapply cum_ge'; eauto; lia.
* eapply Dec_auto. eapply cum_ge'; eauto; lia.
+ intros [m]. induction m.
* inversion H1.
* apply in_app_iff in H1. destruct H1 as [?|H1]; [now auto|].
apply filter_In in H1. destruct H1 as [? H1].
split.
** eapply (list_enumerator_to_cumul H). eauto.
** destruct (Dec _) in H1; [|easy].
eapply (list_enumerator_to_cumul H0). eauto.
Qed.
Lemma projection X Y (p : X * Y -> Prop) :
enumerable p -> enumerable (fun x => exists y, p (x,y)).
Proof.
intros [f].
exists (fun n => match f n with Some (x, y) => Some x | None => None end).
intros; split.
- intros [y ?]. eapply H in H0 as [n]. exists n. now rewrite H0.
- intros [n ?]. destruct (f n) as [ [] | ] eqn:E; inversion H0; subst.
exists y. eapply H. eauto.
Qed.
Lemma projection' X Y (p : X * Y -> Prop) :
enumerable p -> enumerable (fun y => exists x, p (x,y)).
Proof.
intros [f].
exists (fun n => match f n with Some (x, y) => Some y | None => None end).
intros y; split.
- intros [x ?]. eapply H in H0 as [n]. exists n. now rewrite H0.
- intros [n ?]. destruct (f n) as [ [] | ] eqn:E; inversion H0; subst.
exists x. eapply H. eauto.
Qed.
|
C->>> -----------------------------------> ems_ca_u_lg_sed_wt <<<
c Calls the routines to update steepest edge weights and possibly
c update dual activities or do CHUZC.
c
subroutine ems_ca_u_lg_sed_wt(
& pv_c_sgn,
& u_du_act,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
include 'RSMICS.INC'
include 'RSMICOM.INC'
include 'ICTVR.INC'
include 'EMSMSG.INC'
CM IF (emsol_tt .EQ. 1) THEN
C? include 'EMSTT.INC'
CM ENDIF
integer pv_c_sgn
logical u_du_act
integer vr_in_c(-vr_in_c_n_sn:n_c+1)
double precision du_act(0:mx_n_c+n_r)
double precision pi_v(0:n_r)
integer pi_ix(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
double precision rcp_pv
if (vr_t_en_bs .le. 0 .or. vr_t_lv_bs .eq. 0 .or.
& vr_t_en_bs .eq. vr_t_lv_bs) then
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9900)
& vr_t_en_bs, vr_t_lv_bs
call ems_msg_wr_li(bug_msg_n)
CM IF (emsol_deb .EQ. 1) THEN
C? call ems_dump
CM ENDIF
go to 7000
endif
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
ed_wt(vr_t_lv_bs) = inf
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(u_lg_sed_wt_tt, n_bs)
CM ENDIF
if (u_du_act) then
call ems_u_lg_sed_wt_du_act(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c)
else
call ems_u_lg_sed_wt(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c)
endif
if (vr_t_lv_bs .ge. mx_n_c) then
c
c Set the weight for the variable which has left the basis.
c
rcp_pv = one/pv
ed_wt(vr_t_lv_bs) = ed_wt_o_vr_t_en_bs*rcp_pv*rcp_pv
endif
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(-u_lg_sed_wt_tt, n_bs)
CM ENDIF
7000 continue
return
9900 format(
& 'Calling u_lg_sed_wt with vr_t_en_bs, vr_t_lv_bs = ',
& i9, i9)
end
C->>> ----------------------------------------> ems_u_lg_sed_wt <<<
c Updates steepest edge weights for logicals.
c
subroutine ems_u_lg_sed_wt(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_t_en_bs
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer pi_ix(0:n_r)
double precision pi_v(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
integer vr_n, c_n, ix_n, r_n
integer c_loop_ln
double precision ed_wt_o_vr_t_en_bs, su
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
c_loop_ln = vr_in_c(os_lg_in_c_l_pc_p)
if (pi_ix(0) .gt. n_r .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. pi_ix(0))) then
do 10, c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
r_n = vr_n - mx_n_c
CM IF (emsol_da .EQ. 1) THEN
C? if (pi_v(r_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (pi_v(r_n) .eq. zero) goto 10
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 10
su = pi_v(r_n)*ed_wt_o_vr_t_en_bs +
& pv_c_sgn*btran_o_pv_c(r_n)
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*su
10 continue
else
do 20, ix_n = 1, pi_ix(0)
r_n = pi_ix(ix_n)
CM IF (emsol_da .EQ. 1) THEN
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
CM ENDIF
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 20
su = pi_v(r_n)*ed_wt_o_vr_t_en_bs +
& pv_c_sgn*btran_o_pv_c(r_n)
vr_n = mx_n_c + r_n
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*su
20 continue
endif
return
end
C->>> -----------------------------> ems_u_lg_sed_wt_du_act <<<
c Updates steepest edge weights and dual activities for logicals.
c
subroutine ems_u_lg_sed_wt_du_act(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_t_en_bs
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer pi_ix(0:n_r)
double precision du_act(0:mx_n_c+n_r)
double precision pi_v(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
integer vr_n, c_n, ix_n, r_n
integer c_loop_ln
double precision pi_v_mu_1, pi_v_mu_2, su
c
c When btran_o_pv_c is negated, negating pi_v_mu_1 then negates
c pi_v_mu_2. Hence the non-btran_o_pv_c contribution to su is
c negated but the product pi_v_mu_1*su has the right sign.
c
pi_v_mu_1 = pv_c_sgn/du_act(vr_t_en_bs)
pi_v_mu_2 = ed_wt(vr_t_en_bs)*pi_v_mu_1
c
c Update the weights (and costs) for the nonbasic variables. This
c does not give the correct weight for the variable which has just
c left the basis---this is assigned at the end of the routine---but
c it does give the correct reduced cost for this variable. This must
c be done here. If the cost of this variable does not change then
c its reduced cost is pi_rhs_v_in_pv_r*rcp_pv.
c However, if the variable becomes feasible as it leaves the basis
c then its reduced cost is not pi_rhs_v_in_pv_r*rcp_pv because
c its objective coefficient changes---by an amount delta. In this
c case the reduced cost is delta + pi_rhs_v_in_pv_r*rcp_pv.
c This is achieved by the calling routine initialising the reduced
c cost to zero or delta accordingly so that the correct updated
c reduced cost will be obtained by adding pi_v(pv_r_n)---which is
c rcp_pv*pi_rhs_v_in_pv_r.
c
c_loop_ln = vr_in_c(os_lg_in_c_l_pc_p)
if (pi_ix(0) .gt. n_r .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. pi_ix(0))) then
do 10, c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
r_n = vr_n - mx_n_c
CM IF (emsol_da .EQ. 1) THEN
C? if (pi_v(r_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (pi_v(r_n) .eq. zero) goto 10
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 10
su = pi_v(r_n)*pi_v_mu_2 + btran_o_pv_c(r_n)
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*pi_v_mu_1*su
du_act(vr_n) = du_act(vr_n) - pi_v(r_n)
10 continue
else
do 20, ix_n = 1, pi_ix(0)
r_n = pi_ix(ix_n)
CM IF (emsol_da .EQ. 1) THEN
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
CM ENDIF
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 20
su = pi_v(r_n)*pi_v_mu_2 + btran_o_pv_c(r_n)
vr_n = mx_n_c + r_n
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*pi_v_mu_1*su
du_act(vr_n) = du_act(vr_n) - pi_v(r_n)
20 continue
endif
return
end
C->>> ------------------------------> ems_perm_ca_u_lg_sed_wt <<<
c Calls the routines to update steepest edge weights and possibly
c update dual activities or do CHUZC.
c
subroutine ems_perm_ca_u_lg_sed_wt(
& pv_c_sgn,
& u_du_act,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c,
& og_t_nw_perm,
& nw_t_og_perm)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
include 'RSMICS.INC'
include 'RSMICOM.INC'
include 'ICTVR.INC'
include 'EMSMSG.INC'
CM IF (emsol_tt .EQ. 1) THEN
C? include 'EMSTT.INC'
CM ENDIF
integer pv_c_sgn
logical u_du_act
integer vr_in_c(-vr_in_c_n_sn:n_c+1)
integer pi_ix(0:n_r)
integer og_t_nw_perm(0:n_r)
integer nw_t_og_perm(0:n_r)
double precision du_act(0:mx_n_c+n_r)
double precision pi_v(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
double precision rcp_pv
if (vr_t_en_bs .le. 0 .or. vr_t_lv_bs .eq. 0 .or.
& vr_t_en_bs .eq. vr_t_lv_bs) then
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9900)
& vr_t_en_bs, vr_t_lv_bs
call ems_msg_wr_li(warn_msg_n)
CM IF (emsol_deb .EQ. 1) THEN
C? call ems_dump
CM ENDIF
go to 7000
endif
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
ed_wt(vr_t_lv_bs) = inf
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(u_lg_sed_wt_tt, n_bs)
CM ENDIF
if (u_du_act) then
call ems_perm_u_lg_sed_wt_du_act(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c,
& og_t_nw_perm,
& nw_t_og_perm)
else
call ems_perm_u_lg_sed_wt(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c,
& og_t_nw_perm,
& nw_t_og_perm)
endif
if (vr_t_lv_bs .ge. mx_n_c) then
c
c Set the weight for the variable which has left the basis.
c
rcp_pv = one/pv
ed_wt(vr_t_lv_bs) = ed_wt_o_vr_t_en_bs*rcp_pv*rcp_pv
endif
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(-u_lg_sed_wt_tt, n_bs)
CM ENDIF
7000 continue
return
9900 format(
& 'Calling u_lg_sed_wt with vr_t_en_bs, vr_t_lv_bs = ',
& i9, i9)
end
C->>> -----------------------------------> ems_perm_u_lg_sed_wt <<<
c Updates steepest edge weights for logicals.
c
subroutine ems_perm_u_lg_sed_wt(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c,
& og_t_nw_perm,
& nw_t_og_perm)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_t_en_bs
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer pi_ix(0:n_r)
integer og_t_nw_perm(0:n_r)
integer nw_t_og_perm(0:n_r)
double precision pi_v(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
integer vr_n, nw_r_n, c_n, ix_n, r_n, og_r_n
integer c_loop_ln
double precision ed_wt_o_vr_t_en_bs, su
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
c_loop_ln = vr_in_c(os_lg_in_c_l_pc_p)
if (pi_ix(0) .gt. n_r .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. pi_ix(0))) then
do 10, c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
r_n = vr_n - mx_n_c
nw_r_n = og_t_nw_perm(r_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (pi_v(nw_r_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (pi_v(nw_r_n) .eq. zero) goto 10
if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze) goto 10
su = pi_v(nw_r_n)*ed_wt_o_vr_t_en_bs +
& pv_c_sgn*btran_o_pv_c(r_n)
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(nw_r_n)*su
10 continue
else
do 20, ix_n = 1, pi_ix(0)
r_n = pi_ix(ix_n)
og_r_n = nw_t_og_perm(r_n)
CM IF (emsol_da .EQ. 1) THEN
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
CM ENDIF
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 20
su = pi_v(r_n)*ed_wt_o_vr_t_en_bs +
& pv_c_sgn*btran_o_pv_c(og_r_n)
vr_n = mx_n_c + og_r_n
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*su
20 continue
endif
return
end
C->>> ------------------------> ems_perm_u_lg_sed_wt_du_act <<<
c Updates steepest edge weights and dual activities for logicals.
c
subroutine ems_perm_u_lg_sed_wt_du_act(
& pv_c_sgn,
& vr_t_en_bs,
& vr_in_c,
& du_act,
& pi_v,
& pi_ix,
& ed_wt,
& btran_o_pv_c,
& og_t_nw_perm,
& nw_t_og_perm)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_t_en_bs
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer pi_ix(0:n_r)
integer og_t_nw_perm(0:n_r)
integer nw_t_og_perm(0:n_r)
double precision du_act(0:mx_n_c+n_r)
double precision pi_v(0:n_r)
double precision ed_wt(0:mx_n_c+n_r)
double precision btran_o_pv_c(0:n_r)
integer vr_n, nw_r_n, c_n, ix_n, r_n, og_r_n
integer c_loop_ln
double precision pi_v_mu_1, pi_v_mu_2, su
pi_v_mu_1 = pv_c_sgn/du_act(vr_t_en_bs)
pi_v_mu_2 = ed_wt(vr_t_en_bs)*pi_v_mu_1
c_loop_ln = vr_in_c(os_lg_in_c_l_pc_p)
if (pi_ix(0) .gt. n_r .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. pi_ix(0))) then
do 10 c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
r_n = vr_n - mx_n_c
nw_r_n = og_t_nw_perm(r_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (pi_v(nw_r_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (pi_v(nw_r_n) .eq. zero) goto 10
if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze) goto 10
su = pi_v(nw_r_n)*pi_v_mu_2 + btran_o_pv_c(r_n)
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(nw_r_n)*pi_v_mu_1*su
du_act(vr_n) = du_act(vr_n) - pi_v(nw_r_n)
10 continue
else
do 20, ix_n = 1, pi_ix(0)
r_n = pi_ix(ix_n)
og_r_n = nw_t_og_perm(r_n)
CM IF (emsol_da .EQ. 1) THEN
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(pi_v(nw_r_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
CM ENDIF
if (abs(pi_v(r_n)) .le. u_ed_wt_ze) goto 20
su = pi_v(r_n)*pi_v_mu_2 + btran_o_pv_c(og_r_n)
vr_n = mx_n_c + og_r_n
ed_wt(vr_n) = ed_wt(vr_n) + pi_v(r_n)*pi_v_mu_1*su
du_act(vr_n) = du_act(vr_n) - pi_v(r_n)
20 continue
endif
return
end
C->>> ---------------------------------------> ems_ca_u_struc_sed_wt <<<
c Calls the routines to update steepest edge weights and possibly
c update dual activities or do CHUZC.
c
subroutine ems_ca_u_struc_sed_wt(
& pv_c_sgn,
& u_du_act,
& vr_in_c,
& du_act,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
include 'RSMICS.INC'
include 'RSMICOM.INC'
include 'ICTVR.INC'
include 'EMSMSG.INC'
CM IF (emsol_tt .EQ. 1) THEN
C? include 'EMSTT.INC'
CM ENDIF
integer pv_c_sgn
logical u_du_act
integer vr_in_c(-vr_in_c_n_sn:n_c+1)
integer r_ix(0:n_a_el), c_sa(0:n_c+1)
double precision du_act(0:mx_n_c+n_r)
double precision tbu_r_v(0:mx_n_c+n_r)
integer tbu_r_ix(0:n_c)
double precision ed_wt(0:mx_n_c+n_r)
double precision r_v(0:n_a_el)
double precision btran_o_pv_c(0:n_r)
double precision rcp_pv
integer struc_in_c_l_pc_p_p1, sv_vr_in_c, r_n
double precision sv_tbu_r_v
if (vr_t_en_bs .le. 0 .or. vr_t_lv_bs .eq. 0 .or.
& vr_t_en_bs .eq. vr_t_lv_bs) then
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9900)
& vr_t_en_bs, vr_t_lv_bs
call ems_msg_wr_li(warn_msg_n)
CM IF (emsol_deb .EQ. 1) THEN
C? call ems_dump
CM ENDIF
go to 7000
endif
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(u_struc_sed_wt_tt, n_bs)
CM ENDIF
if (u_du_act) then
if (iand(asm_msk, asm_u_sed_wt) .ne. 0) then
struc_in_c_l_pc_p_p1 = vr_in_c(os_struc_in_c_l_pc_p) + 1
sv_vr_in_c = vr_in_c(struc_in_c_l_pc_p_p1)
vr_in_c(struc_in_c_l_pc_p_p1) = 0
sv_tbu_r_v = tbu_r_v(0)
tbu_r_v(0) = 1.0d0
CM IF (emsol_asm .EQ. 1) THEN
C? call ems_core_u_struc_sed_wt(
C? & pv_c_sgn,
C? & vr_t_en_bs,
C? & vr_in_c,
C? & du_act,
C? & tbu_r_v,
C? & ed_wt,
C? & r_v,
C? & r_ix,
C? & c_sa,
C? & btran_o_pv_c)
CM ELSE
call ems_u_struc_sed_wt_du_act(
& pv_c_sgn,
& vr_in_c,
& du_act,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
CM ENDIF
vr_in_c(struc_in_c_l_pc_p_p1) = sv_vr_in_c
tbu_r_v(0) = sv_tbu_r_v
else
call ems_u_struc_sed_wt_du_act(
& pv_c_sgn,
& vr_in_c,
& du_act,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
endif
else
call ems_u_struc_sed_wt(
& pv_c_sgn,
& vr_in_c,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
endif
if (vr_t_lv_bs .le. n_c) then
c
c Set the weight for the variable which has left the basis.
c
rcp_pv = one/pv
ed_wt(vr_t_lv_bs) = ed_wt_o_vr_t_en_bs*rcp_pv*rcp_pv
endif
c
c Zero the vector which held the BTRANned pivotal column.
c
do 10, r_n = 1, n_r
btran_o_pv_c(r_n) = zero
10 continue
if (iand(ck_msk, ze_a_ck_bt) .ne. 0)
& call ems_ck_ze_rl_a(n_c, tbu_r_v)
CM IF (emsol_tt .EQ. 1) THEN
C? if (ems_tt_pc_lvl1) call ems_tt_rec(-u_struc_sed_wt_tt, n_bs)
CM ENDIF
7000 continue
return
9900 format(
& 'Calling u_struc_sed_wt with vr_t_en_bs, vr_t_lv_bs = ',
& i9, i9)
end
C->>> -------------------------------------> ems_u_struc_sed_wt <<<
c Updates steepest edge weights for structurals.
c
subroutine ems_u_struc_sed_wt(
& pv_c_sgn,
& vr_in_c,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RSMICOM.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer r_ix(0:n_a_el), c_sa(0:n_c+1)
double precision tbu_r_v(0:mx_n_c+n_r)
integer tbu_r_ix(0:n_c)
double precision ed_wt(0:mx_n_c+n_r)
double precision r_v(0:n_a_el)
double precision btran_o_pv_c(0:n_r)
integer c_loop_ln, vr_n, c_n, el_n, ix_n
double precision su
integer tbu_r_n_nz, tbu_r_n_c
ed_wt_o_vr_t_en_bs = ed_wt(vr_t_en_bs)
c_loop_ln =
& vr_in_c(os_struc_in_c_l_pc_p) -
& vr_in_c(os_struc_in_c_f_p_m1)
tbu_r_n_c = c_loop_ln
if (tbu_r_ix(0) .gt. n_c .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. tbu_r_ix(0))) then
tbu_r_n_nz = 0
do 20 c_n = vr_in_c(os_struc_in_c_f_p_m1) + 1,
& vr_in_c(os_struc_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (tbu_r_v(vr_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (tbu_r_v(vr_n) .eq. zero) goto 20
tbu_r_n_nz = tbu_r_n_nz + 1
if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze) goto 15
su = pv_c_sgn*tbu_r_v(vr_n)*ed_wt_o_vr_t_en_bs
do 10 el_n = c_sa(vr_n), c_sa(vr_n+1)-1
su = su + r_v(el_n)*btran_o_pv_c(r_ix(el_n))
10 continue
ed_wt(vr_n) = ed_wt(vr_n) + tbu_r_v(vr_n)*pv_c_sgn*su
15 continue
tbu_r_v(vr_n) = zero
20 continue
else
tbu_r_n_nz = tbu_r_ix(0)
do 120, ix_n = 1, tbu_r_ix(0)
vr_n = tbu_r_ix(ix_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (tbu_r_v(vr_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (tbu_r_v(vr_n) .eq. zero) goto 120
if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze) goto 115
su = pv_c_sgn*tbu_r_v(vr_n)*ed_wt_o_vr_t_en_bs
do 110 el_n = c_sa(vr_n), c_sa(vr_n+1)-1
su = su + r_v(el_n)*btran_o_pv_c(r_ix(el_n))
110 continue
ed_wt(vr_n) = ed_wt(vr_n) + tbu_r_v(vr_n)*pv_c_sgn*su
115 continue
tbu_r_v(vr_n) = zero
120 continue
endif
c
c Get the density of the tableau row (if required).
c
if (tbu_r_dse .lt. zero) then
if (tbu_r_n_c .le. 0) then
c
c This situation---no nonbasic structurals---is pretty unlikely.
c Setting tbu_r_dse to an illegal value (rather than 0 or 1) seems
c sensible.
c
tbu_r_dse = two
else
tbu_r_dse = float(tbu_r_n_nz)/float(tbu_r_n_c)
endif
endif
return
end
C->>> --------------------------> ems_u_struc_sed_wt_du_act <<<
c Updates steepest edge weights and dual activities for structurals.
c
subroutine ems_u_struc_sed_wt_du_act(
& pv_c_sgn,
& vr_in_c,
& du_act,
& tbu_r_v,
& tbu_r_ix,
& ed_wt,
& r_v,
& r_ix,
& c_sa,
& btran_o_pv_c)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
CM IF (emsol_da .EQ. 1) THEN
C? include 'EMSDA.INC'
CM ENDIF
include 'ICTVR.INC'
include 'RSMICOM.INC'
include 'RLCTVR.INC'
integer pv_c_sgn
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer r_ix(0:n_a_el), c_sa(0:n_c+1)
double precision du_act(0:mx_n_c+n_r)
double precision tbu_r_v(0:mx_n_c+n_r)
integer tbu_r_ix(0:n_c)
double precision ed_wt(0:mx_n_c+n_r)
double precision r_v(0:n_a_el)
double precision btran_o_pv_c(0:n_r)
integer c_loop_ln, vr_n, c_n, el_n, ix_n
double precision tbu_r_v_mu_1, tbu_r_v_mu_2, su
integer tbu_r_n_nz, tbu_r_n_c
tbu_r_v_mu_1 = pv_c_sgn/du_act(vr_t_en_bs)
tbu_r_v_mu_2 = ed_wt(vr_t_en_bs)*tbu_r_v_mu_1
c_loop_ln =
& vr_in_c(os_struc_in_c_l_pc_p) -
& vr_in_c(os_struc_in_c_f_p_m1)
tbu_r_n_c = c_loop_ln
if (tbu_r_ix(0) .gt. n_c .or.
& tbu_r_loop_mode .eq. tbu_r_loop_no .or.
& (tbu_r_loop_mode .eq. tbu_r_loop_poss .and.
& 2*c_loop_ln .le. tbu_r_ix(0))) then
tbu_r_n_nz = 0
do 20 c_n = vr_in_c(os_struc_in_c_f_p_m1) + 1,
& vr_in_c(os_struc_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (tbu_r_v(vr_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (tbu_r_v(vr_n) .eq. zero) goto 20
tbu_r_n_nz = tbu_r_n_nz + 1
if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze) goto 15
su = tbu_r_v(vr_n)*tbu_r_v_mu_2
do 10 el_n = c_sa(vr_n), c_sa(vr_n+1)-1
su = su + r_v(el_n)*btran_o_pv_c(r_ix(el_n))
10 continue
ed_wt(vr_n) = ed_wt(vr_n) + tbu_r_v(vr_n)*tbu_r_v_mu_1*su
du_act(vr_n) = du_act(vr_n) + tbu_r_v(vr_n)
15 continue
tbu_r_v(vr_n) = zero
20 continue
else
tbu_r_n_nz = tbu_r_ix(0)
do 120, ix_n = 1, tbu_r_ix(0)
vr_n = tbu_r_ix(ix_n)
CM IF (emsol_da .EQ. 1) THEN
C? if (tbu_r_v(vr_n) .ne. zero) then
C? su_n_u_ed_wt_en = su_n_u_ed_wt_en + 1
C? if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze)
C? & su_n_u_ed_wt_ze = su_n_u_ed_wt_ze + 1
C? endif
CM ENDIF
if (tbu_r_v(vr_n) .eq. zero) goto 120
if (abs(tbu_r_v(vr_n)) .le. u_ed_wt_ze) goto 115
su = tbu_r_v(vr_n)*tbu_r_v_mu_2
do 110 el_n = c_sa(vr_n), c_sa(vr_n+1)-1
su = su + r_v(el_n)*btran_o_pv_c(r_ix(el_n))
110 continue
ed_wt(vr_n) = ed_wt(vr_n) + tbu_r_v(vr_n)*tbu_r_v_mu_1*su
du_act(vr_n) = du_act(vr_n) + tbu_r_v(vr_n)
115 continue
tbu_r_v(vr_n) = zero
120 continue
endif
c
c Get the density of the tableau row (if required).
c
if (tbu_r_dse .lt. zero) then
if (tbu_r_n_c .le. 0) then
c
c This situation---no nonbasic structurals---is pretty unlikely.
c Setting tbu_r_dse to an illegal value (rather than 0 or 1) seems
c sensible.
c
tbu_r_dse = two
else
tbu_r_dse = float(tbu_r_n_nz)/float(tbu_r_n_c)
endif
endif
return
end
C->>> --------------------------------------------> ems_iz_lg_sed_wt <<<
c Initialises the steepest edge weights (as if) for a logical basis.
c
subroutine ems_iz_lg_sed_wt(
& vr_in_r, vr_in_c,
& r_v, c_sa, ed_wt)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
include 'ICTVR.INC'
include 'EMSMSG.INC'
integer vr_in_r(0:n_r)
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer c_sa(0:n_c+1)
double precision r_v(0:n_a_el), ed_wt(0:mx_n_c+n_r)
integer r_n, c_n, vr_n, el_n
logical ed_wt_er
ed_wt_er = .false.
do 5, r_n = 1, n_r
vr_n = vr_in_r(r_n)
ed_wt(vr_n) = zero
if (vr_n .le. n_c) ed_wt_er = .true.
5 continue
if (ed_wt_er) then
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9000)
call ems_msg_wr_li(warn_msg_n)
endif
do 10, c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
ed_wt(vr_n) = one
c ed_wt(vr_n) = half
c
c Surely wrong: corrected by JAJH 13/02/97: Error not noticed since
c logical basis means no logicals in pricing list
c
10 continue
do 30, c_n = vr_in_c(os_struc_in_c_f_p_m1) + 1,
& vr_in_c(os_struc_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
ed_wt(vr_n) = one
do 20 el_n = c_sa(vr_n), c_sa(vr_n+1)-1
ed_wt(vr_n) = ed_wt(vr_n) + r_v(el_n)*r_v(el_n)
20 continue
ed_wt(vr_n) = ed_wt(vr_n)*half
30 continue
c
c Indicate that the edge weight information is correct.
c
ml_da_st_msk = ior(ml_da_st_msk, ml_da_st_ed_wt)
return
9000 format('Initialising steepest edge weights for a',
& ' non-logical basis as if it were logical')
end
C->>> ------------------------------------------> ems_iz_sed_wt <<<
c Initialises the steepest edge weights for a general basis.
c
subroutine ems_iz_sed_wt(
& vr_in_r, vr_in_c,
& r_v, c_sa, ed_wt, ds, is)
implicit none
include 'EMSV.INC'
include 'EMSPM.INC'
include 'EMSMMGR.INC'
include 'EMSMEM.INC'
include 'EMSP.INC'
include 'ICTVR.INC'
include 'EMSMSG.INC'
integer vr_in_r(0:n_r)
integer vr_in_c(-vr_in_c_n_sn:n_c)
integer c_sa(0:n_c+1)
integer is(0:is_n_en_m1)
double precision r_v(0:n_a_el), ed_wt(0:mx_n_c+n_r)
double precision ds(0:ds_n_en_m1)
double precision ems_scpr
integer r_n, c_n, vr_n
integer rl_wk_a_ix
integer i_wk_a_ix
c
c Check whether the basis is actually logical.
c
do 5, r_n = 1, n_r
vr_n = vr_in_r(r_n)
ed_wt(vr_n) = zero
if (vr_n .le. n_c) goto 10
5 continue
c
c The basis is logical so calculate the weights from the constraint
c matrix columns.
c
call ems_iz_lg_sed_wt(vr_in_r, vr_in_c, r_v, c_sa, ed_wt)
goto 7000
10 continue
c
c Initialising steepest edge weights is expensive!!
c
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9000)
call ems_msg_wr_li(info_msg_n)
call ems_g_rsmi_rl_wk_a_ix(rl_wk_a_ix)
if (rl_wk_a_ix .lt. 0) goto 8000
call ems_g_rsmi_i_wk_a_ix(i_wk_a_ix)
if (i_wk_a_ix .lt. 0) goto 8000
do 20, c_n = 1, vr_in_c(os_lg_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
do 15, r_n = 1, n_r
ds(p_rsmi_rl_wk_a(rl_wk_a_ix)+r_n) = zero
15 continue
if (sto_ftran_ix .eq. sto_ix_y) then
is(p_rsmi_i_wk_a(i_wk_a_ix)) = 0
else
is(p_rsmi_i_wk_a(i_wk_a_ix)) = n_r+1
endif
call ems_g_rhs(1, vr_n,
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& is(p_rsmi_i_wk_a(i_wk_a_ix)),
& ds, is)
call ems_ftran(
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& is(p_rsmi_i_wk_a(i_wk_a_ix)),
& ds, is)
ed_wt(vr_n) = ems_scpr(one,
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)), n_r)*half
20 continue
do 30, c_n = vr_in_c(os_struc_in_c_f_p_m1) + 1,
& vr_in_c(os_struc_in_c_l_pc_p)
vr_n = vr_in_c(c_n)
do 25, r_n = 1, n_r
ds(p_rsmi_rl_wk_a(rl_wk_a_ix)+r_n) = zero
25 continue
if (sto_ftran_ix .eq. sto_ix_y) then
is(p_rsmi_i_wk_a(i_wk_a_ix)) = 0
else
is(p_rsmi_i_wk_a(i_wk_a_ix)) = n_r+1
endif
call ems_g_rhs(1, vr_n,
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& is(p_rsmi_i_wk_a(i_wk_a_ix)),
& ds, is)
call ems_ftran(
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& is(p_rsmi_i_wk_a(i_wk_a_ix)),
& ds, is)
ed_wt(vr_n) = ems_scpr(one,
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)),
& ds(p_rsmi_rl_wk_a(rl_wk_a_ix)), n_r)*half
30 continue
call ems_fr_rsmi_rl_wk_a_ix(rl_wk_a_ix)
call ems_fr_rsmi_i_wk_a_ix(i_wk_a_ix)
c
c Indicate that the edge weight information is correct.
c
ml_da_st_msk = ior(ml_da_st_msk, ml_da_st_ed_wt)
7000 continue
return
8000 continue
if (ems_msg_no_prt_fm .ge. 1) write(ems_li, 9800)
call ems_msg_wr_li(bug_msg_n)
goto 7000
9000 format('Initialising steepest edge weights for a non-logical',
& ' basis is expensive')
9800 format('RSMI workspace not available in ems_iz_sed_wt')
end
|
PROGRAM FISPWRZI
C
C Define the error file, the Fortran unit number, the workstation type,
C and the workstation ID to be used in calls to GKS routines.
C
C PARAMETER (IERRF=6, LUNIT=2, IWTYPE=1, IWKID=1) ! NCGM
C PARAMETER (IERRF=6, LUNIT=2, IWTYPE=8, IWKID=1) ! X Windows
C PARAMETER (IERRF=6, LUNIT=2, IWTYPE=11, IWKID=1) ! PDF
C PARAMETER (IERRF=6, LUNIT=2, IWTYPE=20, IWKID=1) ! PostScript
C
PARAMETER (IERRF=6, LUNIT=2, IWTYPE=1, IWKID=1)
C
C Open GKS, open and activate a workstation.
C
CALL GOPKS (IERRF, ISZDM)
CALL GOPWK (IWKID, LUNIT, IWTYPE)
CALL GACWK (IWKID)
C
C INVOKE DEMO DRIVER
C
CALL TPWRZI(IWKID,IERR)
C
C DEACTIVATE AND CLOSE WORKSTATION, CLOSE GKS.
C
CALL GDAWK (IWKID)
CALL GCLWK (IWKID)
CALL GCLKS
C
STOP
END
C
SUBROUTINE TPWRZI (IWKID,IERROR)
C
C PURPOSE To provide a simple demonstration of
C entry PWRZI with the ISOSRF utility.
C
C USAGE CALL TPWRZI (IWKID,IERROR)
C
C ARGUMENTS
C
C ON OUTPUT IERROR
C An integer variable
C = 0, if the test was successful,
C = 1, otherwise
C
C ON INPUT IWKID
C A workstation id
C
C I/O If the test is successful, the message
C
C PWRZI TEST EXECUTED--SEE PLOTS TO CERTIFY
C
C is printed on unit 6. In addition, 1
C frame is produced on the machine graphics
C device. In order to determine if the test
C was successful, it is necessary to examine
C the plot.
C
C PRECISION Single
C
C REQUIRED ROUTINES PWRZI, ISOSRF
C
C REQUIRED GKS LEVEL 0A
C
C LANGUAGE FORTRAN 77
C
C ALGORITHM A function of 3 variables is defined and the
C values of the function on a 3-D rectangular
C grid are stored in an array. This test routine
C then calls ISOSRF to draw an iso-valued surface
C plot of the function. PWRZI is then called 3
C times to label the front, side, and back of
C the picture.
C
DIMENSION T(21,31,19),SLAB(33,33),EYE(3)
C
C Define the center of a plot title string on a square grid of size
C 0. to 1.
C
DATA TX/0.4375/, TY/0.9667/
C
DATA NU,NV,NW/21,31,19/
DATA RBIG1,RBIG2,RSML1,RSML2/6.,6.,2.,2./
DATA TISO/0./
DATA MUVWP2/33/
DATA IFLAG/-7/
C
C Initialize the error parameter.
C
IERROR = 1
C
C Set up a color table
C
C White background
C
CALL GSCR (IWKID,0,1.,1.,1.)
C
C Black foreground
C
CALL GSCR (IWKID,1,0.,0.,0.)
C
C Red
C
CALL GSCR (IWKID,2,1.,0.,0.)
C
C Green
C
CALL GSCR (IWKID,3,0.,1.,0.)
C
C Blue
C
CALL GSCR (IWKID,4,0.,0.,1.)
C
C Fill the 3-D array to be plotted.
C
JCENT1 = REAL(NV)*.5-RBIG1*.5
JCENT2 = REAL(NV)*.5+RBIG2*.5
DO 30 I=1,NU
FIMID = I-NU/2
DO 20 J=1,NV
FJMID1 = J-JCENT1
FJMID2 = J-JCENT2
DO 10 K=1,NW
FKMID = K-NW/2
F1 = SQRT(RBIG1*RBIG1/(FJMID1*FJMID1+FKMID*FKMID+.1))
F2 = SQRT(RBIG2*RBIG2/(FIMID*FIMID+FJMID2*FJMID2+.1))
FIP1 = (1.-F1)*FIMID
FIP2 = (1.-F2)*FIMID
FJP1 = (1.-F1)*FJMID1
FJP2 = (1.-F2)*FJMID2
FKP1 = (1.-F1)*FKMID
FKP2 = (1.-F2)*FKMID
T(I,J,K) = MIN(FIMID*FIMID+FJP1*FJP1+FKP1*FKP1-
1 RSML1*RSML1,
2 FKMID*FKMID+FIP2*FIP2+FJP2*FJP2-RSML2*RSML2)
10 CONTINUE
20 CONTINUE
30 CONTINUE
C
C Define the eye position.
C
EYE(1) = 100.
EYE(2) = 150.
EYE(3) = 125.
C
C Select normalization transformation number 0.
C
CALL GSELNT (0)
C
C Label the plot.
C
CALL PLCHLQ (TX,TY,'DEMONSTRATION PLOT FOR PWRZI',16.,0.,0.)
C
C Test ISOSRF with subarray T.
C
MU = NU/2
MV = NV/2
MW = NW/2
MUVWP2 = MAX(MU,MV,MW)+2
C
C Set the line color of the isosurface
C
CALL GSPLCI(4)
CALL ISOSRF (T(MU,MV,MW),NU,MU,NV,MV,MW,EYE,MUVWP2,SLAB,TISO,
1 IFLAG)
ISIZE = 35
C
C Set the line color of the text
C
CALL GSPLCI(2)
CALL PWRZI (5.,16.,.5,'FRONT',5,ISIZE,-1,3,0)
CALL PWRZI (11.,7.5,.5,'SIDE',4,ISIZE,2,-1,0)
CALL PWRZI (5.,1.,5.,' BACK BACK BACK BACK BACK',25,ISIZE,-1,3,0)
CALL FRAME
IERROR = 0
C
WRITE (6,1001)
RETURN
C
1001 FORMAT (' PWRZI TEST EXECUTED--SEE PLOT TO CERTIFY')
C
END
|
## 8.4 Principal Component Regression
- We can determine the principal components of our data set from a singular value decomposition (SVD) of the
covariance matrix $X^{T}X$:
$$X^{T}X=V\Sigma V^{T}$$
- Projecting the data onto the principal components defines a new data set
$$Z=XV$$
- We can truncate the new set of vectors to exclude components with small eigenvalues (features/dimensions that contribute little variance)
- Principal component regression:
$$ \vec{w}_{\text{PCA}}=\left( \begin{array}{c}
1 \\
1 \\
1 \\
0 \\
\vdots\\
0 \end{array}\right)
\hskip{1in}
M_{\text{z}}=\left( \begin{array}{ccccccc}
1 & z_{01} & z_{02} & z_{03} & 0 & \cdots & 0 \\
1 & z_{11} & z_{12} & z_{13} & 0 & \cdots & 0 \\
1 & z_{21} & z_{22} & z_{23} & 0 & \cdots & 0 \\
\vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \\
1 & z_{N1} & z_{N2} & z_{N3} & 0 & \cdots & 0 \end{array}\right)
$$
- PCA has an advantage over ridge regression when the data contain many collinear independent variables (high covariance between predictors)
- In this case the regression coefficients $\theta_{i}$ have high variance and their solutions can become unstable (a minimal PCR sketch follows below)
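A minimal PCR sketch (an illustration, not from the text; hypothetical synthetic data, numpy only): build the principal components from the SVD of the centered data, truncate to the top $k$, and regress on the projected features $Z$.

```python
import numpy as np

rng = np.random.RandomState(42)
X = rng.randn(200, 5)
X[:, 4] = X[:, 0] + 0.01 * rng.randn(200)           # a nearly collinear column
y = X @ np.array([1., 2., 0., 0., 1.]) + 0.1 * rng.randn(200)

Xc = X - X.mean(axis=0)                             # center the data
U, s, Vt = np.linalg.svd(Xc, full_matrices=False)   # X^T X = V diag(s^2) V^T
k = 3                                               # keep the top-k components
Z = Xc @ Vt[:k].T                                   # projected data Z = X V (truncated)
theta = np.linalg.lstsq(Z, y - y.mean(), rcond=None)[0]
y_hat = Z @ theta + y.mean()                        # stable even with collinearity
```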
```python
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import random as ra
from scipy.stats import multivariate_normal
import scipy.interpolate
```
```python
for covar in [0,0.95]:
#Generate bivariate norm distribution
var = multivariate_normal(mean = [0,0], cov = [[1,covar],[covar,1]])
#Randomly draw x1, x2 values from pdf
x1 = []
x2 = []
for i in range(1,30000):
x1rand = 10.*ra.random()-5
x2rand = 10.*ra.random()-5
norm_chance = var.pdf([x1rand, x2rand])
const_chance = ra.random()
if const_chance <= norm_chance:
x1.append(x1rand)
x2.append(x2rand)
#Plot (x1, x2; y) data
alpha = 1
beta = 0.3
l = 0.2
y = [1*x1[i] + 1*x2[i] + beta*(ra.random()-.5) for i in range(len(x1))]
cm = plt.cm.get_cmap('RdYlBu')
plt.scatter(x1,x2, c = y, cmap = cm)
plt.colorbar()
plt.xlabel('x1')
plt.ylabel('x2')
plt.show()
#Generate cost contour
th1, th2 = np.linspace(0, 2, 100), np.linspace(0, 2, 100)
cost = np.zeros((len(th1),len(th2)))
for i in range(len(th1)):
for j in range(len(th2)):
summ=0
for k in range(len(x1)):
summ+=(th1[i]*x1[k]+th2[j]*x2[k]-y[k]+l*(th1[i]**2.0+th2[j]**2.0))**2.0
cost[i,j]=summ
th1, th2 = np.meshgrid(th1, th2)
# Interpolate
rbf = scipy.interpolate.Rbf(th1, th2, cost, function='linear')
cost_inter = rbf(th1, th2)
plt.imshow(cost_inter, vmin=cost_inter.min(), vmax=cost_inter.max(), origin='lower', extent=[-5, 5, -5, 5])
plt.colorbar()
plt.xlabel('Theta 1')
plt.ylabel('Theta 2')
plt.title('Mean-square error')
plt.show()
```
## 8.5 Kernel Regression
- Define a kernel $K\left(x_{i}, x\right)$ local to each data point, with the amplitude of the kernel depending on the distance from the local point to all other points in the sample
- e.g. top-hat or Gaussian kernel
- Nadaraya-Watson estimate of the regression function:
$$f\left(x|K\right)=\frac{\sum_{i=1}^{N}K\left(\frac{||x_{i}-x||}{h}\right)y_{i}}{\sum_{i=1}^{N}K\left(\frac{||x_{i}-x||}{h}\right)}$$
- i.e., the predicted value of the function at $x$ is a weighted average of the y-values of all the points, with the individual weights given by the values of the kernel at that position (see the sketch below)
- Rule of thumb: bandwidth is more important than shape of kernel used
- Optimal bandwidth can be found by minimizing the cost with respect to the bandwidth on a cross-validation set
$$CV_{L_{2}}\left(h\right)=\frac{1}{N}\sum_{i=1}^{N}\left(y_{i}-f\left(x_{i}|K\left(\frac{||x_{i}-x_{j}||}{h}\right)\right)\right)^{2}$$
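A minimal sketch of the Nadaraya-Watson estimator with a Gaussian kernel (toy data and bandwidth are hypothetical; in practice $h$ would come from the cross-validation above):

```python
import numpy as np

def nadaraya_watson(x_train, y_train, x_eval, h):
    """f(x|K): kernel-weighted average of the training y-values."""
    d = np.abs(x_train[:, None] - x_eval[None, :])   # ||x_i - x|| for all pairs
    K = np.exp(-0.5 * (d / h) ** 2)                  # Gaussian kernel weights
    return (K * y_train[:, None]).sum(axis=0) / K.sum(axis=0)

# Toy data: noisy sine curve
rng = np.random.RandomState(0)
x = np.sort(rng.uniform(0, 2 * np.pi, 100))
y = np.sin(x) + 0.3 * rng.randn(100)
x_grid = np.linspace(0, 2 * np.pi, 200)
f_hat = nadaraya_watson(x, y, x_grid, h=0.3)
```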
#### (Figure code by Jake VanderPlas, BSD license; published in "Statistics, Data Mining, and Machine Learning in Astronomy" (2013); see http://astroML.github.com and https://groups.google.com/forum/#!forum/astroml-general)
## 8.6 Locally Linear Regression
- Related to kernel regression, but we solve a separate weighted least-squares problem at each point x, finding the weight $w\left(x\right)$ which minimizes
$$\sum_{i=1}^{N}K\left(\frac{||x-x_{i}||}{h}\right)\left(y_{i}-w\left(x\right)x_{i}\right)^{2}$$
- Assume regression function can be approximated as a Taylor series about any local point; truncation at first term (locally constant solution) leads to Kernel regression
- The estimate for locally linear regression is
$$\begin{align}
f\left(x|K\right)&=\theta\left(x\right)x\\
&=x^{T}\left(\mathbf{X}^{T}W\left(x\right)\mathbf{X}\right)^{-1}\mathbf{X}^{T}W\left(x\right)\mathbf{Y}\\
&=\sum_{i=1}^{N}w_{i}\left(x\right)y_{i}\\
\end{align}
$$
- where $W\left(x\right)$ is an $N\times N$ diagonal matrix with the $i\text{th}$ diagonal element given by $K\left(||x_{i}-x||/h\right)$ (see the sketch below)
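A matching sketch for locally linear regression (hypothetical toy setup; solves the weighted least-squares problem above at each evaluation point):

```python
import numpy as np

def local_linear(x_train, y_train, x0, h):
    """Fit a kernel-weighted linear model around x0 and evaluate it there."""
    w = np.exp(-0.5 * ((x_train - x0) / h) ** 2)        # K(||x - x_i|| / h)
    X = np.vstack([np.ones_like(x_train), x_train]).T   # local design matrix
    A = X.T @ (w[:, None] * X)                          # X^T W(x) X
    b = X.T @ (w * y_train)                             # X^T W(x) Y
    theta = np.linalg.solve(A, b)
    return theta[0] + theta[1] * x0

rng = np.random.RandomState(1)
x = np.sort(rng.uniform(0, 2 * np.pi, 100))
y = np.sin(x) + 0.3 * rng.randn(100)
f_hat = np.array([local_linear(x, y, x0, h=0.4) for x0 in x])
```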
|
Formal statement is: lemmas swap_apply1 = swap_apply(1) Informal statement is: The lemma swap_apply(1) is now called swap_apply1. |
\PassOptionsToPackage{unicode=true}{hyperref} % options for packages loaded elsewhere
\PassOptionsToPackage{hyphens}{url}
\documentclass[11pt,dvipsnames,ignorenonframetext,aspectratio=169]{beamer}
\IfFileExists{pgfpages.sty}{\usepackage{pgfpages}}{}
\setbeamertemplate{caption}[numbered]
\setbeamertemplate{caption label separator}{: }
\setbeamercolor{caption name}{fg=normal text.fg}
\beamertemplatenavigationsymbolsempty
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\usepackage{fixltx2e} % provides \textsubscript
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\else % if luatex or xelatex
\ifxetex
\usepackage{mathspec}
\else
\usepackage{fontspec}
\fi
\defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
\fi
\usetheme[]{monash}
\usecolortheme{monashwhite}
% A default size of 24 is set in beamerthememonash.sty
\useinnertheme{rounded}
\useoutertheme{smoothtree}
% use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
% use microtype if available
\IfFileExists{microtype.sty}{%
\usepackage{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\newif\ifbibliography
\hypersetup{
pdftitle={Developmental genetics},
colorlinks=true,
linkcolor=red,
citecolor=Blue,
urlcolor=lightgrayd,
breaklinks=true}
%\urlstyle{same} % Use monospace font for urls
% Prevent slide breaks in the middle of a paragraph:
\widowpenalties 1 10000
\raggedbottom
\AtBeginPart{
\let\insertpartnumber\relax
\let\partname\relax
\frame{\partpage}
}
\AtBeginSection{
\ifbibliography
\else
\let\insertsectionnumber\relax
\let\sectionname\relax
\frame{\sectionpage}
\fi
}
\AtBeginSubsection{
\let\insertsubsectionnumber\relax
\let\subsectionname\relax
\frame{\subsectionpage}
}
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{0}
%% Monash overrides
\AtBeginSection[]{
\frame<beamer>{
\frametitle{Outline}\vspace*{0.2cm}
\tableofcontents[currentsection,hideallsubsections]
}}
% Redefine shaded environment if it exists (to ensure text is black)
\ifcsname Shaded\endcsname
\definecolor{shadecolor}{RGB}{225,225,225}
\renewenvironment{Shaded}{\color{black}\begin{snugshade}\color{black}}{\end{snugshade}}
\fi
%%
\usepackage{setspace}
\usepackage{wasysym}
% \usepackage{footnote} % don't use this this breaks all
\usepackage{fontenc}
\usepackage{fontawesome}
\usepackage{booktabs,siunitx}
\usepackage{longtable}
\usepackage{array}
\usepackage{multirow}
\usepackage{wrapfig}
\usepackage{float}
\usepackage{colortbl}
\usepackage{pdflscape}
\usepackage{tabu}
\usepackage{threeparttable}
\usepackage{threeparttablex}
\usepackage[normalem]{ulem}
\usepackage{makecell}
\usepackage{xcolor}
\usepackage{tikz} % required for image opacity change
\usepackage[absolute,overlay]{textpos} % for text formatting
\usepackage{chemfig}
\usepackage[skip=0.333\baselineskip]{caption}
% \newcommand*{\AlignChar}[1]{\makebox[1ex][c]{\ensuremath{\scriptstyle#1}}}%
% this font option is amenable for beamer
\setbeamerfont{caption}{size=\tiny}
\singlespacing
\definecolor{lightgrayd}{gray}{0.95}
\definecolor{skyblued}{rgb}{0.65, 0.6, 0.94}
\definecolor{oranged}{RGB}{245, 145, 200}
\title[]{Developmental genetics}
\author[
Deependra Dhakal\\
Gokuleshwor Agriculture and Animal Science College\\
Tribhuwan University\\
\textit{[email protected]}\\
\url{https://rookie.rbind.io}
]{Deependra Dhakal\\
Gokuleshwor Agriculture and Animal Science College\\
Tribhuwan University\\
\textit{[email protected]}\\
\url{https://rookie.rbind.io}}
\date[
Academic year 2019-2020
]{
Academic year 2019-2020
}
\begin{document}
% Hide progress bar and footline on titlepage
\begin{frame}[plain]
\titlepage
\end{frame}
\frame<beamer>{
\frametitle{Outline}\vspace*{0.2cm}
\tableofcontents[hideallsubsections]
}
\hypertarget{variegation-in-biological-tissues}{%
\section{Variegation in biological
tissues}\label{variegation-in-biological-tissues}}
\begin{frame}{Variegation: meaning}
\protect\hypertarget{variegation-meaning}{}
\begin{columns}[T,onlytextwidth]
\small
\column{.30\linewidth}
\begin{itemize}
\item Variegation is the appearance of differently coloured zones in the leaves, and sometimes the stems, of plants. Variegated leaves occur rarely in nature.
\item Causes include: chimeral, pigmentary, pathological (mosaic viruses), and genetic (due to transposable elements)
\end{itemize}
\column{0.70\linewidth}
\begin{figure}
{\centering \includegraphics[width=0.68\linewidth]{../images/saintpaulia_ionantha_variegata}
}
\caption{Variegated \textit{Saintpaulia ionantha} plant}\label{fig:variegated-saintpaulia}
\end{figure}
\end{columns}
\end{frame}
\begin{frame}{Genetics of variegation}
\protect\hypertarget{genetics-of-variegation}{}
\begin{itemize}
\tightlist
\item
Geneticist Hermann Muller discovered an interesting genetic phenomenon
while studying Drosophila: chromosomal neighborhoods exist that can
silence genes that are experimentally ``relocated'' to adjacent
regions of the chromosome.
\item
Flies were irradiated with X rays to induce mutations in their germ
cells.
\item
The progeny of the irradiated flies were screened for unusual
phenotypes. A mutation in the \emph{white} gene, near the tip of the X
chromosome, will result in progeny with white eyes instead of the
wild-type red color.
\item
Some of the progeny had very unusual eyes with patches of white and
red color.
\item
Cytological examination revealed a chromosomal rearrangement in the
mutant flies: present in the X chromosome was an inversion of a piece
of the chromosome carrying the \emph{white} gene.
\item
The white gene, which is normally located in a euchromatic region of
the X chromosome, now finds itself near the heterochromatic
centromere. In some cells, the heterochromatin can ``spread'' to the
neighboring euchromatin and silence the white gene.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section}{}
\begin{figure}
\includegraphics[width=0.55\linewidth]{./../images/drosophila_chromosomal_rearrangement_pev} \caption{Chromosomal rearrangement produces position-effect variegation. Chromosomal inversion places the wild-type white allele close to heterochromatin. The spread of heterochromatin silences the allele. Eye facets are white instead of the wild-type red wherever the allele has been silenced.}\label{fig:chromosomal-rearrangement-pev}
\end{figure}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-1}{}
\begin{itemize}
\tightlist
\item
Patches of white tissue in the eye are derived from the descendants of
a single cell in which the white gene has been silenced and remains
silenced through future cell divisions.
\item
The red patches arise from cells in which heterochromatin has not
spread to the \emph{white} gene, and so this gene remains active in
all its descendants.
\item
Findings from subsequent studies in Drosophila and yeast demonstrated
that many active genes are silenced in this mosaic fashion when they
are relocated to neighborhoods (near centromeres or telomeres) that
are heterochromatic.
\item
The ability of heterochromatin to spread into euchromatin and silence
genes is a feature common to many organisms.
\item
This phenomenon has been called position-effect \textbf{variegation}
(PEV).
\end{itemize}
\end{frame}
\hypertarget{development-and-pattern}{%
\section{Development and pattern}\label{development-and-pattern}}
\begin{frame}{Development}
\protect\hypertarget{development}{}
\begin{itemize}
\tightlist
\item
Development refers to formation of different types of
tissues/organs/cells.
\item
Development involves production of proteins that generate specific
structural and functional phenotypes.
\item
From the genetic perspective, following key questions arise concerning
number, identity, and function of genes taking part in development:
\begin{enumerate}
\tightlist
\item
Which genes are important in development?
\item
Where in the developing animal and at what times are these genes
active?
\item
How is the expression of developmental genes regulated?
\item
Through what molecular mechanisms do gene products affect
development?
\end{enumerate}
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-2}{}
\begin{itemize}
\tightlist
\item
  One of the first considerations in the study of animal genetics is the
  choice of organism -- the model organism -- because it serves as the
  genetic model for animal development.
\item
Many genes are housekeeping genes (cellular metabolism, biosynthesis
of macromolecules).
\item
  Some genes carry out specialized tasks in various organ systems, tissues
and cells (such as antibody proteins, oxygen transporter proteins,
etc.).
\item
Other genes perform building of organs and tissues and the
specification of cell types
\end{itemize}
\end{frame}
\begin{frame}{Differentiation}
\protect\hypertarget{differentiation}{}
\begin{itemize}
\tightlist
\item
Differentiation refers to a permanent or irreversible change in the
function of cells (relative to the single-celled zygote), which is
often accompanied by change in their structures.
\item
Weismann and Roux proposed that different cell types differed in their
gene content and that each cell type contains only those genes that
are needed for its function. But cells do not appear to lose any
genetic material during differentiation, except in a few notable
cases. RBC extrude their nuclei during the last stages of
differentiation, and lymphocytes lose, during differentiation, those
segments of antibody genes, which are not needed in a particular cell
type.
\item
In any differentiated cell type, only a small proportion (less than
10\%) of the genome is transcribed to yield mRNA, e.g., 2-5 percent in
mice liver cells, 8\% in brain cells of the toad \emph{Xenopus}, only
1\% in \emph{Xenopus} oocyte, etc.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-3}{}
\begin{itemize}
\tightlist
\item
In different cell types, different sets of genes are transcribed. The
RNA sequences present in different cell types may differ from each
other by 10-100\%. For example, the mRNAs from \emph{Xenopus laevis}
oocytes and blastula (a stage in embryo development) embryos are
entirely different; those from mouse liver, spleen and kidney cells
differ from each other in sequence composition from about 15-70\%,
etc.
\end{itemize}
\end{frame}
\begin{frame}{Embryo studies in \emph{Drosophila}}
\protect\hypertarget{embryo-studies-in-drosophila}{}
\begin{itemize}
\tightlist
\item
Provided an understanding of formation of basic animal body plan.
\item
Larval exoskeleton of Drosophila easily shows abnormality in the body
plan of a mutant due to its noncellular structure.
\item
  The exoskeleton is made up of a polysaccharide polymer produced by the
  epidermal cells of the embryo.
\item
With intricate pattern of hairs, indentations, and other structures,
the exoskeleton provides numerous landmarks to serve as indicators of
the fates assigned to the many epidermal cells.
\item
There are many distinct anatomical structures along the
antero-posterior (A--P) and dorsoventral (D--V) axes.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-4}{}
\begin{itemize}
\tightlist
\item
  Effects of major mutations along either of the axes \textbf{do not}
  alter embryogenesis in the very young larva.
\item
  The exoskeleton recovered at such stages mirrors the fate of the
  epidermal cells.
\item
  Only a small population of cells, set aside during embryogenesis,
  proliferates during the three larval stages (instars) and
  differentiates in the pupal stage into adult structures.
\item
Such cells include \emph{imaginal disks}, and are easy to remove for
expression analysis.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-5}{}
\begin{itemize}
\tightlist
\item
Genes contributing to body plan can be cloned and characterized at
molecular level easily
\item
Translation products (protein) can be inferred by identifying close
relatives in amino acid sequence.
\item
Spatial and temporal patterns of expressions can be investigated by
expression analysis of:
\begin{itemize}
\tightlist
\item
An mRNA, by using histochemically tagged single-stranded DNA
sequences complementary to the mRNA to perform RNA in situ
hybridization.
\item
A protein, by using histochemically tagged antibodies that bind
specifically to that protein.
\end{itemize}
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-6}{}
\begin{figure}
\includegraphics[width=0.55\linewidth]{./../images/drosophila_development} \caption{The larva forms in 1 day and then undergoes several stages of growth during which the imaginal disks and other precursors of adult structures proliferate. These structures differentiate during pupation, and the adult fly hatches (eclosion) and begins the cycle again.}\label{fig:drosophila-development}
\end{figure}
\end{frame}
\begin{frame}{Extrapolating information}
\protect\hypertarget{extrapolating-information}{}
\begin{itemize}
\tightlist
\item
There are numerous homeobox genes within \emph{Drosophila} genome.
\item
Homeotic gene detection depends on DNA base-pair complementarity.
\item
Hybridization experiments can be done with \emph{moderate stringency
conditions}.
\item
Homeobox genes have been searched for in other animals, by means of
\emph{zoo blots} by using radioactive Drosophila homeobox DNA as the
probe.
\item
This approach led to the discovery of homologous homeobox sequences in
many different animals, including humans and mice.
\item
Homeotic mutants have following features:
\begin{itemize}
\tightlist
\item
Developmental pathways are changed dramatically due to single
homeotic gene mutation
\item
    The structure formed in the mutant resembles another well-developed
    body part
\item
Transform the identity of serially reiterated structures
\end{itemize}
\item
Systematic searches for homeotic genes have led to the identification
of eight loci, now referred to as Hox genes, that affect the identity
of segments and their associated appendages in Drosophila.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-7}{}
\begin{figure}
\includegraphics[width=0.35\linewidth]{./../images/drosophila_hox_genes} \caption{The Hox genes of Drosophila. Eight Hox genes regulate the identity of regions within the adult. The color coding identifies the segments and structures that are affected by mutations in the various Hox genes.}\label{fig:drosophila-hox-genes}
\end{figure}
\end{frame}
\begin{frame}{Summary}
\protect\hypertarget{summary}{}
\begin{itemize}
\tightlist
\item
Despite vast differences in appearance and anatomy, animals have in
common a toolkit of genes that govern development.
\item
Distinguishing feature of toolkit genes is the typical presence of
numerous independent cisacting regulatory elements that govern gene
expression in different spatial domains and at different stages of
development.
\item
This toolkit is a small fraction of all genes in the genome, and most
of these toolkit genes control transcription factors and components of
signal-transduction pathways.
\item
Individual toolkit genes typically have multiple functions and affect
the development of different structures at different stages.
\item
The development of the growing embryo and its body parts takes place
in a spatially and temporally ordered progression.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-8}{}
\begin{itemize}
\tightlist
\item
Spatially restricted patterns of gene expression are products of
combinatorial regulation. Each pattern of gene expression has a
preceding causal basis. New patterns are generated by the combined
inputs of preceding patterns.
\item
As an example, the positioning of pair-rule stripes and the
restriction of appendage-regulatory-gene expression to individual
segments requires the integration of numerous positive and negative
regulatory inputs by cis-acting regulatory elements.
\item
Post-transcriptional regulation at the RNA level adds another layer of
specificity to the control of gene expression.
\item
Alternative RNA splicing and translational control by proteins and
miRNAs also contribute to the spatial and temporal control of
toolkit-gene expression.
\item
Combinatorial control is key to both the \textbf{specificity} and the
\textbf{diversity} of gene expression and toolkit-gene function.
\item
The modularity of cis-acting regulatory elements allows for
independent spatial and temporal control of toolkit-gene expression
and function. They act as switches in the developmental control of
gene expression.
\end{itemize}
\end{frame}
\hypertarget{genetics-of-cancer}{%
\section{Genetics of cancer}\label{genetics-of-cancer}}
\begin{frame}{}
\protect\hypertarget{section-9}{}
\begin{itemize}
\tightlist
\item
Somatic cells of higher eukaryotes have limited life span, and their
growth and divisions are highly regulated.
\item
But occasionally, some cells show uncontrolled division and growth,
and an ability to grow in inappropriate locations; these cells are
said to be cancerous or tumorigenic and they produce cancer.
\item
Uncontrolled growth in non-circulatory tissues produces solid tumors.
\item
A malignant tumor, or \textbf{cancer}, is an aggregate of cells, all
descended from an initial aberrant founder cell.
\item
When tumor becomes \textbf{malignant} or \textbf{cancerous}, its cells
detach and migrate to other parts of body where they produce secondary
tumors; this phenomenon is called \textbf{metastasis}.
\item
\textbf{Benign} or noncancerous tumors do not exhibit metastasis.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-10}{}
\begin{itemize}
\tightlist
\item
Cancerous cells are produced due to the following three types of
changes:
\begin{itemize}
\tightlist
\item
Immortalization,
\item
Transformation
\item
Metastasis.
\end{itemize}
\item
Cancer arises through:
\begin{enumerate}
\tightlist
\item
An increased mutation rate that creates genetic variation and
\item
Selection of cells for increased proliferation rates; several cycles
of these events occur in the progression of cancer. (Singh 2018),
page 386.
\end{enumerate}
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-11}{}
\begin{itemize}
\tightlist
\item
Cancer researchers have identified genes that, when mutated, can
contribute to the development of a cancerous state. In it, genes or
mutant genes actively promote cell division.
\item
The genes are called \textbf{oncogenes}, from the Greek word for
`tumor.'
\item
Oncogenes: Many cancers involve the overexpression of certain genes or
  the abnormal activity of their mutant protein products; they result in
dominant mutations, usually owing to their inappropriate activation.
\item
In contrast, tumor-suppressor genes encode proteins whose loss of
activity can contribute to a cancerous state. As such, they are
recessive mutations.
\item
These genes were first discovered in the genomes of RNA viruses that
are capable of inducing tumors in vertebrate hosts.
\item
Later, the cellular counterparts of these viral oncogenes were
discovered in many different organisms ranging from \emph{Drosophila}
to humans.
\end{itemize}
\end{frame}
\begin{frame}{Tumor producing retroviruses and viral oncogenes}
\protect\hypertarget{tumor-producing-retroviruses-and-viral-oncogenes}{}
\begin{itemize}
\tightlist
\item
Understanding of genetic basis of cancer has come from the study of
tumor-inducing viruses.
\item
Many of these viruses have a genome composed of RNA instead of DNA.
\item
After entering a cell, the viral RNA is used as a template to
synthesize complementary DNA, which is then inserted at one or more
positions in the cell's chromosomes.
\item
The synthesis of DNA from RNA is catalyzed by the viral enzyme reverse
transcriptase. This reversal of the normal flow of genetic information
from DNA to RNA has prompted biologists to call these pathogens
\textbf{retroviruses}.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-12}{}
\begin{itemize}
\tightlist
\item
The first tumor-inducing virus was discovered in 1910 by Peyton Rous;
it caused a special kind of tumor, or sarcoma, in the connective
tissue of chickens and has since been called the Rous sarcoma virus.
\item
Rous was awarded the Nobel Prize in Physiology or Medicine for the
significance of his discovery in 1966. Modern research has shown that
  the RNA genome of this retrovirus contains four genes: \emph{gag},
  \emph{pol}, \emph{env}, and \emph{src}
\item
\textbf{Other examples}:
\begin{itemize}
\tightlist
\item
Breast cancer (\emph{BRCA2}): Autosomal dominant. Tumor suppressor
defect giving predisposition to breast and other cancers
\item
\emph{TLF}: Colon cancer
\item
\emph{Gli1}: Basal-cell carcinoma
\item
\emph{DPC4}: Pancreatic and colon
\item
\emph{hNotch1}: Leukemia, lymphoma
\end{itemize}
\end{itemize}
\end{frame}
\hypertarget{immunogenetics}{%
\section{Immunogenetics}\label{immunogenetics}}
\begin{frame}{Introduction}
\protect\hypertarget{introduction}{}
\begin{columns}[T,onlytextwidth]
\small
\column{.35\linewidth}
\begin{itemize}
\item The immune system keeps a repertoire of B cells that are poised to make antibodies to invading pathogens.
\item When one of these B-cell antibodies is needed, the B cell starts dividing so that many antibodies can be produced and they are available to attack the pathogen.
\item Some of these clones are refined by mutation to make a more specific antibody.
\end{itemize}
\column{0.65\linewidth}
\begin{center}\includegraphics[width=0.5\linewidth]{./../images/antibody_antigen_recognition} \end{center}
\end{columns}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-13}{}
\begin{itemize}
\tightlist
\item
Acquired immunity is divided into two branches. Humoral immunity is
mediated by antibodies in blood plasma, which are produced by B cells.
The second part is cellular immunity, which is mediated by T cells.
\item
Antibodies recognize epitopes or specific regions of the invading
pathogen.
\item
Antibodies are very diverse in structure so that all the pathogens can
be recognized.
\item
Antibodies are produced by shuffling gene segments rather than having
one gene code for each different antibody; the process is called
\textbf{V(D)J} recombination.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-14}{}
\begin{itemize}
\tightlist
\item
  The ELISA procedure uses antibodies for clinical diagnostics.
\item
  A mixture of antibodies with varying affinities to one or more
  antigens is referred to as a polyclonal antibody. However, such
  antibodies are of little use for a specific, accurate assay.
\item
Pure antibody made by a single line of cells is known as a
\textbf{monoclonal antibody}.
\item
An example of antibody generation technology is by exploiting myelomas
(naturally occurring cancers) derived from B cells; they therefore
express immunoglobulin genes, as long as they are given proper
nutrients.
\item
To make monoclonal antibodies, scientists fuse the relatively delicate
B cell, which is making the required antibody, to a myeloma cell. The
resulting hybrid is called a \textbf{hybridoma}.
\end{itemize}
\end{frame}
\begin{frame}{}
\protect\hypertarget{section-15}{}
\begin{figure}
\begin{columns}[T,onlytextwidth]
\column{.70\linewidth}
\begin{center}
\includegraphics[width=0.28\linewidth]{./../images/hybridoma_principle.png}
\end{center}
\column{.30\linewidth}
\caption{\newline \textbf{Principle of the Hybridoma} \newline Monoclonal antibodies derive from a single antibody-producing B cell. The antigen is first injected into a mouse to provoke an immune response. The spleen is harvested because it harbors many activated B cells. The spleen cells are short-lived in culture, so they are fused to immortal myeloma cells. The hybridoma cells are cultured and isolated so each hybrid is separate from the other. Each hybrid clone can then be screened for the best antibody to the target protein.}
\label{fig:monoclonal-antibody-hybridoma}
\end{columns}
\end{figure}
\end{frame}
\begin{frame}{Vaccination}
\protect\hypertarget{vaccination}{}
\begin{itemize}
\tightlist
\item
Vaccination takes advantage of immune memory.
\item
Vaccines consist of various derivatives of infectious agents that no
longer cause disease but are still antigenic; that is, they induce an
immune response.
\item
For example, bacteria killed by heat are sometimes used. The antigens
on the dead bacteria stimulate B-cell division.
\item
Some of the B cells form memory cells so, later, when living germs
corresponding to the vaccine attack the vaccinated person, the immune
system is prepared.
\end{itemize}
\end{frame}
\hypertarget{bibliography}{%
\section{Bibliography}\label{bibliography}}
\begin{frame}{References}
\protect\hypertarget{references}{}
\hypertarget{refs}{}
\leavevmode\hypertarget{ref-singh2018fundamentals}{}%
Singh, BD. 2018. \emph{Fundamentals of Genetics}. Kalyani Publishers.
\end{frame}
\end{document}
|
/-
Copyright (c) 2017 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Johan Commelin, Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.mv_polynomial.variables
import Mathlib.PostPort
universes u u_1 v
namespace Mathlib
/-!
# Multivariate polynomials over a ring
Many results about polynomials hold when the coefficient ring is a commutative semiring.
Some stronger results can be derived when we assume this semiring is a ring.
This file does not define any new operations, but proves some of these stronger results.
## Notation
As in other polynomial files, we typically use the notation:
+ `σ : Type*` (indexing the variables)
+ `R : Type*` `[comm_ring R]` (the coefficients)
+ `s : σ →₀ ℕ`, a function from `σ` to `ℕ` which is zero away from a finite set.
This will give rise to a monomial in `mv_polynomial σ R` which mathematicians might call `X^s`
+ `a : R`
+ `i : σ`, with corresponding monomial `X i`, often denoted `X_i` by mathematicians
+ `p : mv_polynomial σ R`
-/
namespace mv_polynomial
protected instance comm_ring {R : Type u} {σ : Type u_1} [comm_ring R] :
comm_ring (mv_polynomial σ R) :=
add_monoid_algebra.comm_ring
protected instance C.is_ring_hom {R : Type u} {σ : Type u_1} [comm_ring R] : is_ring_hom ⇑C :=
is_ring_hom.of_semiring ⇑C
@[simp] theorem C_sub {R : Type u} (σ : Type u_1) (a : R) (a' : R) [comm_ring R] :
coe_fn C (a - a') = coe_fn C a - coe_fn C a' :=
is_ring_hom.map_sub ⇑C
@[simp] theorem C_neg {R : Type u} (σ : Type u_1) (a : R) [comm_ring R] :
coe_fn C (-a) = -coe_fn C a :=
is_ring_hom.map_neg ⇑C
@[simp] theorem coeff_neg {R : Type u} (σ : Type u_1) [comm_ring R] (m : σ →₀ ℕ)
(p : mv_polynomial σ R) : coeff m (-p) = -coeff m p :=
finsupp.neg_apply
@[simp] theorem coeff_sub {R : Type u} (σ : Type u_1) [comm_ring R] (m : σ →₀ ℕ)
(p : mv_polynomial σ R) (q : mv_polynomial σ R) : coeff m (p - q) = coeff m p - coeff m q :=
finsupp.sub_apply
protected instance coeff.is_add_group_hom {R : Type u} (σ : Type u_1) [comm_ring R] (m : σ →₀ ℕ) :
is_add_group_hom (coeff m) :=
is_add_group_hom.mk
theorem degrees_neg {R : Type u} {σ : Type u_1} [comm_ring R] (p : mv_polynomial σ R) :
degrees (-p) = degrees p :=
sorry
theorem degrees_sub {R : Type u} {σ : Type u_1} [comm_ring R] (p : mv_polynomial σ R)
(q : mv_polynomial σ R) : degrees (p - q) ≤ degrees p ⊔ degrees q :=
sorry
@[simp] theorem vars_neg {R : Type u} {σ : Type u_1} [comm_ring R] (p : mv_polynomial σ R) :
vars (-p) = vars p :=
sorry
theorem vars_sub_subset {R : Type u} {σ : Type u_1} [comm_ring R] (p : mv_polynomial σ R)
(q : mv_polynomial σ R) : vars (p - q) ⊆ vars p ∪ vars q :=
sorry
@[simp] theorem vars_sub_of_disjoint {R : Type u} {σ : Type u_1} [comm_ring R]
{p : mv_polynomial σ R} {q : mv_polynomial σ R} (hpq : disjoint (vars p) (vars q)) :
vars (p - q) = vars p ∪ vars q :=
sorry
@[simp] theorem eval₂_sub {R : Type u} {S : Type v} {σ : Type u_1} [comm_ring R]
(p : mv_polynomial σ R) {q : mv_polynomial σ R} [comm_ring S] (f : R →+* S) (g : σ → S) :
eval₂ f g (p - q) = eval₂ f g p - eval₂ f g q :=
ring_hom.map_sub (eval₂_hom f g) p q
@[simp] theorem eval₂_neg {R : Type u} {S : Type v} {σ : Type u_1} [comm_ring R]
(p : mv_polynomial σ R) [comm_ring S] (f : R →+* S) (g : σ → S) :
eval₂ f g (-p) = -eval₂ f g p :=
ring_hom.map_neg (eval₂_hom f g) p
theorem hom_C {S : Type v} {σ : Type u_1} [comm_ring S] (f : mv_polynomial σ ℤ → S) [is_ring_hom f]
(n : ℤ) : f (coe_fn C n) = ↑n :=
ring_hom.eq_int_cast (ring_hom.comp (ring_hom.of f) (ring_hom.of ⇑C)) n
/-- A ring homomorphism f : Z[X_1, X_2, ...] → R
is determined by the evaluations f(X_1), f(X_2), ... -/
@[simp] theorem eval₂_hom_X {S : Type v} [comm_ring S] {R : Type u} (c : ℤ →+* S)
(f : mv_polynomial R ℤ →+* S) (x : mv_polynomial R ℤ) : eval₂ c (⇑f ∘ X) x = coe_fn f x :=
sorry
/-- Ring homomorphisms out of integer polynomials on a type `σ` are the same as
functions out of the type `σ`. -/
def hom_equiv {S : Type v} {σ : Type u_1} [comm_ring S] : (mv_polynomial σ ℤ →+* S) ≃ (σ → S) :=
equiv.mk (fun (f : mv_polynomial σ ℤ →+* S) => ⇑f ∘ X)
(fun (f : σ → S) => eval₂_hom (int.cast_ring_hom S) f) sorry sorry
@[simp] theorem total_degree_neg {R : Type u} {σ : Type u_1} [comm_ring R] (a : mv_polynomial σ R) :
total_degree (-a) = total_degree a :=
sorry
theorem total_degree_sub {R : Type u} {σ : Type u_1} [comm_ring R] (a : mv_polynomial σ R)
(b : mv_polynomial σ R) : total_degree (a - b) ≤ max (total_degree a) (total_degree b) :=
sorry
end Mathlib |
% \documentclass[draft,11pt]{article}
%\documentclass[11pt]{article}
\subsection{Eigenvalue Bounds Beyond Test Vectors}
In the previous sections, we first saw a complete characterization of the
eigenvalues and eigenvectors of the unit weight complete graph on $n$
vertices, $K_n$.  Namely, $\LL_{K_n} = n\II - \vecone \vecone^\trp$, and
this means that \emph{every} vector $\yy \perp \vecone$ is an
eigenvector of eigenvalue $n$.
We then looked at eigenvalues of $P_n$, the unit weight path on $n$
vertices, and we showed using \emph{test vector} bounds that
\begin{equation}
\label{eq:testvectorbounds}
\lambda_2(\LL_{P_n}) \leq \frac{12}{n^2} \text{ and } 1 \leq
\lambda_n(\LL_{P_n}).
\end{equation}
%
Ideally we would like to prove an almost matching upper bound on
$\lambda_2$ and an almost matching lower bound on $\lambda_n$, but it
is not clear how to get that from the Courant-Fischer theorem.
To get there, we first need to introduce some more tools.
% The Courant-Fischer theorem is not as helpful when we want to prove lower bounds on $\lambda_2$. To prove lower bounds, we need the form with a maximum on the outside, which gives
% \begin{align*}
% \lambda_2 \geq \max_{S : \dim{S} = n - 1 } \min_{ \vv\in S } \frac{ \vv^\top \LL \vv}{ \vv^\top \vv}
% \end{align*}
% This is not too helpful, as it is difficult to prove lower bounds on
% \begin{align*}
% \min_{ \vv\in S } \frac{ \vv^\top \LL \vv}{ \vv^\top \vv}
% \end{align*}
% over a space $S$ of large dimension. We need another technique.
\subsection{The Loewner Order, aka. the Positive Semi-Definite Order}
We'll now introduce an ordering on symmetric matrices called the
\emph{Loewner order}, which I also like to just call the positive
semi-definite order.
As we will see in a moment, it is a partial order on symmetric matrices; we denote
it by ``$\preceq$''.
For convenience, we allow ourselves to both write $\AA \preceq \BB$
and equivalently $\BB \succeq \AA$.
For a symmetric matrix $\AA \in \R^{n \times n}$ we define that
\begin{align*}
\AA \succeq \matzero
\end{align*}
if and only if $\AA$ is positive semi-definite.
More generally, when we have two symmetric matrices $\AA, \BB \in
\R^{n \times n}$, we will write
\begin{equation}
\label{eq:psdorder}
\AA \preceq \BB
\text{ if and only if for all } \xx \in \R^n
\text{ we have } \xx^{\trp} \AA \xx \leq \xx^{\trp} \BB
\xx
\end{equation}
This is a partial order, because it satisfies the three requirements
of
\begin{enumerate}
\item Reflexivity: $\AA \preceq \AA$.
\item Anti-symmetry:
$\AA \preceq \BB$ and $\BB \preceq \AA$
implies $\AA = \BB$
\item Transitivity: $\AA \preceq \BB$ and $\BB \preceq \CC$
implies $\AA \preceq \CC$
\end{enumerate}
Check for yourself that these properties hold!
The PSD order has other very useful properties:
$\AA \preceq \BB$ implies $\AA + \CC \preceq \BB + \CC$ for any
symmetric matrix $\CC$. Convince yourself of this too!
And, combining this observation with transitivity, we can see that
$\AA \preceq \BB$ and $\CC\preceq \DD$
implies ${\AA + \CC \preceq \BB + \DD}$.
Here is another useful property: If $\matzero \preceq \AA$ then for all $\alpha \geq 1$
\[
\frac{1}{\alpha} \AA \preceq \AA \preceq \alpha \AA.
\]
Here is another one:
\begin{claim}
\label{clm:eigorderfrompsdorder}
If $\AA \preceq \BB$, then for all $i$
\[
\lambda_i(\AA) \leq \lambda_i(\BB).
\]
\end{claim}
\begin{proof}
We can prove this Claim by applying the subspace version of the
Courant-Fischer theorem.
\[
\lambda_i(\AA) =
\min_{
\substack{
\mathrm{subspace~} W \subseteq \R^n
\\
\dim{W} = i
}
}
\max_{
\xx \in W, \xx \neq \veczero
}
\frac{\xx^\trp \AA\xx}{\xx^\trp\xx}
\leq
\min_{
\substack{
\mathrm{subspace~} W \subseteq \R^n
\\
\dim{W} = i
}
}
\max_{
\xx \in W, \xx \neq \veczero
}
\frac{\xx^\trp \BB\xx}{\xx^\trp\xx}
= \lambda_i(\BB).
\]
\end{proof}
Note that the converse of Claim~\ref{clm:eigorderfrompsdorder} is very
much false; for example, the matrices
$ \AA =
\begin{pmatrix}
2 & 0 \\
0 & 1
\end{pmatrix}$
and $\BB = \begin{pmatrix}
1 & 0 \\
0 & 2
\end{pmatrix}$ have equal eigenvalues, but both $\AA \not\preceq \BB$ and
$\BB \not\preceq \AA$.
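To verify this, take $\xx = (1,0)^\trp$, which gives $\xx^\trp \AA \xx = 2 >
1 = \xx^\trp \BB \xx$, and then $\xx = (0,1)^\trp$, which gives
$\xx^\trp \AA \xx = 1 < 2 = \xx^\trp \BB \xx$, so neither order can hold.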
\begin{remark}
It's useful to get used to and remember some of the properties of the Loewner
order, but all the things we have established so far are almost
immediate from the basic characterization in
Equation~\eqref{eq:psdorder}.
So, ideally, don't memorize all these facts, instead, try to see that
they are simple consequences of the definition.
\end{remark}
\subsection{Upper Bounding a Laplacian's $\lambda_n$ Using Degrees}
In an earlier lecture, we observed that for any graph $G=(V,E,\ww)$,
$\LL = \DD - \AA \succeq \matzero$.
We can see this from
$\xx^\top (\DD-\AA) \xx = \sum_{ (u,v) \in E } \ww(u,v)
( \xx(u) - \xx(v) )^2 \geq 0$.
Similarly $\DD + \AA \succeq \matzero$, because
$\xx^\top (\DD+\AA) \xx = \sum_{ (u,v) \in E } \ww(u,v)
( \xx(u) + \xx(v) )^2 \geq 0$.
But this means that $-\AA \preceq \DD$ and hence $\LL = \DD-\AA
\preceq 2\DD$.
So, for the path graph $P_n$, we have
$\LL_{P_n} = \DD-\AA \preceq 2\DD \preceq 4 \II$.
So by Claim~\ref{clm:eigorderfrompsdorder}
\begin{equation}
\label{eq:pathlambdamaxub}
\lambda_n(\LL_{P_n}) \leq 4.
\end{equation}
We can see that our test vector-based lower bound on $\lambda_n(\LL_{P_n})$ from
Equation~\eqref{eq:testvectorbounds} is tight up
to a factor 4.
Since this type of argument works for any unit weight graph, it proves the following claim.
\begin{claim}
\label{clm:lambdamaxfromeig}
For any unit weight graph $G$,
$\lambda_{n}(\LL_G) \leq 2 \max_{v
\in V} \mathop{degree}(v)$.
\end{claim}
This is tight on a graph consisting of a single edge.
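Indeed, for the unit weight graph consisting of a single edge,
\[
\LL =
\begin{pmatrix}
  1 & -1 \\
  -1 & 1
\end{pmatrix},
\]
whose eigenvalues are $0$ and $2 = 2 \max_{v \in V} \mathop{degree}(v)$.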
\subsection{The Loewner Order and Laplacians of Graphs.}
It's sometimes convenient to overload the notation for the PSD order to also
apply to graphs. We will write
\begin{align*}
G \preceq H
\end{align*}
if $\LL_{G} \preceq
\LL_H$.
For example, given two unit weight graphs
$G = (V,E)$ and $H = (V,F)$, if $H$ is a subgraph of $G$,
then
\begin{align*}
\LL_H \preceq \LL_G.
\end{align*}
We can see this from the Laplacian quadratic form:
\begin{align*}
\xx^\top \LL_G \xx = \sum_{ (u,v) \in E } \ww(u,v) ( \xx(u) - \xx(v) )^2.
\end{align*}
Dropping edges will only decrease the value of the quadratic form. The
same holds for decreasing the weights of edges.
The graph order notation is especially useful when we allow for
scaling a graph by a constant, say $c > 0$,
\begin{align*}
c \cdot H \preceq G
\end{align*}
What is $c \cdot H$? It is the same graph as $H$, but the weight of
every edge is multiplied by $c$.
Now we can make statements like $\frac{1}{2} H \preceq G \preceq 2 H$,
which turns out to be a useful notion of the two graphs approximating
each other.
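In particular, combining this with Claim~\ref{clm:eigorderfrompsdorder}: if
$\frac{1}{c} H \preceq G \preceq c \cdot H$, then for all $i$ we have
$\frac{1}{c} \lambda_i(\LL_H) \leq \lambda_i(\LL_G) \leq c \cdot
\lambda_i(\LL_H)$, so the two graphs have similar spectra.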
% \subsection{Dan}
% I begin by recalling an extremely useful piece of notation that is used in the Optimization community. For a symmetric matrix $\AA$, we write
% \begin{align*}
% \AA \succeq 0
% \end{align*}
% if $\AA$ is positive semidefinite. That is, if all of the eigenvalues of $\AA$ are nonnegative, which is equivalent to
% \begin{align*}
% \vv^\top \AA \vv\geq 0,
% \end{align*}
% for all $\vv$. We similarly write
% \begin{align*}
% \AA \succeq {\bf B}
% \end{align*}
% if
% \begin{align*}
% \AA - {\bf B} \succeq 0
% \end{align*}
% which is equivalent to
% \begin{align*}
% \vv^\top \AA \vv\geq \vv^\top {\bf B} \vv
% \end{align*}
% for all $\vv$.
% The relation $\preceq$ is an example of a partial order. It applies to some pairs of symmetric matrices, while others are incomparable. But, for all pairs to which it does apply, it acts like an order. For example, we have
% \begin{align*}
% \AA \succeq {\bf B}, \mathrm{~and~} {\bf B} \succeq {\bf C} \mathrm{~implies~} \AA \succeq {\bf C},
% \end{align*}
% and
% \begin{align*}
% \AA \succeq {\bf B} \mathrm{~implies~} \AA + {\bf C} \succeq {\bf B} + {\bf C},
% \end{align*}
% for symmetric matrices ${\cal A}$, ${\cal B}$ and ${\cal C}$.
% I find it convenient to overload this notation by defining it for graphs as well. Thus, I'll write
% \begin{align*}
% G \succeq H
% \end{align*}
% if $\LL_{G} \succeq \LL_H$.
% For example, if $G = (V,E)$ is a graph and $H = (V,F)$ is a subgraph of $G$, then
% \begin{align*}
% \LL_G \succeq \LL_H.
% \end{align*}
% To see this, recall the Laplacian quadratic form:
% \begin{align*}
% \xx^\top \LL_G \xx = \sum_{ (u,v) \in E } w_{u,v} ( \xx(u) - \xx(v) )^2.
% \end{align*}
% It is clear that dropping edges can only decrease the value of the quadratic form. The same holds for decreasing the weights of edges.
% This notation is most powerful when we consider some multiple of a graph. Thus, I could write
% \begin{align*}
% G \succeq c \cdot H, \mathrm{~for~some~} c > 0.
% \end{align*}
% What is $c \cdot H$? It is the same graph as $H$, but the weight of every edge is multiplied by $c$.
% Using the Courant-Fischer Theorem, we can prove
% \begin{lemma}
% If $G$ and $H$ are graphs such that
% \begin{align*}
% G \succeq c \cdot H,
% \end{align*}
% then
% \begin{align*}
% \lambda_k (G) \geq c \cdot \lambda_k(H), \mathrm{~for~all~} k.
% \end{align*}
% \end{lemma}
% \begin{proof}
% The Courant-Fischer Theorem tells us that
% \begin{align*}
% \lambda_k (G)
% = & ~ \min_{ S \subseteq \R^n, \dim{S} = k } \max_{ \xx \in S }
% \frac{ \xx^\top \LL_G \xx }{ \xx^\top \xx } \\
% \geq & ~ c \dot \min_{ S \subseteq \R^n, \dim{S} = k } \max_{ \xx \in S } \frac{ \xx^\top L_H \xx }{ \xx^\top \xx } \\
% = & ~ c \cdot \lambda_k (H).
% \end{align*}
% \end{proof}
% \begin{corollary}
% Let $G$ be a graph and let $H$ be obtained by either adding an edge to $G$ or increasing the weight of an edge in $G$. Then, for all $i$,
% \begin{align*}
% \lambda_i (G) \leq \lambda_i (H).
% \end{align*}
% \end{corollary}
% \subsection{Approximations of Graphs}
% An idea that we will use in later lectures is that one graph approximations another if their Laplacian quadratic forms are similar. For example, we will say that $H$ is a $c$-approximation of $G$ if
% \begin{align*}
% c \cdot H \succeq G \succeq H /c.
% \end{align*}
% Surprising approximations exist.
% For example, expander graphs are very sparse approximations of the complete graph. For example, the following is known
% \begin{theorem}
% For every $\epsilon > 0$, there exists a $d > 0$ such that for all sufficiently large $n$ there is a $d$-regular graph $G_n$ that is $(1+\epsilon)$-approximation of $K_n$.
% \end{theorem}
% These graphs have many fewer edges than the complete graphs!
% In a latter lecture we will also prove that every graph can be well-approximated by a sparse graph.
\subsection{The Path Inequality}
Now, we'll see a general tool
for comparing two graphs $G$ and $H$ to prove
inequalities like $c H
\preceq G$ for some constant $c$.
Our tools won't necessarily work well for all cases, but we'll see
some examples where they do.
% By now you should be wondering, ``how do we prove that $G \succeq c \cdot H$ for some graph $G$ and $H$?'' Not too many ways are known. We'll do it by proving some inequalities of this form for some of the simplest graphs, and then extending them to more general graphs.
In the rest of the lecture, we will often need to compare two graphs
defined on the same vertex set $V = \setof{1,\ldots,n} = [n]$.
We use $G_{i,j}$ to denote the unit weight graph on vertex set $[n]$
consisting of a single edge between vertices $i$ and $j$.
\begin{lemma}[The Path Inequality]
\label{lem:pathineq}
\begin{align*}
(n-1) \cdot P_n \succeq G_{1,n},
\end{align*}
\end{lemma}
% The following very simple proof of this inequality was discovered by Sam Daitch.
% \begin{lemma}
% \begin{align*}
% (n-1) \cdot P_n \succeq G_{1,n}.
% \end{align*}
% \end{lemma}
\begin{proof}
  We want to show that for every $\xx \in \R^n$,
\begin{align*}
(n-1) \cdot \sum_{i=1}^{n-1} ( \xx(i+1) - \xx(i) )^2 \geq ( \xx(n) - \xx(1) )^2.
\end{align*}
For $i \in [n-1]$, set
\begin{align*}
\DDelta (i) = \xx(i+1) - \xx(i).
\end{align*}
The inequality we want to prove then becomes
\begin{align*}
(n-1) \sum_{i=1}^{n-1} ( \DDelta(i) )^2 \geq \left( \sum_{i=1}^{n-1} \DDelta (i) \right)^2.
\end{align*}
But, this is immediate from the Cauchy-Schwarz inequality
$\aa^{\trp}\bb \leq \norm{\aa}_2\norm{\bb}_2$:
\begin{align*}
(n-1) \sum_{i=1}^{n-1} ( \DDelta (i) )^2
= & ~ \| \vecone_{n-1} \|^2 \cdot \| \DDelta \|^2 \\
= & ~ ( \| \vecone_{n-1} \| \cdot \| \DDelta \| )^2 \\
\geq & ~ ( \vecone^\top_{n-1} \DDelta )^2 \\
= & ~ ( \sum_{i=1}^{n-1} \DDelta(i) )^2
\end{align*}
\end{proof}
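For instance, for $n = 3$ the lemma states that
\begin{align*}
  2 \left( (\xx(2)-\xx(1))^2 + (\xx(3)-\xx(2))^2 \right) \geq (\xx(3)-\xx(1))^2,
\end{align*}
which can also be checked directly: the difference between the two sides
equals $(\xx(1) - 2\xx(2) + \xx(3))^2 \geq 0$.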
% \Zhao{We skip Lemma 4.6.2}
\subsection{Lower Bounding $\lambda_2$ of a Path Graph}
We will now use Lemma~\ref{lem:pathineq} to prove a lower bound on $\lambda_2(\LL_{P_n})$.
% I'll now demonstrate the power of Lemma 4.6.1 by using it to prove a lower bound on $\lambda_2 (P_n)$ that will be very close to the upper bound we obtained from the test vector.
% To prove a lower bound on $\lambda_2 (P_n)$, we
Our strategy will be to prove that the path $P_n$ is at least some multiple of the
complete graph $K_n$, measured by the Loewner order, i.e. $K_n \preceq
f(n) P_n$ for some function $f: \N \to \R$.
We can combine this with our observation from the previous lecture
that $\lambda_2 (\LL_{K_n}) = n$ to show that
\begin{align}
\label{eq:pathtocompleteeig}
f(n) \lambda_2(\LL_{P_n}) \geq \lambda_2(\LL_{K_n}) = n,
\end{align}
and this will give our lower bound on $\lambda_2(\LL_{P_n}).$
% will prove that some multiple of the path is at least the complete
% graph. To this end, write
When establishing the inequality between $P_n$ and $K_n$, we can treat
each edge of the complete graph separately, by first noting that
\begin{align*}
\LL_{K_n} = \sum_{i < j} \LL_{G_{i,j}}
\end{align*}
For every edge $(i,j)$ in the complete graph, we apply the Path
Inequality, Lemma~\ref{lem:pathineq}:
\begin{align*}
\label{eq:1}
G_{i,j}
\preceq & ~ (j-i) \sum_{k=i}^{j-1} G_{k,k+1} \\
\preceq & ~ (j-i) P_n
\end{align*}
This inequality says that $G_{i,j}$ is at most $(j-i)$ times the part of the path connecting $i$ to $j$, and that this part of the path is less than the whole.
Summing this inequality over all edges $(i,j) \in K_n$ gives
\begin{align*}
K_n = \sum_{i < j} G_{i,j} \preceq \sum_{i < j} (j-i)P_n.
\end{align*}
To finish the proof, we compute
\begin{align*}
\sum_{i < j} (j-i) \leq \sum_{i < j} n \leq n^3
\end{align*}
So
\begin{align*}
\LL_{K_n} \preceq n^3 \cdot \LL_{P_n}.
\end{align*}
Plugging this into Equation~\eqref{eq:pathtocompleteeig}
we obtain
\begin{align*}
\frac{1}{ n^2 } \leq \lambda_2 (\LL_{P_n}).
\end{align*}
This only differs from our test vector-based upper bound in
Equation~\eqref{eq:testvectorbounds} by a factor 12.
We could make this considerably tighter by being more careful about the sums.
\subsection{Laplacian Eigenvalues of the Complete Binary Tree}
Let's do the same analysis with the complete binary tree with unit
weight edges on $n =
2^{d+1}-1$ vertices, which we
denote by $T_d$.
$T_d$ is the balanced binary tree on this many vertices, i.e. it
consists of a root node, which has two children, each of those
children have two children and so on until we reach a depth of $d$
from the root, at which point the child vertices have no more
children.
A simple induction shows that indeed $n = 2^{d+1}-1$.
We can also describe the edge set by saying that each node $i$ has
edges to its children $2i$ and $2i+1$ whenever the node labels do not
exceed $n$.
We emphasize that we still think of the graph as undirected.
\paragraph{The largest eigenvalue.}
We'll start by above bounding $\lambda_n(\LL_{T_d})$ using a test
vector.
We let $\xx(i) = 0$ for all nodes that have a child node, and $\xx(i)
= -1$ for even-numbered leaf nodes and $\xx(i) = +1$ for odd-numbered
leaf nodes.
Note that there are $(n+1)/2$ leaf nodes, and every leaf node has a
single edge, connecting it to a parent with value $0$.
Thus
\begin{align}
\lambda_n(\LL) = \max_{ \substack{ \vv \neq \veczero} } \frac{
\vv^\top \LL \vv}{ \vv^\top \vv}
\geq
\frac{\xx^\top \LL \xx}
{ \xx^\top \xx}
=
\frac{ (n+1)/2 }{ (n+1)/2 }
= 1
.
\end{align}
Meanwhile, every vertex has degree at most 3, so by
Claim~\ref{clm:lambdamaxfromeig}, $\lambda_n(\LL) \leq 6$.
So we can bound the largest eigenvalue above and below by constant.
\paragraph{$\lambda_2$ and diameter in any graph.}
The following lemma gives a simple lower bound on $\lambda_2$ for any
graph.
\begin{lemma}
\label{lem:lambda2diam}
For any unit weight graph $G$ with diameter $D$,
\[
\lambda_2(\LL_G) \geq \frac{1}{nD}.
\]
\end{lemma}
\begin{proof}
We will again prove a lower bound comparing $G$ to the complete
graph. For each edge $(i,j) \in K_n$, let $G^{i,j}$ denote a
shortest path in $G$ from $i$ to $j$. This path will have length at most $D$. So, we have
\begin{align*}
K_n
= & ~ \sum_{i < j} G_{i,j} \\
\preceq & ~ \sum_{i < j} D G^{i,j} \\
\preceq & ~ \sum_{i < j} D G \\
\preceq & ~ n^2 D G.
\end{align*}
So, we obtain the bound
\begin{align*}
n^2D \lambda_{2} (G) \geq n,
\end{align*}
which implies our desired statement.
\end{proof}
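As a quick sanity check, the path $P_n$ has diameter $D = n-1$, so
Lemma~\ref{lem:lambda2diam} gives $\lambda_2(\LL_{P_n}) \geq
\frac{1}{n(n-1)}$, essentially recovering the $\frac{1}{n^2}$ bound we
proved for the path above.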
\paragraph{$\lambda_2$ in a tree.}
Since a complete binary tree $T_d$ has diameter $2d \leq 2\log_2(n)$,
by Lemma~\ref{lem:lambda2diam}, $\lambda_2(\LL_{T_d}) \geq
\frac{1}{2n\log_2(n)}$.
Let us give an upper bound on $\lambda_2$ of the tree using a test
vector.
Let $\xx \in \R^n$ have $\xx(1) = 0$ and $\xx(i) = -1$ for $i$ in the left
subtree and $\xx(i) = +1$ in the right subtree.
Then
\begin{align*}
\lambda_2(\LL_{T_d}) = \min_{ \substack{ \vv \neq \veczero \\ \vv^\top
\vecone = 0} } \frac{ \vv^\top \LL \vv}{ \vv^\top \vv}
\leq
\frac{ \xx^\top \LL \xx}{ \xx^\top \xx}
=
\frac{2}{n-1}.
\end{align*}
So, we have shown
$\frac{1}{2n\log_2(n)} \leq \lambda_2(\LL_{T_d})
\leq \frac{2}{n-1}$, and unlike the previous examples, the gap is more
than a constant.
In the exercises for Week 3, I will ask you to improve the lower bound to
$1/(cn)$ for some constant $c$.
% \FloatBarrier
% \bibliographystyle{alpha}
% \bibliography{refs}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "agao21_script"
%%% TeX-engine: luatex
%%% End:
|
open import Agda.Builtin.Equality
variable
A B : Set
f g : A
postulate
F : Set → Set
map : (A → B) → F A → F B
lemma : (x : F A) → map (λ x → f (g x)) x ≡ map f (map g x) → F A
lemma {A = A} {f = f} {g = g} x _ = x
|
= = = Modernisation = = =
|
## Author: Sergio García Prado
## Title: Exercises with Solutions 1
rm(list = ls())
# Infinitesimal generator matrix of the CTMC
Q <- matrix(c(-1/3, 1/3, 0,
1/20, -1/4, 1/5,
1/2, 0, -1/2),
3, 3, byrow = TRUE)
# Replace the last column of Q with ones, so that solving pi %*% A = (0, 0, 1)
# imposes pi %*% Q = 0 together with sum(pi) = 1
(A <- cbind(Q[, 1:(nrow(Q) - 1)], rep(1, nrow(Q))))
# -0.3333333 0.3333333 1
# 0.0500000 -0.2500000 1
# 0.5000000 0.0000000 1
# Stationary distribution: the last row of solve(A)
(stationary <- solve(A)[nrow(A), ])
# 0.348837209302326 0.465116279069767 0.186046511627907
# Expected time to reach the remaining state, starting from state 1,
# via the sub-generator R of the other two states
R <- Q[1:2, 1:2]
(between.rainy <- (- solve(R) %*% rep(1, nrow(R)))[1])
# 8.75
# Embedded (jump) chain: r = P - I, where P holds the jump probabilities
r <- - Q / diag(Q)
(a <- cbind(r[, 1:(nrow(r) - 1)], rep(1, nrow(r))))
# -1.0 1 1
# 0.2 -1 1
# 1.0 0 1
solve(a)[nrow(a), ]
# 0.357142857142857 0.357142857142857 0.285714285714286
|
# MIT License
#
# Copyright (c) 2018 Martin Biel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
@info "Running functionality tests..."
@testset "Stochastic Programs: Functionality" begin
tol = 1e-2
for (model, _scenarios, res, name) in problems
sp = instantiate(model, _scenarios, optimizer = GLPK.Optimizer)
@testset "SP Constructs: $name" begin
optimize!(sp, cache = true)
@test termination_status(sp) == MOI.OPTIMAL
@test isapprox(optimal_decision(sp), res.x̄, rtol = tol)
for i in 1:num_scenarios(sp)
@test isapprox(optimal_recourse_decision(sp, i), res.ȳ[i], rtol = tol)
end
@test isapprox(objective_value(sp), res.VRP, rtol = tol)
@test isapprox(EWS(sp), res.EWS, rtol = tol)
@test isapprox(EVPI(sp), res.EVPI, rtol = tol)
@test isapprox(VSS(sp), res.VSS, rtol = tol)
@test isapprox(EV(sp), res.EV, rtol = tol)
@test isapprox(EEV(sp), res.EEV, rtol = tol)
end
@testset "Inequalities: $name" begin
@test EWS(sp) <= VRP(sp)
@test VRP(sp) <= EEV(sp)
@test VSS(sp) >= 0
@test EVPI(sp) >= 0
@test VSS(sp) <= EEV(sp) - EV(sp)
@test EVPI(sp) <= EEV(sp) - EV(sp)
end
@testset "Copying: $name" begin
sp_copy = copy(sp, optimizer = GLPK.Optimizer)
add_scenarios!(sp_copy, scenarios(sp))
@test num_scenarios(sp_copy) == num_scenarios(sp)
generate!(sp_copy)
@test num_subproblems(sp_copy) == num_subproblems(sp)
optimize!(sp)
optimize!(sp_copy)
@test termination_status(sp_copy) == MOI.OPTIMAL
@test isapprox(optimal_decision(sp_copy), optimal_decision(sp), rtol = tol)
for i in 1:num_scenarios(sp)
@test isapprox(optimal_recourse_decision(sp_copy, i), optimal_recourse_decision(sp, i), rtol = tol)
end
@test isapprox(objective_value(sp_copy), objective_value(sp), rtol = tol)
@test isapprox(EWS(sp_copy), EWS(sp), rtol = tol)
@test isapprox(EVPI(sp_copy), EVPI(sp), rtol = tol)
@test isapprox(VSS(sp_copy), VSS(sp), rtol = tol)
@test isapprox(EV(sp_copy), EV(sp), rtol = tol)
@test isapprox(EEV(sp_copy), EEV(sp), rtol = tol)
end
end
@testset "Sampling" begin
sampled_sp = instantiate(simple, sampler, 100, optimizer = GLPK.Optimizer)
generate!(sampled_sp)
@test num_scenarios(sampled_sp) == 100
@test isapprox(stage_probability(sampled_sp), 1.0)
StochasticPrograms.sample!(sampled_sp, sampler, 100)
generate!(sampled_sp)
@test num_scenarios(sampled_sp) == 200
@test isapprox(stage_probability(sampled_sp), 1.0)
end
@testset "Instant" begin
optimize!(simple_sp)
@test termination_status(simple_sp) == MOI.OPTIMAL
@test isapprox(optimal_decision(simple_sp), simple_res.x̄, rtol = tol)
for i in 1:num_scenarios(simple_sp)
@test isapprox(optimal_recourse_decision(simple_sp, i), simple_res.ȳ[i], rtol = tol)
end
@test isapprox(objective_value(simple_sp), simple_res.VRP, rtol = tol)
@test isapprox(EWS(simple_sp), simple_res.EWS, rtol = tol)
@test isapprox(EVPI(simple_sp), simple_res.EVPI, rtol = tol)
@test isapprox(VSS(simple_sp), simple_res.VSS, rtol = tol)
@test isapprox(EV(simple_sp), simple_res.EV, rtol = tol)
@test isapprox(EEV(simple_sp), simple_res.EEV, rtol = tol)
end
@testset "SMPS" begin
simple_smps = read("io/smps/simple.smps", StochasticProgram, optimizer = GLPK.Optimizer)
optimize!(simple_smps)
@test termination_status(simple_smps) == MOI.OPTIMAL
@test isapprox(optimal_decision(simple_smps), simple_res.x̄, rtol = tol)
for i in 1:num_scenarios(simple_smps)
@test isapprox(optimal_recourse_decision(simple_smps, i), simple_res.ȳ[i], rtol = tol)
end
@test isapprox(objective_value(simple_smps), simple_res.VRP, rtol = tol)
@test isapprox(EWS(simple_smps), simple_res.EWS, rtol = tol)
@test isapprox(EVPI(simple_smps), simple_res.EVPI, rtol = tol)
@test isapprox(VSS(simple_smps), simple_res.VSS, rtol = tol)
@test isapprox(EV(simple_smps), simple_res.EV, rtol = tol)
@test isapprox(EEV(simple_smps), simple_res.EEV, rtol = tol)
end
end
|
/- Spatial Reasoning Problem 13 -/
/- It can be found at: SpatialQs.txt -/
/- (13) A is on the right of B
C is on the left of B.
D is in front of C.
E is in front of B.
Therefore D is on the left of E.-/
/- a. Left (x, y) & Front (z, x) → Left (front (z, x), y), where the right-hand
      side signifies “z is in front of x, all of which is on the left of y.”
   b. Left (x, y) & Front (z, y) → Left (x, front (z, y)), where the right-hand
      side signifies “x is on the left of z which is in front of y.”
   c. Left (x, y) & Left (y, z) → Left (x, left (y, z)).
   d. Left (x, y) ↔ Right (y, x).
   e. Left (front (x, y), z) → Left (x, z) & Left (y, z) & Front (x, y).
   f. Left (x, front (y, z)) → Left (x, y) & Left (x, z) & Front (y, z).
   g. Left (x, left (y, z)) → Left (x, y) & Left (x, z) & Left (y, z).
   h. Left (x, y) → ¬ Right (x, y).
   i. Right (x, y) → ¬ Left (x, y). -/
constant U : Type
constants Left Right Front Behind PositionalAttribute TransitivePositionalAttribute
OppositeDirection : U
constant ins : U → U → Prop
constant orientation : U → U → U → Prop
constant holds : U → U → U → Prop
/- axiom from sumo -/
-- (instance Left PositionalAttribute)
axiom left_positional_attribute : ins Left PositionalAttribute
-- (instance Left PositionalAttribute)
axiom right_positional_attribute : ins Right PositionalAttribute
--(=>
-- (and
-- (orientation ?OBJ1 ?OBJ2 ?DIR)
-- (instance ?DIR PositionalAttribute)
-- (oppositeDirection ?DIR ?OPPDIR))
-- (orientation ?OBJ2 ?OBJ1 ?OPPDIR))
axiom opposite_positional : ∀ OBJ1 OBJ2 DIR OPPDIR,
(orientation OBJ1 OBJ2 DIR)
∧ (ins DIR PositionalAttribute)
∧ (holds OppositeDirection DIR OPPDIR)
→ (orientation OBJ2 OBJ1 OPPDIR)
/- axioms to be added to SUMO -/
-- (instance Front PositionalAttribute)
-- (instance Front TransitivePositionalAttribute)
axiom front_positional_attribute : ins Front PositionalAttribute
axiom front_transitive_positional_attribute : ins Behind TransitivePositionalAttribute
-- (instance Behind PositionalAttribute)
-- (instance Behind TransitivePositionalAttribute)
axiom behind_positional_attribute : ins Behind PositionalAttribute
axiom behind_transitive_positional_attribute : ins Behind TransitivePositionalAttribute
-- there are some axioms analogous to be added,
-- like "right_front_right" or "right_behid_right"
axiom left_front_left : ∀ OBJ1 OBJ2 OBJ3,
(orientation OBJ1 OBJ2 Left)
∧ (orientation OBJ3 OBJ2 Front)
→ (orientation OBJ1 OBJ3 Left)
axiom right_front_right : ∀ OBJ1 OBJ2 OBJ3,
(orientation OBJ1 OBJ2 Right)
∧ (orientation OBJ3 OBJ2 Front)
→ (orientation OBJ1 OBJ3 Right)
-- next axiom was introduced in problem 06
-- (instance North TransitivePositionalAttribute)
axiom transitive_positional_attribute : ∀ A B C P, (orientation A B P)
∧ (orientation B C P)
∧ (ins P TransitivePositionalAttribute)
→ (orientation A C P)
-- (OppositeDirection Left Right)
axiom left_opposite_right : holds OppositeDirection Left Right
-- a reasonable property about opposites directions
axiom opposite_directions : ∀ DIR OPDIR,
(holds OppositeDirection DIR OPDIR)
→ (holds OppositeDirection OPDIR DIR)
/- axioms from problem -/
constants A B C D E : U
axiom a1 : orientation A B Right
axiom a2 : orientation C B Left
axiom a3 : orientation D C Front
axiom a4 : orientation E B Front
/- demonstration start -/
theorem D_is_on_the_left_of_E : orientation D E Left :=
begin
have h1, from (left_front_left _ _ _) ⟨a2,a4⟩,
have h2, from (opposite_positional _ _ _ _)
⟨h1,⟨left_positional_attribute, left_opposite_right⟩⟩,
have h3, from (right_front_right _ _ _) ⟨h2,a3⟩,
have h4, from (opposite_directions _ _) left_opposite_right,
exact (opposite_positional _ _ _ _) ⟨h3,⟨right_positional_attribute, h4⟩⟩
end
|
From Categories Require Import Essentials.Notations.
From Categories Require Import Essentials.Types.
From Categories Require Import Essentials.Facts_Tactics.
From Categories Require Import Category.Main.
From Categories Require Import Functor.Functor Functor.Functor_Ops
Functor.Representable.Hom_Func_Prop.
From Categories Require Import Ext_Cons.Prod_Cat.Prod_Cat.
From Categories Require Import NatTrans.NatTrans NatTrans.Operations.
From Categories Require Import Adjunction.Adjunction Adjunction.Duality
Adjunction.Adj_Facts.
From Categories Require Import KanExt.Global KanExt.Local
KanExt.LocalFacts.ConesToHom KanExt.LocalFacts.HomToCones
KanExt.GlobalDuality.
Local Open Scope functor_scope.
(** This module contains conversion from global to local kan extensions. *)
Section Global_to_Local_Right.
Context {C C' : Category}
(p : C --> C')
(D : Category)
(rke : Right_KanExt p D)
(F : C --> D).
(** The cone which (we will prove) is the local kan extension. *)
Definition Cone_for_LoKan : LoKan_Cone p F :=
{|
cone_apex := (rke _o F)%object;
cone_edge :=
@adj_morph_ex _ _ _ _
(right_kan_ext_adj rke) (rke _o F) F (NatTrans_id _)
|}.
Section Cone_Morph_to_Cone_for_LoKan.
Context (Cn : LoKan_Cone p F).
(** We show that any natural transformation from any cone (the apex functor
of a cone) to (apex functor of) the cone we constructed above remains
the same under the following transformation:
morph : Cn ——––> (rke _o F)
morph ∘_h (NatTrans_id p) : (Cn ∘ p) ————> ((rke _o F) ∘ p)
η_{NatTrans_id (rke _o F)} ∘ (morph ∘_h (NatTrans_id p))
: (Cn ∘ p) ————> F
rke @_a (Cn ∘ p) F (η_{NatTrans_id (rke _o F)}
∘ (morph ∘_h (NatTrans_id p))) :
(rke _o (Cn ∘ p)) ————> (rke _o F)
(rke @_a (Cn ∘ p) F (η_{NatTrans_id (rke _o F)}
∘ (morph ∘_h (NatTrans_id p))))
∘ (Trans (adj_unit (right_kan_ext_adj rke)) Cn) :
Cn ————> (rke _o F)
This result is used to show existence and uniqueness of cone morphisms
from Cn to the cone constructed above.
*)
Lemma Cone_Morph_to_Cone_for_LoKan_adj_unit_rke_id
(morph : (Cn --> ((rke _o)%object F))%nattrans) :
morph =
(
(
(rke @_a)%morphism (Cn ∘ p)%functor F
(
(adj_morph_ex
(right_kan_ext_adj rke) (NatTrans_id ((rke _o) F)%object))
∘ (morph ∘_h (NatTrans_id p))
)
) ∘ (Trans (adj_unit (right_kan_ext_adj rke)) Cn)
)%nattrans.
Proof.
rewrite (@F_compose); cbn.
rewrite NatTrans_compose_assoc.
cbn_rewrite <- (@Trans_com
_ _ _ _
(@adj_unit _ _ _ _ (right_kan_ext_adj rke)) _ _ morph).
rewrite <- NatTrans_compose_assoc.
cbn_rewrite <- (
@adj_morph_com _ _ _ _
(right_kan_ext_adj rke)
_
_
(NatTrans_id ((rke _o)%object F))
).
rewrite NatTrans_id_unit_left.
trivial.
Qed.
(** Given a cone, we construct a cone morph to the cone that we
constructed above. *)
Program Definition Cone_Morph_to_Cone_for_LoKan :
LoKan_Cone_Morph Cn Cone_for_LoKan :=
{|
cone_morph :=
(((rke _a (cone_edge Cn))%morphism)
∘ (Trans (adj_unit (right_kan_ext_adj rke)) Cn))%nattrans
|}.
Next Obligation.
Proof.
match goal with
[|- _ = NatTrans_compose (NatTrans_hor_comp _ ?X) _] =>
apply (@adj_morph_unique _ _ _ _ (right_kan_ext_adj rke) _ _ X); trivial
end.
apply Cone_Morph_to_Cone_for_LoKan_adj_unit_rke_id.
Qed.
End Cone_Morph_to_Cone_for_LoKan.
Section Cone_Morph_to_Cone_for_LoKan_Unique.
Context {Cn : LoKan_Cone p F} (M M' : LoKan_Cone_Morph Cn Cone_for_LoKan).
(** Cone morph to the cone constructed is unique. *)
Theorem Cone_Morph_to_Cone_for_LoKan_Unique : (M = M' :> (_ --> _)%nattrans).
Proof.
rewrite (Cone_Morph_to_Cone_for_LoKan_adj_unit_rke_id Cn M).
rewrite (Cone_Morph_to_Cone_for_LoKan_adj_unit_rke_id Cn M').
do 2 apply f_equal.
set (H := cone_morph_com M'); rewrite (cone_morph_com M) in H; exact H.
Qed.
End Cone_Morph_to_Cone_for_LoKan_Unique.
(** The conversion from global kan extensions to local kan extensions *)
Definition Global_to_Local_Right : Local_Right_KanExt p F :=
{|
LRKE := Cone_for_LoKan;
LRKE_morph_ex := Cone_Morph_to_Cone_for_LoKan;
LRKE_morph_unique := @Cone_Morph_to_Cone_for_LoKan_Unique
|}.
End Global_to_Local_Right.
(** The conversion from global left kan extensions to local left kan extensions
is just the dual of what we just proved. *)
Section Global_to_Local_Left.
Context {C C' : Category}
(p : C --> C')
(D : Category)
(lke : Left_KanExt p D)
(F : C --> D).
Definition Global_to_Local_Left : Local_Left_KanExt p F :=
Global_to_Local_Right _ _ (KanExt_Left_to_Right _ _ lke) (F^op).
End Global_to_Local_Left.
|
C
C
*+ mapcat_enqsr
subroutine mapcat_enqsr(imap,source,program,status)
C ---------------------------------------------------
C
C Enquire the source name for catalogue entry IMAP
C
C Input:
C Map catalogue entry
integer imap
C Returned:
C Source name for entry IMAP
character*(*) source
C Program name
character*(*) program
C Status
integer status
*-
include 'mapcat_cat.inc'
if (status.ne.0) return
call mapcat_chk(imap,'NONE',status)
call mapcat_open(status)
call mapcat_read(imap,status)
source = current_source
program = current_program
call mapcat_err(status,'mapcat_enqsr',' ')
end
|
[STATEMENT]
lemma minimal_antichain_idempotent[simp]: "minimal_antichain (minimal_antichain A) = minimal_antichain A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. minimal_antichain (minimal_antichain A) = minimal_antichain A
[PROOF STEP]
by (auto simp: minimal_antichain_def) |
/-
Copyright (c) 2020 Zhouhang Zhou. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Zhouhang Zhou, Yury Kudryashov
! This file was ported from Lean 3 source module order.filter.indicator_function
! leanprover-community/mathlib commit 8631e2d5ea77f6c13054d9151d82b83069680cb1
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Algebra.IndicatorFunction
import Mathlib.Order.Filter.AtTopBot
/-!
# Indicator function and filters
Properties of indicator functions involving `=ᶠ` and `≤ᶠ`.
## Tags
indicator, characteristic, filter
-/
variable {α β M E : Type _}
open Set Filter
section Zero
variable [Zero M] {s t : Set α} {f g : α → M} {a : α} {l : Filter α}
theorem indicator_eventuallyEq (hf : f =ᶠ[l ⊓ 𝓟 s] g) (hs : s =ᶠ[l] t) :
indicator s f =ᶠ[l] indicator t g :=
(eventually_inf_principal.1 hf).mp <|
hs.mem_iff.mono fun x hst hfg =>
by_cases (fun hxs : x ∈ s => by simp only [*, hst.1 hxs, indicator_of_mem]) fun hxs => by
simp only [indicator_of_not_mem hxs, indicator_of_not_mem (mt hst.2 hxs)]
#align indicator_eventually_eq indicator_eventuallyEq
end Zero
section AddMonoid
variable [AddMonoid M] {s t : Set α} {f g : α → M} {a : α} {l : Filter α}
theorem indicator_union_eventuallyEq (h : ∀ᶠ a in l, a ∉ s ∩ t) :
indicator (s ∪ t) f =ᶠ[l] indicator s f + indicator t f :=
h.mono fun _a ha => indicator_union_of_not_mem_inter ha _
#align indicator_union_eventually_eq indicator_union_eventuallyEq
end AddMonoid
section Order
variable [Zero β] [Preorder β] {s t : Set α} {f g : α → β} {a : α} {l : Filter α}
theorem indicator_eventuallyLE_indicator (h : f ≤ᶠ[l ⊓ 𝓟 s] g) :
indicator s f ≤ᶠ[l] indicator s g :=
(eventually_inf_principal.1 h).mono fun _ => indicator_rel_indicator le_rfl
#align indicator_eventually_le_indicator indicator_eventuallyLE_indicator
end Order
theorem Monotone.tendsto_indicator {ι} [Preorder ι] [Zero β] (s : ι → Set α) (hs : Monotone s)
(f : α → β) (a : α) :
Tendsto (fun i => indicator (s i) f a) atTop (pure <| indicator (⋃ i, s i) f a) := by
by_cases h : ∃ i, a ∈ s i
· rcases h with ⟨i, hi⟩
refine' tendsto_pure.2 ((eventually_ge_atTop i).mono fun n hn => _)
rw [indicator_of_mem (hs hn hi) _, indicator_of_mem ((subset_unionᵢ _ _) hi) _]
· have h' : a ∉ ⋃ i, s i := mt mem_unionᵢ.1 h
rw [not_exists] at h
simpa only [indicator_of_not_mem, *] using tendsto_const_pure
#align monotone.tendsto_indicator Monotone.tendsto_indicator
theorem Antitone.tendsto_indicator {ι} [Preorder ι] [Zero β] (s : ι → Set α) (hs : Antitone s)
(f : α → β) (a : α) :
Tendsto (fun i => indicator (s i) f a) atTop (pure <| indicator (⋂ i, s i) f a) := by
by_cases h : ∃ i, a ∉ s i
· rcases h with ⟨i, hi⟩
refine' tendsto_pure.2 ((eventually_ge_atTop i).mono fun n hn => _)
rw [indicator_of_not_mem _ _, indicator_of_not_mem _ _]
· simp only [mem_interᵢ, not_forall]
exact ⟨i, hi⟩
· intro h
have := hs hn h
contradiction
· push_neg at h
simp only [indicator_of_mem, h, mem_interᵢ.2 h, tendsto_const_pure]
#align antitone.tendsto_indicator Antitone.tendsto_indicator
theorem tendsto_indicator_bunionᵢ_finset {ι} [Zero β] (s : ι → Set α) (f : α → β) (a : α) :
Tendsto (fun n : Finset ι => indicator (⋃ i ∈ n, s i) f a) atTop
(pure <| indicator (unionᵢ s) f a) := by
rw [unionᵢ_eq_unionᵢ_finset s]
refine' Monotone.tendsto_indicator (fun n : Finset ι => ⋃ i ∈ n, s i) _ f a
exact fun t₁ t₂ => bunionᵢ_subset_bunionᵢ_left
#align tendsto_indicator_bUnion_finset tendsto_indicator_bunionᵢ_finset
theorem Filter.EventuallyEq.support [Zero β] {f g : α → β} {l : Filter α} (h : f =ᶠ[l] g) :
Function.support f =ᶠ[l] Function.support g := by
filter_upwards [h]with x hx
rw [eq_iff_iff]
change f x ≠ 0 ↔ g x ≠ 0
rw [hx]
#align filter.eventually_eq.support Filter.EventuallyEq.support
theorem Filter.EventuallyEq.indicator [Zero β] {l : Filter α} {f g : α → β} {s : Set α}
(hfg : f =ᶠ[l] g) : s.indicator f =ᶠ[l] s.indicator g :=
indicator_eventuallyEq (hfg.filter_mono inf_le_left) EventuallyEq.rfl
#align filter.eventually_eq.indicator Filter.EventuallyEq.indicator
theorem Filter.EventuallyEq.indicator_zero [Zero β] {l : Filter α} {f : α → β} {s : Set α}
(hf : f =ᶠ[l] 0) : s.indicator f =ᶠ[l] 0 := by
refine' hf.indicator.trans _
rw [indicator_zero']
#align filter.eventually_eq.indicator_zero Filter.EventuallyEq.indicator_zero
|
theory PreferenceLogicTestsApp2 (*Benzmüller & Fuenmayor, 2020*)
imports PreferenceLogicBasics
begin
(*****Application-specific tests for the value ontology****)
(* EE variant (\<and>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>E\<^sub>E (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>E\<^sub>E B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>E\<^sub>E (C\<^bold>\<and>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>E\<^sub>E (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>E\<^sub>E B)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(* EE variant (\<or>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>E\<^sub>E (A\<^bold>\<or>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>E\<^sub>E B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>E\<^sub>E (C\<^bold>\<or>B))\<rfloor>" by blast
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>E\<^sub>E (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>E\<^sub>E B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>E\<^sub>E A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
(* total EE variant (\<and>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>E\<^sub>E (A\<^bold>\<and>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<and>B) \<preceq>\<^sub>E\<^sub>E A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>E\<^sub>E B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>E\<^sub>E (C\<^bold>\<and>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>E\<^sub>E (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>E\<^sub>E B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<and>B) \<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
(* total EE variant (\<or>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>E\<^sub>E (A\<^bold>\<or>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<or>B) \<preceq>\<^sub>E\<^sub>E A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>E\<^sub>E B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>E\<^sub>E (C\<^bold>\<or>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>E\<^sub>E (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>E\<^sub>E B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<or>B) \<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>E\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<preceq>\<^sub>E\<^sub>E A)\<rfloor>" by blast
(* AE variant (\<and>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>A\<^sub>E (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A\<rfloor>" using rBR by blast (*change wrt. strict*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>E B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>E (C\<^bold>\<and>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>E (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>E B)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>A\<^sub>E A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
(* AE variant (\<or>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>A\<^sub>E (A\<^bold>\<or>B)\<rfloor>" using rBR by blast (*change wrt. strict*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>E B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>E (C\<^bold>\<or>B))\<rfloor>" by blast
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>E (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>E B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>E A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(* total AE variant (\<and>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>A\<^sub>E (A\<^bold>\<and>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>E A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>E B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>E (C\<^bold>\<and>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>E (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>E B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
(* total AE variant (\<or>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>A\<^sub>E (A\<^bold>\<or>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>E A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>E B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>E (C\<^bold>\<or>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>E (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>E B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>A\<^sub>E A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>E A)\<rfloor>" by blast
(* AA variant (\<and>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>A\<^sub>A (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>A B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>A\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
(*--------------------------------------------------*)
lemma "\<lfloor>A \<preceq>\<^sub>A\<^sub>A (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
lemma "\<lfloor>(A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
(* AA variant (\<or>)*)
lemma "\<lfloor>A \<^bold>\<preceq>\<^sub>A\<^sub>A (A\<^bold>\<or>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<^bold>\<preceq>\<^sub>A\<^sub>A B)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<^bold>\<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>(B \<^bold>\<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<^bold>\<preceq>\<^sub>A\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(*---------------------------------------------------*)
lemma "\<lfloor>A \<preceq>\<^sub>A\<^sub>A (A\<^bold>\<or>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A B)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>(B \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(* total AA variant (\<and>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>A\<^sub>A (A\<^bold>\<and>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
(* total AA variant (\<or>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<preceq>\<^sub>A\<^sub>A (A\<^bold>\<or>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>A B) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B))\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<preceq>\<^sub>A\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<preceq>\<^sub>A\<^sub>A B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> (B \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<preceq>\<^sub>A\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<preceq>\<^sub>A\<^sub>A A)\<rfloor>" by blast
(* EA variant (\<and>)*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent - change wrt. strict*)
lemma "\<lfloor>A \<^bold>\<succeq>\<^sub>E\<^sub>A (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<^bold>\<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<and>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<^bold>\<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>(A \<^bold>\<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<^bold>\<succeq>\<^sub>E\<^sub>A B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<^bold>\<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
(*--------------------------------------------*)
lemma "\<lfloor>(A\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>A \<succeq>\<^sub>E\<^sub>A (A\<^bold>\<and>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
(* EA variant (\<or>)*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>A \<^bold>\<succeq>\<^sub>E\<^sub>A (A\<^bold>\<or>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent - change wrt. strict*)
lemma "\<lfloor>(B \<^bold>\<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>((C\<^bold>\<or>B) \<^bold>\<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<^bold>\<succeq>\<^sub>E\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(A \<^bold>\<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<^bold>\<succeq>\<^sub>E\<^sub>A B)\<rfloor>" by blast
lemma "\<lfloor>(A \<^bold>\<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<^bold>\<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(*------------------------------------------------*)
lemma "\<lfloor>(A\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>A \<succeq>\<^sub>E\<^sub>A (A\<^bold>\<or>B)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>(B \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
lemma "\<lfloor>((C\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "\<lfloor>(A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A B)\<rfloor>" by blast
lemma "\<lfloor>(A \<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B))\<rfloor>" nitpick[satisfy] nitpick oops (*contingent*)
(* total EA variant (\<and>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<succeq>\<^sub>E\<^sub>A (A\<^bold>\<and>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<and>B) \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B)) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<and>B))\<rfloor>" by blast
(* total EA variant (\<or>)*)
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>A \<succeq>\<^sub>E\<^sub>A (A\<^bold>\<or>B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(B \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> ((C\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>((C\<^bold>\<or>B) \<succeq>\<^sub>E\<^sub>A A) \<^bold>\<rightarrow> (B \<succeq>\<^sub>E\<^sub>A A)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B)) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A B)\<rfloor>" by blast
lemma "is_total SBR \<Longrightarrow> \<lfloor>(A \<succeq>\<^sub>E\<^sub>A B) \<^bold>\<rightarrow> (A \<succeq>\<^sub>E\<^sub>A (C\<^bold>\<or>B))\<rfloor>" by blast
end
|
-- Andreas, 2017-01-24, issue #2430, reported by nad
-- Regression introduced by updating module parameter substitution
-- when going underAbstraction (80794767db1aceaa78a72e06ad901cfa53f8346d).
record Σ (A : Set) (B : A → Set) : Set where
field
proj₁ : A
proj₂ : B proj₁
open Σ public
Σ-map : {A : Set} {B : Set} {P : A → Set} {Q : B → Set} →
(f : A → B) → (∀ {x} → P x → Q (f x)) →
Σ A P → Σ B Q
Σ-map f g p = record { proj₁ = f (proj₁ p); proj₂ = g (proj₂ p) }
postulate
_≡_ : {A : Set} → A → A → Set
refl : {A : Set} (x : A) → x ≡ x
cong : {A : Set} {B : Set} (f : A → B) {x y : A} →
x ≡ y → f x ≡ f y
cong-refl : {A : Set} {B : Set} (f : A → B) {x : A} →
refl (f x) ≡ cong f (refl x)
subst : {A : Set} (P : A → Set) {x y : A} →
x ≡ y → P x → P y
subst-refl : ∀ {A : Set} (P : A → Set) {x} (p : P x) →
subst P (refl x) p ≡ p
trans : {A : Set} {x y z : A} → x ≡ y → y ≡ z → x ≡ z
module _ (_ : Set) where
postulate
Σ-≡,≡→≡ : {A : Set} {B : A → Set} {p₁ p₂ : Σ A B} →
(p : proj₁ p₁ ≡ proj₁ p₂) →
subst B p (proj₂ p₁) ≡ proj₂ p₂ →
p₁ ≡ p₂
Σ-≡,≡→≡-refl-subst-refl :
∀ {A : Set} {B : A → Set} {p} →
Σ-≡,≡→≡ (refl (proj₁ p)) (subst-refl B (proj₂ p)) ≡ refl p
rejected :
{A₁ A₂ : Set} {B₁ : A₁ → Set} {B₂ : A₂ → Set}
(f : A₁ → A₂) (g : ∀ x → B₁ x → B₂ (f x)) (x : Σ A₁ B₁) →
Σ-≡,≡→≡ (refl _) (subst-refl B₂ _) ≡ cong (Σ-map f (g _)) (refl x)
rejected {B₁ = B₁} {B₂} f g x =
trans {x = Σ-≡,≡→≡ (refl _) (subst-refl B₂ _)}
{z = cong (Σ-map f (g _)) (refl _)}
Σ-≡,≡→≡-refl-subst-refl
(cong-refl _)
|
{-
Part 3: Transport and composition
- Cubical transport
- Subst as a special case of cubical transport
- Path induction from subst?
- Homogeneous composition (hcomp)
- Binary composition of paths as special case of hcomp
-}
{-# OPTIONS --cubical #-}
module Part2 where
open import Part1 public
-- Transport is more complex as ≡ isn't inductively defined (so we
-- can't define it by pattern-matching on p)
transport : {A B : Type ℓ} → A ≡ B → A → B
transport p a = transp (λ i → p i) i0 a
-- This lets us define subst (which is called "transport" in the HoTT book)
subst : {A : Type ℓ} (P : A → Type ℓ') {x y : A} (p : x ≡ y) → P x → P y
subst P p pa = transport (λ i → P (p i)) pa
-- The transp operation reduces differently for different types
-- formers. For paths it reduces to another primitive operation called
-- hcomp.
-- We can also define the J eliminator (aka path induction)
-- TODO: rewrite using subst?
-- TODO: talk about ∧
J : {A : Type ℓ} {B : A → Type ℓ'} {x : A}
(P : (z : A) → x ≡ z → Type ℓ'')
(d : P x refl) {y : A} (p : x ≡ y) → P y p
J P d p = transport (λ i → P (p i) (λ j → p (i ∧ j))) d
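-- Note the interval connective ∧ above: `λ j → p (i ∧ j)` is the path p
-- truncated at i, i.e. a path from x to p i. At i = i0 the motive is
-- P x refl and at i = i1 it is P y p, so transporting d along it yields
-- a proof of P y p.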
-- So J is provable, but it doesn't satisfy computation rule
-- definitionally. This is almost never a problem in practice as the
-- cubical primitives satisfy many new definitional equalities.
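
-- The header above also promises binary composition of paths as a special
-- case of hcomp. A minimal sketch (assuming ℓ, Type and _≡_ come from Part1
-- as above and that the hcomp primitive is in scope; the cubical library
-- calls this operation _∙_):
compPath : {A : Type ℓ} {x y z : A} → x ≡ y → y ≡ z → x ≡ z
compPath {x = x} p q i =
  hcomp (λ j → λ { (i = i0) → x        -- the left endpoint stays at x
                 ; (i = i1) → q j })   -- the right endpoint walks along q
        (p i)                          -- the bottom of the square is p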
|
Formal statement is: lemma eucl_rel_poly_unique: assumes 1: "eucl_rel_poly x y (q1, r1)" assumes 2: "eucl_rel_poly x y (q2, r2)" shows "q1 = q2 \<and> r1 = r2" Informal statement is: If $(q_1, r_1)$ and $(q_2, r_2)$ are both pairs of polynomials that satisfy the Euclidean relation for polynomials, then $q_1 = q_2$ and $r_1 = r_2$. |
/-
Copyright (c) 2022 Frédéric Dupuis. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Frédéric Dupuis
-/
import topology.algebra.module.character_space
import analysis.normed_space.weak_dual
import analysis.normed_space.spectrum
/-!
# Normed algebras
This file contains basic facts about normed algebras.
## Main results
* We show that the character space of a normed algebra is compact using the Banach-Alaoglu theorem.
## TODO
* Show compactness for topological vector spaces; this requires the TVS version of Banach-Alaoglu.
## Tags
normed algebra, character space, continuous functional calculus
-/
variables {𝕜 : Type*} {A : Type*}
namespace weak_dual
namespace character_space
variables [nontrivially_normed_field 𝕜] [normed_ring A]
[normed_algebra 𝕜 A] [complete_space A]
lemma norm_le_norm_one (φ : character_space 𝕜 A) :
‖to_normed_dual (φ : weak_dual 𝕜 A)‖ ≤ ‖(1 : A)‖ :=
continuous_linear_map.op_norm_le_bound _ (norm_nonneg (1 : A)) $
λ a, mul_comm (‖a‖) (‖(1 : A)‖) ▸ spectrum.norm_le_norm_mul_of_mem (apply_mem_spectrum φ a)
instance [proper_space 𝕜] : compact_space (character_space 𝕜 A) :=
begin
rw [←is_compact_iff_compact_space],
have h : character_space 𝕜 A ⊆ to_normed_dual ⁻¹' metric.closed_ball 0 (‖(1 : A)‖),
{ intros φ hφ,
rw [set.mem_preimage, mem_closed_ball_zero_iff],
exact (norm_le_norm_one ⟨φ, ⟨hφ.1, hφ.2⟩⟩ : _), },
exact is_compact_of_is_closed_subset (is_compact_closed_ball 𝕜 0 _) character_space.is_closed h,
end
end character_space
end weak_dual
|
function [nb_sources]=source_number_detection_IC(received_signal,criterion,display)
%HELP source_number_detection_IC
%
%Detection of the number of sources with information criteria. The method
%assumes that the noise is spatially white.
%
%Input: received_signal
%       criterion ('AIC' or 'MDL')
%       (optional) plot the criterion if a third argument is given
%Output: nb_sources
%
%Example: X=randn(3,500); %3 sources
% H=randn(10,3)+i*randn(10,3); %10 receivers
% B=2*(randn(10,500)+i*randn(10,500));
% Y=H*X+B; %MIMO Mixing Model
% source_number_detection_IC(Y,'MDL',1)
%
%Reference: [WAX85] Wax, M. and Kailath, T., "Detection of signals by information
%theoretic criteria", IEEE Transactions on Acoustics, Speech and Signal
%Processing, 1985.
[Nb_receivers,Nb_samples]=size(received_signal);
%Computation of the covariance matrix
covariance=received_signal*received_signal'/Nb_samples;
%eigenvalue decomposition
[U,V]=eig(covariance);
%sort the eigenvalue in the decreasing order
eigenvalues=sort(diag(V),'descend');
%Compute criterion
for k=0:Nb_receivers-1
coef=1/(Nb_receivers-k);
a=coef*sum(eigenvalues(k+1:Nb_receivers));
g=prod(eigenvalues(k+1:Nb_receivers)).^(coef);
akaike_criterion(k+1)=-log(((g/a)^(Nb_samples*(Nb_receivers-k))))+k*(2*Nb_receivers-k);
MDL_criterion(k+1)=-log(((g/a)^(Nb_samples*(Nb_receivers-k))))+0.5*k*(2*Nb_receivers-k)*log(Nb_samples);
end
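%With p=Nb_receivers, N=Nb_samples and candidate order k, the quantities
%computed above are (up to constant factors that do not change the argmin):
%  AIC(k) = -N*(p-k)*log(g(k)/a(k)) + k*(2*p-k)
%  MDL(k) = -N*(p-k)*log(g(k)/a(k)) + 0.5*k*(2*p-k)*log(N)
%where a(k) and g(k) are the arithmetic and geometric means of the p-k
%smallest eigenvalues (see [WAX85]).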
switch criterion
case 'AIC'
criterion_value=akaike_criterion;
case 'MDL'
criterion_value=MDL_criterion;
end
[criterion_value_min,nb_sources]=min(criterion_value);
nb_sources=nb_sources-1; %retrieve 1 because the value k=0 is stocked in the index 1.
if nargin > 2
plot(0:Nb_receivers-1,criterion_value);
grid;
hold on
plot(nb_sources,criterion_value_min,'ro');
hold off
ylabel('Criterion value')
xlabel('Number of sources')
end
%Created by: Vincent Choqueuse, PhD ([email protected])
|
# ## [Example No. 2, brief, standard parameters](@id de_2)
#
# In this example:
# - we obtain the standard input parameters,
# - the direct problem is solved,
# - experimental data are generated.
# The only difference from the [detailed Example No. 1](@ref de_1)
# is that we will use the shortcut functions.
#
# This file will be used in testing.
#
# We set the parameters using the standard function
# [`NonLinearReactionAdvectionDiffusionWithFrontData.dparams`](@ref)
using NonLinearReactionAdvectionDiffusionWithFrontData
a, b, t₀, T, N, M, ε, Xₙ, Tₘ, qₙ, ulₘ, urₘ, u₀ = NonLinearReactionAdvectionDiffusionWithFrontData.dparams();
##
u, XX, TP = solve(u₀, Xₙ, N, Tₘ, M, ε, ulₘ, urₘ, qₙ);
nothing #hide
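# Here `u` is presumably the numerical solution on the `Xₙ` × `Tₘ` grid,
# with `XX` and `TP` auxiliary outputs of `solve` (see the detailed
# Example No. 1 for the meaning of each returned value; this reading is
# inferred from the names, not checked against the package docs).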
# We generate the experimental data with the function
# [`NonLinearReactionAdvectionDiffusionWithFrontData.generate_obs_data`](@ref).
ϕl, ϕr, ϕ, f1_data, f2_data = generate_obs_data(u, Xₙ, N, Tₘ, M, qₙ, ulₘ, urₘ);
nothing # hide
|
/-
Copyright (c) 2021 Damiano Testa. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Damiano Testa
! This file was ported from Lean 3 source module algebra.hom.embedding
! leanprover-community/mathlib commit 448144f7ae193a8990cb7473c9e9a01990f64ac7
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Algebra.Group.Defs
import Mathbin.Logic.Embedding.Basic
/-!
# The embedding of a cancellative semigroup into itself by multiplication by a fixed element.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
variable {R : Type _}
section LeftOrRightCancelSemigroup
#print mulLeftEmbedding /-
/-- The embedding of a left cancellative semigroup into itself
by left multiplication by a fixed element.
-/
@[to_additive
"The embedding of a left cancellative additive semigroup into itself\n by left translation by a fixed element.",
simps]
def mulLeftEmbedding {G : Type _} [LeftCancelSemigroup G] (g : G) : G ↪ G
where
toFun h := g * h
inj' := mul_right_injective g
#align mul_left_embedding mulLeftEmbedding
#align add_left_embedding addLeftEmbedding
-/
#print mulRightEmbedding /-
/-- The embedding of a right cancellative semigroup into itself
by right multiplication by a fixed element.
-/
@[to_additive
"The embedding of a right cancellative additive semigroup into itself\n by right translation by a fixed element.",
simps]
def mulRightEmbedding {G : Type _} [RightCancelSemigroup G] (g : G) : G ↪ G
where
toFun h := h * g
inj' := mul_left_injective g
#align mul_right_embedding mulRightEmbedding
#align add_right_embedding addRightEmbedding
-/
#print mul_left_embedding_eq_mul_right_embedding /-
@[to_additive]
theorem mul_left_embedding_eq_mul_right_embedding {G : Type _} [CancelCommMonoid G] (g : G) :
mulLeftEmbedding g = mulRightEmbedding g := by
ext
exact mul_comm _ _
#align mul_left_embedding_eq_mul_right_embedding mul_left_embedding_eq_mul_right_embedding
#align add_left_embedding_eq_add_right_embedding add_left_embedding_eq_add_right_embedding
-/
end LeftOrRightCancelSemigroup
|
import topology.basic
import topology.compact_open
import data.nat.prime
import data.real.basic
import data.real.irrational
import data.complex.basic
import data.fin.basic
import geometry.euclidean.basic
import analysis.inner_product_space.pi_L2
import algebra.group.defs
import algebra.field.basic
import combinatorics.configuration
import ring_theory.polynomial.basic
import group_theory.free_group
import combinatorics.simple_graph.basic
import ring_theory.integral_closure
import data.fintype.card
import category_theory.category.basic
import ring_theory.discrete_valuation_ring
import group_theory.torsion
import linear_algebra.matrix.charpoly.basic
import algebra.order.absolute_value
import analysis.convex.basic
import topology.uniform_space.uniform_convergence_topology
import topology.sequences
import analysis.normed.group.infinite_sum
import data.nat.choose.sum
import group_theory.specific_groups.cyclic
import group_theory.order_of_element
import analysis.mean_inequalities
import analysis.normed_space.banach
import topology.algebra.continuous_monoid_hom
import linear_algebra.matrix.symmetric
import analysis.inner_product_space.spectrum
import ring_theory.class_group
import ring_theory.dedekind_domain.basic
import ring_theory.principal_ideal_domain
import model_theory.satisfiability
import probability.integration
import ring_theory.simple_module
import category_theory.preadditive.schur
import representation_theory.maschke
import topology.paracompact
import combinatorics.simple_graph.coloring
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
--OUTPUT 1
theorem fin_ab_hom_eq_dual_hom {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H] [group_hom f : G →* H] [group_hom g : G →* H]
(h : ∀ x, f x = g x) :
∀ x, (finsupp.single x : G →ₗ[ℤ] ℤ) ∘ f = (finsupp.single x : G →ₗ[ℤ] ℤ) ∘ g :=
begin
intro x,
have h1 : f x = g x,
from h x,
have h2 : (finsupp.single x : G →ₗ[ℤ] ℤ) ∘ f = (finsupp.single x : G →ₗ[ℤ] ℤ) ∘ g,
from congr_fun h1 _,
exact h2,
--OUTPUT 2
theorem abelian_hom_isomorphism {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun = f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp G H).to_fun = f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp.map f).to_fun = finsupp.map f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp G H).to_fun = finsupp G H := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun = finsupp G H := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun = finsupp.map f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun = finsupp.map f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun = finsupp G H := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun = finsupp G H := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun = finsupp.map f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun = finsupp.map f := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_fun = finsupp G H := sorry
--Prove that Homomorphisms between two finite abelian groups are isomorphic to homomorphisms between their duals in Lean mathlib
theorem abelian_hom_isomorphism_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv_inv {G H : Type*} [fintype G] [fintype H] [add_comm_group G] [add_comm_group H]
(f : G → H) [is_group_hom f] : (finsupp G H).to_fun ≫ (finsupp.map f).to_fun ≫ (finsupp G H).to_
--OUTPUT 3
theorem is_isomorphic_to_hom_of_dual (G : Type) [add_comm_group G] [module ℤ G] [fintype G] [decidable_eq G] [fintype (dual G)] [decidable_eq (dual G)] (H : Type) [add_comm_group H] [module ℤ H] [fintype H] [decidable_eq H] [fintype (dual H)] [decidable_eq (dual H)] (f : G → H) (hf : is_homomorphism f) : (dual G → dual H) ≃ (G → H) :=
begin
haveI := classical.dec_eq G,
haveI := classical.dec_eq H,
haveI := classical.dec_eq (dual G),
haveI := classical.dec_eq (dual H),
haveI := classical.dec_eq (G → H),
haveI := classical.dec_eq (dual G → dual H),
let g : (dual G → dual H) → (G → H) := λ f, f ∘ dual.val,
let h : (G → H) → (dual G → dual H) := λ f, dual.map ∘ f,
have hg : h ∘ g = id,
{
ext,
rw [h, g, dual.map_val, id_comp],
},
have gh : g ∘ h = id,
{
ext,
rw [h, g, dual.map_val, id_comp],
},
exact equiv.mk g h hg gh,
|
[STATEMENT]
theorem (in conservative) disjoint_sets_measure_density_one_tendsto_zero:
fixes P::"'a measure" and A::"nat \<Rightarrow> 'a set"
assumes [measurable]: "\<And>n. A n \<in> sets M"
and "disjoint_family A"
"absolutely_continuous M P" "sets P = sets M"
"emeasure P (space M) \<noteq> \<infinity>"
shows "\<exists>B. lower_asymptotic_density B = 1 \<and> (\<lambda>n. measure P (space M \<inter> (T^^n)-`(A n)) * indicator B n) \<longlonglongrightarrow> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>B. lower_asymptotic_density B = 1 \<and> (\<lambda>n. Sigma_Algebra.measure P (space M \<inter> (T ^^ n) -` A n) * indicat_real B n) \<longlonglongrightarrow> 0
[PROOF STEP]
by (rule cesaro_imp_density_one[OF _ disjoint_sets_measure_Cesaro_tendsto_zero[OF assms]], simp) |
theory ND_FiniteAssms
imports ND
begin
lemma ND_finite_assms: "\<Gamma> \<turnstile> F \<Longrightarrow> \<exists>\<Gamma>'. \<Gamma>' \<subseteq> \<Gamma> \<and> finite \<Gamma>' \<and> (\<Gamma>' \<turnstile> F)"
proof(induction rule: ND.induct)
case (Ax F \<Gamma>) thus ?case by(intro exI[of _ "{F}"]) (simp add: ND.Ax)
next
case (AndI \<Gamma> F G)
from AndI.IH(1) guess \<Gamma>1 .. moreover
from AndI.IH(2) guess \<Gamma>2 .. ultimately
show ?case by(intro exI[where x="\<Gamma>1\<union>\<Gamma>2"]) (force elim: Weaken intro!: ND.AndI)
next
case (CC F \<Gamma>)
from CC.IH guess \<Gamma>' .. note \<Gamma>' = this
thus ?case proof(cases "Not F \<in> \<Gamma>'") text\<open>case distinction: Did we actually use @{term "\<^bold>\<not>F"}?\<close>
case False hence "\<Gamma>' \<subseteq> \<Gamma>" using \<Gamma>' by blast
with \<Gamma>' show ?thesis using BotE by(intro exI[where x="\<Gamma>'"]) fast
next
case True
then obtain \<Gamma>'' where "\<Gamma>' = \<^bold>\<not> F\<triangleright>\<Gamma>''" "\<^bold>\<not> F \<notin> \<Gamma>''" by (meson Set.set_insert)
hence "\<Gamma>'' \<subseteq> \<Gamma>" "finite \<Gamma>''" "\<^bold>\<not> F\<triangleright>\<Gamma>'' \<turnstile> \<bottom>" using \<Gamma>' by auto
thus ?thesis using ND.CC by auto
qed
next
case AndE1 thus ?case by(blast dest: ND.AndE1) next
case AndE2 thus ?case by(blast dest: ND.AndE2)
next
case OrI1 thus ?case by(blast dest: ND.OrI1) next
case OrI2 thus ?case by(blast dest: ND.OrI2)
next
case (OrE \<Gamma> F G H)
from OrE.IH(1) guess \<Gamma>1 .. moreover
from OrE.IH(2) guess \<Gamma>2 .. moreover
from OrE.IH(3) guess \<Gamma>3 ..
note IH = calculation this
let ?w = "\<Gamma>1 \<union> (\<Gamma>2 - {F}) \<union> (\<Gamma>3 - {G})"
from IH have "?w \<turnstile> F \<^bold>\<or> G" using Weaken[OF _ sup_ge1] by metis moreover
from IH have "F\<triangleright>?w \<turnstile> H" "G\<triangleright>?w \<turnstile> H" using Weaken by (metis Un_commute Un_insert_right Un_upper1 Weaken insert_Diff_single)+ ultimately
have "?w \<turnstile> H" using ND.OrE by blast
thus ?case using IH by(intro exI[where x="?w"]) auto
text\<open>Clever evasion of the case distinction made for CC.\<close>
next
case (ImpI F \<Gamma> G)
from ImpI.IH guess \<Gamma>' ..
thus ?case by (intro exI[where x="\<Gamma>' - {F}"]) (force elim: Weaken intro!: ND.ImpI)
next
case (ImpE \<Gamma> F G)
from ImpE.IH(1) guess \<Gamma>1 .. moreover
from ImpE.IH(2) guess \<Gamma>2 .. ultimately
show ?case by(intro exI[where x="\<Gamma>1 \<union> \<Gamma>2"]) (force elim: Weaken intro: ND.ImpE[where F=F])
next
case (NotE \<Gamma> F)
from NotE.IH(1) guess \<Gamma>1 .. moreover
from NotE.IH(2) guess \<Gamma>2 .. ultimately
show ?case by(intro exI[where x="\<Gamma>1 \<union> \<Gamma>2"]) (force elim: Weaken intro: ND.NotE[where F=F])
next
case (NotI F \<Gamma>)
from NotI.IH guess \<Gamma>' ..
thus ?case by(intro exI[where x="\<Gamma>' - {F}"]) (force elim: Weaken intro: ND.NotI[where F=F])
qed
text\<open>We thought that a lemma like this would be necessary for the ND completeness by SC completeness proof
(this lemma shows that if we made an ND proof, we can always limit ourselves to a finite set of assumptions --
and thus put all the assumptions into one formula).
That is not the case, since in the completeness proof,
we assume a valid entailment and have to show (the existence of) a derivation.
The author hopes that his misunderstanding can help the reader's understanding.\<close>
corollary ND_no_assms:
assumes "\<Gamma> \<turnstile> F"
obtains \<Gamma>' where "set \<Gamma>' \<subseteq> \<Gamma> \<and> ({} \<turnstile> \<^bold>\<And>\<Gamma>' \<^bold>\<rightarrow> F)"
proof(goal_cases)
case 1
from ND_finite_assms[OF assms] obtain \<Gamma>' where "\<Gamma>'\<subseteq>\<Gamma>" "finite \<Gamma>'" "\<Gamma>' \<turnstile> F" by blast
from \<open>finite \<Gamma>'\<close> obtain G where \<Gamma>'[simp]: "\<Gamma>' = set G" using finite_list by blast
with \<open>\<Gamma>'\<subseteq>\<Gamma>\<close> have "set G \<subseteq> \<Gamma>" by clarify
moreover from \<open>\<Gamma>' \<turnstile> F\<close> have "{} \<turnstile> \<^bold>\<And> G \<^bold>\<rightarrow> F" unfolding \<Gamma>' AssmBigAnd .
ultimately show ?case by(intro 1[where \<Gamma>'=G] conjI)
qed
end
|
Require Coq.Classes.EquivDec.
Require Coq.Lists.List.
Import List.ListNotations.
Require Import Leapfrog.Syntax.
Require Import Leapfrog.FinType.
Require Import Leapfrog.Sum.
Require Import Leapfrog.ConfRel.
Require Import Leapfrog.Notations.
Require Import Leapfrog.BisimChecker.
Require Import Coq.Program.Equality.
Open Scope p4a.
Inductive header :=
| HdrEth0
| HdrEth1
| HdrVLAN0
| HdrVLAN1
| HdrIPv4
| HdrICMP
| HdrTCP
| HdrUDP
| HdrGRE0
| HdrGRE1
| HdrGRE2
| HdrNVGRE
| HdrVXLAN
| HdrARP
| HdrARPIP.
Definition sz (h: header) : nat :=
match h with
| HdrEth0
| HdrEth1 => 112
| HdrVLAN0
| HdrVLAN1 => 160
| HdrIPv4 => 160
| HdrICMP => 32
| HdrTCP => 160
| HdrUDP => 160
| HdrGRE0
| HdrGRE1
| HdrGRE2 => 32
| HdrNVGRE => 32
| HdrVXLAN => 64
| HdrARP => 64
| HdrARPIP => 160
end.
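(* Header sizes are given in bits: e.g. 112 bits is the 14-byte Ethernet
   header and 160 bits a 20-byte (option-free) IPv4 header. *)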
Scheme Equality for header.
Global Instance header_eqdec: EquivDec.EqDec header eq := header_eq_dec.
Global Instance header_finite: @Finite header _ header_eq_dec.
Proof.
solve_finiteness.
Defined.
Inductive state: Type :=
| ParseEth0 (* start state *)
| ParseEth1
| ParseVLAN0
| ParseVLAN1
| ParseICMP
| ParseIPv4
| ParseTCP
| ParseUDP
| ParseGRE0
| ParseGRE1
| ParseGRE2
| ParseNVGRE
| ParseVXLAN
| ParseARP
| ParseARPIP.
Scheme Equality for state.
Global Instance state_eqdec: EquivDec.EqDec state eq := state_eq_dec.
Global Instance state_finite: @Finite state _ state_eq_dec.
Proof.
solve_finiteness.
Defined.
Definition states (s: state) : P4A.state state sz :=
match s with
| ParseEth0 =>
{| st_op := extract(HdrEth0);
st_trans := transition select (| (EHdr HdrEth0)[111--96] |)
{{ [| hexact 0x8100 |] ==> inl ParseVLAN0 ;;;
[| hexact 0x9100 |] ==> inl ParseVLAN0 ;;;
[| hexact 0x9200 |] ==> inl ParseVLAN0 ;;;
[| hexact 0x9300 |] ==> inl ParseVLAN0 ;;;
[| hexact 0x0800 |] ==> inl ParseIPv4 ;;;
[| hexact 0x0806 |] ==> inl ParseARP ;;;
[| hexact 0x8035 |] ==> inl ParseARP ;;;
reject }}
|}
| ParseVLAN0 =>
{| st_op := extract(HdrVLAN0) ;
st_trans := transition select (| (EHdr HdrVLAN0)[159--144] |)
{{ [| hexact 0x8100 |] ==> inl ParseVLAN1 ;;;
[| hexact 0x9100 |] ==> inl ParseVLAN1 ;;;
[| hexact 0x9200 |] ==> inl ParseVLAN1 ;;;
[| hexact 0x9300 |] ==> inl ParseVLAN1 ;;;
[| hexact 0x0800 |] ==> inl ParseIPv4 ;;;
[| hexact 0x0806 |] ==> inl ParseARP ;;;
[| hexact 0x8035 |] ==> inl ParseARP ;;;
reject }}
|}
| ParseVLAN1 =>
{| st_op := extract(HdrVLAN1) ;
st_trans := transition select (| (EHdr HdrVLAN1)[159--144] |)
{{ [| hexact 0x0800 |] ==> inl ParseIPv4 ;;;
[| hexact 0x0806 |] ==> inl ParseARP ;;;
[| hexact 0x8035 |] ==> inl ParseARP ;;;
reject }}
|}
| ParseIPv4 =>
{| st_op := extract(HdrIPv4);
st_trans := transition select (| (EHdr HdrIPv4)[79--72] |)
{{ [| hexact 6 |] ==> inl ParseTCP;;;
[| hexact 17 |] ==> inl ParseUDP;;;
[| hexact 47 |] ==> inl ParseGRE0;;;
accept
}}
|}
| ParseUDP =>
{| st_op := extract(HdrUDP);
st_trans := transition select (| (EHdr HdrUDP)[31--16] |)
{{ [| hexact 0xFFFF |] ==> inl ParseVXLAN;;;
accept
}}
|}
| ParseICMP =>
{| st_op := extract(HdrICMP);
st_trans := transition accept |}
| ParseTCP =>
{| st_op := extract(HdrTCP);
st_trans := transition accept |}
| ParseGRE0 =>
{| st_op := extract(HdrGRE0);
st_trans := transition select (| (EHdr HdrGRE0)[2--2], (EHdr HdrGRE0)[31--16] |)
{{ [| hexact 0x1, hexact 0x6558 |] ==> inl ParseNVGRE;;;
[| hexact 0x1, hexact 0x6559 |] ==> inl ParseGRE1;;;
accept
}}
|}
| ParseGRE1 =>
{| st_op := extract(HdrGRE1);
st_trans := transition select (| (EHdr HdrGRE1)[31--16] |)
{{ [| hexact 0x16558 |] ==> inl ParseNVGRE;;;
[| hexact 0x16559 |] ==> inl ParseGRE2;;;
accept
}}
|}
| ParseGRE2 =>
{| st_op := extract(HdrGRE2);
st_trans := transition select (| (EHdr HdrGRE2)[31--16] |)
{{ [| hexact 0x16558 |] ==> inl ParseNVGRE;;;
[| hexact 0x16559 |] ==> reject;;;
accept
}}
|}
| ParseNVGRE =>
{| st_op := extract(HdrNVGRE);
st_trans := transition (inl ParseEth1) |}
| ParseVXLAN =>
{| st_op := extract(HdrVXLAN);
st_trans := transition (inl ParseEth1) |}
| ParseEth1 =>
{| st_op := extract(HdrEth1);
st_trans := transition accept |}
| ParseARP =>
{| st_op := extract(HdrARP);
st_trans := transition select (| (EHdr HdrARP)[31--16] |)
{{ [| hexact 0x0800 |] ==> inl ParseARPIP;;;
accept
}}
|}
| ParseARPIP =>
{| st_op := extract(HdrARPIP);
st_trans := transition accept
|}
end.
Program Definition aut: Syntax.t state sz :=
{| t_states := states |}.
Solve Obligations with (destruct s || destruct h; vm_compute; Lia.lia).
|
If $p$ is a nonzero polynomial over a field, then the product of its prime factors is equal to $p$ divided by its leading coefficient. |
Chronic Kidney Illness (CKD) is recognized by a decrease in Glomerular Filtration Rate (GFR) because of a lower in functioning of nephrons throughout the kidney and/or proteinuria additional to break down of the glomerular filtration buffer and incomplete tubular reabsorption. World-vast the prevalence of both CKD and finish degree kidney disease.
Medical tourism is a growingly fashionable practice that includes worldwide travel among people who are in search of medical care. A rising number of North People go to Mexico or Costa Rica identical to a variety of Europeans travel to South East Asia for a variety of remedies comparable to Botox, plastic surgery, intensive dental care, knew replacements and even transplants. Through the years, international locations such as Brazil, Mexico, Costa Rica, Thailand and India have come to change into fashionable tourist destinations.
Within the case of OAB, the muscle tissues of the bladder contract involuntarily although the bladder is probably not full. This causes a sudden urge to urinate in the particular person. Nevertheless, sometimes this urge is totally sensory and the bladder muscle mass should not really contracting. Still, the urge causes the particular person to must rush to the restroom, often resulting in accidents.
Bioequivalence is defined as the absence of a significant difference in the rate at which the active ingredient of pharmaceutical equivalents or pharmaceutical alternatives becomes available at the site of action. Additional data may need to be submitted to fulfill regulatory requirements for bioequivalence, in addition to data from bioequivalence studies.
One of the biggest challenges any singer faces is learning how to breathe correctly. As if that's not hard enough, they also need to know how to control their breathing so it is used to optimum effect. When we are born, our breathing is naturally correct: a baby can scream, yell and breathe using their lungs to optimum effect because they do so without conscious thought. However, as we get older some people become lazy in their habits and only use the upper part of their lungs, taking a shallow breath instead of a normal one. To understand how correct breathing and breath control work, you first need to understand the process the body uses to operate.
! ============================================================================
! Name : declaration.f90
! Author : Kensuke Konishi
! Version : 0.0.1
! Copyright : It is Complicated.
! Description : Declaration part for 2D space.
! ============================================================================
double precision :: DELTAX, DELTAY, DELTAT
integer :: NT
integer, allocatable, dimension (:,:) :: &
MASK_CELL, MASK_NODE, MASK1X, MASK1Y, MASK2Y, MASK2X
double precision, allocatable, dimension(:,:) :: &
u0x, u0y, u1x, u1y, u2x, u2y, corr_ux, corr_uy
double precision, allocatable :: C (:,:,:,:)
double precision, allocatable, dimension (:,:) :: rho, INV_MASS
double precision, dimension (3,3) :: gx, gy
double precision, allocatable :: wavelet(:)
integer :: NX_source, NY_source
integer :: itime, irec
double precision, allocatable, dimension (:) :: X_rec, Y_rec
integer, allocatable, dimension (:) :: NX_rec, NY_rec
double precision,allocatable :: inpo_rec(:,:,:)
character(len=32) :: snap_file
character(len=100),allocatable :: outfname_rec(:)
double precision, allocatable :: buffer_ux(:,:), buffer_uy(:,:)
|
From mathcomp Require Import ssreflect.
Require Import set_notations.
Set Implicit Arguments.
Unset Strict Implicit.
Import Prenex Implicits.
Section UniqueExample.
Variable U:Type.
Import SetNotations.
Goal forall (A B: Ensemble U),
(exists! x, x ∈ A) ->
(forall (x:U),(x∈ A) -> (x ∈ B)) -> exists x, (x∈ A) /\ (x ∈ B).
Proof.
move => A B H H0.
destruct H.
unfold unique in H.
exists x.
inversion H.
split.
apply H1.
apply H0.
apply H1.
Qed.
End UniqueExample. |
module Program
import CommonTestingStuff
export
beforeString : String
beforeString = "before coop"
long : PrintString m => CanSleep m => Zippable m => (offset : Time) -> m String
long offset = do
printTime offset "long proc, first"
for 5 $ do
printTime offset "long proc, before 1000"
sleepFor 1.seconds
printTime offset "long proc, before 2000"
sleepFor 2.seconds
printTime offset "long proc, last"
pure "long"
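-- A computation that never finishes on its own; raced against `long` in both
-- orders below to check that `<|>` completes as soon as either branch does.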
infinite : CanSleep m => m a
infinite = forever $ sleepFor 1.millis
export
program : PrintString m => CanSleep m => Zippable m => Alternative m => m Unit
program = do
offset <- currentTime
printTime offset "start"
res <- long offset <|> infinite
printTime offset "top: \{res}"
sleepFor 1.seconds
printTime offset "------"
res <- infinite <|> long offset
printTime offset "top: \{res}"
printTime offset "end"
|
\chapter{Abstrations of Jaseci}
\section{Graphs, the Friend that Never Gets Invited to the Party}
There's something quite strange that has happened with our \gls{common languages} over the years, ...decades. When you look at it, almost every data structure we programmers use to solve problems can be modeled formally as a graph, or a special case of a graph (save perhaps hash tables). Think about it: stacks, lists, queues, trees, heaps, and yes, even graphs, can be modeled with graphs. But, lo and behold, no common language utilizes the formal semantics of a graph as its first order abstraction for data or memory. I mean, isn't it a bit odd that practically every data structure covered in the language-agnostic classic foundational work \textit{Introduction to Algorithms}~\cite{intro_to_algo} can most naturally be reasoned about as a graph, yet no common language has built in this primitive or been designed around it? I submit that the graph semantic is stupidly rich, very nice for us humans to reason about, and, most importantly for the purpose of Jaseci, inherently well suited for conceptualizing and reasoning about computational problems, especially AI problems.
\par
There are a few arguments that may pop into mind at this point of my conjecture.
\begin{itemize}
\item ``Well there are graph libraries in my favorite language that implement graph semantics, why would I need a language to force the concept upon me?''
or
\item ``Duh! Interacting with all data and memory through graphical abstractions will make the language ssllooowww as hell since memory in hardware is essentially a big array, what is this dude talking about!?!?''
\end{itemize}
\par
For the former of these two challenges, I counter with two points. First, the core design of a language is always based upon its inherent abstractions. With graphs not being one such abstraction, the language's design will not be optimized to empower programmers to nimbly do gymnastics with the rich language semantics that correspond to the rich semantics graphs offer (you'll see what I mean in later chapters). And second, libraries suck (see~\ref{rant:librariessuck}).
\par
For the latter question, I'd respond, ``Have you SEEN the kind of abstractions in modern languages!?!? It's ridiculous. Let's look at python dictionaries, actually scratch that, let's keep it simple and look at dynamic typing in general. The runtime complexity to support dynamic typing is most certainly higher than what would be needed to support graph semantics. Duh right back at'ya!''
\subsection{Yes, But What Kind of Graphs}
There are many categories of graphs to consider when thinking about the abstractions to support in Jaseci. There are rules to be defined as to the available semantics of the graphs. Should all graphs be \gls{directed graphs}? Should we allow the creation of \gls{undirected graphs}? What about parallel edges or \gls{multigraphs}: are those explicitly expressible or discouraged / banned? Can we express \gls{hypergraphs}? And what combination of these graphical semantics should be able to be manifested and manipulated through the programming model? At this point I can feel your eyes getting droopy and your mind moving into that intermediary state between conscious and sleeping, so let me cut to the answer.
\par
\printfigGraphTypes
In Jaseci, we elect to assume the following semantics:
\begin{enumerate}
\item Graphs are directed (as per Figure~\ref{fig:directedgraph}) with a special case of a doubly directed edge type which can be utilized practically as an undirected edge (imagine fusing the two edges between nodes 3 and 4 in the figure).
\item Both nodes and edges have their own distinct identities (i.e., an edge isn't representable as a pairing of two nodes). This point is important as both nodes and edges can have \gls{contexts}.
\item Multigraphs (i.e., parallel edges) are allowed, including self-loop edges (as per Figure~\ref{fig:multigraph}).
\item Graphs are not required to be acyclic.
\item No hypergraphs, as I wouldn't want Jaseci programmers heads to explode.
\end{enumerate}
\emph{As an aside, I would describe Jaseci graphs as strictly unstrict directed multigraphs that leverage the semantics of parallel edges to create a layman's `undirected edge' by shorthanding two directed edges pointed in opposite directions between the same two nodes.}
\par
\begin{nerd}
I'd formally describe a Jaseci Graph as a $7$-tuple $(N,E,C,s,t,c_N,c_E)$, where
\begin{enumerate}
\item $N$ is the set of nodes in a graph
\item $E$ is the set of edges in a graph
\item $C$ is the set of all contexts
\item $s$: $E \rightarrow N$, maps each edge to its source node
\item $t$: $E \rightarrow N$, maps each edge to its target node
\item $c_N$: $N \rightarrow C$, maps nodes to contexts
\item $c_E$: $E \rightarrow C$, maps edges to contexts
\end{enumerate}
An undirected edge can then be formed with a pair of edges $(x, y)$ if three conditions are met,
\begin{enumerate}
\item $x, y \in E$
\item $s(x) = t(y)$, and $s(y) = t(x)$
\item $c_E(x) = c_E(y)$
\end{enumerate}
\end{nerd}
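\par
For example, take two nodes $n_1, n_2 \in N$ and two edges $x, y \in E$ with $s(x) = n_1$, $t(x) = n_2$, $s(y) = n_2$, $t(y) = n_1$, and $c_E(x) = c_E(y)$. All three conditions hold, so the pair $(x, y)$ can be treated for all practical purposes as a single undirected edge between $n_1$ and $n_2$ (exactly the fusing of the two edges between nodes 3 and 4 alluded to above).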
\par
If you happened to read that formal definition and didn't slip into a deep coma, you may be wondering ``Whoa, what was that context stuff that came outta nowhere! What's this guy trying to do here, sneaking a new concept in as if it was already introduced and described?''
\par
Worry not friend, lets discuss.
\subsection{Putting it All Into Context}
A key principle of Jaseci is to reshape and reimagine how we view data and memory. We do so by fusing the concept of data with the intuitive and rich semantics of graphs as the lowest level primitive to view memory.
\begin{nerd}
A context is a representation of data that can be expressed simply as a $3$-tuple $(\sum_K,\sum_V,p_K)$, where
\begin{enumerate}
\item $\sum_K$ is a finite alphabet of keys
\item $\sum_V$ is a finite alphabet of values
\item $p_K$ is the pairing of keys to values
\end{enumerate}
\end{nerd}
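\par
To make that concrete, a context for a hypothetical `person' node might be $(\{name, age\}, \{Alice, 42\}, \{name \mapsto Alice, age \mapsto 42\})$: a key alphabet, a value alphabet, and the pairing between them. (The particular keys and values here are, of course, made up purely for illustration.)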
\section{Walkers}
\section{Abilities}
\section{Other Abstractions Not Yet Actualized in Current Jaseci}
|
Formal statement is: lemma homotopic_paths_nearby_explicit: assumes \<section>: "path g" "path h" "pathstart h = pathstart g" "pathfinish h = pathfinish g" and no: "\<And>t x. \<lbrakk>t \<in> {0..1}; x \<notin> S\<rbrakk> \<Longrightarrow> norm(h t - g t) < norm(g t - x)" shows "homotopic_paths S g h" Informal statement is: If $g$ and $h$ are paths with the same endpoints, and $h$ is closer to $g$ than to any point outside $S$, then $g$ and $h$ are homotopic in $S$. |
#SYNOPSIS
#calculates kinship, inbreeding coefficients
#AUTHOR
# Isaak Y Tecle ([email protected])
options(echo = FALSE)
library(methods)
library(rrBLUP)
library(plyr)
library(stringr)
#library(lme4)
library(randomForest)
library(parallel)
library(genoDataFilter)
library(dplyr)
library(tibble)
library(rlang)
library(jsonlite)
library(data.table)
allArgs <- commandArgs()
inputFiles <- scan(grep("input_files", allArgs, value = TRUE),
what = "character")
outputFiles <- scan(grep("output_files", allArgs, value = TRUE),
what = "character")
genoData <- c()
createGenoData <- function(inputFiles) {
genoFiles <- grep("genotype_data", inputFiles, value = TRUE)
genoMetaData <- c()
filteredGenoFile <- c()
if (length(genoFiles) > 1) {
genoData <- combineGenoData(genoFiles)
genoMetaData <- genoData$trial
genoData$trial <- NULL
} else {
genoFile <- genoFiles
genoData <- fread(genoFile,
header = TRUE,
na.strings = c("NA", " ", "--", "-", "."))
if (is.null(genoData)) {
filteredGenoFile <- grep("filtered_genotype_data_", genoFile, value = TRUE)
genoData <- fread(filteredGenoFile, header = TRUE)
}
genoData <- unique(genoData, by = 'V1')
genoData <- data.frame(genoData)
genoData <- column_to_rownames(genoData, 'V1')
}
if (is.null(genoData)) {
stop("There is no genotype dataset.")
q("no", 1, FALSE)
} else {
##genoDataFilter::filterGenoData
genoData <- convertToNumeric(genoData)
genoData <- filterGenoData(genoData, maf=0.01)
genoData <- roundAlleleDosage(genoData)
message("No. of geno missing values, ", sum(is.na(genoData)))
if (sum(is.na(genoData)) > 0) {
genoData <- na.roughfix(genoData)
}
genoData <- data.frame(genoData)
}
}
genoData <- createGenoData(inputFiles)
genoData <- genoData[order(row.names(genoData)), ]
#change genotype coding from [0, 1, 2] to [-1, 0, 1], as expected by A.mat
genoTrCode <- grep("2", genoData[1, ], value = TRUE)
if(length(genoTrCode) != 0) {
genoData <- genoData - 1
}
relationshipMatrixFile <- grep("relationship_matrix_adjusted_table", outputFiles, value = TRUE)
relationshipMatrixJsonFile <- grep("relationship_matrix_adjusted_json", outputFiles, value = TRUE)
message('matrix file ', relationshipMatrixFile)
message('json file ', relationshipMatrixJsonFile)
inbreedingFile <- grep('inbreeding_coefficients', outputFiles, value=TRUE)
aveKinshipFile <- grep('average_kinship', outputFiles, value=TRUE)
message('inbreeding file ', inbreedingFile)
message('ave file ', aveKinshipFile)
relationshipMatrix <- c()
inbreeding <- c()
aveKinship <- c()
relationshipMatrixJson <- c()
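# A.mat() from rrBLUP computes the additive (genomic) relationship matrix from
# the [-1, 0, 1] marker matrix; the small value added to its diagonal below is
# a jitter, presumably to keep the matrix numerically positive definite.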
relationshipMatrix <- A.mat(genoData)
diag(relationshipMatrix) <- diag(relationshipMatrix) + 1e-6
genos <- rownames(relationshipMatrix)
relationshipMatrix <- data.frame(relationshipMatrix)
colnames(relationshipMatrix) <- genos
rownames(relationshipMatrix) <- genos
relationshipMatrix <- relationshipMatrix %>%
rownames_to_column('genotypes') %>%
mutate_if(is.numeric, round, 3) %>%
column_to_rownames('genotypes')
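# The diagonal of the additive relationship matrix is approximately 1 + f, where
# f is the inbreeding coefficient, so subtracting 1 recovers f per genotype.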
inbreeding <- diag(data.matrix(relationshipMatrix))
inbreeding <- inbreeding - 1
diag(relationshipMatrix) <- inbreeding
relationshipMatrix <- relationshipMatrix %>% replace(., . < 0, 0)
inbreeding <- inbreeding %>% replace(., . < 0, 0)
inbreeding <- data.frame(inbreeding)
inbreeding <- inbreeding %>%
rownames_to_column('genotypes') %>%
rename(Inbreeding = inbreeding) %>%
arrange(Inbreeding) %>%
mutate_at('Inbreeding', round, 3) %>%
column_to_rownames('genotypes')
aveKinship <- data.frame(apply(relationshipMatrix, 1, mean))
aveKinship <- aveKinship %>%
rownames_to_column('genotypes') %>%
rename(Mean_kinship = contains('apply')) %>%
arrange(Mean_kinship) %>%
mutate_at('Mean_kinship', round, 3) %>%
column_to_rownames('genotypes')
relationshipMatrixJson <- relationshipMatrix
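# The kinship matrix is symmetric, so the upper triangle is blanked to NA and
# only the lower triangle is serialized, presumably to shrink the JSON payload.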
relationshipMatrixJson[upper.tri(relationshipMatrixJson)] <- NA
#relationshipMatrixJson <- data.frame(relationshipMatrixJson)
relationshipMatrixList <- list(labels = names(relationshipMatrixJson),
values = relationshipMatrixJson)
relationshipMatrixJson <- jsonlite::toJSON(relationshipMatrixList)
#if (file.info(relationshipMatrixFile)$size == 0) {
fwrite(relationshipMatrix,
file = relationshipMatrixFile,
row.names = TRUE,
sep = "\t",
quote = FALSE,
)
#}
#if (file.info(relationshipMatrixJsonFile)$size == 0) {
write(relationshipMatrixJson,
file = relationshipMatrixJsonFile,
)
#}
message('inbreedingfile ', inbreedingFile)
message('ave file', aveKinshipFile)
message('kinshipfile ', relationshipMatrixFile)
#if (file.info(inbreedingFile)$size == 0) {
fwrite(inbreeding,
file = inbreedingFile,
row.names = TRUE,
sep = "\t",
quote = FALSE,
)
#}
#if (file.info(aveKinshipFile)$size == 0) {
fwrite(aveKinship,
file = aveKinshipFile,
row.names = TRUE,
sep = "\t",
quote = FALSE,
)
#}
message("Done.")
q(save = "no", runLast = FALSE)
|
subroutine prcndi(noutpt,nttyo)
c
c This subroutine writes legal statements and disclaimers to the
c output and screen files.
c
c This subroutine is called by:
c
c EQPT/eqpt.f
c EQ3NR/eq3nr.f
c EQ6/eq6.f
c
c-----------------------------------------------------------------------
c
c Input:
c
c noutpt = the unit number of the output file
c nttyo = the unit number of the screen file
c
c Output:
c
c None
c
c-----------------------------------------------------------------------
c
implicit none
c
c-----------------------------------------------------------------------
c
c Calling sequence variable declarations.
c
integer noutpt,nttyo
c
c-----------------------------------------------------------------------
c
c Local variable declarations.
c
c None
c
c-----------------------------------------------------------------------
c
c Write notice of applicable statements and disclaimers.
c
write (noutpt,1000)
write (nttyo,1000)
1000 format(' This work is subject to additional statements and',
$ /' disclaimers which may be found in the README.txt file',
$ /' included in the EQ3/6 software transmittal package.',//)
c
c Write additional copyright notice paragraph.
c
c write (noutpt,1010)
c write (nttyo,1010)
c
1010 format(' This work was produced at the University of California,',
$ /' Lawrence Livermore National Laboratory (UC LLNL) under',
$ /' contract no. W-7405-ENG-48 between the U.S. Department of',
$ /' Energy (DOE) and The Regents of the University of California',
$ /' (University) for the operation of UC LLNL. Copyright is',
$ /' reserved to the University for purposes of controlled',
$ /' dissemination, commercialization through formal licensing,',
$ /' or other disposition under terms of Contract 48; DOE',
$ /' policies, regulations, and orders; and U.S. statutes.',//)
c
c Write standard LLNL disclaimer.
c
c write (noutpt,1020)
c write (nttyo,1020)
c
1020 format(24x,'DISCLAIMER',//
$ ' This computer code was prepared as an account of work',
$ /' sponsored by an agency of the United States Government.',
$ /' Neither the United States Government nor the University of',
$ /' California nor any of their employees, makes any warranty,',
$ /' express or implied, or assumes any liability or responsi-',
$ /' bility for the accuracy, completeness, or usefulness of any',
$ /' information, apparatus, product, or process disclosed, or',
$ /' represents that its use would not infringe privately-owned',
$ /' rights. Reference herein to any specific commercial,',
$ /' product, process, or service by trade name, trademark,',
$ /' manufacturer, or otherwise, does not necessarily constitute',
$ /' or imply its endorsement, recommendation, or favoring by the',
$ /' United States Government or the University of California.',
$ /' The views and opinions of authors expressed herein do not',
$ /' necessarily state or reflect those of the United States',
$ /' government or the University of California, and shall not',
$ /' be used for advertising or product endorsement purposes.'//)
c
end
|
The computer was built as a research and demonstration project by the ASTRA group of researchers at the Vision Lab in the University of Antwerp in Belgium, one of the researchers being Joost <unk>. Unlike other modern supercomputers such as the Cray Jaguar and the IBM Roadrunner, which cost millions of euros, the Fastra II only uses consumer hardware, costing €6,000 in total.
|
# This file contains the definition for the PowerPlants struct.
mutable struct PowerPlants
unit
node
technology
fuel
capacity
efficiency
emission
varcost
mc
mc_fuel
mc_co2
pmax
pmin
rup
rdn
function PowerPlants(powerplants_df::DataFrame, avail_powerplants_df::DataFrame, mustrun_powerplants_df::DataFrame, fuelcost_df::DataFrame)
        P = Symbol.(powerplants_df[!, 1]) # P = the set of plant units
powerplants_dict = df_to_dict_with_id(powerplants_df)
# Marginal generation cost from fuel cost, efficiency, variable cost
# and CO2 price
fuelcost_dict = df_to_dict(fuelcost_df)
mc = Dict{Symbol, Array{Float64, 1}}()
mc_fuel = Dict{Symbol, Array{Float64, 1}}()
mc_co2 = Dict{Symbol, Array{Float64, 1}}()
for p in P
f = Symbol(powerplants_dict[:fuel][p])
fuelcost = fuelcost_dict[f]
η = powerplants_dict[:efficiency][p]
co_price = fuelcost_dict[:CO2]
emission = powerplants_dict[:emission][p]
vc = powerplants_dict[:varcost][p]
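            # Marginal cost: fuel price divided by efficiency, plus CO2 price
            # times the emission factor, plus variable O&M cost (assuming
            # consistent units across the input data, e.g. EUR/MWh).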
mc[p] = fuelcost / η + co_price * emission .+ vc
mc_fuel[p] = fuelcost / η
mc_co2[p] = co_price * emission
end
# Maximum power output based on capacity and availability
pmax_df = copy(avail_powerplants_df)
for p in P
pmax_df[!, p] = avail_powerplants_df[!, p] .* powerplants_dict[:capacity][p]
end
pmax_dict = df_to_dict(pmax_df)
# Must-run obligation/pmin based on time series
pmin_df = copy(mustrun_powerplants_df)
for p in P
pmin_df[!, p] = avail_powerplants_df[!, p] .* mustrun_powerplants_df[!, p] .* powerplants_dict[:capacity][p]
end
pmin_dict = df_to_dict(pmin_df)
return new(P,
powerplants_dict[:node],
powerplants_dict[:technology],
powerplants_dict[:fuel],
powerplants_dict[:capacity],
powerplants_dict[:efficiency],
powerplants_dict[:emission],
powerplants_dict[:varcost],
mc,
mc_fuel,
mc_co2,
pmax_dict,
pmin_dict,
powerplants_dict[:rup],
powerplants_dict[:rdn],
)
end
end
|
PROGRAM ecmwf_case1
INTEGER :: jl
INTEGER :: jm
!$acc kernels
!$acc loop
DO jl = 1 , 10 , 1
PRINT * ,"1st jl loop body"
!$acc loop
DO jm = 1 , 10 , 1
IF ( .TRUE. ) THEN
PRINT * ,"2nd jm/jl loop body"
END IF
END DO
!$acc loop
DO jm = 1 , 10 , 1
IF ( .TRUE. ) THEN
PRINT * ,"4th jm/jl loop body"
END IF
END DO
PRINT * ,"5th jl loop body"
END DO
!$claw loop-fusion group(g3)
DO jm = 1 , 10 , 1
PRINT * ,"3rd jm loop body"
END DO
!$acc end kernels
END PROGRAM ecmwf_case1
|
## Release the Kraken!
```python
# The next library we're going to look at is called Kraken, which was developed by Université
# PSL in Paris. It's actually based on a slightly older code base, OCRopus. You can see how the
# flexible open-source licenses allow new ideas to grow by building upon older ideas. And, in
# this case, I fully support the idea that the Kraken - a mythical massive sea creature - is the
# natural progression of an octopus!
#
# What we are going to use Kraken for is to detect lines of text as bounding boxes in a given
# image. The biggest limitation of tesseract is the lack of a layout engine inside of it. Tesseract
# expects to be using fairly clean text, and gets confused if we don't crop out other artifacts.
# It's not bad, but Kraken can help us out by segmenting pages. Lets take a look.
#
# Please note that Kraken is only supported on Linux and Mac OS X; it is not supported on Windows.
# Documentation and Installation Notes can be found at: https://pypi.org/project/kraken/
```
```python
!pip install --pre coremltools
```
Requirement already satisfied: coremltools in c:\users\mosae\anaconda3\lib\site-packages (5.0)
Requirement already satisfied: sympy in c:\users\mosae\anaconda3\lib\site-packages (from coremltools) (1.8)
Requirement already satisfied: numpy>=1.14.5 in c:\users\mosae\anaconda3\lib\site-packages (from coremltools) (1.21.2)
Requirement already satisfied: protobuf>=3.1.0 in c:\users\mosae\anaconda3\lib\site-packages (from coremltools) (3.19.1)
Requirement already satisfied: tqdm in c:\users\mosae\anaconda3\lib\site-packages (from coremltools) (4.59.0)
Requirement already satisfied: packaging in c:\users\mosae\anaconda3\lib\site-packages (from coremltools) (20.9)
Requirement already satisfied: pyparsing>=2.0.2 in c:\users\mosae\anaconda3\lib\site-packages (from packaging->coremltools) (2.4.7)
Requirement already satisfied: mpmath>=0.19 in c:\users\mosae\anaconda3\lib\site-packages (from sympy->coremltools) (1.2.1)
WARNING: You are using pip version 21.2.4; however, version 21.3.1 is available.
You should consider upgrading via the 'c:\users\mosae\anaconda3\python.exe -m pip install --upgrade pip' command.
```python
# First, we'll take a look at the kraken module itself
import kraken
help(kraken)
```
Help on package kraken:
NAME
kraken - entry point for kraken functionality
PACKAGE CONTENTS
binarization
ketos
kraken
lib (package)
linegen
pageseg
repo
rpred
serialization
transcribe
DATA
absolute_import = _Feature((2, 5, 0, 'alpha', 1), (3, 0, 0, 'alpha', 0...
division = _Feature((2, 2, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0), 1310...
print_function = _Feature((2, 6, 0, 'alpha', 2), (3, 0, 0, 'alpha', 0)...
FILE
c:\users\mosae\anaconda3\lib\site-packages\kraken\__init__.py
```python
# There isn't much of a discussion here, but there are a number of sub-modules that look
# interesting. I spent a bit of time on their website, and I think the pageseg module, which
# handles all of the page segmentation, is the one we want to use. Lets look at it
from kraken import pageseg
help(pageseg)
```
```python
# So it looks like there are a few different functions we can call, and the segment
# function looks particularly appropriate. I love how expressive this library is on the
# documentation front -- I can see immediately that we are working with PIL.Image files,
# and the author has even indicated that we need to pass in either a binarized (e.g. '1')
# or grayscale (e.g. 'L') image. We can also see that the return value is a dictionary
# object with two keys, "text_direction" which will return to us a string of the
# direction of the text, and "boxes" which appears to be a list of tuples, where each
# tuple is a box in the original image.
#
# Lets try this on the image of text. I have a simple bit of text in a file called
# download.png, which is a two column page from a newspaper on campus here
from PIL import Image
im=Image.open("download.png")
# Lets display the image inline
display(im)
# Lets now convert it to black and white and segment it up into lines with kraken
bounding_boxes=pageseg.segment(im.convert('1'))['boxes']
# And lets print those lines to the screen
print(bounding_boxes)
```
```python
# Ok, pretty simple two column text and then a list of lists which are the bounding boxes of
# lines of that text. Lets write a little routine to try and see the effects a bit more
# clearly. I'm going to clean up my act a bit and write real documentation too, it's a good
# practice
def show_boxes(img):
'''Modifies the passed image to show a series of bounding boxes on an image as run by kraken
:param img: A PIL.Image object
:return img: The modified PIL.Image object
'''
# Lets bring in our ImageDraw object
from PIL import ImageDraw
# And grab a drawing object to annotate that image
drawing_object=ImageDraw.Draw(img)
# We can create a set of boxes using pageseg.segment
bounding_boxes=pageseg.segment(img.convert('1'))['boxes']
# Now lets go through the list of bounding boxes
for box in bounding_boxes:
# An just draw a nice rectangle
drawing_object.rectangle(box, fill = None, outline ='red')
# And to make it easy, lets return the image object
return img
# To test this, lets use display
display(show_boxes(Image.open("download.png")))
```
```python
# Not bad at all! It's interesting to see that kraken isn't completely sure what to do with this
# two column format. In some cases, kraken has identified a line in just a single column, while
# in other cases kraken has spanned the line marker all the way across the page. Does this matter?
# Well, it really depends on our goal. In this case, I want to see if we can improve a bit on this.
#
# So we're going to go a bit off script here. While this week of lectures is about libraries, the
# goal of this last course is to give you confidence that you can apply your knowledge to actual
# programming tasks, even if the library you are using doesn't quite do what you want.
#
# I'd like to pause the video for the moment and collect your thoughts. Looking at the image above,
# with the two column example and red boxes, how do you think we might modify this image to improve
# kraken's ability to detect text lines?
```
```python
# Thanks for sharing your thoughts, I'm looking forward to seeing the breadth of ideas that everyone
# in the course comes up with. Here's my partial solution -- while looking through the kraken docs on
# the pageseg() function I saw that there are a few parameters we can supply in order to improve
# segmentation. One of these is the black_colseps parameter. If set to True, kraken will assume that
# columns will be separated by black lines. This isn't our case here, but, I think we have all of the
# tools to go through and actually change the source image to have a black separator between columns.
#
# The first step is that I want to update the show_boxes() function. I'm just going to do a quick
# copy and paste from the above but add in the black_colseps=True parameter
def show_boxes(img):
'''Modifies the passed image to show a series of bounding boxes on an image as run by kraken
:param img: A PIL.Image object
:return img: The modified PIL.Image object
'''
# Lets bring in our ImageDraw object
from PIL import ImageDraw
# And grab a drawing object to annotate that image
drawing_object=ImageDraw.Draw(img)
# We can create a set of boxes using pageseg.segment
bounding_boxes=pageseg.segment(img.convert('1'), black_colseps=True)['boxes']
# Now lets go through the list of bounding boxes
for box in bounding_boxes:
# An just draw a nice rectangle
drawing_object.rectangle(box, fill = None, outline ='red')
# And to make it easy, lets return the image object
return img
```
```python
# The next step is to think of the algorithm we want to apply to detect a white column separator.
# In experimenting a bit I decided that I only wanted to add the separator if the space was
# at least 25 pixels wide, which is roughly the width of a character, and six lines high. The
# width is easy, lets just make a variable
char_width=25
# The height is harder, since it depends on the height of the text. I'm going to write a routine
# to calculate the average height of a line
def calculate_line_height(img):
'''Calculates the average height of a line from a given image
:param img: A PIL.Image object
:return: The average line height in pixels
'''
# Lets get a list of bounding boxes for this image
bounding_boxes=pageseg.segment(img.convert('1'))['boxes']
    # Each box is a tuple of (left, top, right, bottom) so the height is just bottom - top
# So lets just calculate this over the set of all boxes
height_accumulator=0
for box in bounding_boxes:
height_accumulator=height_accumulator+box[3]-box[1]
# this is a bit tricky, remember that we start counting at the upper left corner in PIL!
# now lets just return the average height
# lets change it to the nearest full pixel by making it an integer
return int(height_accumulator/len(bounding_boxes))
# And lets test this with the image with have been using
line_height=calculate_line_height(Image.open("readonly/two_col.png"))
print(line_height)
```
```python
# Ok, so the average height of a line is 31.
# Now, we want to scan through the image - looking at each pixel in turn - to determine if there
# is a block of whitespace. How big of a block should we look for? That's a bit more of an art
# than a science. Looking at our sample image, I'm going to say an appropriate block should be
# one char_width wide, and six line_heights tall. But, I honestly just made this up by eyeballing
# the image, so I would encourage you to play with values as you explore.
# Lets create a new box called gap box that represents this area
gap_box=(0,0,char_width,line_height*6)
gap_box
```
```python
# It seems we will want to have a function which, given a pixel in an image, can check to see
# if that pixel has whitespace to the right and below it. Essentially, we want to test to see
# if the pixel is the upper left corner of something that looks like the gap_box. If so, then
# we should insert a line to "break up" this box before sending to kraken
#
# Lets call this new function gap_check
def gap_check(img, location):
'''Checks the img in a given (x,y) location to see if it fits the description
of a gap_box
:param img: A PIL.Image file
:param location: A tuple (x,y) which is a pixel location in that image
:return: True if that fits the definition of a gap_box, otherwise False
'''
# Recall that we can get a pixel using the img.getpixel() function. It returns this value
# as a tuple of integers, one for each color channel. Our tools all work with binarized
# images (black and white), so we should just get one value. If the value is 0 it's a black
# pixel, if it's white then the value should be 255
#
# We're going to assume that the image is in the correct mode already, e.g. it has been
# binarized. The algorithm to check our bounding box is fairly easy: we have a single location
# which is our start and then we want to check all the pixels to the right of that location
# up to gap_box[2]
for x in range(location[0], location[0]+gap_box[2]):
# the height is similar, so lets iterate a y variable to gap_box[3]
for y in range(location[1], location[1]+gap_box[3]):
# we want to check if the pixel is white, but only if we are still within the image
if x < img.width and y < img.height:
# if the pixel is white we don't do anything, if it's black, we just want to
# finish and return False
if img.getpixel((x,y)) != 255:
return False
# If we have managed to walk all through the gap_box without finding any non-white pixels
# then we can return true -- this is a gap!
return True
```
```python
# Alright, we have a function to check for a gap, called gap_check. What should we do once
# we find a gap? For this, lets just draw a line in the middle of it. Lets create a new function
def draw_sep(img,location):
'''Draws a line in img in the middle of the gap discovered at location. Note that
this doesn't draw the line in location, but draws it at the middle of a gap_box
starting at location.
:param img: A PIL.Image file
:param location: A tuple(x,y) which is a pixel location in the image
'''
# First lets bring in all of our drawing code
from PIL import ImageDraw
drawing_object=ImageDraw.Draw(img)
# next, lets decide what the middle means in terms of coordinates in the image
x1=location[0]+int(gap_box[2]/2)
# and our x2 is just the same thing, since this is a one pixel vertical line
x2=x1
# our starting y coordinate is just the y coordinate which was passed in, the top of the box
y1=location[1]
# but we want our final y coordinate to be the bottom of the box
y2=y1+gap_box[3]
drawing_object.rectangle((x1,y1,x2,y2), fill = 'black', outline ='black')
# and we don't have anything we need to return from this, because we modified the image
```
```python
# Now, lets try it all out. This is pretty easy, we can just iterate through each pixel
# in the image, check if there is a gap, then insert a line if there is.
def process_image(img):
'''Takes in an image of text and adds black vertical bars to break up columns
:param img: A PIL.Image file
:return: A modified PIL.Image file
'''
# we'll start with a familiar iteration process
for x in range(img.width):
for y in range(img.height):
# check if there is a gap at this point
if (gap_check(img, (x,y))):
# then update image to one which has a separator drawn on it
draw_sep(img, (x,y))
# and for good measure we'll return the image we modified
return img
# Lets read in our test image and convert it through binarization
i=Image.open("readonly/two_col.png").convert("L")
i=process_image(i)
display(i)
#Note: This will take some time to run! Be patient!
```
```python
# Not bad at all! The effect at the bottom of the image is a bit unexpected to me, but it makes
# sense. You can imagine that there are several ways we might try and control this. Lets see how
# this new image works when run through the kraken layout engine
display(show_boxes(i))
```
```python
# Looks like that is pretty accurate, and fixes the problem we faced. Feel free to experiment
# with different settings for the gap heights and width and share in the forums. You'll notice though
# that the method we created is really quite slow, which is a bit of a problem if we wanted to use
# this on larger text. But I wanted to show you how you can mix your own logic and work with
# libraries you're using. Just because Kraken didn't work perfectly, doesn't mean we can't
# build something more specific to our use case on top of it.
#
# I want to end this lecture with a pause and to ask you to reflect on the code we've written
# here. We started this course with some pretty simple use of libraries, but now we're
# digging in deeper and solving problems ourselves with the help of these libraries. Before we
# go on to our last library, how well prepared do you think you are to take your python
# skills out into the wild?
```
## Comparing Image Data Structures
```python
# OpenCV supports reading of images in most file formats, such as JPEG, PNG, and TIFF. Most image and
# video analysis requires converting images into grayscale first. This simplifies the image and reduces
# noise, allowing for improved analysis. Let's write some code that reads an image of a person, Floyd
# Mayweather and converts it into greyscale.
# First we will import the open cv package cv2
import cv2 as cv
# We'll load the floyd.jpg image
img = cv.imread('readonly/floyd.jpg')
# And we'll convert it to grayscale using the cvtColor image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Now, before we get to the result, lets talk about docs. Just like tesseract, opencv is an external
# package written in C++, and the docs for python are really poor. This is unfortunately quite common
# when python is being used as a wrapper. Thankfully, the web docs for opencv are actually pretty good,
# so hit the website docs.opencv.org when you want to learn more about a particular function. In this
# case cvtColor converts from one color space to another, and we are converting our image to grayscale.
# Of course, we already know at least two different ways of doing this, using binarization and PIL
# color space conversions.
# Lets inspect this object that has been returned.
import inspect
inspect.getmro(type(gray))
```
```python
# We see that it is of type ndarray, which is a fundamental list type coming from the numerical
# python project. That's a bit surprising - up until this point we have been used to working with
# PIL.Image objects. OpenCV, however, wants to represent an image as a two dimensional sequence
# of bytes, and the ndarray, which stands for n dimensional array, is the ideal way to do this.
# Lets look at the array contents.
gray
```
```python
# The array is shown here as a list of lists, where the inner lists are filled with integers.
# The dtype=uint8 definition indicates that each of the items in an array is an 8 bit unsigned
# integer, which is very common for black and white images. So this is a pixel by pixel definition
# of the image.
#
# The display package, however, doesn't know what to do with this image. So lets convert it
# into a PIL object to render it in the browser.
from PIL import Image
# PIL can take an array of data with a given color format and convert this into a PIL object.
# This is perfect for our situation, as the PIL color mode, "L" is just an array of luminance
# values in unsigned integers
image = Image.fromarray(gray, "L")
display(image)
```
```python
# Lets talk a bit more about images for a moment. Numpy arrays are multidimensional. For
# instance, we can define an array in a single dimension:
import numpy as np
single_dim = np.array([25, 50 , 25, 10, 10])
# In an image, this is analagous to a single row of 5 pixels each in grayscale. But actually,
# all imaging libraries tend to expect at least two dimensions, a width and a height, and to
# show a matrix. So if we put the single_dim inside of another array, this would be a two
# dimensional array with element in the height direction, and five in the width direction
double_dim = np.array([single_dim])
double_dim
```
```python
# This should look pretty familiar, it's a lot like a list of lists! Lets see what this new
# two dimensional array looks like if we display it
display(Image.fromarray(double_dim, "L"))
```
```python
# Pretty unexciting - it's just a little line. Five pixels in a row to be exact, of different
# levels of black. The numpy library has a nice attribute called shape that allows us to see how
# many dimensions big an array is. The shape attribute returns a tuple that shows the height of
# the image, by the width of the image
double_dim.shape
```
```python
# Lets take a look at the shape of our initial image which we loaded into the img variable
img.shape
```
```python
# This image has three dimensions! That's because it has a width, a height, and what's called
# a color depth. In this case, the color is represented as an array of three values. Lets take a
# look at the color of the first pixel
first_pixel=img[0][0]
first_pixel
```
```python
# Here we see that the color value is provided in full RGB using an unsigned integer. This
# means that each color can have one of 256 values, and the total number of unique colors
# that can be represented by this data is 256 * 256 *256 which is roughly 16 million colors.
# We call this 24 bit color, which is 8+8+8.
#
# If you find yourself shopping for a television, you might notice that some expensive models
# are advertised as having 10 bit or even 12 bit panels. These are televisions where each of
# the red, green, and blue color channels are represented by 10 or 12 bits instead of 8. For
# ten bit panels this means they are capable of over 1 billion colors, and 12 bit panels are
# capable of over 68 billion colors!
```
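Just to sanity check those color counts, here is a quick computation (nothing here is OpenCV-specific, it's just arithmetic):
```python
# Each of the three channels (R, G, B) can hold 2**bits values, so the total
# number of representable colors is (2**bits)**3 == 2**(3*bits)
for bits in (8, 10, 12):
    print(f"{bits}-bit panels: {(2**bits)**3:,} colors")
# 8-bit  -> 16,777,216 colors (~16 million, i.e. 24-bit color)
# 10-bit -> 1,073,741,824 colors (~1 billion)
# 12-bit -> 68,719,476,736 colors (~68 billion)
```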
```python
# We're not going to talk much more about color in this course, but it's a fun subject. Instead,
# lets go back to this array representation of images, because we can do some interesting things
# with this.
#
# One of the most common things to do with an ndarray is to reshape it -- to change the number
# of rows and columns that are represented so that we can do different kinds of operations.
# Here is our original two dimensional image
print("Original image")
print(gray)
# If we wanted to represent that as a one dimensional image, we just call reshape
print("New image")
# And reshape takes the image as the first parameter, and a new shape as the second
image1d=np.reshape(gray,(1,gray.shape[0]*gray.shape[1]))
print(image1d)
```
```python
# So, why are we talking about these nested arrays of bytes, we were supposed to be talking
# about OpenCV as a library. Well, I wanted to show you that often libraries working on the
# same kind of principles, in this case images stored as arrays of bytes, are not representing
# data in the same way in their APIs. But, by exploring a bit you can learn how the internal
# representation of data is stored, and build routines to convert between formats.
#
# For instance, remember in the last lecture when we wanted to look for gaps in an image so
# that we could draw lines to feed into kraken? Well, we used PIL to do this, using getpixel()
# to look at individual pixels and see what the luminosity was, then ImageDraw.rectangle to
# actually fill in a black bar separator. This was a nice high level API, and let us write
# routines to do the work we wanted without having to understand too much about how the images
# were being stored. But it was computationally very slow.
#
# Instead, we could write the code to do this using matrix features within numpy. Lets take
# a look.
import cv2 as cv
# We'll load the 2 column image
img = cv.imread('readonly/two_col.png')
# And we'll convert it to grayscale using the cvtColor image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
```
```python
# Now, remember how slicing on a list works: if you have a list of numbers such as
# a=[0,1,2,3,4,5] then a[2:4] will return the sublist of numbers at positions 2 and 3,
# since the end index is exclusive - and don't forget that lists start indexing at 0!
# If we have a two dimensional array, we can slice out a smaller piece of that using the
# format a[2:4,1:3]. You can think of this as first slicing along the rows dimension, then
# in the columns dimension. So in this example, that would be a matrix of rows 2, and 3,
# and columns 1, and 2. Here's a look at our image.
gray[2:4,1:3]
```
```python
# So we see that it is all white. We can use this as a "window" and move it around
# our big image.
#
# Finally, the ndarray library has lots of matrix functions which are generally very fast
# to run. One that we want to consider in this case is count_nonzero(), which just returns
# the number of entries in the matrix which are not zero.
np.count_nonzero(gray[2:4,1:3])
```
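Before we move on, here is a rough sketch (my own, assuming the image has been binarized to just 0 and 255) of how the earlier gap_check() could be re-expressed in this matrix style. Treat it as a starting point for the optional assignment, not a finished solution:
```python
import numpy as np

def gap_check_np(gray, location, box=(25, 31 * 6)):
    '''A sketch of the earlier gap_check() re-expressed with numpy. gray is a
    binarized 2D ndarray holding only 0 (black) and 255 (white), location is
    an (x,y) tuple, and box is a (width, height) tuple. Unlike the PIL
    version, windows that run off the image edge are simply rejected.
    '''
    x, y = location
    w, h = box
    # numpy indexes rows first, so y comes before x in the slice
    window = gray[y:y + h, x:x + w]
    if window.shape != (h, w):
        # the window runs off the edge of the image
        return False
    # the window is a gap only if it contains no black (zero) pixels
    return np.count_nonzero(window == 0) == 0
```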
```python
# Ok, the last benefit of going to this low level approach to images is that we can change
# pixels very fast as well. Previously we were drawing rectangles and setting a fill and line
# width. This is nice if you want to do something like change the color of the fill from the
# line, or draw complex shapes. But we really just want a line here. That's really easy to
# do - we just want to change a number of luminosity values from 255 to 0.
#
# As an example, lets create a big white matrix
white_matrix=np.full((12,12),255,dtype=np.uint8)
display(Image.fromarray(white_matrix,"L"))
white_matrix
```
```python
# looks pretty boring, it's just a giant white square we can't see. But if we want, we can
# easily color a column to be black
white_matrix[:,6]=np.full((1,12),0,dtype=np.uint8)
display(Image.fromarray(white_matrix,"L"))
white_matrix
```
```python
# And that's exactly what we wanted to do. So, why do it this way, when it seems so much
# more low level? Really, the answer is speed. This paradigm of using matricies to store
# and manipulate bytes of data for images is much closer to how low level API and hardware
# developers think about storing files and bytes in memory.
#
# How much faster is it? Well, that's up to you to discover; there's an optional assignment
# for this week to convert our old code over into this new format, to compare both the
# readability and speed of the two different approaches.
```
## OpenCV
```python
# Ok, we're just about at the project for this course. If you reflect on the specialization
# as a whole you'll realize that you started with probably little or no understanding of python,
# progressed through the basic control structures and libraries included with the language
# with the help of a digital textbook, moved on to more high level representations of data
# and functions with objects, and now started to explore third party libraries that exist for
# python which allow you to manipulate and display images. This is quite an achievement!
#
# You have also no doubt found that as you have progressed the demands on you to engage in self-
# discovery have also increased. Where the first assignments were maybe straightforward, the
# ones in this week require you to struggle a bit more with planning and debugging code as
# you develop.
#
# But, you've persisted, and I'd like to share with you just one more set of features before
# we head over to a project. The OpenCV library contains mechanisms to do face detection on
# images. The technique used is based on Haar cascades, which is a machine learning approach.
# Now, we're not going to go into the machine learning bits, we have another specialization on
# Applied Data Science with Python which you can take after this if you're interested in that topic.
# But here we'll treat OpenCV like a black box.
#
# OpenCV comes with trained models for detecting faces, eyes, and smiles which we'll be using.
# You can train models for detecting other things - like hot dogs or flutes - and if you're
# interested in that I'd recommend you check out the Open CV docs on how to train a cascade
# classifier: https://docs.opencv.org/3.4/dc/d88/tutorial_traincascade.html
# However, in this lecture we just want to use the current classifiers and see if we can detect
# portions of an image which are interesting.
#
# First step is to load opencv and the XML-based classifiers
import cv2 as cv
face_cascade = cv.CascadeClassifier('readonly/haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier('readonly/haarcascade_eye.xml')
```
```python
# Ok, with the classifiers loaded, we now want to try and detect a face. Lets pull in the
# picture we played with last time
img = cv.imread('readonly/floyd.jpg')
# And we'll convert it to grayscale using the cvtColor image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# The next step is to use the face_cascade classifier. I'll let you go explore the docs if you
# would like to, but the norm is to use the detectMultiScale() function. This function returns
# a list of objects as rectangles. The first parameter is an ndarray of the image.
faces = face_cascade.detectMultiScale(gray)
# And lets just print those faces out to the screen
faces
```
```python
faces.tolist()[0]
```
```python
# The resulting rectangles are in the format of (x,y,w,h) where x and y denote the upper
# left hand point for the image and the width and height represent the bounding box. We know
# how to handle this in PIL
from PIL import Image
# Lets create a PIL image object
pil_img=Image.fromarray(gray,mode="L")
# Now lets bring in our drawing object
from PIL import ImageDraw
# And lets create our drawing context
drawing=ImageDraw.Draw(pil_img)
# Now lets pull the rectangle out of the faces object
rec=faces.tolist()[0]
# Now we just draw a rectangle around the bounds
drawing.rectangle(rec, outline="white")
# And display
display(pil_img)
```
```python
# So, not quite what we were looking for. What do you think went wrong?
# Well, a quick double check of the docs and it is apparent that OpenCV is returning the coordinates
# as (x,y,w,h), while PIL.ImageDraw is looking for (x1,y1,x2,y2). Looks like an easy fix
# Wipe our old image
pil_img=Image.fromarray(gray,mode="L")
# Setup our drawing context
drawing=ImageDraw.Draw(pil_img)
# And draw the new box
drawing.rectangle((rec[0],rec[1],rec[0]+rec[2],rec[1]+rec[3]), outline="white")
# And display
display(pil_img)
```
```python
# We see the face detection works pretty good on this image! Note that it's apparent that this is
# not head detection, but that the haarcascades file we used is looking for eyes and a mouth.
# Lets try this on something a bit more complex, lets read in our MSI recruitment image
img = cv.imread('readonly/msi_recruitment.gif')
# And lets take a look at that image
display(Image.fromarray(img))
```
```python
# Whoa, what's that error about? It looks like there is an error on a line deep within the PIL
# Image.py file, and it is trying to call an internal private member called __array_interface__
# on the img object, but this object is None
#
# It turns out that the root of this error is that OpenCV can't work with Gif images. This is
# kind of a pain and unfortunate. But we know how to fix that right? One was is that we could
# just open this in PIL and then save it as a png, then open that in open cv.
#
# Lets use PIL to open our image
pil_img=Image.open('readonly/msi_recruitment.gif')
# now lets convert it to greyscale for opencv, and get the bytestream
open_cv_version=pil_img.convert("L")
# now lets just write that to a file
open_cv_version.save("msi_recruitment.png")
```
```python
# Ok, now that the conversion of format is done, lets try reading this back into opencv
cv_img=cv.imread('msi_recruitment.png')
# We don't need to color convert this, because we saved it as grayscale
# lets try and detect faces in that image
faces = face_cascade.detectMultiScale(cv_img)
# Now, we still have our PIL color version in a gif
pil_img=Image.open('readonly/msi_recruitment.gif')
# Set our drawing context
drawing=ImageDraw.Draw(pil_img)
# For each item in faces, lets surround it with a red box
for x,y,w,h in faces:
# That might be new syntax for you! Recall that faces is a list of rectangles in (x,y,w,h)
# format, that is, a list of lists. Instead of having to do an iteration and then manually
# pull out each item, we can use tuple unpacking to pull out individual items in the sublist
# directly to variables. A really nice python feature
#
# Now we just need to draw our box
drawing.rectangle((x,y,x+w,y+h), outline="white")
display(pil_img)
```
```python
# What happened here!? We see that we have detected faces, and that we have drawn boxes
# around those faces on the image, but that the colors have gone all weird! This, it turns
# out, has to do with color limitations for gif images. In short, a gif image has a very
# limited number of colors. This is called a color pallette after the pallette artists
# use to mix paints. For gifs the pallette can only be 256 colors -- but they can be *any*
# 256 colors. When a new color is introduced, is has to take the space of an old color.
# In this case, PIL adds white to the pallette but doesn't know which color to replace and
# thus messes up the image.
#
# Who knew there was so much to learn about image formats? We can see what mode the image
# is in with the .mode attribute
pil_img.mode
```
```python
# We can see a list of modes in the PILLOW documentation, and they correspond with the
# color spaces we have been using. For the moment though, lets change back to RGB, which
# represents color as a three byte tuple instead of in a palette.
# Lets read in the image
pil_img=Image.open('readonly/msi_recruitment.gif')
# Lets convert it to RGB mode
pil_img = pil_img.convert("RGB")
# And lets print out the mode
pil_img.mode
```
```python
# Ok, now lets go back to drawing rectangles. Lets get our drawing object
drawing=ImageDraw.Draw(pil_img)
# And iterate through the faces sequence, tuple unpacking as we go
for x,y,w,h in faces:
# And remember this is width and height so we have to add those appropriately.
drawing.rectangle((x,y,x+w,y+h), outline="white")
display(pil_img)
```
```python
# Awesome! We managed to detect a bunch of faces in that image. Looks like we have missed
# four faces. In the machine learning world we would call these false negatives - something
# which the machine thought was not a face (so a negative), but that it was incorrect on.
# Consequently, we would call the actual faces that were detected as true positives -
# something that the machine thought was a face and it was correct on. This leaves us with
# false positives - something the machine thought was a face but it wasn't. We see there are
# two of these in the image, picking up shadow patterns or textures in shirts and matching
# them with the haarcascades. Finally, we have true negatives, or the set of all possible
# rectangles the machine learning classifier could consider where it correctly indicated that
# the result was not a face. In this case there are many many true negatives.
```
```python
# There are a few ways we could try and improve this, and really, it requires a lot of
# experimentation to find good values for a given image. First, lets create a function
# which will plot rectangles for us over the image
def show_rects(faces):
#Lets read in our gif and convert it
pil_img=Image.open('readonly/msi_recruitment.gif').convert("RGB")
# Set our drawing context
drawing=ImageDraw.Draw(pil_img)
# And plot all of the rectangles in faces
for x,y,w,h in faces:
drawing.rectangle((x,y,x+w,y+h), outline="white")
#Finally lets display this
display(pil_img)
```
```python
# Ok, first up, we could try and binarize this image. It turns out that opencv has a built in
# binarization function called threshold(). You simply pass in the image, the midpoint, and
# the maximum value, as well as a flag which indicates whether the threshold should be
# binary or something else. Lets try this.
cv_img_bin=cv.threshold(cv_img,120,255,cv.THRESH_BINARY)[1] # returns a (retval, image) tuple, we want the second value
# Now do the actual face detection
faces = face_cascade.detectMultiScale(cv_img_bin)
# Now lets see the results
show_rects(faces)
```
```python
# That's kind of interesting. Not better, but we do see that there is one false positive
# towards the bottom, where the classifier detected the sunglasses as eyes and the dark shadow
# line below as a mouth.
#
# If you're following in the notebook with this video, why don't you pause things and try a
# few different parameters for the thresholding value?
```
```python
# The detectMultiScale() function from OpenCV also has a couple of parameters. The first of
# these is the scale factor. The scale factor changes the size of rectangles which are
# considered against the model, that is, the haarcascades XML file. You can think of it as if
# it were changing the size of the rectangles which are on the screen.
#
# Lets experiment with the scale factor. Usually it's a small value, lets try 1.05
faces = face_cascade.detectMultiScale(cv_img,1.05)
# Show those results
show_rects(faces)
# Now lets also try 1.15
faces = face_cascade.detectMultiScale(cv_img,1.15)
# Show those results
show_rects(faces)
# Finally lets also try 1.25
faces = face_cascade.detectMultiScale(cv_img,1.25)
# Show those results
show_rects(faces)
```
```python
# We can see that as we change the scale factor we change the number of true and
# false positives and negatives. With the scale set to 1.05, we have 7 true positives,
# which are correctly identified faces, and 3 false negatives, which are faces which
# are there but not detected, and 3 false positives, which are non-faces that
# opencv thinks are faces. When we change this to 1.15 we lose the false positives but
# also lose one of the true positives, the person to the right wearing a hat. And
# when we change this to 1.25 we lost more true positives as well.
#
# This is actually a really interesting phenomena in machine learning and artificial
# intelligence. There is a trade off between not only how accurate a model is, but how
# the inaccuracy actually happens. Which of these three models do you think is best?
```
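If you'd like to put numbers on that trade-off, the standard metrics are precision and recall. Here's a quick computation using the counts quoted above for a scale of 1.05 (these lines are just arithmetic, not an OpenCV feature):
```python
# Precision: of the rectangles reported, what fraction were real faces?
# Recall: of the real faces in the image, what fraction did we find?
tp, fp, fn = 7, 3, 3   # counts quoted above for scale factor 1.05
precision = tp / (tp + fp)
recall = tp / (tp + fn)
print(f"precision={precision:.2f}, recall={recall:.2f}")  # both 0.70 here
```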
```python
# Well, the answer to that question is really, "it depends". It depends why you are trying
# to detect faces, and what you are going to do with them. If you think these issues
# are interesting, you might want to check out the Applied Data Science with Python
# specialization Michigan offers on Coursera.
#
# Ok, beyond an opportunity to advertise, did you notice anything else that happened when
# we changed the scale factor? It's subtle, but the processing took longer at smaller
# scale factors. This is because more subimages are being considered
# for these scales. This could also affect which method we might use.
#
# Jupyter has nice support for timing commands. You might have seen this before, a line
# that starts with a percentage sign in jupyter is called a "magic function". This isn't
# normal python - it's actually a shorthand way of writing a function which Jupyter
# has predefined. It looks a lot like the decorators we talked about in a previous
# lecture, but the magic functions were around long before decorators were part of the
# python language. One of the built-in magic functions in jupyter is called timeit, and this
# repeats a piece of python several times and tells you the average speed it
# took to complete.
#
# Lets time the speed of detectmultiscale when using a scale of 1.05
%timeit face_cascade.detectMultiScale(cv_img,1.05)
```
```python
# Ok, now lets compare that to the speed at scale = 1.15
%timeit face_cascade.detectMultiScale(cv_img,1.15)
```
```python
# You can see that this is a dramatic difference, roughly two and a half times slower
# when using the smaller scale!
#
# This wraps up our discussion of detecting faces in opencv. You'll see that, like OCR, this
# is not a foolproof process. But we can build on the work others have done in machine learning
# and leverage powerful libraries to bring us closer to building a turn key python-based
# solution. Remember that the detection mechanism isn't specific to faces, that's just the
# haarcascades training data we used. On the web you'll be able to find other training data
# to detect other objects, including eyes, animals, and so forth.
```
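```python
# For instance, here is a minimal sketch using the eye cascade that ships
# with the opencv-python package, assuming cv.data.haarcascades points at
# the directory of bundled XML files. The detection call is identical; only
# the training data changes.
eye_cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_eye.xml')
eyes = eye_cascade.detectMultiScale(cv_img)
show_rects(eyes)
```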
## More Jupyter Widgets
```python
# One of the nice things about using the Jupyter notebook systems is that there is a
# rich set of contributed plugins that seek to extend this system. In this lecture I
# want to introduce you to one such plugin, called ipywebrtc. WebRTC is a fairly new
# protocol for real time communication on the web. Yup, I'm talking about chatting.
# The widget brings this to the Jupyter notebook system. Lets take a look.
#
# First, lets import from this library two different classes which we'll use in a
# demo, one for the camera and one for images.
from ipywebrtc import CameraStream, ImageRecorder
# Then lets take a look at the camera stream object
help(CameraStream)
```
```python
# We see from the docs that it's easy to get a camera facing the user, and we can have
# the audio on or off. We don't need audio for this demo, so lets create a new camera
# instance
camera = CameraStream.facing_user(audio=False)
# The next object we want to look at is the ImageRecorder
help(ImageRecorder)
```
```python
# The image recorder lets us actually grab images from the camera stream. There are features
# for downloading and using the image as well. We see that the default format is a png file.
# Lets hook up the ImageRecorder to our stream
image_recorder = ImageRecorder(stream=camera)
# Now, the docs are a little unclear how to use this within Jupyter, but if we call the
# download() function it will actually store the results of the camera which is hooked up
# in image_recorder.image. Lets try it out
# First, lets tell the recorder to start capturing data
image_recorder.recording=True
# Now lets download the image
image_recorder.download()
# Then lets inspect the type of the image
type(image_recorder.image)
```
```python
# Ok, the object that it stores is an ipywidgets.widgets.widget_media.Image. How do we do
# something useful with this? Well, an inspection of the object shows that there is a handy
# value field which actually holds the bytes behind the image. And we know how to display
# those.
# Lets import PIL Image
import PIL.Image
# And lets import io
import io
# And now lets create a PIL image from the bytes
img = PIL.Image.open(io.BytesIO(image_recorder.image.value))
# And render it to the screen
display(img)
```
```python
# Great, you see a picture! Hopefully you are following along in one of the notebooks
# and have been able to try this out for yourself!
#
# What can you do with this? This is a great way to get started with a bit of computer vision.
# You already know how to identify a face in the webcam picture, or try and capture text
# from within the picture. With OpenCV there are any number of other things you can do, simply
# with a webcam, the Jupyter notebooks, and python!
```
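```python
# As a closing sketch, here is one way to combine the two halves of this
# lecture: run the face detector from earlier over the webcam capture. This
# assumes face_cascade is still in scope; everything else is imported below.
import io
import numpy as np
import PIL.Image
from PIL import ImageDraw
# Rebuild the PIL image from the recorder's bytes, as we did above
img = PIL.Image.open(io.BytesIO(image_recorder.image.value))
# detectMultiScale() expects a grayscale array, so convert through numpy
frame = np.array(img.convert("L"))
faces = face_cascade.detectMultiScale(frame, 1.15)
# Draw the hits back onto the colour image and render it
drawing = ImageDraw.Draw(img)
for x, y, w, h in faces:
    drawing.rectangle((x, y, x + w, y + h), outline="white")
display(img)
```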
|
/* bspline/test.c
*
* Copyright (C) 2006, 2007, 2009 Brian Gough
* Copyright (C) 2008, 2011 Rhys Ulerich
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <config.h>
#include <math.h>
#include <gsl/gsl_test.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_bspline.h>
#include <gsl/gsl_ieee_utils.h>
#include <gsl/gsl_nan.h>
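/* This program exercises the gsl_bspline routines: it checks that the
   basis forms a partition of unity and that 0th derivatives agree with
   plain evaluation (test_bspline), spot-checks known derivative values
   for k = 2 and k = 3, verifies Greville abscissae on non-uniform knots
   for k = 1..4, and checks recovery of knots from prescribed Greville
   abscissae, including the nbreak = 2 edge case. */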
void
test_bspline(gsl_bspline_workspace * bw, gsl_bspline_deriv_workspace * dbw)
{
gsl_vector *B;
gsl_matrix *dB;
size_t i, j;
size_t n = 100;
size_t ncoeffs = gsl_bspline_ncoeffs(bw);
size_t order = gsl_bspline_order(bw);
size_t nbreak = gsl_bspline_nbreak(bw);
double a = gsl_bspline_breakpoint(0, bw);
double b = gsl_bspline_breakpoint(nbreak - 1, bw);
B = gsl_vector_alloc(ncoeffs);
dB = gsl_matrix_alloc(ncoeffs, 1);
/* Ensure B-splines form a partition of unity */
for (i = 0; i < n; i++)
{
double xi = a + (b - a) * (i / (n - 1.0));
double sum = 0;
gsl_bspline_eval(xi, B, bw);
for (j = 0; j < ncoeffs; j++)
{
double Bj = gsl_vector_get(B, j);
int s = (Bj < 0 || Bj > 1);
gsl_test(s,
"basis-spline coefficient %u is in range [0,1] for x=%g",
j, xi);
sum += Bj;
}
gsl_test_rel(sum, 1.0, order * GSL_DBL_EPSILON,
"basis-spline order %u is normalized for x=%g", order,
xi);
}
/* Ensure B-splines 0th derivatives agree with regular evaluation */
for (i = 0; i < n; i++)
{
double xi = a + (b - a) * (i / (n - 1.0));
gsl_bspline_eval(xi, B, bw);
gsl_bspline_deriv_eval(xi, 0, dB, bw, dbw);
for (j = 0; j < ncoeffs; j++)
{
gsl_test_abs(gsl_matrix_get(dB, j, 0), gsl_vector_get(B, j),
GSL_DBL_EPSILON,
"b-spline order %d basis #%d evaluation and 0th derivative consistent for x=%g",
order, j, xi);
}
}
gsl_vector_free(B);
gsl_matrix_free(dB);
}
int
main(int argc, char **argv)
{
size_t order, breakpoints, i;
gsl_ieee_env_setup();
argc = 0; /* prevent warnings about unused parameters */
argv = 0;
for (order = 1; order < 10; order++)
{
for (breakpoints = 2; breakpoints < 100; breakpoints++)
{
double a = -1.23 * order, b = 45.6 * order;
gsl_bspline_workspace *bw = gsl_bspline_alloc(order, breakpoints);
gsl_bspline_deriv_workspace *dbw = gsl_bspline_deriv_alloc(order);
gsl_bspline_knots_uniform(a, b, bw);
test_bspline(bw, dbw);
gsl_bspline_deriv_free(dbw);
gsl_bspline_free(bw);
}
}
for (order = 1; order < 10; order++)
{
for (breakpoints = 2; breakpoints < 100; breakpoints++)
{
double a = -1.23 * order, b = 45.6 * order;
gsl_bspline_workspace *bw = gsl_bspline_alloc(order, breakpoints);
gsl_bspline_deriv_workspace *dbw = gsl_bspline_deriv_alloc(order);
gsl_vector *k = gsl_vector_alloc(breakpoints);
for (i = 0; i < breakpoints; i++)
{
double f, x;
f = sqrt(i / (breakpoints - 1.0));
x = (1 - f) * a + f * b;
gsl_vector_set(k, i, x);
}
gsl_bspline_knots(k, bw);
test_bspline(bw, dbw);
gsl_vector_free(k);
gsl_bspline_deriv_free(dbw);
gsl_bspline_free(bw);
}
}
/* Spot check known 0th, 1st, 2nd derivative
evaluations for a particular k = 2 case. */
{
size_t i, j; /* looping */
const double xloc[4] = { 0.0, 1.0, 6.0, 7.0};
const double deriv[4][3] =
{
{ -1.0/2.0, 1.0/2.0, 0.0 },
{ -1.0/2.0, 1.0/2.0, 0.0 },
{ 0.0, -1.0/5.0, 1.0/5.0 },
{ 0.0, -1.0/5.0, 1.0/5.0 }
};
gsl_bspline_workspace *bw = gsl_bspline_alloc(2, 3);
gsl_bspline_deriv_workspace *dbw = gsl_bspline_deriv_alloc(2);
gsl_matrix *dB = gsl_matrix_alloc(gsl_bspline_ncoeffs(bw),
gsl_bspline_order(bw) + 1);
gsl_vector *breakpts = gsl_vector_alloc(3);
gsl_vector_set(breakpts, 0, 0.0);
gsl_vector_set(breakpts, 1, 2.0);
gsl_vector_set(breakpts, 2, 7.0);
gsl_bspline_knots(breakpts, bw);
for (i = 0; i < 4; ++i) /* at each location */
{
/* Initialize dB with poison to ensure we overwrite it */
gsl_matrix_set_all(dB, GSL_NAN);
gsl_bspline_deriv_eval(xloc[i], gsl_bspline_order(bw), dB, bw, dbw);
for (j = 0; j < gsl_bspline_ncoeffs(bw) ; ++j)
{
/* check basis function 1st deriv */
gsl_test_abs(gsl_matrix_get(dB, j, 1), deriv[i][j], GSL_DBL_EPSILON,
"b-spline k=%d basis #%d derivative %d at x = %f",
gsl_bspline_order(bw), j, 1, xloc[i]);
}
for (j = 0; j < gsl_bspline_ncoeffs(bw); ++j)
{
/* check k order basis function has k-th deriv equal to 0 */
gsl_test_abs(gsl_matrix_get(dB, j, gsl_bspline_order(bw)), 0.0,
GSL_DBL_EPSILON,
"b-spline k=%d basis #%d derivative %d at x = %f",
gsl_bspline_order(bw), j, gsl_bspline_order(bw),
xloc[i]);
}
}
gsl_matrix_free(dB);
gsl_bspline_deriv_free(dbw);
gsl_bspline_free(bw);
gsl_vector_free(breakpts);
}
/* Spot check known 0th, 1st, 2nd derivative
evaluations for a particular k = 3 case. */
{
size_t i, j; /* looping */
const double xloc[5] = { 0.0, 5.0, 9.0, 12.0, 15.0 };
const double eval[5][6] =
{
{ 4./25., 69./100., 3./ 20. , 0. , 0. , 0. },
{ 0. , 4./21. , 143./210. , 9./70., 0. , 0. },
{ 0. , 0. , 3./ 10. , 7./10., 0. , 0. },
{ 0. , 0. , 0. , 3./4. , 1./4., 0. },
{ 0. , 0. , 0. , 1./3. , 5./9., 1./9. }
};
const double deriv[5][6] =
{
{ -4./25., 3./50., 1./ 10., 0. , 0. , 0. },
{ 0. , -2./21., 1./105., 3./35., 0. , 0. },
{ 0. , 0. , -1./5. , 1./ 5., 0. , 0. },
{ 0. , 0. , 0. , -1./ 6., 1./6. , 0. },
{ 0. , 0. , 0. , -1./ 9., 1./27., 2./27. }
};
const double deriv2[5][6] =
{
{ 2./25., -17./150., 1.0/30.0 , 0.0 , 0. , 0. },
{ 0. , 1./ 42., -11.0/210.0, 1.0/35.0, 0. , 0. },
{ 0. , 0. , 1.0/15.0 ,-11.0/90.0, 1./18. , 0. },
{ 0. , 0. , 0.0 , 1.0/54.0, -7./162., 2./81. },
{ 0. , 0. , 0.0 , 1.0/54.0, -7./162., 2./81. }
};
gsl_bspline_workspace *bw = gsl_bspline_alloc(3, 5);
gsl_bspline_deriv_workspace *dbw = gsl_bspline_deriv_alloc(3);
gsl_matrix *dB = gsl_matrix_alloc(gsl_bspline_ncoeffs(bw),
gsl_bspline_order(bw) + 1);
gsl_vector *breakpts = gsl_vector_alloc(5);
gsl_vector_set(breakpts, 0, -3.0);
gsl_vector_set(breakpts, 1, 2.0);
gsl_vector_set(breakpts, 2, 9.0);
gsl_vector_set(breakpts, 3, 12.0);
gsl_vector_set(breakpts, 4, 21.0);
gsl_bspline_knots(breakpts, bw);
for (i = 0; i < 5; ++i) /* at each location */
{
/* Initialize dB with poison to ensure we overwrite it */
gsl_matrix_set_all(dB, GSL_NAN);
gsl_bspline_deriv_eval(xloc[i], gsl_bspline_order(bw), dB, bw, dbw);
/* check basis function evaluation */
for (j = 0; j < gsl_bspline_ncoeffs(bw); ++j)
{
gsl_test_abs(gsl_matrix_get(dB, j, 0), eval[i][j], GSL_DBL_EPSILON,
"b-spline k=%d basis #%d derivative %d at x = %f",
gsl_bspline_order(bw), j, 0, xloc[i]);
}
/* check 1st derivative evaluation */
for (j = 0; j < gsl_bspline_ncoeffs(bw); ++j)
{
gsl_test_abs(gsl_matrix_get(dB, j, 1), deriv[i][j], GSL_DBL_EPSILON,
"b-spline k=%d basis #%d derivative %d at x = %f",
gsl_bspline_order(bw), j, 1, xloc[i]);
}
/* check 2nd derivative evaluation */
for (j = 0; j < gsl_bspline_ncoeffs(bw); ++j)
{
gsl_test_abs(gsl_matrix_get(dB, j, 2), deriv2[i][j], GSL_DBL_EPSILON,
"b-spline k=%d basis #%d derivative %d at x = %f",
gsl_bspline_order(bw), j, 2, xloc[i]);
}
}
gsl_matrix_free(dB);
gsl_bspline_deriv_free(dbw);
gsl_bspline_free(bw);
gsl_vector_free(breakpts);
}
/* Check Greville abscissae functionality on a non-uniform k=1 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 1;
const double bpoint_data[] = { 0.0, 0.2, 0.5, 0.75, 1.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Expected results */
const double abscissae_data[] = { 0.1, 0.35, 0.625, 0.875 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
gsl_vector_const_view bpoints = gsl_vector_const_view_array(bpoint_data, nbreak);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots((const gsl_vector *) &bpoints, w);
gsl_test_int(nabscissae, gsl_bspline_ncoeffs(w),
"b-spline k=%d number of abscissae", k);
for (i = 0; i < nabscissae; ++i)
{
gsl_test_abs(gsl_bspline_greville_abscissa(i, w), abscissae_data[i], 2*k*GSL_DBL_EPSILON,
"b-spline k=%d Greville abscissa #%d at x = %f", k, i, abscissae_data[i]);
}
gsl_bspline_free(w);
}
/* Check Greville abscissae functionality on a non-uniform k=2 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 2;
const double bpoint_data[] = { 0.0, 0.2, 0.5, 0.75, 1.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Expected results */
const double abscissae_data[] = { 0.0, 0.2, 0.5, 0.75, 1.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
gsl_vector_const_view bpoints = gsl_vector_const_view_array(bpoint_data, nbreak);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots((const gsl_vector *) &bpoints, w);
gsl_test_int(nabscissae, gsl_bspline_ncoeffs(w),
"b-spline k=%d number of abscissae", k);
for (i = 0; i < nabscissae; ++i)
{
gsl_test_abs(gsl_bspline_greville_abscissa(i, w), abscissae_data[i], 2*k*GSL_DBL_EPSILON,
"b-spline k=%d Greville abscissa #%d at x = %f", k, i, abscissae_data[i]);
}
gsl_bspline_free(w);
}
/* Check Greville abscissae functionality on non-uniform k=3 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 3;
const double bpoint_data[] = { 0.0, 0.2, 0.5, 0.75, 1.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Expected results */
const double abscissae_data[] = { 0.0, 1.0/10.0, 7.0/20.0,
5.0/ 8.0, 7.0/ 8.0, 1.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
gsl_vector_const_view bpoints = gsl_vector_const_view_array(bpoint_data, nbreak);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots((const gsl_vector *) &bpoints, w);
gsl_test_int(nabscissae, gsl_bspline_ncoeffs(w),
"b-spline k=%d number of abscissae", k);
for (i = 0; i < nabscissae; ++i)
{
gsl_test_abs(gsl_bspline_greville_abscissa(i, w), abscissae_data[i], 2*k*GSL_DBL_EPSILON,
"b-spline k=%d Greville abscissa #%d at x = %f", k, i, abscissae_data[i]);
}
gsl_bspline_free(w);
}
/* Check Greville abscissae functionality on non-uniform k=4 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 4;
const double bpoint_data[] = { 0.0, 0.2, 0.5, 0.75, 1.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Expected results */
const double abscissae_data[] = { 0.0, 1.0/15.0, 7.0/30.0, 29.0/60.0,
3.0/ 4.0, 11.0/12.0, 1.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
gsl_vector_const_view bpoints = gsl_vector_const_view_array(bpoint_data, nbreak);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots((const gsl_vector *) &bpoints, w);
gsl_test_int(nabscissae, gsl_bspline_ncoeffs(w),
"b-spline k=%d number of abscissae", k);
for (i = 0; i < nabscissae; ++i)
{
gsl_test_abs(gsl_bspline_greville_abscissa(i, w), abscissae_data[i], 2*k*GSL_DBL_EPSILON,
"b-spline k=%d Greville abscissa #%d at x = %f", k, i, abscissae_data[i]);
}
gsl_bspline_free(w);
}
/* Knots computed from prescribed Greville abscissae for k = 4 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 4;
const double abscissae_data[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
/* Expected results */
const double bpoint_data[] = { 1.0, 4.0, 4.0, 4.0, 7.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Compute knots from Greville abscissae */
double abserr;
gsl_vector_const_view abscissae
= gsl_vector_const_view_array(abscissae_data, nabscissae);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots_greville(&abscissae.vector, w, &abserr);
for (i = 0; i < nbreak; ++i)
{
gsl_test_abs(gsl_bspline_breakpoint(i,w), bpoint_data[i], GSL_DBL_EPSILON*50,
"b-spline k=%d knots_greville breakpoint #%d", k, i);
}
gsl_test_abs(abserr, 0.0, GSL_DBL_EPSILON*15,
"b-spline k=%d nbreak=%d knots_greville abserr", k, nbreak);
gsl_bspline_free(w);
}
/* Knots computed from prescribed Greville abscissae for k = 8 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 8;
const double abscissae_data[] = { 1.0, 10.0/7, 13.0/7, 16.0/7, 22.0/7,
4.0, 34.0/7, 40.0/7, 43.0/7, 46.0/7, 7.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
/* Expected results */
const double bpoint_data[] = { 1.0, 4.0, 4.0, 4.0, 7.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Compute knots from Greville abscissae */
double abserr;
gsl_vector_const_view abscissae
= gsl_vector_const_view_array(abscissae_data, nabscissae);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots_greville(&abscissae.vector, w, &abserr);
for (i = 0; i < nbreak; ++i)
{
gsl_test_abs(gsl_bspline_breakpoint(i,w), bpoint_data[i], GSL_DBL_EPSILON*50,
"b-spline k=%d knots_greville breakpoint #%d", k, i);
}
gsl_test_abs(abserr, 0.0, GSL_DBL_EPSILON*15,
"b-spline k=%d nbreak=%d knots_greville abserr", k, nbreak);
gsl_bspline_free(w);
}
/* Knots computed from prescribed Greville abscissae for k = 2 */
/* Not an interesting calculation but checks the k = 2 edge case */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 2;
const double abscissae_data[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
/* Expected results */
const double bpoint_data[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Compute knots from Greville abscissae */
double abserr;
gsl_vector_const_view abscissae
= gsl_vector_const_view_array(abscissae_data, nabscissae);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots_greville(&abscissae.vector, w, &abserr);
for (i = 0; i < nbreak; ++i)
{
gsl_test_abs(gsl_bspline_breakpoint(i,w), bpoint_data[i], GSL_DBL_EPSILON,
"b-spline k=%d knots_greville breakpoint #%d", k, i);
}
gsl_test_abs(abserr, 0.0, GSL_DBL_EPSILON,
"b-spline k=%d nbreak=%d knots_greville abserr", k, nbreak);
gsl_bspline_free(w);
}
/* Knots computed from prescribed abscissae for edge case when nbreak = 2 */
{
size_t i; /* looping */
/* Test parameters */
const size_t k = 4;
double abscissae_data[] = { 1.0, 3.0, 5.0, 7.0 };
const size_t nabscissae = sizeof(abscissae_data)/sizeof(abscissae_data[0]);
/* Expected results */
const double bpoint_data[] = { 1.0, 7.0 };
const size_t nbreak = sizeof(bpoint_data)/sizeof(bpoint_data[0]);
/* Compute knots from Greville abscissae where abscissae are recoverable */
double abserr;
gsl_vector_view abscissae
= gsl_vector_view_array(abscissae_data, nabscissae);
gsl_bspline_workspace *w = gsl_bspline_alloc(k, nbreak);
gsl_bspline_knots_greville(&abscissae.vector, w, &abserr);
/* Check recovery of breakpoints and abscissae */
for (i = 0; i < nbreak; ++i)
{
gsl_test_abs(gsl_bspline_breakpoint(i,w), bpoint_data[i], GSL_DBL_EPSILON,
"b-spline k=%d knots_greville breakpoint #%d", k, i);
}
gsl_test_abs(abserr, 0.0, GSL_DBL_EPSILON,
"b-spline k=%d nbreak=%d knots_greville abserr", k, nbreak);
/* Modify interior abscissae so they cannot be recovered with nbreak = 2 */
/* Then recompute breakpoints and check that abserr is as expected */
abscissae_data[1] -= 1;
abscissae_data[2] += 1;
gsl_bspline_knots_greville(&abscissae.vector, w, &abserr);
for (i = 0; i < nbreak; ++i)
{
gsl_test_abs(gsl_bspline_breakpoint(i,w), bpoint_data[i], GSL_DBL_EPSILON,
"b-spline k=%d knots_greville breakpoint #%d", k, i);
}
gsl_test_abs(abserr, /* deliberate error */ 2.0, GSL_DBL_EPSILON,
"b-spline k=%d nbreak=%d knots_greville abserr large", k, nbreak);
gsl_bspline_free(w);
}
exit(gsl_test_summary());
}
|
module Lam where
f
: {A : Set}
→ A
→ A
f x
= (λ y z → z) x x
g
: {A : Set}
→ A
→ A
g {A = A} x
= (λ (y' : A) (z : A) → z) x x
|
module Algebra.LabelledGraph where
open import Algebra.Dioid
-- Core graph construction primitives
data LabelledGraph {D eq} (d : Dioid D eq) (A : Set) : Set where
ε : LabelledGraph d A -- Empty graph
v : A -> LabelledGraph d A -- Graph comprising a single vertex
_[_]>_ : LabelledGraph d A -> D -> LabelledGraph d A -> LabelledGraph d A
-- Connect two graphs with an edge labelled by a dioid element
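-- Overlay and connect, as in algebraic graphs, are recovered below as
-- connections labelled by the dioid's zero and one respectively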
_+_ : ∀ {A D eq} {d : Dioid D eq} -> LabelledGraph d A -> LabelledGraph d A -> LabelledGraph d A
_+_ {_} {_} {_} {d} g h = g [ Dioid.zero d ]> h
_*_ : ∀ {A D eq} {d : Dioid D eq} -> LabelledGraph d A -> LabelledGraph d A -> LabelledGraph d A
_*_ {_} {_} {_} {d} g h = g [ Dioid.one d ]> h
infixl 4 _≡_
infixl 8 _+_
infixl 9 _*_
infixl 9 _[_]>_
infix 10 _⊆_
-- Equational theory of graphs
data _≡_ {A D eq} {d : Dioid D eq} : (x y : LabelledGraph d A) -> Set where
-- Equivalence relation
reflexivity : ∀ {x : LabelledGraph d A} -> x ≡ x
symmetry : ∀ {x y : LabelledGraph d A} -> x ≡ y -> y ≡ x
transitivity : ∀ {x y z : LabelledGraph d A} -> x ≡ y -> y ≡ z -> x ≡ z
-- Congruence
left-congruence : ∀ {x y z : LabelledGraph d A} {r : D} -> x ≡ y -> x [ r ]> z ≡ y [ r ]> z
right-congruence : ∀ {x y z : LabelledGraph d A} {r : D} -> x ≡ y -> z [ r ]> x ≡ z [ r ]> y
dioid-congruence : ∀ {x y : LabelledGraph d A} {r s : D} -> eq r s -> x [ r ]> y ≡ x [ s ]> y
-- Axioms
zero-commutativity : ∀ {x y : LabelledGraph d A} -> x + y ≡ y + x
left-identity : ∀ {x : LabelledGraph d A} {r : D} -> ε [ r ]> x ≡ x
right-identity : ∀ {x : LabelledGraph d A} {r : D} -> x [ r ]> ε ≡ x
left-decomposition : ∀ {x y z : LabelledGraph d A} {r s : D} -> x [ r ]> (y [ s ]> z) ≡ (x [ r ]> y) + (x [ r ]> z) + (y [ s ]> z)
right-decomposition : ∀ {x y z : LabelledGraph d A} {r s : D} -> (x [ r ]> y) [ s ]> z ≡ (x [ r ]> y) + (x [ s ]> z) + (y [ s ]> z)
label-addition : ∀ {x y : LabelledGraph d A} {r s : D} -> (x [ r ]> y) + (x [ s ]> y) ≡ (x [ Dioid.plus d r s ]> y)
-- Subgraph relation
_⊆_ : ∀ {A D eq} {d : Dioid D eq} -> LabelledGraph d A -> LabelledGraph d A -> Set
x ⊆ y = x + y ≡ y
|
import numpy as np
import pytest
from abmarl.sim.predator_prey import PredatorPreySimulation, Predator, Prey
from abmarl.managers import AllStepManager
def test_turn_based_predator_prey_distance():
np.random.seed(24)
predators = [Predator(id=f'predator{i}', attack=1) for i in range(2)]
prey = [Prey(id=f'prey{i}') for i in range(7)]
agents = predators + prey
sim_config = {
'region': 6,
'observation_mode': PredatorPreySimulation.ObservationMode.DISTANCE,
'agents': agents,
}
sim = PredatorPreySimulation.build(sim_config)
sim = AllStepManager(sim)
# Little hackish here because I have to explicitly set their values
obs = sim.reset()
sim.agents['predator0'].position = np.array([2, 3])
sim.agents['predator1'].position = np.array([0, 1])
sim.agents['prey0'].position = np.array([1, 1])
sim.agents['prey1'].position = np.array([4, 3])
sim.agents['prey2'].position = np.array([4, 3])
sim.agents['prey3'].position = np.array([2, 3])
sim.agents['prey4'].position = np.array([3, 3])
sim.agents['prey5'].position = np.array([3, 1])
sim.agents['prey6'].position = np.array([2, 1])
obs = {agent_id: sim.sim.get_obs(agent_id) for agent_id in sim.agents}
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([2, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([4, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([4, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([2, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([3, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey0']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['prey0']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey0']['prey1'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey2'], np.array([3, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey3'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey4'], np.array([2, 2, 1]))
np.testing.assert_array_equal(obs['prey0']['prey5'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey0']['prey6'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-4, -2, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([-3, -2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([-2, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([-2, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-4, -2, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([-3, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([-2, 0, 1]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([-2, -2, 1]))
np.testing.assert_array_equal(obs['prey3']['predator0'], np.array([0, 0, 2]))
np.testing.assert_array_equal(obs['prey3']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['prey3']['prey0'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey3']['prey1'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey3']['prey2'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey3']['prey4'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey3']['prey5'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['prey3']['prey6'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['predator0'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey4']['predator1'], np.array([-3, -2, 2]))
np.testing.assert_array_equal(obs['prey4']['prey0'], np.array([-2, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey1'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey2'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey3'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey5'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey6'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-1, 2, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-3, 0, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([-2, 0, 1]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([-1, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([0, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([-1, 0, 1]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([2, 2, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([2, 2, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 2, 1]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([1, 0, 1]))
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey0': np.array([-1, 1]),
'prey1': np.array([0, -1]),
'prey2': np.array([1, 1]),
'prey3': np.array([1, -1]),
'prey4': np.array([-1, 1]),
'prey5': np.array([1, 1]),
'prey6': np.array([0, 0]),
})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([2, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([4, 1, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([5, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([2, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([4, 1, 1]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey0']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['prey0']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey0']['prey1'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['prey0']['prey2'], np.array([4, 3, 1]))
np.testing.assert_array_equal(obs['prey0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey0']['prey4'], np.array([1, 3, 1]))
np.testing.assert_array_equal(obs['prey0']['prey5'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['prey0']['prey6'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-2, 1, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-4, -1, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([-2, 2, 1]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([-2, -1, 1]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-3, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-5, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([-3, 0, 1]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([-3, -3, 1]))
np.testing.assert_array_equal(obs['prey3']['predator0'], np.array([0, 0, 2]))
np.testing.assert_array_equal(obs['prey3']['predator1'], np.array([-2, -2, 2]))
np.testing.assert_array_equal(obs['prey3']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey3']['prey1'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey2'], np.array([3, 1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey4'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey5'], np.array([2, -1, 1]))
np.testing.assert_array_equal(obs['prey3']['prey6'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['predator0'], np.array([0, -1, 2]))
np.testing.assert_array_equal(obs['prey4']['predator1'], np.array([-2, -3, 2]))
np.testing.assert_array_equal(obs['prey4']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey1'], np.array([2, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey2'], np.array([3, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey5'], np.array([2, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey6'], np.array([0, -3, 1]))
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-2, 1, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-4, -1, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([0, 0, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([-2, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([-2, -1, 1]))
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([2, 1, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([3, 3, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 3, 1]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([2, 1, 1]))
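# A note on the expected rewards checked below (inferred from the values in
# this test, not from simulator docs): a successful attack appears to score
# +36 for the predator and -36 for the eaten prey (36 = region**2 with
# region=6), an ordinary move costs -1, and a no-op move costs 0.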
assert reward == {
'predator0': 36,
'predator1': 36,
'prey0': -36,
'prey1': -1,
'prey2': -1,
'prey3': -36,
'prey4': -1,
'prey5': -1,
'prey6': 0,
}
assert done == {
'predator0': False,
'predator1': False,
'prey0': True,
'prey1': False,
'prey2': False,
'prey3': True,
'prey4': False,
'prey5': False,
'prey6': False,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey0': np.array([-1, 1]),
'prey1': np.array([0, -1]),
'prey2': np.array([1, 1]),
'prey3': np.array([1, -1]),
'prey4': np.array([-1, 1]),
'prey5': np.array([1, 1]),
'prey6': np.array([0, 0]),
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 0, 'move': np.array([1, 0])},
'prey1': np.array([-1, -1]),
'prey2': np.array([-1, 0]),
'prey4': np.array([-1, 0]),
'prey5': np.array([-1, 0]),
'prey6': np.array([0, -1]),
})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([2, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([1, -1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, -3, 1]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([3, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([2, 1, 1]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([1, -1, 1]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([-1, 2, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-2, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([1, 3, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([-1, -1, 1]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-2, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-3, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([-1, -3, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([-2, -4, 1]))
np.testing.assert_array_equal(obs['prey4']['predator0'], np.array([0, -1, 2]))
np.testing.assert_array_equal(obs['prey4']['predator1'], np.array([-1, -3, 2]))
np.testing.assert_array_equal(obs['prey4']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey1'], np.array([1, -3, 1]))
np.testing.assert_array_equal(obs['prey4']['prey2'], np.array([2, 0, 1]))
np.testing.assert_array_equal(obs['prey4']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey4']['prey5'], np.array([1, -2, 1]))
np.testing.assert_array_equal(obs['prey4']['prey6'], np.array([0, -4, 1]))
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-2, -1, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([0, -1, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([1, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([-1, -2, 1]))
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 3, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([1, 1, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([2, 4, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([1, 2, 1]))
assert reward == {
'predator0': 36,
'predator1': -1,
'prey1': -1,
'prey2': -1,
'prey4': -36,
'prey5': -1,
'prey6': -1,
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': False,
'prey2': False,
'prey4': True,
'prey5': False,
'prey6': False,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': np.array([0, -1]),
'prey2': np.array([1, 1]),
'prey4': np.array([-1, 1]),
'prey5': np.array([1, 1]),
'prey6': np.array([0, 0]),
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': np.array([-1, 0]),
'prey2': np.array([-1, 0]),
'prey5': np.array([0, 1]),
'prey6': np.array([-1, 0]),
})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([0, -2, 1]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([1, 1, 1]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([1, 0, 1]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([2, 3, 1]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([1, 3, 1]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-1, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-2, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([-1, -3, 1]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['predator0'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey5']['predator1'], np.array([-2, -1, 2]))
np.testing.assert_array_equal(obs['prey5']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey1'], np.array([-1, -1, 1]))
np.testing.assert_array_equal(obs['prey5']['prey2'], np.array([0, 2, 1]))
np.testing.assert_array_equal(obs['prey5']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey5']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['predator0'], np.array([0, 3, 2]))
np.testing.assert_array_equal(obs['prey6']['predator1'], np.array([-1, 1, 2]))
np.testing.assert_array_equal(obs['prey6']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey1'], np.array([0, 1, 1]))
np.testing.assert_array_equal(obs['prey6']['prey2'], np.array([1, 4, 1]))
np.testing.assert_array_equal(obs['prey6']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey6']['prey5'], np.array([0, 0, 0]))
assert reward == {
'predator0': 36,
'predator1': 36,
'prey1': -1,
'prey2': -1,
'prey5': -36,
'prey6': -36
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': False,
'prey2': False,
'prey5': True,
'prey6': True,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': np.array([0, -1]),
'prey2': np.array([1, 1]),
'prey5': np.array([1, 1]),
'prey6': np.array([0, 0]),
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': np.array([-1, 0]),
'prey2': np.array([-1, 0]),
})
np.testing.assert_array_equal(obs['predator0']['predator1'], np.array([-1, -2, 2]))
np.testing.assert_array_equal(obs['predator0']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator0']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['predator0'], np.array([1, 2, 2]))
np.testing.assert_array_equal(obs['predator1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['predator1']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['predator0'], np.array([0, 2, 2]))
np.testing.assert_array_equal(obs['prey1']['predator1'], np.array([-1, 0, 2]))
np.testing.assert_array_equal(obs['prey1']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey2'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey1']['prey6'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['predator0'], np.array([-1, -1, 2]))
np.testing.assert_array_equal(obs['prey2']['predator1'], np.array([-2, -3, 2]))
np.testing.assert_array_equal(obs['prey2']['prey0'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey1'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey3'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey4'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey5'], np.array([0, 0, 0]))
np.testing.assert_array_equal(obs['prey2']['prey6'], np.array([0, 0, 0]))
assert reward == {
'predator0': 36,
'predator1': 36,
'prey1': -36,
'prey2': -36,
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': True,
'prey2': True,
'__all__': True}
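# Same scenario as the distance test above, but with grid observations and
# view=0, so the observations themselves carry almost no information. The
# checks below therefore only cover which agents appear in obs plus the
# reward/done bookkeeping.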
def test_turn_based_predator_prey_grid():
np.random.seed(24)
predators = [Predator(id=f'predator{i}', attack=1, view=0) for i in range(2)]
prey = [Prey(id=f'prey{i}', view=0) for i in range(7)]
agents = predators + prey
sim_config = {
'region': 6,
'observation_mode': PredatorPreySimulation.ObservationMode.GRID,
'agents': agents,
}
sim = PredatorPreySimulation.build(sim_config)
sim = AllStepManager(sim)
# Little hackish here because I have to explicitly set their values
obs = sim.reset()
sim.agents['predator0'].position = np.array([2, 3])
sim.agents['predator1'].position = np.array([0, 1])
sim.agents['prey0'].position = np.array([1, 1])
sim.agents['prey1'].position = np.array([4, 3])
sim.agents['prey2'].position = np.array([4, 3])
sim.agents['prey3'].position = np.array([2, 3])
sim.agents['prey4'].position = np.array([3, 3])
sim.agents['prey5'].position = np.array([3, 1])
sim.agents['prey6'].position = np.array([2, 1])
obs = {agent_id: sim.sim.get_obs(agent_id) for agent_id in sim.agents}
assert 'predator0' in obs
assert 'predator1' in obs
assert 'prey0' in obs
assert 'prey1' in obs
assert 'prey2' in obs
assert 'prey3' in obs
assert 'prey4' in obs
assert 'prey5' in obs
assert 'prey6' in obs
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey0': {'move': np.array([1, 1]), 'harvest': 0},
'prey1': {'move': np.array([0, -1]), 'harvest': 0},
'prey2': {'move': np.array([1, 1]), 'harvest': 0},
'prey3': {'move': np.array([0, 0]), 'harvest': 0},
'prey4': {'move': np.array([-1, 1]), 'harvest': 0},
'prey5': {'move': np.array([1, 1]), 'harvest': 0},
'prey6': {'move': np.array([0, 0]), 'harvest': 0},
})
assert 'predator0' in obs
assert 'predator1' in obs
assert 'prey0' in obs
assert 'prey1' in obs
assert 'prey2' in obs
assert 'prey3' in obs
assert 'prey4' in obs
assert 'prey5' in obs
assert 'prey6' in obs
assert reward == {
'predator0': 36,
'predator1': 36,
'prey0': -36,
'prey1': -1,
'prey2': -1,
'prey3': -36,
'prey4': -1,
'prey5': -1,
'prey6': 0,
}
assert done == {
'predator0': False,
'predator1': False,
'prey0': True,
'prey1': False,
'prey2': False,
'prey3': True,
'prey4': False,
'prey5': False,
'prey6': False,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey0': {'move': np.array([0, -1]), 'harvest': 0},
'prey1': {'move': np.array([0, -1]), 'harvest': 0},
'prey2': {'move': np.array([1, 1]), 'harvest': 0},
'prey3': {'move': np.array([0, -1]), 'harvest': 0},
'prey4': {'move': np.array([0, -1]), 'harvest': 0},
'prey5': {'move': np.array([1, 1]), 'harvest': 0},
'prey6': {'move': np.array([0, 0]), 'harvest': 0},
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 0, 'move': np.array([1, 0])},
'prey1': {'move': np.array([-1, -1]), 'harvest': 0},
'prey2': {'move': np.array([-1, 0]), 'harvest': 0},
'prey4': {'move': np.array([0, -1]), 'harvest': 0},
'prey5': {'move': np.array([-1, 0]), 'harvest': 0},
'prey6': {'move': np.array([0, -1]), 'harvest': 0},
})
assert 'predator0' in obs
assert 'predator1' in obs
assert 'prey1' in obs
assert 'prey2' in obs
assert 'prey4' in obs
assert 'prey5' in obs
assert 'prey6' in obs
assert reward == {
'predator0': 36,
'predator1': -1,
'prey1': -1,
'prey2': -1,
'prey4': -36,
'prey5': -1,
'prey6': -1,
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': False,
'prey2': False,
'prey4': True,
'prey5': False,
'prey6': False,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': {'move': np.array([0, -1]), 'harvest': 0},
'prey2': {'move': np.array([1, 1]), 'harvest': 0},
'prey4': {'move': np.array([0, -1]), 'harvest': 0},
'prey5': {'move': np.array([1, 1]), 'harvest': 0},
'prey6': {'move': np.array([0, 0]), 'harvest': 0},
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': {'move': np.array([-1, 0]), 'harvest': 0},
'prey2': {'move': np.array([-1, 0]), 'harvest': 0},
'prey5': {'move': np.array([-1, 0]), 'harvest': 0},
'prey6': {'move': np.array([1, -1]), 'harvest': 0},
})
assert 'predator0' in obs
assert 'predator1' in obs
assert 'prey1' in obs
assert 'prey2' in obs
assert 'prey5' in obs
assert 'prey6' in obs
assert reward == {
'predator0': 36,
'predator1': 36,
'prey1': -1,
'prey2': -1,
'prey5': -36,
'prey6': -36,
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': False,
'prey2': False,
'prey5': True,
'prey6': True,
'__all__': False}
with pytest.raises(AssertionError):
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': {'move': np.array([0, -1]), 'harvest': 0},
'prey2': {'move': np.array([1, 1]), 'harvest': 0},
'prey5': {'move': np.array([1, 1]), 'harvest': 0},
'prey6': {'move': np.array([0, 0]), 'harvest': 0},
})
obs, reward, done, info = sim.step({
'predator0': {'attack': 1, 'move': np.array([0, 0])},
'predator1': {'attack': 1, 'move': np.array([0, 0])},
'prey1': {'move': np.array([-1, 0]), 'harvest': 0},
'prey2': {'move': np.array([-1, 0]), 'harvest': 0},
})
assert 'predator0' in obs
assert 'predator1' in obs
assert 'prey1' in obs
assert 'prey2' in obs
assert reward == {
'predator0': 36,
'predator1': 36,
'prey1': -36,
'prey2': -36,
}
assert done == {
'predator0': False,
'predator1': False,
'prey1': True,
'prey2': True,
'__all__': True}
|
-- Andreas Abel, 2022-06-06, issue #5944 reported by Mike Shulman
-- Support rewriting with 2ltt (match SSet).
{-# OPTIONS --type-in-type --rewriting --two-level #-}
open import Agda.Primitive public
postulate
Tel : SSet
ε : Tel
_≡_ : {A : SSet} (a : A) → A → SSet
cong : (A B : SSet) (f : A → B) {x y : A} (p : x ≡ y) → f x ≡ f y
coe← : (A B : SSet) → (A ≡ B) → B → A
el : Tel → SSet
[] : el ε
ID : (Δ : Tel) → Tel
ID′ : (Δ : Tel) (Θ : el Δ → Tel) → Tel
ID′ε : (Δ : Tel) → ID′ Δ (λ _ → ε) ≡ ε
{-# BUILTIN REWRITE _≡_ #-}
{-# REWRITE ID′ε #-}
postulate
ID′-CONST : (Θ : Tel) (Δ : Tel)
→ ID′ Θ (λ _ → Δ) ≡ ID Δ
ID′-CONST-ε : (Θ : Tel) (δ₂ : el (ID ε))
→ coe← (el (ID′ Θ (λ _ → ε))) (el (ID ε)) (cong Tel (SSet lzero) el (ID′-CONST Θ ε)) δ₂ ≡ []
{-# REWRITE ID′-CONST-ε #-}
-- WAS: internal error when trying to make a pattern from SSet
-- Should succeed now
|
This is Zoe, Renault's latest beauty, and not just because of her looks. Zoe looks nice, but it's her clean spirit that really makes her stand out from the crowd. Zoe is fully electric and therefore emission free. And Zoe is idealistic: she wants to make electric driving available to everyone. |
= = Melee = =
|
From iris.algebra Require Import excl auth frac cmra gmap agree gset numbers.
From iris.algebra.lib Require Import frac_agree.
From iris.heap_lang Require Export notation locations lang.
From iris.base_logic.lib Require Export invariants.
From iris.program_logic Require Export atomic.
From iris.proofmode Require Import tactics.
From iris.heap_lang Require Import proofmode par.
From iris.bi.lib Require Import fractional.
Set Default Proof Using "All".
Require Export multicopy_df auth_ext.
Require Export multicopy_util.
Section multicopy_df_upsert.
Context {Σ} `{!heapG Σ, !multicopyG Σ, !multicopy_dfG Σ}.
Notation iProp := (iProp Σ).
Local Notation "m !1 i" := (nzmap_total_lookup i m) (at level 20).
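(** Holding the physical node resource [node r n Vn'] alongside
[lockR bn n _] forces [bn = true]: if the lock were free, [lockR]
would contain a second copy of [node], contradicting [node_sep_star]. *)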
Lemma nodePred_lockR_true γ_s γ_cn r n Cn Vn Vn' Tn bn :
node r n Vn' -∗
lockR bn n (nodePred γ_s γ_cn r n Cn Vn Tn) -∗
⌜bn = true⌝.
Proof.
iIntros "node Hl_n".
destruct bn; try done.
iDestruct "Hl_n" as "(Hl & HnP')".
iDestruct "HnP'" as "(n' & _)".
iExFalso. iApply (node_sep_star r n). iFrame.
Qed.
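(** Logically atomic specification of [upsert]: at the linearization
point the clock advances from [t] to [t + 1] and the history [H] grows
by the new entry [(k, (v, t))]. The [ghost_update_protocol] premise
lets the protocol resource [Prot] be updated at the same moment. *)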
Lemma upsert_spec N γ_te γ_he γ_s Prot γ_cr γ_cd γ_d r d (k: K) (v: V) :
⊢ ⌜k ∈ KS⌝ -∗
(ghost_update_protocol N γ_te γ_he Prot k) -∗
mcs_inv N γ_te γ_he γ_s Prot
(Inv_DF γ_s γ_cr γ_cd γ_d r d) -∗
<<< ∀ (t: T) H, MCS γ_te γ_he t H >>>
upsert r #k #v @ ⊤ ∖ (↑(mcsN N))
<<< MCS γ_te γ_he (t + 1) (H ∪ {[(k, (v, t))]}), RET #() >>>.
Proof.
iIntros "%".
rename H into k_in_KS.
(* perform Löb induction by generating a hypothesis IH : ▷ goal *)
iLöb as "IH".
iIntros "Ghost_updP #HInv" (Φ) "AU". wp_lam. wp_pures.
iApply fupd_wp.
(** Open invariant to establish root node in footprint **)
iInv "HInv" as (t0 H0)"(mcs_high & >Inv_DF)".
iDestruct "Inv_DF" as (Cr0 Vr0 Tr0 Cd0 Vd0 Td0) "(r_neq_d & Hcir
& Hset & HlockR_r & Half_r & Hcts_r
& HlockR_d & Half_d & Hcts_d)".
iModIntro. iSplitR "AU Ghost_updP". iNext.
iExists t0, H0. iFrame "mcs_high".
iExists Cr0, Vr0, Tr0, Cd0, Vd0, Td0. iFrame "∗ #".
iModIntro.
awp_apply lockNode_spec_high without "Ghost_updP"; try done.
iPureIntro. auto.
(** Lock the node r **)
iAaccIntro with ""; try eauto with iFrame.
iIntros (γ_cn Cr Vr Tr) "HnP_n". iModIntro.
iIntros "Ghost_updP". wp_pures.
iDestruct "HnP_n" as "(HnP_n & #r_is_locked)".
iDestruct "HnP_n" as "(node_r & HnP_C & HnP_frac)".
wp_apply (addContents_spec with "[$node_r]"); try done.
iIntros (b Vr') "(node_r & Hif)".
(** Case analysis on whether addContents is successful **)
destruct b; last first.
- (** Case : addContents fails. Unlock root node and apply
inductive hypothesis IH **)
iDestruct "Hif" as %HVr. replace Vr'. wp_pures.
awp_apply (unlockNode_spec_high
with "[] [] [HnP_frac HnP_C node_r ]")
without "Ghost_updP" ; try done.
{
iFrame.
}
iAaccIntro with ""; try eauto with iFrame.
iIntros "_". iModIntro.
iIntros "Ghost_updP". wp_pures.
iApply ("IH" with "Ghost_updP"); try done.
- (** Case : addContent successful **)
(** Linearization Point: open invariant and update the resources **)
wp_pures.
(* Need to unfold unlockNode here in order to apply
       ghost_update_protocol, which requires stripping off
a later modality. This requires a physical step in heapLang,
which is available by unfolding the unlockNode *)
unfold unlockNode.
wp_pures. wp_bind(getLockLoc _)%E.
wp_apply getLockLoc_spec; first done.
iIntros (l) "%"; subst l; wp_pures.
iInv "HInv" as (t1 H1)"(mcs_high & >Inv_DF)".
iDestruct "Inv_DF" as (Cr1 Vr1 Tr1 Cd1 Vd1 Td1) "(r_neq_d & Hcir
& Hset & HlockR_r & Half_r & Hcts_r
& HlockR_d & Half_d & Hcts_d)".
iDestruct "mcs_high" as "(>MCS_auth & >HH & >HInit
& >HClock & >HUniq & Prot)".
iDestruct "HlockR_r" as (br) "HlockR_r".
iDestruct "HlockR_d" as (bd) "HlockR_d".
iDestruct "Hif" as %HCr.
set (Tr' := <[k := t1]> Tr).
set (Cr' := <[k := (v, t1)]> Cr).
set (H1' := H1 ∪ {[(k, (v, t1))]}).
iPoseProof ((auth_own_incl γ_s H1 _) with "[$HH $HnP_C]") as "%".
rename H into Cr_sub_H1. apply gset_included in Cr_sub_H1.
iDestruct "HClock" as %HClock_H1.
(** Re-establish maxTS for updated T and H **)
assert (HClock (t1 + 1) H1') as HClock_H1'.
{ subst H1'. intros k' v' t' H'.
assert (((k', (v', t')) ∈ H1) ∨ (k' = k ∧ v' = v ∧ t' = t1))
as Hor by set_solver.
destruct Hor as [Hor | Hor].
pose proof HClock_H1 k' v' t' Hor as Hres. lia.
destruct Hor as [_ [_ Hor]]. replace t'. lia. }
iAssert (⌜set_of_map Cr' ⊆ H1'⌝)%I as %Cr'_sub_H1'.
{ subst H1'. iPureIntro. subst Cr'.
pose proof (set_of_map_insert_subseteq Cr k v t1) as H'.
assert (set_of_map Cr = set_of_map Cr) as H'' by done.
set_solver. }
(** Update the (● H) resource **)
iMod (own_update γ_s (● H1) (● H1')
with "[$HH]") as "HH".
{ apply (auth_update_auth _ _ H1').
apply gset_local_update. set_solver. }
iMod (own_update γ_s (● H1')
(● H1' ⋅ ◯ (set_of_map Cr'))
with "[$HH]") as "HH".
{ subst H1'.
apply (auth_update_alloc _ (H1 ∪ {[(k, (v, t1))]}) (set_of_map Cr')).
apply local_update_discrete. intros m Valid_H1 H1_eq.
split; try done. rewrite /(ε ⋅? m) in H1_eq.
destruct m. rewrite gset_op in H1_eq.
rewrite left_id in H1_eq *; intros H1_eq.
rewrite <-H1_eq.
rewrite /(set_of_map Cr' ⋅? Some (H1 ∪ {[k, (v, t1)]})).
rewrite gset_op.
rewrite /(ε) in H1_eq. unfold ucmra_unit in H1_eq.
simpl in H1_eq.
assert ((k, (v, t1)) ∈ set_of_map Cr') as H'.
{ subst Cr'. apply set_of_map_member.
apply lookup_insert. }
clear - H' Cr_sub_H1 Cr'_sub_H1'. set_solver.
exfalso. clear -H1_eq. set_solver. }
(** Re-establish HInit **)
iAssert (⌜HInit H1'⌝)%I with "[HInit]" as "HInit".
{ subst H1'. iDestruct "HInit" as %HInit.
unfold multicopy.HInit. iPureIntro.
clear -HInit k_in_KS. intros k' Hk'.
pose proof HInit k' Hk' as H'. set_solver. }
iDestruct "HnP_C" as "Hown_Cr".
iDestruct "HH" as "(HH & HnP_C)".
rewrite (big_sepS_delete _ KS k); last by eauto.
iDestruct "HInit" as %HInit.
iPoseProof (nodePred_lockR_true with "[$node_r] [HlockR_r]")
as "%". iFrame. subst br.
(** Update contents-in-reach of r **)
iAssert (⌜map_of_set H1' =
<[k:= (v, t1)]> (map_of_set H1)⌝)%I as %Htrans_union.
{
iDestruct "HUniq" as %HUniq.
iPureIntro.
pose proof map_of_set_insert_eq k v t1 H1 HUniq HClock_H1 as mos_eq.
by subst H1'.
}
iAssert (⌜γ_cn = γ_cr⌝)%I as %gamma_cn_cr.
{
iDestruct "r_is_locked" as %r_is_locked.
iDestruct "r_neq_d" as %r_neq_d.
destruct r_is_locked as [ r_is_locked | r_not_locked].
destruct r_is_locked as [ r_is_r gh_cn_cr ]. done.
destruct r_not_locked as [ r_is_d gh_cn_cr ]. done.
}
subst γ_cn.
iAssert (⌜Cr = Cr1⌝∗ ⌜Vr = Vr1⌝ ∗ ⌜Tr = Tr1⌝)%I as "(%&%&%)".
{ iPoseProof (own_valid_2 _ _ _ with "[$Half_r] [$HnP_frac]")
as "#HCr_equiv".
iDestruct "HCr_equiv" as %HCr_equiv.
apply frac_agree_op_valid in HCr_equiv.
destruct HCr_equiv as [_ HCr_equiv].
apply leibniz_equiv_iff in HCr_equiv.
inversion HCr_equiv. iPureIntro. done. } subst Cr1 Vr1 Tr1.
iAssert (⌜cir H1' Cr' Cd1⌝)%I with "[Hcir]" as "Hcir".
{
iDestruct "Hcir" as %Hcir.
iPureIntro.
intros k' v' t'.
rewrite -> Htrans_union.
destruct (decide (k' = k)).
- subst k'. subst Cr'.
rewrite !lookup_insert.
split; try done.
- subst Cr'.
rewrite !lookup_insert_ne; try done.
}
(*
iAssert (⌜(map_of_set H1) !!! k ≤ T⌝)%I as %H_le_T.
{
iPureIntro.
rewrite lookup_total_alt.
destruct ((map_of_set H1) !! k) eqn: hist_has_k.
- simpl.
unfold maxTS in MaxTS_H1.
destruct MaxTS_H1 as [MaxTS_H1 T_not_zero].
pose proof map_of_set_lookup_cases H1 k as H'.
destruct H' as [H' | [_ H']]; last first.
+ rewrite H' in hist_has_k. inversion hist_has_k.
+ destruct H' as [Tk [H' [_ H'']]].
rewrite H'' in hist_has_k. inversion hist_has_k.
subst Tk.
pose proof MaxTS_H1 k n H' as H'''.
clear -H'''; lia.
- simpl. lia.
}
*)
iAssert (⌜HUnique H1'⌝)%I with "[HUniq]" as %HUniq.
{ iDestruct "HUniq" as %HUniq.
iPureIntro. subst H1'.
intros k' t' v' v'' H' H''.
assert (((k', (v', t')) ∈ H1) ∨ (k' = k ∧ v' = v ∧ t' = t1))
as Hor by set_solver.
assert (((k', (v'', t')) ∈ H1) ∨ (k' = k ∧ v'' = v ∧ t' = t1))
as Hor' by set_solver.
destruct Hor as [Hor | Hor].
- destruct Hor' as [Hor' | Hor'].
+ apply (HUniq k' t' v' v'' Hor Hor'); try done.
+ destruct Hor' as [? [? ?]]. subst k' v'' t'.
apply (HClock_H1 k v' t1) in Hor.
clear -Hor; lia.
- destruct Hor as [? [? ?]]. subst k' v' t'.
destruct Hor' as [Hor' | Hor'].
+ apply (HClock_H1 k v'' t1) in Hor'.
clear -Hor'; lia.
+ destruct Hor' as [? [? ?]]. by subst v''. }
iAssert (contents_proj Cr' Vr' Tr')%I with "[Hcts_r]" as "Hcts_r".
{ iDestruct "Hcts_r" as "(% & % & %)".
rename H into dom_Cr_Vr; rename H2 into dom_Cr_Tr;
rename H3 into Cr_eq_Vr_Tr.
iPureIntro. subst Cr' Vr' Tr'. split; last split.
- apply leibniz_equiv. rewrite !dom_insert.
rewrite dom_Cr_Vr. clear; set_solver.
- apply leibniz_equiv. rewrite !dom_insert.
rewrite dom_Cr_Tr. clear; set_solver.
- intros k' v' t'. destruct (decide (k' = k)).
+ subst k'. rewrite !lookup_insert. split.
* intros H'; by inversion H'.
* intros [H' H'']; inversion H'; by inversion H''.
+ rewrite !lookup_insert_ne; try done. }
iDestruct "HlockR_r" as "(Hlockr & _)". wp_store.
(** Linearization **)
iMod "AU" as (t' H1'')"[MCS [_ Hclose]]".
iAssert (⌜t' = t1 ∧ H1'' = H1⌝)%I as "(% & %)".
{ iPoseProof (MCS_agree with "[$MCS_auth] [$MCS]") as "(% & %)".
by iPureIntro. } subst t' H1''.
iDestruct "MCS" as "(MCS◯t & MCS◯h & _)".
iDestruct "MCS_auth" as "(MCS●t & MCS●h)".
iMod ((auth_excl_update γ_te (t1+1) t1 t1) with "MCS●t MCS◯t")
as "(MCS●t & MCS◯t)".
iMod ((auth_excl_update γ_he (H1 ∪ {[(k, (v, t1))]}) H1 H1) with "MCS●h MCS◯h")
as "(MCS●h & MCS◯h)".
iCombine "MCS◯t MCS◯h" as "(MCS_t & MCS_h)".
iCombine "MCS●t MCS●h" as "MCS_auth".
iMod ("Hclose" with "[MCS_t MCS_h]") as "HΦ".
iFrame. by iPureIntro.
(** Use ghost_update_protocol to update Prot(H) **)
iSpecialize ("Ghost_updP" $! v t1 H1).
iMod ("Ghost_updP" with "[] [$MCS_auth] [$Prot]")
as "(Prot & MCS_auth)".
{ assert ((k, (v, t1)) ∈ H1') as H' by set_solver.
assert ((∀ (v' : V) (t' : nat), (k, (v', t')) ∈ H1' → t' ≤ t1))
as H''.
{ intros v' t' Hvt'. subst H1'.
rewrite elem_of_union in Hvt'*; intros Hvt'.
destruct Hvt' as [Hvt' | Hvt'].
- apply HClock_H1 in Hvt'. clear -Hvt'; lia.
- assert (t' = t1) by (clear -Hvt'; set_solver).
subst t'; clear; lia. }
pose proof map_of_set_lookup H1' k v t1 HUniq H' H'' as H'''.
iPureIntro. rewrite lookup_total_alt.
rewrite H'''. by simpl. }
(* Combine fractional ownerships of Cr. *)
iCombine "HnP_frac Half_r" as "Half_r".
iEval (rewrite <-frac_agree_op) in "Half_r".
iEval (rewrite Qp_half_half) in "Half_r".
(* Use combined ownerships to update Cr -> Cr'. *)
iMod ((own_update (γ_cr) (to_frac_agree 1 (Cr, Vr, Tr))
(to_frac_agree 1 (Cr', Vr', Tr'))) with "[$Half_r]")
as "Half_r".
{ apply cmra_update_exclusive.
unfold valid, cmra_valid. simpl. unfold prod_valid_instance.
split; simpl; try done. }
(* Break apart ownerships to be used separately. *)
iEval (rewrite <- Qp_half_half) in "Half_r".
iEval (rewrite frac_agree_op) in "Half_r".
iDestruct "Half_r" as "(HnP_frac & Half_r)".
iModIntro. iFrame "HΦ".
iNext. iExists (t1+1), H1'. iFrame "∗".
iSplitR; first by iPureIntro.
iExists Cr', Vr', Tr', Cd1, Vd1, Td1.
iFrame "Hcir".
rewrite (big_sepS_delete _ (KS) k); last by eauto.
iFrame.
iSplitR "HlockR_d"; last first.
{ by iExists bd. }
iExists false; iFrame.
Qed.
End multicopy_df_upsert. |
Require Import Bool Arith List CpdtTactics.
Set Implicit Arguments.
Set Asymmetric Patterns.
Inductive binop : Set := Plus | Times.
Inductive exp : Set :=
| Const : nat -> exp
| Binop : binop -> exp -> exp -> exp.
Definition binopDenote (b : binop) : nat -> nat -> nat :=
match b with
| Plus => plus
| Times => mult
end.
Fixpoint expDenote (e : exp) : nat :=
match e with
| Const n => n
| Binop b e1 e2 => (binopDenote b) (expDenote e1) (expDenote e2)
end.
Eval simpl in expDenote (Const 42).
Eval simpl in expDenote (Binop Plus (Const 2) (Const 4)).
Eval simpl in expDenote (Binop Times (Binop Plus (Const 2) (Const 4)) (Const 7)).
(* Target Language *)
Inductive instr : Set :=
| iConst : nat -> instr
| iBinop : binop -> instr.
Definition prog := list instr.
Definition stack := list nat.
Definition instrDenote (i : instr) (s : stack) : option stack :=
match i with
| iConst n => Some (n :: s)
| iBinop b =>
match s with
| arg1 :: arg2 :: s' => Some ((binopDenote b) arg1 arg2 :: s')
| _ => None
end
end.
Fixpoint progDenote (p : prog) (s : stack) : option stack :=
match p with
| nil => Some s
| i :: p' =>
match (instrDenote i s) with
| None => None
| Some s' => progDenote p' s'
end
end.
Fixpoint compile (e : exp) : prog :=
match e with
| Const n => iConst n :: nil
| Binop b e1 e2 => compile e2 ++ compile e1 ++ iBinop b :: nil
end.
Eval simpl in compile (Const 42).
Eval simpl in compile (Binop Plus (Const 2) (Const 4)).
Eval simpl in compile (Binop Plus (Binop Times (Const 2) (Const 4)) (Const 7)).
Eval simpl in progDenote (compile (Const 42)).
Eval simpl in progDenote (compile (Binop Plus (Const 2) (Const 4))).
Eval simpl in progDenote (compile (Binop Times (Binop Plus (Const 2) (Const 4)) (Const 7))).
Theorem compile_correct : forall e,
progDenote (compile e) nil = Some (expDenote e :: nil).
Proof.
induction e; intros; try reflexivity.
Abort.
Lemma compile_correct' : forall e p s,
progDenote (compile e ++ p) s = progDenote p (expDenote e :: s).
Proof.
induction e.
- intros. unfold compile. unfold progDenote at 1. simpl. fold progDenote. reflexivity.
- intros. unfold compile. fold compile. unfold progDenote. fold progDenote.
Check app_assoc_reverse. SearchRewrite ((_ ++ _) ++ _). rewrite app_assoc_reverse.
rewrite IHe2. rewrite app_assoc_reverse. rewrite IHe1. unfold progDenote at 1. simpl. fold progDenote.
reflexivity.
Abort.
Lemma compile_correct' : forall e s p,
progDenote (compile e ++ p) s = progDenote p (expDenote e :: s).
Proof.
induction e; crush.
Qed.
Theorem compile_correct : forall e,
progDenote (compile e) nil = Some (expDenote e :: nil).
Proof.
intros.
Check app_nil_end.
rewrite (app_nil_end (compile e)).
rewrite compile_correct'.
reflexivity.
Qed.
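(* An illustrative sanity check of compile_correct on a concrete program;
   this Eval is an editorial addition, not part of the original development. *)
Eval simpl in progDenote (compile (Binop Plus (Const 1) (Const 2))) nil.
(* Expected result: Some (3 :: nil), i.e. Some (expDenote e :: nil). *)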
(* Typed Expressions *)
Inductive type : Set := Nat | Bool.
Inductive tbinop : type -> type -> type -> Set :=
| TPlus : tbinop Nat Nat Nat
| TTimes : tbinop Nat Nat Nat
| TEq : forall t, tbinop t t Bool
| TLt : tbinop Nat Nat Bool.
Inductive texp : type -> Set :=
| TNConst : nat -> texp Nat
| TBConst : bool -> texp Bool
| TBinop : forall t t1 t2, tbinop t1 t2 t -> texp t1 -> texp t2 -> texp t.
Definition typeDenote (t : type) : Set :=
match t with
| Nat => nat
| Bool => bool
end.
Definition tbinopDenote arg1 arg2 res (b : tbinop arg1 arg2 res)
: typeDenote arg1 -> typeDenote arg2 -> typeDenote res :=
match b with
| TPlus => plus
| TTimes => mult
| TEq Nat => beq_nat
| TEq Bool => eqb
| TLt => leb
end.
Fixpoint texpDenote t (e : texp t) : typeDenote t :=
match e with
| TNConst n => n
| TBConst b => b
| TBinop _ _ _ b e1 e2 => (tbinopDenote b) (texpDenote e1) (texpDenote e2)
end.
Eval simpl in texpDenote (TNConst 42).
Eval simpl in texpDenote (TBConst true).
Eval simpl in texpDenote (TBinop TTimes (TBinop TPlus (TNConst 2) (TNConst 4)) (TNConst 7)).
Eval simpl in texpDenote (TBinop (TEq Nat) (TBinop TPlus (TNConst 2) (TNConst 4)) (TNConst 7)).
Eval simpl in texpDenote (TBinop TLt (TBinop TPlus (TNConst 2) (TNConst 2)) (TNConst 7)).
(* Target Language *)
Definition tstack := list type.
Inductive tinstr : tstack -> tstack -> Set :=
| TiNConst : forall s, nat -> tinstr s (Nat :: s)
| TiBConst : forall s, bool -> tinstr s (Bool :: s)
| TiBinop : forall arg1 arg2 res s,
tbinop arg1 arg2 res -> tinstr (arg1 :: arg2 :: s) (res :: s).
Inductive tprog : tstack -> tstack -> Set :=
| TNil : forall s, tprog s s
| TCons : forall s1 s2 s3,
tinstr s1 s2
-> tprog s2 s3
-> tprog s1 s3.
Fixpoint vstack (ts : tstack) : Set :=
match ts with
| nil => unit
| t :: ts' => typeDenote t * vstack ts'
end%type.
Definition tinstrDenote ts ts' (i : tinstr ts ts') : vstack ts -> vstack ts' :=
match i with
| TiNConst _ n => fun s => (n, s)
| TiBConst _ b => fun s => (b, s)
| TiBinop _ _ _ _ b => fun s =>
let '(arg1, (arg2, s')) := s in
((tbinopDenote b) arg1 arg2, s')
end.
Fixpoint tprogDenote ts ts' (p : tprog ts ts') : vstack ts -> vstack ts' :=
match p with
| TNil _ => fun s => s
| TCons _ _ _ i p' => fun s => tprogDenote p' (tinstrDenote i s)
end.
(* Translation *)
Fixpoint tconcat ts ts' ts'' (p : tprog ts ts') : tprog ts' ts'' -> tprog ts ts'' :=
match p with
| TNil _ => fun p' => p'
| TCons _ _ _ i p1 => fun p' => TCons i (tconcat p1 p')
end.
Fixpoint tcompile t (e : texp t) (ts : tstack) : tprog ts (t :: ts) :=
match e with
| TNConst n => TCons (TiNConst _ n) (TNil _)
| TBConst b => TCons (TiBConst _ b) (TNil _)
| TBinop _ _ _ b e1 e2 => tconcat (tcompile e2 _)
(tconcat (tcompile e1 _) (TCons (TiBinop _ b) (TNil _)))
end.
Print tcompile.
Eval simpl in tprogDenote (tcompile (TNConst 42) nil) tt.
Eval simpl in tprogDenote (tcompile (TBConst true) nil) tt.
Eval simpl in tprogDenote (tcompile (TBinop TTimes (TBinop TPlus (TNConst 2) (TNConst 4)) (TNConst 7)) nil) tt.
Eval simpl in tprogDenote (tcompile (TBinop (TEq Nat) (TBinop TPlus (TNConst 2) (TNConst 4)) (TNConst 7)) nil) tt.
Eval simpl in tprogDenote (tcompile (TBinop TLt (TBinop TPlus (TNConst 2) (TNConst 4)) (TNConst 7)) nil) tt.
(* Translation Correctness *)
Theorem tcompile_correct : forall t (e : texp t),
tprogDenote (tcompile e nil) tt = (texpDenote e, tt).
Lemma tcompile_correct' : forall t (e : texp t) ts (s : vstack ts),
tprogDenote (tcompile e ts) s = (texpDenote e, s).
Proof.
induction e; crush.
Abort.
Lemma tconcat_correct : forall ts ts' ts'' (p : tprog ts ts') (p' : tprog ts' ts'') (s : vstack ts),
tprogDenote (tconcat p p') s
= tprogDenote p' (tprogDenote p s).
Proof.
induction p; crush.
Qed.
Hint Rewrite tconcat_correct.
Lemma tcompile_correct' : forall t (e : texp t) ts (s : vstack ts),
tprogDenote (tcompile e ts) s = (texpDenote e, s).
Proof.
induction e; crush.
Qed.
Hint Rewrite tcompile_correct'.
Theorem tcompile_correct : forall t (e : texp t),
tprogDenote (tcompile e nil) tt = (texpDenote e, tt).
Proof.
induction e; crush.
Qed.
Require Coq.extraction.Extraction.
Extraction Language OCaml.
Extraction tcompile.
|
Formal statement is: lemma compactI: assumes "\<And>C. \<forall>t\<in>C. open t \<Longrightarrow> s \<subseteq> \<Union>C \<Longrightarrow> \<exists>C'. C' \<subseteq> C \<and> finite C' \<and> s \<subseteq> \<Union>C'" shows "compact s" Informal statement is: A set $s$ is compact if for every collection of open sets whose union contains $s$, there is a finite subcollection whose union also contains $s$. |
The Indian Navy's Advanced Technology Vessel project to design and construct a nuclear submarine took shape in the 1990s. Then Defence Minister George Fernandes confirmed the project in 1998. The initial intent of the project was to design nuclear-powered fast attack submarines, though following nuclear tests conducted by India in 1998 at Pokhran Test Range and the Indian pledge of no first use, the project was re-aligned towards the design of a ballistic missile submarine in order to complete India's nuclear triad.
|
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
#include "Common/ExclusiveFile.h"
#include <boost/test/unit_test.hpp>
#include "Common/boost-taef.h"
using namespace std;
namespace Common
{
class TestExclusiveFile
{
};
static atomic_long counter(0);
static atomic_long runningThreads(0);
static shared_ptr<AutoResetEvent> allThreadsCompleted;
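    // Each worker repeatedly takes the exclusive file lock, increments the
    // shared counter, and asserts that it stays at 1 for the whole critical
    // section, i.e. that no other thread ever holds the lock concurrently.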
DWORD _stdcall TestAccessThreadCallback(void *)
{
const int iterationCount = 10;
wstring path = L"ExclusiveFile.lock";
for (int i=0; i<iterationCount; i++)
{
ExclusiveFile lock(path);
lock.Acquire(TimeSpan::MaxValue);
++counter;
CODING_ERROR_ASSERT(counter.load() == 1);
Sleep(200);
CODING_ERROR_ASSERT(counter.load() == 1);
--counter;
}
if (--runningThreads == 0) allThreadsCompleted->Set();
return 0;
}
void TestAccessForThreadCount(int threadCount)
{
runningThreads.store(threadCount);
allThreadsCompleted = make_shared<AutoResetEvent>(false);
vector<HANDLE> threads;
for (int i=0; i<threadCount; i++)
{
threads.push_back(CreateThread(NULL, 0, TestAccessThreadCallback, NULL, 0, NULL));
}
BOOST_REQUIRE(allThreadsCompleted->WaitOne(TimeSpan::FromSeconds(60)));
}
BOOST_FIXTURE_TEST_SUITE(ExclusiveFileTest,TestExclusiveFile)
BOOST_AUTO_TEST_CASE(TestAccess)
{
TestAccessForThreadCount(1);
TestAccessForThreadCount(5);
}
BOOST_AUTO_TEST_SUITE_END()
}
|
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from itertools import count
import pandas as pd
import numpy as np
from collections import deque
from io import StringIO
import argparse
# create parser
parser = argparse.ArgumentParser()
# add arguments to the parser
parser.add_argument("--fname", default=r'out.txt', type=np.str)
parser.add_argument("--nrows", default=100, type=np.int)
parser.add_argument("--fullscreen", default=0, type=np.int, choices=[0, 1])
parser.add_argument('-v','--vars', nargs='+', default=None)
# parse the arguments
args = parser.parse_args()
print(args)
# open top of file to get the number of variables
# def get_nvar():
# with open(args.fname, 'r') as f:
# q = deque(f, 1)  # keep only the last line
# df = pd.read_csv(StringIO(''.join(q)), header=None)
# return len(df.columns)
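# A working variant of the helper sketched above (an editorial addition; it is
# not called anywhere below): read only the last line of the file and count
# its comma-separated columns.
def get_nvar():
    with open(args.fname, 'r') as f:
        q = deque(f, 1)  # keep only the last line
    df = pd.read_csv(StringIO(''.join(q)), header=None)
    return len(df.columns)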
if args.vars is not None:
variables = [str(v) for v in args.vars]
else:
# maximum number of variables is 10
variables = ["var " + str(i) for i in range(10)]
fig, ax = plt.subplots()
if args.fullscreen == 1:
fig.canvas.manager.full_screen_toggle() # toggle fullscreen mode
index = count()
def animate(i):
with open(args.fname, 'r') as f:
        q = deque(f, args.nrows)  # keep only the last args.nrows lines
df = pd.read_csv(StringIO(''.join(q)), header=None, usecols=range(len(variables)), names=variables)
plt.cla()
df.plot(ax=ax, ls="-", marker="o", ms=2, lw=1)
plt.legend(loc="lower left")
plt.xlabel('time')
plt.title(i)
plt.gcf().autofmt_xdate()
plt.tight_layout()
ani = FuncAnimation(plt.gcf(), animate, 5000)
plt.tight_layout()
plt.show() |
[STATEMENT]
lemma aff_dim_affine_Int_hyperplane:
fixes a :: "'a::euclidean_space"
assumes "affine S"
shows "aff_dim(S \<inter> {x. a \<bullet> x = b}) =
(if S \<inter> {v. a \<bullet> v = b} = {} then - 1
else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S
else aff_dim S - 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
proof (cases "a = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. a = (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
2. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
a = (0::'a)
goal (2 subgoals):
1. a = (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
2. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
with assms
[PROOF STATE]
proof (chain)
picking this:
affine S
a = (0::'a)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
affine S
a = (0::'a)
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
a \<noteq> (0::'a)
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
a \<noteq> (0::'a)
[PROOF STEP]
have "aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1"
if "x \<in> S" "a \<bullet> x \<noteq> b" and non: "S \<inter> {v. a \<bullet> v = b} \<noteq> {}" for x
[PROOF STATE]
proof (prove)
using this:
a \<noteq> (0::'a)
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
have [simp]: "{x + y| x y. x \<in> S \<and> a \<bullet> y = b} = UNIV"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {x + y |x y. x \<in> S \<and> a \<bullet> y = b} = UNIV
[PROOF STEP]
using affine_hyperplane_sums_eq_UNIV [OF assms non] that
[PROOF STATE]
proof (prove)
using this:
S - {v. a \<bullet> v = b} \<noteq> {} \<Longrightarrow> {x + y |x y. x \<in> S \<and> a \<bullet> y = b} = UNIV
x \<in> S
a \<bullet> x \<noteq> b
S \<inter> {v. a \<bullet> v = b} \<noteq> {}
goal (1 subgoal):
1. {x + y |x y. x \<in> S \<and> a \<bullet> y = b} = UNIV
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
{x + y |x y. x \<in> S \<and> a \<bullet> y = b} = UNIV
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
using aff_dim_sums_Int [OF assms affine_hyperplane non]
[PROOF STATE]
proof (prove)
using this:
aff_dim {x + y |x y. x \<in> S \<and> y \<in> {v. a \<bullet> v = b}} = aff_dim S + aff_dim {v. a \<bullet> v = b} - aff_dim (S \<inter> {v. a \<bullet> v = b})
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
by (simp add: of_nat_diff False)
[PROOF STATE]
proof (state)
this:
aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x \<in> S; a \<bullet> ?x \<noteq> b; S \<inter> {v. a \<bullet> v = b} \<noteq> {}\<rbrakk> \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
goal (1 subgoal):
1. a \<noteq> (0::'a) \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>?x \<in> S; a \<bullet> ?x \<noteq> b; S \<inter> {v. a \<bullet> v = b} \<noteq> {}\<rbrakk> \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?x \<in> S; a \<bullet> ?x \<noteq> b; S \<inter> {v. a \<bullet> v = b} \<noteq> {}\<rbrakk> \<Longrightarrow> aff_dim (S \<inter> {x. a \<bullet> x = b}) = aff_dim S - 1
goal (1 subgoal):
1. aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
[PROOF STEP]
by (metis (mono_tags, lifting) inf.orderE aff_dim_empty_eq mem_Collect_eq subsetI)
[PROOF STATE]
proof (state)
this:
aff_dim (S \<inter> {x. a \<bullet> x = b}) = (if S \<inter> {v. a \<bullet> v = b} = {} then - 1 else if S \<subseteq> {v. a \<bullet> v = b} then aff_dim S else aff_dim S - 1)
goal:
No subgoals!
[PROOF STEP]
qed |
The Lebesgue measure of a set $S$ is equal to the Lebesgue measure of the set $\{x - a \mid x \in S\}$. |
import .util
namespace posets
open function
open classical (em prop_decidable)
open classical (renaming some → unexists) (renaming some_spec → unexists_prop)
local attribute [instance] prop_decidable
open util
-- comparability:
inductive comp {T: Sort _} [hT: partial_order T] (x y: T): Prop
| le (hle: x ≤ y): comp
| ge (hge: y ≤ x): comp
infix ` <=> `:50 := comp
@[refl] theorem comp.refl {T: Sort _} [hT: partial_order T] (x: T):
x <=> x := begin
apply comp.le,
refl,
end
@[symm] theorem comp.symm {T: Sort _} [hT: partial_order T] {x y: T}:
x <=> y →
y <=> x := begin
intro hcomp,
induction hcomp,
case comp.le {
apply comp.ge,
exact hcomp,
},
case comp.ge {
apply comp.le,
exact hcomp,
},
end
theorem le_of_not_lt {T: Sort _} [hT: partial_order T] {x y: T} (hcomp: x <=> y):
¬ x < y →
y ≤ x := begin
intro hnlt,
cases hcomp,
case comp.le {
rw [le_iff_lt_or_eq] at hcomp,
cases hcomp,
case or.inl {
contradiction,
},
case or.inr {
rw [hcomp],
},
},
case comp.ge {
exact hcomp,
},
end
theorem lt_of_not_le {T: Sort _} [hT: partial_order T] {x y: T} (hcomp: x <=> y):
¬ x ≤ y →
y < x := begin
intro hnlt,
cases hcomp,
case comp.le {
contradiction,
},
case comp.ge {
by_contra hnlt,
have hyx: comp y x := comp.le hcomp,
have hle := le_of_not_lt hyx hnlt,
contradiction,
},
end
-- connectedness:
inductive con {T: Sort _} [hT: partial_order T]: T → T → Prop
| le {x y: T} (hle: x ≤ y): con x y
| ge {x y: T} (hge: y ≤ x): con x y
| trans {x y z: T} (hxy: con x y) (hyz: con y z): con x z
attribute [trans] con.trans
infix ` ~ ` := con
@[refl] theorem con.refl {T: Sort _} [hT: partial_order T] (x: T):
x ~ x := begin
intros,
apply con.le,
refl,
end
@[symm] theorem con.symm {T: Sort _} [hT: partial_order T] {x y: T}:
x ~ y →
y ~ x := begin
intro hcon,
induction hcon,
case con.le {
apply con.ge,
assumption,
},
case con.ge {
apply con.le,
assumption,
},
case con.trans {
exact con.trans hcon_ih_hyz hcon_ih_hxy,
},
end
theorem comp.con {T: Sort _} [hT: partial_order T] {x y: T} (hcomp: x <=> y):
x ~ y := begin
induction hcomp,
case comp.le {
apply con.le,
exact hcomp,
},
case comp.ge {
apply con.ge,
exact hcomp,
},
end
-- order with bottom:
class bot_order (T: Sort _) extends partial_order T :=
(bot: T)
(bot_le: ∀ x: T, bot ≤ x)
@[reducible, inline] def bot {T: Sort _} [hT: bot_order T] := hT.bot
@[reducible, inline] def bot_le {T: Sort _} [hT: bot_order T] := hT.bot_le
instance bot_order.inhabited {T: Sort _} [hT: bot_order T]:
inhabited T := (| bot |)
theorem bot_uniq {T: Sort _} [hT: bot_order T]:
∀ bot': T,
(∀ x: T, bot' ≤ x) →
bot' = bot := begin
intros bot' bot_le',
apply le_antisymm,
apply bot_le',
apply bot_le,
end
theorem bot_ne_elim {T: Sort _} [hT: bot_order T] {x: T}:
x ≠ bot →
∃ y: T,
¬ x ≤ y := begin
intro hne,
apply not_forall_not_elim,
intro hnlt,
apply hne,
apply bot_uniq,
intro y,
have hy := hnlt y,
simp at hy,
exact hy,
end
theorem con.all_of_bot {T: Sort _} [hT: bot_order T] {x y: T}:
x ~ y := begin
have hx: x ~ bot, by {
apply con.ge,
apply bot_le,
},
have hy: bot ~ y, by {
apply con.le,
apply bot_le,
},
exact con.trans hx hy,
end
-- trivial order:
@[reducible] def trivially_ordered (T: Sort _) [hT: partial_order T]: Prop :=
∀ {x y: T},
x ≤ y → x = y
@[simp] theorem trivially_ordered.elim {T: Sort _} [hT: partial_order T] (htriv: trivially_ordered T):
∀ {x y: T},
x ≤ y ↔ x = y := begin
intros,
apply iff.intro,
{
intro h,
exact htriv h,
},
{
intro h,
rw [h],
},
end
@[simp] theorem trivially_ordered.of_bot {T: Sort _} [hT: bot_order T] (htriv: trivially_ordered T):
∀ {x: T},
x = bot := begin
intros,
symmetry,
apply htriv,
apply bot_le,
end
-- not marked as instance to avoid automatically trivially ordering everything
def trivial_ordering (T: Sort _):
partial_order T := {
le := eq,
le_refl := begin
intros,
refl,
end,
le_trans := begin
intros x y z hxy hyz,
rw [hxy, ←hyz],
end,
le_antisymm := begin
intros,
assumption,
end,
}
theorem trivial_ordering.is_triv (T: Sort _):
@trivially_ordered T (trivial_ordering T) := begin
intros x y,
intro hxy,
cases hxy,
refl,
end
-- min:
noncomputable def or_else {T: Sort _} [hT: bot_order T] (x y: T):
T := if x = bot then y else x
infix ` ?? `:60 := or_else
theorem or_else.le_refl {T: Sort _} [hT: bot_order T] {x y: T}:
x ≤ x ?? y := begin
rw [or_else],
cases em (x = bot),
case or.inl {
rw [if_pos h, h],
apply bot_le,
},
case or.inr {
rw [if_neg h],
},
end
theorem or_else.le_of_le {T: Sort _} [hT: bot_order T] {x y: T}:
x ≤ y →
x ≤ y ?? x := begin
intro hle,
rw [or_else],
cases em (y = bot),
case or.inl {
rw [if_pos h],
},
case or.inr {
rw [if_neg h],
exact hle,
},
end
-- max:
noncomputable def and_then {T: Sort _} [hT: bot_order T] (x y: T):
T := if x = bot then x else y
infix ` >> `:60 := and_then
theorem and_then.le_refl {T: Sort _} [hT: bot_order T] {x y: T}:
y >> x ≤ x := begin
rw [and_then],
cases em (y = bot),
case or.inl {
rw [if_pos h, h],
apply bot_le,
},
case or.inr {
rw [if_neg h],
},
end
theorem and_then.le_of_le {T: Sort _} [hT: bot_order T] {x y: T}:
x ≤ y →
y >> x ≤ y := begin
intro hle,
rw [and_then],
cases em (y = bot),
case or.inl {
rw [if_pos h],
},
case or.inr {
rw [if_neg h],
exact hle,
},
end
-- function classes:
class increasing {T: Sort _} [hT: partial_order T] (f: T → T): Prop :=
(elim:
∀ {x: T},
x ≤ f x)
class decreasing {T: Sort _} [hT: partial_order T] (f: T → T): Prop :=
(elim:
∀ {x: T},
f x ≤ x)
class monotone {T T': Sort _} [hT: partial_order T] [hT': partial_order T'] (f: T → T'): Prop :=
(elim:
∀ {x y: T},
x ≤ y →
f x ≤ f y)
class antitone {T T': Sort _} [hT: partial_order T] [hT': partial_order T'] (f: T → T'): Prop :=
(elim:
∀ {x y: T},
f x ≤ f y →
x ≤ y)
-- examples:
instance id.increasing {T: Sort _} [hT: partial_order T]:
increasing (@id T) := begin
split,
intro x,
simp,
end
instance id.decreasing {T: Sort _} [hT: partial_order T]:
decreasing (@id T) := begin
split,
intro x,
simp,
end
instance id.monotone {T: Sort _} [hT: partial_order T]:
monotone (@id T) := begin
split,
intros x y hxy,
simp,
exact hxy,
end
instance id.antitone {T: Sort _} [hT: partial_order T]:
antitone (@id T) := begin
split,
intros x y hid,
simp at hid,
exact hid,
end
instance nat.succ.monotone:
monotone nat.succ := begin
split,
intros,
apply nat.succ_le_succ,
assumption,
end
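-- An illustrative extra instance (an editorial addition, not in the original
-- file): adding a fixed constant is monotone on ℕ, by nat.add_le_add_right.
instance nat.add_const.monotone (k: ℕ):
  monotone (λ n: ℕ, n + k) := begin
  split,
  intros x y hxy,
  exact nat.add_le_add_right hxy k,
end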
-- monotonicity:
theorem monotone.of_comparable {T T': Sort _} [hT: partial_order T] [hT': partial_order T'] {f: T → T'} [hf: monotone f]:
∀ {x y: T},
x <=> y →
f x <=> f y := begin
intros x y hcomp,
induction hcomp,
case comp.le {
apply comp.le,
apply hf.elim,
exact hcomp,
},
case comp.ge {
apply comp.ge,
apply hf.elim,
exact hcomp,
},
end
theorem monotone.of_con {T T': Sort _} [hT: partial_order T] [hT': partial_order T'] {f: T → T'} [hf: monotone f]:
∀ {x y: T},
x ~ y →
f x ~ f y := begin
intros x y hcon,
induction hcon,
case con.le {
apply con.le,
apply hf.elim,
assumption,
},
case con.ge {
apply con.ge,
apply hf.elim,
assumption,
},
case con.trans {
exact con.trans hcon_ih_hxy hcon_ih_hyz,
},
end
@[simp] theorem monotone.bot_to_bot_of_sur {T T': Sort _} [hT: bot_order T] [hT': bot_order T'] (f: T → T') [hfm: monotone f] [hfs: surjective f]:
f bot = bot := begin
apply bot_uniq,
intro x,
have hfsx := hfs x,
apply exists.elim hfsx,
intros y hy,
rw [←hy],
apply hfm.elim,
apply bot_le,
end
def monotone.cod_bot_of_sur {T T': Sort _} [hT: bot_order T] [hT': partial_order T'] (f: T → T') [hfm: monotone f] [hfs: surjective f]:
bot_order T' := begin
split,
show T', from f bot,
intro x,
have hfsx := hfs x,
apply exists.elim hfsx,
intros y hy,
rw [←hy],
apply hfm.elim,
apply bot_le,
end
instance monotone.of_composition {T T' T'': Sort _} [hT: partial_order T] [hT': partial_order T'] [hT'': partial_order T''] (g: T → T') [hg: monotone g] (f: T' → T'') [hf: monotone f]:
monotone (f ∘ g) := begin
split,
intros x y hxy,
simp,
apply hf.elim,
apply hg.elim,
exact hxy,
end
-- galois connections:
structure galois_connection (A B: Sort _) [hA: partial_order A] [hB: partial_order B] :=
(F: A → B)
[hF: monotone F]
(G: B → A)
[hG: monotone G]
(elim:
∀ {a: A} {b: B},
F a ≤ b ↔
a ≤ G b)
def galois_connection.closure {A B: Sort _} [hA: partial_order A] [hB: partial_order B] (gc: galois_connection A B):
A → A := gc.G ∘ gc.F
instance galois_connection.closure.monotone {A B: Sort _} [hA: partial_order A] [hB: partial_order B] (gc: galois_connection A B):
monotone gc.closure := by apply @monotone.of_composition A B A hA hB hA gc.F gc.hF gc.G gc.hG
def galois_connection.kernel {A B: Sort _} [hA: partial_order A] [hB: partial_order B] (gc: galois_connection A B):
B → B := gc.F ∘ gc.G
instance galois_connection.kernel.monotone {A B: Sort _} [hA: partial_order A] [hB: partial_order B] (gc: galois_connection A B):
monotone gc.kernel := by apply @monotone.of_composition B A B hB hA hB gc.G gc.hG gc.F gc.hF
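-- Illustrative lemma (an editorial addition, not in the original file): the
-- closure of a Galois connection is increasing, directly from gc.elim
-- applied to the reflexivity fact F a ≤ F a.
theorem galois_connection.le_closure {A B: Sort _} [hA: partial_order A] [hB: partial_order B] (gc: galois_connection A B) (a: A):
  a ≤ gc.G (gc.F a) := gc.elim.mp (le_refl (gc.F a))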
-- trivial or well-founded:
def has_bot (T: Sort _) [hT: partial_order T]: Prop :=
∃ bot: T,
∀ x: T,
bot ≤ x
noncomputable def has_bot.bot_order {T: Sort _} [hT: partial_order T] (hbot: has_bot T):
bot_order T := {
bot := unexists hbot,
bot_le := unexists_prop hbot,
..hT,
}
inductive triv_or_bot (T: Sort _) [hT: partial_order T]: Prop
| triv (htriv: trivially_ordered T): triv_or_bot
| bot (hbot: has_bot T): triv_or_bot
-- lower bounds:
@[reducible] def lower_bound {T: Sort _} [ht: partial_order T] (lb: T) (s: set T): Prop :=
∀ x: T,
x ∈ s →
lb ≤ x
infix ` ≤ `:50 := lower_bound
class bounded_below {T: Sort _} [hT: partial_order T] (s: set T) :=
(elim:
∃ lb: T,
lb ≤ s)
instance bounded_below.of_bot {T: Sort _} [hT: bot_order T] (s: set T):
bounded_below s := begin
split,
apply exists.intro bot,
intros x hx,
apply bot_le,
end
noncomputable def bounded_below.bot_of_bounded_univ {T: Sort _} [hT: partial_order T] [hs: bounded_below {x: T | true}]:
bot_order T := begin
split,
show T, from unexists hs.elim,
intros,
apply unexists_prop hs.elim,
split,
end
class glb_prop (T: Sort _) extends partial_order T :=
(has_glb:
∀ s: set T,
∀ [hs: bounded_below s],
∃ glb: T,
glb ≤ s ∧
∀ x: T,
x ≤ s →
x ≤ glb)
noncomputable def inf {T: Sort _} [hT: glb_prop T] (s: set T) [hs: bounded_below s]: T :=
unexists (glb_prop.has_glb s)
theorem inf_le {T: Sort _} [hT: glb_prop T] {s: set T} [hs: bounded_below s]:
inf s ≤ s :=
(unexists_prop (glb_prop.has_glb s)).1
theorem inf_glb {T: Sort _} [hT: glb_prop T] {s: set T} [hs: bounded_below s]:
∀ x: T,
x ≤ s →
x ≤ inf s :=
(unexists_prop (glb_prop.has_glb s)).2
-- order inversion:
def invert_order (T: Sort _) [hT: partial_order T]:
partial_order T := {
le := λ x y: T, y ≤ x,
le_refl := begin
intros,
apply hT.le_refl,
end,
le_trans := begin
intros,
apply hT.le_trans;
assumption,
end,
le_antisymm := begin
intros,
apply hT.le_antisymm;
assumption,
end,
}
class monotone.decreasing {T T': Sort _} [hT: partial_order T] [hT': partial_order T'] (f: T → T'): Prop :=
(elim:
∀ {x y: T},
x ≤ y →
f y ≤ f x)
end posets
|
[STATEMENT]
lemma Swap: \<open>\<turnstile> B # A # G \<Longrightarrow> \<turnstile> A # B # G\<close>
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<turnstile> B # A # G \<Longrightarrow> \<turnstile> A # B # G
[PROOF STEP]
by (simp add: Order insert_commute) |
Here at York, we pride ourselves in delivering outstanding customer service. You can see some of our testimonials below.
"The professionalism from all the staff was outstanding and (they) made us very welcome"
"The professionalism of the dealer, his knowledge and the patience shown when dealing with me as a customer. Outstanding!"
"I have had a number of cars over the years including company cars but the service and the conditions and how clean it also came back was the best I have ever had which goes a long way in providing a quality service to customers" |
#pragma once
#include <gsl/gsl>
#include <Metal/Metal.h>
namespace Halley {
class MetalVideo;
class MetalBuffer {
public:
enum class Type
{
Vertex,
Index,
Constant
};
MetalBuffer(MetalVideo& video, Type type, size_t initialSize = 0);
MetalBuffer(MetalBuffer&& other) noexcept;
~MetalBuffer();
MetalBuffer(const MetalBuffer& other) = delete;
MetalBuffer& operator=(const MetalBuffer& other) = delete;
MetalBuffer& operator=(MetalBuffer&& other) = delete;
void setData(gsl::span<const gsl::byte> data);
void bindVertex(id<MTLRenderCommandEncoder> encoder, int bindPoint);
void bindFragment(id<MTLRenderCommandEncoder> encoder, int bindPoint);
id<MTLBuffer> getBuffer();
private:
MetalVideo& video;
Type type;
id<MTLBuffer> buffer = nil;
};
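	// Hypothetical usage sketch (editorial; "video", "encoder" and the payload
	// are assumptions from surrounding engine code, not declared here):
	//
	//   MetalBuffer constants(video, MetalBuffer::Type::Constant);
	//   constants.setData(gsl::as_bytes(gsl::make_span(blockData)));
	//   constants.bindFragment(encoder, 0);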
}
|
Formal statement is: lemma convex_on_convex_hull_bound: assumes "convex_on (convex hull S) f" and "\<forall>x\<in>S. f x \<le> b" shows "\<forall>x\<in> convex hull S. f x \<le> b" Informal statement is: If $f$ is convex on the convex hull of $S$ and $f(x) \leq b$ for all $x \in S$, then $f(x) \leq b$ for all $x$ in the convex hull of $S$. |
function solver(Nt::Int , Δt::Float64, L::Float64, N::Int, ɸ::Array, x::Array{Float64,1}, ẋ::Array{Float64,1},
parameter_ɸ, para_x, para_V, ɸ_fct::Function, x_fct::Function, V_fct::Function, times_save=reverse([i for i = 1:Nt]))
V = ([real.(zero(ɸ)) for _ = 1:length(size(ɸ))+1])
xarray = [i for i in range(-L / 2.0, L / 2.0, length = N)]
update_V = ((V, x, t) -> V_fct(V, x, t, xarray, para_V...))
ɸ_step = ((ɸ, V, t) -> ɸ_fct(ɸ, V, t, Δt, parameter_ɸ...))
a_step = ((ɸ, V) -> x_fct(ɸ, V, para_x...))
ɸ_r = [zero(ɸ) for _ = 1:length(times_save)]
x_r = [zero(x) for _ = 1:length(times_save)]
ẋ_r = [zero(ẋ) for _ = 1:length(times_save)]
ẍ_r = [zero(ẋ) for _ = 1:length(times_save)]
j = 1
V = update_V(V, x, 0.0)
temp = real.(a_step(ɸ, V))
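    # Velocity-Verlet style loop: advance positions with the current
    # acceleration, evolve the field and refresh the potential at the new
    # positions, then complete the velocity update with the new acceleration.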
for i = 1:Nt
a = temp
        x = x + Δt * ẋ + 0.5 * Δt^2 * a
ɸ = ɸ_step(ɸ, V[1], i * Δt)
V = update_V(V, x, i * Δt)
temp = a_step(ɸ, V)
        ẋ = ẋ + 0.5 * Δt * (a + temp)
        if i == last(times_save) # O(1) peek at the next save time
ɸ_r[j] = deepcopy(ɸ)
x_r[j] = deepcopy(x)
ẋ_r[j] = deepcopy(ẋ)
ẍ_r[j] = deepcopy(a)
j += 1
pop!(times_save) # O(1)
end
end
return ɸ_r, x_r, ẋ_r, ẍ_r
end
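# Hypothetical usage sketch (an editorial addition; the step, acceleration and
# potential functions and their parameter tuples are assumptions, not defined
# in this file):
#
# ɸ_r, x_r, ẋ_r, ẍ_r = solver(1000, 1e-3, 10.0, 256,
#     zeros(ComplexF64, 256), [0.0], [0.0],
#     (), (), (), my_ɸ_step, my_x_accel, my_potential)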
|