Gorilla Property Services offers Burnaby Pressure Washing to everyone in the area. Maybe you’re getting your home ready for the summer; we’d recommend an annual spring clean-up. Perhaps you’re getting ready to sell your Burnaby property, in which case it may need a classic ‘Top to Bottom’ clean. Here at Gorilla Property Services, we have not one but two Burnaby Pressure Washing teams! The first focuses on residential pressure washing; the second centers on Strata, multi-unit, and commercial care. Burnaby Pressure Washing varies: we clean all types of residential areas and surfaces, including driveways, walkways, houses, gutter faces, multi-unit housing structures, pool sides, and so on. Keeping curb appeal at an A+ is our Gorillas’ main goal! On the commercial side, Burnaby Pressure Washing is extremely important. We keep storefronts, sidewalks, windows, awnings, siding, brickwork, parking lots, and more in tip-top shape.
Curb appeal is very important. Keeping the entrance of your Burnaby business, and its surroundings, clean is the best first impression you can offer. If you’re about to list your Burnaby home, pressure washing the outside can make selling a lot easier. Our Burnaby Pressure Washing services start with a foaming application to the surface being treated, which sits for 15 minutes. This lets our ‘Gorilla Foam’ get underneath the dirt and grime, prepping the surface for pressure washing. After the job is complete, we also offer surface finishing options to all of our clients.
Ask our Gorilla Team about sealing the concrete or aggregate of walkways, driveways, entrances etc., with a satin or high gloss finish sealer.
|
Politically, Ireland is divided between the Republic of Ireland (officially named Ireland), which covers five-sixths of the island, and Northern Ireland, which is part of the United Kingdom, in the northeast of the island. In 2011 the population of Ireland was about 6.4 million, ranking it the second-most populous island in Europe after Great Britain. Just under 4.6 million live in the Republic of Ireland and just over 1.8 million live in Northern Ireland.
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef avro_ResolverSchema_hh__
#define avro_ResolverSchema_hh__
#include <boost/noncopyable.hpp>
#include <boost/shared_ptr.hpp>
#include <stdint.h>
#include "Boost.hh"
#include "Reader.hh"
/// \file ResolverSchema.hh
///
namespace avro {
class ValidSchema;
class Layout;
class Resolver;
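/// Binds a writer schema, a reader schema, and the reader's in-memory
/// layout, so that data written under the writer schema can be decoded
/// directly into the reader's representation (schema resolution).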
class ResolverSchema {
public:
ResolverSchema(const ValidSchema &writer, const ValidSchema &reader, const Layout &readerLayout);
private:
friend class ResolvingReader;
void parse(Reader &reader, uint8_t *address);
boost::shared_ptr<Resolver> resolver_;
};
} // namespace avro
#endif // avro_ResolverSchema_hh__
|
theory MMU_Instants_TLB_PDC
imports Simp_Lemmas_PDC
TLBJ.ARM_Monadic
begin
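(* Three MMU models over a TLB paired with a page-directory cache (PDC):
   a non-deterministic model that may evict entries at any point, a
   saturated model that always holds every non-faulting walk, and an
   abstract set-based model that tracks only the inconsistent address
   set, the global entries, and per-ASID snapshots. *)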
record non_det_tlb_state = state +
non_det_tlb :: "tlb \<times> pdc"
record sat_tlb_state = state +
sat_tlb :: "tlb \<times> pdc"
record set_tlb =
iset :: "vaddr set"
global_set :: "vaddr set"
snapshot :: "asid \<Rightarrow> (vaddr set \<times> (vaddr \<Rightarrow> pt_walk_typ))"
record set_tlb_state = state +
set_tlb :: set_tlb
definition
typ_non_det_tlb :: "'a non_det_tlb_state_scheme \<Rightarrow> (tlb \<times> pdc) state_scheme"
where
"typ_non_det_tlb s = state.extend (state.truncate s) (non_det_tlb s)"
definition
typ_sat_tlb :: "'a sat_tlb_state_scheme \<Rightarrow> (tlb \<times> pdc) state_scheme"
where
"typ_sat_tlb s = state.extend (state.truncate s) (sat_tlb s)"
definition
typ_set_tlb :: "'a set_tlb_state_scheme \<Rightarrow> (set_tlb) state_scheme"
where
"typ_set_tlb s = state.extend (state.truncate s) (set_tlb s)"
lemma non_det_tlb_more [simp]:
"state.more (typ_non_det_tlb s) = (non_det_tlb s)"
by (clarsimp simp: typ_non_det_tlb_def state.defs)
lemma sat_tlb_more [simp]:
"state.more (typ_sat_tlb s) = (sat_tlb s)"
by (clarsimp simp: typ_sat_tlb_def state.defs)
lemma set_tlb_more' [simp]:
"state.more (typ_set_tlb s) = (set_tlb s)"
by (clarsimp simp: typ_set_tlb_def state.defs)
lemma non_det_tlb_truncate [simp]:
"state.truncate (typ_non_det_tlb s') = state.truncate s'"
by (clarsimp simp: typ_non_det_tlb_def state.defs)
lemma sat_tlb_truncate [simp]:
"state.truncate (typ_sat_tlb s') = state.truncate s'"
by (clarsimp simp: typ_sat_tlb_def state.defs)
lemma set_tlb_truncate [simp]:
"state.truncate (typ_set_tlb s') = state.truncate s'"
by (clarsimp simp: typ_set_tlb_def state.defs)
lemma typ_non_det_prim_parameter [simp]:
" TTBR0 (typ_non_det_tlb s) = TTBR0 s \<and> ASID (typ_non_det_tlb s) = ASID s \<and>
MEM (typ_non_det_tlb s) = MEM s \<and> exception (typ_non_det_tlb s) = exception s"
by (clarsimp simp: typ_non_det_tlb_def state.defs)
lemma typ_sat_prim_parameter [simp]:
"TTBR0 (typ_sat_tlb s) = TTBR0 s \<and> ASID (typ_sat_tlb s) = ASID s \<and> MEM (typ_sat_tlb s) = MEM s \<and> exception (typ_sat_tlb s) = exception s"
by (clarsimp simp: typ_sat_tlb_def state.defs)
lemma typ_set_prim_parameter [simp]:
"TTBR0 (typ_set_tlb s) = TTBR0 s \<and> ASID (typ_set_tlb s) = ASID s \<and> MEM (typ_set_tlb s) = MEM s \<and> exception (typ_set_tlb s) = exception s"
by (clarsimp simp: typ_set_tlb_def state.defs)
abbreviation
"consistent (s:: (tlb \<times> pdc) state_scheme) \<equiv>
consistent0' (MEM s) (ASID s) (TTBR0 s) (fst (state.more s)) (snd (state.more s))"
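(* A state is saturated when its TLB and PDC already contain every entry
   obtainable by a non-faulting pt_walk (resp. pdc_walk) under the
   current ASID, MEM and TTBR0. *)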
definition
saturated :: "(tlb \<times> pdc) state_scheme \<Rightarrow> bool"
where
"saturated s \<equiv> the ` {e\<in>pt_walk (ASID s) (MEM s) (TTBR0 s) ` UNIV. \<not>is_fault e} \<subseteq> (fst (state.more s)) \<and>
the ` {e\<in>pdc_walk(ASID s) (MEM s) (TTBR0 s) ` UNIV. \<not>is_fault e} \<subseteq> (snd (state.more s))"
definition
tlb_rel_sat :: "(tlb \<times> pdc) state_scheme \<Rightarrow> (tlb \<times> pdc) state_scheme \<Rightarrow> bool"
where
"tlb_rel_sat s t \<equiv> state.truncate s = state.truncate t \<and>
fst (state.more s) \<subseteq> fst (state.more t) \<and>
snd (state.more s) \<subseteq> snd (state.more t) \<and> saturated t"
definition
inconsistent_vaddrs :: "(tlb \<times> pdc) state_scheme \<Rightarrow> vaddr set"
where
"inconsistent_vaddrs s \<equiv> {va. lookup'' (fst (state.more s)) (ASID s) va = Incon} \<union> {va. lookup_pdc (snd (state.more s)) (ASID s) va = Incon}"
definition
incoherrent_vaddrs :: "(tlb \<times> pdc) state_scheme \<Rightarrow> vaddr set"
where
"incoherrent_vaddrs s \<equiv> {va. \<exists>x. lookup'' (fst ((state.more s))) (ASID s) va = Hit x \<and> is_fault (pt_walk (ASID s) (MEM s) (TTBR0 s) va)} \<union>
{va. \<exists>x. lookup_pdc ( (snd(state.more s))) (ASID s) va = Hit x \<and> is_fault (pdc_walk (ASID s) (MEM s) (TTBR0 s) va)} "
definition
incon_addrs :: "(tlb \<times> pdc) state_scheme \<Rightarrow> vaddr set"
where
"incon_addrs s \<equiv> inconsistent_vaddrs s \<union> incoherrent_vaddrs s"
definition
global_range :: "(tlb \<times> pdc) state_scheme \<Rightarrow> vaddr set"
where
"global_range s \<equiv> \<Union>(range_of ` global_entries (fst( state.more s))) \<union>
\<Union>(range_of ` global_entries_pdc (snd(state.more s))) "
definition
snap_conv' :: "(vaddr set \<times> (vaddr \<Rightarrow> pt_walk_typ)) \<Rightarrow> (vaddr \<Rightarrow> tlb_entry lookup_type \<times> pdc_entry lookup_type)"
where
"snap_conv' snp \<equiv> \<lambda>v. if v \<in> (fst snp) then (Incon, Incon) else
case (snd snp) v of Fault \<Rightarrow> (Miss, Miss)
| Partial_Walk pe \<Rightarrow> if asid_of_pdc pe = None then (Miss, Miss) else (Miss, Hit pe)
| Full_Walk te pe \<Rightarrow> if asid_of te = None
then (Miss, if asid_of_pdc pe = None then Miss else Hit pe)
else (Hit te, Hit pe )"
definition
"lookup_from' snp a v \<equiv> snap_conv' (snp a) v"
definition
tlb_rel_abs :: "(tlb \<times> pdc) state_scheme \<Rightarrow> (set_tlb) state_scheme \<Rightarrow> bool"
where
"tlb_rel_abs s t \<equiv> state.truncate s = state.truncate t \<and>
saturated s \<and>
incon_addrs s \<subseteq> iset (state.more t) \<and>
global_range s \<subseteq> global_set (state.more t) \<and>
(\<forall>a v. a \<noteq> ASID s \<longrightarrow> lookup'' (fst ((state.more s)) - global_entries (fst ((state.more s)))) a v \<le> fst (lookup_from' (snapshot (state.more t)) a v) \<and>
lookup_pdc (snd ((state.more s)) - global_entries_pdc (snd ((state.more s)))) a v \<le> snd (lookup_from' (snapshot (state.more t)) a v))"
declare return_def [simp add]
declare bind_def [simp add]
declare read_state_def [simp add]
declare update_state_def [simp add]
declare extend_state_def [simp add]
declare trim_state_def [simp add]
lemma tlb_rel_satD:
"tlb_rel_sat s t \<Longrightarrow> MEM t = MEM s \<and> TTBR0 t = TTBR0 s \<and> ASID t = ASID s \<and>
fst(state.more s) \<subseteq> fst(state.more t) \<and> snd(state.more s) \<subseteq> snd(state.more t) \<and> exception t = exception s \<and> saturated t"
by (clarsimp simp: tlb_rel_sat_def state.defs)
lemma tlb_rel_absD:
"tlb_rel_abs s t \<Longrightarrow> MEM t = MEM s \<and> TTBR0 t = TTBR0 s \<and> ASID t = ASID s \<and> saturated s \<and>
incon_addrs s \<subseteq> iset ((state.more t)) \<and> global_range s \<subseteq> global_set ((state.more t)) \<and>
exception t = exception s"
by (clarsimp simp: tlb_rel_abs_def state.defs)
lemma tlb_rel_sat_consistent:
"\<lbrakk> tlb_rel_sat s t; consistent t va \<rbrakk> \<Longrightarrow> consistent s va"
apply (insert tlb_rel_satD [of s t])
apply ( clarsimp )
apply (drule asid_tlb_mono)
by (drule asid_pdc_mono [of _ _ "ASID s" va], auto simp: consistent0'_def less_eq_lookup_type)
lemma sat_state_tlb:
"\<lbrakk> saturated s \<rbrakk> \<Longrightarrow>
fst((state.more s)) = fst((state.more s)) \<union> the ` {e \<in> range (pt_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e} \<and>
snd((state.more s)) = snd((state.more s)) \<union> the ` {e \<in> range (pdc_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e}"
by (fastforce simp: saturated_def)
lemma saturated_tlb_pde:
"saturated (typ_sat_tlb s) \<Longrightarrow> fst (sat_tlb s) = fst (sat_tlb s) \<union> the ` {e\<in>pt_walk (ASID s) (MEM s) (TTBR0 s) ` UNIV. \<not>is_fault e} \<and>
snd (sat_tlb s) = snd (sat_tlb s) \<union> the ` {e\<in>pdc_walk (ASID s) (MEM s) (TTBR0 s) ` UNIV. \<not>is_fault e}"
apply (clarsimp simp: saturated_def fst_def) apply (cases "sat_tlb s" ; clarsimp) by blast
lemma sat_miss_fault:
"\<lbrakk> saturated (typ_sat_tlb s); lookup'' (fst (sat_tlb s)) (ASID s) va = Miss \<rbrakk> \<Longrightarrow>
is_fault (pt_walk (ASID s) (MEM s) (TTBR0 s) va)"
apply (subgoal_tac " lookup'' (fst (sat_tlb s) \<union> the ` {e \<in> range (pt_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e}) (ASID s) va = Miss")
apply (thin_tac "lookup'' (fst (sat_tlb s)) (ASID s) va = Miss")
apply (drule lookup_asid_tlb_miss_union)
apply clarsimp
using asid_tlb_lookup_miss_is_fault apply force
using sat_state_tlb by force
lemma saturatd_lookup_hit_no_fault:
"\<lbrakk>saturated (typ_sat_tlb s);
lookup'' (fst(sat_tlb s)) (ASID s) b = Hit x; \<not> is_fault (pt_walk (ASID s) (MEM s) (TTBR0 s) b)\<rbrakk> \<Longrightarrow>
x = the (pt_walk (ASID s) (MEM s) (TTBR0 s) b)"
apply (subgoal_tac "lookup'' (fst(sat_tlb s) \<union>
the ` {e \<in> range (pt_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e}) (ASID s) ( b) = Hit x")
prefer 2
apply (drule sat_state_tlb, clarsimp simp: state.defs)
apply (drule lookup_asid_tlb_hit_miss_or_hit')
apply (erule disjE)
apply (clarsimp simp: asid_tlb_lookup_range_pt_walk_hit)
apply (frule asid_tlb_lookup_range_fault_pt_walk)
apply (drule_tac x = b in bspec; clarsimp simp: lookup_asid_tlb_hit_entry_range)
done
lemma saturated_tlb:
"saturated (typ_sat_tlb s) \<Longrightarrow> fst (sat_tlb s) = fst (sat_tlb s) \<union> the ` {e\<in>pt_walk (ASID s) (MEM s) (TTBR0 s) ` UNIV. \<not>is_fault e} "
apply (clarsimp simp: saturated_def fst_def) apply (cases "sat_tlb s" ; clarsimp) by blast
lemma sat_miss_pdc_fault:
"\<lbrakk> saturated (typ_sat_tlb s); lookup_pdc (snd (sat_tlb s)) (ASID s) va = Miss \<rbrakk> \<Longrightarrow>
is_fault (pdc_walk (ASID s) (MEM s) (TTBR0 s) va)"
apply (subgoal_tac "lookup_pdc (snd (sat_tlb s) \<union> the ` {e \<in> range (pdc_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e}) (ASID s) va = Miss")
apply (thin_tac "lookup_pdc (snd (sat_tlb s)) (ASID s) va = Miss")
apply (drule lookup_asid_pdc_miss_union)
apply clarsimp
apply (drule lookup_pdc_miss_is_fault)
apply clarsimp
using saturated_tlb_pde by fastforce
lemma saturatd_lookup_pdc_hit_no_fault:
"\<lbrakk>saturated (typ_sat_tlb s);
lookup_pdc (snd(sat_tlb s)) (ASID s) b = Hit x; \<not> is_fault (pdc_walk (ASID s) (MEM s) (TTBR0 s) b)\<rbrakk> \<Longrightarrow>
x = the (pdc_walk (ASID s) (MEM s) (TTBR0 s) b)"
apply (subgoal_tac "lookup_pdc (snd(sat_tlb s) \<union>
the ` {e \<in> range (pdc_walk (ASID s) (MEM s) (TTBR0 s)). \<not> is_fault e}) (ASID s) b = Hit x")
prefer 2
apply (drule sat_state_tlb, clarsimp simp: state.defs)
apply (drule lookup_asid_pdc_hit_miss_or_hit')
apply (erule disjE)
apply (clarsimp simp: lookup_pdc_range_pt_walk_hit)
apply (frule lookup_pdc_range_fault_pt_walk)
apply (drule_tac x = b in bspec; clarsimp simp: lookup_asid_pdc_hit_entry_range)
done
lemma saturate_no_icon_miss:
"\<lbrakk>saturated (typ_sat_tlb s); lookup_pdc (snd (sat_tlb s)) (ASID s) va \<noteq> Incon;
lookup_pdc (snd (sat_tlb s)) (ASID s) va \<noteq> Miss; \<not> is_fault (pdc_walk (ASID s) (MEM s) (TTBR0 s) va)\<rbrakk>
\<Longrightarrow> lookup_pdc (snd (sat_tlb s)) (ASID s) va = Hit (the (pdc_walk (ASID s) (MEM s) (TTBR0 s) va))"
apply (subgoal_tac "\<exists>x\<in>snd(sat_tlb s). lookup_pdc (snd(sat_tlb s)) (ASID s) va = Hit x")
prefer 2
apply (meson lookup_in_asid_pdc lookup_type.exhaust)
by (clarsimp simp: saturatd_lookup_pdc_hit_no_fault)
lemma write_mem_eq_TLB:
"\<lbrakk> write'mem1 (val, va, sz) s = ((), s') \<rbrakk> \<Longrightarrow> state.more s' = state.more s"
by (clarsimp simp: write'mem1_def raise'exception_def split: if_split_asm)
lemma write_same_mem:
"\<lbrakk> write'mem1 (val, va, sz) s = ((), s') ; write'mem1 (val, va, sz) t = ((), t') ;
MEM s = MEM t\<rbrakk> \<Longrightarrow> MEM s' = MEM t'"
by (clarsimp simp: write'mem1_def raise'exception_def split:if_split_asm)
lemma write_same_mem_excep:
"\<lbrakk> write'mem1 (val, pa, sz) s = ((), s') ; write'mem1 (val, pa, sz) t = ((), t') ;
MEM s = MEM t ; exception s = exception t \<rbrakk> \<Longrightarrow> exception s' = exception t'"
by (clarsimp simp: write'mem1_def raise'exception_def split:if_split_asm)
lemma write_mem_rel:
"\<lbrakk> write'mem1 (val, va, sz) s = ((), s') \<rbrakk> \<Longrightarrow> s' = s \<lparr> MEM:= MEM s' , exception:= exception s' \<rparr>"
by (clarsimp simp: write'mem1_def raise'exception_def split: if_split_asm)
lemma not_member_incon_consistent:
"\<lbrakk>va \<notin> incon_addrs (typ_sat_tlb s) ; saturated (typ_sat_tlb s) \<rbrakk> \<Longrightarrow>
consistent (typ_sat_tlb s) va"
apply (clarsimp simp: incon_addrs_def inconsistent_vaddrs_def incoherrent_vaddrs_def consistent0'_def)
apply (erule disjE)
apply (rule conjI)
apply (meson lookup_type.exhaust)
using not_miss_incon_hit_asid_pdc saturate_no_icon_miss apply blast
apply clarsimp
apply (rule conjI)
apply (subgoal_tac "\<exists>x\<in>fst(sat_tlb s). lookup'' (fst(sat_tlb s)) (ASID s) va = Hit x")
prefer 2
apply (meson lookup_in_asid_tlb lookup_type.exhaust sat_miss_fault)
using saturatd_lookup_hit_no_fault apply blast
apply (subgoal_tac "\<exists>x\<in>snd(sat_tlb s). lookup_pdc (snd(sat_tlb s)) (ASID s) va = Hit x")
prefer 2
apply (meson is_fault_pde_is_fault_pt lookup_type.exhaust lookup_in_asid_pdc sat_miss_pdc_fault)
using saturate_no_icon_miss by blast
lemma tlb_rel_abs_consistent [simp]:
"\<lbrakk>va \<notin> iset (set_tlb t) ; tlb_rel_abs (typ_sat_tlb s) (typ_set_tlb t) \<rbrakk> \<Longrightarrow>
consistent (typ_sat_tlb s) va "
apply (rule not_member_incon_consistent ; clarsimp simp: tlb_rel_abs_def)
by blast
(* ARM Monadic Simplification Lemmas *)
lemma mem1_exception:
"mem1 p s = (val, t) \<Longrightarrow> t = s\<lparr>exception := exception t\<rparr>"
apply (clarsimp simp: mem1_def)
apply (cases "MEM s p")
apply (clarsimp simp: raise'exception_def split: if_split_asm)
apply clarsimp
done
lemma mem1_read_exception:
"mem_read1 (a, sz) b = (val, r) \<Longrightarrow> r = b \<lparr>exception := exception r\<rparr>"
apply (clarsimp simp: mem_read1_def)
apply (clarsimp split: if_split_asm)
apply (case_tac "mem1 (a r+ 0) b" , clarsimp)
apply (clarsimp simp: mem1_exception)
apply (case_tac "mem1 (a r+ 1) b" , clarsimp)
apply (case_tac "mem1 (a r+ 0) ba", clarsimp)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (cases b, case_tac ba, cases r ,clarsimp)
apply (case_tac "mem1 (a r+ 3) b" , clarsimp)
apply (case_tac "mem1 (a r+ 2) ba", clarsimp)
apply (case_tac "mem1 (a r+ 1) baa", clarsimp)
apply (case_tac "mem1 (a r+ 0) bb", clarsimp)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (cases b, case_tac ba, case_tac baa, case_tac bb , cases r ,clarsimp)
apply (case_tac "mem1 (a r+ 7) b" , clarsimp)
apply (case_tac "mem1 (a r+ 6) ba", clarsimp)
apply (case_tac "mem1 (a r+ 5) baa", clarsimp)
apply (case_tac "mem1 (a r+ 4) bb", clarsimp)
apply (case_tac "mem1 (a r+ 3) bc", clarsimp)
apply (case_tac "mem1 (a r+ 2) bd", clarsimp)
apply (case_tac "mem1 (a r+ 1) be", clarsimp)
apply (case_tac "mem1 (a r+ 0) bf", clarsimp)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (drule mem1_exception)
apply (cases b, case_tac ba, case_tac baa, case_tac bb ,case_tac bc ,
case_tac bd , case_tac be , case_tac bf , cases r ,clarsimp)
apply (clarsimp simp: raise'exception_def split:if_split_asm)
done
lemma write_mem_state_trun_equal:
"\<lbrakk> write'mem1 (val, pa, sz) s = ((), s'); write'mem1 (val, pa, sz) t = ((), t');
state.truncate s = state.truncate t \<rbrakk> \<Longrightarrow> state.truncate s' = state.truncate t'"
apply (frule write_mem_rel)
apply rotate_tac
apply (frule write_mem_rel)
apply (subgoal_tac "MEM s' = MEM t' \<and> exception s' = exception t'")
apply clarsimp
apply (cases s, cases t, cases s', cases t', clarsimp simp: state.defs)
apply (cases s, cases t, cases s', cases t', clarsimp simp: state.defs)
apply (clarsimp simp: write'mem1_def Let_def state.defs raise'exception_def split:if_split_asm)
done
lemma write_mem1_eq_ASID_TTBR0:
"\<lbrakk> write'mem1 (val, va, sz) s = ((), s') \<rbrakk> \<Longrightarrow> fst (state.more s') = fst (state.more s) \<and> TTBR0 s' = TTBR0 s"
by (clarsimp simp: write'mem1_def raise'exception_def split: if_split_asm)
lemma tlb_sat_set_mem1 [simp]:
"fst(sat_tlb (snd (mem1 ps s))) = fst(sat_tlb s) \<and>
snd(sat_tlb (snd (mem1 ps s))) = snd(sat_tlb s)"
by (simp add: mem1_def raise'exception_def split: option.splits)
definition
flush_tlb_pdc_vset :: " (tlb \<times> pdc) \<Rightarrow> vaddr set \<Rightarrow> (tlb \<times> pdc)"
where
"flush_tlb_pdc_vset t vset = (fst t - \<Union>((\<lambda> v. {e\<in>(fst t). v \<in> range_of e}) ` vset), snd t - \<Union>((\<lambda> v. {e\<in>(snd t). v \<in> range_of e}) ` vset))"
definition
flush_tlb_pdc_asid :: " (tlb \<times> pdc) \<Rightarrow> asid \<Rightarrow> (tlb \<times> pdc)"
where
"flush_tlb_pdc_asid t a = (fst t - {e\<in>(fst t). asid_of e = Some a}, snd t - {e\<in>(snd t). asid_of_pdc e = Some a})"
definition
flush_tlb_pdc_a_vset :: " (tlb \<times> pdc) \<Rightarrow> asid \<Rightarrow> vaddr set \<Rightarrow> (tlb \<times> pdc)"
where
"flush_tlb_pdc_a_vset t a vset = (fst t - (\<Union>v\<in>vset. {e\<in>(fst t). v \<in> range_of e \<and> asid_of e = Some a}),
snd t - (\<Union>v\<in>vset. {e\<in>(snd t). v \<in> range_of e \<and> asid_of_pdc e = Some a}))"
consts tlb_evict :: "(tlb_entry set \<times> pdc) state_scheme \<Rightarrow> tlb_entry set \<times> pdc"
instantiation non_det_tlb_state_ext :: (type) mmu
begin
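(* Translation in the non-deterministic model first evicts an arbitrary
   set of entries (tlb_evict), then consults the TLB; on a miss it falls
   back to the PDC and, failing that, to a full page-table walk, caching
   any non-faulting result. *)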
definition
"(mmu_translate v :: ('a non_det_tlb_state_scheme \<Rightarrow> _))
= do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := pairsub (non_det_tlb s) (tlb_evict (typ_non_det_tlb s)) \<rparr>);
mem <- read_state MEM;
asid <- read_state ASID;
ttbr0 <- read_state TTBR0;
tlb_pde <- read_state non_det_tlb;
let tlb = fst tlb_pde;
let pdc = snd tlb_pde;
case lookup'' tlb asid v of
Hit entry \<Rightarrow> return (va_to_pa v entry)
| Miss \<Rightarrow>
(case lookup_pdc pdc asid v of
Hit pdc_entry \<Rightarrow> do {
let entry = pde_tlb_entry pdc_entry mem v;
if is_fault entry
then raise'exception (PAGE_FAULT ''more info'')
else do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := pairunion (non_det_tlb s) ({the entry} , {}) \<rparr>);
return (va_to_pa v (the entry))
} }
| Miss \<Rightarrow> do {
let pde = pdc_walk asid mem ttbr0 v;
if is_fault pde
then raise'exception (PAGE_FAULT ''more info'')
else do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := pairunion (non_det_tlb s) ({} , {the pde}) \<rparr>);
let entry = pt_walk asid mem ttbr0 v;
if is_fault entry
then raise'exception (PAGE_FAULT ''more info'')
else do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := pairunion (non_det_tlb s) ({the entry} , {}) \<rparr>);
return (va_to_pa v (the entry)) }
}
}
| Incon \<Rightarrow> raise'exception (IMPLEMENTATION_DEFINED ''set on fire''))
| Incon \<Rightarrow> raise'exception (IMPLEMENTATION_DEFINED ''set on fire'')
}"
definition
"(mmu_read_size :: (vaddr \<times> nat \<Rightarrow> 'a non_det_tlb_state_scheme \<Rightarrow> bool list \<times> 'a non_det_tlb_state_scheme))
\<equiv> \<lambda>(va,size). do {
pa \<leftarrow> mmu_translate va :: ('a non_det_tlb_state_scheme \<Rightarrow> _);
mem_read1 (pa , size)
}"
definition
"(mmu_write_size :: (bool list \<times> vaddr \<times> nat \<Rightarrow> 'a non_det_tlb_state_scheme \<Rightarrow> unit \<times> 'a non_det_tlb_state_scheme))
\<equiv> \<lambda>(value, vaddr, size). do {
paddr <- mmu_translate vaddr :: ('a non_det_tlb_state_scheme \<Rightarrow> _);
exception <- read_state exception;
if exception = NoException
then write'mem1 (value, paddr, size)
else return ()
}"
definition
"(update_TTBR0 r :: ('a non_det_tlb_state_scheme \<Rightarrow> _)) =
do {
update_state (\<lambda>s. s\<lparr> TTBR0 := r \<rparr>)
} "
definition
"(update_ASID a :: ('a non_det_tlb_state_scheme \<Rightarrow> _)) =
do { update_state (\<lambda>s. s\<lparr> ASID := a \<rparr>) }"
definition
"(flush f :: ('a non_det_tlb_state_scheme \<Rightarrow> _)) \<equiv>
case f of FlushTLB \<Rightarrow> update_state (\<lambda>s. s\<lparr> non_det_tlb := ({}, {}) \<rparr>)
| Flushvarange vset \<Rightarrow> do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := flush_tlb_pdc_vset (non_det_tlb s) vset \<rparr>)}
| FlushASID a \<Rightarrow> do { update_state (\<lambda>s. s\<lparr> non_det_tlb := flush_tlb_pdc_asid (non_det_tlb s) a \<rparr>)}
| FlushASIDvarange a vset \<Rightarrow> do {
update_state (\<lambda>s. s\<lparr> non_det_tlb := flush_tlb_pdc_a_vset (non_det_tlb s) a vset \<rparr>)}"
instance ..
end
instantiation sat_tlb_state_ext :: (type) mmu
begin
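(* The saturated model re-saturates the TLB and PDC with all non-faulting
   walks before every lookup, so a Miss immediately implies a fault
   (cf. sat_miss_fault above). *)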
definition
"(mmu_translate v :: ('a sat_tlb_state_scheme \<Rightarrow> _))
= do {
mem <- read_state MEM;
asid <- read_state ASID;
ttbr0 <- read_state TTBR0;
let all_pdes = the ` {e\<in>pdc_walk asid mem ttbr0 ` UNIV. \<not>is_fault e};
let all_tlbes = the ` {e\<in>\<Union>(tlb_pdc_walk asid all_pdes mem ttbr0 ` UNIV). \<not>is_fault e};
tlb_pde0 <- read_state sat_tlb;
let tlb0 = fst tlb_pde0;
let pdc0 = snd tlb_pde0;
let tlb_pde = pairunion tlb_pde0 (all_tlbes , all_pdes) ;
update_state (\<lambda>s. s\<lparr> sat_tlb := tlb_pde \<rparr>);
case lookup'' (fst tlb_pde) asid v of
Hit entry \<Rightarrow> return (va_to_pa v entry)
| Miss \<Rightarrow> raise'exception (PAGE_FAULT ''more info'')
| Incon \<Rightarrow> raise'exception (IMPLEMENTATION_DEFINED ''set on fire'')
}"
definition
"(mmu_read_size :: (vaddr \<times> nat \<Rightarrow> 'a sat_tlb_state_scheme \<Rightarrow> bool list \<times> 'a sat_tlb_state_scheme))
\<equiv> \<lambda>(va,size). do {
pa \<leftarrow> mmu_translate va :: ('a sat_tlb_state_scheme \<Rightarrow> _);
mem_read1 (pa , size)
}"
definition
"Somes S' \<equiv> the ` (S' \<inter> {x. x \<noteq> None})"
definition
"(mmu_write_size :: (bool list \<times> vaddr \<times> nat \<Rightarrow> 'a sat_tlb_state_scheme \<Rightarrow> unit \<times> 'a sat_tlb_state_scheme))
\<equiv> \<lambda>(value, vaddr, size). do {
ttbr0 <- read_state TTBR0;
asid <- read_state ASID;
pa <- mmu_translate vaddr :: ('a sat_tlb_state_scheme \<Rightarrow> _);
tlb_pde0 <- read_state sat_tlb;
exception <- read_state exception;
if exception = NoException
then do {
write'mem1 (value, pa, size);
mem1 <- read_state MEM;
let all_pdes = the ` {e\<in>pdc_walk asid mem1 ttbr0 ` UNIV. \<not>is_fault e};
let all_tlbes = the ` {e\<in>\<Union>(tlb_pdc_walk asid all_pdes mem1 ttbr0 ` UNIV). \<not>is_fault e};
let tlb_pde = pairunion tlb_pde0 (all_tlbes , all_pdes) ;
update_state (\<lambda>s. s\<lparr> sat_tlb := tlb_pde \<rparr>)
}
else return ()
}"
definition
"(update_TTBR0 r :: ('a sat_tlb_state_scheme \<Rightarrow> _)) \<equiv> do {
update_state (\<lambda>s. s\<lparr> TTBR0 := r \<rparr>);
asid <- read_state ASID;
mem <- read_state MEM;
let all_pdes = the ` {e\<in>pdc_walk asid mem r ` UNIV. \<not>is_fault e};
let all_tlbes = the ` {e\<in>\<Union>(tlb_pdc_walk asid all_pdes mem r ` UNIV). \<not>is_fault e};
tlb_pde0 <- read_state sat_tlb;
let tlb0 = fst tlb_pde0;
let pdc0 = snd tlb_pde0;
let tlb_pde = pairunion tlb_pde0 (all_tlbes , all_pdes) ;
update_state (\<lambda>s. s\<lparr> sat_tlb := tlb_pde \<rparr>)} "
definition
"(update_ASID a :: ('a sat_tlb_state_scheme \<Rightarrow> _)) = do {
update_state (\<lambda>s. s\<lparr> ASID := a \<rparr>);
mem <- read_state MEM;
ttbr0 <- read_state TTBR0;
let all_pdes = the ` {e\<in>pdc_walk a mem ttbr0 ` UNIV. \<not>is_fault e};
let all_tlbes = the ` {e\<in>\<Union>(tlb_pdc_walk a all_pdes mem ttbr0 ` UNIV). \<not>is_fault e};
tlb_pde0 <- read_state sat_tlb;
let tlb0 = fst tlb_pde0;
let pdc0 = snd tlb_pde0;
let tlb_pde = pairunion tlb_pde0 (all_tlbes , all_pdes) ;
update_state (\<lambda>s. s\<lparr> sat_tlb := tlb_pde \<rparr>)} "
definition
"(flush f :: ('a sat_tlb_state_scheme \<Rightarrow> _)) \<equiv> do {
mem <- read_state MEM;
ttbr0 <- read_state TTBR0;
asid <- read_state ASID;
tlb0 <- read_state sat_tlb;
let all_pdes = the ` {e\<in>pdc_walk asid mem ttbr0 ` UNIV. \<not>is_fault e};
let all_tlbes = the ` {e\<in>\<Union>(tlb_pdc_walk asid all_pdes mem ttbr0 ` UNIV). \<not>is_fault e};
case f of FlushTLB \<Rightarrow> update_state (\<lambda>s. s\<lparr> sat_tlb := (all_tlbes , all_pdes) \<rparr>)
| Flushvarange vset \<Rightarrow> do {
let tlb_pde = pairunion (flush_tlb_pdc_vset tlb0 vset) (all_tlbes , all_pdes) ;
update_state (\<lambda>s. s\<lparr> sat_tlb := tlb_pde \<rparr>) }
| FlushASID a \<Rightarrow> update_state (\<lambda>s. s\<lparr> sat_tlb := pairunion (flush_tlb_pdc_asid tlb0 a) (all_tlbes,all_pdes) \<rparr>)
| FlushASIDvarange a vset \<Rightarrow>
update_state (\<lambda>s. s\<lparr> sat_tlb := pairunion (flush_tlb_pdc_a_vset tlb0 a vset) (all_tlbes,all_pdes) \<rparr>)}"
instance ..
end
definition
ptable_comp :: "(vaddr \<Rightarrow> pt_walk_typ) \<Rightarrow> (vaddr \<Rightarrow> pt_walk_typ) \<Rightarrow> vaddr set"
where
"ptable_comp walk walk' \<equiv> {va. \<not>(walk va \<preceq> walk' va)}"
definition
incon_comp :: "asid \<Rightarrow> asid \<Rightarrow> heap \<Rightarrow> heap \<Rightarrow> paddr \<Rightarrow> paddr \<Rightarrow> vaddr set"
where
"incon_comp a a' hp hp' rt rt' \<equiv> ptable_comp (pt_walk_pair a hp rt) (pt_walk_pair a' hp' rt')"
definition
snp_upd_cur :: "vaddr set \<Rightarrow> heap \<Rightarrow> ttbr0 \<Rightarrow> asid \<Rightarrow> (vaddr set \<times> (vaddr \<Rightarrow> pt_walk_typ))"
where
"snp_upd_cur ist m r \<equiv> \<lambda>a. (ist, \<lambda>v. pt_walk_pair a m r v)"
definition
snp_upd_cur' :: "(asid \<Rightarrow> (vaddr set \<times> (vaddr \<Rightarrow> pt_walk_typ))) \<Rightarrow> vaddr set \<Rightarrow>
heap \<Rightarrow> ttbr0 \<Rightarrow> asid \<Rightarrow> (asid \<Rightarrow> (vaddr set \<times> (vaddr \<Rightarrow> pt_walk_typ)))"
where
"snp_upd_cur' snp ist mem ttbr0 a \<equiv> snp (a := snp_upd_cur ist mem ttbr0 a)"
instantiation set_tlb_state_ext :: (type) mmu
begin
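(* The abstract model keeps no TLB at all: translation walks the page
   tables directly and raises an exception whenever the address lies in
   the inconsistency set iset. *)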
definition
"(mmu_translate v :: ('a set_tlb_state_scheme \<Rightarrow> _))
= do {
mem <- read_state MEM;
asid <- read_state ASID;
ttbr0 <- read_state TTBR0;
set_tlb <- read_state set_tlb;
if v \<in> iset set_tlb
then raise'exception (IMPLEMENTATION_DEFINED ''set on fire'')
else let entry = pt_walk asid mem ttbr0 v in
if is_fault entry
then raise'exception (PAGE_FAULT ''more info'')
else return (va_to_pa v (the entry))
}"
definition
"(mmu_read_size :: (vaddr \<times> nat \<Rightarrow> 'a set_tlb_state_scheme \<Rightarrow> bool list \<times> 'a set_tlb_state_scheme))
\<equiv> \<lambda>(va,size). do {
pa \<leftarrow> mmu_translate va :: ('a set_tlb_state_scheme \<Rightarrow> _);
mem_read1 (pa , size)
}"
definition
"(mmu_write_size :: (bool list \<times> vaddr \<times> nat \<Rightarrow> 'a set_tlb_state_scheme \<Rightarrow> unit \<times> 'a set_tlb_state_scheme))
\<equiv> \<lambda>(value, vaddr, size). do {
ttbr0 <- read_state TTBR0;
mem <- read_state MEM;
asid <- read_state ASID;
paddr <- mmu_translate vaddr :: ('a set_tlb_state_scheme \<Rightarrow> _);
iset_snapshot <- read_state set_tlb;
let incon_vaddrs = iset (iset_snapshot);
let global_set = global_set (iset_snapshot);
exception <- read_state exception;
if exception = NoException
then do {
write'mem1 (value, paddr, size);
mem' <- read_state MEM;
let ptable_comp = incon_comp asid asid mem mem' ttbr0 ttbr0;
let incon_vaddrs_new = incon_vaddrs \<union> ptable_comp;
\<comment> \<open> pdc_walk are always asid specific, only using pt_walk for global_entries\<close>
let global_set_up = global_set \<union> \<Union> (range_of ` global_entries (the ` {e\<in>pt_walk asid mem' ttbr0 ` UNIV. \<not>is_fault e}));
let iset_snapshot = iset_snapshot \<lparr>iset := incon_vaddrs_new , global_set := global_set_up \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := iset_snapshot \<rparr>)
}
else return ()
}"
definition
"(update_TTBR0 r :: ('a set_tlb_state_scheme \<Rightarrow> _)) = do {
ttbr0 <- read_state TTBR0;
update_state (\<lambda>s. s\<lparr> TTBR0 := r \<rparr>);
iset_snapshot <- read_state set_tlb;
let global_set = global_set (iset_snapshot);
asid <- read_state ASID;
mem <- read_state MEM;
let ptable_asid_va = incon_comp asid asid mem mem ttbr0 r;
let incon_set_n = iset iset_snapshot \<union> ptable_asid_va;
let global_set_up = global_set \<union> \<Union> (range_of `global_entries (the ` {e\<in>pt_walk asid mem r ` UNIV. \<not>is_fault e}));
let iset_snapshot = iset_snapshot \<lparr>iset := incon_set_n , global_set := global_set_up \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := iset_snapshot \<rparr>)
}"
definition
"(update_ASID a :: ('a set_tlb_state_scheme \<Rightarrow> _)) = do {
mem <- read_state MEM;
ttbr0 <- read_state TTBR0;
asid <- read_state ASID;
iset_snapshot <- read_state set_tlb;
let iset = iset iset_snapshot; \<comment> \<open>current iset\<close>
let global_set = global_set (iset_snapshot);
let snapshot = snapshot iset_snapshot;
\<comment> \<open>incon vaddrs that are global\<close>
\<comment> \<open>let iset_global = iset \<inter> \<Union> (range_of ` global_set);\<close>
let iset_global = iset \<inter> global_set;
\<comment> \<open>snapshot update\<close>
let snapshot_current = snp_upd_cur' snapshot iset mem ttbr0 asid;
let set_tlb = iset_snapshot \<lparr>snapshot := snapshot_current \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := set_tlb \<rparr>);
\<comment> \<open>new ASID\<close>
update_state (\<lambda>s. s\<lparr> ASID := a \<rparr>);
\<comment> \<open>for the new iset\<close>
let iset_snp_incon = fst (snapshot_current a);
let iset_snp = ptable_comp (snd(snapshot_current a)) (pt_walk_pair a mem ttbr0);
let set_tlb = set_tlb\<lparr> iset := iset_snp_incon \<union> iset_global \<union> iset_snp \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := set_tlb \<rparr>)
}"
definition
"(flush f :: ('a set_tlb_state_scheme \<Rightarrow> _)) \<equiv> do {
asid <- read_state ASID;
mem <- read_state MEM;
ttbr0 <- read_state TTBR0;
set_tlb <- read_state set_tlb;
let iset = iset set_tlb;
let global_set = global_set set_tlb;
let snapshot = snapshot set_tlb;
case f of FlushTLB \<Rightarrow> do {
let empty_set_tlb = set_tlb \<lparr>iset := {},
global_set := \<Union>(range_of ` global_entries (the ` {e \<in> range (pt_walk asid mem ttbr0). \<not> is_fault e})),
snapshot := \<lambda> a. ({}, \<lambda>v. Fault) \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := empty_set_tlb \<rparr>) }
| Flushvarange vset \<Rightarrow> do {
let upd_set_tlb = set_tlb \<lparr>iset := iset - vset ,
global_set := (global_set - vset) \<union>
\<Union>(range_of ` global_entries (the ` {e \<in> range (pt_walk asid mem ttbr0). \<not> is_fault e})),
snapshot := \<lambda> a. (fst(snapshot a) - vset, \<lambda>v. if v \<in> vset then Fault else snd(snapshot a) v) \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := upd_set_tlb \<rparr>) }
| FlushASID a \<Rightarrow>
if a = asid then
update_state (\<lambda>s. s\<lparr> set_tlb := set_tlb \<lparr> iset := iset \<inter> global_set \<rparr> \<rparr> )
else do {
let upd_set_tlb = set_tlb \<lparr> snapshot := snapshot (a := ({}, \<lambda>v. Fault)) \<rparr>;
update_state (\<lambda>s. s\<lparr> set_tlb := upd_set_tlb \<rparr>)
}
| FlushASIDvarange a vset \<Rightarrow>
if a = asid then
update_state (\<lambda>s. s\<lparr> set_tlb := set_tlb \<lparr> iset := iset - (vset - global_set) \<rparr> \<rparr> )
else
update_state (\<lambda>s. s\<lparr> set_tlb := set_tlb \<lparr> snapshot := \<lambda>a'. if a' = a then (fst (snapshot a) - vset,
\<lambda>v. if v \<in> vset then Fault else (snd (snapshot a)) v) else snapshot a' \<rparr> \<rparr> )
}"
instance ..
end
end
|
function val = fgGet(fg,param,varargin)
%Get values from a fiber group structure
%
% val = fgGet(fg,param,varargin)
%
% Parameters
% General
% 'name'
% 'type'
% 'colorrgb'
% 'thickness'
% 'visible'
%
% Fiber related
% 'nfibers'- Number of fibers in this group
% 'nodes per fiber' - Number of nodes per fiber.
% 'fibers' - Fiber coordinates
% 'fibernames'
% 'fiberindex'
%
% ROI and image coord related
% 'unique image coords'
% 'nodes to imagecoords' -
% 'voxel2fiber node pairs' - For each roi coord, an Nx2 matrix of
% (fiber number,node number)
% 'nodes in voxels' - Nodes inside the voxels of roi coords
% 'voxels in fg' - Cell array of the roiCoords touched by each fiber
% 'voxels2fibermatrix' - Binary matrix (voxels by fibers). 1s when a
% fiber is in a voxel of the roiCoords (which are, sadly, implicit).
%
% Tensor and tractography related
% 'tensors' - Tensors for each node
%
%
% See also: dwiGet/Set, fgCreate; fgSet
%
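% Example (assuming fg is a fiber group struct in image space):
%   nFibers = fgGet(fg,'n fibers');
%   coords  = fgGet(fg,'unique image coords');
%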
% (c) Stanford VISTA Team
% NOTES:
% Programming TODO:
% We should store the transforms needed to shift the fg coordinates
% between acpc and image space.
% I eliminated these checks because this function is now called many many
% times and this slows the computations (Franco).
%
%if notDefined('fg'),error('fiber group required.'); end
% if notDefined('param'), error('param required.'); end
val = [];
switch mrvParamFormat(param)
% Basic fiber parameters
case 'name'
val = fg.name;
case 'type' % Should always be fibergroup
val = fg.type;
% Fiber visualization settings.
case 'colorrgb'
val = fg.colorRgb;
case 'thickness'
val = fg.thickness;
case 'visible'
val = fg.visible;
% Simple fiber properties --
case {'fibers'}
% val = fgGet(fg,'fibers',fList);
%
% Returns a 3xN matrix of fiber coordinates corresponding to the
% fibers specified in the integer vector, fList. This differs from
% the dtiH (mrDiffusion) representation, where fiber coordinates
% are stored as a set of cell arrays for each fiber.
if ~isempty(varargin)
list = varargin{1};
val = cell(length(list),1);
for ii=1:length(list)
val{ii} = fg.fibers{list(ii)};
end
else
val = fg.fibers;
end
case 'fibernames'
val = fg.fiberNames;
case 'fiberindex'
val = fg.fiberIndex;
case 'nfibers'
val = length(fg.fibers);
case {'nodesperfiber','nsamplesperfiber','nfibersamples'}
% fgGet(fg,'n samples per fiber ')
% How many samples per fiber. This is about equal to
% their length in mm, though we need to write the fiber lengths
% routine to actually calculate this.
nFibers = fgGet(fg,'n fibers');
val = zeros(1,nFibers);
for ii=1:nFibers
val(ii) = length(fg.fibers{ii});
end
% Fiber group (subgroup) properties.
% These are used when we classify fibers into subgroups. We should
% probably clean up this organization which is currently
%
% subgroup - length of fibers, an index of group identity
% subgroupNames()
% .subgroupIndex - Probably should go away and the index should
% just be
% .subgroupName - Probably should be moved up.
%
case {'ngroups','nsubgroups'}
val = length(fg.subgroupNames);
case {'groupnames'}
nGroups = fgGet(fg,'n groups');
val = cell(1,nGroups);
for ii=1:nGroups
val{ii} = fg.subgroupNames(ii).subgroupName;
end
% DTI properties
case 'tensors'
val = fg.tensors;
% Fiber to coord calculations
case {'imagecoords'}
% c = fgGet(fgAcpc,'image coords',fgList,xForm);
% c = fgGet(fgAcpc,'image coords',fgList,xForm);
%
% Return the image coordinates of a specified list of fibers
% Returns a matrix that is fgList by 3 of the image coordinates for
% each node of each fiber.
%
% Fiber coords are represented at fine resolution in ACPC space.
% These coordinates are rounded and in image space
if ~isempty(varargin)
fList = varargin{1};
if length(varargin) > 1
xForm = varargin{2};
% Put the fiber coordinates into image space
fg = dtiXformFiberCoords(fg,xForm);
end
else
% In this case, the fiber coords should already be in image
% space.
nFibers = fgGet(fg,'n fibers');
fList = 1:nFibers;
end
% Pull out the coordinates and floor them. These are in image
% space.
nFibers = length(fList);
val = cell(1,nFibers);
if nFibers == 1
%val = round(fg.fibers{fList(1)}');
val = floor(fg.fibers{fList(1)}');
else
for ii=1:nFibers
%val{ii} = round(fg.fibers{fList(ii)}');
val{ii} = floor(fg.fibers{fList(ii)}');
end
end
case {'uniqueimagecoords'}
% coords = fgGet(fgIMG,'unique image coords');
%
% The fg input must be in IMG space.
%
% Returns the unique image coordinates of all the fibers as an Nx3
% matrix of integers.
% val = round(horzcat(fg.fibers{:})');
val = floor(horzcat(fg.fibers{:})');
val = unique(val,'rows');
case {'nodes2voxels'}
% nodes2voxels = fgGet(fgImg,'nodes2voxels',roiCoords)
%
% The roiCoords are a matrix of Nx3 coordinates. They describe a
% region of interest, typically in image space or possibly in acpc
% space.
%
% We return a cell array that is a mapping of fiber nodes to voxels in
% the roi. The roi is specified as an Nx3 matrix of coordinates.
% The returned cell array, nodes2voxels, has the same number of
% cells as there are fibers.
%
% Unlike the fiber group cells, which have a 3D coordinate of each
% node, this cell array has an integer that indexes the row of
% roiCoords that contains the node. If a node is not in any of the
% roiCoords, the entry in node2voxels{ii} for that node is zero.
% This means that node is outside the 'roiCoords'.
%
% Once again: The cell nodes2voxels{ii} specifies whether each
% node in the iith fiber is inside a voxel in the roiCoords. The
% value specifies the row in roiCoords that contains the node.
%
if isempty(varargin), error('roiCoords required');
else
roiCoords = varargin{1};
end
% Find the roiCoord for each node in each fiber.
nFiber = fgGet(fg,'n fibers');
val = cell(nFiber,1);
for ii=1:nFiber
% if ~mod(ii,200), fprintf('%d ',ii); end
% Node coordinates in image space
nodeCoords = fgGet(fg,'image coords',ii);
% The values in loc are the row of the coords matrix that contains
% that sample point in a fiber. For example, if the number 100 is
% in the 10th position of loc, then the 10th sample point in the
% fiber passes through the voxel in row 100 of coords.
[~, val{ii}] = ismember(nodeCoords, roiCoords, 'rows');
end
case {'voxel2fibernodepairs','v2fn'}
% voxel2FNpairs = fgGet(fgImg,'voxel 2 fibernode pairs',roiCoords);
% voxel2FNpairs = fgGet(fgImg,'voxel 2 fibernode pairs',roiCoords,nodes2voxels);
%
% The return is a cell array whose size is the number of voxels.
% The cell is a Nx2 matrix of the (fiber, node) pairs that pass
% through it.
%
% The value N is the number of nodes in the voxel. The first
% column is the fiber number. The second column reports the indexes
% of the nodes for each fiber in each voxel.
tic
fprintf('\n[fgGet] Computing fibers/nodes pairing in each voxel...')
if length(varargin) < 1, error('Requires the roiCoords.');
else
roiCoords = varargin{1};
nCoords = size(roiCoords,1);
end
if length(varargin) < 2
% We assume the fg and the ROI coordinates are in the same
% coordinate frame.
nodes2voxels = fgGet(fg,'nodes 2 voxels',roiCoords);
else nodes2voxels = varargin{2};
end
nFibers = fgGet(fg,'nFibers');
voxelsInFG = fgGet(fg,'voxels in fg',nodes2voxels);
roiNodesInFG = fgGet(fg,'nodes in voxels',nodes2voxels);
val = cell(1,nCoords);
for thisFiber=1:nFibers
voxelsInFiber = voxelsInFG{thisFiber}; % A few voxels, in a list
nodesInFiber = roiNodesInFG{thisFiber}; % The corresponding nodes
% Then add a row for each (fiber,node) pairs that pass through
% the voxels for this fiber.
for jj=1:length(voxelsInFiber)
thisVoxel = voxelsInFiber(jj);
% Print out roi coord and fiber coord to verify match
% roiCoords(thisVoxel,:)
% fg.fibers{thisFiber}(:,nodesInFiber(jj))
% Would horzcat be faster?
val{thisVoxel} = cat(1,val{thisVoxel},[thisFiber,nodesInFiber(jj)]);
end
end
fprintf('process completed in: %2.3fs.\n',toc)
case {'nodesinvoxels'}
% nodesInVoxels = fgGet(fg,'nodes in voxels',nodes2voxels);
%
% This cell array is a modified form of nodes2voxels (see above).
% In that cell array every node in every fiber has a number
% referring to its row in roiCoords, or a 0 when the node is not in
% any roiCoord voxel.
%
% This cell array differs only in that the 0s removed. This
% is used to simplify certain calculations.
%
if length(varargin) <1
error('Requires nodes2voxels cell array.');
end
nodes2voxels = varargin{1};
nFibers = fgGet(fg,'nFibers');
val = cell(1,nFibers);
% For each fiber, this is a list of the nodes that pass through
% a voxel in the roiCoords
for ii = 1:nFibers
% For each fiber, this is a list of the nodes that pass through
% a voxel in the roiCoords
lst = (nodes2voxels{ii} ~= 0);
val{ii} = find(lst);
end
case 'voxelsinfg'
% voxelsInFG = fgGet(fgImg,'voxels in fg',nodes2voxels);
%
% A cell array length n-fibers. Each cell has a list of the voxels
% (rows of roiCoords) for a fiber.
%
% This routine eliminates the 0's in the nodes2voxels lists.
%
if length(varargin) < 1, error('Requires nodes2voxels cell array.'); end
nodes2voxels = varargin{1};
nFibers = fgGet(fg,'nFibers');
val = cell(1,nFibers);
for ii = 1:nFibers
% These are the nodes that pass through a voxel in the
% roiCoords
lst = (nodes2voxels{ii} ~= 0);
val{ii} = nodes2voxels{ii}(lst);
end
case {'voxels2fibermatrix','v2fm'}
% v2fm = fgGet(fgImg,'voxels 2 fiber matrix',roiCoords);
% Or,
% v2fnPairs = fgGet(fgImg,'v2fn',roiCoords);
% v2fm = fgGet(fgImg,'voxels 2 fiber matrix',roiCoords, v2fnPairs);
%
% mrvNewGraphWin; imagesc(v2fm)
%
% Returns a binary matrix of size Voxels by Fibers.
% When voxel ii has at least one node from fiber jj, there is a one
% in v2fm(ii,jj). Otherwise, the entry is zero.
%
% Check that the fg is in the image coordspace:
if isfield(fg, 'coordspace') && ~strcmp(fg.coordspace, 'img')
error('Fiber group is not in the image coordspace, please xform');
end
if isempty(varargin), error('roiCoords required');
else
roiCoords = varargin{1};
nCoords = size(roiCoords,1);
if length(varargin) < 2
v2fnPairs = fgGet(fg,'v2fn',roiCoords);
else
v2fnPairs = varargin{2};
end
end
% Allocate matrix of voxels by fibers
val = zeros(nCoords,fgGet(fg,'n fibers'));
% For each coordinate, find the fibers. Set those entries to 1.
for ii=1:nCoords
if ~isempty(v2fnPairs{ii})
f = unique(v2fnPairs{ii}(:,1));
val(ii,f) = 1;
end
end
case {'fibersinroi','fginvoxels','fibersinvoxels'}
% fList = fgGet(fgImg,'fibersinroi',roiCoords);
%
% v2fn = fgGet(fgImg,'v2fn',roiCoords);
% fList = fgGet(fgImg,'fibersinroi',roiCoords,v2fn);
%
% Returns an integer vector of the fibers with at least
% one node in a region of interest.
%
% The fg and roiCoords should be in the same coordinate frame.
%
if isempty(varargin), error('roiCoords required');
elseif length(varargin) == 1
roiCoords = varargin{1};
v2fnPairs = fgGet(fg,'v2fn',roiCoords);
elseif length(varargin) > 1
roiCoords = varargin{1};
v2fnPairs = varargin{2};
end
val = []; nCoords = size(roiCoords,1);
for ii=1:nCoords
if ~isempty(v2fnPairs{ii})
val = cat(1,val,v2fnPairs{ii}(:,1));
end
end
val = sort(unique(val),'ascend');
case {'coordspace','fibercoordinatespace','fcspace'}
% In some cases, the fg might contain information telling us in which
% coordinate space its coordinates are set. This information is set
% as a struct. Each entry in the struct can be either a 4x4 xform
% matrix from the fiber coordinates to that space (with eye(4) for
% the space in which the coordinates are defined), or (if the xform
% is not know) an empty matrix.
cspace_fields = fields(fg.coordspace);
val = [];
for f=1:length(cspace_fields)
this_field = cspace_fields{f};
if isequal(getfield(fg.coordspace, this_field), eye(4))
val = this_field;
end
end
otherwise
error('Unknown fg parameter: "%s"\n',param);
end
return
|
{-# LANGUAGE BangPatterns #-}
-- import System.Environment
import Data.Array.Repa as Repa
import Data.Array.Repa.Eval as Eval
import Data.Array.Repa.IO.BMP as BMP
import Data.Array.Repa.Algorithms.Pixel as Pixel
import Data.Vector.Unboxed.Base as Unboxed
import Control.Monad.Identity
import GHC.Word
import Data.Complex
import Data.List
import Julia
import Rendering
import RealFunctions
import ComplexFunctions
width :: (Num a) => a
width = 600
height :: (Num a) => a
height = 600
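-- Render one pixel: map the pixel to the complex plane, fold its phase and
-- rotate/scale it into a start value, then iterate the dragon map (assumed
-- to come from the Julia module) for up to 128 steps with escape radius 2,
-- returning a smoothed escape count.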
pixelRenderer :: (RealFloat a) => Int -> Int -> a
pixelRenderer x y = smooth samples -- avg' (Prelude.map (f) samples)
where samples = sample 128 (magnitudeLimit 2) dragon start
start = afold (remapped x y)
afold = (1.5*(cis(pi/(4)))*).(foldPhase (pi/6))
f z = cos (atan2 (imagPart z) (realPart z)) --(cos . abs . phase) z
f1 x z = 1 + (z*x)
f2 x z = 1 - (z*x)
paramRenderer :: (RealFloat a) => a -> Int -> Int -> a
paramRenderer progress x y = smooth samples -- progress between 0 and 100%
where samples = sample 128 (magnitudeLimit 2) (dragon) start
start = ((cis n *).(foldPhase (pi/6))) (remapped x y)
n = progress * 2 * pi
remapped :: (RealFloat a) => Int -> Int -> Complex a
remapped x y = (remap (fromIntegral x :+ fromIntegral y) width height)
avg :: (Num a, Fractional a) => [a] -> a
avg l = (sum l) / ((fromIntegral . length) l)
avg' :: (Num a, Fractional a) => [a] -> a
avg' = uncurry (/) . foldl' (\(s,n) x -> (s+x, n+1)) (0, 0)
-- 2D shaped array
shape :: DIM2
shape = Z :. (height :: Int) :. (width :: Int)
canvas :: Array U DIM2 Double
canvas = fromListUnboxed shape (replicate (size shape) 0)
render :: (Source r a, RealFloat a, Elt a) => Array r DIM2 a -> Array D DIM2 a
render !input = traverse input id (\_ (Z :. y :. x) -> pixelRenderer x y)
-- render a frame with progress between 0% and 100%
renderFrame :: (Source r a, RealFloat a, Elt a) => Array r DIM2 a -> a -> Array D DIM2 a
renderFrame !input progress = traverse input id (\_ (Z :. y :. x) -> paramRenderer progress x y)
convert :: (Shape sh) => Array U sh Double
-> Array D sh (GHC.Word.Word8, GHC.Word.Word8, GHC.Word.Word8)
convert !array = Repa.map (rgb8OfGreyDouble) array
getMinimum :: (Ord b, Num b, Source r b, Elt b, Unbox b, Shape sh) =>
Array r sh b -> IO (b)
getMinimum !array = foldAllP (min) 100000 array
getMaximum :: (Ord b, Num b, Source r b, Elt b, Unbox b, Shape sh) =>
Array r sh b -> IO (b)
getMaximum !array = foldAllP (max) 0 array
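-- Exposure correction: linearly remap pixel values from the measured
-- [mini, maxi] range onto [0, 1]; map' is assumed to be the linear
-- remapping helper from RealFunctions.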
correctExpo :: (Source r b, Unbox b, Elt b, Floating b, Shape sh, RealFrac b) =>
Array r sh b -> b -> b -> IO (Array U sh b)
correctExpo !array !mini !maxi = computeP $ Repa.map ((map' mini maxi 0.0 1.0)) array
--
compute :: IO ()
compute = do
img <- computeP (render canvas) :: IO (Array U DIM2 Double) -- super intensive
mini <- getMinimum img
maxi <- getMaximum img
corrected <- correctExpo img mini maxi :: IO (Array U DIM2 Double)
image <- computeP (convert corrected)
writeImageToBMP "out.bmp" image
computeFrame :: Double -> Int -> IO ()
computeFrame progress frameNum = do
img <- computeP (renderFrame canvas progress) :: IO (Array U DIM2 Double) -- super intensive
mini <- getMinimum img
maxi <- getMaximum img
corrected <- correctExpo img mini maxi :: IO (Array U DIM2 Double)
image <- computeP (convert corrected)
let filename = "gifs/" Data.List.++ (show frameNum) Data.List.++ ".bmp"
writeImageToBMP filename image
main :: IO ()
main = compute -- foldl (>>) (return ()) (Data.List.zipWith3 ($) (repeat computeFrame) progresses [1..])
where progresses = takeWhile (<= 1.0) $ iterate (+0.005) 0.0
|
function varargout = infoto(varargin)
% INFOTO M-file for infoto.fig
% INFOTO, by itself, creates a new INFOTO or raises the existing
% singleton*.
%
% H = INFOTO returns the handle to a new INFOTO or the handle to
% the existing singleton*.
%
% INFOTO('CALLBACK',hObject,eventData,handles,...) calls the local
% function named CALLBACK in INFOTO.M with the given input arguments.
%
% INFOTO('Property','Value',...) creates a new INFOTO or raises the
% existing singleton*. Starting from the left, property value pairs are
% applied to the GUI before infoto_OpeningFcn gets called. An
% unrecognized property name or invalid value makes property application
% stop. All inputs are passed to infoto_OpeningFcn via varargin.
%
% *See GUI Options on GUIDE's Tools menu. Choose "GUI allows only one
% instance to run (singleton)".
%
% See also: GUIDE, GUIDATA, GUIHANDLES
% Edit the above text to modify the response to help infoto
% Last Modified by GUIDE v2.5 09-Jul-2012 15:08:20
% Begin initialization code - DO NOT EDIT
gui_Singleton = 1;
gui_State = struct('gui_Name', mfilename, ...
'gui_Singleton', gui_Singleton, ...
'gui_OpeningFcn', @infoto_OpeningFcn, ...
'gui_OutputFcn', @infoto_OutputFcn, ...
'gui_LayoutFcn', [] , ...
'gui_Callback', []);
if nargin && ischar(varargin{1})
gui_State.gui_Callback = str2func(varargin{1});
end
if nargout
[varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:});
else
gui_mainfcn(gui_State, varargin{:});
end
% End initialization code - DO NOT EDIT
% --- Executes just before infoto is made visible.
function infoto_OpeningFcn(hObject, eventdata, handles, varargin)
% This function has no output args, see OutputFcn.
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% varargin command line arguments to infoto (see VARARGIN)
% Choose default command line output for infoto
handles.output = hObject;
% Update handles structure
guidata(hObject, handles);
% UIWAIT makes infoto wait for user response (see UIRESUME)
% uiwait(handles.figure1);
% --- Outputs from this function are returned to the command line.
function varargout = infoto_OutputFcn(hObject, eventdata, handles)
% varargout cell array for returning output args (see VARARGOUT);
% hObject handle to figure
% eventdata reserved - to be defined in a future version of MATLAB
% handles structure with handles and user data (see GUIDATA)
% Get default command line output from handles structure
varargout{1} = handles.output;
% --- Executes during object creation, after setting all properties.
function axes1_CreateFcn(hObject, eventdata, handles)
% hObject handle to axes1 (see GCBO)
% eventdata reserved - to be defined in a future version of MATLAB
% handles empty - handles not created until after all CreateFcns called
% Hint: place code in OpeningFcn to populate axes1
axes (hObject)
imshow('infoto.jpg')
|
r=359.78
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7kk54/media/images/d7kk54-011/svc:tesseract/full/full/359.78/default.jpg Accept:application/hocr+xml
|
% sbplot() - create axes in arbitrary subplot grid positions and sizes
%
% Usage: >> axis_handle = sbplot(v,h,index)
% >> axis_handle = sbplot(v,h,[index1 index2])
% >> axis_handle = sbplot(v,h,[index1 index2],axprop,..)
% >> axis_handle = sbplot(v,h,[index1 index2],'ax',handle,axprop,..)
%
% Inputs:
% v,h - Integers giving the vertical and horizontal ranks of the tiling.
% index - Either a single subplot index, in which case the command
% is equivalent to subplot, or a two-element vector giving
% the indices of two corners of the sbplot() area according
% to subplot() convention (e.g., left-to-right, top-to-bottom).
% axprop - Any axes property(s), e.g., >> sbplot(3,3,3,'color','w')
% handle - Following keyword 'ax', sbplot tiles the given axes handle
% instead of the whole figure
%
% Output:
% axis_handle - matlab axis handle
%
% Note:
% sbplot is essentially the same as the subplot command except that
% sbplot axes may span multiple tiles. Also, sbplot() will not erase
% underlying axes.
%
% Examples: >> sbplot(3,3,6);plot(rand(1,10),'g');
% >> sbplot(3,3,[7 2]);plot(rand(1,10),'r');
% >> sbplot(8,7,47);plot(rand(1,10),'b');
%
% Authors: Colin Humphries, Arnaud Delorme & Scott Makeig, SCCN/INC/UCSD, La Jolla, June, 1998
% Copyright (C) June 1998, Colin Humphries & Scott Makeig, SCCN/INC/UCSD,
% [email protected]
%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
% reformatted by Scott Makeig, 6/10/98
% 12/22/00 test nargin<3 -sm
% 01/21/01 added (recursive) axes option 'ax' -sm
% 01-25-02 reformated help & licence -ad
function [out] = sbplot(m,n,gridpos,varargin) % varargin is std. matlab arg list
if nargin<3
error(' requires >=3 arguments');
end
if nargin>3 && strcmp(varargin{1},'ax') % && short-circuits, so varargin{1} is not touched when absent
pos = get(varargin{2},'Position'); % sbplot(3,1,[2 3]) -> 0.4111 0.1100 0.4939 0.815
varargin = {varargin{3:end}};
else
pos = get(gcf,'DefaultAxesPosition'); % [0.1300 0.1100 0.7750 0.815]
end
Xpad = pos(1); % lower-left distance from left side of figure
Ypad = pos(2); % lower-left distance from bottom of figure
Xlen = pos(3); % axes width
Ylen = pos(4); % axes height
if n == 2
xspace = Xlen*0.27/(n-0.27); % xspace between axes as per subplot
else
xspace = (0.9*Xlen)*0.27/(n-0.9*0.27);
end
if m == 2
yspace = Ylen*0.27/(m-0.27); % yspace between axes as per subplot
else % WHY Xlen (.775) instead of Ylen (.815) ??
yspace = (0.9*Ylen)*0.27/(m-0.9*0.27);
end
xlength = (Xlen-xspace*(n-1))/n; % axes width
ylength = (Ylen-yspace*(m-1))/m; % axes height
% Convert tile indices to grid positions
if length(gridpos) == 1
xgridpos(1) = mod(gridpos,n); % grid position
if xgridpos(1) == 0
xgridpos(1) = n;
end
xgridpos(2) = 1; % grid length
ygridpos(1) = m-ceil(gridpos/n)+1; % grid position
ygridpos(2) = 1; % grid length
else
xgridpos(1) = mod(gridpos(1),n);
if xgridpos(1) == 0
xgridpos(1) = n;
end
tmp = mod(gridpos(2),n);
if tmp == 0
tmp = n;
end
if tmp > xgridpos(1)
xgridpos(2) = tmp-xgridpos(1)+1;
else
xgridpos(2) = xgridpos(1)-tmp+1;
xgridpos(1) = tmp;
end
ygridpos(1) = m-ceil(gridpos(1)/n)+1;
tmp = m-ceil(gridpos(2)/n)+1;
if tmp > ygridpos(1)
ygridpos(2) = tmp-ygridpos(1)+1;
else
ygridpos(2) = ygridpos(1)-tmp+1;
ygridpos(1) = tmp;
end
end
% Calculate axes coordinates
position(1) = Xpad+xspace*(xgridpos(1)-1)+xlength*(xgridpos(1)-1);
position(2) = Ypad+yspace*(ygridpos(1)-1)+ylength*(ygridpos(1)-1)-0.03;
position(3) = xspace*(xgridpos(2)-1)+xlength*xgridpos(2);
position(4) = yspace*(ygridpos(2)-1)+ylength*ygridpos(2);
% Create new axes
ax = axes('Position',position,varargin{:});
% Output axes handle
if nargout > 0
out = ax;
end
|
==== White's advantages ====
|
[GOAL]
α : Type u
inst✝³ : MulOneClass α
inst✝² : Preorder α
inst✝¹ : ContravariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
inst✝ : ExistsMulOfLE α
a b : α
h : a < b
⊢ ∃ c, 1 < c ∧ a * c = b
[PROOFSTEP]
obtain ⟨c, rfl⟩ := exists_mul_of_le h.le
[GOAL]
case intro
α : Type u
inst✝³ : MulOneClass α
inst✝² : Preorder α
inst✝¹ : ContravariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
inst✝ : ExistsMulOfLE α
a c : α
h : a < a * c
⊢ ∃ c_1, 1 < c_1 ∧ a * c_1 = a * c
[PROOFSTEP]
exact ⟨c, one_lt_of_lt_mul_right h, rfl⟩
[GOAL]
α : Type u
inst✝⁵ : LinearOrder α
inst✝⁴ : DenselyOrdered α
inst✝³ : Monoid α
inst✝² : ExistsMulOfLE α
inst✝¹ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
inst✝ : ContravariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
a b : α
h : ∀ (ε : α), 1 < ε → a ≤ b * ε
x : α
hxb : b < x
⊢ a ≤ x
[PROOFSTEP]
obtain ⟨ε, rfl⟩ := exists_mul_of_le hxb.le
[GOAL]
case intro
α : Type u
inst✝⁵ : LinearOrder α
inst✝⁴ : DenselyOrdered α
inst✝³ : Monoid α
inst✝² : ExistsMulOfLE α
inst✝¹ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
inst✝ : ContravariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
a b : α
h : ∀ (ε : α), 1 < ε → a ≤ b * ε
ε : α
hxb : b < b * ε
⊢ a ≤ b * ε
[PROOFSTEP]
exact h _ ((lt_mul_iff_one_lt_right' b).1 hxb)
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
⊢ a ≤ b * a
[PROOFSTEP]
rw [mul_comm]
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
⊢ a ≤ a * b
[PROOFSTEP]
exact le_self_mul
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
⊢ (∃ c, b = a * c) → a ≤ b
[PROOFSTEP]
rintro ⟨c, rfl⟩
[GOAL]
case intro
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a c✝ d c : α
⊢ a ≤ a * c
[PROOFSTEP]
exact le_self_mul
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
⊢ a ≤ b ↔ ∃ c, b = c * a
[PROOFSTEP]
simp only [mul_comm _ a, le_iff_exists_mul]
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
⊢ 1 < a * b ↔ 1 < a ∨ 1 < b
[PROOFSTEP]
simp only [one_lt_iff_ne_one, Ne.def, mul_eq_one_iff, not_and_or]
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
h : a < b
⊢ ∃ c x, a * c = b
[PROOFSTEP]
obtain ⟨c, hc⟩ := le_iff_exists_mul.1 h.le
[GOAL]
case intro
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c✝ d : α
h : a < b
c : α
hc : b = a * c
⊢ ∃ c x, a * c = b
[PROOFSTEP]
refine' ⟨c, one_lt_iff_ne_one.2 _, hc.symm⟩
[GOAL]
case intro
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c✝ d : α
h : a < b
c : α
hc : b = a * c
⊢ c ≠ 1
[PROOFSTEP]
rintro rfl
[GOAL]
case intro
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
h : a < b
hc : b = a * 1
⊢ False
[PROOFSTEP]
simp [hc, lt_irrefl] at h
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
h : a ≤ c
⊢ a = 1 * a
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝ : CanonicallyOrderedMonoid α
a b c d : α
h : a ≤ b
⊢ a = a * 1
[PROOFSTEP]
simp
[GOAL]
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a b c d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
⊢ a < b ↔ ∃ c, c > 1 ∧ b = a * c
[PROOFSTEP]
rw [lt_iff_le_and_ne, le_iff_exists_mul, ← exists_and_right]
[GOAL]
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a b c d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
⊢ (∃ x, b = a * x ∧ a ≠ b) ↔ ∃ c, c > 1 ∧ b = a * c
[PROOFSTEP]
apply exists_congr
[GOAL]
case h
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a b c d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
⊢ ∀ (a_1 : α), b = a * a_1 ∧ a ≠ b ↔ a_1 > 1 ∧ b = a * a_1
[PROOFSTEP]
intro c
[GOAL]
case h
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a b c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ b = a * c ∧ a ≠ b ↔ c > 1 ∧ b = a * c
[PROOFSTEP]
rw [and_comm, and_congr_left_iff, gt_iff_lt]
[GOAL]
case h
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a b c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ b = a * c → (a ≠ b ↔ 1 < c)
[PROOFSTEP]
rintro rfl
[GOAL]
case h
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ a ≠ a * c ↔ 1 < c
[PROOFSTEP]
constructor
[GOAL]
case h.mp
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ a ≠ a * c → 1 < c
[PROOFSTEP]
rw [one_lt_iff_ne_one]
[GOAL]
case h.mp
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ a ≠ a * c → c ≠ 1
[PROOFSTEP]
apply mt
[GOAL]
case h.mp.h₁
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ c = 1 → a = a * c
[PROOFSTEP]
rintro rfl
[GOAL]
case h.mp.h₁
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
⊢ a = a * 1
[PROOFSTEP]
rw [mul_one]
[GOAL]
case h.mpr
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ 1 < c → a ≠ a * c
[PROOFSTEP]
rw [← (self_le_mul_right a c).lt_iff_ne]
[GOAL]
case h.mpr
α : Type u
inst✝¹ : CanonicallyOrderedMonoid α
a c✝ d : α
inst✝ : CovariantClass α α (fun x x_1 => x * x_1) fun x x_1 => x < x_1
c : α
⊢ 1 < c → a < a * c
[PROOFSTEP]
apply lt_mul_of_one_lt_right'
[GOAL]
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
⊢ min a (b * c) = min a (min a b * min a c)
[PROOFSTEP]
cases' le_total a b with hb hb
[GOAL]
case inl
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
hb : a ≤ b
⊢ min a (b * c) = min a (min a b * min a c)
[PROOFSTEP]
simp [hb, le_mul_right]
[GOAL]
case inr
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
hb : b ≤ a
⊢ min a (b * c) = min a (min a b * min a c)
[PROOFSTEP]
cases' le_total a c with hc hc
[GOAL]
case inr.inl
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
hb : b ≤ a
hc : a ≤ c
⊢ min a (b * c) = min a (min a b * min a c)
[PROOFSTEP]
simp [hc, le_mul_left]
[GOAL]
case inr.inr
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
hb : b ≤ a
hc : c ≤ a
⊢ min a (b * c) = min a (min a b * min a c)
[PROOFSTEP]
simp [hb, hc]
[GOAL]
α : Type u
inst✝ : CanonicallyLinearOrderedMonoid α
a b c : α
⊢ min (a * b) c = min (min a c * min b c) c
[PROOFSTEP]
simpa [min_comm _ c] using min_mul_distrib c a b
|
What an amazing day! It started off horribly and I had a really bad attitude. Naomi was up all night and I felt sick from the stupid fasting part of the PET Scan. I read online that I’d have to wait 30-60 minutes to let this radioactive sugar get absorbed. So I brought a list of phone calls I had to make. Sidenote: if you’re ever diagnosed with cancer make sure to get unlimited minutes. I’ve never been on the phone so much in my life. Anyway, he tells me I can text but not speak. Then he mentions that besides not nursing Naomi for 24 hours (no, she still has never taken a bottle), I can’t TOUCH or hold my children for more than 30 second increments. For 4-5 hours. By the way, is it 4 or 5? I went with 4.5 hours. Of torture.
Naomi was hysterical when I got home and all I could do was look at her. She normally has 2 naps by then but had only slept for 30 minutes. Hadn’t eaten a thing. I couldn’t find the best sippy cup we have and searched everywhere. Finally, I prayed for 10 seconds, then walked back to where they were. She stopped crying. I looked over, and there was the sippy cup (in one of Abi’s toy bins). Then she took the sippy cup with milk for longer than ever from my dad. My baby girl passed out for 3.5 hours after this.
I expected the night to be the worst part, because she nurses so much. But at dinner she miraculously took a bunch of milk from a tablespoon, and after a stroller ride she fell asleep without nursing. Very little crying. This early and sudden weaning was one of my biggest concerns of the entire cancer deal. Praise God from whom all blessings flow! He is still around and still faithful.
|
using Base.Test
using Reactive
# Processes 1 message from the Reactive event queue.
onestep() = Reactive.run(1)
x = Signal(2)
@test value(x) == 2
mul3 = map(a -> 3a, x)
@test value(mul3) == 6
push!(x, 3)
onestep()
@test value(x) == 3
@test value(mul3) == 9
|
State Before: F : Type ?u.101304
α : Type u_1
β : Type ?u.101310
R : Type ?u.101313
inst✝ : Ring α
a b : α
n : ℕ
⊢ Even (-2) State After: no goals Tactic: simp only [even_neg, even_two]
|
PIPaginationLinks <- function(first = NULL, previous = NULL, last = NULL) {
if (is.null(first) == FALSE) {
if (is.character(first) == FALSE) {
return (print(paste0("Error: first must be a string.")))
}
}
if (is.null(previous) == FALSE) {
if (is.character(previous) == FALSE) {
return (print(paste0("Error: previous must be a string.")))
}
}
if (is.null(last) == FALSE) {
if (is.character(last) == FALSE) {
return (print(paste0("Error: last must be a string.")))
}
}
value <- list(
First = first,
Previous = previous,
Last = last)
valueCleaned <- rmNullObs(value)
attr(valueCleaned, "className") <- "PIPaginationLinks"
return(valueCleaned)
}
|
// Copyright (c) 2006-2008 Michael B. Edwin Rickert
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt )
//
// $LastChangedBy$ - $LastChangedDate$
//
// Dec 28, 2006 - Created
#include <industry/traits/iterator_traits.hpp>
#include <boost/static_assert.hpp>
#include <boost/test/auto_unit_test.hpp>
#include <boost/type_traits/is_same.hpp>
#include <iterator>
#include <vector>
BOOST_AUTO_TEST_CASE( test_iterator_traits ) {
typedef int * pointer;
typedef const int * c_pointer;
typedef std::istream_iterator< int > istream_iterator;
typedef std::ostream_iterator< int > ostream_iterator;
typedef std::vector< int >::iterator vector_iterator;
typedef industry::iterator_traits< pointer > pointer_traits;
typedef industry::iterator_traits< c_pointer > c_pointer_traits;
typedef industry::iterator_traits< ostream_iterator > ostream_iterator_traits;
typedef industry::iterator_traits< istream_iterator > istream_iterator_traits;
typedef industry::iterator_traits< vector_iterator > vector_iterator_traits;
BOOST_STATIC_ASSERT(( boost::is_same< pointer_traits::result_type , int& >::value ));
BOOST_STATIC_ASSERT(( boost::is_same< c_pointer_traits::result_type , const int& >::value ));
BOOST_STATIC_ASSERT(( boost::is_same< istream_iterator_traits::result_type , const int& >::value ));
BOOST_STATIC_ASSERT(( boost::is_same< vector_iterator_traits::result_type , int& >::value ));
}
|
Formal statement is: lemma coeff_poly_cutoff: "coeff (poly_cutoff n p) k = (if k < n then coeff p k else 0)" Informal statement is: The coefficient of $x^k$ in the polynomial $p$ truncated to degree $n$ is the coefficient of $x^k$ in $p$ if $k < n$, and $0$ otherwise.
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
# Option Records
# ==============
#F SPL Options Records
#F -------------------
#F An spl options record is used to collect options passed to the
#F spl compiler by functions that communicate with the spl compiler,
#F such as measure, verify, and search functions.
#F
#F There is a system-wide options record, SpiralDefaults (set in config.g),
#F which sets the default values.
#F
#F An spl options record is a record, which contains a subset of the
#F following fields. Adding a new field requires changing the
#F functions PrintSpecSPLOptionsRecord, CheckSPLOptionsRecord, and
#F MakeSPLCallSPLOptionsRecord.
#F
#F rec(
#F customDataType = "int_cplx" | "int_fpN" | "Ipp16sc" (XScale) | "Ipp32sc" (XScale)
#F where N - number of fractional bits, i.e. int_fp8
#F zeroBits = zero | <positive int>
#F dataType = "real" | "complex",
#F precision = "single" | "double" | "extended"
#F subName = <string>
#F schedule = <integer>
#F globalUnrolling = <positive int> | "none" | "full",
#F language = "fortran" | "c" # see config.g for languages
#F compiler = not to be used
#F compflags = <flags for compiler as string>
#F splflags = <flags for spl compiler as string>
#F )
#F
#F Note: switching language automatically switches compilers
#F and flags.
#F
#F spl options records should be used as follows:
#F - create your desired spl options record R
#F - merge with defaults, R1 := MergeSPLOptionsRecord(R)
#F - create spl prog for external operations with ProgSPL(SPL, R)
#F
#F CheckSPLOptionsRecord ( <spl-options-record> )
#F checks whether <spl-options-record> is a valid spl options record
#F with valid spl options set. If a field name or a field value
#F is invalid, then an error is signaled, otherwise true is
#F returned.
#F
CheckSPLOptionsRecord := function ( R )
local r;
if not IsRec(R) then Error("<R> must be an spl options record"); fi;
for r in RecFields(R) do
Cond(r = "dataType",
Constraint(R.dataType in ["no default", "real", "complex"]),
r = "customDataType",
Constraint(IsString(R.customDataType)),
r = "customReal",
Constraint(IsString(R.customReal)),
r = "customComplex",
Constraint(IsString(R.customComplex)),
r = "zeroBits",
Constraint(IsInt(R.zeroBits) and R.zeroBits >= 0),
r = "precision",
Constraint(R.precision in ["single", "double", "extended"]),
r = "subName",
Constraint(IsString(R.subName)),
r = "file",
Constraint(IsString(R.file)),
r = "schedule",
Constraint(IsInt(R.schedule) and R.schedule > 0),
r = "globalUnrolling",
Constraint(
(IsInt(R.globalUnrolling) and R.globalUnrolling > 0)
or R.globalUnrolling in ["none", "full"]),
r = "compiler",
Constraint(IsString(R.compiler)),
r = "compflags",
Constraint(IsString(R.compflags)),
r = "dmpcompflags",
Constraint(IsString(R.dmpcompflags)),
r = "faultTolerant",
Constraint(IsBool(R.faultTolerant)),
r = "cgen",
Constraint(IsFunc(R.cgen)),
r = "x" or IsSystemRecField(r),
Ignore(),
0); # do nothing if field is unrecognized
od;
return true;
end;
#F MergeSPLOptionsRecord ( <spl-options-record> )
#F returns the option record obtained by starting with SpiralDefaults
#F (config.g, set at installation) and merging or overwriting with the
#F spl options given by <spl-options-record>
#F
MergeSPLOptionsRecord := R -> Checked(CheckSPLOptionsRecord(R),
CopyFields(SpiralDefaults, R)
);
|
#include <iostream>
#include <boost/program_options.hpp>
#include "cmdlineopts.hpp"
namespace bpo = boost::program_options;
void CmdLineOpts::Populate(int argc, char* argv[]) {
const std::string configOpt = "configuration-file";
const char configOptSingle = 'f';
const std::string configOptDesc = "Path to configuration XML file";
const std::string configOptSpec = configOpt + "," + configOptSingle;
const std::string deviceOpt = "device";
const char deviceOptSingle = 'd';
const std::string deviceOptDesc = "Device to select from configuration file";
const std::string deviceOptSpec = deviceOpt + "," + deviceOptSingle;
const std::string channelOpt = "channel";
const char channelOptSingle = 'c';
const std::string channelOptDesc = "Channel on device to calibrate";
const std::string channelOptSpec = channelOpt + "," + channelOptSingle;
bpo::options_description desc("Allowed Options");
desc.add_options()
("help", "Show help message")
(configOptSpec.c_str(), bpo::value<std::string>(&(this->configFilePath)), configOptDesc.c_str())
(deviceOptSpec.c_str(), bpo::value<std::string>(&(this->device)), deviceOptDesc.c_str())
(channelOptSpec.c_str(), bpo::value<unsigned int>(&(this->channel)), channelOptDesc.c_str())
;
bpo::variables_map vm;
bpo::store(bpo::parse_command_line(argc, argv, desc), vm);
bpo::notify(vm);
if( vm.count("help") ) {
std::cout << desc << std::endl;
this->helpMessagePrinted = true;
return;
}
if( vm.count(configOpt.c_str()) ) {
std::cout << "Configuration file: " << this->configFilePath << std::endl;
} else {
throw std::runtime_error("Configuration file not specified");
}
if( vm.count(deviceOpt.c_str()) ) {
std::cout << "Device: " << this->device << std::endl;
} else {
throw std::runtime_error("Device not specified");
}
if( vm.count(channelOpt.c_str()) ) {
if( this->channel < 16 ) {
std::cout << "Channel: " << static_cast<int>(this->channel) << std::endl;
} else {
throw std::out_of_range("Channel must be in range 0 to 15");
}
} else {
throw std::runtime_error("Channel not specified");
}
}
|
Formal statement is: lemma homotopy_equivalent_space_refl: "X homotopy_equivalent_space X" Informal statement is: A space is homotopy equivalent to itself.
|
#include "RecalboxConf.h"
#include <iostream>
#include <fstream>
#include <boost/regex.hpp>
#include "Log.h"
#include <boost/algorithm/string/predicate.hpp>
RecalboxConf *RecalboxConf::sInstance = NULL;
boost::regex validLine("^(?<key>[^;|#].*?)=(?<val>.*?)$");
boost::regex commentLine("^;(?<key>.*?)=(?<val>.*?)$");
std::string recalboxConfFile = "/recalbox/share/system/recalbox.conf";
std::string recalboxConfFileTmp = "/recalbox/share/system/recalbox.conf.tmp";
RecalboxConf::RecalboxConf() {
loadRecalboxConf();
}
RecalboxConf *RecalboxConf::getInstance() {
if (sInstance == NULL)
sInstance = new RecalboxConf();
return sInstance;
}
bool RecalboxConf::loadRecalboxConf() {
std::string line;
std::ifstream recalboxConf(recalboxConfFile);
if (recalboxConf && recalboxConf.is_open()) {
while (std::getline(recalboxConf, line)) {
boost::smatch lineInfo;
if (boost::regex_match(line, lineInfo, validLine)) {
confMap[std::string(lineInfo["key"])] = std::string(lineInfo["val"]);
}
}
recalboxConf.close();
} else {
LOG(LogError) << "Unable to open " << recalboxConfFile;
return false;
}
return true;
}
bool RecalboxConf::saveRecalboxConf() {
std::ifstream filein(recalboxConfFile); //File to read from
if (!filein) {
LOG(LogError) << "Unable to open for saving : " << recalboxConfFile << "\n";
return false;
}
/* Read all lines in a vector */
std::vector<std::string> fileLines;
std::string line;
while (std::getline(filein, line)) {
fileLines.push_back(line);
}
filein.close();
/* Save new value if exists */
for (std::map<std::string, std::string>::iterator it = confMap.begin(); it != confMap.end(); ++it) {
std::string key = it->first;
std::string val = it->second;
bool lineFound = false;
for (int i = 0; i < fileLines.size(); i++) {
std::string currentLine = fileLines[i];
if (boost::starts_with(currentLine, key+"=") || boost::starts_with(currentLine, ";"+key+"=")){
fileLines[i] = key + "=" + val;
lineFound = true;
}
}
if(!lineFound){
fileLines.push_back(key + "=" + val);
}
}
std::ofstream fileout(recalboxConfFileTmp); //Temporary file
if (!fileout) {
LOG(LogError) << "Unable to open for saving : " << recalboxConfFileTmp << "\n";
return false;
}
for (int i = 0; i < fileLines.size(); i++) {
fileout << fileLines[i] << "\n";
}
fileout.close();
/* Copy back the tmp to recalbox.conf */
std::ifstream src(recalboxConfFileTmp, std::ios::binary);
std::ofstream dst(recalboxConfFile, std::ios::binary);
dst << src.rdbuf();
remove(recalboxConfFileTmp.c_str());
return true;
}
std::string RecalboxConf::get(const std::string &name) {
if (confMap.count(name)) {
return confMap[name];
}
return "";
}
std::string RecalboxConf::get(const std::string &name, const std::string &defaut) {
if (confMap.count(name)) {
return confMap[name];
}
return defaut;
}
void RecalboxConf::set(const std::string &name, const std::string &value) {
confMap[name] = value;
}
|
import cv2
import numpy as np
# Load the test image (4.2.07.tiff is a standard USC-SIPI test image).
img = cv2.imread('4.2.07.tiff')
# Keep only near-pure-green pixels in BGR space:
# lower bound (0,225,0), upper bound (0,255,0).
a = np.array([0,225,0])
b = np.array([0,255,0])
thresh = cv2.inRange(img,a,b)
# Show the original image and the binary mask until a key is pressed.
cv2.imshow('win1',img)
cv2.imshow('win2',thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
! GridTools
!
! Copyright (c) 2014-2019, ETH Zurich
! All rights reserved.
!
! Please, refer to the LICENSE file in the root directory.
! SPDX-License-Identifier: BSD-3-Clause
program main
use iso_c_binding
use gt_handle
use copy_stencil_lib_mc
implicit none
integer, parameter :: i = 9, j = 10, k = 11
real(c_float), dimension(i, j, k) :: in_array, out_array
type(c_ptr) grid_handle, storage_info_handle, computation_handle, in_handle, out_handle
! fill some input values
in_array = initial()
out_array(:, :, :) = 0
grid_handle = make_grid(i, j, k)
storage_info_handle = make_storage_info(i, j, k)
in_handle = make_data_store(storage_info_handle)
out_handle = make_data_store(storage_info_handle)
computation_handle = make_copy_stencil(grid_handle)
! gt_handles need to be released explicitly
call gt_release(grid_handle)
call gt_release(storage_info_handle)
! transform data from Fortran to C layout
call transform_f_to_c(in_handle, in_array)
call run_stencil(computation_handle, in_handle, out_handle)
! transform data from C layout to Fortran layout
call transform_c_to_f(out_array, out_handle)
! check output
if (any(in_array /= initial())) stop 1
if (any(out_array /= initial())) stop 1
! gt_handles need to be released explicitly
call gt_release(in_handle)
call gt_release(out_handle)
call gt_release(computation_handle)
print *, "It works!"
contains
function initial()
integer :: x
integer, dimension(i, j, k) :: initial
initial = reshape((/(x, x = 1, size(initial))/) , shape(initial))
end
end
|
! Velocity-Verlet integration of a 1D harmonic oscillator.
IMPLICIT NONE
REAL(8)::x,vel,F,m,H
INTEGER::ixx
INTEGER,PARAMETER::itime = 100000
REAL(8),PARAMETER::xposeq = 0.00d0, val=1.000d0, delta=0.002d0
x = 0.00d0
vel = 1.00d0
F= -val*(x-xposeq)
m=1.00d0
ixx=0
write(16,*) ixx*delta,x
write(18,*) ixx*delta,vel
H = 0.5d0 *(vel**2) + 0.5d0 * val *(x**2)
write(20,*) x,vel
write(24,*) ixx*delta,H
do ixx = 1, itime
vel = vel + (0.5d0*(F/m)*delta) ! half-step velocity update
x=x + (vel*delta) ! full-step position update
F = -val*(x-xposeq) ! force at the new position
vel = vel + (0.5d0*(F/m)*delta) ! second half-step velocity update
write(16,*) ixx*delta,x
write(18,*) ixx*delta,vel
H = 0.5d0 *(vel**2) + 0.5d0 * val *(x**2)
write(20,*) x,vel
write(24,*) ixx*delta,H
end do
stop
end
|
import numpy as np
import MeshFEM, mesh
import registration
import os
import pickle, gzip
def load(path):
"""
load a pickled gzip object
"""
return pickle.load(gzip.open(path, 'rb'))
def save(obj, path):
"""
save an object to a pickled gzip
"""
pickle.dump(obj, gzip.open(path, 'wb'))
def sheetTrisForVar(sheet, varIdx):
"""
Get indices of triangles influencing a particular equilibrium variable of the sheet.
    (indices < sheet.mesh().numTris() refer to triangles in the top sheet, the
rest to triangles in the bottom sheet.)
"""
v = sheet.vtxForVar(varIdx)
result = []
m = sheet.mesh()
if v.sheet & 1: result.extend(np.where(m.triangles() == v.vi)[0])
if v.sheet & 2: result.extend(np.where(m.triangles() == v.vi)[0] + m.numTris())
return result
def maskForIndexList(indices, size):
mask = np.zeros(size, dtype=np.bool)
mask[indices] = True
return mask
def freshPath(path, suffix='', excludeSuffix = False):
if path is None: return
if not os.path.exists(path + suffix): return path if excludeSuffix else path + suffix
i = 0
candidatePath = lambda i: f'{path}.{i}{suffix}'
while os.path.exists(candidatePath(i)): i += 1
print(f'Requested path exists; using fresh path {candidatePath(i)}')
return f'{path}.{i}' if excludeSuffix else candidatePath(i)
def allEnergies(obj):
return {name: obj.energy(etype) for name, etype in obj.EnergyType.__members__.items()}
def allGradientNorms(obj, freeVariables = None):
if freeVariables is None:
freeVariables = np.arange(obj.numVars(), dtype=np.int)
return {name: np.linalg.norm(obj.gradient(etype)[freeVariables]) for name, etype in obj.EnergyType.__members__.items()}
def loadObj(path):
V, F = [], []
for l in open(path, 'r'):
comps = l.strip().split(' ')
specifier = comps[0].lower()
if (specifier == 'v'): V.append([float(c) for c in comps[1:]])
if (specifier == 'l' or specifier == 'f'): F.append([int(i) - 1 for i in comps[1:]])
return np.array(V), np.array(F)
def normalizedParamEnergies(obj):
ET = obj.EnergyType
return [obj.energy(et) / reg if reg != 0 else obj.energy(et)
for (et, reg) in [(ET.Fitting, 1.0),
(ET.AlphaRegularization, obj.alphaRegW),
(ET.PhiRegularization, obj.phiRegW),
(ET.BendingRegularization, obj.bendRegW)]]
def bbox(P):
return np.min(P, axis=0), np.max(P, axis=0)
def bbox_dims(P):
bb = bbox(P)
return bb[1] - bb[0]
def getClosestPointDistances(P):
"""
Gets the distance of each point in a point collection P to its closest other point in P.
"""
closestDist = []
for p in P:
closestDist.append(np.partition(np.linalg.norm(p - P, axis=1), 1)[1])
return closestDist
def prototypeScaleNormalization(P, placeAtopFloor = False, objectScale = 750, reorient = False):
if reorient: P = registration.align_points_with_axes(P)
bb = bbox(P)
c = (bb[0] + bb[1]) / 2 # use center of bounding box rather than center of mass
t = -c
if (placeAtopFloor): t[2] = -bb[0][2]
return (P + t) * (objectScale / np.max(bb[1] - bb[0]))
def renderingNormalization(P, placeAtopFloor = False):
"""
Return the transformation function that maps the points `P` in a standard
configuration for rendering.
"""
c = np.mean(P, axis=0)
bb = bbox(P)
t = -c
if placeAtopFloor:
t[2] = -bb[0][2]
s = 1.0 / np.max(bb[1] - bb[0])
return lambda x: s * (x + t)
def isWallTri(sheet_mesh, is_wall_vtx):
"""
Determine which triangles are part of a wall (triangles made of three wall vertices).
"""
return is_wall_vtx[sheet_mesh.triangles()].all(axis=1)
def pad2DTo3D(P):
if P.shape[1] == 3: return P
return np.pad(P, [(0, 0), (0, 1)], mode='constant')
import itertools
def nth_choice(n, *args):
return next(itertools.islice(itertools.product(*args), n, None))
def writeFields(path, m, name1, field1, *args):
mfw = mesh.MSHFieldWriter(path, m.vertices(), m.triangles())
data = [name1, field1] + list(args)
for name, field in zip(data[0::2], data[1::2]):
mfw.addField(name, field)
del mfw
import mesh_utilities
def getLiftedSheetPositions(origSheetMesh, uv, target_surf):
paramSampler = mesh_utilities.SurfaceSampler(pad2DTo3D(uv), target_surf.triangles())
return paramSampler.sample(origSheetMesh.vertices(), target_surf.vertices())
import parametrization
def getSquashedLiftedPositionsFromLiftedPos(optSheetMesh, liftedPos, liftFrac = 0.2, freeBoundary = False):
flatPos = None
if freeBoundary:
# Note: we assume the design sheet has already been registered with the target boundary...
flatPos = optSheetMesh.vertices()
else:
# If we're fixing the boundary, the flattened state must perfectly match the target surface's boundary.
# Do this by mapping the design sheet to the interior of the target surface's boundary harmonically.
bv = optSheetMesh.boundaryVertices()
flatPos = parametrization.harmonic(optSheetMesh, liftedPos[bv])
return flatPos + liftFrac * (liftedPos - flatPos)
def getSquashedLiftedPositions(optSheetMesh, origSheetMesh, uv, target_surf, liftFrac = 0.2):
liftedPos = getLiftedSheetPositions(origSheetMesh, uv, target_surf)
return getSquashedLiftedPositionsFromLiftedPos(optSheetMesh, liftedPos, liftFrac)
import mesh, glob
def getBoundingBox(framesDir):
minCorner = [ np.inf, np.inf, np.inf]
maxCorner = [-np.inf, -np.inf, -np.inf]
for i in glob.glob(f'{framesDir}/step_*.msh'):
V = mesh.Mesh(i, embeddingDimension=3).vertices()
minCorner = np.min([minCorner, V.min(axis=0)], axis=0)
maxCorner = np.max([maxCorner, V.max(axis=0)], axis=0)
return np.array([minCorner, maxCorner])
def printBoundingBox(framesDir):
print('{', ', '.join(map(str, getBoundingBox(framesDir).ravel(order='F'))), '}')
def getTargetSurf(tas):
tsf = tas.targetSurfaceFitter()
return mesh.Mesh(tsf.targetSurfaceV, tsf.targetSurfaceF)
################################################################################
# Strain analysis
################################################################################
def getStrains(isheet):
getStrain = lambda ted: ted.principalBiotStrains() if hasattr(ted, 'principalBiotStrains') else (np.sqrt(ted.eigSensitivities().Lambda()) - 1)
return np.array([getStrain(ted) for ted in isheet.triEnergyDensities()])
def tensionStates(isheet):
return [ted.tensionState() for ted in isheet.triEnergyDensities()]
# Get the amount by which each element is compressed. This is
# zero for elements in complete tension or the increase in
# strain needed to put the element in tension.
def compressionMagnitudes(isheet):
def cm(ted):
l = ted.eigSensitivities().Lambda()
if (l[0] < 1): return 1 - np.sqrt(l[0]) # full compression case
return np.max([np.sqrt(1 / np.sqrt(l[0])) - np.sqrt(l[1]), 0]) # partial compression or full tension case.
return np.array([cm(ted) for ted in isheet.triEnergyDensities()])
# Get the amount by which each element is "fully compressed" (nonzero
# only for elements in full compression rather than partial tension).
def fullCompressionMagnitudes(isheet):
return np.clip(1.0 - np.sqrt(np.array([ted.eigSensitivities().Lambda()[0] for ted in isheet.triEnergyDensities()])), 0.0, None)
def writeStrainFields(path, isheet):
vm = isheet.visualizationMesh()
strains = getStrains(isheet)
mfw = mesh.MSHFieldWriter(path, vm.vertices(), vm.elements())
mfw.addField("tensionState", tensionStates(isheet))
mfw.addField("compressionMagnitude", compressionMagnitudes(isheet))
mfw.addField("lambda_0", strains[:, 0])
mfw.addField("lambda_1", strains[:, 1])
def strainHistogram(isheet):
from matplotlib import pyplot as plt
strains = getStrains(isheet)
plt.hist(strains[:, 0], bins=500, range=(-0.4,0.1), label='$\lambda_0$');
plt.hist(strains[:, 1], bins=500, range=(-0.4,0.1), label='$\lambda_1$');
plt.legend()
plt.grid()
plt.title('Principal strains');
def cumulativeArcLen(loopPts):
numPts, numComp = loopPts.shape
arcLen = np.empty(numPts)
arcLen[0] = 0.0
for i in range(1, numPts):
arcLen[i] = arcLen[i - 1] + np.linalg.norm(loopPts[i] - loopPts[i - 1])
return arcLen
################################################################################
# Curve operations
################################################################################
def samplePointsOnLoop(loopPts, numSamples, offset):
"""
Sample `numSamples` evenly spaced along the arlength of a closed polyline "loopPts"
This closed loop is represented by a list of points, with the first and
last point coinciding.
The first sample point is placed at `offset`, a relative arclength position along the curve in [0, 1].
If `offset` is a list of `n` floats (instead of just a float), then we generate n * numSamples points
at the specified offsets (with the sampled points for each offset value interleaved).
"""
assert(np.linalg.norm(loopPts[-1] - loopPts[0]) == 0)
numPts, numComp = loopPts.shape
arcLen = cumulativeArcLen(loopPts)
arcLen /= arcLen[-1] # normalize arc lengths to [0, 1]
# Arc length position of the sample points
if (not isinstance(offset, list)):
offset = [offset]
s = np.vstack([np.fmod(np.linspace(0, 1, numSamples, endpoint=False) + o, 1.0) for o in offset]).ravel(order='F')
samples = np.empty((len(s), numComp))
for c in range(numComp):
samples[:, c] = np.interp(s, arcLen, loopPts[:, c])
return samples
import shapely
import shapely.ops
import shapely.geometry as shp
def normalOffset(polygon, dist):
"""
Offset points on the planar curve or shp.Polygon "polygon" in the normal
direction by "dist". This curve should lie in a "z = const" plane or the
result will be distorted.
Returns a **list** of the resulting polygon(s) (shp.Polygon instances),
as an inward offset can divide the input polygon into multiple pieces.
"""
if not isinstance(polygon, shp.Polygon):
polygon = shp.Polygon(polygon[:, 0:2])
offsetResult = polygon.buffer(dist)
# Note: the result could be a Polygon or a MultiPolygon...
if (isinstance(offsetResult, shp.Polygon)):
return [offsetResult]
elif (isinstance(offsetResult, shp.MultiPolygon)):
return list(offsetResult)
else: raise Exception('Unexpected polygon offset result type')
def getBoundary(polygon, getAll = False):
"""
Get the boundary of a shapely polygon.
If `getAll` is true, we return a list with all boundary polylines sorted by descending length;
if false, we return the largest one and print a warning.
"""
result = polygon.boundary
if result.geom_type == 'LineString':
if getAll: return [np.array(result)]
return np.array(result)
if result.geom_type == 'MultiLineString':
allBoundaries = sorted([np.array(r) for r in result], key=lambda a: -len(a))
if getAll: return allBoundaries
print('WARNING: union boundary has multiple components; returning the largest one')
return allBoundaries[0]
raise Exception('Unexpected boundary result type')
def unionPolygons(polygons):
"""
Union two or more polygons [ptsA, ptsB, ...] described by point lists `ptsA` and `ptsB`.
(For each of these lists, the first and last points must agree)
"""
return shapely.ops.unary_union([shp.Polygon(p) for p in polygons])
import os
def get_nonexistant_path(fname_path):
"""
Get the path to a filename which does not exist by incrementing path.
From https://stackoverflow.com/a/43167607/122710
"""
if not os.path.exists(fname_path):
return fname_path
filename, file_extension = os.path.splitext(fname_path)
i = 1
new_fname = "{}-{}{}".format(filename, i, file_extension)
while os.path.exists(new_fname):
i += 1
new_fname = "{}-{}{}".format(filename, i, file_extension)
return new_fname
import scipy
import scipy.sparse
def reconnectPolygons(polygons, originatingPolygon, minGap = 0):
"""
Add the line segments of the minimal length necessary to connect the entries of
polygon list `polygons`, only allowing line segments that lie within the
originating polygon (using a minimum spanning tree).
This is meant to address the problem where eroding a polygon can separate it
into a bunch of small polygons that we want to connect at the seam width.
Unfortunately, we can have two polygons whose ground-truth connection line
(indicated by * below) exceeds the distance of their nearest points (a and b)
a--- * --------+
|
b----------------+
(here the "--" lines represent thin polygons). This will result in a reconnection
failure. It could be mitigated by splitting up large polygons with some threshold,
but we instead opt for the reconnectPolygons2 algorithm below.
"""
#pickle.dump(polygons, open(get_nonexistant_path('polygons.pkl'), 'wb'))
#pickle.dump(originatingPolygon, open(get_nonexistant_path('originatingPolygon.pkl'), 'wb'))
inputPolygons = polygons
polygons = [shp.Polygon(p) for p in polygons]
originatingPolygon = shp.Polygon(originatingPolygon)
n = len(polygons)
dists = np.full((n, n), np.inf)
closestPoints = np.empty((n, n), dtype='O')
for i, pi in enumerate(polygons):
for j, pj in enumerate(polygons):
if (i >= j): continue; # only compute upper triangle
cp = np.vstack([np.array(o.coords) for o in shapely.ops.nearest_points(pi, pj)])
connectionDist = np.linalg.norm(np.subtract(*cp))
distToOrig = shp.Point(cp.mean(axis=0)).distance(originatingPolygon)
if (distToOrig > 0.25 * connectionDist): continue # If the candidate connecting line strays too far outside the originating polygon, it is probably invalid
dists [i, j] = connectionDist
closestPoints[i, j] = cp
outputPolylines = inputPolygons.copy()
for mst_edge in zip(*scipy.sparse.csgraph.minimum_spanning_tree(dists).nonzero()):
i, j = sorted(mst_edge)
if (dists[i, j] < minGap): continue # no connection needed
outputPolylines.append(closestPoints[i, j])
return outputPolylines
import scipy.spatial
def reconnectPolygons2(inputPolygons, originatingPolygon, fuseWidth, includeExtensions=False):
"""
Hopefully superior algorithm for inserting line segments to reconnect the
distinct polygons that arose from an erosion operation on originatingPolygon.
This one works by detecting "bridges"--regions of `originatingPolygon \ inputPolygons`
that connect two distinct polygons of inputPolygons--and then joining the
closest points of these input polygons (after intersecting with a
neighborhood of the bridge).
"""
eps = 1e-6
polygons = [shp.Polygon(p).buffer(fuseWidth / 2 + eps) for p in inputPolygons]
originatingPolygon = shp.Polygon(originatingPolygon)
bridges = [p for p in originatingPolygon.difference(shapely.ops.unary_union(polygons)) if p.boundary.length > 3 * fuseWidth]
outputPolylines = inputPolygons.copy()
for b in bridges:
distances = np.array([b.distance(p) for p in polygons])
# If "b" actually bridges between two polygons, connect these
# polygons' closest points (restricted to a neighborhood of the bridge)
closest = np.argsort(distances)
if (distances[closest[1]] < fuseWidth / 2):
bridgeRegion = b.buffer(2 * fuseWidth)
p0, p1 = shapely.ops.nearest_points(bridgeRegion.intersection(polygons[closest[0]]),
bridgeRegion.intersection(polygons[closest[1]]))
outputPolylines.append(np.array([np.asarray(p0), np.asarray(p1)]))
elif includeExtensions:
if (b.boundary.length > 4 * fuseWidth):
bdryPts = np.array(b.boundary)
_, p0 = shapely.ops.nearest_points(b, polygons[closest[0]])
b_to_p0 = scipy.spatial.distance.cdist([np.asarray(p0)], bdryPts[:, 0:2])[0]
farthest = np.argmax(b_to_p0)
if (b_to_p0[farthest] > 4 * fuseWidth):
p1, _ = shapely.ops.nearest_points(polygons[closest[0]], shp.Point(bdryPts[farthest, 0:2]))
outputPolylines.append(np.array([np.asarray(p1), bdryPts[farthest, 0:2]]))
return outputPolylines
|
import numpy as np
import tvm
import logging
import sys, time, subprocess
from tvm import autotvm
import topi
import json
from topi.util import get_const_tuple
import os
op_attributes = {
"N": int(os.environ['N']) if 'N' in os.environ else 64,
"C": int(os.environ['C']) if 'C' in os.environ else 3,
"H": int(os.environ['H']) if 'H' in os.environ else 229,
"W": int(os.environ['W']) if 'W' in os.environ else 229,
"F": int(os.environ['F']) if 'F' in os.environ else 32,
"K": int(os.environ['K']) if 'K' in os.environ else 5,
"ST": int(os.environ['ST']) if 'ST' in os.environ else 1,
"PD": int(os.environ['PD']) if 'PD' in os.environ else 2,
}
@autotvm.template
def get_template_op(**kargs):
N = op_attributes["N"]
CI = op_attributes["C"]
    H = op_attributes["H"]
    W = op_attributes["W"]
CO = op_attributes["F"]
KH = KW = op_attributes["K"]
stride = op_attributes["ST"]
padding = op_attributes["PD"]
dilation = 1
data = tvm.placeholder((N, CI, H, W), name='data')
kernel = tvm.placeholder((CO, CI, KH, KW), name='kernel')
conv = topi.nn.conv2d_nchw(
data, kernel, (stride, stride), (padding, padding), dilation=1, out_dtype='float32')
s = tvm.create_schedule([conv.op])
cfg = autotvm.get_config()
##### space definition begin #####
n, f, y, x = s[conv].op.axis
rc, ry, rx = s[conv].op.reduce_axis
cfg.define_split("tile_f", f, num_outputs=4)
cfg.define_split("tile_y", y, num_outputs=4)
cfg.define_split("tile_x", x, num_outputs=4)
cfg.define_split("tile_rc", rc, num_outputs=2)
cfg.define_split("tile_ry", ry, num_outputs=2)
cfg.define_split("tile_rx", rx, num_outputs=2)
cfg.define_knob("auto_unroll_max_step", [0, 125, 256])
target = tvm.target.current_target()
if target.target_name in ['nvptx', 'rocm']:
cfg.define_knob("unroll_explicit", [1])
else:
cfg.define_knob("unroll_explicit", [0, 1])
pad_data, kernel = s[conv].op.input_tensors
s[pad_data].compute_inline()
if isinstance(kernel.op, tvm.tensor.ComputeOp) and 'dilate' in kernel.op.tag:
s[kernel].compute_inline()
if conv.op in s.outputs:
output = conv
OL = s.cache_write(conv, 'local')
else:
output = s.outputs[0].output(0)
s[conv].set_scope('local')
OL = conv
# create cache stage
AA = s.cache_read(pad_data, 'shared', [OL])
WW = s.cache_read(kernel, 'shared', [OL])
# tile and bind spatial axes
n, f, y, x = s[output].op.axis
kernel_scope, n = s[output].split(n, nparts=1)
bf, vf, tf, fi = cfg["tile_f"].apply(s, output, f)
by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)
bf = s[output].fuse(n, bf)
s[output].bind(bf, tvm.thread_axis("blockIdx.z"))
s[output].bind(by, tvm.thread_axis("blockIdx.y"))
s[output].bind(bx, tvm.thread_axis("blockIdx.x"))
s[output].bind(vf, tvm.thread_axis("vthread"))
s[output].bind(vy, tvm.thread_axis("vthread"))
s[output].bind(vx, tvm.thread_axis("vthread"))
s[output].bind(tf, tvm.thread_axis("threadIdx.z"))
s[output].bind(ty, tvm.thread_axis("threadIdx.y"))
s[output].bind(tx, tvm.thread_axis("threadIdx.x"))
s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi)
s[OL].compute_at(s[output], tx)
# tile reduction axes
n, f, y, x = s[OL].op.axis
rc, ry, rx = s[OL].op.reduce_axis
rco, rci = cfg['tile_rc'].apply(s, OL, rc)
    ryo, ryi = cfg['tile_ry'].apply(s, OL, ry)
    rxo, rxi = cfg['tile_rx'].apply(s, OL, rx)
s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
s[AA].compute_at(s[OL], rxo)
s[WW].compute_at(s[OL], rxo)
# cooperative fetching
for load in [AA, WW]:
n, f, y, x = s[load].op.axis
fused = s[load].fuse(n, f, y, x)
tz, fused = s[load].split(fused, nparts=cfg["tile_f"].size[2])
ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
s[load].bind(tz, tvm.thread_axis("threadIdx.z"))
s[load].bind(ty, tvm.thread_axis("threadIdx.y"))
s[load].bind(tx, tvm.thread_axis("threadIdx.x"))
# unroll
s[output].pragma(kernel_scope, 'auto_unroll_max_step', cfg['auto_unroll_max_step'].val)
s[output].pragma(kernel_scope, 'unroll_explicit', cfg['unroll_explicit'].val)
N, CO, OH, OW = get_const_tuple(output.shape)
    _, CI, KH, KW = get_const_tuple(kernel.shape)
cfg.add_flop(2 * N * OH * OW * CO * CI * KH * KW)
return s, [data, kernel, conv]
|
Formal statement is: lemma lipschitz_on_minus[lipschitz_intros]: fixes f::"'a::metric_space \<Rightarrow>'b::real_normed_vector" assumes "C-lipschitz_on U f" shows "C-lipschitz_on U (\<lambda>x. - f x)" Informal statement is: If $f$ is $C$-Lipschitz on $U$, then $-f$ is $C$-Lipschitz on $U$.
|
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj11synthconj1 : forall (lv0 : natural) (lv1 : natural) (lv2 : natural), (@eq natural (plus Zero (plus (mult lv0 lv1) lv2)) (plus lv2 (mult lv0 lv1))).
Admitted.
QuickChick conj11synthconj1.
|
function formats = supported_wc_extensions()
% Return a cell array of the supported formats. Automatically updated.
formats ={};
raw_readers_folder = fileparts(mfilename('fullpath'));
files = dir(raw_readers_folder);
for i = 1: length(files)
fname = files(i).name;
fend = regexp(fname,'_wc_reader\.m$');
if ~isempty (fend)
formats = [formats,{fname(1:fend-1)}];
end
end
end
|
In July 2006 , Bosi and his wife Claire announced that they were intending to sell Hibiscus and open a new restaurant closer to London , or in the capital itself . Hibiscus closed in Ludlow in April 2007 , with Bosi selling the site to fellow chef Alan Murchison for £ 247 @,@ 500 , but retaining the Hibiscus name for himself . The restaurant was renamed " Le <unk> " ( sic ) , and underwent a £ 100 @,@ 000 makeover before being re @-@ opened under head chef Will Holland . In 2014 , Murchison 's company went into voluntary liquidation after running up debts of almost half a million pounds .
|
# Dimensional Reduction
G. Richards (2016, 2018), based on materials from Ivezic, Connolly, Leighly, and VanderPlas
**Before class starts, please try to do the following:**
> find . -name "sdss_corrected_spectra.py" -print
> ./anaconda/lib/python2.7/site-packages/astroML/datasets/sdss_corrected_spectra.py
> emacs -nw ./anaconda/lib/python2.7/site-packages/astroML/datasets/sdss_corrected_spectra.py
> #DATA_URL = 'http://www.astro.washington.edu/users/vanderplas/spec4000.npz'
> DATA_URL = 'http://staff.washington.edu/jakevdp/spec4000.npz'
Just in case that doesn't work, I've put "spec4000.npz" in PHYS_T480_F18/data. Copy this to your "astroML_data" directory.
## Curse of Dimensionality
You want to buy a car. Right now--you don't want to wait. But you are picky and have certain things that you would like it to have. Each of those things has a probability between 0 and 1 of being on the car dealer's lot. You want a red car, which has a probability of being on the lot of $p_{\rm red}$; you want good gas mileage, $p_{\rm gas}$; you want leather seats, $p_{\rm leather}$; and you want a sunroof, $p_{\rm sunroof}$. The probability that the dealer has a car on the lot that meets all of those requirements is
$$p_{\rm red} \, p_{\rm gas} \, p_{\rm leather} \, p_{\rm sunroof},$$
or $p^n$ where $n$ is the number of features (assuming equal probability for each).
If the probability of each of these is 50%, then the probability of you driving off with your car of choice is only $0.5*0.5*0.5*0.5 = 0.0625$. Not very good. Imagine if you also wanted other things. This is the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality).
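Checking the arithmetic for the car example in code:

```python
p = 0.5      # probability that any single desired feature is on the lot
n = 4        # number of desired features
print(p**n)  # 0.0625
```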
Let's illustrate the curse of dimensionality with two figures from [here.](https://medium.freecodecamp.org/the-curse-of-dimensionality-how-we-can-save-big-data-from-itself-d9fa0f872335)
In the first example we are trying to find which box hold some treasure, which gets harder and harder with more dimensions, despite there just being 5 boxes in each dimension:
In the next example we inscribe a circle in a square. The area outside of the circle grows larger and larger as the number of dimensions increase:
Mathematically we can describe this as: the more dimensions that your data span, the more points needed to uniformly sample the space.
For $D$ dimensions with coordinates $[-1,1]$, the fraction of points in a unit hypersphere (with radius $r$, as illustrated above) is
$$f_D = \frac{V_D(r)}{(2r)^D} = \frac{\pi^{D/2}}{D2^{D-1}\Gamma(D/2)}$$
which goes to $0$ as $D$ goes to infinity! Actually, as you can see from the plot below, it is effectively 0 much earlier than that!
```python
# Execute this cell
# from Andy Connolly
%matplotlib inline
import numpy as np
import scipy.special as sp
from matplotlib import pyplot as plt
def unitVolume(dimension, radius=1.):
return 2*(radius**dimension *np.pi**(dimension/2.))/(dimension*sp.gamma(dimension/2.))
dim = np.linspace(1,100)
#------------------------------------------------------------
# Plot the results
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(dim,unitVolume(dim)/2.**dim)
ax.set_yscale('log')
ax.set_xlabel('$Dimension$')
ax.set_ylabel('$Volume$')
plt.show()
```
Note that this works in the opposite direction too: let's say you want to find "rare" objects in 10 dimensions, where we'll define rare as <1% of the population. Then you'll need to accept objects from 63% of the distribution in all 10 dimensions! So are those really "rare" or are they just a particular 1% of the population?
```python
import numpy as np
p = 10**(np.log10(0.01)/10.0)
print(p)
```
N.B. Dimensionality isn't just measuring $D$ parameters for $N$ objects. It could be a spectrum with $D$ values or an image with $D$ pixels, etc. In the book the examples used just happen to be spectra of galaxies from the SDSS project. But we can insert the data of our choice instead.
For example: the SDSS comprises a sample of 357 million sources:
- each source has 448 measured attributes
- selecting just 30 (e.g., magnitude, size..) and normalizing the data range $-1$ to $1$
yields a probability of having one of the 357 million sources reside within a unit hypersphere of 1 in 1.4$\times 10^5$.
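As a rough check of that figure, we can reuse the `unitVolume` helper defined earlier (this assumes the 357 million sources are spread uniformly over the 30-dimensional cube $[-1,1]^{30}$):

```python
D = 30
frac = unitVolume(D) / 2.**D          # fraction of the hypercube inside the unit hypersphere
expected = 357e6 * frac               # expected number of sources inside it
print(frac, expected, 1. / expected)  # ~2e-14, ~7e-6, i.e. about 1 in 1.4e5
```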
## Principal Component Analysis (PCA)
In [Principal Component Analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) we seek to take a data set like the one shown below and apply a transform to the data such that the new axes are aligned with the maximal variance of the data. As can be seen in the Figure, this is basically just the same as doing regression by minimizing the square of the perpendicular distances to the new axes. Note that we haven't made any changes to the data, we have just defined new axes.
```python
# Execute this cell
# Ivezic, Figure 7.2
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)
r = 0.9
sigma1 = 0.25
sigma2 = 0.08
rotation = np.pi / 6
s = np.sin(rotation)
c = np.cos(rotation)
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],[s, c]])
X = np.dot(R, X) #Same data, now rotated by R matrix.
#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
# draw axes
ax.annotate(r'$x$', (-r, 0), (r, 0),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
# draw rotated axes
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)
# draw lines
vnorm = np.array([s, -c])
for v in (X.T):
d = np.dot(v, vnorm)
v1 = v - d * vnorm
ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')
# draw ellipses
for sigma in (1, 2, 3):
ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
rotation * 180. / np.pi,
ec='k', fc='gray', alpha=0.2, zorder=1))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.show()
```
Note that the points are correlated along a particular direction which doesn't align with the initial choice of axes. So, we should rotate our axes to align with this correlation.
We'll choose the rotation to maximize the ability to discriminate between the data points:
* the first axis, or **principal component**, is direction of maximal variance
* the second principal component is orthogonal to the first component and maximizes the residual variance
* ...
PCA is a dimensional reduction process because we can generally account for nearly "all" of the variance in the data set with fewer than the original $K$ dimensions. See more below.
We start with a data set $\{x_i\}$ which consists of $N$ objects for which we measure $K$ features. We begin by subtracting the mean for each feature in $\{x_i\}$ and write $X$ as an $N\times K$ matrix.
The covariance of this matrix is
$$C_X=\frac{1}{N-1}X^TX.$$
There are off-diagonal terms if there are correlations between the measurements (e.g., maybe two of the features are temperature dependent and the measurements were taken at the same time).
If $R$ is a projection of the data that is aligned with the maximal variance, then we have $Y= X R$ with covariance
$$ C_{Y} = R^T X^T X R = R^T C_X R.$$
$r_1$ is the first principal component of $R$, which can be derived using Lagrange multipliers with the following cost function:
$$ \phi(r_1,\lambda_1) = r_1^TC_X r_1 - \lambda_1(r_1^Tr_1-1). $$
If we take derivative of $\phi(r_1,\lambda)$ with respect to $r_1$ and set it to 0, then we have
$$ C_Xr_1 - \lambda_1 r_1 = 0. $$
$\lambda_1$ (the largest eigenvalue of the matrix) is the root of the equation $\det(C_X - \lambda_1 {\bf I})=0$ for which the eigenvalue is
$$ \lambda_1 = r_1^T C_X r_1.$$
The columns of the full matrix, $R$ are the eigenvectors (known here as principal components).
The diagonal values of $C_Y$ are the variance contained within each component.
We aren't going to go through the linear algebra more than that here. But it would be a good group project for someone. See the end of 7.3.1 starting at the bottom on page 294 or go through [Karen Leighly's PCA lecture notes](http://seminar.ouml.org/lectures/principal-components-analysis/) if you want to walk through the math in more detail.
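As a quick numerical sanity check of this result (not in the original text), we can verify that scikit-learn's PCA components coincide with the eigenvectors of $C_X$, up to an overall sign:

```python
import numpy as np
from sklearn.decomposition import PCA
np.random.seed(0)
X = np.random.normal(size=(500, 3))
X = X - X.mean(axis=0)                   # center the data
C_X = np.dot(X.T, X) / (X.shape[0] - 1)  # covariance matrix as defined above
evals_C, evecs_C = np.linalg.eigh(C_X)   # eigh returns ascending eigenvalues
order = np.argsort(evals_C)[::-1]        # reorder to descending
pca = PCA(n_components=3).fit(X)
# PCA components match the covariance eigenvectors up to sign:
print(np.allclose(np.abs(pca.components_), np.abs(evecs_C[:, order].T)))
```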
### Preparing data for PCA
* Subtract the mean of each dimension (to "center" the data)
* Divide by the variance in each dimension (to "whiten" the data)
* (For spectra and images) normalize each row to yield an integral of unity.
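Here is a minimal sketch of those three steps in NumPy (the array `X` and these variable names are illustrative, not part of astroML):

```python
import numpy as np
X = np.random.normal(size=(100, 5))  # stand-in for an N x K data matrix
X = X - X.mean(axis=0)               # center: subtract the mean of each feature
X = X / X.std(axis=0)                # whiten: scale each feature by its spread
# For spectra or images, also normalize each row to unit integral:
# X = X / X.sum(axis=1)[:, None]
```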
Below is a typical call to the PCA algorithm. Note that this is somewhat backwards. We are starting with `X` and then we are making it higher dimensional--to create a mock high-$D$ data set. Then we are applying PCA as a dimensionality reduction technique.
```python
#Example call from 7.3.2
import numpy as np
from sklearn.decomposition import PCA
X = np.random.normal(size=(100,3)) # 100 points in 3D
R = np.random.random((3,10)) # projection matrix
X = np.dot(X,R) # X is now 10-dim, with 3 intrinsic dims
pca = PCA(n_components=4) # n_components can be optionally set
pca.fit(X)
comp = pca.transform(X) # compute the subspace projection of X, 4 eigenvalues for each of the 100 samples
mean = pca.mean_ # length 10 mean of the data
components = pca.components_ # 4x10 matrix of components, multiply each by respective "comp" to reconstruct
#Reconstruction of object1
#Xreconstruct[0] = mean + [components][comp[0]]
```
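To make the reconstruction comment at the end concrete, here is one way to write that pseudocode out (a sketch; `pca.inverse_transform` does the same thing when `whiten=False`):

```python
Xreconstruct = mean + np.dot(comp, components)  # shape (100, 10); approximates X from 4 components
print(np.allclose(Xreconstruct, pca.inverse_transform(comp)))  # True
```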
To illustrate what is happening here is a PCA reconstruction of handwritten "3s" from [Hastie et al.](https://web.stanford.edu/~hastie/ElemStatLearn/) :
[Scikit-Learn's decomposition module](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition) has a number of [PCA type implementations](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html#sklearn.decomposition.PCA).
Let's work through an example using spectra of galaxies taken during the Sloan Digital Sky Survey. In this sample there are 4000 spectra with flux measurements in 1000 bins. 15 example spectra are shown below and our example will use half of the spectra chosen at random.
```python
%matplotlib inline
# Example from Andy Connolly
# See Ivezic, Figure 7.4
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
print(len(spectra), len(wavelengths))
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)
nrows = 2000 # We'll just look at 2000 random spectra
n_components = 5 # Do the fit with 5 components, which is the mean plus 4
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0) # Compute the mean spectrum, which is the first component
# spec_mean = spectra[:50].mean(0)
# use Randomized PCA for speed
#pca = RandomizedPCA(n_components - 1)
pca = PCA(n_components - 1,svd_solver='randomized')
pca.fit(spectra[ind])
pca_comp = np.vstack([spec_mean,pca.components_]) #Add the mean to the components
evals = pca.explained_variance_ratio_
print(evals)
```
Now let's plot the components. See also Ivezic, Figure 7.4. The left hand panels are just the first 5 spectra for comparison with the first 5 PCA components, which are shown on the right. They are ordered by the size of their eigenvalues.
```python
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = 'PCA components'
for j in range(n_components):
# plot the components
ax = fig.add_subplot(n_components, 2, 2*j+2)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel('wavelength (Angstroms)')
ax.plot(wavelengths, pca_comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
# plot the first j spectra
ax2 = fig.add_subplot(n_components, 2, 2*j+1)
ax2.yaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax2.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax2.set_xlabel('wavelength (Angstroms)')
ax2.plot(wavelengths, spectra[j], '-k', lw=1)
# plot zero line
ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax2.set_xlim(xlim)
if j == 0:
ax.set_title(titles, fontsize='medium')
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
ax.text(0.02, 0.95, label, transform=ax.transAxes,
ha='left', va='top', bbox=dict(ec='w', fc='w'),
fontsize='small')
plt.show()
```
Now let's make "scree" plots. These plots tell us how much of the variance is explained as a function of each eigenvector. Our plot won't look much like Ivezic, Figure 7.5, so I've shown it below to explain where "scree" comes from.
```python
# Execute this cell
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121)
ax.plot(np.arange(n_components-1), evals)
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("eigenvalue ")
ax = fig.add_subplot(122)
ax.plot(np.arange(n_components-1), evals.cumsum())
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("cumulative eigenvalue")
plt.show()
```
How much of the variance is explained by the first two components? How about all of the components?
```python
print("The first component explains {:.3f} of the variance in the data.".format(___.___[0])
print("The second component explains {:.3f} of the variance in the data.".format(___.___[0])
print("All components explain {:.3f} of the variance in the data.".format(sum(___.___))
```
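One possible completion of the exercise above (a sketch; it assumes the `evals = pca.explained_variance_ratio_` array from the fitting cell earlier):
```python
# Sketch solution (assumes `evals` from the PCA cell above)
print("The first component explains {:.3f} of the variance in the data.".format(evals[0]))
print("The second component explains {:.3f} of the variance in the data.".format(evals[1]))
print("All components explain {:.3f} of the variance in the data.".format(sum(evals)))
```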
This is why PCA enables dimensionality reduction.
How many components would we need to explain 99.5% of the variance?
```python
for num_feats in np.arange(1,20, dtype = int):
pca = PCA(___=___)
pca.___(spectra[ind])
if (sum(___.___)>___):
break
print("{:d} features are needed to explain 99.5% of the variance".format(____))
```
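One way to fill in the blanks (a sketch: refit PCA with an increasing number of components until the cumulative explained variance crosses 99.5%):
```python
# Sketch solution: keep adding components until 99.5% of the variance is explained
for num_feats in np.arange(1, 20, dtype=int):
    pca = PCA(n_components=num_feats)
    pca.fit(spectra[ind])
    if sum(pca.explained_variance_ratio_) > 0.995:
        break
print("{:d} features are needed to explain 99.5% of the variance".format(num_feats))
```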
Note that we would need 1000 components to encode *all* of the variance.
## Interpreting the PCA
- The output eigenvectors are ordered by their associated eigenvalues
- The eigenvalues reflect the variance within each eigenvector
- The sum of the eigenvalues is total variance of the system
- Projection of each spectrum onto the first few eigenspectra is a compression of the data
Once we have the eigenvectors, we can try to reconstruct an observed spectrum, ${x}(k)$, in the eigenvector basis, ${e}_i(k)$, as
$$ \begin{equation}
{x}_i(k) = {\mu}(k) + \sum_{j=1}^{R} \theta_{ij} {e}_j(k).
\end{equation}
$$
That would give a full (perfect) reconstruction of the data since it uses all of the eigenvectors. But if we truncate the sum (i.e., keep only $r<R$ eigenvectors), then we will have reduced the dimensionality while still reconstructing the data with relatively little loss of information.
For example, we started with 4000x1000 floating point numbers. If we can explain nearly all of the variance with 8 eigenvectors, then we have reduced the problem to 4000x8+8x1000 floating point numbers!
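To make that bookkeeping concrete, here is the storage arithmetic from the paragraph above as a tiny sketch:
```python
# Storage comparison for a rank-8 truncation (numbers from the text)
n_spec, n_bins, r = 4000, 1000, 8
full = n_spec * n_bins                # 4,000,000 floats for the raw data
compressed = n_spec * r + r * n_bins  # 40,000 floats: coefficients + eigenvectors
print(full, compressed, full / compressed)  # a factor of ~100 smaller
```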
Execute the next cell to see how the reconstruction improves by adding more components.
```python
# Execute this cell
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#------------------------------------------------------------
# Compute PCA components
# Eigenvalues can be computed using PCA as in the commented code below:
#from sklearn.decomposition import PCA
#pca = PCA()
#pca.fit(spectra)
#evals = pca.explained_variance_ratio_
#evals_cs = evals.cumsum()
# because the spectra have been reconstructed from masked values, this
# is not exactly correct in this case: we'll use the values computed
# in the file compute_sdss_pca.py
evals = data['evals'] ** 2
evals_cs = evals.cumsum()
evals_cs /= evals_cs[-1]
evecs = data['evecs']
spec_mean = spectra.mean(0)
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 4, 8, 20]):
ax = fig.add_subplot(411 + i)
ax.plot(wavelengths, spec, '-', c='gray')
ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
if i < 3:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylim(-2, 21)
ax.set_ylabel('flux')
if n == 0:
text = "mean"
elif n == 1:
text = "mean + 1 component\n"
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
else:
text = "mean + %i components\n" % n
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
```
### Caveats I
PCA is a linear process, whereas the variations in the data may not be. So it may not always be appropriate to use, and it may require a relatively large number of components to fully describe any non-linearity.
Note also that PCA can be very impractical for large data sets which exceed the memory per core, as the computational requirement goes as $\mathscr{O}(D^3)$ and the memory requirement goes as $\mathscr{O}(2D^2)$.
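As a back-of-the-envelope check of that memory scaling (a sketch; the exact constants depend on the implementation):
```python
# Rough memory estimate for the O(2 D^2) workspace of covariance-based PCA
D = 100000                   # e.g., 100k features
bytes_needed = 2 * D**2 * 8  # two DxD float64 arrays
print("{:.0f} GB".format(bytes_needed / 1e9))  # ~160 GB, beyond a single core's memory
```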
### Missing Data
We have assumed so far that there is no missing data (e.g., bad pixels in the spectrum). But often the data set is incomplete. Since PCA encodes the flux correlation with wavelength (or whatever parameters are in your data set), we can actually use it to determine missing values.
An example is shown below. Here, black are the observed spectra. Gray are the regions where we have no data. Blue is the PCA reconstruction, including the regions where there are no data. Awesome, isn't it?
```python
# Execute this cell
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from astroML.datasets import fetch_sdss_corrected_spectra
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Get spectra and eigenvectors used to reconstruct them
data = fetch_sdss_corrected_spectra()
spec = sdss_corrected_spectra.reconstruct_spectra(data)
lam = sdss_corrected_spectra.compute_wavelengths(data)
evecs = data['evecs']
mu = data['mu']
norms = data['norms']
mask = data['mask']
#------------------------------------------------------------
# plot the results
i_plot = ((lam > 5750) & (lam < 6350))
lam = lam[i_plot]
specnums = [20, 8, 9]
subplots = [311, 312, 313]
fig = plt.figure(figsize=(8, 10))
fig.subplots_adjust(hspace=0)
for subplot, i in zip(subplots, specnums):
ax = fig.add_subplot(subplot)
# compute eigen-coefficients
spec_i_centered = spec[i] / norms[i] - mu
coeffs = np.dot(spec_i_centered, evecs.T)
# blank out masked regions
spec_i = spec[i]
mask_i = mask[i]
spec_i[mask_i] = np.nan
# plot the raw masked spectrum
ax.plot(lam, spec_i[i_plot], '-', color='k', lw=2,
label='True spectrum')
# plot two levels of reconstruction
for nev in [10]:
if nev == 0:
label = 'mean'
else:
label = 'N EV=%i' % nev
spec_i_recons = norms[i] * (mu + np.dot(coeffs[:nev], evecs[:nev]))
ax.plot(lam, spec_i_recons[i_plot], label=label)
# plot shaded background in masked region
ylim = ax.get_ylim()
mask_shade = ylim[0] + mask[i][i_plot].astype(float) * ylim[1]
plt.fill(np.concatenate([lam[:1], lam, lam[-1:]]),
np.concatenate([[ylim[0]], mask_shade, [ylim[0]]]),
lw=0, fc='k', alpha=0.2)
ax.set_xlim(lam[0], lam[-1])
ax.set_ylim(ylim)
ax.yaxis.set_major_formatter(ticker.NullFormatter())
if subplot == 311:
ax.legend(loc=1, prop=dict(size=14))
ax.set_xlabel('$\lambda\ (\AA)$')
ax.set_ylabel('normalized flux')
plt.show()
```
The example that we have been using above is "spectral" PCA. Some examples from the literature include:
- [Francis et al. 1992](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1992ApJ...398..476F&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf)
- [Connolly et al. 1995](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1995AJ....110.1071C&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf)
- [Yip et al. 2004](http://iopscience.iop.org/article/10.1086/425626/meta;jsessionid=31BB5F11B85D2BF4180834DC71BA0B85.c3.iopscience.cld.iop.org)
One can also do PCA on features that aren't ordered (as they were for the spectra). E.g., if you have $D$ different parameters measured for your objects. The classic example in astronomy is
[Boroson & Green 1992](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1992ApJS...80..109B&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf).
### Caveats II
One of the things that I don't like about PCA is that the eigenvectors are defined relative to the mean. So they can be positive or negative and they often don't look anything like the original data itself. Whereas it is often the case that you might expect that the components would look like, well, the physical components. For example, quasars are fundamentally galaxies. So, part of their flux comes from the galaxy that they live in. But PCA doesn't return any component that looks like a typical galaxy.
## Non-negative Matrix Factorization (NMF)
This is where [Non-negative Matrix Factorization (NMF)](https://en.wikipedia.org/wiki/Non-negative_matrix_factorization) comes in. Here we are treating the data as a linear sum of positive-definite components.
NMF assumes any data matrix can be factored into two matrices, $W$ and $Y$, with
$$\begin{equation}
X=W Y,
\end{equation}
$$
where both $W$ and $Y$ are nonnegative.
So, $WY$ is an approximation of $X$. Minimizing the reconstruction error $||X - WY||^2$,
nonnegative bases can be derived through an iterative process.
Note, however, that the iterative process can get stuck in local minima (as with $K$-means and EM), so
random initialization and cross-validation can be used to search for the global minimum.
An example from the literature is [Allen et al. 2008](http://arxiv.org/abs/0810.4231)
In Scikit-Learn the [NMF implementation](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.NMF.html) looks like:
```python
# Execute this cell
import numpy as np
from sklearn.decomposition import NMF
X = np.random.random((100,10)) # 100 points in 10-D
nmf = NMF(n_components=3)
nmf.fit(X)
proj = nmf.transform(X) # project to 3 dimension
comp = nmf.components_ # 3x10 array of components
err = nmf.reconstruction_err_ # how well 3 components capture the data
```
An example (and comparison to PCA) is given below.
```python
# Execute the next 2 cells
# Example from Figure 7.4
# Author: Jake VanderPlas
# License: BSD
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import NMF
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
```
```python
#----------------------------------------------------------------------
# Compute PCA, and NMF components
def compute_PCA_NMF(n_components=5):
spec_mean = spectra.mean(0)
# PCA: use randomized PCA for speed
#pca = RandomizedPCA(n_components - 1)
pca = PCA(n_components - 1,svd_solver='randomized')
pca.fit(spectra)
pca_comp = np.vstack([spec_mean, pca.components_])
    # NMF requires all elements of the input to be nonnegative
spectra[spectra < 0] = 0
nmf = NMF(n_components)
nmf.fit(spectra)
nmf_comp = nmf.components_
return pca_comp, nmf_comp
n_components = 5
decompositions = compute_PCA_NMF(n_components)
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = ['PCA components', 'NMF components']
for i, comp in enumerate(decompositions):
for j in range(n_components):
ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel('wavelength (Angstroms)')
ax.plot(wavelengths, comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
if j == 0:
ax.set_title(titles[i])
if titles[i].startswith('PCA') or titles[i].startswith('ICA'):
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
else:
label = 'component %i' % (j + 1)
ax.text(0.03, 0.94, label, transform=ax.transAxes,
ha='left', va='top')
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(2)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
plt.show()
```
## Independent Component Analysis (ICA)
For data where the components are statistically independent (or nearly so) [Independent Component Analysis (ICA)](https://en.wikipedia.org/wiki/Independent_component_analysis) has become a popular method for separating mixed components. The classical example is the so-called "cocktail party" problem. This is illustrated in the following figure from Hastie, Tibshirani, and Friedman (Figure 14.27 on page 497 in my copy, so they have clearly added some stuff!). Think of the "source signals" as two voices at a party. You are trying to concentrate on just one voice. What you hear is something like the "measured signals" pattern. You could run the data through PCA and that would do an excellent job of reconstructing the signal with reduced dimensionality, but it wouldn't actually isolate the different physical components (bottom-left panel). ICA on the other hand can (bottom-right panel).
![Source signals, measured signals, and the PCA and ICA solutions, from Hastie et al.](images/HastieFigure14_37.png)
[Hastie et al.](https://web.stanford.edu/~hastie/ElemStatLearn/): "ICA applied to multivariate data looks for a sequence of orthogonal projections such that the projected data look as far from Gaussian as possible. With pre-whitened data, this amounts to looking for
components that are as independent as possible."
In short, you want to find components that are maximally non-Gaussian, since the sum of two random variables will be more Gaussian than either of the components (remember the Central Limit Theorem).
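As a quick numerical illustration (a stand-in for the Hastie et al. figure, not their actual example): the excess kurtosis of the sum of two independent uniform variables is closer to the Gaussian value of zero than that of either variable alone.
```python
# Excess kurtosis is 0 for a Gaussian, -1.2 for a uniform, and -0.6 for a
# triangular distribution (the sum of two independent uniforms)
import numpy as np
from scipy.stats import kurtosis
rng = np.random.RandomState(42)
s1 = rng.uniform(-1, 1, 100000)
s2 = rng.uniform(-1, 1, 100000)
print(kurtosis(s1))       # ~ -1.2, far from Gaussian
print(kurtosis(s1 + s2))  # ~ -0.6, closer to Gaussian
```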
ICA is a good choice for a complex system with relatively independent components. For example, a galaxy is roughly a linear combination of cool stars and hot stars, and a quasar is just a galaxy with additional components from an accretion disk and emission line regions. Ideally we want "eigenvectors" that are aligned with those physical traits/regions as opposed to mathematical constructs.
The basic call to the [FastICA algorithm](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html) in Scikit-Learn looks like:
```python
# Execute this cell
import numpy as np
from sklearn.decomposition import FastICA
X = np.random.normal(size=(100,2)) # 100 objects in 2D
R = np.random.random((2,5)) # mixing matrix
X = np.dot(X,R) # Simulation of a 5D data space
ica = FastICA(2) # Now reproject to 2-D
ica.fit(X)
proj = ica.transform(X) # 100x2 projection of the data
comp = ica.components_ # 2x5 matrix of independent components
## sources = ica.sources_ # 100x2 matrix of sources
```
Execute the next 2 cells to produce a plot showing the ICA components.
```python
%matplotlib inline
#Example from Andy Connolly
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import FastICA
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)
nrows = 500
n_components = 5
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0)
# spec_mean = spectra[:50].mean(0)
ica = FastICA(n_components - 1)
ica.fit(spectra[ind])
ica_comp = np.vstack([spec_mean,ica.components_]) #Add the mean to the components
```
```python
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = 'ICA components'
for j in range(n_components):
# plot the components
ax = fig.add_subplot(n_components, 2, 2*j+2)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel(r'wavelength ${\rm (\AA)}$')
ax.plot(wavelengths, ica_comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
    # plot the j-th spectrum on the left for comparison
ax2 = fig.add_subplot(n_components, 2, 2*j+1)
ax2.yaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax2.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax2.set_xlabel(r'wavelength ${\rm (\AA)}$')
ax2.plot(wavelengths, spectra[j], '-k', lw=1)
# plot zero line
ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax2.set_xlim(xlim)
if j == 0:
ax.set_title(titles, fontsize='medium')
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
ax.text(0.02, 0.95, label, transform=ax.transAxes,
ha='left', va='top', bbox=dict(ec='w', fc='w'),
fontsize='small')
plt.show()
```
As with PCA and NMF, we can similarly do a reconstruction:
```python
# Execute this cell
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
evecs = data['evecs']  # note: these are the stored (PCA) eigenvectors from the data file
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 2, 4, 8]):
ax = fig.add_subplot(411 + i)
ax.plot(wavelengths, spec, '-', c='gray')
ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
if i < 3:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylim(-2, 21)
ax.set_ylabel('flux')
if n == 0:
text = "mean"
elif n == 1:
text = "mean + 1 component\n"
#text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
else:
text = "mean + %i components\n" % n
#text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
```
Ivezic, Figure 7.4 compares the components found by the PCA, ICA, and NMF algorithms. Their differences and similarities are quite interesting.
If you think that I was pulling your leg about the cocktail party problem, try it yourself!
Load the code (e.g., with `%load` instead of `%run`) and see what effect changing some things has.
```python
%run code/plot_ica_blind_source_separation.py
```
Let's revisit the digits sample and see what PCA, NMF, and ICA do for it.
```python
## Execute this cell to load the digits sample
%matplotlib inline
import numpy as np
from sklearn.datasets import load_digits
from matplotlib import pyplot as plt
digits = load_digits()
grid_data = np.reshape(digits.data[0], (8,8)) #reshape to 8x8
plt.imshow(grid_data, interpolation = "nearest", cmap = "bone_r")
print(grid_data)
X = digits.data
y = digits.target
```
Do the PCA transform, projecting to 2 dimensions and plot the results.
```python
# PCA
from sklearn.decomposition import ___
pca = PCA(n_components = ___)
pca.___(___)
X_reduced = pca.transform(___)
plt.scatter(X_reduced[:,___], X_reduced[:,___], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
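One possible completion (a sketch; it assumes `X`, `y`, and `plt` from the loading cell above):
```python
# Sketch solution for the PCA cell above
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
X_reduced = pca.transform(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```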
Similarly for NMF and ICA
```python
# NMF
from sklearn.decomposition import ___
nmf = NMF(___)
nmf.___(___)
X_reduced = nmf.___(___)
plt.scatter(___, ___, c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
```python
# ICA
from sklearn.decomposition import ___
ica = FastICA(___)
ica.___(___)
X_reduced = ica.___(___)
plt.scatter(___, ___, c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
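Possible completions for the NMF and ICA cells (sketches; NMF works here because the digits pixel values are nonnegative):
```python
# Sketch solutions for the NMF and ICA cells above
# (run each block in its own cell to get separate figures)
from sklearn.decomposition import NMF, FastICA
nmf = NMF(n_components=2)
X_reduced = nmf.fit_transform(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()

ica = FastICA(n_components=2)
X_reduced = ica.fit_transform(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```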
Take a second to think about what ICA is doing. What if you had digits from digital clocks instead of handwritten?
I wasn't going to introduce [Neural Networks](https://en.wikipedia.org/wiki/Artificial_neural_network) yet, but it is worth noting that Scikit-Learn's [`Bernoulli Restricted Boltzmann Machine (RBM)`](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.BernoulliRBM.html) is discussed in the [(unsupervised) neural network](http://scikit-learn.org/stable/modules/neural_networks_unsupervised.html) part of the User's Guide. It is relevant here because the input data must be either binary or values between 0 and 1, which is the case that we have here.
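A minimal sketch of what that might look like on the digits (the hyperparameters are illustrative, not tuned):
```python
# Minimal BernoulliRBM sketch -- inputs must lie in [0, 1]
from sklearn.neural_network import BernoulliRBM
X01 = X / 16.0  # digits pixel values run from 0 to 16
rbm = BernoulliRBM(n_components=64, learning_rate=0.06, n_iter=10, random_state=0)
rbm.fit(X01)
print(rbm.components_.shape)  # (64, 64): one 8x8 filter per hidden unit
```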
We could think about doing dimensionality reduction of the digits data set in another way. There are 64 pixels in each of our images. Presumably all of them aren't equally useful. Let's figure out exactly which pixels are the most relevant. We'll use Scikit-Learn's [`RandomForestRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html). We won't get to regression until next week, but you don't need to understand the algorithm to do this; just look at the inputs and outputs. Which pixels are the most important? As a bonus, see if you can plot digit images with those pixels highlighted (one possible approach is sketched after the cell below).
```python
from sklearn.ensemble import RandomForestRegressor
RFreg = RandomForestRegressor()# Complete or leave blank as you see fit
RFreg.fit(X,y)# Do Fitting
importances = RFreg.feature_importances_# Determine "importances"
pixelorder = np.argsort(importances)[::-1] #Rank importances (highest to lowest)
print(pixelorder)
plt.figure()
plt.imshow(np.reshape(importances,(8,8)),interpolation="nearest")
plt.show()
```
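For the bonus, one way to highlight the most important pixels (a sketch, reusing `pixelorder` from the cell above):
```python
# Bonus sketch: circle the 8 most important pixels on an example digit
top = pixelorder[:8]
ys, xs = np.unravel_index(top, (8, 8))
plt.imshow(np.reshape(digits.data[0], (8, 8)), interpolation="nearest", cmap="bone_r")
plt.scatter(xs, ys, facecolors="none", edgecolors="red", s=200)
plt.show()
```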
|
[STATEMENT]
lemma cont2cont_case_prim [simp, cont2cont]:
assumes "\<And>y. cont (\<lambda>x. f1 x y)"
and "\<And>y z. cont (\<lambda>x. f2 x y z)"
shows "cont (\<lambda>x. case_prim (f1 x) (f2 x) p)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. cont (\<lambda>x. case p of Plus xa \<Rightarrow> f1 x xa | prim.If xa xb \<Rightarrow> f2 x xa xb)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
cont (\<lambda>x. f1 x ?y)
cont (\<lambda>x. f2 x ?y ?z)
goal (1 subgoal):
1. cont (\<lambda>x. case p of Plus xa \<Rightarrow> f1 x xa | prim.If xa xb \<Rightarrow> f2 x xa xb)
[PROOF STEP]
by (cases p) auto
|
import os
import numpy as np
import pytest
import tensorflow as tf
import torch
from finetuner.tuner.base import BaseTuner
from finetuner.tuner.callback import BestModelCheckpoint, TrainingCheckpoint
from finetuner.tuner.keras import KerasTuner
from finetuner.tuner.pytorch import PytorchTuner
from finetuner.tuner.state import TunerState
@pytest.fixture(scope='module')
def pytorch_model() -> BaseTuner:
embed_model = torch.nn.Linear(in_features=10, out_features=10)
return embed_model
@pytest.fixture(scope='module')
def keras_model() -> BaseTuner:
embed_model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(10, activation='relu'),
]
)
return embed_model
@pytest.mark.parametrize(
'mode, monitor, operation, best',
(
('min', 'val_loss', np.less, np.Inf),
('max', 'val_loss', np.greater, -np.Inf),
('auto', 'val_loss', np.less, np.Inf),
('max', 'acc', np.greater, -np.Inf),
('somethingelse', 'acc', np.greater, -np.Inf),
),
)
def test_mode(mode: str, monitor: str, operation, best, tmpdir):
checkpoint = BestModelCheckpoint(save_dir=tmpdir, mode=mode, monitor=monitor)
assert checkpoint._monitor_op == operation
assert checkpoint._best == best
def test_mandatory_save_dir():
with pytest.raises(TypeError, match='missing'):
TrainingCheckpoint()
def test_last_k_epochs_file(pytorch_model: BaseTuner, tmpdir):
checkpoint = TrainingCheckpoint(save_dir=tmpdir, last_k_epochs=3)
tuner = PytorchTuner(embed_model=pytorch_model)
for epoch in range(4):
tuner.state = TunerState(
epoch=epoch, batch_index=2, current_loss=1.1, num_epochs=10
)
checkpoint.on_epoch_end(tuner)
assert set(os.listdir(tmpdir)) == {
'saved_model_epoch_02',
'saved_model_epoch_03',
'saved_model_epoch_04',
}
for epoch in range(4, 10):
tuner.state = TunerState(
epoch=epoch, batch_index=2, current_loss=1.1, num_epochs=10
)
checkpoint.on_epoch_end(tuner)
assert set(os.listdir(tmpdir)) == {
'saved_model_epoch_10',
'saved_model_epoch_09',
'saved_model_epoch_08',
}
def test_last_k_epochs_folder(keras_model: BaseTuner, tmpdir):
checkpoint = TrainingCheckpoint(save_dir=tmpdir, last_k_epochs=3)
tuner = KerasTuner(embed_model=keras_model)
for epoch in range(4):
tuner.state = TunerState(
epoch=epoch, batch_index=2, current_loss=1.1, num_epochs=10
)
checkpoint.on_epoch_end(tuner)
assert set(os.listdir(tmpdir)) == {
'saved_model_epoch_02',
'saved_model_epoch_03',
'saved_model_epoch_04',
}
for epoch in range(4, 10):
tuner.state = TunerState(
epoch=epoch, batch_index=2, current_loss=1.1, num_epochs=10
)
checkpoint.on_epoch_end(tuner)
assert set(os.listdir(tmpdir)) == {
'saved_model_epoch_10',
'saved_model_epoch_09',
'saved_model_epoch_08',
}
|
[STATEMENT]
lemma is_vals_append [simp]: "is_vals (es @ es') \<longleftrightarrow> is_vals es \<and> is_vals es'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_vals (es @ es') = (is_vals es \<and> is_vals es')
[PROOF STEP]
by(induct es) auto
|
#pragma once
#include <string_view>
#include <gsl/span>
namespace kab_advent {
auto day(gsl::span<std::string_view const> args) -> int;
}
|
(* Title: JinjaThreads/Compiler/Exception_Tables.thy
Author: Andreas Lochbihler
*)
section \<open>Various Operations for Exception Tables\<close>
theory Exception_Tables imports
Compiler2
"../Common/ExternalCallWF"
"../JVM/JVMExceptions"
begin
definition pcs :: "ex_table \<Rightarrow> nat set"
where "pcs xt \<equiv> \<Union>(f,t,C,h,d) \<in> set xt. {f ..< t}"
lemma pcs_subset:
fixes e :: "'addr expr1" and es :: "'addr expr1 list"
shows "pcs(compxE2 e pc d) \<subseteq> {pc..<pc+size(compE2 e)}"
and "pcs(compxEs2 es pc d) \<subseteq> {pc..<pc+size(compEs2 es)}"
apply(induct e pc d and es pc d rule: compxE2_compxEs2_induct)
apply (simp_all add:pcs_def)
apply (fastforce)+
done
lemma pcs_Nil [simp]: "pcs [] = {}"
by(simp add:pcs_def)
lemma pcs_Cons [simp]: "pcs (x#xt) = {fst x ..< fst(snd x)} \<union> pcs xt"
by(auto simp add: pcs_def)
lemma pcs_append [simp]: "pcs(xt\<^sub>1 @ xt\<^sub>2) = pcs xt\<^sub>1 \<union> pcs xt\<^sub>2"
by(simp add:pcs_def)
lemma [simp]: "pc < pc0 \<or> pc0+size(compEs2 es) \<le> pc \<Longrightarrow> pc \<notin> pcs(compxEs2 es pc0 d)"
using pcs_subset by fastforce
lemma [simp]: "pc1 + size(compE2 e1) \<le> pc2 \<Longrightarrow> pcs(compxE2 e1 pc1 d1) \<inter> pcs(compxE2 e2 pc2 d2) = {}"
using pcs_subset by fastforce
lemma [simp]: "pc\<^sub>1 + size(compE2 e) \<le> pc\<^sub>2 \<Longrightarrow> pcs(compxE2 e pc\<^sub>1 d\<^sub>1) \<inter> pcs(compxEs2 es pc\<^sub>2 d\<^sub>2) = {}"
using pcs_subset by fastforce
lemma match_ex_table_append_not_pcs [simp]:
"pc \<notin> pcs xt0 \<Longrightarrow> match_ex_table P C pc (xt0 @ xt1) = match_ex_table P C pc xt1"
by (induct xt0) (auto simp: matches_ex_entry_def)
lemma outside_pcs_not_matches_entry [simp]:
"\<lbrakk> x \<in> set xt; pc \<notin> pcs xt \<rbrakk> \<Longrightarrow> \<not> matches_ex_entry P D pc x"
by(auto simp:matches_ex_entry_def pcs_def)
lemma outside_pcs_compxE2_not_matches_entry [simp]:
assumes xe: "xe \<in> set(compxE2 e pc d)"
and outside: "pc' < pc \<or> pc+size(compE2 e) \<le> pc'"
shows "\<not> matches_ex_entry P C pc' xe"
proof
assume "matches_ex_entry P C pc' xe"
with xe have "pc' \<in> pcs(compxE2 e pc d)"
by(force simp add:matches_ex_entry_def pcs_def)
with outside show False by simp
qed
lemma outside_pcs_compxEs2_not_matches_entry [simp]:
assumes xe: "xe \<in> set(compxEs2 es pc d)"
and outside: "pc' < pc \<or> pc+size(compEs2 es) \<le> pc'"
shows "\<not> matches_ex_entry P C pc' xe"
proof
assume "matches_ex_entry P C pc' xe"
with xe have "pc' \<in> pcs(compxEs2 es pc d)"
by(force simp add:matches_ex_entry_def pcs_def)
with outside show False by simp
qed
lemma match_ex_table_app[simp]:
"\<forall>xte \<in> set xt\<^sub>1. \<not> matches_ex_entry P D pc xte \<Longrightarrow>
match_ex_table P D pc (xt\<^sub>1 @ xt) = match_ex_table P D pc xt"
by(induct xt\<^sub>1) simp_all
lemma match_ex_table_eq_NoneI [simp]:
"\<forall>x \<in> set xtab. \<not> matches_ex_entry P C pc x \<Longrightarrow>
match_ex_table P C pc xtab = None"
using match_ex_table_app[where ?xt = "[]"] by fastforce
lemma match_ex_entry:
fixes start shows
"matches_ex_entry P C pc (start, end, catch_type, handler) =
(start \<le> pc \<and> pc < end \<and> (case catch_type of None \<Rightarrow> True | \<lfloor>C'\<rfloor> \<Rightarrow> P \<turnstile> C \<preceq>\<^sup>* C'))"
by(simp add:matches_ex_entry_def)
lemma pcs_compxE2D [dest]:
"pc \<in> pcs (compxE2 e pc' d) \<Longrightarrow> pc' \<le> pc \<and> pc < pc' + length (compE2 e)"
using pcs_subset by(fastforce)
lemma pcs_compxEs2D [dest]:
"pc \<in> pcs (compxEs2 es pc' d) \<Longrightarrow> pc' \<le> pc \<and> pc < pc' + length (compEs2 es)"
using pcs_subset by(fastforce)
definition shift :: "nat \<Rightarrow> ex_table \<Rightarrow> ex_table"
where
"shift n xt \<equiv> map (\<lambda>(from,to,C,handler,depth). (n+from,n+to,C,n+handler,depth)) xt"
lemma shift_0 [simp]: "shift 0 xt = xt"
by(induct xt)(auto simp:shift_def)
lemma shift_Nil [simp]: "shift n [] = []"
by(simp add:shift_def)
lemma shift_Cons_tuple [simp]:
"shift n ((from, to, C, handler, depth) # xt) = (from + n, to + n, C, handler + n, depth) # shift n xt"
by(simp add: shift_def)
lemma shift_append [simp]: "shift n (xt\<^sub>1 @ xt\<^sub>2) = shift n xt\<^sub>1 @ shift n xt\<^sub>2"
by(simp add:shift_def)
lemma shift_shift [simp]: "shift m (shift n xt) = shift (m+n) xt"
by(simp add: shift_def split_def)
lemma fixes e :: "'addr expr1" and es :: "'addr expr1 list"
shows shift_compxE2: "shift pc (compxE2 e pc' d) = compxE2 e (pc' + pc) d"
and shift_compxEs2: "shift pc (compxEs2 es pc' d) = compxEs2 es (pc' + pc) d"
by(induct e and es arbitrary: pc pc' d and pc pc' d rule: compE2.induct compEs2.induct)
(auto simp:shift_def ac_simps)
lemma compxE2_size_convs [simp]: "n \<noteq> 0 \<Longrightarrow> compxE2 e n d = shift n (compxE2 e 0 d)"
and compxEs2_size_convs: "n \<noteq> 0 \<Longrightarrow> compxEs2 es n d = shift n (compxEs2 es 0 d)"
by(simp_all add:shift_compxE2 shift_compxEs2)
lemma pcs_shift_conv [simp]: "pcs (shift n xt) = (+) n ` pcs xt"
apply(auto simp add: shift_def pcs_def)
apply(rule_tac x="x-n" in image_eqI)
apply(auto)
apply(rule bexI)
prefer 2
apply(assumption)
apply(auto)
done
lemma image_plus_const_conv [simp]:
fixes m :: nat
shows "m \<in> (+) n ` A \<longleftrightarrow> m \<ge> n \<and> m - n \<in> A"
by(force)
lemma match_ex_table_shift_eq_None_conv [simp]:
"match_ex_table P C pc (shift n xt) = None \<longleftrightarrow> pc < n \<or> match_ex_table P C (pc - n) xt = None"
by(induct xt)(auto simp add: match_ex_entry split: if_split_asm)
lemma match_ex_table_shift_pc_None:
"pc \<ge> n \<Longrightarrow> match_ex_table P C pc (shift n xt) = None \<longleftrightarrow> match_ex_table P C (pc - n) xt = None"
by(simp add: match_ex_table_shift_eq_None_conv)
lemma match_ex_table_shift_eq_Some_conv [simp]:
"match_ex_table P C pc (shift n xt) = \<lfloor>(pc', d)\<rfloor> \<longleftrightarrow>
pc \<ge> n \<and> pc' \<ge> n \<and> match_ex_table P C (pc - n) xt = \<lfloor>(pc' - n, d)\<rfloor>"
by(induct xt)(auto simp add: match_ex_entry split: if_split_asm)
lemma match_ex_table_shift:
"match_ex_table P C pc xt = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> match_ex_table P C (n + pc) (shift n xt) = \<lfloor>(n + pc', d)\<rfloor>"
by(simp add: match_ex_table_shift_eq_Some_conv)
lemma match_ex_table_shift_pcD:
"match_ex_table P C pc (shift n xt) = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> pc \<ge> n \<and> pc' \<ge> n \<and> match_ex_table P C (pc - n) xt = \<lfloor>(pc' - n, d)\<rfloor>"
by(simp add: match_ex_table_shift_eq_Some_conv)
lemma match_ex_table_pcsD: "match_ex_table P C pc xt = \<lfloor>(pc', D)\<rfloor> \<Longrightarrow> pc \<in> pcs xt"
by(induct xt)(auto split: if_split_asm simp add: match_ex_entry)
definition stack_xlift :: "nat \<Rightarrow> ex_table \<Rightarrow> ex_table"
where "stack_xlift n xt \<equiv> map (\<lambda>(from,to,C,handler,depth). (from, to, C, handler, n + depth)) xt"
lemma stack_xlift_0 [simp]: "stack_xlift 0 xt = xt"
by(induct xt, auto simp add: stack_xlift_def)
lemma stack_xlift_Nil [simp]: "stack_xlift n [] = []"
by(simp add: stack_xlift_def)
lemma stack_xlift_Cons_tuple [simp]:
"stack_xlift n ((from, to, C, handler, depth) # xt) = (from, to, C, handler, depth + n) # stack_xlift n xt"
by(simp add: stack_xlift_def)
lemma stack_xlift_append [simp]: "stack_xlift n (xt @ xt') = stack_xlift n xt @ stack_xlift n xt'"
by(simp add: stack_xlift_def)
lemma stack_xlift_stack_xlift [simp]: "stack_xlift n (stack_xlift m xt) = stack_xlift (n + m) xt"
by(simp add: stack_xlift_def split_def)
lemma fixes e :: "'addr expr1" and es :: "'addr expr1 list"
shows stack_xlift_compxE2: "stack_xlift n (compxE2 e pc d) = compxE2 e pc (n + d)"
and stack_xlift_compxEs2: "stack_xlift n (compxEs2 es pc d) = compxEs2 es pc (n + d)"
by(induct e and es arbitrary: d pc and d pc rule: compE2.induct compEs2.induct)
(auto simp add: shift_compxE2 simp del: compxE2_size_convs)
lemma compxE2_stack_xlift_convs [simp]: "d > 0 \<Longrightarrow> compxE2 e pc d = stack_xlift d (compxE2 e pc 0)"
and compxEs2_stack_xlift_convs [simp]: "d > 0 \<Longrightarrow> compxEs2 es pc d = stack_xlift d (compxEs2 es pc 0)"
by(simp_all add: stack_xlift_compxE2 stack_xlift_compxEs2)
lemma stack_xlift_shift [simp]: "stack_xlift d (shift n xt) = shift n (stack_xlift d xt)"
by(induct xt)(auto)
lemma pcs_stack_xlift_conv [simp]: "pcs (stack_xlift n xt) = pcs xt"
by(auto simp add: pcs_def stack_xlift_def)
lemma match_ex_table_stack_xlift_eq_None_conv [simp]:
"match_ex_table P C pc (stack_xlift d xt) = None \<longleftrightarrow> match_ex_table P C pc xt = None"
by(induct xt)(auto simp add: match_ex_entry)
lemma match_ex_table_stack_xlift_eq_Some_conv [simp]:
"match_ex_table P C pc (stack_xlift n xt) = \<lfloor>(pc', d)\<rfloor> \<longleftrightarrow> d \<ge> n \<and> match_ex_table P C pc xt = \<lfloor>(pc', d - n)\<rfloor>"
by(induct xt)(auto simp add: match_ex_entry)
lemma match_ex_table_stack_xliftD:
"match_ex_table P C pc (stack_xlift n xt) = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> d \<ge> n \<and> match_ex_table P C pc xt = \<lfloor>(pc', d - n)\<rfloor>"
by(simp)
lemma match_ex_table_stack_xlift:
"match_ex_table P C pc xt = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> match_ex_table P C pc (stack_xlift n xt) = \<lfloor>(pc', n + d)\<rfloor>"
by simp
lemma pcs_stack_xlift: "pcs (stack_xlift n xt) = pcs xt"
by(auto simp add: stack_xlift_def pcs_def)
lemma match_ex_table_None_append [simp]:
"match_ex_table P C pc xt = None
\<Longrightarrow> match_ex_table P C pc (xt @ xt') = match_ex_table P C pc xt'"
by(induct xt, auto)
lemma match_ex_table_Some_append [simp]:
"match_ex_table P C pc xt = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> match_ex_table P C pc (xt @ xt') = \<lfloor>(pc', d)\<rfloor>"
by(induct xt)(auto)
lemma match_ex_table_append:
"match_ex_table P C pc (xt @ xt') = (case match_ex_table P C pc xt of None \<Rightarrow> match_ex_table P C pc xt'
| Some pcd \<Rightarrow> Some pcd)"
by(auto)
lemma match_ex_table_pc_length_compE2:
"match_ex_table P a pc (compxE2 e pc' d) = \<lfloor>pcd\<rfloor> \<Longrightarrow> pc' \<le> pc \<and> pc < length (compE2 e) + pc'"
and match_ex_table_pc_length_compEs2:
"match_ex_table P a pc (compxEs2 es pc' d) = \<lfloor>pcd\<rfloor> \<Longrightarrow> pc' \<le> pc \<and> pc < length (compEs2 es) + pc'"
using pcs_subset by(cases pcd, fastforce dest!: match_ex_table_pcsD)+
lemma match_ex_table_compxE2_shift_conv:
"f > 0 \<Longrightarrow> match_ex_table P C pc (compxE2 e f d) = \<lfloor>(pc', d')\<rfloor> \<longleftrightarrow> pc \<ge> f \<and> pc' \<ge> f \<and> match_ex_table P C (pc - f) (compxE2 e 0 d) = \<lfloor>(pc' - f, d')\<rfloor>"
by simp
lemma match_ex_table_compxEs2_shift_conv:
"f > 0 \<Longrightarrow> match_ex_table P C pc (compxEs2 es f d) = \<lfloor>(pc', d')\<rfloor> \<longleftrightarrow> pc \<ge> f \<and> pc' \<ge> f \<and> match_ex_table P C (pc - f) (compxEs2 es 0 d) = \<lfloor>(pc' - f, d')\<rfloor>"
by(simp add: compxEs2_size_convs)
lemma match_ex_table_compxE2_stack_conv:
"d > 0 \<Longrightarrow> match_ex_table P C pc (compxE2 e 0 d) = \<lfloor>(pc', d')\<rfloor> \<longleftrightarrow> d' \<ge> d \<and> match_ex_table P C pc (compxE2 e 0 0) = \<lfloor>(pc', d' - d)\<rfloor>"
by simp
lemma match_ex_table_compxEs2_stack_conv:
"d > 0 \<Longrightarrow> match_ex_table P C pc (compxEs2 es 0 d) = \<lfloor>(pc', d')\<rfloor> \<longleftrightarrow> d' \<ge> d \<and> match_ex_table P C pc (compxEs2 es 0 0) = \<lfloor>(pc', d' - d)\<rfloor>"
by(simp add: compxEs2_stack_xlift_convs)
lemma fixes e :: "'addr expr1" and es :: "'addr expr1 list"
shows match_ex_table_compxE2_not_same: "match_ex_table P C pc (compxE2 e n d) = \<lfloor>(pc', d')\<rfloor> \<Longrightarrow> pc \<noteq> pc'"
and match_ex_table_compxEs2_not_same:"match_ex_table P C pc (compxEs2 es n d) = \<lfloor>(pc', d')\<rfloor> \<Longrightarrow> pc \<noteq> pc'"
apply(induct e n d and es n d rule: compxE2_compxEs2_induct)
apply(auto simp add: match_ex_table_append match_ex_entry simp del: compxE2_size_convs compxEs2_size_convs compxE2_stack_xlift_convs compxEs2_stack_xlift_convs split: if_split_asm)
done
end
|
State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₁.support
⊢ a ∈ (g₁ + g₂).support State After: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₁.support
this : ¬a ∈ g₂.support
⊢ a ∈ (g₁ + g₂).support Tactic: have : a ∉ g₂.support := disjoint_left.1 h ha State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₁.support
this : ¬a ∈ g₂.support
⊢ a ∈ (g₁ + g₂).support State After: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₁ a ≠ 0
this : ↑g₂ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0 Tactic: simp only [mem_support_iff, not_not] at * State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₁ a ≠ 0
this : ↑g₂ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0 State After: no goals Tactic: simpa only [add_apply, this, add_zero] State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₂.support
⊢ a ∈ (g₁ + g₂).support State After: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₂.support
this : ¬a ∈ g₁.support
⊢ a ∈ (g₁ + g₂).support Tactic: have : a ∉ g₁.support := disjoint_right.1 h ha State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : a ∈ g₂.support
this : ¬a ∈ g₁.support
⊢ a ∈ (g₁ + g₂).support State After: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₂ a ≠ 0
this : ↑g₁ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0 Tactic: simp only [mem_support_iff, not_not] at * State Before: α : Type u_1
β : Type ?u.284137
γ : Type ?u.284140
ι : Type ?u.284143
M : Type u_2
M' : Type ?u.284149
N : Type ?u.284152
P : Type ?u.284155
G : Type ?u.284158
H : Type ?u.284161
R : Type ?u.284164
S : Type ?u.284167
inst✝¹ : AddZeroClass M
inst✝ : DecidableEq α
g₁ g₂ : α →₀ M
h : Disjoint g₁.support g₂.support
a : α
ha✝ : a ∈ g₁.support ∪ g₂.support
ha : ↑g₂ a ≠ 0
this : ↑g₁ a = 0
⊢ ↑(g₁ + g₂) a ≠ 0 State After: no goals Tactic: simpa only [add_apply, this, zero_add]
|
context("AddFactor")
test_that("AddFactor adds a column, restricts dates", {
#seq.Date(as.Date("2009-03-1"), by="month", length.out = 36) %m-% days(1)
data("factorReturns.US")
data("factorReturns.Intl")
fr1 <- factorReturns.US
fr2 <- factorReturns.Intl[,1:2]
fr <- AddFactor(fr1, fr2, TRUE)
fr1[fr1$date == fr$date[1], 2]
expect_equal(ncol(fr), ncol(fr1) + 1)
expect_equal(max(fr$date), min(max(fr1$date), max(fr2$date)))
expect_equal(min(fr$date), max(min(fr1$date), min(fr2$date)))
expect_equal(as.numeric(fr1[fr1$date == fr$date[1], 2, drop=TRUE]), as.numeric(fr[1, 2] + fr[1, ncol(fr)]))
fr <- AddFactor(fr1, fr2, FALSE)
expect_equal(as.numeric(fr1[fr1$date == fr$date[1], 2, drop=TRUE]), as.numeric(fr[1, 2]))
})
|
Non-commercial and community projects assistance.
THERE ARE MANY AREAS OF OUR PLANNING AND ENVIRONMENTAL EXPERIENCE THAT OFFERS SUPPORT FOR PRIVATE LAND AND HOMEOWNERS, AS WELL AS COMMUNITY GROUPS.
People undertaking a build project, whether for a new home, an extension to your property, renewable technologies on your land, or a community-based build, tend to focus on what the building will look like and the facilities it will offer. All of these things are part of an exciting time. At Cogeo we provide the support services and management that make these builds happen, allowing you to focus on the areas you need to.
Our consultants have vast experience within these areas of planning and can guide you at every step to ensure "No Surprises". This is critical: with the correct planning processes, and an understanding of the planning authorities' criteria and how to meet them, the daunting prospect of the unforeseen is addressed from day one. For Cogeo, it's about protecting your best interests: making sure that your finished project meets your objectives, ensuring you meet all legislative requirements, and seeing your project given the final seal of approval.
TO DISCUSS YOUR PROJECT IN DETAIL WITH ONE OF OUR ADVISERS, CONTACT THE RELEVANT UK OFFICE HERE.
|
Formal statement is: lemma sum_norm_bound: "norm (sum f S) \<le> of_nat (card S)*K" if "\<And>x. x \<in> S \<Longrightarrow> norm (f x) \<le> K" for f :: "'b \<Rightarrow> 'a" Informal statement is: If $f$ is a function from a finite set $S$ to a normed vector space $V$, and if $f(x)$ is bounded by $K$ for all $x \in S$, then $\sum_{x \in S} f(x)$ is bounded by $|S|K$.
|
r=0.15
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7dg6x/media/images/d7dg6x-025/svc:tesseract/full/full/0.15/default.jpg Accept:application/hocr+xml
|
= UNO components written in Ruby
The component loader for Ruby allows to write an UNO component
in Ruby.
== Ruby instance
When the loader is requested, an instance of Ruby is created
inside the office process. The instance never dies until the
office shuts down, and only one instance is created per
office instance.
== Loading process of the components
The script file which implements UNO components is loaded
as a string and evaluated in an un-named module as follows:
path = get_path(ctx, url)
mod = Module.new
mod.module_eval(IO.read(path), path)
After the evaluation, the loader tries to find classes which include
the Uno::UnoComponentBase module. It provides nothing, but it is
used as an indicator of UNO components.
Components should also define two constants which provide
information about the component itself.
--- IMPLE_NAME
The implementation name of the component in String.
--- SERVICE_NAMES
The service names which are supported by the component in Array.
= UNO component example
Here is a simple UNO component written in Ruby.
module MyJobModule
class MyJob
# provides type information based on included modules of
# UNO interfaces.
include Uno::UnoBase
# tells this class should be detected as an UNO component
# to the loader.
include Uno::UnoComponentBase
# provides execution of the task.
include Runo::Com::Sun::Star::Task::XJobExecutor
# provides information about this component.
# The XServiceInfo module is extended in the loader
# module and it provides three methods based on
# the following two constants.
include Runo::Com::Sun::Star::Lang::XServiceInfo
IMPLE_NAME = "mytools.job.MyJob"
SERVICE_NAMES = ["mytools.job.MyJob"]
# new(ctx, args)
# The component context is passed as first argument and
# additional arguments might be passed in the second
# argument when the component instantiated by
# createInstanceWithArgumentsAndContext method.
def initialize(ctx, args)
@ctx = ctx
end
# XJobExecutor
#
def trigger(args)
puts args
end
end
end
== How to register
When you make your own UNO component in Ruby, you have to pack it
into an OXT extension. Recent versions of the office support
passive registration of components through the extension
manager.
The script provider for Ruby is implemented as an UNO component
written in Ruby, and you can refer to it as an example.
Briefly, you have to prepare your extension package as follows:
/
|- META-INF/
| |- manifest.xml
|- description.xml
|- lib/
| |- comp.rb
| |- comp.components
These files have to be packed into a zip archive and name it
with oxt file extension.
manifest.xml file defines which files should be processed
during the installation of your extension package.
<?xml version="1.0" encoding="UTF-8"?>
<manifest:manifest>
<manifest:file-entry manifest:full-path="lib/comp.components"
manifest:media-type="application/vnd.sun.star.uno-components"/>
</manifest:manifest>
The media-type application/vnd.sun.star.uno-components specifies
the passive registration, and its information is stored in the
lib/comp.components file.
The comp.components file provides information about your components.
<?xml version="1.0" encoding="UTF-8"?>
<components xmlns="http://openoffice.org/2010/uno-components">
<component loader="com.sun.star.loader.Ruby" uri="rubyscriptprovider.rb">
<implementation name="mytools.script.provider.ScriptProviderForRuby">
<service name="com.sun.star.script.provider.ScriptProviderForRuby"/>
<service name="com.sun.star.script.provider.LanguageScriptProvider"/>
</implementation>
</component>
</components>
Your component is written in Ruby, so it should be loaded by
com.sun.star.loader.Ruby, and its file is stored in the location
specified by the uri attribute. It has to have an implementation name,
and here it supports two services.
References:
* ((<URL:http://wiki.services.openoffice.org/wiki/Documentation/DevGuide/Extensions/Extensions>))
|
[STATEMENT]
lemma Domainp_Grp: "Domainp (BNF_Def.Grp A f) = (\<lambda>x. x \<in> A)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Domainp (BNF_Def.Grp A f) = (\<lambda>x. x \<in> A)
[PROOF STEP]
by(auto simp add: fun_eq_iff Grp_def)
|
## Copyright (c) 2018-2021, Carnegie Mellon University
## See LICENSE for details
# from FFTX C++ unparser
Class(TFCall, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, cconf) -> Checked(IsSPL(nt), [nt, cconf]) ],
transpose := self >> ObjId(self)(self.params[1].transpose(), self.params[2]).withTags(self.getTags())
));
Class(TDeviceCall, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, cconf) -> Checked(IsSPL(nt), [nt, cconf]) ],
transpose := self >> ObjId(self)(self.params[1].transpose(), self.params[2]).withTags(self.getTags())
));
Class(TDecl, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, vlist) -> Checked(IsSPL(nt), [nt, vlist]) ],
transpose := self >> ObjId(self)(self.params[1].transpose(), self.params[2]).withTags(self.getTags())
));
Class(TDAGNode, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, ylist, xlist) -> Checked(IsSPL(nt), [nt, When(IsList(ylist), ylist, [ylist]), When(IsList(xlist), xlist, [xlist])]) ],
transpose := self >> ObjId(self)(self.params[1].transpose(), self.params[2], self.params[3]).withTags(self.getTags())
));
Class(TDAG, TCompose, rec(
terminate := self >> Error("Not yet implemented."),
from_rChildren := (self, rch) >> let(
len := Length(rch),
transposed := rch[len-1],
tags := rch[len],
t := ApplyFunc(ObjId(self), [rch{[1..len-2]}]),
tt := When(transposed, t.transpose(), t),
attrTakeA(tt.withTags(tags), self)
),
rChildren := self >>
Concatenation(self.params[1], [self.transposed, self.tags]),
rSetChild := meth(self, n, newChild)
local l;
l := Length(self.params[1]);
if n <= l then
self.params[1][n] := newChild;
elif n = l+1 then
self.transposed := newChild;
elif n = l+2 then
self.tags := newChild;
else Error("<n> must be in [1..", l+2, "]");
fi;
# self.canonizeParams(); ??
self.dimensions := self.dims();
end,
));
## the dims here are ordered as in the MDDFT case: e.g., for 3D: odims[1] = x, odims[2] = y, odims[3] = z
#Class(TResample, Tagged_tSPL_Container, rec(
# abbrevs := [ (odims, idims) -> [ odims, idims, [] ],
# (odims, idims, shifts) -> [ odims, idims, shifts ],],
# dims := self >> [Product(self.params[1]), Product(self.params[2])],
# isReal := True,
# terminate := self >> Error("Not yet implemented."),
# transpose := self >> ObjId(self)(self.params[2], self.params[1], self.params[3]).withTags(self.getTags())
#));
Declare(TSparseMat);
Class(TSparseMat, Tagged_tSPL_Container, rec(
abbrevs := [ (dims, entries) -> [ dims, entries ] ],
dims := self >> self.params[1],
terminate := self >> Error("Not yet implemented."),
transpose := self >> TSparseMat(Reversed(self.params[1]), [x->Error("Not yet done")])
));
Declare(TIterVStack);
Class(TIterHStack, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, idx) -> Checked(IsSPL(nt), IsVar(idx),
[nt, idx]) ],
dims := self >> self.params[1].dims(){[1]} :: self.params[1].dims(){[2]} * self.params[2].range,
terminate := self >> IterHStack(self.params[2], self.params[2].range, self.params[1].terminate()),
transpose := self >> TIterVStack(self.params[1].transpose(), self.params[2])
.withTags(self.getTags()),
isReal := self >> self.params[1].isReal(),
normalizedArithCost := self >>
self.params[1].normalizedArithCost() * self.params[2].range,
# doNotMeasure := true,
HashId := self >> let(
p := self.params,
h := When(IsBound(p[1].HashId), p[1].HashId(), p[1]),
[h, p[2].range] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TIterVStack, Tagged_tSPL_Container, rec(
abbrevs := [ (nt, idx) -> Checked(IsSPL(nt), IsVar(idx),
[nt, idx]) ],
dims := self >> (self.params[1].dims(){[1]} * self.params[2].range) :: self.params[1].dims(){[2]},
terminate := self >> IterVStack(self.params[2], self.params[2].range, self.params[1].terminate()),
transpose := self >> TIterHStack(self.params[1].transpose(), self.params[2])
.withTags(self.getTags()),
isReal := self >> self.params[1].isReal(),
normalizedArithCost := self >>
self.params[1].normalizedArithCost() * self.params[2].range,
# doNotMeasure := true,
HashId := self >> let(
p := self.params,
h := When(IsBound(p[1].HashId), p[1].HashId(), p[1]),
[h, p[2].range] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TNoDiagPullin, Tagged_tSPL_Container, rec(
abbrevs := [ s -> Checked(IsSPL(s), [s]) ],
HashId := self >> let(
p := self.params[1],
h := When(IsBound(p.HashId), p.HashId(), p),
[ self.__name__, h ] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TNoDiagPullinRight, Tagged_tSPL_Container, rec(
abbrevs := [ s -> Checked(IsSPL(s), [s]) ],
HashId := self >> let(
p := self.params[1],
h := When(IsBound(p.HashId), p.HashId(), p),
[ self.__name__, h ] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TNoDiagPullinLeft, Tagged_tSPL_Container, rec(
abbrevs := [ s -> Checked(IsSPL(s), [s]) ],
HashId := self >> let(
p := self.params[1],
h := When(IsBound(p.HashId), p.HashId(), p),
[ self.__name__, h ] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TNoPullRight, Tagged_tSPL_Container, rec(
abbrevs := [ s -> Checked(IsSPL(s), [s]) ],
HashId := self >> let(
p := self.params[1],
h := When(IsBound(p.HashId), p.HashId(), p),
[ self.__name__, h ] :: When(IsBound(self.tags), self.tags, [])
)
));
Class(TNoPullLeft, Tagged_tSPL_Container, rec(
abbrevs := [ s -> Checked(IsSPL(s), [s]) ],
HashId := self >> let(
p := self.params[1],
h := When(IsBound(p.HashId), p.HashId(), p),
[ self.__name__, h ] :: When(IsBound(self.tags), self.tags, [])
)
));
#F tSPL ColMajor -> RowMajor transformation
Class(TColMajor, Tagged_tSPL_Container, rec(
_short_print := true,
abbrevs := [ (A) -> Checked(IsNonTerminal(A) or IsSPL(A), [A]) ],
dims := self >> 2*self.params[1].dims(),
terminate := self >> Mat(MatSPL(RC(self.params[1]))),
transpose := self >> ObjId(self)(
self.params[1].conjTranspose()).withTags(self.getTags()),
conjTranspose := self >> self.transpose(),
isReal := self >> true,
# Do not use doNotMeasure, this will prevent TRC_By_Def from ever being found!
doNotMeasure := false,
normalizedArithCost := self >> self.params[1].normalizedArithCost(),
HashId := self >> let(
h := [ When(IsBound(self.params[1].HashId), self.params[1].HashId(),
self.params[1]) ],
When(IsBound(self.tags), Concatenation(h, self.tags), h))
));
|
Formal statement is: lemma summable_Re: "summable f \<Longrightarrow> summable (\<lambda>x. Re (f x))" Informal statement is: If a complex-valued function is summable, then its real part is also summable.
|
The argument of $0$ is $0$.
|
If you would like to support the Down Syndrome Association of West Michigan's campaign of kindness, you may visit the organization's crowd funding site to help pay for supplies. The more money raised, the more random acts of kindness those in our community with Down syndrome will be able to commit.
The Down Syndrome Association of West Michigan is a resource and advocacy organization promoting public awareness and supporting lifelong opportunities for individuals with Down syndrome and their families. Down syndrome is the most commonly occurring chromosomal condition, with one in every 700 babies being born with Down syndrome. There are more than 400,000 people living with Down syndrome in the United States. For more information, visit www.dsawm.org.
|
{-# OPTIONS --rewriting #-}
module JVM.Transform.Assemble where
open import Agda.Builtin.Equality.Rewrite
open import Data.Nat using (ℕ)
open import Data.List as List
open import Data.List.Properties
open import Data.List.Relation.Unary.All as All
open import Data.List.Membership.Propositional
open import Data.List.Membership.Propositional.Properties
open import Data.List.Relation.Unary.Any
open import Data.Product hiding (swap)
open import Relation.Unary hiding (_∈_)
open import Relation.Binary.PropositionalEquality
open import Relation.Ternary.Core
open import Relation.Ternary.Structures.Syntax
open import Relation.Ternary.Data.Bigstar
open import Relation.Ternary.Data.ReflexiveTransitive as Star
open import Relation.Ternary.Data.IndexedMonoid
open import Relation.Ternary.Construct.Bag.Properties
open import JVM.Types
open import JVM.Syntax.Values
open import JVM.Model StackTy
open import JVM.Model.Properties
open import JVM.Syntax.Labeling StackTy
open import JVM.Syntax.Instructions hiding (⟨_↝_⟩; Instr)
{-# REWRITE ++-assoc #-}
Typing = List StackTy
variable
J₁ J₂ J : Typing
module _ Γ where
data Instr (J : Typing) : StackTy → StackTy → Set where
goto : ψ₁ ∈ J → Instr J ψ₁ ψ₂
if : ∀ {as} → Comparator as → ψ₁ ∈ J → Instr J (as ++ ψ₁) ψ₂
nop : Instr J ψ₁ ψ₁
push : Const a → Instr J ψ₁ (a ∷ ψ₁)
pop : Instr J (a ∷ ψ₁) ψ₁
dup : Instr J (a ∷ ψ₁) (a ∷ a ∷ ψ₁)
swap : Instr J (a ∷ b ∷ ψ₁) (b ∷ a ∷ ψ₁)
bop : NativeBinOp a b c → Instr J (b ∷ a ∷ ψ₁) (c ∷ ψ₁)
load : a ∈ Γ → Instr J ψ₁ (a ∷ ψ₁)
store : a ∈ Γ → Instr J (a ∷ ψ₁) ψ₁
ret : Instr J (a ∷ ψ₁) ψ₂
data Bytecode' (J : Typing) : Typing → StackTy → StackTy → Set where
nil : Bytecode' J [] ψ₁ ψ₁
cons : ∀ {I} → Instr J ψ₁ ψ₂ → Bytecode' J I ψ₂ ψ₃ → Bytecode' J (ψ₁ ∷ I) ψ₁ ψ₃
Bytecode : Typing → StackTy → StackTy → Set
Bytecode J = Bytecode' J J
module _ where
Addressing : Labels → Typing → Set
Addressing lbs J = All (λ ψ → ψ ∈ J) lbs
instr-tf : ∀ {Γ ℓs} → Addressing ℓs J → ⟨ Γ ∣ ψ₁ ↝ ψ₂ ⟩ ℓs → Instr Γ J ψ₁ ψ₂
instr-tf ρ noop = nop
instr-tf ρ pop = pop
instr-tf ρ (push x) = push x
instr-tf ρ dup = dup
instr-tf ρ swap = swap
instr-tf ρ (bop x) = bop x
instr-tf ρ (load x) = load x
instr-tf ρ (store x) = store x
instr-tf (ℓ ∷ _) (goto refl) = goto ℓ
instr-tf (ℓ ∷ _) (if x refl) = if x ℓ
Extractor : ∀ Γ → StackTy → StackTy → Intf → Set
Extractor Γ ψ₁ ψ₂ Φ = {J₁ : Typing} → Addressing (down Φ) J₁
→ ∃ λ J₂ → Bytecode' Γ (J₁ ++ J₂) J₂ ψ₁ ψ₂ × Addressing (up Φ) (J₁ ++ J₂)
exec-extractor : ∀ {Γ} → Extractor Γ ψ₁ ψ₂ ε → ∃ λ J → Bytecode Γ J ψ₁ ψ₂
exec-extractor c = let _ , code , _ = c {[]} [] in -, code
label-addresses : ∀ {x} → Labeling ψ₁ x → All (λ z → z ∈ J₁ ++ ψ₁ ∷ []) x
label-addresses (refl ∙⟨ σ ⟩ qx) = joinAll (λ ()) σ (∈-++⁺ʳ _ (here refl) ∷ []) (addr' qx)
where
addr' : ∀ {x} → Bigstar (Own List.[ ψ₁ ]) x → All (λ z → z ∈ J₁ ++ ψ₁ ∷ []) x
addr' emp = []
addr' (cons (refl ∙⟨ σ ⟩ qx)) = joinAll (λ ()) σ (∈-++⁺ʳ _ (here refl) ∷ []) (addr' qx)
-- We can extract bytecode that does absolute addressing from the fancy
-- intrinsically-typed representation.
-- We do this in a single recursive pass over the bytecode, collecting a global map of instruction typings,
-- and also collecting indices into that map for every defined label on the forward pass, and
-- eliminating labels on the backwards pass.
--
-- The interface compositions guide the way. The key lemma is `sinkᵣ`
-- which says that in order to know what l imports in l ✴ r,
-- it is sufficient to know what l ✴ r imports, *and* what r exports.
--
-- In this setting 'l' is the head of some suffix of bytecode.
-- The imports of 'l ✴ r' have been collected from the labels defined in the prefix.
-- We get the exports of the tail 'r' of the suffix from the recursive call.
extract : ∀ {Γ} → ∀[ ⟪ Γ ∣ ψ₁ ↝ ψ₂ ⟫ ⇒ Extractor Γ ψ₁ ψ₂ ]
extract {ψ₁ = ψ₁} nil ρ = [] , nil , []
extract {ψ₁ = ψ₁} (cons (labeled (↑ ls ∙⟨ σ₁ ⟩ ↓ i) ∙⟨ σ₂ ⟩ is)) ρ = let
-- addressing from ↑ ls
ρ₀ = label-addresses ls
-- Addresses for the labels
ρ₁ = source σ₁ ρ₀ []
-- Compute addresses for the tail,
-- by combining the addresses from the state with the addresses for the labels.
-- We don't need everything, only what is imported in the head instruction.
ρ₂ = sinkᵣ σ₂ ρ₁ (All.map (∈-++⁺ˡ {ys = List.[ ψ₁ ]}) ρ)
-- Recursively extract first to get addressing for forward jumps.
-- These addresses are exported by the tail.
k , code , ρ′ = extract is ρ₂
-- Compute the addresses imported by i
ρ₃ = sinkᵣ (∙-comm σ₂) ρ′ (All.map ∈-++⁺ˡ ρ)
ρ₄ = sinkᵣ σ₁ (All.map ∈-++⁺ˡ ρ₀) ρ₃
-- Compute the exported addresses
ρ₅ = source σ₂ (All.map ∈-++⁺ˡ ρ₁) ρ′
in ψ₁ ∷ k , cons (instr-tf ρ₄ i) code , ρ₅
-- Same as above, but simpler
extract {ψ₁ = ψ₁} (cons (instr (↓ i) ∙⟨ σ ⟩ b)) ρ = let
ρ₂ = sinkᵣ σ [] (All.map (∈-++⁺ˡ {ys = List.[ ψ₁ ]}) ρ)
k , code , ρ′ = extract b ρ₂
ρ₃ = sinkᵣ (∙-comm σ) ρ′ (All.map ∈-++⁺ˡ ρ)
ρ₄ = source σ [] ρ′
in ψ₁ ∷ k , cons (instr-tf ρ₃ i) code , ρ₄
module Show where
open import Data.String as S hiding (show)
open import Data.Integer as I
open import Data.Fin as Fin
open import Data.Nat.Show as NS
open import Data.Bool.Show as BS
showInt : ℤ → String
showInt (+ n) = NS.show n
showInt (-[1+ n ]) = "-" S.++ NS.show (ℕ.suc n)
showFin : ∀ {n} → Fin n → String
showFin f = NS.show (Fin.toℕ f)
showComp : Comparator as → String
showComp eq = "eq"
showComp ne = "ne"
showComp lt = "lt"
showComp ge = "ge"
showComp gt = "gt"
showComp le = "le"
showComp icmpge = "icmpge"
showComp icmpgt = "icmpgt"
showComp icmpeq = "icmpeq"
showComp icmpne = "icmpne"
showComp icmplt = "icmplt"
showComp icmple = "icmple"
showOp : NativeBinOp a b c → String
showOp add = "add"
showOp sub = "sub"
showOp mul = "mul"
showOp div = "div"
showOp xor = "xor"
showConst : Const a → String
showConst Const.null = "null"
showConst (num x) = showInt x
showConst (bool x) = BS.show x
showReg : ∀ {Γ} → a ∈ Γ → String
showReg e = showFin (index e)
showLabel : ψ ∈ J → String
showLabel = λ ℓ → showFin (index ℓ)
showInstr : ∀ {Γ} → Instr Γ J ψ₁ ψ₂ → String
showInstr (goto ℓ) = "goto " S.++ showLabel ℓ
showInstr (if c ℓ) = "if " S.++ (showComp c S.++ " " S.++ showLabel ℓ)
showInstr nop = "noop"
showInstr (push c) = "push " S.++ (showConst c)
showInstr pop = "pop"
showInstr dup = "dup"
showInstr swap = "swap"
showInstr (bop o) = "bop " S.++ (showOp o)
showInstr (load r) = "load " S.++ (showReg r)
showInstr (store r)= "store " S.++ (showReg r)
showInstr ret = "ret"
showTy : Ty → String
showTy boolean = "bool"
showTy byte = "byte"
showTy short = "short"
showTy int = "int"
showTy long = "long"
showTy char = "char"
showTy (ref x) = "ref"
showTy (array t) = "array"
showStackTy : StackTy → String
showStackTy [] = "[]"
showStackTy (x ∷ ψ) = showTy x S.++ " : " S.++ showStackTy ψ
showBytecode : ∀ {Γ J} → Bytecode Γ J ψ₁ ψ₂ → String
showBytecode b = showBytecode' b 0
where
open import Data.Nat
showBytecode' : ∀ {Γ J J'} → Bytecode' Γ J J' ψ₁ ψ₂ → ℕ → String
showBytecode' nil n = ""
showBytecode' (cons {ψ₁ = ψ₁} {ψ₂} i b) n =
NS.show n S.++ ": "
S.++ showInstr i
S.++ "\t⟨ " S.++ showStackTy ψ₁ S.++ " ↝ " S.++ showStackTy ψ₂ S.++ " ⟩"
S.++ "\n" S.++ showBytecode' b (ℕ.suc n)
|
Isotope. Elemental variance defined by nuclei. From the Greek "isos" meaning equal and "topos" meaning place. Seven renowned musicians of distinct musical heritage with a shared vision and purpose, when combined, produce an utterly unique aural experience. Forged in the aftermath of Japan's nuclear catastrophe of 2011, this is an album of precision crafted cinematic soundscapes fusing elements of jazz, rock and electronica layered with heartfelt vocals. A poignant musical depiction of our shared future hanging in the balance, Isotope sees J's Bee artfully maturing beyond genre.
The band, which formed over 10 years ago, stands by the original sound that set them apart, refraining from commercialising their music. Continuous strong support for their borderless musical style and performance ability has allowed J's Bee to interact and connect with people from all walks of life. With previous tracks featured on Japanese TV, including a sync with a Smirnoff advertisement, they have been invited to numerous music festivals and gigs, including Metamorphose (one of Japan's famous festivals, advocated by the Dalai Lama), the World Festival of Sacred Music Hiroshima, and the Fuji Rock Festival, where the band's spectrum of backgrounds spanning Classical, Jazz, Rock, Dub, Electro and Club music was fully embraced. Recorded at Trip Echo studio and mixed by Yagotoyama Kobo, Isotope is a lushly arranged album that goes beyond genre.
|
State Before: C : Type u
inst✝³ : Category C
D : Type u'
inst✝² : Category D
inst✝¹ : HasZeroMorphisms C
X Y : C
f : X ⟶ Y
inst✝ : Mono f
i : IsZero Y
⊢ IsZero X State After: C : Type u
inst✝³ : Category C
D : Type u'
inst✝² : Category D
inst✝¹ : HasZeroMorphisms C
X Y : C
f : X ⟶ Y
inst✝ : Mono f
i : IsZero Y
hf : f = 0
⊢ IsZero X Tactic: have hf := i.eq_zero_of_tgt f State Before: C : Type u
inst✝³ : Category C
D : Type u'
inst✝² : Category D
inst✝¹ : HasZeroMorphisms C
X Y : C
f : X ⟶ Y
inst✝ : Mono f
i : IsZero Y
hf : f = 0
⊢ IsZero X State After: C : Type u
inst✝³ : Category C
D : Type u'
inst✝² : Category D
inst✝¹ : HasZeroMorphisms C
X Y : C
i : IsZero Y
inst✝ : Mono 0
⊢ IsZero X Tactic: subst hf State Before: C : Type u
inst✝³ : Category C
D : Type u'
inst✝² : Category D
inst✝¹ : HasZeroMorphisms C
X Y : C
i : IsZero Y
inst✝ : Mono 0
⊢ IsZero X State After: no goals Tactic: exact IsZero.of_mono_zero X Y
|
##### Run this cell to set your notebook up on Google Colab
```python
!apt-get install -y xvfb python-opengl ffmpeg > /dev/null 2>&1
!git clone https://github.com/yfletberliac/rlss2019-hands-on.git > /dev/null 2>&1
!pip install -q torch==1.1.0 torchvision pyvirtualdisplay piglet > /dev/null 2>&1
```
# <font color='#ed7d31'>Deep Q Networks</font>
------------
You can find the original paper [here](https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf).
## <font color='#ed7d31'>Preliminaries: Q Learning</font>
#### <font color='#ed7d31'>Q-Value</font>
**Q-Value** is a measure of the overall expected reward assuming the agent is in state $s$ and performs action $a$, and then continues playing until the end of the episode following some policy $\pi$. It is defined mathematically as:
\begin{equation}
Q^{\pi}\left(s_{t}, a_{t}\right)=E\left[R_{t+1}+\gamma R_{t+2}+\gamma^{2} R_{t+3}+\ldots | s_{t}, a_{t}\right]
\end{equation}
where $R_{t+1}$ is the immediate reward received after performing action $a_{t}$ in state $s_{t}$ and $\gamma$ is the discount factor and controls the importance of the future rewards versus the immediate ones: the lower the discount factor is, the less important future rewards are.
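For intuition: if the agent collects a constant reward of $1$ at every step of an infinitely long episode, the sum above is a geometric series,
\begin{equation}
Q^{\pi}\left(s_{t}, a_{t}\right)=\sum_{k=0}^{\infty} \gamma^{k}=\frac{1}{1-\gamma},
\end{equation}
so $\gamma=0.99$ gives a value of $100$ while $\gamma=0.9$ gives only $10$: the smaller the discount factor, the shorter the horizon the agent effectively cares about.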
#### <font color='#ed7d31'>Bellman Optimality Equation</font>
Formally, the Bellman equation defines the relationships between a given state (or, in our case, a **state-action pair**) and its successors. While many forms exist, one of the most common is the **Bellman Optimality Equation** for the optimal **Q-Value**, which is given by:
\begin{equation}
Q^{*}(s, a)=\sum_{s^{\prime}, r} p\left(s^{\prime}, r | s, a\right)\left[r+\gamma \max _{a^{\prime}} Q^{*}\left(s^{\prime}, a^{\prime}\right)\right]
\end{equation}
Of course, when no uncertainty exists (transition probabilities are either 0 or 1), we have:
\begin{equation}
Q^{*}(s, a)=r(s, a)+\gamma \max _{a^{\prime}} Q^{*}\left(s^{\prime}, a^{\prime}\right)
\end{equation}
#### <font color='#ed7d31'>Q-Value Iteration</font>
We define the corresponding Bellman backup operator:
\begin{equation}
[\mathcal{T} Q]\left(s, a\right)=r(s, a)+\gamma \max _{a^{\prime}} Q\left(s^{\prime}, a^{\prime}\right)
\end{equation}
$Q$ is a fixed point of $\mathcal{T}$:
\begin{equation}
\mathcal{T} Q^{*}=Q^{*}
\end{equation}
If we apply the Bellman operator $\mathcal{T}$ repeatedly to any initial $Q$, the series converges to $Q^{*}$:
\begin{equation}
Q, \mathcal{T} Q, \mathcal{T}^{2} Q, \cdots \rightarrow Q^{*}
\end{equation}
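Before moving to neural networks, this convergence claim can be checked numerically with tabular Q-Value iteration. Below is a minimal sketch on a toy two-state MDP (the transition table and rewards are made up purely for illustration; none of this is reused later in the notebook):

```python
import numpy as np

# Toy deterministic MDP: 2 states, 2 actions (made-up numbers, illustration only).
next_state = np.array([[0, 1],
                       [0, 1]])   # next_state[s, a]
reward = np.array([[0.0, 1.0],
                   [2.0, 0.0]])   # reward[s, a]
gamma = 0.9

Q = np.zeros((2, 2))
for _ in range(200):
    # One application of the Bellman backup operator T
    Q = reward + gamma * Q[next_state].max(axis=-1)

print(Q)  # applying T once more leaves Q (numerically) unchanged: a fixed point
```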
# <font color='#ed7d31'>Imports</font>
```python
import sys
sys.path.insert(0, './rlss2019-hands-on/utils')
# If using the Docker image, replace by:
# sys.path.insert(0, '../utils')
import gym, random, os.path, math, glob, csv, base64
from pathlib import Path
from timeit import default_timer as timer
from datetime import timedelta
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
%matplotlib inline
from qfettes_plot import plot_all_data
from qfettes_wrappers import *
from openai_wrappers import make_atari, wrap_deepmind
from gym.wrappers import Monitor
from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
from IPython.display import clear_output
```
------------
# <font color='#ed7d31'>Deep Q learning</font>
Usually in Deep RL, the **Q-Value** is defined as $Q(s,a;\theta)$ where $\theta$ represents the parameters of the function approximation used.
For *MuJoCo* or *Roboschool* environments, we usually use a simple 2- or 3-layer MLP, whereas when using **raw pixels for observations**, such as in *Atari 2600* games, we usually use a 1-, 2- or 3-layer CNN.
In our case, since we want to train DQN on *CartPole*, we will use a 3-layer perceptron for our function approximation.
## <font color='#ed7d31'>Network declaration</font>
In this section, we build $Q(s,a;\theta)$ function approximation. Since the input is composed of 4 scalars, namely:
<center>[position of cart, velocity of cart, angle of pole, rotation rate of pole]</center>
we build a FCN -> ReLU -> FCN -> ReLU -> FCN neural network. As an exercise, change the architecture of the network:
1. Change the 1st fully-connected layer from 8 hidden neurons to 16
2. Create `self.fc2` in `__init__` with 16 neurons
3. Create `self.fc3` with `self.num_actions` as the output size
4. Add it to the network in `forward` with no activation function
```python
class DQN(nn.Module):
def __init__(self, input_shape, num_actions):
super().__init__()
self.input_shape = input_shape
self.num_actions = num_actions
self.fc1 = nn.Linear(self.input_shape[0], 8)
self.fc2 = ...
self.fc3 = ...
def forward(self, x):
x = F.relu(self.fc2(F.relu(self.fc1(x))))
x = ...
return x
```
## <font color='#ed7d31'>Safety checks</font>
#### <font color='#ed7d31'>Network architecture</font>
As a *safety check*, inspect the resulting network in the next cell. For instance, the total number of trainable parameters should change with the architecture. Check the correctness of `in_features` and `out_features`.
```python
env_id = 'CartPole-v0'
env = gym.make(env_id)
network = DQN(env.observation_space.shape, env.action_space.n)
print("Observation space:\n", env.observation_space.shape, "\n")
print("Network architecture:\n", network, "\n")
model_parameters = filter(lambda p: p.requires_grad, network.parameters())
print("Total number of trainable parameters:\n", sum([np.prod(p.size()) for p in model_parameters]))
```
Observation space:
(4,)
Network architecture:
DQN(
(fc1): Linear(in_features=4, out_features=8, bias=True)
(fc2): Linear(in_features=8, out_features=8, bias=True)
(fc3): Linear(in_features=8, out_features=2, bias=True)
)
Total number of trainable parameters:
130
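Note that the architecture printed above still uses 8 hidden neurons; once you make the changes suggested in the exercise (16 neurons in `fc1` and `fc2`), the count should become $4\cdot16+16+16\cdot16+16+16\cdot2+2 = 386$ trainable parameters.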
#### <font color='#ed7d31'>Run a Policy with Random Actions</font>
What does the working environment look like? It's always useful to know the details of the environment you train your policy on: its dynamics, the size of the action and observation spaces, etc. Below we display a random policy on `CartPole-v0`.
```python
display = Display(visible=0, size=(1400, 900))
display.start()
def show_video():
html = []
for mp4 in Path("videos").glob("*.mp4"):
video_b64 = base64.b64encode(mp4.read_bytes())
        html.append('''<video alt="{}" autoplay loop controls style="height: 400px;">
                         <source src="data:video/mp4;base64,{}" type="video/mp4" />
                       </video>'''.format(mp4, video_b64.decode('ascii')))
ipythondisplay.display(ipythondisplay.HTML(data="<br>".join(html)))
env = Monitor(env, './videos', force=True, video_callable=lambda episode: True)
for episode in range(2):
done = False
obs = env.reset()
while not done:
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
env.close()
show_video()
```
xdpyinfo was not found, X start can not be checked! Please install xdpyinfo!
We can see the episode ending prematurely because the pole drops.
-----
<font color='#ed7d31'>**Question**:</font>
It is also important to identify some of the characteristics of the problem. `CartPole-v0` can be described as a **fully-observable**, **deterministic**, **continuous state space**, with a **discrete action space** and **frequent rewards**. Take some time to understand each of these terms :-) Try to find the opposite term for each of them, e.g. deterministic <> stochastic.
## <font color='#ed7d31'>Experience Replay Memory</font>
As RL tasks usually have no pre-generated training set to learn from, in off-policy learning our agent must keep records of all the state-transitions it encounters so it can **learn from them later**. The memory buffer used to store them is often referred to as the **Experience Replay Memory**. There are several types and architectures of these memory buffers, but two very common ones are:
- the *cyclic memory buffers*: they make sure the agent keeps training on its recent behavior rather than on transitions that might no longer be relevant
- the *reservoir-sampling-based memory buffers*: they guarantee that each recorded state-transition has an equal probability of being inserted into the buffer
We use a combination of both.
In `push`:
1. Append the transition to memory
2. Create the if statement which deletes an old transition from the memory
```python
class ExperienceReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
def push(self, transition):
# Append the transition below
...
# Now, we need an `if` statement in order to keep the capacity to its limit. Write it below.
# Hint: `del something` will delete something if something is an array
if ...:
raise NotImplementedError
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
```
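For contrast with the cyclic buffer you just wrote, here is a minimal sketch of the reservoir-sampling variant mentioned above (standard Algorithm R; it is not used in the rest of the notebook):

```python
import random

class ReservoirMemory:
    """Keeps a uniform random sample over *all* transitions ever pushed."""
    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.n_seen = 0  # total number of transitions pushed so far

    def push(self, transition):
        self.n_seen += 1
        if len(self.memory) < self.capacity:
            self.memory.append(transition)
        else:
            # Replace a random slot with probability capacity / n_seen, which
            # keeps every transition equally likely to end up in the buffer.
            j = random.randrange(self.n_seen)
            if j < self.capacity:
                self.memory[j] = transition

    def sample(self, batch_size):
        return random.sample(self.memory, batch_size)
```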
------------
Now we have:
- the **DQN** network,
- the **ExperienceReplayMemory**.
Let's build the **Agent** class !
## <font color='#ed7d31'>Agent declaration</font>
In the cell below:
1. Create `self.target_model` in `declare_networks`
2. Complete the epsilon-greedy algorithm in `get_action`
```python
class Agent(object):
def __init__(self, config, env, log_dir='/tmp/gym'):
self.log_dir = log_dir
self.rewards = []
self.action_log_frequency = config.ACTION_SELECTION_COUNT_FREQUENCY
self.action_selections = [0 for _ in range(env.action_space.n)]
# Define the DQN networks
def declare_networks(self):
self.model = DQN(self.num_feats, self.num_actions)
# Create `self.target_model` with the same network architecture
self.target_model = ...
raise NotImplementedError
# Define the Replay Memory
def declare_memory(self):
self.memory = ExperienceReplayMemory(self.experience_replay_size)
# Append the new transition to the Replay Memory
def append_to_replay(self, s, a, r, s_):
self.memory.push((s, a, r, s_))
# Sample transitions from the Replay Memory
def sample_minibatch(self):
transitions = self.memory.sample(self.batch_size)
batch_state, batch_action, batch_reward, batch_next_state = zip(*transitions)
shape = (-1,)+self.num_feats
batch_state = torch.tensor(batch_state, device=self.device, dtype=torch.float).view(shape)
batch_action = torch.tensor(batch_action, device=self.device, dtype=torch.long).squeeze().view(-1, 1)
batch_reward = torch.tensor(batch_reward, device=self.device, dtype=torch.float).squeeze().view(-1, 1)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch_next_state)), device=self.device, dtype=torch.uint8)
# Sometimes all next states are terminal (None), so the tensor construction below fails
try:
non_final_next_states = torch.tensor([s for s in batch_next_state if s is not None], device=self.device, dtype=torch.float).view(shape)
empty_next_state_values = False
except:
non_final_next_states = None
empty_next_state_values = True
return batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values
# Sample action
def get_action(self, s, eps=0.1):
with torch.no_grad():
# Epsilon-greedy
if np.random.random() >= eps:
X = torch.tensor([s], device=self.device, dtype=torch.float)
a = self.model(X).max(1)[1].view(1, 1)
return a.item()
else:
...
```
-----
<font color='#ed7d31'>**Question**:</font>
Remember we define the objective function as
\begin{equation}
J=\left(r+\gamma \max _{a^{\prime}} Q\left(s^{\prime}, a^{\prime}, \mathbf{\theta}^{-}\right)-Q(s, a, \mathbf{\theta})\right)^{2},
\end{equation}
where $\theta^{-}$ are the target parameters.
Why do we need a target network in the first place ?
## <font color='#ed7d31'>Learning</font>
In the cell below, using the above objective function:
1. Write the value `expected_q_values`
2. Write `diff`
3. The `update` function needs some work
```python
class Learning(Agent):
def __init__(self, env=None, config=None, log_dir='/tmp/gym'):
super().__init__(config=config, env=env, log_dir=log_dir)
# Compute loss from the Bellman Optimality Equation
def compute_loss(self, batch_vars):
batch_state, batch_action, batch_reward, non_final_next_states, non_final_mask, empty_next_state_values = batch_vars
# Estimate
current_q_values = self.model(batch_state).gather(1, batch_action)
# Target
with torch.no_grad():
max_next_q_values = torch.zeros(self.batch_size, device=self.device, dtype=torch.float).unsqueeze(dim=1)
if not empty_next_state_values:
max_next_action = self.get_max_next_state_action(non_final_next_states)
max_next_q_values[non_final_mask] = self.target_model(non_final_next_states).gather(1, max_next_action)
# From the equation above, write the value `expected_q_values`.
expected_q_values = ...
# From the equation above, write the value `diff`.
diff = ...
loss = self.MSE(diff)
loss = loss.mean()
raise NotImplementedError
return loss
# Update both networks (the agent and the target)
def update(self, s, a, r, s_, sample_idx=0):
self.append_to_replay(s, a, r, s_)
# When not to update ?
# There is a concise way to write to skip the update, fill in the 2 blanks in the `if` statement below.
# Hint: the sample count should be < the learn_start hyperparameter and respect the update_freq.
if ... or ...:
raise NotImplementedError
return None
batch_vars = self.sample_minibatch()
loss = self.compute_loss(batch_vars)
# Optimize the model
self.optimizer.zero_grad()
loss.backward()
for param in self.model.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
self.update_target_model()
self.save_td(loss.item(), sample_idx)
def update_target_model(self):
# Copy weights from model to target_model following `target_net_update_freq`.
self.update_count+=1
if self.update_count % self.target_net_update_freq == 0:
self.target_model.load_state_dict(self.model.state_dict())
```
## <font color='#ed7d31'>Model declaration</font>
```python
class Model(Learning):
def __init__(self, env=None, config=None, log_dir='/tmp/gym'):
super().__init__(config=config, env=env, log_dir=log_dir)
self.device = config.device
# Hyperparameters
self.gamma = config.GAMMA
self.target_net_update_freq = config.TARGET_NET_UPDATE_FREQ
self.experience_replay_size = config.EXP_REPLAY_SIZE
self.batch_size = config.BATCH_SIZE
self.learn_start = config.LEARN_START
self.update_freq = config.UPDATE_FREQ
# Environment specific parameters
self.num_feats = env.observation_space.shape
self.num_actions = env.action_space.n
self.env = env
self.declare_networks()
self.declare_memory()
self.target_model.load_state_dict(self.model.state_dict())
self.optimizer = optim.Adam(self.model.parameters(), lr=config.LR)
# Move to correct device
self.model = self.model.to(self.device)
self.target_model.to(self.device)
self.model.train()
self.target_model.train()
self.update_count = 0
def save_td(self, td, tstep):
with open(os.path.join(self.log_dir, 'td.csv'), 'a') as f:
writer = csv.writer(f)
writer.writerow((tstep, td))
def get_max_next_state_action(self, next_states):
return self.target_model(next_states).max(dim=1)[1].view(-1, 1)
def MSE(self, x):
return 0.5 * x.pow(2)
def save_reward(self, reward):
self.rewards.append(reward)
def save_action(self, action, tstep):
self.action_selections[int(action)] += 1.0/self.action_log_frequency
if (tstep+1) % self.action_log_frequency == 0:
with open(os.path.join(self.log_dir, 'action_log.csv'), 'a') as f:
writer = csv.writer(f)
writer.writerow(list([tstep]+self.action_selections))
self.action_selections = [0 for _ in range(len(self.action_selections))]
def save_w(self):
if not os.path.exists("../saved_agents"):
os.makedirs("../saved_agents")
torch.save(self.model.state_dict(), '../saved_agents/model.dump')
torch.save(self.optimizer.state_dict(), '../saved_agents/optim.dump')
```
## <font color='#ed7d31'>Hyperparameters</font>
```python
class Config(object):
def __init__(self):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Main agent variables
self.GAMMA=0.99
self.LR=1e-3
# Epsilon variables
self.epsilon_start = 1.0
self.epsilon_final = 0.01
self.epsilon_decay = 10000
self.epsilon_by_sample = lambda sample_idx: config.epsilon_final + (config.epsilon_start - config.epsilon_final) * math.exp(-1. * sample_idx / config.epsilon_decay)
# Memory
self.TARGET_NET_UPDATE_FREQ = 1000
self.EXP_REPLAY_SIZE = 10000
self.BATCH_SIZE = 64
# Learning control variables
self.LEARN_START = 1000
self.MAX_SAMPLES = 50000
self.UPDATE_FREQ = 1
# Data logging parameters
self.ACTION_SELECTION_COUNT_FREQUENCY = 1000
config = Config()
```
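As a quick sanity check of the exploration schedule, you can evaluate `epsilon_by_sample` at a few sample indices; it decays exponentially from `epsilon_start` towards `epsilon_final`:

```python
for idx in [0, 1000, 10000, 50000]:
    print(idx, round(config.epsilon_by_sample(idx), 3))
# Roughly: 1.0 at the start, ~0.91 after 1k samples,
# ~0.37 after 10k, and ~0.02 (close to epsilon_final) by 50k.
```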
## <font color='#ed7d31'>Training</font>
```python
import gym
from openai_monitor import Monitor
from IPython import display
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
start=timer()
log_dir = "/tmp/gym/"
try:
os.makedirs(log_dir)
except OSError:
files = glob.glob(os.path.join(log_dir, '*.monitor.csv')) \
+ glob.glob(os.path.join(log_dir, '*td.csv')) \
+ glob.glob(os.path.join(log_dir, '*action_log.csv'))
for f in files:
os.remove(f)
env_id = 'CartPole-v0'
env = gym.make(env_id)
env = Monitor(env, os.path.join(log_dir, env_id))
model = Model(env=env, config=config, log_dir=log_dir)
episode_reward = 0
observation = env.reset()
for sample_idx in range(1, config.MAX_SAMPLES + 1):
epsilon = config.epsilon_by_sample(sample_idx)
action = model.get_action(observation, epsilon)
# Log action selection
model.save_action(action, sample_idx)
prev_observation=observation
observation, reward, done, _ = env.step(action)
observation = None if done else observation
model.update(prev_observation, action, reward, observation, sample_idx)
episode_reward += reward
if done:
observation = env.reset()
model.save_reward(episode_reward)
episode_reward = 0
if sample_idx % 1000 == 0:
try:
clear_output(True)
plot_all_data(log_dir, env_id, 'DQN', config.MAX_SAMPLES, bin_size=(10, 100, 100, 1), smooth=1, time=timedelta(seconds=int(timer()-start)), ipynb=True)
except IOError:
pass
model.save_w()
env.close()
```
By observing the plots, does the learning appear to be stable?
If your answer is *yes*, then start a second run, and a third, with the same hyperparameters. ;-)
You have just faced reproducibility concerns, which are quite a serious problem in deep RL and which can be dealt with by e.g. running your experiments on a sufficient number of seeds (~ 6-8 min.)
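If you want to try this, a minimal way to fix the seeds (a sketch, assuming the classic `gym` API used in this notebook) is:

```python
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
env.seed(seed)  # classic gym API; newer gym/gymnasium versions use env.reset(seed=seed)
```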
## <font color='#ed7d31'>Visualize the agent</font>
```python
from gym.wrappers import Monitor
# Loading the agent
fname_model = "../saved_agents/model.dump"
fname_optim = "../saved_agents/optim.dump"
log_dir = "/tmp/gym/"
model = Model(env=env, config=config, log_dir=log_dir)
if os.path.isfile(fname_model):
model.model.load_state_dict(torch.load(fname_model))
model.target_model.load_state_dict(model.model.state_dict())
if os.path.isfile(fname_optim):
model.optimizer.load_state_dict(torch.load(fname_optim))
env_id = 'CartPole-v0'
env = gym.make(env_id)
env = Monitor(env, './videos', force=True, video_callable=lambda episode: True)
for episode in range(3):
done = False
obs = env.reset()
while not done:
action = model.get_action(obs)
obs, _, done, _ = env.step(action)
env.close()
show_video()
```
You can experiment with modifying the hyperparameters (learning rate, batch size, experience replay size, etc.) to see if you can improve its performance!
-------------
|
Formal statement is: lemma pderiv_prod: "pderiv (prod f (as)) = (\<Sum>a\<in>as. prod f (as - {a}) * pderiv (f a))" Informal statement is: The derivative of a product of functions is the sum of the products of the derivatives of the functions.
|
# Heatmap
#install.packages("dplyr")
library(dplyr)
#install.packages("RColorBrewer")
library(RColorBrewer)
#install.packages("genefilter")
library(genefilter)
#install.packages("gplots")
library(gplots) ##Available from CRAN
#install_github("ririzarr/rafalib")
library(rafalib)
# get the working directory
dir <- getwd()
# set the working directory
setwd(dir)
## Log transformed data
setwd("../../data/freqs_2/log")
### All data
combined <- as.data.frame(read.csv(file = "all.csv",header = TRUE))
combined <- combined %>% select(-sample)
targets <- combined %>% select(target)
targets_chr <- sapply(targets, unlist)
#set the heatmap colors
hmcol <- colorRampPalette(brewer.pal(9, "GnBu"))(100)
combined <- as.data.frame(read.csv(file = "all_tr.csv",header = TRUE,row.names = 1))
c <- as.matrix(combined)
rv_data <- rowVars(c)
idx_data <- order(-rv_data )[1:60]
cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
hclust.ward <- function(c) hclust(c, method="ward.D2")
full <- heatmap.2(c[idx_data,], labCol=targets_chr,
trace="none", ColSideColors=cols,
col=hmcol, hclustfun=hclust.ward )
############## making a legend
# labels <- unique(unlist(targets_chr))
# colors_vect <- unique(unlist(cols))
# legend('top', labels, lty=c(1,1), lwd=c(2.5,2.5),col=colors_vect)
### filter GO terms > depth 10
combined <- as.data.frame(read.csv(file = "depth_10.csv",header = TRUE))
combined <- combined %>% select(-sample)
targets <- combined %>% select(target)
targets_chr <- sapply(targets, unlist)
combined <- as.data.frame(read.csv(file = "depth_10_tr.csv",header = TRUE,row.names = 1))
c <- as.matrix(combined)
rv_data <- rowVars(c)
idx_data <- order(-rv_data )[1:25] #also looks ok with top 30 GO terms
cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
hclust.ward <- function(c) hclust(c, method="ward.D2")
mypar(1,1)
full <- heatmap.2(c[idx_data,], labCol=targets_chr,
trace="none", ColSideColors=cols,
col=hmcol, hclustfun=hclust.ward )
############## making a legend
# labels <- unique(unlist(targets_chr))
# colors_vect <- unique(unlist(cols))
# legend('top', labels, lty=c(1,1), lwd=c(2.5,2.5),col=colors_vect)
######################################################
#Feature selection for specific GO hierarchy subclasses
#using the log_transformed data
## Log transformed data
#setwd("../log")
# ## alpha-amino acid biosynthetic process
# combined <- as.data.frame(read.csv(file = "alpha-amino_acid_biosynthetic_process.csv",header = TRUE))
# combined <- combined %>% select(-sample)
# targets <- combined %>% select(target)
# targets_chr <- sapply(targets, unlist)
#
# combined <- as.data.frame(read.csv(file = "alpha-amino_acid_biosynthetic_process_tr.csv",header = TRUE,row.names = 1))
# c <- as.matrix(combined)
# rv_data <- rowVars(c)
# idx_data <- order(-rv_data )[1:21]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# hclust.ward <- function(c) hclust(c, method="ward.D2")
# mypar(1,1)
# full <- heatmap.2(c[idx_data,], labCol=targets_chr,
# trace="none", ColSideColors=cols,
# col=hmcol, hclustfun=hclust.ward )
#
#
#
# ## ATPase activity
# combined <- as.data.frame(read.csv(file = "ATPase_activity.csv",header = TRUE))
# combined <- combined %>% select(-sample)
# targets <- combined %>% select(target)
# targets_chr <- sapply(targets, unlist)
#
# combined <- as.data.frame(read.csv(file = "ATPase_activity_tr.csv",header = TRUE,row.names = 1))
# c <- as.matrix(combined)
# rv_data <- rowVars(c)
# idx_data <- order(-rv_data )[1:19]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# hclust.ward <- function(c) hclust(c, method="ward.D2")
# mypar(1,1)
# full <- heatmap.2(c[idx_data,], labCol=targets_chr,
# trace="none", ColSideColors=cols,
# col=hmcol, hclustfun=hclust.ward )
#
#
# ## nucleoside phosphate metabolic process
# combined <- as.data.frame(read.csv(file = "nucleoside_phosphate_metabolic_process.csv",header = TRUE))
# combined <- combined %>% select(-sample)
# targets <- combined %>% select(target)
# targets_chr <- sapply(targets, unlist)
#
# combined <- as.data.frame(read.csv(file = "nucleoside_phosphate_metabolic_process_tr.csv",header = TRUE,row.names = 1))
# c <- as.matrix(combined)
# rv_data <- rowVars(c)
# idx_data <- order(-rv_data )[1:40]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# hclust.ward <- function(c) hclust(c, method="ward.D2")
# mypar(1,1)
# full <- heatmap.2(c[idx_data,], labCol=targets_chr,
# trace="none", ColSideColors=cols,
# col=hmcol, hclustfun=hclust.ward )
#
# ## tRNA metabolic process
# combined <- as.data.frame(read.csv(file = "tRNA_metabolic_process.csv",header = TRUE))
# combined <- combined %>% select(-sample)
# targets <- combined %>% select(target)
# targets_chr <- sapply(targets, unlist)
#
# combined <- as.data.frame(read.csv(file = "tRNA_metabolic_process_tr.csv",header = TRUE,row.names = 1))
# c <- as.matrix(combined)
# rv_data <- rowVars(c)
# idx_data <- order(-rv_data )[1:31]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
# hclust.ward <- function(c) hclust(c, method="ward.D2")
# mypar(1,1)
# full <- heatmap.2(c[idx_data,], labCol=targets_chr,
# trace="none", ColSideColors=cols,
# col=hmcol, hclustfun=hclust.ward )
#
#########
### translation_assoc
#Having searched GO for many translation associated classes,
# and having retrieved their subclasses and used all of them
#to feature select down we get 99 GO classes as features.
combined <- as.data.frame(read.csv(file = "translation_assoc.csv",header = TRUE))
combined <- combined %>% select(-sample)
targets <- combined %>% select(target)
targets_chr <- sapply(targets, unlist)
combined <- as.data.frame(read.csv(file = "translation_assoc_tr.csv",header = TRUE,row.names = 1))
c <- as.matrix(combined)
rv_data <- rowVars(c)
idx_data <- order(-rv_data )[1:95]
cols <- palette(brewer.pal(11, "Paired"))[as.fumeric(targets_chr)]
hclust.ward <- function(c) hclust(c, method="ward.D2")
mypar(1,1)
full <- heatmap.2(c[idx_data,], labCol=targets_chr,
trace="none", ColSideColors=cols,
col=hmcol, hclustfun=hclust.ward )
# To get the sample labels
#carpet <- full$carpet
#li <- rownames(carpet)
#lis <- sapply(li, unlist)
#View(li)
### again with column labels as the sample EBI numbers
# combined <- as.data.frame(read.csv(file = "translation_assoc.csv",header = TRUE))
# combined <- combined %>% select(-target)
# targets <- combined %>% select(samples)
# targets_chr <- sapply(targets, unlist)
#
# combined <- as.data.frame(read.csv(file = "translation_assoc_tr_2.csv",header = TRUE,row.names = 1))
# c <- as.matrix(combined)
# rv_data <- rowVars(c)
# idx_data <- order(-rv_data )[1:99]
# cols <- palette(brewer.pal(8, "Dark2"))[as.fumeric(targets_chr)]
# hclust.ward <- function(c) hclust(c, method="ward.D2")
# mypar(1,1)
# full <- heatmap.2(c[idx_data,], labCol=targets_chr,
# trace="none", ColSideColors=cols,
# col=hmcol, hclustfun=hclust.ward,
# colCol=cols)
#
# carpet <- full$carpet
# li <- rownames(carpet)
# lis <- sapply(li, unlist)
# View(li)
|
[STATEMENT]
lemma upper_3_tendsto: "upper_3 x \<longlonglongrightarrow> real_of_3 x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. upper_3 x \<longlonglongrightarrow> real_of_3 x
[PROOF STEP]
proof(rule dist_tendsto_0_imp_tendsto, rule sandwitch_real)
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. ?l2 \<longlonglongrightarrow> 0
2. ?r2 \<longlonglongrightarrow> 0
3. \<And>i. ?l2 i \<le> \<bar>upper_3 x i - real_of_3 x\<bar>
4. \<And>i. \<bar>upper_3 x i - real_of_3 x\<bar> \<le> ?r2 i
[PROOF STEP]
fix i
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. ?l2 \<longlonglongrightarrow> 0
2. ?r2 \<longlonglongrightarrow> 0
3. \<And>i. ?l2 i \<le> \<bar>upper_3 x i - real_of_3 x\<bar>
4. \<And>i. \<bar>upper_3 x i - real_of_3 x\<bar> \<le> ?r2 i
[PROOF STEP]
obtain l r where lr: "get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>l r. get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis interval.collapse)
[PROOF STATE]
proof (state)
this:
get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r
goal (4 subgoals):
1. ?l2 \<longlonglongrightarrow> 0
2. ?r2 \<longlonglongrightarrow> 0
3. \<And>i. ?l2 i \<le> \<bar>upper_3 x i - real_of_3 x\<bar>
4. \<And>i. \<bar>upper_3 x i - real_of_3 x\<bar> \<le> ?r2 i
[PROOF STEP]
with get_itvl_3[of "(tighten_bounds_3 ^^ i) x"]
[PROOF STATE]
proof (chain)
picking this:
real_of_3 ((tighten_bounds_3 ^^ i) x) \<in>\<^sub>i get_itvl_3 ((tighten_bounds_3 ^^ i) x)
get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r
[PROOF STEP]
show "\<bar>(upper_3 x) i - real_of_3 x\<bar> \<le> (upper_3 x i - lower_3 x i)"
[PROOF STATE]
proof (prove)
using this:
real_of_3 ((tighten_bounds_3 ^^ i) x) \<in>\<^sub>i get_itvl_3 ((tighten_bounds_3 ^^ i) x)
get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r
goal (1 subgoal):
1. \<bar>upper_3 x i - real_of_3 x\<bar> \<le> upper_3 x i - lower_3 x i
[PROOF STEP]
unfolding upper_3_def lower_3_def
[PROOF STATE]
proof (prove)
using this:
real_of_3 ((tighten_bounds_3 ^^ i) x) \<in>\<^sub>i get_itvl_3 ((tighten_bounds_3 ^^ i) x)
get_itvl_3 ((tighten_bounds_3 ^^ i) x) = Interval l r
goal (1 subgoal):
1. \<bar>interval.upper (get_itvl_3 ((tighten_bounds_3 ^^ i) x)) - real_of_3 x\<bar> \<le> interval.upper (get_itvl_3 ((tighten_bounds_3 ^^ i) x)) - interval.lower (get_itvl_3 ((tighten_bounds_3 ^^ i) x))
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<bar>upper_3 x i - real_of_3 x\<bar> \<le> upper_3 x i - lower_3 x i
goal (3 subgoals):
1. ?l2 \<longlonglongrightarrow> 0
2. (\<lambda>i. upper_3 x i - lower_3 x i) \<longlonglongrightarrow> 0
3. \<And>i. ?l2 i \<le> \<bar>upper_3 x i - real_of_3 x\<bar>
[PROOF STEP]
qed (insert interval_size_3_tendsto_0, auto)
|
function [robfitstatus] = canlab_glm_group_levels_run1input(wd, c)
% child process of canlab_glm_group_levels
% (see canlab_glm_README.txt for an overview)
%
% ..
% Copyright (C) 2013 Luka Ruzic
% ..
load(fullfile(wd,sprintf('env_%04d',c)));
%% PREP
% diaryname = fullfile(wd,sprintf('diary_%04d.log',c));
cd(grpmodeldir)
robfitstatus = 0; %#ok
% grfstatus = 0;
%% robfit
cmd = 'robfit(EXPT, includedcons(c), 0, EXPT.mask)';
% diary(diaryname), fprintf('> %s\n',cmd); diary off
fprintf('> %s\n',cmd);
try
eval(cmd)
robfitstatus = 1;
catch exc
if OPTS.nocatch, cd(STARTINGDIR); rethrow(exc);
else fprintf('> %s\n',getReport(exc,'extended')); end
% else diary(diaryname), fprintf(getReport(exc,'extended')), diary off; end
robfitstatus = -1;
end
%% grf % having problems with this running in parallel (display problems?)
% if OPTS.run_grf
% robdir = filenames(fullfile(grpmodeldir,sprintf('robust%04d',includedcons(c))),'char','absolute');
% if ~isempty(robdir)
% announce_string('SIGNIFICANT CLUSTER SIZE ESTIMATION using GRF')
% try
% load(fullfile(robdir,'SETUP.mat'));
%
% cmd = 'sigclext = estimate_cluster_extent(.05, pthresh, SETUP.files);';
% fprintf('> %s\n',cmd)
% eval(cmd);
% close all
%
% fout = fullfile(robdir,'significant_cluster_extents_grf.txt');
% dlmwrite(fout,[pthresh' sigclext(:,1)],'precision','%g','delimiter',' '); %#ok
%
% grfstatus = 1;
% catch exc
% if OPTS.nocatch, cd(STARTINGDIR); rethrow(exc)
% else fprintf('> %s\n',getReport(exc,'extended')); end
% grfstatus = -1;
% end
% end
% end
end
function announce_string(string)
s = sprintf('-- %s --',string);
l = regexprep(s,'.','-');
fprintf('> \n> \n> \n> %s\n> %s\n> %s\n> \n',l,s,l);
end
|
####
###UMR_Trib Data. Si Woodstoich Working Group
#5.14.19
#Playing around with new Trib data
library(plyr)
library(dplyr)
setwd("~/Woodstoich/RawData_5.14.19")
#importing data
Data<-read.table(file="UMR_Tribs_jcc_5.14.19.csv", header=TRUE, sep=',')
names(Data)
#Helper variables from Paul
N.mw=14.0067
P.mw=30.973762
C.mw=12.0107
Si.mw=28.0855
#Adding new colums with data in moles so to calc molar ratios
Data$uMSi<-(Data$SI/Si.mw*1000)
Data$uMNOX<-(Data$NOX/N.mw*1000)
Data$uMNHX<-(Data$NHX/N.mw*1000)
Data$uMSRP<-(Data$SRP/P.mw*1000)
Data$uMDIN<-(Data$uMNOX+Data$uMNHX)
Data$DINSi<-(Data$uMDIN/Data$uMSi)
Data$PSi<-(Data$uMSRP/Data$uMSi)
range(Data$uMSi, na.rm=T)
#subsetting to remove negatives
Data<-subset(Data, uMSi>0 & uMNOX>0 & uMSRP>0 & uMNHX>0)
#Fixing dates and adding year as a column
Data$DATE2=as.Date(Data$DATE, c("%m/%d/%Y"))
Data$YEAR=as.numeric(format(Data$DATE2,c("%Y")))
#making each site name into a unique number
unique(Data$LOCATCD)
Data$SiteNum<-as.numeric(as.factor(Data$LOCATCD))
head(Data$SiteNum)
unique(Data$SiteNum)
#Calc min and max dates for each site
SiteDates<-ddply(Data, c("LOCATCD"), summarise, min.date=min(YEAR), max.date=max(YEAR))
SiteDates$YrsData<-with(SiteDates, as.numeric(max.date-min.date))
LongTermSites<-subset(SiteDates, SiteDates$YrsData>20)
#Now merge these sites with >20 yrs data with larger dataset
Data3<-merge(LongTermSites, Data, by=c("LOCATCD"))
head(Data3)
#calc avg vaues by year at each site
AvgByYr2<-aggregate(cbind(Data3$uMSi, Data3$uMNHX, Data3$uMSRP, Data3$uMNOX, Data3$uMDIN,
Data3$DINSi, Data3$PSi) ~ Data3$LOCATCD + Data3$YEAR, FUN=mean)
names(AvgByYr2)
colnames(AvgByYr2) <- c("LOCATCD", "Year", "AvguMSi", "AvgNH4", "AvgSRP", "AvgNOX",
"AvgDIN", "AvgDIN_SiRatio", "AvgP_SiRatio")
Data4<-AvgByYr2[order(AvgByYr2$LOCATCD),]
range(Data4$Year)
range(Data4$AvgDIN_SiRatio)
##==============================================
##plotting avg annual values over time for each site with >20yrs data
##exported the results of simple linear regression
##=============================================
library(broom)
#Average DIN:DSi molar ratios over time
df=NULL
par(mfrow=c(4,7), mar=c(2,2.8,2,1.3))
for(i in unique(Data4$LOCATCD)){
plot(Data4[Data4$LOCATCD==i, "Year"], Data4[Data4$LOCATCD==i, "AvgDIN_SiRatio"],
xlim=c(1990, 2020), ylim=c(0,16), main=paste("Site", i), ylab="DIN:DSi",
mgp=c(1.8,.2,0), las=1, xlab="", xaxt="n", yaxt="n")
axis(side=1, at = c(1990, 2000, 2010, 2020), tcl=-.4, cex.axis=.8, mgp=c(0,.3,0), font.axis=1)
axis(side=2, at = c(0,5,10,15), tcl=-.3, cex.axis=.8, mgp=c(0, .5,0), font.axis=1, las=1)
abline(lm(Data4[Data4$LOCATCD==i, "AvgDIN_SiRatio"]~ Data4[Data4$LOCATCD==i, "Year"]))
df=rbind(df,data.frame(glance(lm(Data4[Data4$LOCATCD==i, "AvgDIN_SiRatio"]~ Data4[Data4$LOCATCD==i, "Year"]))))
}
dev.off()
write.csv(df, file="R2_DIN_SiRatiosvsTime_5.15.19.csv", row.names=TRUE, na = "NA")
#Average DIP:Si molar ratios over time
df=NULL
par(mfrow=c(4,7), mar=c(2,2.8,2,1.3))
for(i in unique(Data4$LOCATCD)){
plot(Data4[Data4$LOCATCD==i, "Year"], Data4[Data4$LOCATCD==i, "AvgP_SiRatio"],
xlim=c(1990, 2020), ylim=c(0,.1), main=paste("Site", i), ylab="SRP:DSi",
mgp=c(1.8,.2,0), las=1, xlab="", xaxt="n", yaxt="n")
axis(side=1, at = c(1990, 2000, 2010, 2020), tcl=-.4, cex.axis=.8, mgp=c(0,.3,0), font.axis=1)
axis(side=2, at = c(0,0.05, 0.1), tcl=-.3, cex.axis=.8, mgp=c(0, .5,0), font.axis=1, las=1)
abline(lm(Data4[Data4$LOCATCD==i, "AvgP_SiRatio"]~ Data4[Data4$LOCATCD==i, "Year"]))
df=rbind(df,data.frame(glance(lm(Data4[Data4$LOCATCD==i, "AvgP_SiRatio"]~ Data4[Data4$LOCATCD==i, "Year"]))))
}
dev.off()
write.csv(df, file="R2_P_SiRatiosvsTime_5.15.19.csv", row.names=TRUE, na = "NA")
#Average DSi values over time
df=NULL
par(mfrow=c(4,7), mar=c(2,2.5,2,1.3))
for(i in unique(Data4$LOCATCD)){
plot(Data4[Data4$LOCATCD==i, "Year"], Data4[Data4$LOCATCD==i, "AvguMSi"],
xlim=c(1990, 2020), ylim=range(Data4$AvguMSi), main=paste("Site", i), ylab="DSi uM",
mgp=c(1.6,.2,0), las=1, xlab="", xaxt="n", yaxt="n")
axis(side=1, at = c(1990, 2000, 2010, 2020), tcl=-.4, cex.axis=.8, mgp=c(0,.3,0), font.axis=1)
axis(side=2, at = c(0,50,100,150,200, 250), tcl=-.3, cex.axis=.8, mgp=c(0, .5,0), font.axis=1, las=1)
abline(lm(Data4[Data4$LOCATCD==i, "AvguMSi"]~ Data4[Data4$LOCATCD==i, "Year"]))
df=rbind(df,data.frame(glance(lm(Data4[Data4$LOCATCD==i, "AvguMSi"]~ Data4[Data4$LOCATCD==i, "Year"]))))
}
dev.off()
write.csv(df, file="R2_DSivsTime_5.15.19.csv", row.names=TRUE, na = "NA")
#glance in the broom package gives R2 (and apparently p-values). tidy gives p-values and slopes
#Average DIN values over time
df=NULL
par(mfrow=c(4,7), mar=c(2,2.5,2,1.3))
for(i in unique(Data4$LOCATCD)){
plot(Data4[Data4$LOCATCD==i, "Year"], Data4[Data4$LOCATCD==i, "AvgDIN"],
xlim=c(1990, 2020), ylim=range(Data4$AvgDIN), main=paste("Site", i), ylab="DIN uM",
mgp=c(1.6,.2,0), las=1, xlab="", xaxt="n", yaxt="n")
axis(side=1, at = c(1990, 2000, 2010, 2020), tcl=-.4, cex.axis=.8, mgp=c(0,.3,0), font.axis=1)
axis(side=2, at = c(0,500,1000), tcl=-.3, cex.axis=.8, mgp=c(0, .5,0), font.axis=1, las=1)
abline(lm(Data4[Data4$LOCATCD==i, "AvgDIN"]~ Data4[Data4$LOCATCD==i, "Year"]))
df=rbind(df,data.frame(glance(lm(Data4[Data4$LOCATCD==i, "AvgDIN"]~ Data4[Data4$LOCATCD==i, "Year"]))))
}
dev.off()
write.csv(df, file="R2_DINvsTime_5.15.19.csv", row.names=TRUE, na = "NA")
#Average DIP values over time
df=NULL
par(mfrow=c(4,7), mar=c(2,2.5,2,1.3))
for(i in unique(Data4$LOCATCD)){
plot(Data4[Data4$LOCATCD==i, "Year"], Data4[Data4$LOCATCD==i, "AvgSRP"],
xlim=c(1990, 2020), ylim=range(Data4$AvgSRP), main=paste("Site", i), ylab="SRP uM",
mgp=c(1.6,.2,0), las=1, xlab="", xaxt="n", yaxt="n")
axis(side=1, at = c(1990, 2000, 2010, 2020), tcl=-.4, cex.axis=.8, mgp=c(0,.3,0), font.axis=1)
axis(side=2, at = c(0,10,20,30), tcl=-.3, cex.axis=.8, mgp=c(0, .5,0), font.axis=1, las=1)
abline(lm(Data4[Data4$LOCATCD==i, "AvgSRP"]~ Data4[Data4$LOCATCD==i, "Year"]))
df=rbind(df,data.frame(glance(lm(Data4[Data4$LOCATCD==i, "AvgSRP"]~ Data4[Data4$LOCATCD==i, "Year"]))))
}
dev.off()
write.csv(df, file="R2_SRPvsTime_5.15.19.csv", row.names=TRUE, na = "NA")
##=======================================================
##below is me f-ing around...ignore. didn't use
##=======================================================
#couldn't figure this out - to define the path inside the loop:
#note: the tilde must be part of the first path component, and jpeg() needs a file name
mypath <- file.path("~", "Woodstoich", "RawData_5.14.19", "img", "plot.jpeg")
jpeg(file=mypath)
#if use below, i have to change site num to locatcd bc got rid of misc numbers:
#testing out the loop here - works! just going into wrong folder and these are each plot individually, not multi-panel:
#so putting in diff folder:
setwd("~/Woodstoich/RawData_5.14.19/img")
for(i in unique(Data4$SiteNumber)){
jpeg(paste("plot_", i, ".jpeg", sep=""))
plot(Data4[Data4$SiteNumber==i, "Year"], Data4[Data4$SiteNumber==i, "AvguMSi"],
xlim=range(Data4$Year), ylim=range(Data4$AvguMSi), main=paste("Site", i), ylab="DSi uM", xlab="")
dev.off()
}
#couldn't figure out matrix so using mfrow instead
layout(matrix(1:3, ncol=3))
layout(matrix(1:28,7,4,byrow=F))
#Test code for saving plot to folder
jpeg(file = "~/Woodstoich/RawData_5.14.19/img/plot2.jpeg")
plot(Data4$Year, Data4$AvgP_SiRatio)
dev.off()
#lets add trend line to plot
#practice plot not in loop:
plot(Data4$Year, Data4$AvguMSi, ylim=c(5,250), xlim=c(1990,2020), ylab="DSi uM", xlab="", las=1)
title("DSi uM concentrations over time, LOCATCD==WPO2.6M ", line=2, cex.main=.8)
abline(lm(Data4$AvguMSi ~ Data4$Year))
fit1<-lm(Data4$AvguMSi ~ Data4$Year)
fit2<-lm(Data4$AvguMSi ~ Data4$Year)
#this loop works - just need to save the files to a folder
for(i in unique(Data4$SiteNumber)){
plot(Data4[Data4$SiteNumber==i, "Year"], Data4[Data4$SiteNumber==i, "AvguMSi"],
xlim=range(Data4$Year), ylim=range(Data4$AvguMSi), main=paste("Site", i), ylab="DSi uM", xlab="")
}
dev.off()
#this works but ignore
plot(Data4$Year[which(Data4$SiteNumber==1)], Data4$AvguMSi [which(Data4$SiteNumber==1)], ylim=c(5,250), xlim=c(1990,2020), ylab="DSi uM", xlab="", las=1)
title("DSi uM concentrations over time, Site ", line=2, cex.main=.8)
##not using but kept for maybe later
#calc avg vaues by year at each site
AvgByYr<-aggregate(Data$uMSi~ Data$SiteNum + Data$YEAR, FUN=mean)
colnames(AvgByYr) <- c("SiteNumber", "Year", "AvguMSi")
Data3<-AvgByYr[order(AvgByYr$SiteNumber),]
#these are 'or' not 'and' statements
Data5<-subset(Data,subset=uMSi>0 | uMNOX>0 | uMNHX>0 | uMSRP>0)
Data9<-Data[ !is.na(Data$uMSi | Data$uMNOX| Data$uMSRP | Data$NHX) & (Data$uMSi>0 | Data$uMNOX>0 |Data$uMNHX>0 | Data$uMSRP>0) ,]
#Does same thing as above but uses the 'which', which some pple don't like
Data8<-Data[ which(Data$uMSi>0 | Data$uMNOX>0 |Data$uMNHX>0 | Data$uMSRP>0) ,]
|
> module Fun.Predicates
> %default total
> %access public export
> %auto_implicits off
> ||| Extensional equality
> ExtEq : {A, B : Type} -> (f : A -> B) -> (g : A -> B) -> Type
> ExtEq {A} {B} f g = (a : A) -> f a = g a
|
import os
from pytest import fixture
import tarfile
from zipfile import ZipFile
from spacekit.analyzer.explore import HstCalPlots, HstSvmPlots
from spacekit.analyzer.scan import SvmScanner, CalScanner, import_dataset
from spacekit.extractor.load import load_datasets
# try:
# from pytest_astropy_header.display import (PYTEST_HEADER_MODULES,
# TESTED_VERSIONS)
# except ImportError:
# PYTEST_HEADER_MODULES = {}
# TESTED_VERSIONS = {}
PYTEST_HEADER_MODULES = {}
TESTED_VERSIONS = {}
try:
from spacekit import __version__ as version
except ImportError:
version = "unknown"
# The following line treats all DeprecationWarnings as exceptions.
from astropy.tests.helper import enable_deprecations_as_exceptions
enable_deprecations_as_exceptions()
# Uncomment and customize the following lines to add/remove entries
# from the list of packages for which version numbers are displayed
# when running the tests.
# PYTEST_HEADER_MODULES['astropy'] = 'astropy'
# PYTEST_HEADER_MODULES.pop('Matplotlib')
# PYTEST_HEADER_MODULES.pop('Pandas')
# PYTEST_HEADER_MODULES.pop('h5py')
TESTED_VERSIONS["spacekit"] = version
class Config:
def __init__(self, env):
SUPPORTED_ENVS = ["svm", "cal"]
self.env = env
if env.lower() not in SUPPORTED_ENVS:
raise Exception(
f"{env} is not a supported environment (supported envs: {SUPPORTED_ENVS})"
)
self.data_path = {
"svm": os.path.join(f"tests/data/{env}/data.zip"),
"cal": os.path.join(f"tests/data/{env}/data.zip"),
}[env]
self.kwargs = {"svm": dict(index_col="index"), "cal": dict(index_col="ipst")}[
env
]
self.decoder = {
"svm": {"det": {0: "hrc", 1: "ir", 2: "sbc", 3: "uvis", 4: "wfc"}},
"cal": {"instr": {0: "acs", 1: "cos", 2: "stis", 3: "wfc3"}},
}[env]
self.labeled = {
"svm": "tests/data/svm/train/training.csv",
"cal": "tests/data/cal/train/training.csv",
}[env]
self.unlabeled = {
"svm": "tests/data/svm/predict/unlabeled.csv",
"cal": "tests/data/cal/predict/unlabeled.csv",
}[env]
self.norm_cols = {
"svm": [
"numexp",
"rms_ra",
"rms_dec",
"nmatches",
"point",
"segment",
"gaia",
],
"cal": ["n_files", "total_mb"],
}[env]
self.rename_cols = {"svm": "_scl", "cal": ["x_files", "x_size"]}[env]
self.enc_cols = {
"svm": ["det", "wcs", "cat"],
"cal": [
"drizcorr",
"pctecorr",
"crsplit",
"subarray",
"detector",
"dtype",
"instr",
],
}[env]
self.tx_file = {
"svm": "tests/data/svm/tx_data.json",
"cal": "tests/data/cal/tx_data.json",
}[env]
def pytest_addoption(parser):
parser.addoption("--env", action="store", help="Environment to run tests against")
@fixture(scope="session")
def env(request):
return request.config.getoption("--env")
@fixture(scope="session")
def cfg(env):
cfg = Config(env)
return cfg
@fixture(scope="session")
def res_data_path(cfg, tmp_path_factory):
basepath = tmp_path_factory.getbasetemp()
data_file = cfg.data_path
with ZipFile(data_file, "r") as z:
z.extractall(basepath)
dname = os.path.basename(data_file.split(".")[0])
data_path = os.path.join(basepath, dname)
return data_path
@fixture(scope="session")
def df_ncols(cfg):
fname = cfg.labeled
X_cols = cfg.norm_cols + cfg.enc_cols
df = load_datasets([fname], index_col=cfg.kwargs["index_col"], column_order=X_cols)
ncols = [i for i, c in enumerate(df.columns) if c in cfg.norm_cols]
return (df, ncols)
@fixture(scope="session")
def scanner(cfg, res_data_path):
if cfg.env == "svm":
scanner = SvmScanner(perimeter=f"{res_data_path}/20??-*-*-*", primary=-1)
elif cfg.env == "cal":
scanner = CalScanner(perimeter=f"{res_data_path}/20??-*-*-*", primary=-1)
scanner.exp = cfg.env
return scanner
@fixture(scope="session")
def explorer(cfg, res_data_path):
fname = res_data_path
df = import_dataset(filename=fname, kwargs=cfg.kwargs, decoder=cfg.decoder)
if cfg.env == "svm":
hst = HstSvmPlots(df)
elif cfg.env == "cal":
hst = HstCalPlots(df)
hst.env = cfg.env
return hst
# SVM PREP
@fixture(scope="session") # "ibl738.tgz"
def single_visit_path(tmp_path_factory):
visit_path = os.path.abspath("tests/data/svm/prep/singlevisits.tgz")
basepath = tmp_path_factory.getbasetemp()
with tarfile.TarFile.open(visit_path) as tar:
tar.extractall(basepath)
dname = os.path.basename(visit_path.split(".")[0])
visit_path = os.path.join(basepath, dname)
return visit_path
@fixture(scope="function")
def img_outpath(tmp_path):
return os.path.join(tmp_path, "img")
# SVM PREDICT
@fixture(scope="function")
def svm_unlabeled_dataset():
return "tests/data/svm/predict/unlabeled.csv"
@fixture(scope="session", params=["img.tgz", "img_pred.npz"])
def svm_pred_img(request, tmp_path_factory):
img_path = os.path.join("tests/data/svm/predict", request.param)
if img_path.split(".")[-1] == "tgz":
basepath = tmp_path_factory.getbasetemp()
with tarfile.TarFile.open(img_path) as tar:
tar.extractall(basepath)
fname = os.path.basename(img_path.split(".")[0])
img_path = os.path.join(basepath, fname)
return img_path
# SVM TRAIN
@fixture(scope="function") # session
def svm_labeled_dataset():
return "tests/data/svm/train/training.csv"
@fixture(scope="session", params=["img.tgz", "img_data.npz"])
def svm_train_img(request, tmp_path_factory):
img_path = os.path.join("tests/data/svm/train", request.param)
if img_path.split(".")[-1] == "tgz":
basepath = tmp_path_factory.getbasetemp()
with tarfile.TarFile.open(img_path) as tar:
tar.extractall(basepath)
fname = os.path.basename(img_path.split(".")[0])
img_path = os.path.join(basepath, fname)
return img_path
@fixture(scope="function")
def svm_train_npz():
return "tests/data/svm/train/img_data.npz"
# GENERATOR: DRAW
@fixture(params=["single_reg.csv"])
def draw_mosaic_fname(request):
return os.path.join("tests/data/svm/prep", request.param)
@fixture(params=["*", "ibl*", ""])
def draw_mosaic_pattern(request):
return request.param
# PREPROCESSOR: SCRUB
@fixture(scope="function")
def raw_csv_file():
return "tests/data/svm/prep/single_scrub.csv"
@fixture(scope="function")
def h5_data():
return "tests/data/svm/prep/single_reg"
@fixture(scope="function")
def scrubbed_cols_file():
return "tests/data/svm/prep/scrubbed_cols.csv"
@fixture(scope="function")
def scraped_fits_file():
return "tests/data/svm/prep/scraped_fits.csv"
@fixture(scope="function")
def scraped_mast_file():
return "tests/data/svm/prep/scraped_mast.csv"
# CAL
@fixture(scope="function")
def cal_labeled_dataset():
return "tests/data/cal/train/training.csv"
|
inductive bool' : Type
| true' | false'
open bool'
def or' : bool' -> bool' -> bool'
| true' _ := true'
| _ true' := true'
| _ _ := false'
infixr `||` := or'
example : true' || false' = true' := rfl
example : true' || true' = true' := rfl
example : false' || false' = false' := rfl
example : false' || true' = true' := rfl
def and' : bool' -> bool' -> bool'
| false' _ := false'
| true' false' := false'
| _ _ := true'
infixr `&&` := and'
def not' : bool' → bool'
| true' := false'
| _ := true'
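-- Sanity checks for and' and not', mirroring the or' examples above.
example : true' && true' = true' := rfl
example : true' && false' = false' := rfl
example : false' && true' = false' := rfl
example : false' && false' = false' := rfl
example : not' true' = false' := rfl
example : not' false' = true' := rfl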
|
import numpy as np
from pygrape import run_grape, UnitarySetup
# Drift Hamiltonian of a two-level system (energy splitting on |1>)
H = np.array([
    [0, 0],
    [0, 1],
])
# X-quadrature control Hamiltonian
Hc_x = np.array([
    [0, 1],
    [1, 0],
])
# Y-quadrature control Hamiltonian
Hc_y = np.array([
    [0, 1j],
    [-1j, 0],
])
# Target unitary: a bit flip (pi pulse)
U_target = np.array([
    [0, 1],
    [1, 0],
])
setup = UnitarySetup(H, [Hc_x, Hc_y], U_target)
init_ctrls = 1e-3 * np.ones((2, 200))
result = run_grape(init_ctrls, setup, dt=.2)
import matplotlib.pyplot as plt
plt.plot(result.ts, result.controls[0], label='x')
plt.plot(result.ts, result.controls[1], label='y')
plt.legend()
# plt.savefig('../docs/_static/pi_pulse_xy_ctrls.png')
plt.show()
|
/*
* Copyright 2015-2019 Autoware Foundation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <boost/thread.hpp>
#include <geometry_msgs/PoseStamped.h>
#include <geometry_msgs/Twist.h>
#include <geometry_msgs/TwistStamped.h>
#include <ros/ros.h>
using namespace std;
class TwistCmdConverter {
private:
boost::mutex m_mutex_;
// ROS
ros::NodeHandle nh_;
ros::Subscriber sub_twist_;
ros::Publisher pub_twist_;
int queue_size_;
string twist_in_, twist_out_;
public:
TwistCmdConverter()
: nh_("~"), queue_size_(100), twist_in_("/twist_in"),
twist_out_("/twist_out") {
    // Subscribe to the stamped twist topic and republish it without the header.
    sub_twist_ = nh_.subscribe(twist_in_, queue_size_,
                               &TwistCmdConverter::twist_cb, this);
    pub_twist_ = nh_.advertise<geometry_msgs::Twist>(twist_out_, queue_size_);
}
  void twist_cb(const geometry_msgs::TwistStampedConstPtr &msg) {
    // Nothing to do if no one is listening.
    if (pub_twist_.getNumSubscribers() <= 0) {
      return;
    }
geometry_msgs::Twist output;
output = msg->twist;
pub_twist_.publish(output);
}
};
/* ---[ */
int main(int argc, char **argv) {
// ROS init
ros::init(argc, argv, "twist_cmd_converter",
ros::init_options::AnonymousName);
TwistCmdConverter p;
ros::spin();
  return 0;
}
|
#define BOOST_BIND_NO_PLACEHOLDERS
#include "shift/service/service_host_impl.hpp"
#include "shift/service/basic_service.hpp"
#include "shift/service/detail/guid_generator.hpp"
#include <shift/network/network_host.hpp>
#include <shift/network/socket_base.hpp>
#include <shift/network/tcp_socket_client.hpp>
#include <shift/network/tcp_socket_listener.hpp>
#include <shift/network/udp_socket.hpp>
#include <shift/serialization/pair.hpp>
#include <shift/serialization/compact/inputarchive.hpp>
#include <shift/serialization/compact/outputarchive.hpp>
#include <shift/log/log.hpp>
#include <shift/platform/environment.hpp>
#include <shift/core/boost_disable_warnings.hpp>
#include <boost/iostreams/device/back_inserter.hpp>
#include <shift/core/boost_restore_warnings.hpp>
#include <iostream>
namespace shift::service
{
service_host::impl::impl(service_host& host)
: host(host), host_guid(detail::generate_guid())
{
}
service_host::impl::~impl()
{
auto& network_host = network::network_host::singleton_instance();
network_host.on_update_statistics = nullptr;
}
bool service_host::impl::start(
serialization::protocol_version_t new_protocol_version,
serialization::protocol_version_t new_required_version)
{
std::lock_guard lock(start_mutex);
if (running)
return false;
if (bind_addresses.empty())
{
bind_addresses.emplace_back(
boost::asio::ip::address_v4::from_string("0.0.0.0"));
bind_addresses.emplace_back(
boost::asio::ip::address_v6::from_string("0::0"));
}
protocol_version = new_protocol_version;
required_version = new_required_version;
using namespace std::placeholders;
for (const auto& bind_address : bind_addresses)
{
auto multicast_socket = std::make_shared<network::udp_socket>();
multicast_socket->on_receive =
std::bind(&service_host::impl::on_receive_multicast, this, _1, _2, _3);
if ((bind_address.is_v4() &&
multicast_socket->open(bind_address, multicast_port,
multicast_address4)) ||
(bind_address.is_v6() &&
multicast_socket->open(bind_address, multicast_port,
multicast_address6)))
{
multicast_sockets.emplace_back(std::move(multicast_socket));
}
else
{
for (auto& socket : multicast_sockets)
socket->close();
multicast_sockets.clear();
return false;
}
}
running = true;
return true;
}
void service_host::impl::publish()
{
std::lock_guard lock(service_provider_mutex);
auto count = static_cast<std::uint16_t>(service_providers.size());
BOOST_ASSERT(service_providers.size() <=
std::numeric_limits<decltype(count)>::max());
for (auto& multicast_socket : multicast_sockets)
{
auto bind_address = multicast_socket->local_endpoint().address();
std::vector<char> buffer;
{
serialization::compact_output_archive<> archive(1);
archive.push(boost::iostreams::back_inserter(buffer));
archive << host_guid << count;
for (const auto& provider : service_providers)
{
archive << provider->service_guid() << provider->service_uid()
<< provider->interface_uids() << provider->service_tag()
<< provider->service_port(bind_address);
}
}
if (bind_address.is_v4())
{
multicast_socket->post(
std::move(buffer),
boost::asio::ip::udp::endpoint(multicast_address4, multicast_port));
}
else
{
multicast_socket->post(
std::move(buffer),
boost::asio::ip::udp::endpoint(multicast_address6, multicast_port));
}
}
}
void service_host::impl::on_receive_multicast(
network::udp_socket& socket, boost::asio::ip::udp::endpoint sender,
std::vector<char> buffer)
{
remote_service_provider service_provider;
std::uint16_t remote_service_count;
serialization::compact_input_archive<> archive(1);
archive.push(boost::iostreams::array_source(buffer.data(), buffer.size()));
archive >> service_provider.host_guid >> remote_service_count;
/// ToDo: Catch exceptions.
//{
// if (service_host::singleton_instance().debug_multicasts)
// log::warning() << "Ignoring ill-formed multicast message.";
// return;
//}
if (service_provider.host_guid == host_guid)
return; // Drop own packets.
// service_provider.lastSeen = std::chrono::system_clock::now();
if (remote_service_count != 0u)
{
std::lock_guard service_client_lock(service_client_mutex);
std::lock_guard remote_service_lock(known_remote_services_mutex);
for (; remote_service_count > 0; --remote_service_count)
{
archive >> service_provider.service_guid >>
service_provider.service_uid >> service_provider.interface_uids >>
service_provider.service_tag >> service_provider.remote_port;
service_provider.local_address = socket.local_endpoint().address();
service_provider.remote_address = sender.address();
// Make sure each remote service is only added once.
if (known_remote_services.find(service_provider.service_guid) !=
known_remote_services.end())
{
// known_remote_service_iter->lastSeen =
// std::chrono::system_clock::now();
continue;
}
known_remote_services.insert(service_provider);
for (auto& service_client : service_clients)
{
if (service_provider.service_uid == service_client->service_uid())
{
service_client->add_service_provider(
service_provider.host_guid, service_provider.service_guid,
service_provider.interface_uids, service_provider.service_tag,
service_provider.local_address,
boost::asio::ip::tcp::endpoint(service_provider.remote_address,
service_provider.remote_port));
}
}
}
}
}
}
|
/-
Copyright (c) 2020 Anne Baanen. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Nathaniel Thomas, Jeremy Avigad, Johannes Hölzl, Mario Carneiro, Anne Baanen,
Frédéric Dupuis, Heather Macbeth
-/
import algebra.hom.group_action
import algebra.module.pi
import algebra.star.basic
import data.set.pointwise.smul
import algebra.ring.comp_typeclasses
/-!
# (Semi)linear maps
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file we define
* `linear_map σ M M₂`, `M →ₛₗ[σ] M₂` : a semilinear map between two `module`s. Here,
`σ` is a `ring_hom` from `R` to `R₂` and an `f : M →ₛₗ[σ] M₂` satisfies
`f (c • x) = (σ c) • (f x)`. We recover plain linear maps by choosing `σ` to be `ring_hom.id R`.
This is denoted by `M →ₗ[R] M₂`. We also add the notation `M →ₗ⋆[R] M₂` for star-linear maps.
* `is_linear_map R f` : predicate saying that `f : M → M₂` is a linear map. (Note that this
was not generalized to semilinear maps.)
We then provide `linear_map` with the following instances:
* `linear_map.add_comm_monoid` and `linear_map.add_comm_group`: the elementwise addition structures
corresponding to addition in the codomain
* `linear_map.distrib_mul_action` and `linear_map.module`: the elementwise scalar action structures
corresponding to applying the action in the codomain.
* `module.End.semiring` and `module.End.ring`: the (semi)ring of endomorphisms formed by taking the
additive structure above with composition as multiplication.
## Implementation notes
To ensure that composition works smoothly for semilinear maps, we use the typeclasses
`ring_hom_comp_triple`, `ring_hom_inv_pair` and `ring_hom_surjective` from
`algebra/ring/comp_typeclasses`.
## Notation
* Throughout the file, we denote regular linear maps by `fₗ`, `gₗ`, etc., and semilinear maps
  by `f`, `g`, etc.
## TODO
* Parts of this file have not yet been generalized to semilinear maps (i.e. `compatible_smul`)
## Tags
linear map
-/
assert_not_exists submonoid
assert_not_exists finset
open function
universes u u' v w x y z
variables {R : Type*} {R₁ : Type*} {R₂ : Type*} {R₃ : Type*}
variables {k : Type*} {S : Type*} {S₃ : Type*} {T : Type*}
variables {M : Type*} {M₁ : Type*} {M₂ : Type*} {M₃ : Type*}
variables {N₁ : Type*} {N₂ : Type*} {N₃ : Type*} {ι : Type*}
/-- A map `f` between modules over a semiring is linear if it satisfies the two properties
`f (x + y) = f x + f y` and `f (c • x) = c • f x`. The predicate `is_linear_map R f` asserts this
property. A bundled version is available with `linear_map`, and should be favored over
`is_linear_map` most of the time. -/
structure is_linear_map (R : Type u) {M : Type v} {M₂ : Type w}
[semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module R M₂]
(f : M → M₂) : Prop :=
(map_add : ∀ x y, f (x + y) = f x + f y)
(map_smul : ∀ (c : R) x, f (c • x) = c • f x)
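/- Added example (sketch): the identity function is linear over any semiring;
both fields hold by `rfl`. -/
example (R' : Type*) (M' : Type*) [semiring R'] [add_comm_monoid M'] [module R' M'] :
  is_linear_map R' (λ x : M', x) :=
⟨λ _ _, rfl, λ _ _, rfl⟩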
section
set_option old_structure_cmd true
/-- A map `f` between an `R`-module and an `S`-module over a ring homomorphism `σ : R →+* S`
is semilinear if it satisfies the two properties `f (x + y) = f x + f y` and
`f (c • x) = (σ c) • f x`. Elements of `linear_map σ M M₂` (available under the notation
`M →ₛₗ[σ] M₂`) are bundled versions of such maps. For plain linear maps (i.e. for which
`σ = ring_hom.id R`), the notation `M →ₗ[R] M₂` is available. An unbundled version of plain linear
maps is available with the predicate `is_linear_map`, but it should be avoided most of the time. -/
structure linear_map {R : Type*} {S : Type*} [semiring R] [semiring S] (σ : R →+* S)
(M : Type*) (M₂ : Type*)
[add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module S M₂]
extends add_hom M M₂ :=
(map_smul' : ∀ (r : R) (x : M), to_fun (r • x) = (σ r) • to_fun x)
/-- The `add_hom` underlying a `linear_map`. -/
add_decl_doc linear_map.to_add_hom
notation M ` →ₛₗ[`:25 σ:25 `] `:0 M₂:0 := linear_map σ M M₂
notation M ` →ₗ[`:25 R:25 `] `:0 M₂:0 := linear_map (ring_hom.id R) M M₂
notation M ` →ₗ⋆[`:25 R:25 `] `:0 M₂:0 := linear_map (star_ring_end R) M M₂
/-- `semilinear_map_class F σ M M₂` asserts `F` is a type of bundled `σ`-semilinear maps `M → M₂`.
See also `linear_map_class F R M M₂` for the case where `σ` is the identity map on `R`.
A map `f` between an `R`-module and an `S`-module over a ring homomorphism `σ : R →+* S`
is semilinear if it satisfies the two properties `f (x + y) = f x + f y` and
`f (c • x) = (σ c) • f x`. -/
class semilinear_map_class (F : Type*) {R S : out_param Type*} [semiring R] [semiring S]
(σ : out_param $ R →+* S) (M M₂ : out_param Type*)
[add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module S M₂]
extends add_hom_class F M M₂ :=
(map_smulₛₗ : ∀ (f : F) (r : R) (x : M), f (r • x) = (σ r) • f x)
end
-- `σ` becomes a metavariable but that's fine because it's an `out_param`
attribute [nolint dangerous_instance] semilinear_map_class.to_add_hom_class
export semilinear_map_class (map_smulₛₗ)
attribute [simp] map_smulₛₗ
/-- `linear_map_class F R M M₂` asserts `F` is a type of bundled `R`-linear maps `M → M₂`.
This is an abbreviation for `semilinear_map_class F (ring_hom.id R) M M₂`.
-/
abbreviation linear_map_class (F : Type*) (R M M₂ : out_param Type*)
[semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module R M₂] :=
semilinear_map_class F (ring_hom.id R) M M₂
namespace semilinear_map_class
variables (F : Type*)
variables [semiring R] [semiring S]
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [add_comm_monoid N₁] [add_comm_monoid N₂] [add_comm_monoid N₃]
variables [module R M] [module R M₂] [module S M₃]
variables {σ : R →+* S}
@[priority 100, nolint dangerous_instance] -- `σ` is an `out_param` so it's not dangerous
instance [semilinear_map_class F σ M M₃] : add_monoid_hom_class F M M₃ :=
{ coe := λ f, (f : M → M₃),
map_zero := λ f, show f 0 = 0, by { rw [← zero_smul R (0 : M), map_smulₛₗ], simp },
.. semilinear_map_class.to_add_hom_class F σ M M₃ }
@[priority 100, nolint dangerous_instance] -- `R` is an `out_param` so it's not dangerous
instance [linear_map_class F R M M₂] : distrib_mul_action_hom_class F R M M₂ :=
{ coe := λ f, (f : M → M₂),
map_smul := λ f c x, by rw [map_smulₛₗ, ring_hom.id_apply],
.. semilinear_map_class.add_monoid_hom_class F }
variables {F} (f : F) [i : semilinear_map_class F σ M M₃]
include i
lemma map_smul_inv {σ' : S →+* R} [ring_hom_inv_pair σ σ'] (c : S) (x : M) :
c • f x = f (σ' c • x) :=
by simp
end semilinear_map_class
namespace linear_map
section add_comm_monoid
variables [semiring R] [semiring S]
section
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [add_comm_monoid N₁] [add_comm_monoid N₂] [add_comm_monoid N₃]
variables [module R M] [module R M₂] [module S M₃]
variables {σ : R →+* S}
instance : semilinear_map_class (M →ₛₗ[σ] M₃) σ M M₃ :=
{ coe := linear_map.to_fun,
coe_injective' := λ f g h, by cases f; cases g; congr',
map_add := linear_map.map_add',
map_smulₛₗ := linear_map.map_smul' }
/-- Helper instance for when there's too many metavariables to apply `fun_like.has_coe_to_fun`
directly.
-/
instance : has_coe_to_fun (M →ₛₗ[σ] M₃) (λ _, M → M₃) := ⟨λ f, f⟩
/-- The `distrib_mul_action_hom` underlying a `linear_map`. -/
def to_distrib_mul_action_hom (f : M →ₗ[R] M₂) : distrib_mul_action_hom R M M₂ :=
{ map_zero' := show f 0 = 0, from map_zero f, ..f }
@[simp] lemma to_fun_eq_coe {f : M →ₛₗ[σ] M₃} : f.to_fun = (f : M → M₃) := rfl
@[ext] theorem ext {f g : M →ₛₗ[σ] M₃} (h : ∀ x, f x = g x) : f = g := fun_like.ext f g h
/-- Copy of a `linear_map` with a new `to_fun` equal to the old one. Useful to fix definitional
equalities. -/
protected def copy (f : M →ₛₗ[σ] M₃) (f' : M → M₃) (h : f' = ⇑f) : M →ₛₗ[σ] M₃ :=
{ to_fun := f',
map_add' := h.symm ▸ f.map_add',
map_smul' := h.symm ▸ f.map_smul' }
@[simp] lemma coe_copy (f : M →ₛₗ[σ] M₃) (f' : M → M₃) (h : f' = ⇑f) : ⇑(f.copy f' h) = f' := rfl
lemma copy_eq (f : M →ₛₗ[σ] M₃) (f' : M → M₃) (h : f' = ⇑f) : f.copy f' h = f := fun_like.ext' h
/-- See Note [custom simps projection]. -/
protected def simps.apply {R S : Type*} [semiring R] [semiring S] (σ : R →+* S)
(M M₃ : Type*) [add_comm_monoid M] [add_comm_monoid M₃] [module R M] [module S M₃]
(f : M →ₛₗ[σ] M₃) : M → M₃ := f
initialize_simps_projections linear_map (to_fun → apply)
@[simp] lemma coe_mk {σ : R →+* S} (f : M → M₃) (h₁ h₂) :
((linear_map.mk f h₁ h₂ : M →ₛₗ[σ] M₃) : M → M₃) = f := rfl
/-- Identity map as a `linear_map` -/
def id : M →ₗ[R] M :=
{ to_fun := id, ..distrib_mul_action_hom.id R }
lemma id_apply (x : M) :
@id R M _ _ _ x = x := rfl
@[simp, norm_cast] lemma id_coe : ((linear_map.id : M →ₗ[R] M) : M → M) = _root_.id := rfl
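/- Added sanity check (sketch): `linear_map.id` acts as the identity function,
definitionally. -/
example (x : M) : (linear_map.id : M →ₗ[R] M) x = x := rfl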
end
section
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [add_comm_monoid N₁] [add_comm_monoid N₂] [add_comm_monoid N₃]
variables [module R M] [module R M₂] [module S M₃]
variables (σ : R →+* S)
variables (fₗ gₗ : M →ₗ[R] M₂) (f g : M →ₛₗ[σ] M₃)
theorem is_linear : is_linear_map R fₗ := ⟨fₗ.map_add', fₗ.map_smul'⟩
variables {fₗ gₗ f g σ}
theorem coe_injective : @injective (M →ₛₗ[σ] M₃) (M → M₃) coe_fn :=
fun_like.coe_injective
protected lemma congr_arg {x x' : M} : x = x' → f x = f x' :=
fun_like.congr_arg f
/-- If two linear maps are equal, they are equal at each point. -/
protected lemma congr_fun (h : f = g) (x : M) : f x = g x :=
fun_like.congr_fun h x
theorem ext_iff : f = g ↔ ∀ x, f x = g x :=
fun_like.ext_iff
@[simp] lemma mk_coe (f : M →ₛₗ[σ] M₃) (h₁ h₂) :
(linear_map.mk f h₁ h₂ : M →ₛₗ[σ] M₃) = f := ext $ λ _, rfl
variables (fₗ gₗ f g)
protected lemma map_add (x y : M) : f (x + y) = f x + f y := map_add f x y
protected lemma map_zero : f 0 = 0 := map_zero f
-- TODO: `simp` isn't picking up `map_smulₛₗ` for `linear_map`s without specifying `map_smulₛₗ f`
@[simp] protected lemma map_smulₛₗ (c : R) (x : M) : f (c • x) = (σ c) • f x := map_smulₛₗ f c x
protected lemma map_smul (c : R) (x : M) : fₗ (c • x) = c • fₗ x := map_smul fₗ c x
protected lemma map_smul_inv {σ' : S →+* R} [ring_hom_inv_pair σ σ'] (c : S) (x : M) :
c • f x = f (σ' c • x) :=
by simp
-- TODO: generalize to `zero_hom_class`
@[simp] lemma map_eq_zero_iff (h : function.injective f) {x : M} : f x = 0 ↔ x = 0 :=
⟨λ w, by { apply h, simp [w], }, λ w, by { subst w, simp, }⟩
section pointwise
open_locale pointwise
variables (M M₃ σ) {F : Type*} (h : F)
@[simp] lemma _root_.image_smul_setₛₗ [semilinear_map_class F σ M M₃] (c : R) (s : set M) :
h '' (c • s) = (σ c) • h '' s :=
begin
apply set.subset.antisymm,
{ rintros x ⟨y, ⟨z, zs, rfl⟩, rfl⟩,
exact ⟨h z, set.mem_image_of_mem _ zs, (map_smulₛₗ _ _ _).symm ⟩ },
{ rintros x ⟨y, ⟨z, hz, rfl⟩, rfl⟩,
exact (set.mem_image _ _ _).2 ⟨c • z, set.smul_mem_smul_set hz, map_smulₛₗ _ _ _⟩ }
end
lemma _root_.preimage_smul_setₛₗ [semilinear_map_class F σ M M₃] {c : R} (hc : is_unit c)
(s : set M₃) : h ⁻¹' (σ c • s) = c • h ⁻¹' s :=
begin
apply set.subset.antisymm,
{ rintros x ⟨y, ys, hy⟩,
refine ⟨(hc.unit.inv : R) • x, _, _⟩,
{ simp only [←hy, smul_smul, set.mem_preimage, units.inv_eq_coe_inv, map_smulₛₗ h, ← map_mul,
is_unit.coe_inv_mul, one_smul, map_one, ys] },
{ simp only [smul_smul, is_unit.mul_coe_inv, one_smul, units.inv_eq_coe_inv] } },
{ rintros x ⟨y, hy, rfl⟩,
refine ⟨h y, hy, by simp only [ring_hom.id_apply, map_smulₛₗ h]⟩ }
end
variables (R M₂)
lemma _root_.image_smul_set [linear_map_class F R M M₂] (c : R) (s : set M) :
h '' (c • s) = c • h '' s :=
image_smul_setₛₗ _ _ _ h c s
lemma _root_.preimage_smul_set [linear_map_class F R M M₂] {c : R} (hc : is_unit c) (s : set M₂) :
h ⁻¹' (c • s) = c • h ⁻¹' s :=
preimage_smul_setₛₗ _ _ _ h hc s
end pointwise
variables (M M₂)
/--
A typeclass for `has_smul` structures which can be moved through a `linear_map`.
This typeclass is generated automatically from a `is_scalar_tower` instance, but exists so that
we can also add an instance for `add_comm_group.int_module`, allowing `z •` to be moved even if
`R` does not support negation.
-/
class compatible_smul (R S : Type*) [semiring S] [has_smul R M]
[module S M] [has_smul R M₂] [module S M₂] :=
(map_smul : ∀ (fₗ : M →ₗ[S] M₂) (c : R) (x : M), fₗ (c • x) = c • fₗ x)
variables {M M₂}
@[priority 100]
instance is_scalar_tower.compatible_smul
{R S : Type*} [semiring S] [has_smul R S]
[has_smul R M] [module S M] [is_scalar_tower R S M]
[has_smul R M₂] [module S M₂] [is_scalar_tower R S M₂] : compatible_smul M M₂ R S :=
⟨λ fₗ c x, by rw [← smul_one_smul S c x, ← smul_one_smul S c (fₗ x), map_smul]⟩
@[simp, priority 900]
lemma map_smul_of_tower {R S : Type*} [semiring S] [has_smul R M]
[module S M] [has_smul R M₂] [module S M₂]
[compatible_smul M M₂ R S] (fₗ : M →ₗ[S] M₂) (c : R) (x : M) :
fₗ (c • x) = c • fₗ x :=
compatible_smul.map_smul fₗ c x
/-- Convert a linear map to an additive map. -/
def to_add_monoid_hom : M →+ M₃ :=
{ to_fun := f,
map_zero' := f.map_zero,
map_add' := f.map_add }
@[simp] lemma to_add_monoid_hom_coe : ⇑f.to_add_monoid_hom = f := rfl
section restrict_scalars
variables (R) [module S M] [module S M₂] [compatible_smul M M₂ R S]
/-- If `M` and `M₂` are both `R`-modules and `S`-modules and `R`-module structures
are defined by an action of `R` on `S` (formally, we have two scalar towers), then any `S`-linear
map from `M` to `M₂` is `R`-linear.
See also `linear_map.map_smul_of_tower`. -/
def restrict_scalars (fₗ : M →ₗ[S] M₂) : M →ₗ[R] M₂ :=
{ to_fun := fₗ,
map_add' := fₗ.map_add,
map_smul' := fₗ.map_smul_of_tower }
@[simp] lemma coe_restrict_scalars (fₗ : M →ₗ[S] M₂) : ⇑(restrict_scalars R fₗ) = fₗ :=
rfl
lemma restrict_scalars_apply (fₗ : M →ₗ[S] M₂) (x) : restrict_scalars R fₗ x = fₗ x :=
rfl
lemma restrict_scalars_injective :
function.injective (restrict_scalars R : (M →ₗ[S] M₂) → (M →ₗ[R] M₂)) :=
λ fₗ gₗ h, ext (linear_map.congr_fun h : _)
@[simp]
lemma restrict_scalars_inj (fₗ gₗ : M →ₗ[S] M₂) :
fₗ.restrict_scalars R = gₗ.restrict_scalars R ↔ fₗ = gₗ :=
(restrict_scalars_injective R).eq_iff
end restrict_scalars
variable {R}
theorem to_add_monoid_hom_injective :
function.injective (to_add_monoid_hom : (M →ₛₗ[σ] M₃) → (M →+ M₃)) :=
λ f g h, ext $ add_monoid_hom.congr_fun h
/-- If two `σ`-linear maps from `R` are equal on `1`, then they are equal. -/
@[ext] theorem ext_ring {f g : R →ₛₗ[σ] M₃} (h : f 1 = g 1) : f = g :=
ext $ λ x, by rw [← mul_one x, ← smul_eq_mul, f.map_smulₛₗ, g.map_smulₛₗ, h]
theorem ext_ring_iff {σ : R →+* R} {f g : R →ₛₗ[σ] M} : f = g ↔ f 1 = g 1 :=
⟨λ h, h ▸ rfl, ext_ring⟩
@[ext] theorem ext_ring_op {σ : Rᵐᵒᵖ →+* S} {f g : R →ₛₗ[σ] M₃} (h : f 1 = g 1) : f = g :=
ext $ λ x, by rw [← one_mul x, ← op_smul_eq_mul, f.map_smulₛₗ, g.map_smulₛₗ, h]
end
/-- Interpret a `ring_hom` `f` as an `f`-semilinear map. -/
@[simps]
def _root_.ring_hom.to_semilinear_map (f : R →+* S) : R →ₛₗ[f] S :=
{ to_fun := f,
map_smul' := f.map_mul,
.. f}
section
variables [semiring R₁] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables {module_M₁ : module R₁ M₁} {module_M₂ : module R₂ M₂} {module_M₃ : module R₃ M₃}
variables {σ₁₂ : R₁ →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R₁ →+* R₃}
variables [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
variables (f : M₂ →ₛₗ[σ₂₃] M₃) (g : M₁ →ₛₗ[σ₁₂] M₂)
include module_M₁ module_M₂ module_M₃
/-- Composition of two linear maps is a linear map -/
def comp : M₁ →ₛₗ[σ₁₃] M₃ :=
{ to_fun := f ∘ g,
map_add' := by simp only [map_add, forall_const, eq_self_iff_true, comp_app],
map_smul' := λ r x, by rw [comp_app, map_smulₛₗ, map_smulₛₗ, ring_hom_comp_triple.comp_apply] }
omit module_M₁ module_M₂ module_M₃
infixr ` ∘ₗ `:80 := @linear_map.comp _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
(ring_hom.id _) (ring_hom.id _) (ring_hom.id _) ring_hom_comp_triple.ids
include σ₁₃
lemma comp_apply (x : M₁) : f.comp g x = f (g x) := rfl
omit σ₁₃
include σ₁₃
@[simp, norm_cast] lemma coe_comp : (f.comp g : M₁ → M₃) = f ∘ g := rfl
omit σ₁₃
@[simp] theorem comp_id : f.comp id = f :=
linear_map.ext $ λ x, rfl
@[simp] theorem id_comp : id.comp f = f :=
linear_map.ext $ λ x, rfl
variables {f g} {f' : M₂ →ₛₗ[σ₂₃] M₃} {g' : M₁ →ₛₗ[σ₁₂] M₂}
include σ₁₃
theorem cancel_right (hg : function.surjective g) :
f.comp g = f'.comp g ↔ f = f' :=
⟨λ h, ext $ hg.forall.2 (ext_iff.1 h), λ h, h ▸ rfl⟩
theorem cancel_left (hf : function.injective f) :
f.comp g = f.comp g' ↔ g = g' :=
⟨λ h, ext $ λ x, hf $ by rw [← comp_apply, h, comp_apply], λ h, h ▸ rfl⟩
omit σ₁₃
end
variables [add_comm_monoid M] [add_comm_monoid M₁] [add_comm_monoid M₂] [add_comm_monoid M₃]
/-- If a function `g` is a left and right inverse of a linear map `f`, then `g` is linear itself. -/
def inverse [module R M] [module S M₂] {σ : R →+* S} {σ' : S →+* R} [ring_hom_inv_pair σ σ']
(f : M →ₛₗ[σ] M₂) (g : M₂ → M) (h₁ : left_inverse g f) (h₂ : right_inverse g f) :
M₂ →ₛₗ[σ'] M :=
by dsimp [left_inverse, function.right_inverse] at h₁ h₂; exact
{ to_fun := g,
map_add' := λ x y, by { rw [← h₁ (g (x + y)), ← h₁ (g x + g y)]; simp [h₂] },
map_smul' := λ a b, by { rw [← h₁ (g (a • b)), ← h₁ ((σ' a) • g b)], simp [h₂] } }
end add_comm_monoid
section add_comm_group
variables [semiring R] [semiring S] [add_comm_group M] [add_comm_group M₂]
variables {module_M : module R M} {module_M₂ : module S M₂} {σ : R →+* S}
variables (f : M →ₛₗ[σ] M₂)
protected lemma map_neg (x : M) : f (- x) = - f x := map_neg f x
protected lemma map_sub (x y : M) : f (x - y) = f x - f y := map_sub f x y
instance compatible_smul.int_module
{S : Type*} [semiring S] [module S M] [module S M₂] : compatible_smul M M₂ ℤ S :=
⟨λ fₗ c x, begin
induction c using int.induction_on,
case hz : { simp },
case hp : n ih { simp [add_smul, ih] },
case hn : n ih { simp [sub_smul, ih] }
end⟩
instance compatible_smul.units {R S : Type*}
[monoid R] [mul_action R M] [mul_action R M₂] [semiring S] [module S M] [module S M₂]
[compatible_smul M M₂ R S] :
compatible_smul M M₂ Rˣ S :=
⟨λ fₗ c x, (compatible_smul.map_smul fₗ (c : R) x : _)⟩
end add_comm_group
end linear_map
namespace module
/-- `g : R →+* S` is `R`-linear when the module structure on `S` is `module.comp_hom S g`. -/
@[simps]
def comp_hom.to_linear_map {R S : Type*} [semiring R] [semiring S] (g : R →+* S) :
(by haveI := comp_hom S g; exact (R →ₗ[R] S)) :=
by exact
{ to_fun := (g : R → S),
map_add' := g.map_add,
map_smul' := g.map_mul }
end module
namespace distrib_mul_action_hom
variables [semiring R] [add_comm_monoid M] [add_comm_monoid M₂] [module R M] [module R M₂]
/-- A `distrib_mul_action_hom` between two modules is a linear map. -/
def to_linear_map (fₗ : M →+[R] M₂) : M →ₗ[R] M₂ := { ..fₗ }
instance : has_coe (M →+[R] M₂) (M →ₗ[R] M₂) := ⟨to_linear_map⟩
@[simp] lemma to_linear_map_eq_coe (f : M →+[R] M₂) :
f.to_linear_map = ↑f :=
rfl
@[simp, norm_cast] lemma coe_to_linear_map (f : M →+[R] M₂) :
((f : M →ₗ[R] M₂) : M → M₂) = f :=
rfl
lemma to_linear_map_injective {f g : M →+[R] M₂} (h : (f : M →ₗ[R] M₂) = (g : M →ₗ[R] M₂)) :
f = g :=
by { ext m, exact linear_map.congr_fun h m, }
end distrib_mul_action_hom
namespace is_linear_map
section add_comm_monoid
variables [semiring R] [add_comm_monoid M] [add_comm_monoid M₂]
variables [module R M] [module R M₂]
include R
/-- Convert an `is_linear_map` predicate to a `linear_map` -/
def mk' (f : M → M₂) (H : is_linear_map R f) : M →ₗ[R] M₂ :=
{ to_fun := f, map_add' := H.1, map_smul' := H.2 }
@[simp] theorem mk'_apply {f : M → M₂} (H : is_linear_map R f) (x : M) :
mk' f H x = f x := rfl
lemma is_linear_map_smul {R M : Type*} [comm_semiring R] [add_comm_monoid M] [module R M]
(c : R) :
is_linear_map R (λ (z : M), c • z) :=
begin
refine is_linear_map.mk (smul_add c) _,
intros _ _,
simp only [smul_smul, mul_comm]
end
lemma is_linear_map_smul' {R M : Type*} [semiring R] [add_comm_monoid M] [module R M] (a : M) :
is_linear_map R (λ (c : R), c • a) :=
is_linear_map.mk (λ x y, add_smul x y a) (λ x y, mul_smul x y a)
variables {f : M → M₂} (lin : is_linear_map R f)
include M M₂ lin
lemma map_zero : f (0 : M) = (0 : M₂) := (lin.mk' f).map_zero
end add_comm_monoid
section add_comm_group
variables [semiring R] [add_comm_group M] [add_comm_group M₂]
variables [module R M] [module R M₂]
include R
lemma is_linear_map_neg :
is_linear_map R (λ (z : M), -z) :=
is_linear_map.mk neg_add (λ x y, (smul_neg x y).symm)
variables {f : M → M₂} (lin : is_linear_map R f)
include M M₂ lin
lemma map_neg (x : M) : f (- x) = - f x := (lin.mk' f).map_neg x
lemma map_sub (x y) : f (x - y) = f x - f y := (lin.mk' f).map_sub x y
end add_comm_group
end is_linear_map
/-- Linear endomorphisms of a module, with associated ring structure
`module.End.semiring` and algebra structure `module.End.algebra`. -/
abbreviation module.End (R : Type u) (M : Type v)
[semiring R] [add_comm_monoid M] [module R M] := M →ₗ[R] M
/-- Reinterpret an additive homomorphism as a `ℕ`-linear map. -/
def add_monoid_hom.to_nat_linear_map [add_comm_monoid M] [add_comm_monoid M₂] (f : M →+ M₂) :
M →ₗ[ℕ] M₂ :=
{ to_fun := f, map_add' := f.map_add, map_smul' := map_nsmul f }
lemma add_monoid_hom.to_nat_linear_map_injective [add_comm_monoid M] [add_comm_monoid M₂] :
function.injective (@add_monoid_hom.to_nat_linear_map M M₂ _ _) :=
by { intros f g h, ext, exact linear_map.congr_fun h x }
/-- Reinterpret an additive homomorphism as a `ℤ`-linear map. -/
def add_monoid_hom.to_int_linear_map [add_comm_group M] [add_comm_group M₂] (f : M →+ M₂) :
M →ₗ[ℤ] M₂ :=
{ to_fun := f, map_add' := f.map_add, map_smul' := map_zsmul f }
lemma add_monoid_hom.to_int_linear_map_injective [add_comm_group M] [add_comm_group M₂] :
function.injective (@add_monoid_hom.to_int_linear_map M M₂ _ _) :=
by { intros f g h, ext, exact linear_map.congr_fun h x }
@[simp] lemma add_monoid_hom.coe_to_int_linear_map [add_comm_group M] [add_comm_group M₂]
(f : M →+ M₂) :
⇑f.to_int_linear_map = f := rfl
/-- Reinterpret an additive homomorphism as a `ℚ`-linear map. -/
def add_monoid_hom.to_rat_linear_map [add_comm_group M] [module ℚ M]
[add_comm_group M₂] [module ℚ M₂] (f : M →+ M₂) :
M →ₗ[ℚ] M₂ :=
{ map_smul' := map_rat_smul f, ..f }
lemma add_monoid_hom.to_rat_linear_map_injective
[add_comm_group M] [module ℚ M] [add_comm_group M₂] [module ℚ M₂] :
function.injective (@add_monoid_hom.to_rat_linear_map M M₂ _ _ _ _) :=
by { intros f g h, ext, exact linear_map.congr_fun h x }
@[simp] lemma add_monoid_hom.coe_to_rat_linear_map [add_comm_group M] [module ℚ M]
[add_comm_group M₂] [module ℚ M₂] (f : M →+ M₂) :
⇑f.to_rat_linear_map = f := rfl
namespace linear_map
section has_smul
variables [semiring R] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [module R M] [module R₂ M₂] [module R₃ M₃]
variables {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃} [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
variables [monoid S] [distrib_mul_action S M₂] [smul_comm_class R₂ S M₂]
variables [monoid S₃] [distrib_mul_action S₃ M₃] [smul_comm_class R₃ S₃ M₃]
variables [monoid T] [distrib_mul_action T M₂] [smul_comm_class R₂ T M₂]
instance : has_smul S (M →ₛₗ[σ₁₂] M₂) :=
⟨λ a f, { to_fun := a • f,
map_add' := λ x y, by simp only [pi.smul_apply, f.map_add, smul_add],
map_smul' := λ c x, by simp [pi.smul_apply, smul_comm (σ₁₂ c)] }⟩
@[simp] lemma smul_apply (a : S) (f : M →ₛₗ[σ₁₂] M₂) (x : M) : (a • f) x = a • f x := rfl
lemma coe_smul (a : S) (f : M →ₛₗ[σ₁₂] M₂) : ⇑(a • f) = a • f := rfl
instance [smul_comm_class S T M₂] : smul_comm_class S T (M →ₛₗ[σ₁₂] M₂) :=
⟨λ a b f, ext $ λ x, smul_comm _ _ _⟩
-- example application of this instance: if S -> T -> R are homomorphisms of commutative rings and
-- M and M₂ are R-modules then the S-module and T-module structures on Hom_R(M,M₂) are compatible.
instance [has_smul S T] [is_scalar_tower S T M₂] : is_scalar_tower S T (M →ₛₗ[σ₁₂] M₂) :=
{ smul_assoc := λ _ _ _, ext $ λ _, smul_assoc _ _ _ }
instance [distrib_mul_action Sᵐᵒᵖ M₂] [smul_comm_class R₂ Sᵐᵒᵖ M₂] [is_central_scalar S M₂] :
is_central_scalar S (M →ₛₗ[σ₁₂] M₂) :=
{ op_smul_eq_smul := λ a b, ext $ λ x, op_smul_eq_smul _ _ }
end has_smul
/-! ### Arithmetic on the codomain -/
section arithmetic
variables [semiring R₁] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [add_comm_group N₁] [add_comm_group N₂] [add_comm_group N₃]
variables [module R₁ M] [module R₂ M₂] [module R₃ M₃]
variables [module R₁ N₁] [module R₂ N₂] [module R₃ N₃]
variables {σ₁₂ : R₁ →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R₁ →+* R₃} [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
/-- The constant 0 map is linear. -/
instance : has_zero (M →ₛₗ[σ₁₂] M₂) :=
⟨{ to_fun := 0, map_add' := by simp, map_smul' := by simp }⟩
@[simp] lemma zero_apply (x : M) : (0 : M →ₛₗ[σ₁₂] M₂) x = 0 := rfl
@[simp] theorem comp_zero (g : M₂ →ₛₗ[σ₂₃] M₃) : (g.comp (0 : M →ₛₗ[σ₁₂] M₂) : M →ₛₗ[σ₁₃] M₃) = 0 :=
ext $ assume c, by rw [comp_apply, zero_apply, zero_apply, g.map_zero]
@[simp] theorem zero_comp (f : M →ₛₗ[σ₁₂] M₂) : ((0 : M₂ →ₛₗ[σ₂₃] M₃).comp f : M →ₛₗ[σ₁₃] M₃) = 0 :=
rfl
instance : inhabited (M →ₛₗ[σ₁₂] M₂) := ⟨0⟩
@[simp] lemma default_def : (default : (M →ₛₗ[σ₁₂] M₂)) = 0 := rfl
/-- The sum of two linear maps is linear. -/
instance : has_add (M →ₛₗ[σ₁₂] M₂) :=
⟨λ f g, { to_fun := f + g,
map_add' := by simp [add_comm, add_left_comm],
map_smul' := by simp [smul_add] }⟩
@[simp] lemma add_apply (f g : M →ₛₗ[σ₁₂] M₂) (x : M) : (f + g) x = f x + g x := rfl
lemma add_comp (f : M →ₛₗ[σ₁₂] M₂) (g h : M₂ →ₛₗ[σ₂₃] M₃) :
((h + g).comp f : M →ₛₗ[σ₁₃] M₃) = h.comp f + g.comp f := rfl
lemma comp_add (f g : M →ₛₗ[σ₁₂] M₂) (h : M₂ →ₛₗ[σ₂₃] M₃) :
(h.comp (f + g) : M →ₛₗ[σ₁₃] M₃) = h.comp f + h.comp g :=
ext $ λ _, h.map_add _ _
/-- The type of linear maps is an additive monoid. -/
instance : add_comm_monoid (M →ₛₗ[σ₁₂] M₂) :=
fun_like.coe_injective.add_comm_monoid _ rfl (λ _ _, rfl) (λ _ _, rfl)
/-- The negation of a linear map is linear. -/
instance : has_neg (M →ₛₗ[σ₁₂] N₂) :=
⟨λ f, { to_fun := -f, map_add' := by simp [add_comm], map_smul' := by simp }⟩
@[simp] lemma neg_apply (f : M →ₛₗ[σ₁₂] N₂) (x : M) : (- f) x = - f x := rfl
include σ₁₃
@[simp] lemma neg_comp (f : M →ₛₗ[σ₁₂] M₂) (g : M₂ →ₛₗ[σ₂₃] N₃) : (- g).comp f = - g.comp f := rfl
@[simp] lemma comp_neg (f : M →ₛₗ[σ₁₂] N₂) (g : N₂ →ₛₗ[σ₂₃] N₃) : g.comp (- f) = - g.comp f :=
ext $ λ _, g.map_neg _
omit σ₁₃
/-- The subtraction of two linear maps is linear. -/
instance : has_sub (M →ₛₗ[σ₁₂] N₂) :=
⟨λ f g, { to_fun := f - g,
map_add' := λ x y, by simp only [pi.sub_apply, map_add, add_sub_add_comm],
map_smul' := λ r x, by simp [pi.sub_apply, map_smul, smul_sub] }⟩
@[simp] lemma sub_apply (f g : M →ₛₗ[σ₁₂] N₂) (x : M) : (f - g) x = f x - g x := rfl
include σ₁₃
lemma sub_comp (f : M →ₛₗ[σ₁₂] M₂) (g h : M₂ →ₛₗ[σ₂₃] N₃) :
(g - h).comp f = g.comp f - h.comp f := rfl
lemma comp_sub (f g : M →ₛₗ[σ₁₂] N₂) (h : N₂ →ₛₗ[σ₂₃] N₃) :
h.comp (g - f) = h.comp g - h.comp f :=
ext $ λ _, h.map_sub _ _
omit σ₁₃
/-- The type of linear maps is an additive group. -/
instance : add_comm_group (M →ₛₗ[σ₁₂] N₂) :=
fun_like.coe_injective.add_comm_group _
rfl (λ _ _, rfl) (λ _, rfl) (λ _ _, rfl) (λ _ _, rfl) (λ _ _, rfl)
end arithmetic
section actions
variables [semiring R] [semiring R₂] [semiring R₃]
variables [add_comm_monoid M] [add_comm_monoid M₂] [add_comm_monoid M₃]
variables [module R M] [module R₂ M₂] [module R₃ M₃]
variables {σ₁₂ : R →+* R₂} {σ₂₃ : R₂ →+* R₃} {σ₁₃ : R →+* R₃} [ring_hom_comp_triple σ₁₂ σ₂₃ σ₁₃]
section has_smul
variables [monoid S] [distrib_mul_action S M₂] [smul_comm_class R₂ S M₂]
variables [monoid S₃] [distrib_mul_action S₃ M₃] [smul_comm_class R₃ S₃ M₃]
variables [monoid T] [distrib_mul_action T M₂] [smul_comm_class R₂ T M₂]
instance : distrib_mul_action S (M →ₛₗ[σ₁₂] M₂) :=
{ one_smul := λ f, ext $ λ _, one_smul _ _,
mul_smul := λ c c' f, ext $ λ _, mul_smul _ _ _,
smul_add := λ c f g, ext $ λ x, smul_add _ _ _,
smul_zero := λ c, ext $ λ x, smul_zero _ }
include σ₁₃
theorem smul_comp (a : S₃) (g : M₂ →ₛₗ[σ₂₃] M₃) (f : M →ₛₗ[σ₁₂] M₂) :
(a • g).comp f = a • (g.comp f) := rfl
omit σ₁₃
-- TODO: generalize this to semilinear maps
theorem comp_smul [module R M₂] [module R M₃] [smul_comm_class R S M₂] [distrib_mul_action S M₃]
[smul_comm_class R S M₃] [compatible_smul M₃ M₂ S R]
(g : M₃ →ₗ[R] M₂) (a : S) (f : M →ₗ[R] M₃) : g.comp (a • f) = a • (g.comp f) :=
ext $ λ x, g.map_smul_of_tower _ _
end has_smul
section module
variables [semiring S] [module S M₂] [smul_comm_class R₂ S M₂]
instance : module S (M →ₛₗ[σ₁₂] M₂) :=
{ add_smul := λ a b f, ext $ λ x, add_smul _ _ _,
zero_smul := λ f, ext $ λ x, zero_smul _ _ }
instance [no_zero_smul_divisors S M₂] : no_zero_smul_divisors S (M →ₛₗ[σ₁₂] M₂) :=
coe_injective.no_zero_smul_divisors _ rfl coe_smul
end module
end actions
/-!
### Monoid structure of endomorphisms
Lemmas about `pow` such as `linear_map.pow_apply` appear in later files.
-/
section endomorphisms
variables [semiring R] [add_comm_monoid M] [add_comm_group N₁] [module R M] [module R N₁]
instance : has_one (module.End R M) := ⟨linear_map.id⟩
instance : has_mul (module.End R M) := ⟨linear_map.comp⟩
lemma one_eq_id : (1 : module.End R M) = id := rfl
lemma mul_eq_comp (f g : module.End R M) : f * g = f.comp g := rfl
@[simp] lemma one_apply (x : M) : (1 : module.End R M) x = x := rfl
@[simp] lemma mul_apply (f g : module.End R M) (x : M) : (f * g) x = f (g x) := rfl
lemma coe_one : ⇑(1 : module.End R M) = _root_.id := rfl
lemma coe_mul (f g : module.End R M) : ⇑(f * g) = f ∘ g := rfl
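/- Added sanity check (sketch): multiplication of endomorphisms is composition,
so `(f * 1) x` reduces to `f x` definitionally. -/
example (f : module.End R M) (x : M) : (f * 1) x = f x := rfl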
instance _root_.module.End.monoid : monoid (module.End R M) :=
{ mul := (*),
one := (1 : M →ₗ[R] M),
mul_assoc := λ f g h, linear_map.ext $ λ x, rfl,
mul_one := comp_id,
one_mul := id_comp }
instance _root_.module.End.semiring : semiring (module.End R M) :=
{ mul := (*),
one := (1 : M →ₗ[R] M),
zero := 0,
add := (+),
mul_zero := comp_zero,
zero_mul := zero_comp,
left_distrib := λ f g h, comp_add _ _ _,
right_distrib := λ f g h, add_comp _ _ _,
nat_cast := λ n, n • 1,
nat_cast_zero := add_monoid.nsmul_zero' _,
nat_cast_succ := λ n, (add_monoid.nsmul_succ' n 1).trans (add_comm _ _),
.. add_monoid_with_one.unary,
.. _root_.module.End.monoid,
.. linear_map.add_comm_monoid }
/-- See also `module.End.nat_cast_def`. -/
@[simp] lemma _root_.module.End.nat_cast_apply (n : ℕ) (m : M) :
(↑n : module.End R M) m = n • m := rfl
instance _root_.module.End.ring : ring (module.End R N₁) :=
{ int_cast := λ z, z • 1,
int_cast_of_nat := of_nat_zsmul _,
int_cast_neg_succ_of_nat := zsmul_neg_succ_of_nat _,
..module.End.semiring, ..linear_map.add_comm_group }
/-- See also `module.End.int_cast_def`. -/
@[simp] lemma _root_.module.End.int_cast_apply (z : ℤ) (m : N₁) :
(↑z : module.End R N₁) m = z • m := rfl
section
variables [monoid S] [distrib_mul_action S M] [smul_comm_class R S M]
instance _root_.module.End.is_scalar_tower :
is_scalar_tower S (module.End R M) (module.End R M) := ⟨smul_comp⟩
instance _root_.module.End.smul_comm_class [has_smul S R] [is_scalar_tower S R M] :
smul_comm_class S (module.End R M) (module.End R M) :=
⟨λ s _ _, (comp_smul _ s _).symm⟩
instance _root_.module.End.smul_comm_class' [has_smul S R] [is_scalar_tower S R M] :
smul_comm_class (module.End R M) S (module.End R M) :=
smul_comm_class.symm _ _ _
end
/-! ### Action by a module endomorphism. -/
/-- The tautological action by `module.End R M` (aka `M →ₗ[R] M`) on `M`.
This generalizes `function.End.apply_mul_action`. -/
instance apply_module : module (module.End R M) M :=
{ smul := ($),
smul_zero := linear_map.map_zero,
smul_add := linear_map.map_add,
add_smul := linear_map.add_apply,
zero_smul := (linear_map.zero_apply : ∀ m, (0 : M →ₗ[R] M) m = 0),
one_smul := λ _, rfl,
mul_smul := λ _ _ _, rfl }
@[simp] protected lemma smul_def (f : module.End R M) (a : M) : f • a = f a := rfl
/-- `linear_map.apply_module` is faithful. -/
instance apply_has_faithful_smul : has_faithful_smul (module.End R M) M :=
⟨λ _ _, linear_map.ext⟩
instance apply_smul_comm_class : smul_comm_class R (module.End R M) M :=
{ smul_comm := λ r e m, (e.map_smul r m).symm }
instance apply_smul_comm_class' : smul_comm_class (module.End R M) R M :=
{ smul_comm := linear_map.map_smul }
instance apply_is_scalar_tower {R M : Type*} [comm_semiring R] [add_comm_monoid M] [module R M] :
is_scalar_tower R (module.End R M) M :=
⟨λ t f m, rfl⟩
end endomorphisms
end linear_map
/-! ### Actions as module endomorphisms -/
namespace distrib_mul_action
variables (R M) [semiring R] [add_comm_monoid M] [module R M]
variables [monoid S] [distrib_mul_action S M] [smul_comm_class S R M]
/-- Each element of the monoid defines a linear map.
This is a stronger version of `distrib_mul_action.to_add_monoid_hom`. -/
@[simps]
def to_linear_map (s : S) : M →ₗ[R] M :=
{ to_fun := has_smul.smul s,
map_add' := smul_add s,
map_smul' := λ a b, smul_comm _ _ _ }
/-- Each element of the monoid defines a module endomorphism.
This is a stronger version of `distrib_mul_action.to_add_monoid_End`. -/
@[simps]
def to_module_End : S →* module.End R M :=
{ to_fun := to_linear_map R M,
map_one' := linear_map.ext $ one_smul _,
map_mul' := λ a b, linear_map.ext $ mul_smul _ _ }
end distrib_mul_action
namespace module
variables (R M) [semiring R] [add_comm_monoid M] [module R M]
variables [semiring S] [module S M] [smul_comm_class S R M]
/-- Each element of the semiring defines a module endomorphism.
This is a stronger version of `distrib_mul_action.to_module_End`. -/
@[simps]
def to_module_End : S →+* module.End R M :=
{ to_fun := distrib_mul_action.to_linear_map R M,
map_zero' := linear_map.ext $ zero_smul _,
map_add' := λ f g, linear_map.ext $ add_smul _ _,
..distrib_mul_action.to_module_End R M }
/-- The canonical (semi)ring isomorphism from `Rᵐᵒᵖ` to `module.End R R` induced by the right
multiplication. -/
@[simps]
def module_End_self : Rᵐᵒᵖ ≃+* module.End R R :=
{ to_fun := distrib_mul_action.to_linear_map R R,
inv_fun := λ f, mul_opposite.op (f 1),
left_inv := mul_one,
right_inv := λ f, linear_map.ext_ring $ one_mul _,
..module.to_module_End R R }
/-- The canonical (semi)ring isomorphism from `R` to `module.End Rᵐᵒᵖ R` induced by the left
multiplication. -/
@[simps]
def module_End_self_op : R ≃+* module.End Rᵐᵒᵖ R :=
{ to_fun := distrib_mul_action.to_linear_map _ _,
inv_fun := λ f, f 1,
left_inv := mul_one,
right_inv := λ f, linear_map.ext_ring_op $ mul_one _,
..module.to_module_End _ _ }
lemma End.nat_cast_def (n : ℕ) [add_comm_monoid N₁] [module R N₁] :
(↑n : module.End R N₁) = module.to_module_End R N₁ n := rfl
lemma End.int_cast_def (z : ℤ) [add_comm_group N₁] [module R N₁] :
(↑z : module.End R N₁) = module.to_module_End R N₁ z := rfl
end module
|
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x y : E
a b : 𝕜
⊢ StrictConvex 𝕜 univ
[PROOFSTEP]
intro x _ y _ _ a b _ _ _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x✝ y✝ : E
a✝⁶ b✝ : 𝕜
x : E
a✝⁵ : x ∈ univ
y : E
a✝⁴ : y ∈ univ
a✝³ : x ≠ y
a b : 𝕜
a✝² : 0 < a
a✝¹ : 0 < b
a✝ : a + b = 1
⊢ a • x + b • y ∈ interior univ
[PROOFSTEP]
rw [interior_univ]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x✝ y✝ : E
a✝⁶ b✝ : 𝕜
x : E
a✝⁵ : x ∈ univ
y : E
a✝⁴ : y ∈ univ
a✝³ : x ≠ y
a b : 𝕜
a✝² : 0 < a
a✝¹ : 0 < b
a✝ : a + b = 1
⊢ a • x + b • y ∈ univ
[PROOFSTEP]
exact mem_univ _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x y : E
a b : 𝕜
t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
⊢ StrictConvex 𝕜 (s ∩ t)
[PROOFSTEP]
intro x hx y hy hxy a b ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
x : E
hx : x ∈ s ∩ t
y : E
hy : y ∈ s ∩ t
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior (s ∩ t)
[PROOFSTEP]
rw [interior_inter]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
x : E
hx : x ∈ s ∩ t
y : E
hy : y ∈ s ∩ t
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior s ∩ interior t
[PROOFSTEP]
exact ⟨hs hx.1 hy.1 hxy ha hb hab, ht hx.2 hy.2 hxy ha hb hab⟩
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x y : E
a b : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
⊢ StrictConvex 𝕜 (⋃ (i : ι), s i)
[PROOFSTEP]
rintro x hx y hy hxy a b ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
x : E
hx : x ∈ ⋃ (i : ι), s i
y : E
hy : y ∈ ⋃ (i : ι), s i
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior (⋃ (i : ι), s i)
[PROOFSTEP]
rw [mem_iUnion] at hx hy
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
x : E
hx : ∃ i, x ∈ s i
y : E
hy : ∃ i, y ∈ s i
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior (⋃ (i : ι), s i)
[PROOFSTEP]
obtain ⟨i, hx⟩ := hx
[GOAL]
case intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
x y : E
hy : ∃ i, y ∈ s i
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
i : ι
hx : x ∈ s i
⊢ a • x + b • y ∈ interior (⋃ (i : ι), s i)
[PROOFSTEP]
obtain ⟨j, hy⟩ := hy
[GOAL]
case intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
x y : E
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
i : ι
hx : x ∈ s i
j : ι
hy : y ∈ s j
⊢ a • x + b • y ∈ interior (⋃ (i : ι), s i)
[PROOFSTEP]
obtain ⟨k, hik, hjk⟩ := hdir i j
[GOAL]
case intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s✝ : Set E
x✝ y✝ : E
a✝ b✝ : 𝕜
ι : Sort u_6
s : ι → Set E
hdir : Directed (fun x x_1 => x ⊆ x_1) s
hs : ∀ ⦃i : ι⦄, StrictConvex 𝕜 (s i)
x y : E
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
i : ι
hx : x ∈ s i
j : ι
hy : y ∈ s j
k : ι
hik : s i ⊆ s k
hjk : s j ⊆ s k
⊢ a • x + b • y ∈ interior (⋃ (i : ι), s i)
[PROOFSTEP]
exact interior_mono (subset_iUnion s k) (hs (hik hx) (hjk hy) hxy ha hb hab)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x y : E
a b : 𝕜
S : Set (Set E)
hdir : DirectedOn (fun x x_1 => x ⊆ x_1) S
hS : ∀ (s : Set E), s ∈ S → StrictConvex 𝕜 s
⊢ StrictConvex 𝕜 (⋃₀ S)
[PROOFSTEP]
rw [sUnion_eq_iUnion]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : SMul 𝕜 E
inst✝ : SMul 𝕜 F
s : Set E
x y : E
a b : 𝕜
S : Set (Set E)
hdir : DirectedOn (fun x x_1 => x ⊆ x_1) S
hS : ∀ (s : Set E), s ∈ S → StrictConvex 𝕜 s
⊢ StrictConvex 𝕜 (⋃ (i : ↑S), ↑i)
[PROOFSTEP]
exact (directedOn_iff_directed.1 hdir).strictConvex_iUnion fun s => hS _ s.2
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommMonoid E
inst✝⁶ : AddCommMonoid F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
s : Set E
inst✝³ : Semiring 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : Module 𝕝 F
inst✝ : LinearMap.CompatibleSMul E F 𝕜 𝕝
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕝] F
hf : IsOpenMap ↑f
⊢ StrictConvex 𝕜 (↑f '' s)
[PROOFSTEP]
rintro _ ⟨x, hx, rfl⟩ _ ⟨y, hy, rfl⟩ hxy a b ha hb hab
[GOAL]
case intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommMonoid E
inst✝⁶ : AddCommMonoid F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
s : Set E
inst✝³ : Semiring 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : Module 𝕝 F
inst✝ : LinearMap.CompatibleSMul E F 𝕜 𝕝
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕝] F
hf : IsOpenMap ↑f
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : ↑f x ≠ ↑f y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • ↑f x + b • ↑f y ∈ interior (↑f '' s)
[PROOFSTEP]
refine' hf.image_interior_subset _ ⟨a • x + b • y, hs hx hy (ne_of_apply_ne _ hxy) ha hb hab, _⟩
[GOAL]
case intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommMonoid E
inst✝⁶ : AddCommMonoid F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
s : Set E
inst✝³ : Semiring 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : Module 𝕝 F
inst✝ : LinearMap.CompatibleSMul E F 𝕜 𝕝
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕝] F
hf : IsOpenMap ↑f
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : ↑f x ≠ ↑f y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ ↑f (a • x + b • y) = a • ↑f x + b • ↑f y
[PROOFSTEP]
rw [map_add, f.map_smul_of_tower a, f.map_smul_of_tower b]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ : Set E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
⊢ StrictConvex 𝕜 (↑f ⁻¹' s)
[PROOFSTEP]
intro x hx y hy hxy a b ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ : Set E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior (↑f ⁻¹' s)
[PROOFSTEP]
refine' preimage_interior_subset_interior_preimage hf _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ : Set E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ ↑f ⁻¹' interior s
[PROOFSTEP]
rw [mem_preimage, f.map_add, f.map_smul, f.map_smul]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedSemiring 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommMonoid E
inst✝² : AddCommMonoid F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ : Set E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ₗ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • ↑f x + b • ↑f y ∈ interior s
[PROOFSTEP]
exact hs hx hy (hfinj.ne hxy) ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
⊢ StrictConvex 𝕜 s
[PROOFSTEP]
refine' strictConvex_iff_openSegment_subset.2 fun x hx y hy hxy => _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
⊢ (fun x y => openSegment 𝕜 x y ⊆ interior s) x y
[PROOFSTEP]
cases' hxy.lt_or_lt with hlt hlt <;> [skip; rw [openSegment_symm]]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
⊢ (fun x y => openSegment 𝕜 x y ⊆ interior s) x y
[PROOFSTEP]
cases' hxy.lt_or_lt with hlt hlt
[GOAL]
case inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
hlt : x < y
⊢ openSegment 𝕜 x y ⊆ interior s
[PROOFSTEP]
skip
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
hlt : y < x
⊢ openSegment 𝕜 x y ⊆ interior s
[PROOFSTEP]
rw [openSegment_symm]
[GOAL]
case inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
hlt : x < y
⊢ openSegment 𝕜 x y ⊆ interior s
[PROOFSTEP]
exact
(openSegment_subset_Ioo hlt).trans (isOpen_Ioo.subset_interior_iff.2 <| Ioo_subset_Icc_self.trans <| hs.out ‹_› ‹_›)
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹¹ : OrderedSemiring 𝕜
inst✝¹⁰ : TopologicalSpace E
inst✝⁹ : TopologicalSpace F
inst✝⁸ : AddCommMonoid E
inst✝⁷ : AddCommMonoid F
inst✝⁶ : Module 𝕜 E
inst✝⁵ : Module 𝕜 F
s✝ : Set E
inst✝⁴ : TopologicalSpace β
inst✝³ : LinearOrderedCancelAddCommMonoid β
inst✝² : OrderTopology β
inst✝¹ : Module 𝕜 β
inst✝ : OrderedSMul 𝕜 β
s : Set β
hs : OrdConnected s
x : β
hx : x ∈ s
y : β
hy : y ∈ s
hxy : x ≠ y
hlt : y < x
⊢ openSegment 𝕜 y x ⊆ interior s
[PROOFSTEP]
exact
(openSegment_subset_Ioo hlt).trans (isOpen_Ioo.subset_interior_iff.2 <| Ioo_subset_Icc_self.trans <| hs.out ‹_› ‹_›)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : TopologicalSpace F
inst✝² : AddCancelCommMonoid E
inst✝¹ : ContinuousAdd E
inst✝ : Module 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
z : E
⊢ StrictConvex 𝕜 ((fun x => z + x) ⁻¹' s)
[PROOFSTEP]
intro x hx y hy hxy a b ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : TopologicalSpace F
inst✝² : AddCancelCommMonoid E
inst✝¹ : ContinuousAdd E
inst✝ : Module 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
z x : E
hx : x ∈ (fun x => z + x) ⁻¹' s
y : E
hy : y ∈ (fun x => z + x) ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior ((fun x => z + x) ⁻¹' s)
[PROOFSTEP]
refine' preimage_interior_subset_interior_preimage (continuous_add_left _) _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : TopologicalSpace F
inst✝² : AddCancelCommMonoid E
inst✝¹ : ContinuousAdd E
inst✝ : Module 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
z x : E
hx : x ∈ (fun x => z + x) ⁻¹' s
y : E
hy : y ∈ (fun x => z + x) ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ (fun b => z + b) ⁻¹' interior s
[PROOFSTEP]
have h := hs hx hy ((add_right_injective _).ne hxy) ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : TopologicalSpace F
inst✝² : AddCancelCommMonoid E
inst✝¹ : ContinuousAdd E
inst✝ : Module 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
z x : E
hx : x ∈ (fun x => z + x) ⁻¹' s
y : E
hy : y ∈ (fun x => z + x) ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
h : a • (fun x => z + x) x + b • (fun x => z + x) y ∈ interior s
⊢ a • x + b • y ∈ (fun b => z + b) ⁻¹' interior s
[PROOFSTEP]
rwa [smul_add, smul_add, add_add_add_comm, ← _root_.add_smul, hab, one_smul] at h
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : TopologicalSpace F
inst✝² : AddCancelCommMonoid E
inst✝¹ : ContinuousAdd E
inst✝ : Module 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
z : E
⊢ StrictConvex 𝕜 ((fun x => x + z) ⁻¹' s)
[PROOFSTEP]
simpa only [add_comm] using hs.preimage_add_right z
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
⊢ StrictConvex 𝕜 (s + t)
[PROOFSTEP]
rintro _ ⟨v, w, hv, hw, rfl⟩ _ ⟨x, y, hx, hy, rfl⟩ h a b ha hb hab
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
x y : E
hx : x ∈ s
hy : y ∈ t
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) x y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • (fun x x_1 => x + x_1) v w + b • (fun x x_1 => x + x_1) x y ∈ interior (s + t)
[PROOFSTEP]
rw [smul_add, smul_add, add_add_add_comm]
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
x y : E
hx : x ∈ s
hy : y ∈ t
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) x y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • v + b • x + (a • w + b • y) ∈ interior (s + t)
[PROOFSTEP]
obtain rfl | hvx := eq_or_ne v x
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro.inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
y : E
hy : y ∈ t
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
hx : v ∈ s
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) v y
⊢ a • v + b • v + (a • w + b • y) ∈ interior (s + t)
[PROOFSTEP]
refine' interior_mono (add_subset_add (singleton_subset_iff.2 hv) Subset.rfl) _
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro.inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
y : E
hy : y ∈ t
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
hx : v ∈ s
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) v y
⊢ a • v + b • v + (a • w + b • y) ∈ interior ({v} + t)
[PROOFSTEP]
rw [Convex.combo_self hab, singleton_add]
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro.inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
y : E
hy : y ∈ t
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
hx : v ∈ s
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) v y
⊢ v + (a • w + b • y) ∈ interior ((fun x x_1 => x + x_1) v '' t)
[PROOFSTEP]
exact (isOpenMap_add_left _).image_interior_subset _ (mem_image_of_mem _ <| ht hw hy (ne_of_apply_ne _ h) ha hb hab)
[GOAL]
case intro.intro.intro.intro.intro.intro.intro.intro.inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
ht : StrictConvex 𝕜 t
v w : E
hv : v ∈ s
hw : w ∈ t
x y : E
hx : x ∈ s
hy : y ∈ t
h : (fun x x_1 => x + x_1) v w ≠ (fun x x_1 => x + x_1) x y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
hvx : v ≠ x
⊢ a • v + b • x + (a • w + b • y) ∈ interior (s + t)
[PROOFSTEP]
exact subset_interior_add_left (add_mem_add (hs hv hx hvx ha hb hab) <| ht.convex hw hy ha.le hb.le hab)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
z : E
⊢ StrictConvex 𝕜 ((fun x => z + x) '' s)
[PROOFSTEP]
simpa only [singleton_add] using (strictConvex_singleton z).add hs
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁷ : OrderedSemiring 𝕜
inst✝⁶ : TopologicalSpace E
inst✝⁵ : TopologicalSpace F
inst✝⁴ : AddCommGroup E
inst✝³ : AddCommGroup F
inst✝² : Module 𝕜 E
inst✝¹ : Module 𝕜 F
inst✝ : ContinuousAdd E
s t : Set E
hs : StrictConvex 𝕜 s
z : E
⊢ StrictConvex 𝕜 ((fun x => x + z) '' s)
[PROOFSTEP]
simpa only [add_comm] using hs.add_left z
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommGroup E
inst✝⁶ : AddCommGroup F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
inst✝³ : LinearOrderedField 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : ContinuousConstSMul 𝕝 E
inst✝ : LinearMap.CompatibleSMul E E 𝕜 𝕝
s : Set E
x : E
hs : StrictConvex 𝕜 s
c : 𝕝
⊢ StrictConvex 𝕜 (c • s)
[PROOFSTEP]
obtain rfl | hc := eq_or_ne c 0
[GOAL]
case inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommGroup E
inst✝⁶ : AddCommGroup F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
inst✝³ : LinearOrderedField 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : ContinuousConstSMul 𝕝 E
inst✝ : LinearMap.CompatibleSMul E E 𝕜 𝕝
s : Set E
x : E
hs : StrictConvex 𝕜 s
⊢ StrictConvex 𝕜 (0 • s)
[PROOFSTEP]
exact (subsingleton_zero_smul_set _).strictConvex
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝¹⁰ : OrderedSemiring 𝕜
inst✝⁹ : TopologicalSpace E
inst✝⁸ : TopologicalSpace F
inst✝⁷ : AddCommGroup E
inst✝⁶ : AddCommGroup F
inst✝⁵ : Module 𝕜 E
inst✝⁴ : Module 𝕜 F
inst✝³ : LinearOrderedField 𝕝
inst✝² : Module 𝕝 E
inst✝¹ : ContinuousConstSMul 𝕝 E
inst✝ : LinearMap.CompatibleSMul E E 𝕜 𝕝
s : Set E
x : E
hs : StrictConvex 𝕜 s
c : 𝕝
hc : c ≠ 0
⊢ StrictConvex 𝕜 (c • s)
[PROOFSTEP]
exact hs.linear_image (LinearMap.lsmul _ _ c) (isOpenMap_smul₀ hc)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
c : 𝕜
⊢ StrictConvex 𝕜 ((fun z => c • z) ⁻¹' s)
[PROOFSTEP]
classical
obtain rfl | hc := eq_or_ne c 0
· simp_rw [zero_smul, preimage_const]
split_ifs
· exact strictConvex_univ
· exact strictConvex_empty
refine' hs.linear_preimage (LinearMap.lsmul _ _ c) _ (smul_right_injective E hc)
unfold LinearMap.lsmul LinearMap.mk₂ LinearMap.mk₂' LinearMap.mk₂'ₛₗ
exact continuous_const_smul _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
c : 𝕜
⊢ StrictConvex 𝕜 ((fun z => c • z) ⁻¹' s)
[PROOFSTEP]
obtain rfl | hc := eq_or_ne c 0
[GOAL]
case inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
⊢ StrictConvex 𝕜 ((fun z => 0 • z) ⁻¹' s)
[PROOFSTEP]
simp_rw [zero_smul, preimage_const]
[GOAL]
case inl
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
⊢ StrictConvex 𝕜 (if 0 ∈ s then univ else ∅)
[PROOFSTEP]
split_ifs
[GOAL]
case pos
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
h✝ : 0 ∈ s
⊢ StrictConvex 𝕜 univ
[PROOFSTEP]
exact strictConvex_univ
[GOAL]
case neg
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
h✝ : ¬0 ∈ s
⊢ StrictConvex 𝕜 ∅
[PROOFSTEP]
exact strictConvex_empty
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
c : 𝕜
hc : c ≠ 0
⊢ StrictConvex 𝕜 ((fun z => c • z) ⁻¹' s)
[PROOFSTEP]
refine' hs.linear_preimage (LinearMap.lsmul _ _ c) _ (smul_right_injective E hc)
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
c : 𝕜
hc : c ≠ 0
⊢ Continuous ↑(↑(LinearMap.lsmul 𝕜 E) c)
[PROOFSTEP]
unfold LinearMap.lsmul LinearMap.mk₂ LinearMap.mk₂' LinearMap.mk₂'ₛₗ
[GOAL]
case inr
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : OrderedCommSemiring 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : Module 𝕜 E
inst✝¹ : NoZeroSMulDivisors 𝕜 E
inst✝ : ContinuousConstSMul 𝕜 E
s : Set E
hs : StrictConvex 𝕜 s
c : 𝕜
hc : c ≠ 0
⊢ Continuous
↑(↑{
toAddHom :=
{
toFun := fun m =>
{
toAddHom :=
{ toFun := (fun x x_1 => x • x_1) m,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) },
map_add' :=
(_ :
∀ (m₁ m₂ : 𝕜),
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
(m₁ + m₂) =
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₁ +
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₂) },
map_smul' :=
(_ :
∀ (c m : 𝕜),
AddHom.toFun
{
toFun := fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) },
map_add' :=
(_ :
∀ (m₁ m₂ : 𝕜),
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
(m₁ + m₂) =
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₁ +
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₂) }
(c • m) =
↑(RingHom.id 𝕜) c •
AddHom.toFun
{
toFun := fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) },
map_add' :=
(_ :
∀ (m₁ m₂ : 𝕜),
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
(m₁ + m₂) =
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₁ +
(fun m =>
{
toAddHom :=
{ toFun := fun x => m • x,
map_add' := (_ : ∀ (b₁ b₂ : E), m • (b₁ + b₂) = m • b₁ + m • b₂) },
map_smul' := (_ : ∀ (c : 𝕜) (m_1 : E), m • c • m_1 = c • m • m_1) })
m₂) }
m) }
c)
[PROOFSTEP]
exact continuous_const_smul _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁸ : OrderedRing 𝕜
inst✝⁷ : TopologicalSpace E
inst✝⁶ : TopologicalSpace F
inst✝⁵ : AddCommGroup E
inst✝⁴ : AddCommGroup F
inst✝³ : Module 𝕜 E
inst✝² : Module 𝕜 F
s t : Set E
x y : E
inst✝¹ : Nontrivial 𝕜
inst✝ : DenselyOrdered 𝕜
hs : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
h : openSegment 𝕜 x y ⊆ frontier s
⊢ x = y
[PROOFSTEP]
obtain ⟨a, ha₀, ha₁⟩ := DenselyOrdered.dense (0 : 𝕜) 1 zero_lt_one
[GOAL]
case intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁸ : OrderedRing 𝕜
inst✝⁷ : TopologicalSpace E
inst✝⁶ : TopologicalSpace F
inst✝⁵ : AddCommGroup E
inst✝⁴ : AddCommGroup F
inst✝³ : Module 𝕜 E
inst✝² : Module 𝕜 F
s t : Set E
x y : E
inst✝¹ : Nontrivial 𝕜
inst✝ : DenselyOrdered 𝕜
hs : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
h : openSegment 𝕜 x y ⊆ frontier s
a : 𝕜
ha₀ : 0 < a
ha₁ : a < 1
⊢ x = y
[PROOFSTEP]
classical
by_contra hxy
exact
(h ⟨a, 1 - a, ha₀, sub_pos_of_lt ha₁, add_sub_cancel'_right _ _, rfl⟩).2
(hs hx hy hxy ha₀ (sub_pos_of_lt ha₁) <| add_sub_cancel'_right _ _)
[GOAL]
case intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁸ : OrderedRing 𝕜
inst✝⁷ : TopologicalSpace E
inst✝⁶ : TopologicalSpace F
inst✝⁵ : AddCommGroup E
inst✝⁴ : AddCommGroup F
inst✝³ : Module 𝕜 E
inst✝² : Module 𝕜 F
s t : Set E
x y : E
inst✝¹ : Nontrivial 𝕜
inst✝ : DenselyOrdered 𝕜
hs : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
h : openSegment 𝕜 x y ⊆ frontier s
a : 𝕜
ha₀ : 0 < a
ha₁ : a < 1
⊢ x = y
[PROOFSTEP]
by_contra hxy
[GOAL]
case intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁸ : OrderedRing 𝕜
inst✝⁷ : TopologicalSpace E
inst✝⁶ : TopologicalSpace F
inst✝⁵ : AddCommGroup E
inst✝⁴ : AddCommGroup F
inst✝³ : Module 𝕜 E
inst✝² : Module 𝕜 F
s t : Set E
x y : E
inst✝¹ : Nontrivial 𝕜
inst✝ : DenselyOrdered 𝕜
hs : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
h : openSegment 𝕜 x y ⊆ frontier s
a : 𝕜
ha₀ : 0 < a
ha₁ : a < 1
hxy : ¬x = y
⊢ False
[PROOFSTEP]
exact
(h ⟨a, 1 - a, ha₀, sub_pos_of_lt ha₁, add_sub_cancel'_right _ _, rfl⟩).2
(hs hx hy hxy ha₀ (sub_pos_of_lt ha₁) <| add_sub_cancel'_right _ _)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ x + t • y ∈ interior s
[PROOFSTEP]
have h : x + t • y = (1 - t) • x + t • (x + y) := by
rw [smul_add, ← add_assoc, ← _root_.add_smul, sub_add_cancel, one_smul]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ x + t • y = (1 - t) • x + t • (x + y)
[PROOFSTEP]
rw [smul_add, ← add_assoc, ← _root_.add_smul, sub_add_cancel, one_smul]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
h : x + t • y = (1 - t) • x + t • (x + y)
⊢ x + t • y ∈ interior s
[PROOFSTEP]
rw [h]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
h : x + t • y = (1 - t) • x + t • (x + y)
⊢ (1 - t) • x + t • (x + y) ∈ interior s
[PROOFSTEP]
refine' hs hx hxy (fun h => hy <| add_left_cancel _) (sub_pos_of_lt ht₁) ht₀ (sub_add_cancel _ _)
[GOAL]
case refine'_1
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
h✝ : x + t • y = (1 - t) • x + t • (x + y)
h : x = x + y
⊢ ?refine'_2 h + y = ?refine'_2 h + 0
case refine'_2
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
hx : x ∈ s
hxy : x + y ∈ s
hy : y ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
h : x + t • y = (1 - t) • x + t • (x + y)
⊢ x = x + y → E
[PROOFSTEP]
rw [← h, add_zero]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
zero_mem : 0 ∈ s
hx : x ∈ s
hx₀ : x ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ t • x ∈ interior s
[PROOFSTEP]
simpa using hs.add_smul_mem zero_mem (by simpa using hx) hx₀ ht₀ ht₁
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
hs : StrictConvex 𝕜 s
zero_mem : 0 ∈ s
hx : x ∈ s
hx₀ : x ≠ 0
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ 0 + x ∈ s
[PROOFSTEP]
simpa using hx
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
h : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
hxy : x ≠ y
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ x + t • (y - x) ∈ interior s
[PROOFSTEP]
apply h.openSegment_subset hx hy hxy
[GOAL]
case a
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
h : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
hxy : x ≠ y
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ x + t • (y - x) ∈ openSegment 𝕜 x y
[PROOFSTEP]
rw [openSegment_eq_image']
[GOAL]
case a
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t✝ : Set E
x y : E
h : StrictConvex 𝕜 s
hx : x ∈ s
hy : y ∈ s
hxy : x ≠ y
t : 𝕜
ht₀ : 0 < t
ht₁ : t < 1
⊢ x + t • (y - x) ∈ (fun θ => x + θ • (y - x)) '' Ioo 0 1
[PROOFSTEP]
exact mem_image_of_mem _ ⟨ht₀, ht₁⟩
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ t : Set E
x y : E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
⊢ StrictConvex 𝕜 (↑f ⁻¹' s)
[PROOFSTEP]
intro x hx y hy hxy a b ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ t : Set E
x✝ y✝ : E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior (↑f ⁻¹' s)
[PROOFSTEP]
refine' preimage_interior_subset_interior_preimage hf _
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ t : Set E
x✝ y✝ : E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ ↑f ⁻¹' interior s
[PROOFSTEP]
rw [mem_preimage, Convex.combo_affine_apply hab]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s✝ t : Set E
x✝ y✝ : E
s : Set F
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : Continuous ↑f
hfinj : Injective ↑f
x : E
hx : x ∈ ↑f ⁻¹' s
y : E
hy : y ∈ ↑f ⁻¹' s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • ↑f x + b • ↑f y ∈ interior s
[PROOFSTEP]
exact hs hx hy (hfinj.ne hxy) ha hb hab
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t : Set E
x y : E
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : IsOpenMap ↑f
⊢ StrictConvex 𝕜 (↑f '' s)
[PROOFSTEP]
rintro _ ⟨x, hx, rfl⟩ _ ⟨y, hy, rfl⟩ hxy a b ha hb hab
[GOAL]
case intro.intro.intro.intro
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁶ : OrderedRing 𝕜
inst✝⁵ : TopologicalSpace E
inst✝⁴ : TopologicalSpace F
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s t : Set E
x✝ y✝ : E
hs : StrictConvex 𝕜 s
f : E →ᵃ[𝕜] F
hf : IsOpenMap ↑f
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : ↑f x ≠ ↑f y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • ↑f x + b • ↑f y ∈ interior (↑f '' s)
[PROOFSTEP]
exact
hf.image_interior_subset _ ⟨a • x + b • y, ⟨hs hx hy (ne_of_apply_ne _ hxy) ha hb hab, Convex.combo_affine_apply hab⟩⟩
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : StrictConvex 𝕜 s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
⊢ (a / (a + b)) • x + (b / (a + b)) • y ∈ interior s
[PROOFSTEP]
apply h hx hy hxy (div_pos ha <| add_pos ha hb) (div_pos hb <| add_pos ha hb)
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : StrictConvex 𝕜 s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
⊢ a / (a + b) + b / (a + b) = 1
[PROOFSTEP]
rw [← add_div]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : StrictConvex 𝕜 s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
⊢ (a + b) / (a + b) = 1
[PROOFSTEP]
exact div_self (add_pos ha hb).ne'
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : Set.Pairwise s fun x y => ∀ ⦃a b : 𝕜⦄, 0 < a → 0 < b → (a / (a + b)) • x + (b / (a + b)) • y ∈ interior s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a • x + b • y ∈ interior s
[PROOFSTEP]
convert h hx hy hxy ha hb
[GOAL]
case h.e'_4.h.e'_5.h.e'_5
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : Set.Pairwise s fun x y => ∀ ⦃a b : 𝕜⦄, 0 < a → 0 < b → (a / (a + b)) • x + (b / (a + b)) • y ∈ interior s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ a = a / (a + b)
[PROOFSTEP]
rw [hab, div_one]
[GOAL]
case h.e'_4.h.e'_6.h.e'_5
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x✝ : E
h : Set.Pairwise s fun x y => ∀ ⦃a b : 𝕜⦄, 0 < a → 0 < b → (a / (a + b)) • x + (b / (a + b)) • y ∈ interior s
x : E
hx : x ∈ s
y : E
hy : y ∈ s
hxy : x ≠ y
a b : 𝕜
ha : 0 < a
hb : 0 < b
hab : a + b = 1
⊢ b = b / (a + b)
[PROOFSTEP]
rw [hab, div_one]
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x : E
hs : StrictConvex 𝕜 s
zero_mem : 0 ∈ s
hx : x ∈ s
hx₀ : x ≠ 0
t : 𝕜
ht : 1 < t
⊢ x ∈ t • interior s
[PROOFSTEP]
rw [mem_smul_set_iff_inv_smul_mem₀ (zero_lt_one.trans ht).ne']
[GOAL]
𝕜 : Type u_1
𝕝 : Type u_2
E : Type u_3
F : Type u_4
β : Type u_5
inst✝⁵ : LinearOrderedField 𝕜
inst✝⁴ : TopologicalSpace E
inst✝³ : AddCommGroup E
inst✝² : AddCommGroup F
inst✝¹ : Module 𝕜 E
inst✝ : Module 𝕜 F
s : Set E
x : E
hs : StrictConvex 𝕜 s
zero_mem : 0 ∈ s
hx : x ∈ s
hx₀ : x ≠ 0
t : 𝕜
ht : 1 < t
⊢ t⁻¹ • x ∈ interior s
[PROOFSTEP]
exact hs.smul_mem_of_zero_mem zero_mem hx hx₀ (inv_pos.2 <| zero_lt_one.trans ht) (inv_lt_one ht)
|
% Figure 10.08 Feedback Control of Dynamic Systems, 5e
% Franklin, Powell, Emami
%
% fig10_08.m is a script to generate Fig. 10.8, the
% frequency response of the satellite with low-gain PD compensation
np =[0.0360 0.9100];                      % plant G(s) numerator
dp =[1.0000 0.0396 1.0010 0.0000 0.0000]; % plant G(s) denominator
nc2=0.001*[30 1];                         % PD compensator D2(s) = 0.001(30s + 1)
nol2=conv(nc2,np);                        % open-loop numerator of D2(s)G(s)
dol2=dp;                                  % open-loop denominator
hold off ; clf
w=logspace(-2,.2);                        % frequencies from 0.01 to ~1.6 rad/sec
w(46)=1;                                  % force a sample exactly at 1 rad/sec
[magol2, phol2]= bode(nol2,dol2,w);
subplot(211) ; loglog(w,magol2); grid; hold on;
xlabel('\omega (rad/sec)');
ylabel('Magnitude |D_2(s)G(s)|');
loglog(w,ones(size(magol2)),'g');         % unity-gain (magnitude 1) reference line
title('Fig. 10.8 Frequency response of low-gain satellite PD design')
phol2a=[phol2, -180*ones(size(phol2))];   % append a -180 deg reference line
subplot(212); semilogx(w, phol2a); grid; hold on;
xlabel('\omega (rad/sec)');
ylabel('Phase (deg)');
|
Formal statement is: lemma emeasure_eq_0: "N \<in> sets M \<Longrightarrow> emeasure M N = 0 \<Longrightarrow> K \<subseteq> N \<Longrightarrow> emeasure M K = 0" Informal statement is: If $N$ is a measurable set with measure zero, then any subset $K$ of $N$ also has measure zero.
|
% Created 2020-03-10 Tue 10:06
% Intended LaTeX compiler: pdflatex
\documentclass[11pt]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{graphicx}
\usepackage{grffile}
\usepackage{longtable}
\usepackage{wrapfig}
\usepackage{rotating}
\usepackage[normalem]{ulem}
\usepackage{amsmath}
\usepackage{textcomp}
\usepackage{amssymb}
\usepackage{capt-of}
\usepackage{hyperref}
\author{Abram Hindle}
\date{\today}
\title{CMPUT201W20B2 Week 4}
\hypersetup{
pdfauthor={Abram Hindle},
pdftitle={CMPUT201W20B2 Week 4},
pdfkeywords={},
pdfsubject={},
pdfcreator={Emacs 25.2.2 (Org mode 9.1.6)},
pdflang={English}}
\begin{document}
\maketitle
\tableofcontents
\section{Week4}
\label{sec:orgf0f9dc3}
\subsection{Copyright Statement}
\label{sec:org6309117}
If you are in CMPUT201 at UAlberta this code is released in the public
domain to you.
Otherwise it is (c) 2020 Abram Hindle, Hazel Campbell AGPL3.0+
\subsubsection{License}
\label{sec:org3ec3419}
Week 4 notes
Copyright (C) 2020 Abram Hindle, Hazel Campbell
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see \url{https://www.gnu.org/licenses/}.
\subsubsection{Hazel Code is licensed under AGPL3.0+}
\label{sec:orgedf10b4}
Hazel's code is also found here
\url{https://github.com/hazelybell/examples/tree/C-2020-01}
Hazel code is licensed: The example code is licensed under the AGPL3+
license, unless otherwise noted.
\subsection{Init ORG-MODE}
\label{sec:org63caef3}
\begin{verbatim}
;; I need this for org-mode to work well
(require 'ob-sh)
;(require 'ob-shell)
(org-babel-do-load-languages 'org-babel-load-languages '((sh . t)))
(org-babel-do-load-languages 'org-babel-load-languages '((C . t)))
(org-babel-do-load-languages 'org-babel-load-languages '((python . t)))
(setq org-src-fontify-natively t)
\end{verbatim}
\subsubsection{Org export}
\label{sec:orgcabf44d}
\begin{verbatim}
(org-html-export-to-html)
(org-latex-export-to-pdf)
(org-ascii-export-to-ascii)
\end{verbatim}
\subsection{Org Template}
\label{sec:org09a5ccc}
Copy and paste this to demo C
\begin{verbatim}
#include <stdio.h>
int main(int argc, char**argv) {
return 0;
}
\end{verbatim}
\subsection{Remember how to compile?}
\label{sec:org3050ba6}
gcc -std=c99 -Wall -pedantic -Werror -o programname programname.c
\subsection{Functions}
\label{sec:org90f3e3d}
Functions replicate functions in mathematics. They allocate space on
the stack and have local variables.
Very similar to Python functions.
Define a function:
return\_type functionName(ArgType1 arg1, ArgType2 arg2, ArgType3 arg3) \{
\ldots{}
\}
Call a function:
functionName(arg1, arg2, arg3);
return\_type returnValue = functionName(arg1, arg2, arg3);
In C89, all variable declarations must come at the top of the function, before any statements; C99 lets you declare variables anywhere in a block.
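For a concrete contrast, here is a minimal sketch (an added example,
not from the original notes):
\begin{verbatim}
#include <stdio.h>

int main() {
    int i;          /* C89 style: declare everything first */
    int total = 0;
    for (i = 0; i < 5; i++) {
        total += i;
    }
    /* C99 also allows declaring in place: for (int j = 0; ...) */
    printf("total: %d\n", total);
    return 0;
}
\end{verbatim}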
\subsubsection{return\_types}
\label{sec:org576aa6d}
\begin{itemize}
\item void -- nothing
\item int
\item char
\item float
\item double
\item \ldots{}
\item pointer (array or string)
\end{itemize}
\subsubsection{Example}
\label{sec:org8f54b94}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
void example() {
printf("I have been made an example of\n");
// return; // void return
}
int main() {
example();
return 0;
}
\end{verbatim}
\begin{verbatim}
I have been made an example of
\end{verbatim}
\subsubsection{Pass by Value}
\label{sec:org1c2102f}
The values of parameters are COPIED into registers and sometimes onto
the stack, so the original variables the parameters came from are
safe.
The exception is data reached through pointers: given a pointer, the
called function can manipulate the data the pointer points to, even
though it cannot modify the caller's original pointer variable.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
int example(int x) {
x++;
printf("example x:\t%p\n", (void*)&x);
return x;
}
int main() {
int x = 10;
printf("main x :\t%p\n", (void*)&x);
printf("x: %d\n", x);
int rx = example(x);
printf("x: %d\n", x);
printf("returned x vs x: %d vs %d\n", rx, x);
}
\end{verbatim}
\begin{verbatim}
main x : 0x7ffe19cd7700
x: 10
example x: 0x7ffe19cd76ec
x: 10
returned x vs x: 11 vs 10
\end{verbatim}
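For contrast with the copy semantics above, here is a minimal sketch
(an added example, not from the original notes) of mutating the
caller's data through a pointer parameter:
\begin{verbatim}
#include <stdio.h>

void bumpThroughPointer(int *x) {
    (*x)++;       /* changes the caller's int via the copied address */
    x = NULL;     /* only changes the local copy of the pointer */
}

int main() {
    int n = 10;
    int *p = &n;
    bumpThroughPointer(p);
    printf("n: %d\n", n);          /* prints 11: the pointee changed */
    printf("p: %p\n", (void*)p);   /* p still points at n */
    return 0;
}
\end{verbatim}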
\subsubsection{Arrays again}
\label{sec:org3a5478a}
\begin{itemize}
\item void initArray(int cols, int values[cols]) \{
\item void initArray(int cols, int values[]) \{
\end{itemize}
You can specify array sizes in C99 parameter lists, but the size parameters have to come earlier in the list than the arrays that use them:
\begin{itemize}
\item void init2D(int rows, int cols, int values[rows][cols]) \{
\item void init2D(int rows, int cols, int values[][cols])\{
\item void init3D(int planes, int rows, int cols, int values[planes][rows][cols]) \{
\item void init3D(int planes, int rows, int cols, int values[][rows][cols]) \{
\end{itemize}
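Here is a minimal sketch of defining and calling the 1D version (the
body here is an assumed example; the notes only give the prototypes):
\begin{verbatim}
#include <stdio.h>

/* assumed behavior: fill the array with 0..cols-1 */
void initArray(int cols, int values[cols]) {
    for (int col = 0; col < cols; col++) {
        values[col] = col;
    }
}

int main() {
    int values[8];
    initArray(8, values);   /* the array decays to a pointer here */
    printf("%d %d\n", values[0], values[7]);
    return 0;
}
\end{verbatim}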
\subsubsection{Don't trust sizeof inside of functions!}
\label{sec:org222cf4c}
sizeof is only trustworthy if you declared the variable in the current
scope: an array parameter decays to a pointer, so inside the callee
sizeof gives the size of the pointer (8 bytes here), not of the array.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
void init2D(int rows, int cols, int values[][cols]) {
int i = 0;
printf("init2D: sizeof(values)=%lu\n", sizeof(values));
printf("init2D: sizeof(values[0])=%lu\n", sizeof(values[0]));
for (int row = 0; row < rows; row++) {
for (int col = 0; col < cols; col++) {
values[row][col] = i++;
}
}
}
void example() {
unsigned int n = 1 + rand() % 10;
unsigned int m = 1 + rand() % 10;
printf("%d X %d was chosen!\n", m, n);
int values[m][n]; // SO the compiler can't predict this allocation ahead of time
printf("sizeof(values) = %ld\n", sizeof(values));
printf("sizeof(&values) = %ld\n", sizeof(&values));
printf("sizeof(values[0]) = %ld\n", sizeof(values[0]));
init2D( m, n, values );
}
int main() {
srand(time(NULL)); //initialze based on the clock
example();
example();
example();
}
\end{verbatim}
\begin{verbatim}
10 X 7 was chosen!
sizeof(values) = 280
sizeof(&values) = 8
sizeof(values[0]) = 28
init2D: sizeof(values)=8
init2D: sizeof(values[0])=28
5 X 8 was chosen!
sizeof(values) = 160
sizeof(&values) = 8
sizeof(values[0]) = 32
init2D: sizeof(values)=8
init2D: sizeof(values[0])=32
5 X 2 was chosen!
sizeof(values) = 40
sizeof(&values) = 8
sizeof(values[0]) = 8
init2D: sizeof(values)=8
init2D: sizeof(values[0])=8
\end{verbatim}
\subsubsection{Returns}
\label{sec:org859313e}
Don't return arrays in general: a local array dies with the function's
stack frame, so a pointer to it is useless to the caller.
To return a value and exit the function immediately, write:
return expr;
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
int squareInt(int x) {
return x*x;
}
float squareFloat(float x) {
return x*x;
}
int intDiv(int x, int y) {
return x/y;
}
float floatDiv(float x, float y) {
return x/y;
}
char returnChar( int i ) {
return i;
}
int main() {
printf("squareInt\t %d\n", squareInt(25));
printf("squareInt\t %d\n", squareInt(1.47));
printf("squareFloat\t %f\n", squareFloat(1.47));
printf("squareFloat\t %f\n", squareFloat(25));
printf("intDiv\t %d\n", intDiv(64,31));
printf("intDiv\t %d\n", intDiv(64.2,31));
printf("floatDiv\t %f\n", floatDiv(64,31));
printf("floatDiv\t %f\n", floatDiv(64.2,31));
printf("returnChar\t %hhu\n", returnChar( 578 ) );
printf("returnChar\t %hhu\n", returnChar( 'a' ) );
printf("returnChar\t %hhu\n", returnChar( 66.1 ) );
printf("returnChar\t %c\n", returnChar( 578 ) );
printf("returnChar\t %c\n", returnChar( 'a' ) );
printf("returnChar\t %c\n", returnChar( 66.1 ) );
}
\end{verbatim}
\begin{verbatim}
squareInt 625
squareInt 1
squareFloat 2.160900
squareFloat 625.000000
intDiv 2
intDiv 2
floatDiv 2.064516
floatDiv 2.070968
returnChar 66
returnChar 97
returnChar 66
returnChar B
returnChar a
returnChar B
\end{verbatim}
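Note why returnChar(578) prints 66: converting an int to char keeps
only the low 8 bits (on this platform), and \(578 = 2 \times 256 + 66\),
so \(578 \bmod 256 = 66\), the character code for 'B'. Passing 66.1
simply truncates the float to the int 66 first.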
\subsubsection{Recursion}
\label{sec:orgd3ea940}
\begin{enumerate}
\item Recursion
\label{sec:org73fda68}
\begin{enumerate}
\item Recursion
\label{sec:org1d7db1a}
\begin{enumerate}
\item Recursion
\label{sec:org643393a}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
int divisibleBy(int x, int y);
int main() {
printf("%d\n",divisibleBy(33,32));
}
int divisibleBy(int x, int y) {
printf("%d %d\n", x,y);
if (x == 0) { return 0; }
if (y <= 0) { return 0; }
if (x % y == 0) { return y; }
return divisibleBy(x, y - 1);
}
\end{verbatim}
\begin{verbatim}
33 32
33 31
33 30
33 29
33 28
33 27
33 26
33 25
33 24
33 23
33 22
33 21
33 20
33 19
33 18
33 17
33 16
33 15
33 14
33 13
33 12
33 11
11
\end{verbatim}
\end{enumerate}
\end{enumerate}
\end{enumerate}
\subsubsection{Prototypes}
\label{sec:org7869f02}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
/* this is a prototype
it predeclares that a function with this
name will be available.
*/
// This program will not compile in C99 without this line:
//
int divisibleBy(int x, int y);
int main() {
printf("%d\n",divisibleBy(16,15));
}
int divisibleBy(int x, int y) {
printf("%d %d\n", x,y);
if (x == 0) { return 0; }
if (y <= 0) { return 0; }
if (x % y == 0) { return y; }
return divisibleBy(x, y - 1);
}
\end{verbatim}
\begin{verbatim}
16 15
16 14
16 13
16 12
16 11
16 10
16 9
16 8
8
\end{verbatim}
\begin{enumerate}
\item Prototypes and corecursive routines
\label{sec:orgd77611f}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
/* this is a prototype
it predeclares that a function with this
name will be available.
This is useful for co-recursive functions.
*/
// This program will not compile in C99 without this line:
//
int aReliesOnB(int x, int y);
int bReliesOnA(int x, int y);
//
int main() {
printf("%d\n",aReliesOnB(0,100));
}
int aReliesOnB(int x, int y) {
printf("> aReliesOnB( %d, %d)\n", x, y);
if (x >= y) {
return y;
}
return bReliesOnA(x+x+1, y);
}
int bReliesOnA(int x, int y) {
printf("> bReliesOnA( %d, %d)\n", x, y);
if (x >= y) {
return y;
}
return aReliesOnB(x * x + 1, y);
}
\end{verbatim}
\begin{verbatim}
> aReliesOnB( 0, 100)
> bReliesOnA( 1, 100)
> aReliesOnB( 2, 100)
> bReliesOnA( 5, 100)
> aReliesOnB( 26, 100)
> bReliesOnA( 53, 100)
> aReliesOnB( 2810, 100)
100
\end{verbatim}
\end{enumerate}
\subsubsection{Exercise}
\label{sec:org70085cf}
\begin{enumerate}
\item make a recursive countdown function, printing each number until 0 is reached.
\label{sec:org1c7e9d4}
\begin{verbatim}
#include <stdio.h>
void countDown(int n) {
printf("%d\n",n);
if (n > 0) {
countDown(n-1);
}
}
int main() {
countDown(10);
return 0;
}
\end{verbatim}
\begin{verbatim}
10
9
8
7
6
5
4
3
2
1
0
\end{verbatim}
\item make a recursive fibonacci (see the note after this exercise list on its cost)
\label{sec:org92f1873}
\begin{align*}
\mathrm{fib}(0) &= 1\\
\mathrm{fib}(1) &= 1\\
\mathrm{fib}(n) &= \mathrm{fib}(n-1) + \mathrm{fib}(n-2)
\end{align*}
\begin{verbatim}
#include <stdio.h>
int fibonacci(int n) {
if (n == 0 || n == 1) {
return 1;
} else {
return fibonacci(n-1) + fibonacci(n-2);
}
}
int main() {
printf("%d\n",fibonacci(45));
return 0;
}
\end{verbatim}
\begin{verbatim}
1836311903
\end{verbatim}
\end{enumerate}
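A note on the cost of the naive fibonacci above: each call recomputes
both subtrees, so evaluating fibonacci(45) performs
\(2 \cdot \mathrm{fib}(45) - 1 \approx 3.7\) billion calls, which is
why it takes noticeably long to print its answer.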
\subsection{Scope}
\label{sec:orge9a2ef1}
\subsubsection{const}
\label{sec:org0fcc271}
Instead of \#define you can use const for constants.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
const int nine = 9;
int catLives(int ncats) {
return nine * ncats;
}
int main() {
printf("10 cats %d lives\n", catLives( 10 ));
// you can't modify nine
// nine++;
// *(&nine) = 10;
// casting away const and writing through the pointer is
// undefined behavior: it may crash, or may appear to "work"
void * totally_not_nine = (void*)&nine;
int * not_nine = (int *)totally_not_nine;
*not_nine = 10; // undefined behavior!
printf("%d\n",*not_nine);
}
\end{verbatim}
\subsubsection{Local variables}
\label{sec:org46022dd}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
// no x here
int example(int x) { // < this x is visible -- main's x is NOT visible here
x++; // < within
return x; // < this scope
}
// no x here
int main() {
int x = 10; // < this x is visible within all of main
printf("x: %d\n", x);
int rx = example(x);
printf("x: %d\n", x);
printf("returned x vs x: %d vs %d\n", rx, x);
}
\end{verbatim}
\begin{verbatim}
x: 10
x: 10
returned x vs x: 11 vs 10
\end{verbatim}
\subsection{Global Variables (BAD) / External Variables / File-level variables}
\label{sec:org11aa2aa}
Too common. Too error prone. You will usually cause lots of bugs by
making top-level variables. With the static keyword they are only
available within the file where you declare them.
Global constants are fine. They are safe.
If you make a global in a file, explicitly limit it to the current
file with the static keyword.
If static is not used, the variable has external linkage and can be
seen (and modified) from every other file in the program.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
// BAD
// int x = 111; // visible in all lines below unless occluded by local definitions
// BADISH
const int x = 111; // visible in all lines below unless occluded by local definitions
// BETTER but still not OK
//static int x = 111;
// BEST and allowed
static const int x = 111;
int globalX() {
return x; // returns the static global x
}
int example(int x) { // <x_2 this x, x_2 is visible -- main's x is NOT visible here nor is the global
x++; // <x_2 within
return x; // <x_2 this scope
}
int main() {
printf("Global x %d\n", globalX());
int x = 10; // < this x, x_3 is visible within all of main
const int y = globalX() * globalX();
printf("y: %d\n", y); // x_3
printf("x: %d\n", x); // x_3
int rx = example(x); // x_3
printf("x: %d\n", x); // x_3
printf("returned x vs x: %d vs %d\n", rx, x); // x_3
}
\end{verbatim}
\subsection{Static Function Scope}
\label{sec:org18c383b}
Static function-local variables keep their values between calls. It is
similar to defining a global variable that only one function can see.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int counter() {
static unsigned int counter = 0; // this keeps its value
printf("%u\n", counter);
return ++counter;
}
static unsigned int __worseCounter__ = 0; // whoo don't touch this AKA DONT DO IT
unsigned int worseCounter() {
return ++__worseCounter__;
}
#define N 10
int main() {
srand(time(NULL));
unsigned int count = 0;
unsigned int wCount = 0;
for (int i = 0 ; i < N; i++) {
if (rand() % 3 == 0) {
count = counter();
wCount = worseCounter();
}
}
printf("Counted %u / %u numbers divisible by 3 generated by rand\n", count, N);
printf("Worse: Counted %u / %u numbers divisible by 3 generated by rand\n", wCount, N);
}
\end{verbatim}
\begin{verbatim}
0
1
2
Counted 3 / 10 numbers divisible by 3 generated by rand
Worse: Counted 3 / 10 numbers divisible by 3 generated by rand
\end{verbatim}
\subsection{Pointers!}
\label{sec:org99b7042}
\begin{itemize}
\item What is a pointer? A number that is a memory address.
\item What's at that memory address? the type of the pointer.
\begin{itemize}
\item char * str;
\end{itemize}
\item Why?
\begin{itemize}
\item you want to know the address so you can manipulate a value or
manipulate a shared value.
\item you want to return multiple values from a function.
\item your computer deals with memory as locations and offsets the entire time
\item a local variable lives at the current base pointer + an offset
\end{itemize}
\item What is str? An integer that is a memory address.
\item What does str point to? A character, but maybe an array of characters!
\item Can I tell if it is an array of characters? No.
\item How can I get the first element of a character array at str?
\begin{itemize}
\item str[0]
\item *str
\end{itemize}
\item How can I make a pointer to:
\begin{itemize}
\item char myChar = 'a';
\item char * ptrToMyChar = \&myChar;
\end{itemize}
\item Can I manipulate pointers?
\begin{itemize}
\item char * ptrToChar = \&myChar;
\item ptrToChar++; // <--- goes to the following character in a character array
\item *ptrToChar = 'b'; // Dereference ptrToChar and change myChar to the value of 'b'
\end{itemize}
\end{itemize}
\subsubsection{Operators}
\label{sec:org8cbbc68}
\begin{itemize}
\item \& unary operator means "address of"
\item * unary operator means "dereference pointer" -- that is return
the value it points to
\item don't confuse declaration of a variable int * x with
dereferencing a variable in an expression: *x
\end{itemize}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
// These are macros they cover up syntax
// Return the address of X
#define ADDRESSOF(X) (&X)
// Dereference X
#define DEREF(X) (*X)
// caution: this name clashes with the standard intptr_t from <stdint.h>;
// it is redefined here only to demo typedef with pointer types
typedef int * intptr_t;
int main() {
int i = 99;
intptr_t ptrToI1 = ADDRESSOF(i); // these 2 lines
int * ptrToI2 = &i; // are the same
printf("i: %4d,\naddress of i: %p\n\tptrToI1: %p, *ptrToI1: %d\n\tptrToI2: %p, *ptrToI2: %d\n",
i,
(void*)&i,
(void*)ptrToI1,
DEREF(ptrToI1),
(void*)ptrToI2,
*ptrToI2
);
printf("addressof i: %p,\naddress of ptrToI1: %p\n\tptrToI2: %p\n",
(void*)&i,
(void*)&ptrToI1,
(void*)&ptrToI2
);
return 0;
}
\end{verbatim}
\begin{verbatim}
i: 99,
address of i: 0x7ffef7f5a274
ptrToI1: 0x7ffef7f5a274, *ptrToI1: 99
ptrToI2: 0x7ffef7f5a274, *ptrToI2: 99
addressof i: 0x7ffef7f5a274,
address of ptrToI1: 0x7ffef7f5a278
ptrToI2: 0x7ffef7f5a280
\end{verbatim}
\subsubsection{Character Arrays and Pointers}
\label{sec:orgabd2b67}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main() {
char myChars[] = "Abram believes he is a benevolent professor";
// char * strnstr(const char *big, const char *little, size_t len); from string.h
char * professor = strstr(myChars, "professor");
char * believes = strstr(myChars, "believes");
printf("Size of a pointer %lu\n", sizeof(professor));
printf("Location pointed to %p\n", professor);
printf("full representation %016lX\n", (long unsigned int)professor); // look how many bits are used
printf("myChars: %s\n", myChars);
printf("myChars location: %p\n", myChars);
printf("professor: %s\n", professor);
printf("professor location: %p\n", professor);
printf("believes: %s\n", believes);
printf("believes location: %p\n", believes);
printf("believes - myChars location: %llu\n", (long long unsigned int)believes - (long long unsigned int)myChars);
printf("professor - myChars location: %llu\n", (long long unsigned int)professor - (long long unsigned int)myChars);
printf("\nBut where are myChars and professor and believes?\n");
printf("myChars location: %p\t ptr address: %p \t*ptr %c\n", (void*)&myChars, myChars, *myChars);
printf("professor location: %p\t ptr address: %p \t*ptr %c\n", (void*)&professor, professor, *professor);
printf("believes location: %p\t ptr address: %p \t*ptr %c\n", (void*)&believes, believes, *believes);
}
\end{verbatim}
\begin{verbatim}
Size of a pointer 8
Location pointed to 0x7ffc5301a2d2
full representation 00007FFC5301A2D2
myChars: Abram believes he is a benevolent professor
myChars location: 0x7ffc5301a2b0
professor: professor
professor location: 0x7ffc5301a2d2
believes: believes he is a benevolent professor
believes location: 0x7ffc5301a2b6
believes - myChars location: 6
professor - myChars location: 34
But where are myChars and professor and believes?
myChars location: 0x7ffc5301a2b0 ptr address: 0x7ffc5301a2b0 *ptr A
professor location: 0x7ffc5301a2a0 ptr address: 0x7ffc5301a2d2 *ptr p
believes location: 0x7ffc5301a2a8 ptr address: 0x7ffc5301a2b6 *ptr b
\end{verbatim}
\subsubsection{Int arrays}
\label{sec:org5435415}
Now character arrays are easy because the size is 1 for a character
but what about arrays of larger size datatypes?
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1000
int main() {
int myInts[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
// char * strnstr(const char *big, const char *little, size_t len); from string.h
int * ptrToMyInts = &myInts[0];
int * five = &myInts[5];
int * fiveAgain = myInts + 5;
printf("myInts: %p\n", (void*)myInts);
printf("ptrToMyInts: %p\n", (void*)ptrToMyInts);
printf("five location: %p five value: %d\n", (void*)five, *five);
printf("fiveAgain location: %p fiveAgain value: %d\n", (void*)fiveAgain, *fiveAgain);
printf("five - myInts location: %llu\n",
(long long unsigned int)five - (long long unsigned int)myInts);
printf("five - myInts location / sizeof(int): %llu\n",
((long long unsigned int)five - (long long unsigned int)myInts)/(sizeof(int)));
printf("\n OK... Where are they?\n");
printf("myInts Location: %p\t ptr address: %p \t*ptr %d\n", (void*)&myInts, (void*)myInts, *myInts);
printf("ptrToMyIntsLocation: %p\t ptr address: %p \t*ptr %d\n", (void*)&ptrToMyInts, (void*)ptrToMyInts, *ptrToMyInts);
printf("five Location: %p\t ptr address: %p \t*ptr %d\n", (void*)&five, (void*)five, *five);
printf("fiveAgain Location: %p\t ptr address: %p \t*ptr %d\n", (void*)&fiveAgain, (void*)fiveAgain, *fiveAgain);
printf("\nLet's add 1 to five\n");
int * six = five + 1;
printf("five Location: %p\t ptr address: %p \t*ptr %d\n", (void*)&five, (void*)five, *five);
printf("six Location: %p\t ptr address: %p \t*ptr %d\n", (void*)&six, (void*)six, *six);
}
\end{verbatim}
\begin{verbatim}
myInts: 0x7ffe80918f50
ptrToMyInts: 0x7ffe80918f50
five location: 0x7ffe80918f64 five value: 5
fiveAgain location: 0x7ffe80918f64 fiveAgain value: 5
five - myInts location: 20
five - myInts location / sizeof(int): 5
OK... Where are they?
myInts Location: 0x7ffe80918f50 ptr address: 0x7ffe80918f50 *ptr 0
ptrToMyIntsLocation: 0x7ffe80918f30 ptr address: 0x7ffe80918f50 *ptr 0
five Location: 0x7ffe80918f38 ptr address: 0x7ffe80918f64 *ptr 5
fiveAgain Location: 0x7ffe80918f40 ptr address: 0x7ffe80918f64 *ptr 5
Let's add 1 to five
five Location: 0x7ffe80918f38 ptr address: 0x7ffe80918f64 *ptr 5
six Location: 0x7ffe80918f48 ptr address: 0x7ffe80918f68 *ptr 6
\end{verbatim}
\subsubsection{Arrays as pointers}
\label{sec:orge6cceb4}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1000
int main() {
int myInts[] = { 99, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
int * ptrToMyInts = myInts;
int * ptrToMyInts2 = &myInts[0];
printf("myInts:\t%p\n", (void*)myInts);
printf("ptrToMyInts:\t%p\n", (void*)ptrToMyInts);
printf("ptrToMyInts2:\t%p\n", (void*)ptrToMyInts2);
printf("deref myInts:\t%d\n", *myInts);
printf("deref ptrToMyInts:\t%d\n", *ptrToMyInts);
printf("deref ptrToMyInts2:\t%d\n", *ptrToMyInts2);
return 0;
}
\end{verbatim}
\begin{verbatim}
myInts: 0x7ffe24475770
ptrToMyInts: 0x7ffe24475770
ptrToMyInts2: 0x7ffe24475770
deref myInts: 99
deref ptrToMyInts: 99
deref ptrToMyInts2: 99
\end{verbatim}
\subsubsection{Pointer arthimetic again}
\label{sec:org888f4f8}
When you add an integer n to a pointer p, the address does not move by
n bytes but by n*sizeof(*p) bytes, i.e. the result is p + n*sizeof(*p).
*ptr++ is a common idiom: it means give me the current value, then
advance the pointer to the next element.
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1000
int main() {
long int myInts[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
long int * ptr = &myInts[0];
size_t count = sizeof(myInts) / sizeof(myInts[0]);
while(count > 0) {
printf("%ld \t %p\n", *ptr, (void*)ptr);
ptr++;
count--;
}
ptr = &myInts[10];
count = sizeof(myInts) / sizeof(myInts[0]);
while( count-- > 0) {
void * oldptr = (void*) ptr;
printf("%ld \t %p\t", *ptr--, oldptr); // this *ptr++ is
// idiomatic in C and
// confusing but you must
// learn it
printf("ptr - oldptr %ld\n", (unsigned long int)ptr - (unsigned long int)oldptr);
}
printf("%p %ld\n", (void*)ptr, *ptr);
return 0;
}
\end{verbatim}
\begin{verbatim}
0 0x7fff0c5ec000
1 0x7fff0c5ec008
2 0x7fff0c5ec010
3 0x7fff0c5ec018
4 0x7fff0c5ec020
5 0x7fff0c5ec028
6 0x7fff0c5ec030
7 0x7fff0c5ec038
8 0x7fff0c5ec040
9 0x7fff0c5ec048
10 0x7fff0c5ec050
10 0x7fff0c5ec050 ptr - oldptr -8
9 0x7fff0c5ec048 ptr - oldptr -8
8 0x7fff0c5ec040 ptr - oldptr -8
7 0x7fff0c5ec038 ptr - oldptr -8
6 0x7fff0c5ec030 ptr - oldptr -8
5 0x7fff0c5ec028 ptr - oldptr -8
4 0x7fff0c5ec020 ptr - oldptr -8
3 0x7fff0c5ec018 ptr - oldptr -8
2 0x7fff0c5ec010 ptr - oldptr -8
1 0x7fff0c5ec008 ptr - oldptr -8
0 0x7fff0c5ec000 ptr - oldptr -8
0x7fff0c5ebff8 140733400924160
\end{verbatim}
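Careful with the very last line of that trace: after the second loop,
ptr points one element \emph{before} the start of myInts. Forming and
dereferencing such a pointer is undefined behavior in C, so the final
*ptr just prints whatever value happened to sit at that address.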
\begin{enumerate}
\item Now with Chars
\label{sec:orga1d8d86}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1000
int main() {
char str[] = "Polar bears are cool bears";
char * strLiteral = "Polar bears are cool bears";
char * ptr = str;
char tmp = 0;
while( (tmp = *ptr++) ) {
putchar(tmp);
}
putchar('\n');
ptr = str;
tmp = 0;
while( (tmp = *ptr++) ) {
printf("%c %p %20lu\n", tmp, (void*)ptr, (unsigned long int)ptr);
}
// now watch the addresses
ptr = strLiteral;
printf("The start of this function's stack frame is pretty close to %p\n", (void*)&str);
while( (tmp = *ptr++) ) {
printf("%c %p %20lu\n", tmp, (void*)ptr, (unsigned long int)ptr);
}
// wow that's super far away in memory
printf("str - strLiteral in bytes: %lu\n", (unsigned long int)str - (unsigned long int)strLiteral);
printf("&str - &strLiteral in bytes: %lu\n", (unsigned long int)&str - (unsigned long int)&strLiteral);
return 0;
}
\end{verbatim}
\begin{verbatim}
Polar bears are cool bears
P 0x7ffc89491f51 140722611756881
o 0x7ffc89491f52 140722611756882
l 0x7ffc89491f53 140722611756883
a 0x7ffc89491f54 140722611756884
r 0x7ffc89491f55 140722611756885
0x7ffc89491f56 140722611756886
b 0x7ffc89491f57 140722611756887
e 0x7ffc89491f58 140722611756888
a 0x7ffc89491f59 140722611756889
r 0x7ffc89491f5a 140722611756890
s 0x7ffc89491f5b 140722611756891
0x7ffc89491f5c 140722611756892
a 0x7ffc89491f5d 140722611756893
r 0x7ffc89491f5e 140722611756894
e 0x7ffc89491f5f 140722611756895
0x7ffc89491f60 140722611756896
c 0x7ffc89491f61 140722611756897
o 0x7ffc89491f62 140722611756898
o 0x7ffc89491f63 140722611756899
l 0x7ffc89491f64 140722611756900
0x7ffc89491f65 140722611756901
b 0x7ffc89491f66 140722611756902
e 0x7ffc89491f67 140722611756903
a 0x7ffc89491f68 140722611756904
r 0x7ffc89491f69 140722611756905
s 0x7ffc89491f6a 140722611756906
The start of this function's stack frame is pretty close to 0x7ffc89491f50
P 0x563ee77aa919 94828171536665
o 0x563ee77aa91a 94828171536666
l 0x563ee77aa91b 94828171536667
a 0x563ee77aa91c 94828171536668
r 0x563ee77aa91d 94828171536669
0x563ee77aa91e 94828171536670
b 0x563ee77aa91f 94828171536671
e 0x563ee77aa920 94828171536672
a 0x563ee77aa921 94828171536673
r 0x563ee77aa922 94828171536674
s 0x563ee77aa923 94828171536675
0x563ee77aa924 94828171536676
a 0x563ee77aa925 94828171536677
r 0x563ee77aa926 94828171536678
e 0x563ee77aa927 94828171536679
0x563ee77aa928 94828171536680
c 0x563ee77aa929 94828171536681
o 0x563ee77aa92a 94828171536682
o 0x563ee77aa92b 94828171536683
l 0x563ee77aa92c 94828171536684
0x563ee77aa92d 94828171536685
b 0x563ee77aa92e 94828171536686
e 0x563ee77aa92f 94828171536687
a 0x563ee77aa930 94828171536688
r 0x563ee77aa931 94828171536689
s 0x563ee77aa932 94828171536690
str - strLiteral in bytes: 45894440220216
&str - &strLiteral in bytes: 16
\end{verbatim}
\end{enumerate}
\subsubsection{Hazel's ptrs.c}
\label{sec:org8cfd7c1}
The intent here is to demonstrate the use and features of pointers and
how to manipulate values via pointers within functions.
\begin{verbatim}
#include <stdio.h>
int pbv(int passed) {
    passed++;
    printf(" passed = %d\n", passed);
    printf(" &passed = %p\n", (void *) &passed);
    return passed;
}
void pbr(int *passed) {
    printf(" passed = %p\n", (void *) passed);
    printf(" *passed = %d\n", *passed);
    printf(" &passed = %p\n", (void *) &passed);
    (*passed)++;
}
/*
 * 4 byte integer (32-bit PC)
 * Example: our integer uses these 4 bytes
 *   byte 4287409512 (0xff8cad68)
 *   byte 4287409513 (0xff8cad69)
 *   byte 4287409514 (0xff8cad6a)
 *   byte 4287409515 (0xff8cad6b)
 */
int main() {
    int thing_1 = 100;
    int thing_2 = 200;
    // type: define a_pointer as a pointer to an int
    int *a_pointer = NULL;
    // type of a_pointer is "int *"
    // NULL: the NULL pointer, gives the pointer the value 0
    // used to indicate that the pointer doesn't point to anything
    printf("thing_1 = %d\n", thing_1);
    printf("thing_2 = %d\n", thing_2);
    // error: ‘a_pointer’ is used uninitialized in this function [-Werror=uninitialized]
    //printf("a_pointer = %p\n", (void *) a_pointer);
    //printf("a_pointer = %zu\n", (size_t) a_pointer);
    printf("\nsizes:\n");
    printf("sizeof(thing_1) = %zu\n", sizeof(thing_1));
    printf("sizeof(thing_2) = %zu\n", sizeof(thing_2));
    printf("sizeof(a_pointer) = %zu (%zu bits)\n", sizeof(a_pointer), sizeof(a_pointer) * 8);
    // unary & operator: get address of (reference)
    a_pointer = &thing_1;
    printf("\na_pointer = &thing_1;\n");
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    printf(" &thing_2 = %p\n", (void *) &thing_2);
    printf("a_pointer = %p\n", (void *) a_pointer);
    printf("a_pointer = %zu\n", (size_t) a_pointer);
    // unary * operator: get value at (dereference)
    printf("*a_pointer = %d\n", *a_pointer);
    a_pointer = &thing_2;
    printf("\na_pointer = &thing_2;\n");
    printf("a_pointer = %p\n", (void *) a_pointer);
    // unary * operator: get value at (dereference)
    printf("*a_pointer = %d\n", *a_pointer);
    // We're going to copy thing_1 and take a look
    printf("\ncopy value:\n");
    printf("\nint value = thing_1;\n");
    int value = thing_1;
    printf("thing_1 = %d\n", thing_1);
    printf(" value = %d\n", value);
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    printf(" &value = %p\n", (void *) &value);
    printf("\ncopy value using pointer:\n");
    printf("\nvalue = *(&thing_2);\n");
    value = *(&thing_2);
    printf("thing_2 = %d\n", thing_2);
    printf(" value = %d\n", value);
    printf(" &thing_2 = %p\n", (void *) &thing_2);
    printf(" &value = %p\n", (void *) &value);
    printf("\ncopy value using pointer:\n");
    a_pointer = &thing_2;
    printf("\na_pointer = &thing_2;\n");
    printf("a_pointer = %p\n", (void *) a_pointer);
    // unary * operator: get value at (dereference)
    printf("*a_pointer = %d\n", *a_pointer);
    printf("value = *a_pointer;\n");
    value = *a_pointer;
    printf("thing_2 = %d\n", thing_2);
    printf(" value = %d\n", value);
    printf(" &thing_2 = %p\n", (void *) &thing_2);
    printf(" &value = %p\n", (void *) &value);
    printf("\npass-by-value (copy):\n");
    printf("\npbv(thing_1);\n");
    printf(" thing_1 = %d\n", thing_1);
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    pbv(thing_1);
    printf(" thing_1 = %d\n", thing_1);
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    printf("\npass-by-reference (no copy):\n");
    printf("\npbr(&thing_1);\n");
    printf(" thing_1 = %d\n", thing_1);
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    pbr(&thing_1);
    printf(" thing_1 = %d\n", thing_1);
    printf(" &thing_1 = %p\n", (void *) &thing_1);
    return 0;
}
\end{verbatim}
\begin{verbatim}
thing_1 = 100
thing_2 = 200
sizes:
sizeof(thing_1) = 4
sizeof(thing_2) = 4
sizeof(a_pointer) = 8 (64 bits)
a_pointer = &thing_1;
&thing_1 = 0x7ffe8deb9864
&thing_2 = 0x7ffe8deb9868
a_pointer = 0x7ffe8deb9864
a_pointer = 140731279448164
*a_pointer = 100
a_pointer = &thing_2;
a_pointer = 0x7ffe8deb9868
*a_pointer = 200
copy value:
int value = thing_1;
thing_1 = 100
value = 100
&thing_1 = 0x7ffe8deb9864
&value = 0x7ffe8deb986c
copy value using pointer:
value = *(&thing_2);
thing_2 = 200
value = 200
&thing_2 = 0x7ffe8deb9868
&value = 0x7ffe8deb986c
copy value using pointer:
a_pointer = &thing_2;
a_pointer = 0x7ffe8deb9868
*a_pointer = 200
value = *a_pointer;
thing_2 = 200
value = 200
&thing_2 = 0x7ffe8deb9868
&value = 0x7ffe8deb986c
pass-by-value (copy):
pbv(thing_1);
thing_1 = 100
&thing_1 = 0x7ffe8deb9864
passed = 101
&passed = 0x7ffe8deb984c
thing_1 = 100
&thing_1 = 0x7ffe8deb9864
pass-by-reference (no copy):
pbr(&thing_1);
thing_1 = 100
&thing_1 = 0x7ffe8deb9864
passed = 0x7ffe8deb9864
*passed = 100
&passed = 0x7ffe8deb9848
thing_1 = 101
&thing_1 = 0x7ffe8deb9864
\end{verbatim}
\subsubsection{Hazel's ptr\(_{\text{const.c}}\)}
\label{sec:org1e71eb3}
The intent here is to show that you shouldn't mess with \texttt{const} variables:
the compiler's protection can be circumvented with pointer casts, but actually
writing through such a cast is undefined behavior.
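Before the listing, here is a minimal sketch (mine, not Hazel's) of what circumventing
\texttt{const} looks like in practice: the cast compiles, but the write through it is
undefined behavior, so the program may do anything.
\begin{verbatim}
#include <stdio.h>
int main() {
    const int locked = 7;
    int *backdoor = (int *) &locked; // the cast silences the compiler...
    *backdoor = 8;                   // ...but this write is undefined behavior
    printf("%d\n", locked);          // may print 7, 8, or misbehave entirely
    return 0;
}
\end{verbatim}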
\begin{verbatim}
#include <stdio.h>
int main() {
    int mut_i = 100; // mutable integer
    printf("mut_i = %d\n", mut_i);
    const int const_i = 200; // constant integer
    printf("const_i = %d\n", const_i);
    // mutable pointer to mutable integer
    int * mut_p = &mut_i;
    printf("mut_p = %p\n", (void *) mut_p);
    printf("*mut_p = %d\n", *mut_p);
    // constant pointer to mutable integer
    int * const const_p = &mut_i;
    printf("const_p = %p\n", (void *) const_p);
    printf("*const_p = %d\n", *const_p);
    // mutable pointer to constant integer
    const int * p_to_const = &const_i;
    printf("p_to_const = %p\n", (void *) p_to_const);
    printf("*p_to_const = %d\n", *p_to_const);
    // constant pointer to constant integer
    const int * const const_p_to_const = &const_i;
    printf("const_p_to_const = %p\n", (void *) const_p_to_const);
    printf("*const_p_to_const = %d\n", *const_p_to_const);
    /*
    // Don't do this!
    // "warning: assignment discards ‘const’ qualifier from pointer target type"
    mut_p = &const_i;
    const char *str_lit = "String literals are const char *";
    printf("%s\n", str_lit);
    // but remember this means we can change str_lit to point to a different string!
    str_lit = "String literal #2";
    printf("%s\n", str_lit);
    // This protects us from:
    // str_lit[0] = 'D';
    // this is wrong:
    char *wrong = "We will try to change this string literal";
    printf("%s\n", wrong);
    // Because it doesn't protect us from:
    // wrong[0] = 'D';
    // what happens if you uncomment the above line?
    // This might be better:
    const char * const RIGHT = "Don't go changing on me!";
    printf("%s\n", RIGHT);
    // Because it protects us from:
    // RIGHT[0] = 'L';
    // and
    // RIGHT = wrong;
    */
}
\end{verbatim}
\begin{verbatim}
mut_i = 100
const_i = 200
mut_p = 0x7ffd165d3e50
*mut_p = 100
const_p = 0x7ffd165d3e50
*const_p = 100
p_to_const = 0x7ffd165d3e54
*p_to_const = 200
const_p_to_const = 0x7ffd165d3e54
*const_p_to_const = 200
\end{verbatim}
\subsubsection{Hazel's Pointer No No's}
\label{sec:org8b96fb5}
\url{ptr\_nonos.c}
Note the lack of \texttt{-Werror} in the flags below, so the warnings these no-no's generate do not stop compilation.
\begin{verbatim}
gcc -std=c99 -Wall -pedantic -o ptr_nonos ptr_nonos.c && \
./ptr_nonos
\end{verbatim}
\begin{verbatim}
*pointer = 100
Three fives is 15
*pointer = 22089
Three fives is 15
result = 15
&result = 0x7ffee57fce84
&result_p = 0x7ffee57fce88
\end{verbatim}
\begin{verbatim}
#include <stdio.h>
#define SIZE 10
// This function tries to print out the int which is at address 0 in memory...
// Don't do this!
void dereference_null() {
    printf("\ndereference null\n");
    int *a_pointer = NULL;
    printf(" a_pointer = %p\n", (void *) a_pointer);
    printf("*a_pointer = %d\n", *a_pointer);
}
// This function tries to print out the int which is at some address we don't know in memory...
// Don't do this!
void dereference_uninit() {
    printf("\ndereference uninitialized pointer\n");
    int *a_pointer;
    printf(" a_pointer = %p\n", (void *) a_pointer);
    printf("*a_pointer = %d\n", *a_pointer);
}
// This function returns a pointer to an "automatic" local variable...
// Don't do this!
int *return_pointer_to_local() {
    int local_int = 100;
    int *pointer = &local_int;
    // when we return we give up the memory we allocated for "local_int"!
    return pointer;
}
// This function just does some things...
int do_things() {
    int three = 3;
    int five = 5;
    int three_fives = three * five;
    printf(" Three fives is %d\n", three_fives);
    return three_fives;
}
int main() {
    // dereference_null();
    int * pointer = return_pointer_to_local();
    printf("*pointer = %d\n", *pointer);
    do_things();
    printf("*pointer = %d\n", *pointer);
    // You can't get a pointer to some things...
    // This won't compile:
    // &(do_things());
    // We can't do this for the same reason...
    // &10;
    // This one is actually exactly the same as the one above...
    // &SIZE;
    // You have to make memory to store the value to get a pointer to it!
    int result = do_things();
    printf(" result = %d\n", result);
    printf(" &result = %p\n", (void *) &result);
    // This won't compile either. Same reason.
    // &(&result);
    // You have to make memory to store the pointer to get a pointer to it!
    int * result_p = &result;
    printf("&result_p = %p\n", (void *) &result_p);
    int **result_pp = &result_p;
    int ***result_ppp = &result_pp;
    printf("result_ppp = %p\n", (void *) result_ppp);
    printf("&result_ppp = %p\n", (void *) &result_ppp);
    printf("***result_ppp = %d\n", ***result_ppp);
    return 0;
}
\end{verbatim}
\begin{verbatim}
*pointer = 100
Three fives is 15
*pointer = 21920
Three fives is 15
result = 15
&result = 0x7ffd1638f9f4
&result_p = 0x7ffd1638f9f8
result_ppp = 0x7ffd1638fa00
&result_ppp = 0x7ffd1638fa08
***result_ppp = 15
\end{verbatim}
\subsubsection{Multidimensional Arrays and Pointers}
\label{sec:org9b694c7}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 10
void init2D(int rows, int cols, int values[][cols]) {
    int i = 0;
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            values[row][col] = i++;
        }
    }
}
int main() {
    int myInts[N][N];
    init2D(N, N, myInts);
    // int * ptrToMyInts = myInts; // THIS WILL NOT WORK
    int (* ptrToMyInts)[N][N] = &myInts;
    int (* secondRow)[N] = &myInts[1];
    printf("myInts:\t%p\n", (void*)myInts);
    printf("ptrToMyInts:\t%p\n", (void*)ptrToMyInts);
    printf("deref myInts:\t%d\n", **myInts);
    printf("deref myInts + 1:\t%d\n", **(myInts + 1) ); // this hops a row!
    printf("deref secondRow:\t%d\n", *secondRow[0]);
    printf("deref *myInts + 1:\t%d\n", *(*myInts + 1) ); // this hops a col!
    //printf("deref ptrToMyInts:\t%d\n", *ptrToMyInts);
    return 0;
}
\end{verbatim}
\begin{verbatim}
myInts: 0x7ffc32f5f8f0
ptrToMyInts: 0x7ffc32f5f8f0
deref myInts: 0
deref myInts + 1: 10
deref secondRow: 10
deref *myInts + 1: 1
\end{verbatim}
\subsubsection{Arrays of Pointers or Pointers of Pointers}
\label{sec:orga889717}
Be aware that when declaring arrays there are arrays of pointers and
pointers to arrays.
They are different.
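Before the longer listing below, here is a minimal sketch (not part of the original
notes) showing the two declarations side by side:
\begin{verbatim}
#include <stdio.h>
int main() {
    char *arrayOfPtrs[3];         // an array of 3 pointers to char
    char row[3] = "hi";           // a 3-char array (the NUL fills the last slot)
    char (*ptrToArray)[3] = &row; // a pointer to an array of 3 chars
    arrayOfPtrs[0] = row;         // stores the address of row's first char
    printf("%s %s\n", *ptrToArray, arrayOfPtrs[0]); // hi hi
    return 0;
}
\end{verbatim}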
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 4
int main() {
    char * ptrs[4]; // an array of character pointers!
    char stringOnStack[] = "ON STACK";
    // these literals will not be on the stack
    ptrs[0] = "Anaxagoras";
    ptrs[1] = "mummifies";
    ptrs[2] = "shackles";
    ptrs[3] = stringOnStack;
    printf("sizeof(ptrs)=%lu sizeof(ptrs[0])=%lu\n",sizeof(ptrs), sizeof(ptrs[0]));
    printf("sizeof(stringOnStack)=%lu sizeof(stringOnStack[0])=%lu\n",
           sizeof(stringOnStack),
           sizeof(stringOnStack[0]));
    printf("sizeof(&stringOnStack)=%lu sizeof(&stringOnStack[0])=%lu\n",
           sizeof(&stringOnStack),
           sizeof(&stringOnStack[0]));
    for (int i = 0; i < N; i++) {
        printf("S:%s\t", ptrs[i]);
        printf("P:%p\t", (void*)ptrs[i]);
        printf("L:%p\n", (void*)&ptrs[i]);
    }
    char ** pointsToPointers = ptrs; // it is a pointer to pointers (usable like an array!)
    printf("sizeof(pointsToPointers)=%lu sizeof(pointsToPointers[0])=%lu\n",
           sizeof(pointsToPointers),
           sizeof(pointsToPointers[0]));
    puts(*(pointsToPointers + 0));
    puts(pointsToPointers[0]);
    putchar('\n');
    puts(*(pointsToPointers + 2));
    puts(pointsToPointers[2]);
    putchar('\n');
    return 0;
}
\end{verbatim}
\begin{verbatim}
sizeof(ptrs)=32 sizeof(ptrs[0])=8
sizeof(stringOnStack)=9 sizeof(stringOnStack[0])=1
sizeof(&stringOnStack)=8 sizeof(&stringOnStack[0])=8
S:Anaxagoras P:0x55cb7601d978 L:0x7fffba6e7380
S:mummifies P:0x55cb7601d983 L:0x7fffba6e7388
S:shackles P:0x55cb7601d98d L:0x7fffba6e7390
S:ON STACK P:0x7fffba6e73af L:0x7fffba6e7398
sizeof(pointsToPointers)=8 sizeof(pointsToPointers[0])=8
Anaxagoras
Anaxagoras
shackles
shackles
\end{verbatim}
\subsubsection{Confusing Array Pointer interactions and syntax}
\label{sec:orgf76931c}
\begin{itemize}
\item \texttt{int * myInts} is not the same type as \texttt{int (* myInts)[]}: the
first points at a single \texttt{int}, the second at a whole array (see the sketch below)
\end{itemize}
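A minimal sketch (mine, not from the original notes) that makes the difference
visible through \texttt{sizeof} and pointer arithmetic:
\begin{verbatim}
#include <stdio.h>
int main() {
    int a[5] = {0, 1, 2, 3, 4};
    int *p = a;       // pointer to the first element
    int (*q)[5] = &a; // pointer to the whole 5-int array
    // p + 1 advances sizeof(int) bytes; q + 1 advances sizeof(int[5]) bytes
    printf("sizeof *p = %zu, sizeof *q = %zu\n", sizeof *p, sizeof *q); // typically 4 and 20
    printf("*(p + 1) = %d, (*q)[1] = %d\n", *(p + 1), (*q)[1]);         // 1 and 1
    return 0;
}
\end{verbatim}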
\begin{enumerate}
\item Make a pointer to the first element
\label{sec:org2465e2f}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 5
void init2D(int rows, int cols, int values[][cols]) {
    int i = 0;
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            values[row][col] = i++;
        }
    }
}
int main() {
    int matrix[N][N];
    init2D( N, N, matrix );
    int * pointToMatrix = &matrix[0][0];
    for (int i = 0; i < N*N; i++) {
        printf("%c", (i%N==0)?'\n':'\t');
        printf("%d", pointToMatrix[i]);
    }
    return 0;
}
\end{verbatim}
\begin{verbatim}
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
15 16 17 18 19
20 21 22 23 24
\end{verbatim}
\item Make a pointer to the first row
\label{sec:org9990005}
\begin{verbatim}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 5
#define M 3
void init2D(int rows, int cols, int values[][cols]) {
    int i = 0;
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            values[row][col] = i++;
        }
    }
}
int main() {
    int matrix[M][N];
    init2D( M, N, matrix );
    // a pointer to an int array of size [N]
    int (* pointToRow)[N] = &matrix[0];
    printf("sizeof(pointToRow)=%lu\n", sizeof(pointToRow));
    printf("sizeof(pointToRow[0])=%lu\n", sizeof(pointToRow[0]));
    printf("Take a ref to row\n");
    for (int i = 0; i < M; i++) {
        int * row = pointToRow[i];
        for (int j = 0 ; j < N; j++) {
            printf("%d\t", row[j]);
        }
        printf("\n");
    }
    printf("Take a ref to row w/ pointer arithmetic\n");
    pointToRow = &matrix[0];
    for (int i = 0; i < M; i++) {
        int * row = *pointToRow; //deref that row
        pointToRow++; // go to next row
        for (int j = 0 ; j < N; j++) {
            printf("%d\t", row[j]);
        }
        printf("\n");
    }
    printf("Direct index\n");
    pointToRow = &matrix[0];
    // direct index
    for (int i = 0; i < M; i++) {
        for (int j = 0 ; j < N; j++) {
            printf("%d\t", pointToRow[i][j]);
        }
        printf("\n");
    }
    printf("Skip a row\n");
    // skip a row
    pointToRow = &matrix[1];
    for (int i = 1; i < M; i++) { // try not to go over our bounds
        int * row = *pointToRow; //deref that row
        pointToRow++; // go to next row
        for (int j = 0 ; j < N; j++) {
            printf("%d\t", row[j]);
        }
        printf("\n");
    }
    return 0;
}
\end{verbatim}
\begin{verbatim}
sizeof(pointToRow)=8
sizeof(pointToRow[0])=20
Take a ref to row
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
Take a ref to row w/ pointer arithmetic
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
Direct index
0 1 2 3 4
5 6 7 8 9
10 11 12 13 14
Skip a row
5 6 7 8 9
10 11 12 13 14
\end{verbatim}
\end{enumerate}
\end{document}
|
SUBROUTINE QUIKVIS5B(IDTARG,TARGNAMES,KTARGTYP,TARGPARM,IERR)
IMPLICIT REAL*8 (A-H,O-Z)
C
C THIS ROUTINE IS PART OF THE QUIKVIS PROGRAM. IT IS THE DRIVER FOR
C COMPUTING THE TARGET AVAILABILITY FOR THE GRID OF TARGETS IN THE
C SURVEY OPTION.
C
C
C VARIABLE DIM TYPE I/O DESCRIPTION
C -------- --- ---- --- -----------
C
C IDTARG MAXTARGS I*4 I DESCRIBED IN QUIKVIS(=MAIN) PROLOGUE.
C
C TARGNAMES MAXTARGS CH*16 I DESCRIBED IN QUIKVIS(=MAIN) PROLOGUE.
C
C KTARGTYP MAXTARGS I*4 I DESCRIBED IN QUIKVIS(=MAIN) PROLOGUE.
C
C TARGPARM NPARMS,MAXTARGS R*8 I DESCRIBED IN QUIKVIS(=MAIN) PROLOGUE.
C
C IERR 1 I*4 O ERROR RETURN FLAG
C =0, NO ERROR
C =OTHERWISE, ERROR.
C
C***********************************************************************
C
C BY C PETRUZZO/GFSC/742. 2/86.
C MODIFIED....
C
C***********************************************************************
C
INCLUDE 'QUIKVIS.INC'
C
CHARACTER*16 TARGNAMES(MAXTARGS)
INTEGER*4 IDTARG(MAXTARGS)
REAL*8 TARGPARM(NPARMS,MAXTARGS)
INTEGER*4 KTARGTYP(MAXTARGS)
REAL*8 VISCURR(MAXTARGS),VISCUMM(MAXTARGS)
C
C
REAL*8 SUNPOS(3),ELEMS(6),UTARG(3)
CHARACTER*18 DATETIME
LOGICAL GRIDNOW,DOCURR,DOCUMM
C
IBUG = 0
LUBUG = 19
C
C ERROR CHECK. IS PARAMETER MAXTARGS SET OK IN QUIKVIS.INC ?
C
IF(MAXTARGS.LT.NRASURVEY*NDECSURVEY)
* STOP ' QUIKVIS5B. CODING ERROR. STOPPED. SEE SOURCE CODE.'
C
C
IF(IBUG.NE.0) WRITE(LUBUG,9001)
* (ITARG,TARGPARM(1,ITARG)*DEGRAD,TARGPARM(2,ITARG)*DEGRAD,
* ITARG=1,80)
9001 FORMAT(/,' QUIKVIS5B. DEBUG. ENTRY VALUES.'/,
* (' ITARG=',I3,' RA,DEC=',2G13.5))
C
C
C ****************
C * INITIALIZE *
C ****************
C
IERR = 0
ELEMS(1) = SMA
ELEMS(2) = ECC
ELEMS(3) = ORBINCL
ELEMS(5) = ARGP
ELEMS(6) = 0.D0
DELRA = TWOPI/(NRASURVEY-1)
DELDEC = PI/(NDECSURVEY-1)
CALL MTXSETR8(VISCUMM,1.D10,MAXTARGS,1)
C KSVYOUT1 ASSIGNMENTS: (1=CURRENT DATE ONLY; 2=CUMULATIVE DATES ONLY
C ELSE=BOTH)
DOCURR = KSVYOUT1.NE.2
DOCUMM = KSVYOUT1.NE.1
C
C
C ********************************************
C * STEP ALONG THE TIME PERIOD OF INTEREST *
C ********************************************
C
DO 2000 ITIME=1,NUMTIMES
C
IF(INTERACTIVE) THEN
WRITE(LUPROMPT,9002) ITIME,NUMTIMES,DATETIME(0)
9002 FORMAT(' START PROCESSING TIME NUMBER ',I3,' OF ',I3,' AT ',A)
END IF
C
T50 = TSTART + (ITIME-1) * DELTIME
IF(DOREQMT(2)) CALL SOLM50(T50,SUNPOS,0)
CALL MTXSETR8(VISCURR,1.D10,MAXTARGS,1)
C
C SET THE RAAN RANGE. IT IS DONE INSIDE THE ITIME LOOP IN CASE THE
C RAAN'S ARE DEFINED IN TERMS OF MEAN SOLAR TIME.
CALL QUIKVIS5Y(T50,FIRSTNODE,NUMNODES,DELNODE)
C
C*** STEP ALONG THE RAAN RANGE
C
DO 3000 INODE=1,NUMNODES
ELEMS(4) = FIRSTNODE + (INODE-1) * DELNODE
C
C*** PROCESS EACH TARGET
C
DO 1000 ITARG=1,NUMTARGS
C
C ERROR CHECK. QUIKVIS5B IS FOR SKY SURVEY OPT. FIXED TARGS ONLY.
IF(KTARGTYP(ITARG).NE.3) STOP 'QUIKVIS5B. CODING ERROR. STOP 1.'
C
C WE NEED THIS TARGET'S AVAILABILITY INFO IF (1) THIS RUN IS GIVING
C SURVEY INFO AT EACH TIME STEP OR IF (2) IT IS GIVING CUMULATIVE
C INFO AND THIS TARGET'S MINIMUM AVAILABILITY IS GREATER THAN ZERO,
C SO THAT EVEN LOWER AVAILABILITY TIME MAY BE ENCOUNTERED.
IF(DOCURR .OR. (DOCUMM.AND.VISCUMM(ITARG).GT.0.D0)) THEN
RATARG = TARGPARM(1,ITARG)
DECTARG = TARGPARM(2,ITARG)
UTARG(1) = TARGPARM(3,ITARG)
UTARG(2) = TARGPARM(4,ITARG)
UTARG(3) = TARGPARM(5,ITARG)
CALL QUIKVIS5X(ELEMS,UTARG,SUNPOS,TAVAIL,.FALSE.,DUM,IERR)
IF(IERR.NE.0) THEN
IERR = 1
GO TO 9999
END IF
IF(DOCURR) VISCURR(ITARG) = DMIN1(TAVAIL,VISCURR(ITARG))
IF(DOCUMM) VISCUMM(ITARG) = DMIN1(TAVAIL,VISCUMM(ITARG))
END IF
C
1000 CONTINUE ! END ITARG LOOP
C
3000 CONTINUE ! END INODE LOOP
C
GRIDNOW = MOD(ITIME-1,KSVYFREQ).EQ.0 .OR. ITIME.EQ.NUMTIMES
IF(GRIDNOW) THEN
IF(DOCURR)
* CALL QUIKVIS5B1(T50,T50,VISCURR,IDTARG,KTARGTYP,TARGPARM)
IF(DOCUMM) THEN
IF(.NOT.(DOCURR.AND.ITIME.EQ.1)) ! IE, IF NOT ALREADY WRITTEN
* CALL QUIKVIS5B1(TSTART,T50,VISCUMM,IDTARG,KTARGTYP,TARGPARM)
END IF
END IF
C
2000 CONTINUE ! END ITIME LOOP
C
C
9999 CONTINUE
RETURN
C
C***********************************************************************
C
C
C**** INITIALIZATION CALL. PUT GLOBAL PARAMETER VALUES INTO THIS
C ROUTINE'S LOCAL VARIABLES.
C
ENTRY QVINIT5B
C
CALL QUIKVIS999(-1,R8DATA,I4DATA,L4DATA)
RETURN
C
C***********************************************************************
C
END
|
\chapter{Optimal Strategies}
After examining a variety of heuristic strategies that perform variably under different configurations of rules, a natural question arises: for a given set of rules, does there exist an \emph{optimal} strategy? The answer is ``yes''. In this chapter, we explore several techniques that enable us to find such a strategy.
\section{Overview}
An \emph{optimal strategy} is a strategy that achieves a certain objective in an optimal manner. In this chapter, we will focus on the following three objectives, in order of strength of optimality:\footnote{In each objective presented, we could replace ``reveal'' with ``determine'', which are subtly different from an information perspective (see 2.4). The overall methodology remains the same. Therefore in this chapter we will focus on strategies that aim to reveal the secret.}
\emph{Min-Steps} objective. To minimize the average number of guesses needed to reveal a secret, assuming each codeword is equally likely to be the secret. This is equivalent to minimizing the total number of guesses needed to reveal all secrets.
\emph{Min-Depth} objective. Subject to being optimal in \emph{Min-Steps} sense, also minimize the maximum number of guesses required to reveal any single secret.
\emph{Min-Worst} objective. Subject to being optimal in \emph{Min-Depth} sense, also minimize the number of secrets that are revealed using the maximum number of guesses.
Note that there may be other objectives for a strategy, such as minimizing the number of guesses evaluated. See, for example, Temporel and Kovacs (2003). However, those objectives are not studied in this chapter.
Optionally, additional constraints could be enforced for an optimal strategy. Commonly used constraints include:
\emph{Max-Depth} constraint. This constraint limits the maximum number of guesses allowed to reveal any single secret. For certain configurations of rules, this constraint has a meaningful impact on the result. See \cite{koyama93} for an example.
\emph{Possibility-Only} constraint. This constraint requires all guesses to be made from the remaining possibilities. It significantly reduces the complexity of the search at the cost of yielding sub-optimal solutions, and was employed in the 1980s when computational power was limited. For example, see \cite{neuwirth81}.
The theory behind finding an optimal strategy is simple: since the number of possible secrets, as well as the number of sequences of (non-redundant) guesses, is finite, the code breaker can perform an exhaustive search to find an optimal guessing strategy. However, the scale of the problem is very large, and many optimization techniques are required to achieve an efficient (or even feasible) implementation. These techniques are discussed in this chapter.
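One way to make the exhaustive search precise --- our own formulation, not copied from the cited literature --- is as a recurrence on the total number of guesses $T(S)$ needed to reveal every secret in a possibility set $S$: every secret in $S$ consumes the current guess, and each feedback cell other than the guess's own singleton is solved recursively,
\[
T(\{s\}) = 1, \qquad
T(S) = |S| + \min_{g} \sum_{\substack{C \in \mathcal{P}(g, S) \\ C \neq \{g\}}} T(C),
\]
where $\mathcal{P}(g, S)$ denotes the partition of $S$ induced by the feedbacks of guess $g$. The \emph{Min-Steps} objective is then to minimize $T(S_0)$ over the full set of codewords $S_0$.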
[could copy Neuwirth's illustration of finding an optimal strategy]
[show the search scale of the problem]
[show that an optimal strategy is not unique]
\section{Obvious Guesses}
Some definitions. Partitions, feedback count, etc.
While finding an optimal strategy for the general game is complex, in certain cases it is easy. For example, when there is only one possibility left, we should guess it. When there are only two possibilities left, we should guess (either) one of them. [these appear in Neuwirth 82]. When there are more than two possibilities, an easy answer is still sometimes available.
An obviously-optimal guess is an optimal guess that does not require much effort to identify. Depending on the technique used to identify it, the bar for ``obvious'' can vary. Here we use the technique introduced by \cite{koyama93}.
[Definition.] An obviously-optimal guess is a guess that partitions the remaining possibilities into singleton cells, i.e.\ every cell contains exactly one element.
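Operationally, detecting such a guess is cheap. The following C sketch is illustrative only: it assumes the feedback of the candidate guess against each remaining possibility has already been computed and encoded as a small integer, and simply checks that no feedback value repeats.
\begin{verbatim}
#include <stdbool.h>
#include <stdio.h>
#define MAX_FEEDBACKS 14              /* p(p+3)/2 for p = 4 pegs */

/* A guess is obviously optimal iff no two remaining possibilities
   produce the same feedback against it. */
bool is_obviously_optimal(const int *fb, int n_possibilities) {
    int seen[MAX_FEEDBACKS] = {0};
    for (int i = 0; i < n_possibilities; i++)
        if (seen[fb[i]]++) return false;  /* two secrets share a cell */
    return true;
}

int main() {
    int distinct[] = {0, 3, 7, 9, 13};  /* all cells singleton -> optimal */
    int clashing[] = {0, 3, 7, 7, 13};  /* two secrets share feedback 7 */
    printf("%d %d\n", is_obviously_optimal(distinct, 5),
                      is_obviously_optimal(clashing, 5)); /* prints: 1 0 */
    return 0;
}
\end{verbatim}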
If such a guess exists and comes from the possibility set, then it is optimal because it reveals one potential secret (itself) in the immediate step and reveals all the other potential secrets in two steps. It is easy to see that no other strategy could do better. If no such guess exists in the possibility set but one exists outside the possibility set, then that one is optimal because it reveals all secrets in two steps.
Note that an obviously-optimal guess is fairly generic about the goal -- it is optimal both in terms of the worst-case number of steps and the expected number of steps to determine or reveal the secret.
A necessary condition for an obviously-optimal guess to exist is that the number of remaining possibilities does not exceed the number of distinct feedbacks. For a game with $p$ pegs, the number of distinct feedbacks is $p(p+3)/2$. For example, in a four-peg game, there can be at most 14 possibilities left for an obviously-optimal guess to exist. This is a useful check in practice to avoid unnecessary effort searching for an obviously-optimal guess.
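To see where $p(p+3)/2$ comes from: a feedback is a pair $(a, b)$ of exact matches and color-only matches with $a, b \ge 0$ and $a + b \le p$, and exactly one such pair, $(p-1, 1)$, is impossible. Hence the count is
\[
\binom{p+2}{2} - 1 = \frac{(p+1)(p+2)}{2} - 1 = \frac{p(p+3)}{2},
\]
which gives $14$ for $p = 4$.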
Note also that in practice we may want to check only the remaining possibilities (so that the effort is minimized). It turns out that checking outside the remaining possibilities as well is equivalent to a full run of a heuristic function, as we show below.
It turns out (not so surprisingly) that the heuristics introduced in the previous chapter yield an obviously-optimal guess when one exists. We only need to show that the partition of an obviously-optimal guess (which we will call an \emph{obviously optimal partition} and denote by $Q$ below) achieves the best possible heuristic value.
Let $P$ denote any given partition. Let $k$ denote the number of (non-empty) cells in $P$. Let $n_i$ denote the number of elements in the $i$-th cell. Let $n = \sum_{i=1}^k n_i$ denote the total number of elements, which is invariant across different $P$. Finally, let $Q$ denote the partition of an obviously-optimal guess, i.e.\ one with all singleton cells.
\paragraph{Min-Max}
The \minmax{} heuristic value of an obviously optimal partition is one. This is the minimum value of the heuristic function.
\[
h(P) = \max_{1 \le i \le k} n_i \ge 1 = h(Q).
\]
\paragraph{Min-Avg}
The \minavg{} heuristic value of an obviously optimal partition is one. This is the minimum value of the function.
\[
h(P) = \sum_{i=1}^k \frac{n_i}{n} n_i \ge \min_{1 \le i \le k} n_i \ge 1 = h(Q).
\]
\paragraph{Max-Entropy}
The \maxent{} heuristic value of an obviously optimal partition is $\log n$. This is the maximum value of the function, since the entropy of a distribution over $k$ outcomes is at most $\log k$:
\[
h(P) = - \sum_{i=1}^k \frac{n_i}{n} \log \frac{n_i}{n} \le \log k \le \log n = h(Q).
\]
\paragraph{Max-Parts}
The \maxpar{} heuristic value of an obviously optimal partition is $n$. This is the maximum value of the function.
\[
h(P) = k \le n = h(Q).
\]
Since all four heuristics introduced yield an obviously optimal guess when one exists, we can insert a check for an obviously-optimal guess into the strategy as a shortcut to save computation time, knowing that this will not alter the output of the heuristic strategy.
\section{Less Obvious Guesses}
\section{Search space pruning}
Two techniques are important in reducing the search space: visiting candidate guesses in order of their lower bound, and visiting partitions in order of their size.
\section{Other techniques}
(e.g. two-phase optimization, hash collision group)
\section{Using a pre-built strategy tree}
\section{Extended/Adaptive strategy tree}
i.e.\ the tree not only contains guesses along the chosen strategy path, but also includes follow-up guesses for the case where the user makes a non-optimal guess partway through. The tree in this case is much larger, and we must exploit isomorphism to detect symmetry.
|
theory CTL
imports Main
begin
text\<open>Define and verify a model checker for properties defined in CTL on FTS.
Proofs are often provided twice: a sledgehammer-found one, and the more manual
one from the tutorial\<close>
text\<open>state is a type parameter of the theory\<close>
typedecl state
text\<open>arbitrary but fixed transition systems defined as a
relation between states\<close>
consts M :: "(state \<times> state) set"
text\<open>type of atomic propositions\<close>
typedecl "atom"
text\<open>The labelling function that defines what subset of atoms
hold in a particular state\<close>
consts L :: "state \<Rightarrow> atom set"
text\<open>Formulae of Propositional Dynamic Logic are built up from atoms, negation,
conjunction and the temporal connectives "all branches next" and "some branches
eventually"\<close>
datatype formula = Atom "atom"
| Neg formula
| And formula formula
| AX formula
| EF formula
text\<open>Validity relation: when a particular PDL formula holds\<close>
primrec valid :: "state \<Rightarrow> formula \<Rightarrow> bool" ("(_ \<Turnstile> _)" [80, 80] 80)
where
"s \<Turnstile> Atom a = (a \<in> L s)" |
"s \<Turnstile> Neg f = (\<not>(s \<Turnstile> f))" |
"s \<Turnstile> And f g = (s \<Turnstile> f \<and> s \<Turnstile> g)" |
"s \<Turnstile> AX f = (\<forall> t. (s, t) \<in> M \<longrightarrow> t \<Turnstile> f)" |
"s \<Turnstile> EF f = (\<exists> t. (s, t) \<in> M\<^sup>* \<and> t \<Turnstile> f)"
text\<open>Now we define our model checker\<close>
primrec mc :: "formula \<Rightarrow> state set" where
"mc(Atom a) = {s. a \<in> L s}" |
"mc(Neg f) = -mc f" |
"mc(And f g) = mc f \<inter> mc g" |
"mc(AX f) = {s. \<forall> t. (s, t) \<in> M \<longrightarrow> t \<in> mc f}" |
"mc(EF f) = lfp(\<lambda>T. mc f \<union> (M^-1 `` T))"
text\<open>Prove that the functional defining mc(EF _) is monotonic, and therefore has a least fixed point\<close>
lemma mono_ef: "mono(\<lambda>T. A \<union> (M^-1 `` T))"
by (smt Image_Un Un_iff monoI subsetI sup.order_iff)
lemma mono_ef': "mono(\<lambda>T. A \<union> (M^-1 `` T))"
apply (rule monoI)
by blast
text\<open>relate model checking with the logical semantics\<close>
lemma EF_lemma: "lfp(\<lambda>T. A \<union> (M^-1 `` T)) = {s. \<exists>t. (s, t) \<in> M\<^sup>* \<and> t \<in> A}"
  (* "try" is a proof-search command, not a proof method; the manual proof
     from the tutorial is omitted here, so we record a placeholder. *)
  sorry
end
|
# Script to Run the Experiments
import re
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Layer
import Preprocess
import layers
import models
import survival_definitions
# read gene names.
file_loc = 'F:\\genes_sans_bars.txt'
lineList = [line.rstrip('\n') for line in open(file_loc)]
names = list()
for i in range(len(lineList)):
    div = re.split(' ', lineList[i])
    names.append(div[0])
# read pathways
di = 'F:\\Pathways\\'
files_pways = os.listdir(di)
paths_list = []
adj = np.zeros((len(names),len(names)))
pgenes = []
posgenes = []
for i in files_pways:
    adj_ = [line.rstrip('\n') for line in open('F:\\Pathways\\'+i)]
    for j in range(len(adj_)):
        div = re.split('\t', adj_[j])
        if (names.count(div[0])>0) and (names.count(div[1])>0):
            pos_i = names.index(div[0])
            pos_j = names.index(div[1])
            if pgenes.count(div[0])==0:
                pgenes.append(div[0])
                posgenes.append(pos_i)
            if pgenes.count(div[1])==0:
                pgenes.append(div[1])
                posgenes.append(pos_j)
            if div[2]=='inh':
                w = -1.0
            else:
                w = 1.0  # use a float to match the -1.0 branch
            adj[pos_i,pos_j] = w
            #adj[pos_j,pos_i] = 1
#sort posgenes.
posgenes_sorted = posgenes.copy()
posgenes_sorted.sort()
adj_pways = adj[posgenes_sorted,:]
adj_pways = adj_pways[:,posgenes_sorted]
# reorder pgenes according to posgenes.
pgenes_sort = []
for i in range(len(posgenes_sorted)):
    idx = posgenes_sorted[i]
    idx_2 = posgenes.index(idx)
    pgenes_sort.append(pgenes[idx_2])
di = 'F:\\Datasets Firehose\\'
folders = list()
# get all folders.
for i in os.walk(di):
    print(i[0])
    folders.append(i[0])
# now we will go through the folders and remove from list which include gdac
folders_keep = list()
for i in folders:
    a = re.search("gdac", i)
    if (a == None):
        folders_keep.append(i)
# pop the first object
folders_keep.pop(0)
# select major cancer types.
#maj_can = [1,2,5,13,14,15,17,18,19,21,24,25,27,31,33] # deleted 22
maj_can = [1,2,3,5,9,13,14,15,16,17,18,19,21,24,27,31,33]
# do a for loop and use exp_mut_surv function to read the data.
builts = []# make sure all the genome builts are correct.
for i in range(len(maj_can)):
    print(folders_keep[maj_can[i]])
    pos_i, ns_i, all_1_i, all_2_i, vt_i, built, exp_i,age_i, drugs_i, os_i = Preprocess.exp_mut_surv(folders_keep[maj_can[i]], pgenes_sort)
    # concatenate all mutation data in the third axis.
    mut_i = np.concatenate((all_1_i, all_2_i),axis = 2)
    mut_i = np.concatenate((mut_i, vt_i,), axis = 2)
    # concatenate all datasets.
    if i == 0:
        mut = mut_i + 0. # copy mut_i
        pos = pos_i + 0.
        ns = ns_i + 0.
        builts.append(built) # make sure all genome builds are the same.
        exp = exp_i + 0. # copy exp_i
        age = age_i + 0. # copy age
        drugs = drugs_i.copy() # copy drugs_i
        osurv = os_i + 0. # copy overall survival
        cancers = np.zeros((mut.shape[0],len(maj_can)))
        cancers[:,i] = 1.
    else:
        mut = np.concatenate((mut, mut_i), axis = 0)
        pos = np.concatenate((pos, pos_i), axis = 0)
        ns = np.concatenate((ns, ns_i),axis =0)
        exp = np.concatenate((exp, exp_i),axis = 0)
        age = np.concatenate((age, age_i),axis = 0)
        for j in range(len(drugs_i)):
            drugs.append(drugs_i[j])
        osurv = np.concatenate((osurv, os_i),axis = 0)
        cancers_i = np.zeros((mut_i.shape[0],len(maj_can)))
        cancers_i[:,i] = 1.
        cancers = np.concatenate((cancers, cancers_i), axis = 0)
        builts.append(built) # make sure all genome builds are the same.
# reshape exp.
exp = exp.reshape((exp.shape[0],exp.shape[1],1))
# log transform exp.
# if there are zeros make them a small number.
#exp[exp==0.] = 1e-5
#exp = np.log(exp)
# sample
n = exp.shape[0]
ntrain = int(np.round(0.8*(n)))
nval = int(np.round(0.1*(n)))
ntest= int(np.round(0.1*(n)))
# set a random seed.
np.random.seed(2021)
reshuffle_sub = np.random.choice(n, size = n, replace = False)
train = reshuffle_sub[0:ntrain]
val = reshuffle_sub[ntrain:(ntrain+nval)]  # slice ends are exclusive; starting at ntrain avoids skipping a sample
test = reshuffle_sub[(ntrain+nval):n]
# now we will encode the positions
pos_train, min_max = Preprocess.pos_normal(pos[train,:])
ns_train = ns[train,:]
ns_train = ns_train.reshape((ns_train.shape[0],ns_train.shape[1],1))
encode_train = Preprocess.mut_encoding(pos_train, 50)
mut_train = mut[train,:,:]
xs_train = np.concatenate((encode_train,mut_train), axis = 2)
xs_train = np.concatenate((xs_train, ns_train),axis = 2)
exp_train = Preprocess.normalize(exp[train,:,:],axis = 1)
xs_train = np.concatenate((xs_train,exp_train), axis = 2)
can_train = cancers[train,:]
file_loc = 'F:\\Outputs_pathways.txt'
lineList = [line.rstrip('\n') for line in open(file_loc)]
mask = np.zeros((len(posgenes),1))
for i in range(len(lineList)):
    div = lineList[i]
    if pgenes_sort.count(div)>0:
        mask[pgenes_sort.index(div),0] = 1.
# classify cancers using expression data alone.
gt_cl = models.gat_k_model_msk([xs_train.shape[1],xs_train.shape[2]],adj_pways,'relu',can_train.shape[1],mask,hops = 2,units = 100, act_out = 'softmax')
opt = tf.keras.optimizers.SGD(learning_rate=0.1, momentum = 0.01, nesterov = True)
gt_cl.compile(optimizer = opt, loss= 'categorical_crossentropy', metrics = 'accuracy')
gt_cl.fit(xs_train, can_train, epochs= 100)
pos_val = Preprocess.pos_normal_val(pos[val,:], min_max)
ns_val = ns[val,:]
encode_val = Preprocess.mut_encoding(pos_val, 50)
mut_val = mut[val,:,:]
xs_val = np.concatenate((encode_val,mut_val), axis = 2)
ns_val = ns_val.reshape((ns_val.shape[0],ns_val.shape[1],1))
xs_val = np.concatenate((xs_val,ns_val), axis = 2)
exp_val = Preprocess.normalize(exp[val,:,:],axis = 1)
xs_val = np.concatenate((xs_val,exp_val), axis = 2)
can_val = cancers[val,:]
gt_cl.evaluate(xs_val, can_val)
f = gt_cl.layers[0](xs_val)
f, at = gt_cl.layers[1](f)
at = at.numpy()
BRAF_i = pgenes_sort.index('BRAF')
KRAS_i = pgenes_sort.index('KRAS')
RAF1_i = pgenes_sort.index('RAF1')
BRAF_V600e = np.where(pos[val,BRAF_i] == 140453136)[0]
## BRAF mutant
np.mean(at[BRAF_V600e,KRAS_i,BRAF_i])
## BRAF WT
BRAF_WT = np.where(pos[val,BRAF_i] == 0)[0]
KRAS_WT = np.where(pos[val,KRAS_i] == 0)[0]
KRAS_mut = np.where(pos[val,KRAS_i] != 0)[0]
np.mean(at[BRAF_WT,KRAS_i,BRAF_i])
## KRAS Mutant
import scipy.stats
scipy.stats.ttest_ind(at[BRAF_WT,KRAS_i,BRAF_i],at[BRAF_V600e,KRAS_i,BRAF_i])
np.mean(at[KRAS_WT,KRAS_i,BRAF_i])
np.mean(at[KRAS_mut,KRAS_i,BRAF_i])
scipy.stats.ttest_ind(at[KRAS_WT,KRAS_i,BRAF_i],at[KRAS_mut,KRAS_i,BRAF_i])
np.var(at[np.where(ns_val[:,BRAF_i]==1)[0],KRAS_i,BRAF_i])
##KRAS mutant
np.mean(at[np.where(ns_val[:,KRAS_i]==1)[0],KRAS_i,BRAF_i])
np.var(at[np.where(ns_val[:,KRAS_i]==1)[0],KRAS_i,BRAF_i])
## KRAS and BRAF WT
np.mean(at[np.where(ns_val[:,KRAS_i]+ns_val[:,BRAF_i]==0)[0],KRAS_i,BRAF_i])
np.var(at[np.where(ns_val[:,KRAS_i]+ns_val[:,BRAF_i]==0)[0],KRAS_i,BRAF_i])
## BRAF mutant
np.mean(at[np.where(ns_val[:,BRAF_i]==1)[0],BRAF_i,RAF1_i])
np.var(at[np.where(ns_val[:,BRAF_i]==1)[0],BRAF_i,RAF1_i])  # fixed misplaced bracket: take [0] from np.where, as above
|
@testset "SAM" begin
samdir = path_of_format("SAM")
@testset "MetaInfo" begin
metainfo = SAM.MetaInfo()
@test !isfilled(metainfo)
@test occursin("not filled", repr(metainfo))
metainfo = SAM.MetaInfo("CO", "some comment (parens)")
@test isfilled(metainfo)
@test string(metainfo) == "@CO\tsome comment (parens)"
@test occursin("CO", repr(metainfo))
@test SAM.tag(metainfo) == "CO"
@test SAM.value(metainfo) == "some comment (parens)"
@test_throws ArgumentError keys(metainfo)
@test_throws ArgumentError values(metainfo)
metainfo = SAM.MetaInfo("HD", ["VN" => "1.0", "SO" => "coordinate"])
@test isfilled(metainfo)
@test string(metainfo) == "@HD\tVN:1.0\tSO:coordinate"
@test occursin("HD", repr(metainfo))
@test SAM.tag(metainfo) == "HD"
@test SAM.value(metainfo) == "VN:1.0\tSO:coordinate"
@test keys(metainfo) == ["VN", "SO"]
@test values(metainfo) == ["1.0", "coordinate"]
@test SAM.keyvalues(metainfo) == ["VN" => "1.0", "SO" => "coordinate"]
@test haskey(metainfo, "VN")
@test haskey(metainfo, "SO")
@test !haskey(metainfo, "GO")
@test metainfo["VN"] == "1.0"
@test metainfo["SO"] == "coordinate"
@test_throws KeyError metainfo["GO"]
end
@testset "Header" begin
header = SAM.Header()
@test isempty(header)
push!(header, SAM.MetaInfo("@HD\tVN:1.0\tSO:coordinate"))
@test !isempty(header)
@test length(header) == 1
push!(header, SAM.MetaInfo("@CO\tsome comment"))
@test length(header) == 2
@test isa(collect(header), Vector{SAM.MetaInfo})
end
@testset "Record" begin
record = SAM.Record()
@test !isfilled(record)
@test !SAM.ismapped(record)
@test repr(record) == "XAM.SAM.Record: <not filled>"
@test_throws ArgumentError SAM.flag(record)
record = SAM.Record("r001\t99\tchr1\t7\t30\t8M2I4M1D3M\t=\t37\t39\tTTAGATAAAGGATACTG\t*")
@test isfilled(record)
@test occursin(r"^XAM.SAM.Record:\n", repr(record))
@test SAM.ismapped(record)
@test SAM.isprimary(record)
@test SAM.hastempname(record)
@test SAM.tempname(record) == "r001"
@test SAM.hasflag(record)
@test SAM.flag(record) === UInt16(99)
@test SAM.hasrefname(record)
@test SAM.refname(record) == "chr1"
@test SAM.hasposition(record)
@test SAM.position(record) === 7
@test SAM.hasmappingquality(record)
@test SAM.mappingquality(record) === UInt8(30)
@test SAM.hascigar(record)
@test SAM.cigar(record) == "8M2I4M1D3M"
@test SAM.hasnextrefname(record)
@test SAM.nextrefname(record) == "="
@test SAM.hasnextposition(record)
@test SAM.nextposition(record) === 37
@test SAM.hastemplength(record)
@test SAM.templength(record) === 39
@test SAM.hassequence(record)
@test SAM.sequence(record) == dna"TTAGATAAAGGATACTG"
@test !SAM.hasquality(record)
@test_throws MissingFieldException SAM.quality(record)
end
@testset "Reader" begin
reader = open(SAM.Reader, joinpath(samdir, "ce#1.sam"))
@test isa(reader, SAM.Reader)
@test eltype(reader) === SAM.Record
# header
h = header(reader)
@test string.(findall(h, "SQ")) == ["@SQ\tSN:CHROMOSOME_I\tLN:1009800"]
# first record
record = SAM.Record()
read!(reader, record)
@test SAM.ismapped(record)
@test SAM.refname(record) == "CHROMOSOME_I"
@test SAM.position(record) == leftposition(record) == 2
@test SAM.rightposition(record) == rightposition(record) == 102
@test SAM.tempname(record) == seqname(record) == "SRR065390.14978392"
@test SAM.sequence(record) == sequence(record) == dna"CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA"
@test SAM.sequence(String, record) == "CCTAGCCCTAACCCTAACCCTAACCCTAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAAGCCTAA"
@test SAM.seqlength(record) == 100
@test SAM.quality(record) == (b"#############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC" .- 33)
@test SAM.quality(String, record) == "#############################@B?8B?BA@@DDBCDDCBC@CDCDCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC"
@test SAM.flag(record) == 16
@test SAM.cigar(record) == "27M1D73M"
@test SAM.alignment(record) == Alignment([
AlignmentAnchor( 0, 1, OP_START),
AlignmentAnchor( 27, 28, OP_MATCH),
AlignmentAnchor( 27, 29, OP_DELETE),
AlignmentAnchor(100, 102, OP_MATCH)])
@test record["XG"] == 1
@test record["XM"] == 5
@test record["XN"] == 0
@test record["XO"] == 1
@test record["AS"] == -18
@test record["XS"] == -18
@test record["YT"] == "UU"
@test eof(reader)
close(reader)
# rightposition (also implicitly alignlength)
records = collect(open(SAM.Reader, joinpath(samdir, "ce#5b.sam")))
@test SAM.rightposition(records[6]) == rightposition(records[6]) == 83
# iterator
@test length(collect(open(SAM.Reader, joinpath(samdir, "ce#1.sam")))) == 1
@test length(collect(open(SAM.Reader, joinpath(samdir, "ce#2.sam")))) == 2
# IOStream
@test length(collect(SAM.Reader(open(joinpath(samdir, "ce#1.sam"))))) == 1
@test length(collect(SAM.Reader(open(joinpath(samdir, "ce#2.sam"))))) == 2
end
@testset "Round trip" begin
function compare_records(xs, ys)
if length(xs) != length(ys)
return false
end
for (x, y) in zip(xs, ys)
if x.data[x.filled] != y.data[y.filled]
return false
end
end
return true
end
for specimen in list_valid_specimens("SAM")
filepath = joinpath(samdir, filename(specimen))
mktemp() do path, io
# copy
reader = open(SAM.Reader, filepath)
header_original = header(reader)
writer = SAM.Writer(io, header_original)
records = SAM.Record[]
for record in reader
push!(records, record)
write(writer, record)
end
close(reader)
close(writer)
reader = open(SAM.Reader, path)
@test header(reader) == header_original
@test compare_records(collect(reader), records)
close(reader)
end
end
end
@testset "In-Place-Reading Pattern" begin
file_sam = joinpath(samdir, "ce#5b.sam")
records = open(collect, SAM.Reader, file_sam)
reader = open(SAM.Reader, file_sam)
record = SAM.Record()
i = 0
while !eof(reader)
empty!(record) # Reset the record.
read!(reader, record)
i = i + 1
@test records[i] == record
end
close(reader)
# Test blank file.
file_sam = joinpath(samdir, "xx#blank.sam")
records = open(collect, SAM.Reader, file_sam)
@test records == []
end
end
|
-------------------------------------------------------------------------------
RSX : DPMI-DOS 0.9/1.0 extender for 80386+ processors
Copyright (C) 1993-1998 Rainer Schnither
email to [email protected]
Home FTP-server:
ftp: ftp.uni-bielefeld.de
ftp: hermes.hrz.uni-bielefeld.de
dir: /pub/systems/msdos/misc
-------------------------------------------------------------------------------
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-------------------------------------------------------------------------------
Special exception for distributing software using RSX.EXE:
The RSX.EXE binary can be distributed without sources only if:
1) you have not modified any sources for RSX.EXE
2) you have included this file in your distribution
3) you have included a notice where the source code can be obtained
You are allowed to copy some runtime information for RSX from the
/RSX/DOC directories.
|
[STATEMENT]
lemma set_restriction_fun_all_conv: "
((set_restriction_fun P) A = A) = (\<forall>x\<in>A. P x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (set_restriction_fun P A = A) = (\<forall>x\<in>A. P x)
[PROOF STEP]
unfolding set_restriction_fun_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ({x \<in> A. P x} = A) = (\<forall>x\<in>A. P x)
[PROOF STEP]
by blast
|
mutable struct REINFORCE{T,TP,TV} <: AbstractPolicyAgent where{T<:Real,TP<:AbstractPolicy,TV<:AbstractFuncApprox}
π::TP
vf::TV
αₚ::T
αᵥ::T
γ::T
avgGrad::Bool
discountGrad::Bool
states::Vector
actions::Vector
rewards::Array{T, 1}
function REINFORCE(π::AbstractPolicy, vf::AbstractFuncApprox, αₚ::T, αᵥ::T, γ::T, avgGrad::Bool=false, discountGrad::Bool=true) where {T<:Real}
new{T,typeof(π),typeof(vf)}(clone(π), clone(vf), αₚ, αᵥ, γ, avgGrad, discountGrad, [], [], T[])
end
end
function act!(agent::REINFORCE, env::AbstractEnvironment, rng::AbstractRNG)
push!(agent.states, env.state)
get_action!(agent.π, env.state, rng)
push!(agent.actions, agent.π.action)
reward = step!(env, agent.π.action, rng)
push!(agent.rewards, reward)
if is_terminal(env)
Gs = compute_returns(agent.rewards, agent.γ)
update_withBaseline!(agent, Gs)
new_episode!(agent, rng)
end
end
function compute_returns(rewards::Array{T, 1}, γ::T)::Array{T, 1} where {T<:Real}
G = 0.
Gs = zeros(T, length(rewards))
for t in length(rewards):-1:1
G = rewards[t] + γ * G
Gs[t] = G
end
return Gs
end
function update_withBaseline!(agent::REINFORCE, Gs::Array{T, 1}) where {T<:Real}
γₜ = 1.0
gₚ = zeros(get_num_params(agent.π))
gᵥ = zeros(get_num_params(agent.vf))
∇ = zeros(get_num_params(agent.π))
for t in 1:length(agent.states)
v = call_gradient!(gᵥ, agent.vf, agent.states[t], 1)
δ = Gs[t] - v
@. gᵥ *= δ * agent.αᵥ
add_to_params!(agent.vf, gᵥ)
gradient_logp!(gₚ, agent.π, agent.states[t], agent.actions[t])
@. ∇ += δ * agent.αₚ * γₜ * gₚ
if agent.discountGrad
γₜ *= agent.γ
end
end
if agent.avgGrad
∇ ./= length(agent.states)
end
add_to_params!(agent.π, ∇)
end
function new_episode!(agent::REINFORCE, rng::AbstractRNG)
empty!(agent.states)
empty!(agent.actions)
empty!(agent.rewards)
end
function clone(agent::REINFORCE)::REINFORCE
a = REINFORCE(agent.π, agent.vf, agent.αₚ, agent.αᵥ, agent.γ, agent.avgGrad, agent.discountGrad)
a.states = deepcopy(agent.states)
a.actions = deepcopy(agent.actions)
a.rewards = deepcopy(agent.rewards)
return a
end
|
function [f] = elec_sphere_fit_optim(r, X, Y, Z, xo, yo, zo)
% elec_sphere_fit_optim - Optimization for elec_sphere_fit.m
%
% Called from elec_sphere_fit.m
%
% $Revision: 1.1 $ $Date: 2009-04-28 22:13:55 $
% Licence: GNU GPL, no implied or express warranties
% History: 02/2002, Darren.Weber_at_radiology.ucsf.edu
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% with center (Xo,Yo,Zo) and radius r, the equation of a sphere is:
%
% r^2 = (x-xo)^2 + (y-yo)^2 + (z-zo)^2
%
% This function below creates a scalar value to
% return to the fminsearch function in elec_sphere_fit.
S = (X-xo).^2 + (Y-yo).^2 + (Z-zo).^2 - r^2;
f = sum( S.^2 );
|
theory EigbyzProof
imports EigbyzDefs "../Majorities" "../Reduction"
begin
subsection {* Preliminary Lemmas *}
text {* Some technical lemmas about labels and trees. *}
lemma not_leaf_length:
assumes l: "\<not>(is_leaf l)"
shows "length_lbl l \<le> f"
using l length_lbl[of l] by (simp add: is_leaf_def)
lemma nil_is_Label: "[] \<in> Label"
by (auto simp: Label_def)
lemma card_set_lbl: "card (set_lbl l) = length_lbl l"
unfolding set_lbl_def length_lbl_def
using Rep_Label[of l, unfolded Label_def]
by (auto elim: distinct_card)
lemma Rep_Label_root_node [simp]: "Rep_Label root_node = []"
using nil_is_Label by (simp add: root_node_def Abs_Label_inverse)
lemma root_node_length [simp]: "length_lbl root_node = 0"
by (simp add: length_lbl_def)
lemma root_node_not_leaf: "\<not>(is_leaf root_node)"
by (simp add: is_leaf_def)
text {* Removing the last element of a non-root label gives a label. *}
lemma butlast_rep_in_label:
assumes l:"l \<noteq> root_node"
shows "butlast (Rep_Label l) \<in> Label"
proof -
have "Rep_Label l \<noteq> []"
proof
assume "Rep_Label l = []"
hence "Rep_Label l = Rep_Label root_node" by simp
with l show "False" by (simp only: Rep_Label_inject)
qed
with Rep_Label[of l] show ?thesis
by (auto simp: Label_def elim: distinct_butlast)
qed
text {*
The label of a child is well-formed.
*}
lemma Rep_Label_append:
assumes l: "\<not>(is_leaf l)"
shows "(Rep_Label l @ [p] \<in> Label) = (p \<notin> set_lbl l)"
(is "?lhs = ?rhs" is "(?l' \<in> _) = _")
proof
assume lhs: "?lhs" thus ?rhs
by (auto simp: Label_def set_lbl_def)
next
assume p: "?rhs"
from l[THEN not_leaf_length] have "length ?l' \<le> Suc f"
by (simp add: length_lbl_def)
moreover
from Rep_Label[of l] have "distinct (Rep_Label l)"
by (simp add: Label_def)
with p have "distinct ?l'" by (simp add: set_lbl_def)
ultimately
show ?lhs by (simp add: Label_def)
qed
text {*
The label of a child is the label of the parent, extended by a process.
*}
lemma label_children:
assumes c: "c \<in> children l"
shows "\<exists>p. p \<notin> set_lbl l \<and> Rep_Label c = Rep_Label l @ [p]"
proof -
from c obtain p
where p: "p \<notin> set_lbl l" and l: "\<not>(is_leaf l)"
and c: "c = Abs_Label (Rep_Label l @ [p])"
by (auto simp: children_def)
with Rep_Label_append[OF l] show ?thesis
by (auto simp: Abs_Label_inverse)
qed
text {*
The label of any child node is one longer than the label of its parent.
*}
lemma children_length:
assumes "l \<in> children h"
shows "length_lbl l = Suc (length_lbl h)"
using label_children[OF assms] by (auto simp: length_lbl_def)
text {* The root node is never a child. *}
lemma children_not_root:
assumes "root_node \<in> children l"
shows "P"
using label_children[OF assms] Abs_Label_inverse[OF nil_is_Label]
by (auto simp: root_node_def)
text {*
The label of a child with the last element removed is the label of
the parent.
*}
lemma children_butlast_lbl:
assumes "c \<in> children l"
shows "butlast_lbl c = l"
using label_children[OF assms]
by (auto simp: butlast_lbl_def Rep_Label_inverse)
text {*
The root node is not a child, and it is the only such node.
*}
lemma root_iff_no_child: "(l = root_node) = (\<forall>l'. l \<notin> children l')"
proof
assume "l = root_node"
thus "\<forall>l'. l \<notin> children l'" by (auto elim: children_not_root)
next
assume rhs: "\<forall>l'. l \<notin> children l'"
show "l = root_node"
proof (rule rev_exhaust[of "Rep_Label l"])
assume "Rep_Label l = []"
hence "Rep_Label l = Rep_Label root_node" by simp
thus ?thesis by (simp only: Rep_Label_inject)
next
fix l' q
assume l': "Rep_Label l = l' @ [q]"
let ?l' = "Abs_Label l'"
from Rep_Label[of l] l' have "l' \<in> Label" by (simp add: Label_def)
hence repl': "Rep_Label ?l' = l'" by (rule Abs_Label_inverse)
from Rep_Label[of l] l' have "l' @ [q] \<in> Label" by (simp add: Label_def)
with l' have "Rep_Label l = Rep_Label (Abs_Label (l' @ [q]))"
by (simp add: Abs_Label_inverse)
hence "l = Abs_Label (l' @ [q])" by (simp add: Rep_Label_inject)
moreover
from Rep_Label[of l] l' have "length l' < Suc f" "q \<notin> set l'"
by (auto simp: Label_def)
moreover
note repl'
ultimately have "l \<in> children ?l'"
by (auto simp: children_def is_leaf_def length_lbl_def set_lbl_def)
with rhs show ?thesis by blast
qed
qed
text {*
If some label @{text l} is not a leaf, then the set of processes that
appear at the end of the labels of its children is the set of all
processes that do not appear in @{text l}.
*}
lemma children_last_set:
assumes l: "\<not>(is_leaf l)"
shows "last_lbl ` (children l) = UNIV - set_lbl l"
proof
show "last_lbl ` (children l) \<subseteq> UNIV - set_lbl l"
by (auto dest: label_children simp: last_lbl_def)
next
show "UNIV - set_lbl l \<subseteq> last_lbl ` (children l)"
proof (auto simp: image_def)
fix p
assume p: "p \<notin> set_lbl l"
with l have c: "Abs_Label (Rep_Label l @ [p]) \<in> children l"
by (auto simp: children_def)
with Rep_Label_append[OF l] p
show "\<exists>c \<in> children l. p = last_lbl c"
by (force simp: last_lbl_def Abs_Label_inverse)
qed
qed
text {*
The function returning the last element of a label is injective on the
set of children of some given label.
*}
lemma last_lbl_inj_on_children:"inj_on last_lbl (children l)"
proof (auto simp: inj_on_def)
fix c c'
assume c: "c \<in> children l" and c': "c' \<in> children l"
and eq: "last_lbl c = last_lbl c'"
from c c' obtain p p'
where p: "Rep_Label c = Rep_Label l @ [p]"
and p': "Rep_Label c' = Rep_Label l @ [p']"
by (auto dest!: label_children)
from p p' eq have "p = p'" by (simp add: last_lbl_def)
with p p' have "Rep_Label c = Rep_Label c'" by simp
thus "c = c'" by (simp add: Rep_Label_inject)
qed
text {*
The number of children of any non-leaf label @{text l} is the
number of processes that do not appear in @{text l}.
*}
lemma card_children:
assumes "\<not>(is_leaf l)"
shows "card (children l) = N - (length_lbl l)"
proof -
from assms
have "last_lbl ` (children l) = UNIV - set_lbl l"
by (rule children_last_set)
moreover
have "card (UNIV - set_lbl l) = card (UNIV::Proc set) - card (set_lbl l)"
by (auto simp: card_Diff_subset_Int)
moreover
from last_lbl_inj_on_children
have "card (children l) = card (last_lbl ` children l)"
by (rule sym[OF card_image])
moreover
note card_set_lbl[of l]
ultimately
show ?thesis by auto
qed
text {*
Suppose a non-root label @{text l'} of length @{text "r+1"} ending in @{text q},
and suppose that @{text q} is well heard by process @{text p} in round
@{text r}. Then the value with which @{text p} decorates @{text l} is the one
that @{text q} associates to the parent of @{text l}.
*}
lemma sho_correct_vals:
assumes run: "SHORun EIG_M rho HOs SHOs"
and l': "l' \<in> children l"
and shop: "last_lbl l' \<in> SHOs (length_lbl l) p \<inter> HOs (length_lbl l) p"
(is "?q \<in> SHOs (?len l) p \<inter> _")
shows "vals (rho (?len l') p) l' = vals (rho (?len l) ?q) l"
proof -
let ?r = "?len l"
from run obtain \<mu>p
where nxt: "nextState EIG_M ?r p (rho ?r p) \<mu>p (rho (Suc ?r) p)"
and mu: "\<mu>p \<in> SHOmsgVectors EIG_M ?r p (rho ?r) (HOs ?r p) (SHOs ?r p)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq)
with shop
have msl:"\<mu>p ?q = Some (vals (rho ?r ?q))"
by (auto simp: EIG_SHOMachine_def EIG_sendMsg_def SHOmsgVectors_def)
from nxt length_lbl[of l'] children_length[OF l']
have "extend_vals ?r p (rho ?r p) \<mu>p (rho (Suc ?r) p)"
by (auto simp: EIG_SHOMachine_def nextState_def EIG_nextState_def
next_main_def next_end_def)
with msl l' show ?thesis
by (auto simp: extend_vals_def children_length children_butlast_lbl)
qed
text {*
A process fixes the value @{text "vals l"} of a label at state
@{text "length_lbl l"}, and then never modifies the value.
*}
(* currently nowhere used *)
lemma keep_vals:
assumes run: "SHORun EIG_M rho HOs SHOs"
shows "vals (rho (length_lbl l + n) p) l = vals (rho (length_lbl l) p) l"
(is "?v n = ?vl")
proof (induct n)
show "?v 0 = ?vl" by simp
next
fix n
assume ih: "?v n = ?vl"
let ?r = "length_lbl l + n"
from run obtain \<mu>p
where nxt: "nextState EIG_M ?r p (rho ?r p) \<mu>p (rho (Suc ?r) p)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq)
with ih show "?v (Suc n) = ?vl"
by (auto simp: EIG_SHOMachine_def nextState_def EIG_nextState_def
next_main_def next_end_def extend_vals_def)
qed
subsection {* Lynch's Lemmas and Theorems *}
text {*
If some process is safely heard by all processes at round @{text r},
then all processes agree on the value associated to labels of length
@{text "r+1"} ending in that process.
*}
lemma lynch_6_15:
assumes run: "SHORun EIG_M rho HOs SHOs"
and l': "l' \<in> children l"
and skr: "last_lbl l' \<in> SKr (HOs (length_lbl l)) (SHOs (length_lbl l))"
shows "vals (rho (length_lbl l') p) l' = vals (rho (length_lbl l') q) l'"
using assms unfolding SKr_def by (auto simp: sho_correct_vals)
text {*
Suppose that @{text l} is a non-root label whose last element was well
heard by all processes at round @{text r}, and that @{text l'} is a
child of @{text l} corresponding to process @{text q} that is also
well heard by all processes at round @{text "r+1"}. Then the values
associated with @{text l} and @{text l'} by any process @{text p}
are identical.
*}
lemma lynch_6_16_a:
assumes run: "SHORun EIG_M rho HOs SHOs"
and l: "l \<in> children t"
and skrl: "last_lbl l \<in> SKr (HOs (length_lbl t)) (SHOs (length_lbl t))"
and l': "l' \<in> children l"
and skrl':"last_lbl l' \<in> SKr (HOs (length_lbl l)) (SHOs (length_lbl l))"
shows "vals (rho (length_lbl l') p) l' = vals (rho (length_lbl l) p) l"
using assms by (auto simp: SKr_def sho_correct_vals)
text {*
For any non-leaf label @{text l}, more than half of its children end with a
process that is well heard by everyone at round @{text "length_lbl l"}.
*}
lemma lynch_6_16_c:
assumes commR: "EIG_commPerRd (HOs (length_lbl l)) (SHOs (length_lbl l))"
(is "EIG_commPerRd (HOs ?r) _")
and l: "\<not>(is_leaf l)"
shows "card {l' \<in> children l. last_lbl l' \<in> SKr (HOs ?r) (SHOs ?r)}
> card (children l) div 2"
(is "card ?lhs > _")
proof -
let ?skr = "SKr (HOs ?r) (SHOs ?r)"
have "last_lbl ` ?lhs = ?skr - set_lbl l"
proof
from children_last_set[OF l]
show "last_lbl ` ?lhs \<subseteq> ?skr - set_lbl l"
by (auto simp: children_length)
next
{
fix p
assume p: "p \<in> ?skr" "p \<notin> set_lbl l"
with children_last_set[OF l]
have "p \<in> last_lbl ` children l" by auto
with p have "p \<in> last_lbl ` ?lhs"
by (auto simp: image_def children_length)
}
thus "?skr - set_lbl l \<subseteq> last_lbl ` ?lhs" by auto
qed
moreover
from last_lbl_inj_on_children[of l]
have "inj_on last_lbl ?lhs" by (auto simp: inj_on_def)
ultimately
have "card ?lhs = card (?skr - set_lbl l)" by (auto dest: card_image)
also have "\<dots> \<ge> (card ?skr) - (card (set_lbl l))"
by (simp add: diff_card_le_card_Diff)
finally have "card ?lhs \<ge> (card ?skr) - ?r"
using card_set_lbl[of l] by simp
moreover
from commR have "card ?skr > (N + f) div 2"
by (auto simp: EIG_commPerRd_def)
with not_leaf_length[OF l] f
have "(card ?skr) - ?r > (N - ?r) div 2" by auto
with card_children[OF l]
have "(card ?skr) - ?r > card (children l) div 2" by simp
ultimately show ?thesis by simp
qed
text {*
If @{text l} is a non-leaf label such that all of its children corresponding
to well-heard processes at round @{text "length_lbl l"} have a uniform
@{text newvals} decoration at round @{text "f+1"}, then @{text l}
itself is decorated with that same value.
*}
lemma newvals_skr_uniform:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commR: "EIG_commPerRd (HOs (length_lbl l)) (SHOs (length_lbl l))"
(is "EIG_commPerRd (HOs ?r) _")
and notleaf: "\<not>(is_leaf l)"
and unif: "\<And>l'. \<lbrakk>l' \<in> children l;
last_lbl l' \<in> SKr (HOs (length_lbl l)) (SHOs (length_lbl l))
\<rbrakk> \<Longrightarrow> newvals (rho (Suc f) p) l' = v"
shows "newvals (rho (Suc f) p) l = v"
proof -
from unif
have "card {l' \<in> children l. last_lbl l' \<in> SKr (HOs ?r) (SHOs ?r)}
\<le> card {l' \<in> children l. newvals (rho (Suc f) p) l' = v}"
by (auto intro: card_mono)
with lynch_6_16_c[of HOs l SHOs, OF commR notleaf]
have maj: "has_majority v (newvals (rho (Suc f) p)) (children l)"
by (simp add: has_majority_def)
from run have "check_newvals (rho (Suc f) p)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq
nextState_def EIG_nextState_def next_end_def)
with maj notleaf obtain w
where wmaj: "has_majority w (newvals (rho (Suc f) p)) (children l)"
and wupd: "newvals (rho (Suc f) p) l = w"
by (auto simp: check_newvals_def)
from maj wmaj have "w = v"
by (auto simp: has_majority_def elim: abs_majoritiesE')
with wupd show ?thesis by simp
qed
text {*
A node whose label @{text l} ends with a process which is well heard
at round @{text "length_lbl l"} will have its @{text newvals} field set
(at round @{text "f+1"}) to the ``fixed-up'' value given by @{text vals}.
*}
lemma lynch_6_16_d:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and notroot: "l \<in> children t"
and skr: "last_lbl l \<in> SKr (HOs (length_lbl t)) (SHOs (length_lbl t))"
(is "_ \<in> SKr (HOs (?len t)) _")
shows "newvals (rho (Suc f) p) l = fixupval (vals (rho (?len l) p) l)"
(is "?P l")
using notroot skr proof (induct "Suc f - (?len l)" arbitrary: l t)
fix l t
assume "0 = Suc f - ?len l"
with length_lbl[of l] have leaf: "is_leaf l" by (simp add: is_leaf_def)
from run have "check_newvals (rho (Suc f) p)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq
nextState_def EIG_nextState_def next_end_def)
with leaf show "?P l"
by (auto simp: check_newvals_def is_leaf_def)
next
fix k l t
assume ih: "\<And> l' t'.
\<lbrakk>k = Suc f - length_lbl l'; l' \<in> children t';
last_lbl l' \<in> SKr (HOs (?len t')) (SHOs (?len t'))\<rbrakk>
\<Longrightarrow> ?P l'"
and flk: "Suc k = Suc f - ?len l"
and notroot: "l \<in> children t"
and skr: "last_lbl l \<in> SKr (HOs (?len t)) (SHOs (?len t))"
let ?v = "fixupval (vals (rho (?len l) p) l)"
from flk have notlf: "\<not>(is_leaf l)" by (simp add: is_leaf_def)
{
fix l'
assume l': "l' \<in> children l"
and skr': "last_lbl l' \<in> SKr (HOs (?len l)) (SHOs (?len l))"
from run notroot skr l' skr'
have "vals (rho (?len l') p) l' = vals (rho (?len l) p) l"
by (rule lynch_6_16_a)
moreover
from flk l' have "k = Suc f - ?len l'" by (simp add: children_length)
from this l' skr' have "?P l'" by (rule ih)
ultimately
have "newvals (rho (Suc f) p) l' = ?v"
using notroot l' by (simp add: children_length)
}
with run commR notlf show "?P l" by (auto intro: newvals_skr_uniform)
qed
text {*
Following Lynch~\cite{lynch:distributed}, we introduce some more useful
concepts for reasoning about the data structure.
*}
text {*
A label is \emph{common} if all processes agree on the final value it is
decorated with.
*}
definition common where
"common rho l \<equiv>
\<forall>p q. newvals (rho (Suc f) p) l = newvals (rho (Suc f) q) l"
text {*
The subtrees of a given label are all its possible extensions.
*}
definition subtrees where
"subtrees h \<equiv> { l . \<exists>t. Rep_Label l = (Rep_Label h) @ t }"
lemma children_in_subtree:
assumes "l \<in> children h"
shows "l \<in> subtrees h"
using label_children[OF assms] by (auto simp: subtrees_def)
lemma subtrees_refl [iff]: "l \<in> subtrees l"
by (auto simp: subtrees_def)
lemma subtrees_root [iff]: "l \<in> subtrees root_node"
by (auto simp: subtrees_def)
lemma subtrees_trans:
assumes "l'' \<in> subtrees l'" and "l' \<in> subtrees l"
shows "l'' \<in> subtrees l"
using assms by (auto simp: subtrees_def)
lemma subtrees_antisym:
assumes "l \<in> subtrees l'" and "l' \<in> subtrees l"
shows "l' = l"
using assms by (auto simp: subtrees_def Rep_Label_inject)
lemma subtrees_tree:
assumes l': "l \<in> subtrees l'" and l'': "l \<in> subtrees l''"
shows "l' \<in> subtrees l'' \<or> l'' \<in> subtrees l'"
using assms proof (auto simp: subtrees_def append_eq_append_conv2)
fix xs
assume "Rep_Label l'' @ xs = Rep_Label l'"
hence "Rep_Label l' = Rep_Label l'' @ xs" by (rule sym)
thus "\<exists>ys. Rep_Label l' = Rep_Label l'' @ ys" ..
qed
lemma subtrees_cases:
assumes l': "l' \<in> subtrees l"
and self: "l' = l \<Longrightarrow> P"
and child: "\<And>c. \<lbrakk> c \<in> children l; l' \<in> subtrees c \<rbrakk> \<Longrightarrow> P"
shows "P"
proof -
from l' obtain t where t: "Rep_Label l' = (Rep_Label l) @ t"
by (auto simp: subtrees_def)
have "l' = l \<or> (\<exists>c \<in> children l. l' \<in> subtrees c)"
proof (cases t)
assume "t = []"
with t show ?thesis by (simp add: Rep_Label_inject)
next
fix p t'
assume cons: "t = p # t'"
from Rep_Label[of l'] t have "length (Rep_Label l @ t) \<le> Suc f"
by (simp add: Label_def)
with cons have notleaf: "\<not>(is_leaf l)"
by (auto simp: is_leaf_def length_lbl_def)
let ?c = "Abs_Label (Rep_Label l @ [p])"
from t cons Rep_Label[of l'] have p: "p \<notin> set_lbl l"
by (auto simp: Label_def set_lbl_def)
with notleaf have c: "?c \<in> children l"
by (auto simp: children_def)
moreover
from notleaf p have "Rep_Label l @ [p] \<in> Label"
by (simp add: Rep_Label_append)
hence "Rep_Label ?c = (Rep_Label l @ [p])"
by (simp add: Abs_Label_inverse)
with cons t have "l' \<in> subtrees ?c"
by (auto simp: subtrees_def)
ultimately show ?thesis by blast
qed
thus ?thesis by (auto elim!: self child)
qed
lemma subtrees_leaf:
assumes l: "is_leaf l" and l': "l' \<in> subtrees l"
shows "l' = l"
using l' proof (rule subtrees_cases)
fix c
assume "c \<in> children l" -- {* impossible *}
with l show ?thesis by (simp add: children_def)
qed
lemma children_subtrees_equal:
assumes c: "c \<in> children l" and c': "c' \<in> children l"
and sub: "c' \<in> subtrees c"
shows "c' = c"
proof -
from assms have "Rep_Label c' = Rep_Label c"
by (auto simp: subtrees_def dest!: label_children)
thus ?thesis by (simp add: Rep_Label_inject)
qed
text {*
A set @{text C} of labels is a \emph{subcovering} w.r.t. label @{text l}
if for every leaf label @{text s} that is a subtree of @{text l} there
exists some label @{text "h \<in> C"} such that @{text s} is a subtree of
@{text h} and @{text h} is a subtree of @{text l}.
*}
definition subcovering where
"subcovering C l \<equiv>
\<forall>s \<in> subtrees l. is_leaf s \<longrightarrow> (\<exists>h \<in> C. h \<in> subtrees l \<and> s \<in> subtrees h)"
text {*
A \emph{covering} is a subcovering w.r.t. the root node.
*}
abbreviation covering where
"covering C \<equiv> subcovering C root_node"
text {*
The set of labels whose last element is well heard by all processes
throughout the execution forms a covering, and all these labels are common.
*}
lemma lynch_6_18_a:
assumes "SHORun EIG_M rho HOs SHOs"
and "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and "l \<in> children t"
and "last_lbl l \<in> SKr (HOs (length_lbl t)) (SHOs (length_lbl t))"
shows "common rho l"
using assms
by (auto simp: common_def lynch_6_16_d lynch_6_15
intro: arg_cong[where f="fixupval"])
lemma lynch_6_18_b:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commG: "EIG_commGlobal HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
shows "covering {l. \<exists>t. l \<in> children t \<and> last_lbl l \<in> (SK HOs SHOs)}"
proof (clarsimp simp: subcovering_def)
fix l
assume "is_leaf l"
with card_set_lbl[of l] have "card (set_lbl l) = Suc f"
by (simp add: is_leaf_def)
with commG have "N < card (SK HOs SHOs) + card (set_lbl l)"
by (simp add: EIG_commGlobal_def)
hence "\<exists>q \<in> set_lbl l . q \<in> SK HOs SHOs"
by (auto dest: majorities_intersect)
then obtain l1 q l2 where
l: "Rep_Label l = (l1 @ [q]) @ l2" and q: "q \<in> SK HOs SHOs"
unfolding set_lbl_def by (auto intro: split_list_propE)
let ?h = "Abs_Label (l1 @ [q])"
from Rep_Label[of l] l have "l1 @ [q] \<in> Label" by (simp add: Label_def)
hence reph: "Rep_Label ?h = l1 @ [q]" by (rule Abs_Label_inverse)
hence "length_lbl ?h \<noteq> 0" by (simp add: length_lbl_def)
hence "?h \<noteq> root_node" by auto
then obtain t where t: "?h \<in> children t"
by (auto simp: root_iff_no_child)
moreover
from reph q have "last_lbl ?h \<in> SK HOs SHOs" by (simp add: last_lbl_def)
moreover
from reph l have "l \<in> subtrees ?h" by (simp add: subtrees_def)
ultimately
show "\<exists>h. (\<exists>t. h \<in> children t) \<and> last_lbl h \<in> SK HOs SHOs \<and> l \<in> subtrees h"
by blast
qed
text {*
If @{text C} covers the subtree rooted at label @{text l} and if
@{text "l \<notin> C"} then @{text C} also covers subtrees rooted at
@{text l}'s children.
*}
lemma lynch_6_19_a:
assumes cov: "subcovering C l"
and l: "l \<notin> C"
and e: "e \<in> children l"
shows "subcovering C e"
proof (clarsimp simp: subcovering_def)
fix s
assume s: "s \<in> subtrees e" and leaf: "is_leaf s"
from s children_in_subtree[OF e] have "s \<in> subtrees l"
by (rule subtrees_trans)
with leaf cov obtain h where h: "h \<in> C" "h \<in> subtrees l" "s \<in> subtrees h"
by (auto simp: subcovering_def)
with l obtain e' where e': "e' \<in> children l" "h \<in> subtrees e'"
by (auto elim: subtrees_cases)
from `s \<in> subtrees h` `h \<in> subtrees e'` have "s \<in> subtrees e'"
by (rule subtrees_trans)
with s have "e \<in> subtrees e' \<or> e' \<in> subtrees e"
by (rule subtrees_tree)
with e e' have "e' = e"
by (auto dest: children_subtrees_equal)
with e' h show "\<exists>h\<in>C. h \<in> subtrees e \<and> s \<in> subtrees h" by blast
qed
text {*
If there is a subcovering @{text C} for a label @{text l} such that all labels
in @{text C} are common, then @{text l} itself is common as well.
*}
lemma lynch_6_19_b:
assumes run: "SHORun EIG_M rho HOs SHOs"
and cov: "subcovering C l"
and com: "\<forall>l' \<in> C. common rho l'"
shows "common rho l"
using cov proof (induct "Suc f - length_lbl l" arbitrary: l)
fix l
assume 0: "0 = Suc f - length_lbl l"
and C: "subcovering C l"
from 0 length_lbl[of l] have "is_leaf l"
by (simp add: is_leaf_def)
with C obtain h where h: "h \<in> C" "h \<in> subtrees l" "l \<in> subtrees h"
by (auto simp: subcovering_def)
hence "l \<in> C" by (auto dest: subtrees_antisym)
with com show "common rho l" ..
next
fix k l
assume k: "Suc k = Suc f - length_lbl l"
and C: "subcovering C l"
and ih: "\<And>l'. \<lbrakk>k = Suc f - length_lbl l'; subcovering C l'\<rbrakk> \<Longrightarrow> common rho l'"
show "common rho l"
proof (cases "l \<in> C")
case True
with com show ?thesis ..
next
case False
with C have "\<forall>e \<in> children l. subcovering C e"
by (blast intro: lynch_6_19_a)
moreover
from k have "\<forall>e \<in> children l. k = Suc f - length_lbl e"
by (auto simp: children_length)
ultimately
have com_ch: "\<forall>e \<in> children l. common rho e"
by (blast intro: ih)
show ?thesis
proof (clarsimp simp: common_def)
fix p q
from k have notleaf: "\<not>(is_leaf l)" by (simp add: is_leaf_def)
let ?r = "Suc f"
from com_ch
have "\<forall>e \<in> children l. newvals (rho ?r p) e = newvals (rho ?r q) e"
by (auto simp: common_def)
hence "\<forall>w. {e \<in> children l. newvals (rho ?r p) e = w}
= {e \<in> children l. newvals (rho ?r q) e = w}"
by auto
moreover
from run
have "check_newvals (rho ?r p)" "check_newvals (rho ?r q)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq nextState_def
EIG_nextState_def next_end_def)
with notleaf have
"(\<exists>w. has_majority w (newvals (rho ?r p)) (children l)
\<and> newvals (rho ?r p) l = w)
\<or> \<not>(\<exists>w. has_majority w (newvals (rho ?r p)) (children l))
\<and> newvals (rho ?r p) l = undefined"
"(\<exists>w. has_majority w (newvals (rho ?r q)) (children l)
\<and> newvals (rho ?r q) l = w)
\<or> \<not>(\<exists>w. has_majority w (newvals (rho ?r q)) (children l))
\<and> newvals (rho ?r q) l = undefined"
by (auto simp: check_newvals_def)
ultimately show "newvals (rho ?r p) l = newvals (rho ?r q) l"
by (auto simp: has_majority_def elim: abs_majoritiesE')
qed
qed
qed
text {* The root of the tree is a common node. *}
lemma lynch_6_20:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commG: "EIG_commGlobal HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
shows "common rho root_node"
using run lynch_6_18_b[OF assms]
proof (rule lynch_6_19_b, clarify)
fix l t
assume "l \<in> children t" "last_lbl l \<in> SK HOs SHOs"
thus "common rho l" by (auto simp: SK_def elim: lynch_6_18_a[OF run commR])
qed
text {*
A decision is taken only at state @{text "f+1"} and then stays stable.
*}
lemma decide:
assumes run: "SHORun EIG_M rho HOs SHOs"
shows "decide (rho r p) =
(if r < Suc f then None
else Some (newvals (rho (Suc f) p) root_node))"
(is "?P r")
proof (induct r)
from run show "?P 0"
by (auto simp: EIG_SHOMachine_def SHORun_eq HOinitConfig_eq
initState_def EIG_initState_def)
next
fix r
assume ih: "?P r"
from run obtain \<mu>p
where "EIG_nextState r p (rho r p) \<mu>p (rho (Suc r) p)"
by (auto simp: EIG_SHOMachine_def SHORun_eq SHOnextConfig_eq
nextState_def)
thus "?P (Suc r)"
proof (auto simp: EIG_nextState_def next_main_def next_end_def)
assume "\<not>(r < f)" "r \<noteq> f"
with ih
show "decide (rho r p) = Some (newvals (rho (Suc f) p) root_node)"
by simp
qed
qed
subsection {* Proof of Agreement, Validity, and Termination *}
text {*
The Agreement property is an immediate consequence of lemma @{text lynch_6_20}.
*}
theorem Agreement:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commG: "EIG_commGlobal HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and p: "decide (rho m p) = Some v"
and q: "decide (rho n q) = Some w"
shows "v = w"
using p q lynch_6_20[OF run commG commR]
by (auto simp: decide[OF run] common_def)
text {*
We now show the Validity property: if all processes initially
propose the same value @{text v}, then no other value may be decided.
By lemma @{text sho_correct_vals}, value @{text v} must propagate to
all children of the root that are well heard at round @{text 0}, and
lemma @{text lynch_6_16_d} implies that @{text v} is the value assigned
to all these children by @{text newvals}. Finally, lemma
@{text newvals_skr_uniform} lets us conclude.
*}
theorem Validity:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and initv: "\<forall>q. the (vals (rho 0 q) root_node) = v"
and dp: "decide (rho r p) = Some w"
shows "v = w"
proof -
have v: "\<forall>q. vals (rho 0 q) root_node = Some v"
proof
fix q
from run have "vals (rho 0 q) root_node \<noteq> None"
by (auto simp: EIG_SHOMachine_def SHORun_eq HOinitConfig_eq
initState_def EIG_initState_def)
then obtain w where w: "vals (rho 0 q) root_node = Some w"
by auto
from initv have "the (vals (rho 0 q) root_node) = v" ..
with w show "vals (rho 0 q) root_node = Some v" by simp
qed
let ?len = length_lbl
let ?r = "Suc f"
{
fix l'
assume l': "l' \<in> children root_node"
and skr: "last_lbl l' \<in> SKr (HOs 0) (SHOs 0)"
with run v have "vals (rho (?len l') p) l' = Some v"
by (auto dest: sho_correct_vals simp: SKr_def)
moreover
from run commR l' skr
have "newvals (rho ?r p) l' = fixupval (vals (rho (?len l') p) l')"
by (auto intro: lynch_6_16_d)
ultimately
have "newvals (rho ?r p) l' = v" by simp
}
with run commR root_node_not_leaf
have "newvals (rho ?r p) root_node = v"
by (auto intro: newvals_skr_uniform)
with dp show ?thesis by (simp add: decide[OF run])
qed
text {* Termination is trivial for \eigbyz{}. *}
theorem Termination:
assumes "SHORun EIG_M rho HOs SHOs"
shows "\<exists>r v. decide (rho r p) = Some v"
using assms by (auto simp: decide)
subsection {* \eigbyz{} Solves Weak Consensus *}
text {*
Summing up: every (coarse-grained) run of \eigbyz{} whose
HO and SHO collections satisfy the communication predicate
satisfies the Weak Consensus property.
*}
theorem eig_weak_consensus:
assumes run: "SHORun EIG_M rho HOs SHOs"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and commG: "EIG_commGlobal HOs SHOs"
shows "weak_consensus (\<lambda>p. the (vals (rho 0 p) root_node)) decide rho"
unfolding weak_consensus_def
using Validity[OF run commR]
Agreement[OF run commG commR]
Termination[OF run]
by auto
text {*
By the reduction theorem, the correctness of the algorithm carries over
to the fine-grained model of runs.
*}
theorem eig_weak_consensus_fg:
assumes run: "fg_run EIG_M rho HOs SHOs (\<lambda>r q. undefined)"
and commR: "\<forall>r. EIG_commPerRd (HOs r) (SHOs r)"
and commG: "EIG_commGlobal HOs SHOs"
shows "weak_consensus (\<lambda>p. the (vals (state (rho 0) p) root_node))
decide (state \<circ> rho)"
(is "weak_consensus ?inits _ _")
proof (rule local_property_reduction[OF run weak_consensus_is_local])
fix crun
assume crun: "CSHORun EIG_M crun HOs SHOs (\<lambda>r q. undefined)"
and init: "crun 0 = state (rho 0)"
from crun have "SHORun EIG_M crun HOs SHOs" by (unfold SHORun_def)
from this commR commG
have "weak_consensus (\<lambda>p. the (vals (crun 0 p) root_node)) decide crun"
by (rule eig_weak_consensus)
with init show "weak_consensus ?inits decide crun"
by (simp add: o_def)
qed
end
|
lemma eventually_at_left_to_right: "eventually P (at_left a) \<longleftrightarrow> eventually (\<lambda>x. P (- x)) (at_right (-a))" for a :: real
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% System :
% Module :
% Object Name : $RCSfile$
% Revision : $Revision$
% Date : $Date$
% Author : $Author$
% Created By : Robert Heller
% Created : Wed May 31 20:07:09 2017
% Last Modified : <171104.0938>
%
% Description
%
% Notes
%
% History
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Copyright (C) 2017 Robert Heller D/B/A Deepwoods Software
% 51 Locke Hill Road
% Wendell, MA 01379-9728
%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program; if not, write to the Free Software
% Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
%
%
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{QuadSSSQuadIn: Quad SSR and Quad 5V Input HAT}
This is a circuit board for an add-on board for a Raspberry Pi B+ that will
add four 5V logic inputs and four Solid State Relays, using a MCP23008 I2C I/O
expander. There is a jumper header to set one of eight addresses for the
MCP23008 chip. This allows using more than one of this board or any other
board featuring a MCP23008 or MCP23016 or MCP23017 chip (up to eight total).
The circuit board uses a 40-pin header socket to connect to the 40-pin header on
the Raspberry Pi B+ and can use a stack-through header to allow additional
boards to be stacked on top of it.
\section{Circuit Description}
\begin{figure}[hbpt]\begin{centering}%
\includegraphics[width=5in]{QuadSSSQuadIn.pdf}
\caption{Circuit Diagram of the QuadSSSQuadIn}
\end{centering}\end{figure}
This circuit uses a MCP23008 to expand the Raspberry Pi's I/O with 8 additional
I/O pins. Four of these pins (0-3) are used to drive a pair of dual
opto-isolated SSR chips and the remaining 4 (4-7) are driven by an input buffer
chip that can be driven with 5V logic. The SSRs can be used to switch
arbitrary trackside devices, since the output side of the SSRs is rated up to
+/- 400 volts at up to 0.1 Amp (100mA). The 5V logic inputs are compatible
with many available sensor boards (particularly occupancy detector circuits).
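As a concrete illustration (a sketch only: it assumes the address jumpers are
left at the chip's base address of 0x20 and that the board sits on the
Raspberry Pi's I2C bus 1), the MCP23008 can be exercised from the command line
with the \texttt{i2c-tools} package:
\begin{verbatim}
i2cset -y 1 0x20 0x00 0xf0  # IODIR: GP0-3 outputs (SSRs), GP4-7 inputs
i2cset -y 1 0x20 0x09 0x05  # GPIO: energize SSRs 0 and 2
i2cget -y 1 0x20 0x09       # read the GP4-7 inputs in the upper nibble
\end{verbatim}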
\section{Parts List}
\begin{table}[htdp]
\begin{centering}\begin{tabular}{|l|l|p{1in}|l|p{.5in}|}
\hline
Value&Qty&Refs&Mouser Part Number&Adafruit Part Number\\
\hline
.1 uf&3&C1 C2 C3&21RZ310-RC&\\
\hline
ASSR-4128&2&IC1 IC2&630-ASSR-4128-002E&\\
\hline
RPi GPIO&1&J0&855-M20-6102045&2223\\
\hline
CONN 3X2&1&J1&517-929836-02-03&\\
\hline
330 Ohms&1&RP1&652-4608X-AP2-331LF&\\
\hline
10K Ohms&2&RR1 RR2&652-4605X-1LF-10K&\\
\hline
CONN 8&1&T1&651-1725711&\\
\hline
CONN 6&1&T2&651-1725698&\\
\hline
MCP23008&1&U1&579-MCP23008-E/P&\\
\hline
74LV125AN&1&U2&595-SN74LV125AN&\\
\hline
\end{tabular}
\caption{Parts list for QuadSSSQuadIn boards.}
\end{centering}\end{table}\footnote{Mouser Project link:
\url{http://www.mouser.com/ProjectManager/ProjectDetail.aspx?AccessID=97fe7b85dc}.}
The only parts that might be substituted are J0 (the RPi GPIO Header), and T1
and T2 (the I/O terminals). The parts listed are for the stacking headers for
the RPi GPIO Header, and screw terminals for the I/O terminals. Feel free to
select a non-stacking header for the RPi GPIO Header and to select either pin
arrays or spring terminals for T1 and T2.
\section{Circuit Board Layout}
\begin{figure}[hbpt]\begin{centering}%
\includegraphics[width=5in]{QuadSSSQuadIn3DTop.png}
\caption{3D rendering of the QuadSSSQuadIn board}
\end{centering}\end{figure}
\begin{figure}[hbpt]\begin{centering}%
\includegraphics[width=5in]{QuadSSSQuadIn.png}
\caption{Fabrication image of the QuadSSSQuadIn board}
\end{centering}\end{figure}
Board assembly is straightforward. You need to be careful orienting the ICs,
noting that the SSRs are oppositely oriented from the other ICs. Also the
SIP resistor arrays need to be carefully oriented -- the dot marks pin 1,
which is indicated on the board with a square pad\footnote{The first batch of
the boards I ordered used the wrong PCB modules for the terminals and the
holes are too small for the screw terminal pins to go all the way in. They
can be ``jammed'' in enough to be soldered. Pin arrays fit a little better,
but still need some effort to seat. The next batch I order will not have this
problem.}.
\section{Downloadables and Software Support}
Full design information is available on GitHub here:
\url{https://github.com/RobertPHeller/RPi-RRCircuits/tree/master/QuadSSSQuadIn}.
This board is supported by the Model Railroad System\footnote{Available as a
free download from Deepwoods Software at this web address:
\url{http://www.deepsoft.com/home/products/modelrailroadsystem/}.}
\texttt{OpenLCB\_PiMCP23008} daemon. A basic XML file for it is included in
its GitHub folder.
|
#!/usr/bin/env Rscript
# This script takes the FLAG of each read in `ORI_BAM` and applies it to reads of the same QNAME in `SNCR_BAM`.
#
# The input parameters are (following the right order):
# * `ORI_BAM`, path of the original BAM file, BAM before splitting reads with GATK's SplitNCigarReads (SNCR) function;
# * `SNCR_BAM`, path of the split BAM file, BAM after splitting reads with SNCR;
# * `OUTPUT_BAM`, path of the output BAM file to be written, the '.bam' extension is added in case it is missing; and
# * `THREADS`, number of cores to use.
#
# To run flagCorrection:
# Rscript $PATH_TO_FLAGCORRECTION/flagCorrection.r \
# $ORI_BAM \
# $SNCR_BAM \
# $OUTPUT_BAM \
# $THREADS
time1 <- Sys.time()
ORI_BAM=commandArgs(TRUE)[1]
SNCR_BAM=commandArgs(TRUE)[2]
OUTPUT_BAM=commandArgs(TRUE)[3]
THREADS=as.integer(commandArgs(TRUE)[4])
### load packages
library(Rsamtools)
library(foreach)
library(doParallel)
### test inputs
if( !file.exists(ORI_BAM) ) stop( gettextf("File %s doesn't exist", ORI_BAM) )
if( !file.exists(SNCR_BAM) ) stop( gettextf("File %s doesn't exist", SNCR_BAM) )
output_dir <- dirname(OUTPUT_BAM)
if( !dir.exists(output_dir) ) stop( gettextf("Directory %s doesn't exist", output_dir) )
### create temp dir
temp_dir <- tempfile("flagCorrection_temp_dir_", tmpdir=output_dir)
dir.create(temp_dir)
### get flags and qnames from the original bam
k <- scanBam( ORI_BAM, param=ScanBamParam(what=c("qname", "flag")) ) [[1]]
ori_bam_qname_flag <- setNames(k$flag, k$qname)
### get headers only
bam_header <- file.path(temp_dir, "header.sam")
cmd <- gettextf("samtools view -H %s > %s", ORI_BAM, bam_header)
system(cmd)
### define some variables
# chromosome names
cmd <- gettextf("samtools view -H %s | perl -lne '/SN:(\\S+)/ and print $1'", SNCR_BAM)
chr_names <- system(cmd, intern=TRUE)
# split sam addresses
k <- file.path(temp_dir, chr_names)
split_sncr_sams <- paste0(k, ".sam")
# corrected flags
corrected_flags_files <- paste0(k, "_corrected_flags.txt")
# corrected split sam addresses
corrected_split_sncr_sams <- paste0(k, "_corrected.sam")
# headered corrected split sam addresses
headered_corrected_split_sncr_sams <- paste0(k, "_corrected_headered.sam")
# headered corrected split bam addresses
final_bams <- paste0(k, "_final.bam")
# threads
threads <- min( THREADS, length(chr_names) )
### function to extract one chromosome's reads and correct their flags (run in parallel below)
correct_flags_per_chromosome <- function(chr_names_i, split_sncr_sams_i, corrected_flags_files_i, corrected_split_sncr_sams_i,
headered_corrected_split_sncr_sams_i, final_bams_i, SNCR_BAM, ori_bam_qname_flag, bam_header){
# create the SAM file of the chromosome
cmd <- gettextf("samtools view %s %s > %s", SNCR_BAM, chr_names_i, split_sncr_sams_i)
system(cmd)
# load QNAMEs from sncr bam
cmd <- gettextf("cut -f 1 %s", split_sncr_sams_i)
qname_sncr <- system(cmd, intern=TRUE)
# get flags corrected
corrected_flag <- unname( ori_bam_qname_flag[qname_sncr] )
write.table(corrected_flag, file=corrected_flags_files_i, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE)
# replace the flags of sam
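# (awk idiom: FNR==NR holds only while reading the first file, so the corrected
# flags are cached in array a; each following SAM line then has its FLAG --
# field 2 -- overwritten by the cached value for that line number)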
cmd <- gettextf("awk 'FNR==NR{a[NR]=$1;next}{$2=a[FNR]}1' OFS='\t' %s %s > %s", corrected_flags_files_i, split_sncr_sams_i, corrected_split_sncr_sams_i)
system(cmd)
unlink(split_sncr_sams_i)
# add headers to sam
cmd <- gettextf("cat %s %s > %s", bam_header, corrected_split_sncr_sams_i, headered_corrected_split_sncr_sams_i)
system(cmd)
unlink(corrected_split_sncr_sams_i)
# sam to bam
cmd <- gettextf("samtools view -S -b %s > %s", headered_corrected_split_sncr_sams_i, final_bams_i)
system(cmd)
unlink(headered_corrected_split_sncr_sams_i)
}
### get corrected SAMs
registerDoParallel(cores=threads)
result <- foreach(chr_names_i = chr_names,
split_sncr_sams_i = split_sncr_sams,
corrected_flags_files_i = corrected_flags_files,
corrected_split_sncr_sams_i = corrected_split_sncr_sams,
headered_corrected_split_sncr_sams_i = headered_corrected_split_sncr_sams,
final_bams_i = final_bams) %dopar%
correct_flags_per_chromosome(chr_names_i,
split_sncr_sams_i,
corrected_flags_files_i,
corrected_split_sncr_sams_i,
headered_corrected_split_sncr_sams_i,
final_bams_i,
SNCR_BAM,
ori_bam_qname_flag,
bam_header)
unlink( c(corrected_flags_files, bam_header) )
### add .bam extension to the output bam in case it is missing
if( !grepl("\\.bam$", OUTPUT_BAM) ){
OUTPUT_BAM <- paste0(OUTPUT_BAM, ".bam")
}
### merge bams
all_bams_to_merge <- paste(final_bams, collapse=" ")
cmd <- gettextf("samtools merge -c -p -f -@ %i %s %s", THREADS, OUTPUT_BAM, all_bams_to_merge)
system(cmd)
### delete temp files/dir
unlink(temp_dir, recursive=TRUE)
### report time spent
time2 <- Sys.time()
Time <- time2 - time1
Time <- round( as.numeric(Time, units = "mins"), 2 )
cat( gettextf("\nflagCorrection finished after %s minutes.\n\n", Time) )
|
[STATEMENT]
lemma set_remove: "set (remove l x) <= set l"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (remove l x) \<subseteq> set l
[PROOF STEP]
by (induct l, auto)
|
------------------------------------------------------------------------
-- The Agda standard library
--
-- Bundles of parameters for passing to the Ring Solver
------------------------------------------------------------------------
{-# OPTIONS --without-K --safe #-}
-- This module packages up all the stuff that's passed to the other
-- modules in a convenient form.
module Tactic.RingSolver.Core.Polynomial.Parameters where
open import Algebra.Bundles using (RawRing)
open import Data.Bool.Base using (Bool; T)
open import Function
open import Level
open import Relation.Unary
open import Tactic.RingSolver.Core.AlmostCommutativeRing
-- This record stores all the stuff we need for the coefficients:
--
-- * A raw ring
-- * A (decidable) predicate on "zeroness"
--
-- It's used for defining the operations on the Horner normal form.
record RawCoeff ℓ₁ ℓ₂ : Set (suc (ℓ₁ ⊔ ℓ₂)) where
field
rawRing : RawRing ℓ₁ ℓ₂
isZero : RawRing.Carrier rawRing → Bool
open RawRing rawRing public
-- This record stores the full information we need for converting
-- to the final ring.
record Homomorphism ℓ₁ ℓ₂ ℓ₃ ℓ₄ : Set (suc (ℓ₁ ⊔ ℓ₂ ⊔ ℓ₃ ⊔ ℓ₄)) where
field
from : RawCoeff ℓ₁ ℓ₂
to : AlmostCommutativeRing ℓ₃ ℓ₄
module Raw = RawCoeff from
open AlmostCommutativeRing to public
field
morphism : Raw.rawRing -Raw-AlmostCommutative⟶ to
open _-Raw-AlmostCommutative⟶_ morphism renaming (⟦_⟧ to ⟦_⟧ᵣ) public
field
Zero-C⟶Zero-R : ∀ x → T (Raw.isZero x) → 0# ≈ ⟦ x ⟧ᵣ
|
function sigmoid_of(f1::Function, f2::Function, x₀, α)
return x -> begin
Logging.configure(level=INFO)
sf = (1 - sigmoid(x, x₀, α))*f1(x) + sigmoid(x, x₀, α)*f2(x)
# `===` against a NaN literal is type- and bit-pattern-specific; isnan reliably detects any NaN
if isnan(sf)
error("SIGMOID NaN: x=$x, x₀=$x₀, α=$α, f1(x)=$(f1(x)), f2(x)=$(f2(x)), σ(x)=$(sigmoid(x, x₀, α))")
end
return sf
end
end
function sigmoid_of_name(f1::Function, f2::Function, x₀, α, name::AbstractString)
return x -> begin
Logging.configure(level=INFO)
sf = (1 - sigmoid(x, x₀, α))*f1(x) + sigmoid(x, x₀, α)*f2(x)
# `===` against a NaN literal is type- and bit-pattern-specific; isnan reliably detects any NaN
if isnan(sf)
error("SIGMOID NaN: x=$x, x₀=$x₀, α=$α, f1(x)=$(f1(x)), f2(x)=$(f2(x)), σ(x)=$(sigmoid(x, x₀, α)), name=$name")
end
return sf
end
end
const sigmoid = (x, x₀, α) -> 1/(1+exp(-(x - x₀)/α))
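# Usage sketch (hypothetical functions): blend x -> x^2 into x -> 2x + 1 around
# x₀ = 0.0 with steepness α = 0.1; well below x₀ the blend follows the first
# function, well above it the second.
#   f = sigmoid_of(x -> x^2, x -> 2x + 1, 0.0, 0.1)
#   f(-1.0)  # ≈ 1.0, since σ(-1) ≈ 0
#   f(1.0)   # ≈ 3.0, since σ(1) ≈ 1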
|
[STATEMENT]
lemma Tag_Tag: "(Tag X = Tag X') = (X = X')"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Tag X = Tag X') = (X = X')
[PROOF STEP]
by transfer auto
|
%!TEX program = xelatex
%!TEX options=--shell-escape
\documentclass[aspectratio=169]{beamer}
\usepackage{pdfpages}
\setbeamerfont{title}{series=\bfseries}
\setbeamerfont{frametitle}{series=\bfseries}
\definecolor{oxford-blue}{HTML}{002147}
\definecolor{oxford-gold}{RGB}{207, 122, 48}
\definecolor{oxford-blue}{RGB}{0,33,71}
\setbeamercolor{title}{fg=oxford-blue}
\setbeamercolor{frametitle}{fg=oxford-blue}
\setbeamercolor{section in toc}{fg=black}
\setbeamercolor{description item}{fg=black}
\setbeamertemplate{description item}{\bfseries\insertdescriptionitem}
\setbeamertemplate{itemize item}{\color{oxford-blue}$\blacktriangleright$}
\setbeamerfont{block title}{series=\bfseries}
\setbeamercolor{block title}{use={frametitle},fg=frametitle.fg,bg=frametitle.bg}
\AtBeginSection[]{
\begin{frame}
\vfill\centering\usebeamerfont{title}\insertsectionhead\par\vfill
\end{frame}
}
\newcommand{\hl}[1]{\textcolor{oxford-blue}{\textbf{#1}}}
\usepackage{fontspec}
\setmonofont[Scale=MatchUppercase]{Fira Mono}
\setsansfont{FoundrySterling}[
UprightFont=*-Book,
BoldFont=*-Bold,
ItalicFont=*-BookItalic
]
\usetheme{default}
\usefonttheme[onlymath]{serif}
\beamertemplatenavigationsymbolsempty
\usepackage{graphicx}
\usepackage{pgffor}
\usepackage{hyperref}
\usepackage{booktabs, colortbl}
\usepackage{tikz}
\usetikzlibrary{positioning, decorations.pathreplacing, fit, calligraphy, arrows.meta, backgrounds, shapes.callouts, shadows.blur, calc}
\usepackage[export]{adjustbox}
\title{
\includegraphics[height=18mm]{../../backtracer}\\~\\%
A NetLogo extension for data provenance
}
\date{\scriptsize\today}
\author{Nicolas Payette}
\institute{%
\includegraphics[height=8mm]{geog-brand-pos}%
\includegraphics[height=8mm]{ox_brand_cmyk_pos.eps}
}
\titlegraphic{\textcolor{oxford-blue!85}{\url{https://payette.io}}}
\begin{document}
\maketitle
\section{A bit of context}
{
\setbeamercolor{background canvas}{bg=}
\includepdf[pages=1]{../poster/poster.pdf}
}
\tikzset{
decoration = {calligraphic brace, amplitude = 10pt},
backfont/.style = {font=\Large\bf, text=black!50},
box/.style = {
semithick, draw, rounded corners = 6pt, align = center
},
frontbox/.style = {
box, fill=oxford-gold!10, draw=oxford-gold, text width = 30mm, minimum height=1.8cm, font=\Large
},
backbox/.style = {
box, fill=oxford-blue!5, draw=oxford-blue!40, inner sep=4mm, backfont
},
output/.style = {input},
arrow/.style = {very thick, -{Stealth[scale=1.25]}, draw=black!60},
fatarrow/.style = {ultra thick, -{Stealth[scale=1.25]}, draw=oxford-blue},
hl/.style = {font=\bf, text=oxford-blue},
bracelabel/.style = {midway, xshift=-7mm, text width=5cm, anchor=center, rotate=90, backfont, align = center}
}
\newcommand{\Brace}[2]{
\node [fit=#2] (#1-brace) {};
\draw [decorate, very thick, color=black!70] (#1-brace.south west) -- (#1-brace.north west) node [bracelabel] {#1};
}
\newcommand{\adaptedview}[1]{
\begin{adjustbox}{max height=.85\textheight}
\begin{tikzpicture}[
wideback/.style = {backbox, minimum width = 95mm},
analbox/.style = {frontbox, fill=black!1, draw=black!50}
]
\node [analbox] (quantianal) {Quantitative\\analysis};
\node [analbox, right = 2cm of quantianal] (qualianal) {Qualitative\\analysis};
\begin{scope}[on background layer]
\node [wideback, fit=(quantianal)(qualianal)] (anal) {};
\Brace{Analysis}{(anal)}
\end{scope}
\node [frontbox, above = 15mm of quantianal] (formalmodels) {Formal\\models};
\node [frontbox, above = 15mm of qualianal] (semiformalmodels) {Semi-formal\\models};
\begin{scope}[on background layer]
\node [wideback, fit=(formalmodels)(semiformalmodels)] (modelbox) {};
\Brace{Models}{(modelbox)}
\end{scope}
\node [frontbox, above = of modelbox] (code) {NetLogo\\code};
\begin{scope}[on background layer]
\node [wideback, fit=(code)] (codebox) {};
\Brace{Simulation}{(codebox)}
\end{scope}
\node [frontbox, below = of anal] (raw) {``Raw'' data};
\begin{scope}[on background layer]
\node [wideback, fit=(raw)] (data) {};
\Brace{Data}{(data)}
\end{scope}
#1
\end{tikzpicture}
\end{adjustbox}
}
\begin{frame}[t]\frametitle{Qualitative survey data}
\vfill\centering
\resizebox{.75\textwidth}{!}{$n=7$}
\vfill
\end{frame}
\newcommand{\whatshoulditdo}[2]{
\frametitle{\large In your ideal world, what would a \texttt{qual2rule} NetLogo extension do?}
\begin{columns}[c]
\begin{column}{0.625\textwidth}
\LARGE
#1
\vfill
\end{column}
\begin{column}{0.375\textwidth}
\adaptedview{#2}
\end{column}
\end{columns}
}
\begin{frame}[fragile]{The big picture}
\begin{center}
\adaptedview{
\node [wideback, below = of raw] (phenom) {Phenomena};
\draw [arrow] (phenom) -- (raw);
\draw [arrow] (raw) -- (quantianal);
\draw [arrow] (raw) -- (qualianal);
\uncover<2>{
\draw [arrow] (quantianal) -- (formalmodels);
\draw [arrow] (qualianal) -- (semiformalmodels);
\draw [arrow] (qualianal) -- (formalmodels);
\draw [arrow] (semiformalmodels) -- (formalmodels);
\draw [arrow] (formalmodels) -- (code);
}
}
\end{center}
\end{frame}
\begin{frame}
\whatshoulditdo{\normalsize
\begin{itemize}
\item ``link bits of code to some text (maybe in Infotab) and back again? Or... faciltates some of the qualitative analytic frameworks to aid translation into code''
\item ``Provide data traceability (e.g. link specific theory, to specific qualitative data, to specific model specification, to specific agent behaviour).''
\item ``Help with systematic documentation of data use (especially of the qualitative data). Here, maybe something can be done to extend the ODD protocol.''
\item ``Possibly provide a kind of link to qualitative data analysis software (e.g. Nvivo, Atlas.ti) in a similar way a link to GIS and R is provided.''
\end{itemize}
}{
\draw [fatarrow, {Stealth[scale=1.25]}-{Stealth[scale=1.25]}] (code) -- (formalmodels);
\draw [fatarrow, {Stealth[scale=1.25]}-{Stealth[scale=1.25]}] (code) -- (semiformalmodels);
\draw [fatarrow, {Stealth[scale=1.25]}-{Stealth[scale=1.25]}] (code) -- (raw);
\draw [fatarrow, {Stealth[scale=1.25]}-{Stealth[scale=1.25]}] (formalmodels) -| ([xshift=-25mm]raw);
\draw [fatarrow, {Stealth[scale=1.25]}-{Stealth[scale=1.25]}] (semiformalmodels) -| ([xshift=25mm]raw);
}
\end{frame}
|
@doc raw"""
function aic(t :: Trace)
Computes the Akaike Information Criterion for a *single trace*, thus replacing the usual definition
in terms of the "maximum likelihood" by one in terms of the "likelihood" of that trace. The formula is
```math
\text{AIC}(t)/2 = |\text{params}(t)| - \ell(t),
```
where ``|\text{params}(t)|`` is the sum of the dimensionalities of non-observed
and non-deterministic sample nodes.
"""
function aic(t :: Trace)
ll = 0.0
k = 0
for v in values(t)
if !(v.interpretation == DETERMINISTIC || v.observed)
length(size(v.value)) == 0 ? k += 1 : k += length(v.value)
end
if v.observed
ll += v.logprob_sum
end
end
2.0 * (k - ll)
end
function numparams(t::Trace)
k = 0
for v in values(t)
if !(v.interpretation == DETERMINISTIC || v.observed)
length(size(v.value)) == 0 ? k += 1 : k += length(v.value)
end
end
k
end
@doc raw"""
function aic(r :: SamplingResults{I}) where I <: InferenceType
Computes an empirical estimate of the Akaike Information Criterion from a `SamplingResults`.
The formula is
```math
\text{AIC}(r)/2 = \min_{t \in \text{traces}(r)}\left(|\text{params}(t)| - \hat\ell(t)\right),
```
where ``|\text{params}(t)|`` is the sum of the dimensionalities of non-observed
and non-deterministic sample nodes and
``\hat\ell(t)`` is the empirical maximum likelihood.
"""
function aic(r :: SamplingResults{I}) where I <: InferenceType
min_aic = Inf
for t in r.traces
a = aic(t)
if a < min_aic
min_aic = a
end
end
min_aic
end
@doc raw"""
function hpds(r::SamplingResults{I}, pct::Float64) where I <: InferenceType
Computes the highest posterior density set (HPDS) of the `SamplingResults` object.
Let ``\mathcal T`` be the set of traces. The ``100\times Q \%``-percentile HPDS is defined as the
set that satisfies ``\sum_{t \in \mathrm{HPDS}} p(t) = Q`` and, for all ``t \in \mathrm{HPDS}``,
``p(t) > p(s)`` for every ``s \in \mathcal T - \mathrm{HPDS}``.
It is possible to compute the HPDS using the full joint density ``p(t) \equiv p(x, z)``, where ``x`` is the
set of observed rvs and ``z`` is the set of latent rvs, since ``p(z|x) \propto p(x, z)``.
`pct` should be a float in (0.0, 1.0). E.g., `pct = 0.95` returns the 95% HPDS.
"""
function hpds(r::SamplingResults{I}, pct::Float64) where I <: InferenceType
(pct > 0.0 && pct < 1.0) || error("pct must be in (0, 1)")
ix = Int(floor(length(r.traces) * pct))
for t in r.traces
if t.logprob_sum == 0.0
logprob!(t)
end
end
# p(z|x) \propto p(x, z), so we can sort by the joint density, which is easy to
# calculate, instead of needing to approximate the posterior density
tr_ix = partialsortperm(r.traces, 1:ix, by=(t,) -> t.logprob_sum, rev=true)
NonparametricSamplingResults{typeof(r.interpretation)}(
r.interpretation,
r.log_weights[tr_ix],
r.return_values[tr_ix],
r.traces[tr_ix]
)
end
@doc raw"""
function hpdi(r::SamplingResults{I}, pct::Float64, addresses::AbstractArray{T}) where {I <: InferenceType, T}
Computes the highest posterior density interval(s) for a univariate variable. Does *not* check that the
data corresponding to each address in `addresses` is actually univariate; if in doubt, use `hpds` instead.
"""
function hpdi(r::SamplingResults{I}, pct::Float64, addresses::AbstractArray{T}) where {I <: InferenceType, T}
r = hpds(r, pct)
Dict(a => (minimum(r[a]), maximum(r[a])) for a in addresses)
end
export aic, hpds, hpdi, numparams
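# Hedged usage sketch: assumes `results` is a `SamplingResults` produced by one
# of this package's inference algorithms and that `:mu` is a univariate address.
#   aic(results)                 # empirical AIC estimate over the sampled traces
#   hpds(results, 0.95)          # 95% highest posterior density set of traces
#   hpdi(results, 0.95, [:mu])   # Dict(:mu => (lo, hi)) interval endpoints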
|
The St. Clair Catholic District School Board is responsible for the city 's seven elementary and two secondary Catholic schools ( St. Christopher 's and St. Patrick 's ) . In 2014 , St. Patrick 's and St. Christopher 's merged , under the St. Patrick 's name , on St. Christopher 's North Sarnia site .
|
## Marcel Ramos
## YouTube viewing habits
library(lattice)
library(xlsx)
demo <- read.csv("data-raw/YouTubeDemographics.csv", header = TRUE)
demo <- demo[,c("Gender", "Age.group", "Percentage")]
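# Percentage appears to be stored as strings like "12.0000%"; splitting on
# "0000%" strips that suffix so the remainder parses as numeric (an assumption
# inferred from the split pattern below).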
demo$percentage <- as.numeric(unlist(strsplit(as.character(demo$Percentage),
split = "0000%", fixed = TRUE)))
summary(demo$Age.group)
demo$AgeG <- factor(demo$Age.group, levels =
c("18-24", "25-34", "35-44", "45-54", "55-64", "65-"))
barchart(demo$percentage~demo$AgeG, data = demo,
groups = demo$Gender,
auto.key = list(space = "right"),
main = "Demographics by Age Group",
xlab = "Age Group", ylab = "Percentage")
devs <- read.csv("data-raw/YouTubeDevices.csv", header = TRUE)
devs
devs$devper <- round(devs$Views/sum(devs$Views)*100, 2)
survey <- read.xlsx("data-raw/PH750-2SurveyDataRaw.xlsx", sheetIndex = 1,
header = TRUE)
table(survey$What.was.your.preferred.way.to.attend.lectures.for.this.course.)
|
using RDatasets, Gadfly
set_default_plot_size(6inch, 3inch)
plot(dataset("plm", "Cigar"), x=:Sales, y=:Year, Scale.y_discrete,
Geom.bar(orientation=:horizontal))
|
(* A category is very much like a graph. It has vertices
named objects and vertices named arrows. Each arrow goes
from an object to an object (possibly the same!). *)
Class Cat (obj: Type) (arr: obj -> obj -> Type): Type :=
MkCat {
(* For each object, there is an arrow called `id` which
goes from the object to itself. *)
id: forall {o: obj}, arr o o;
(* Given an arrow `f` from object `a` to `b` and an arrow
`g` from `b` to `c`, we can compose these arrows. The
result is an arrow from `a` to `c`. *)
compose: forall {a b c: obj}, arr a b -> arr b c -> arr a c;
(* Here comes some properties of `id` and `compose` *)
(* For any arrow `f`, compose id f = f *)
neutralLeft: forall {a b: obj} (f: arr a b), compose id f = f;
(* For any arrow `f`, compose f id = f *)
neutralRight: forall {a b: obj} (f: arr a b), compose f id = f;
(* For any arrows `f`, `g` and `h`,
composing f with g, and then the result with h,
gives exactly the same result as
composing f with the result of the composition of g and h.
This means that, as with string concatenation, any grouping of
compositions is fine as long as the order of the elements
in the sequence is preserved. *)
associativity: forall {a b c d: obj} (f: arr a b) (g: arr b c) (h: arr c d),
compose (compose f g) h = compose f (compose g h);
}.
(* `LE n m` encode the property that `n ≤ m`
i.e. `n` is less or equal to `m` *)
Inductive LE : nat -> nat -> Prop :=
LERefl: forall {o: nat}, LE o o
| LENext: forall {a b: nat}, LE a b -> LE a (S b)
.
(* Taking naturals as objects and `LE` as arrows,
this actually forms a category! *)
(* One possible completion (a sketch): composition is transitivity of `LE`,
   and the category laws hold because `LE` lives in `Prop`, so any two
   proofs of `LE a b` are equal by the proof-irrelevance axiom from the
   standard library. *)
Require Import Coq.Logic.ProofIrrelevance.
Lemma leTrans : forall a b c, LE a b -> LE b c -> LE a c.
Proof.
  intros a b c Hab Hbc; induction Hbc.
  - assumption.
  - apply LENext; assumption.
Qed.
Instance natPoset: Cat nat LE.
Proof.
  unshelve econstructor.
  - exact @LERefl.
  - exact leTrans.
  - intros; apply proof_irrelevance.
  - intros; apply proof_irrelevance.
  - intros; apply proof_irrelevance.
Defined.
|
theory Graph
imports Main
begin
section\<open>Rooted Graphs\<close>
text \<open>In this section, we model rooted graphs and their sub\hyp{}paths and paths. We give a number
of lemmas that will help proofs in the following theories, but that are very specific to our
approach.\<close>
text \<open>First, we will need the following simple lemma, which is not graph related, but that will
prove useful when we will want to exhibit the last element of a non-empty sequence.\<close>
lemma neq_Nil_conv2 :
"xs \<noteq> [] = (\<exists> x xs'. xs = xs' @ [x])"
by (induct xs rule : rev_induct, auto)
subsection \<open>Basic Definitions and Properties\<close>
subsubsection \<open>Edges\<close>
text \<open>We model edges by a record \<open>'v edge\<close> which is parameterized by the type \<open>'v\<close>
of vertices. This allows us to represent the red part
of red-black graphs as well as the black part (i.e. LTS) using extensible records (more on this later). Edges have two
components, @{term "src"} and @{term "tgt"}, which respectively give their source and target.\<close>
record 'v edge =
src :: "'v"
tgt :: "'v"
subsubsection \<open>Rooted graphs\<close>
text \<open>We model rooted graphs by the record \<open>'v rgraph\<close>. It consists of two components: its
root and its set of edges.\<close>
record 'v rgraph =
root :: "'v"
edges :: "'v edge set"
subsubsection \<open>Vertices\<close>
text \<open>The set of vertices of a rooted graph is made of its root and the endpoints of its
edges. Isabelle/HOL provides \emph{extensible records}, i.e.\ it is possible to define records using
existing records by adding components. The following definition suppose that @{term "g"} is of type
\<open>('v,'x) rgraph_scheme\<close>, i.e.\ an object that has at least all the components of a
\<open>'v rgraph\<close>. The second type parameter \<open>'x\<close> stands for the hypothetical type
parameters that such an object could have in addition of the type of vertices \<open>'v\<close>.
Using \<open>('v,'x) rgraph_scheme\<close> instead of \<open>'v rgraph\<close> allows to reuse the following
definition(s) for all type of objects that have at least the components of a rooted graph. For
example, we will reuse the following definition to characterize the set of locations of a LTS (see
\verb?LTS.thy?).\<close>
definition vertices ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v set"
where
"vertices g = {root g} \<union> src `edges g \<union> tgt ` edges g"
subsubsection \<open>Basic properties of rooted graphs\<close>
text \<open>In the following, we will only be interested in loop free rooted graphs
and in what we call
\emph{well formed rooted graphs}. A well formed rooted graph is a rooted graph that has an empty set
of edges or, if this is not the case, has at least one edge whose source is its root.\<close>
abbreviation loop_free ::
"('v,'x) rgraph_scheme \<Rightarrow> bool"
where
"loop_free g \<equiv> \<forall> e \<in> edges g. src e \<noteq> tgt e"
abbreviation wf_rgraph ::
"('v,'x) rgraph_scheme \<Rightarrow> bool"
where
"wf_rgraph g \<equiv> root g \<in> src ` edges g = (edges g \<noteq> {})"
text \<open>Even if we are only interested in this kind of rooted graphs, we will not assume the graphs
are loop free or well formed when this is not needed.\<close>
subsubsection \<open>Out-going edges\<close>
text \<open>This abbreviation will prove handy in the following.\<close>
abbreviation out_edges ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v \<Rightarrow> 'v edge set"
where
"out_edges g v \<equiv> {e \<in> edges g. src e = v}"
subsection \<open>Consistent Edge Sequences, Sub-paths and Paths\<close>
subsubsection \<open>Consistency of a sequence of edges\<close>
text \<open>A sequence of edges @{term "es"} is consistent from
vertex @{term "v1"} to another vertex @{term "v2"} if @{term "v1 = v2"} if it is empty, or, if it is
not empty:
\begin{itemize}
\item @{term "v1"} is the source of its first element, and
\item @{term "v2"} is the target of its last element, and
\item the target of each of its elements is the source of its follower.
\end{itemize}\<close>
fun ces ::
"'v \<Rightarrow> 'v edge list \<Rightarrow> 'v \<Rightarrow> bool"
where
"ces v1 [] v2 = (v1 = v2)"
| "ces v1 (e#es) v2 = (src e = v1 \<and> ces (tgt e) es v2)"
subsubsection \<open>Sub-paths and paths\<close>
text \<open>Let @{term "g"} be a rooted graph, @{term "es"} a sequence of edges and @{term "v1"} and
\<open>v2\<close> two vertices. @{term "es"} is a sub-path in @{term "g"} from @{term "v1"} to
@{term "v2"} if:
\begin{itemize}
\item it is consistent from @{term "v1"} to @{term "v2"},
\item @{term "v1"} is a vertex of @{term "g"},
\item all of its elements are edges of @{term "g"}.
\end{itemize}
The second constraint is needed in the case of the empty sequence: without it,
the empty sequence would be a sub-path of @{term "g"} even when @{term "v1"} is not one of
its vertices.\<close>
definition subpath ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v \<Rightarrow> 'v edge list \<Rightarrow> 'v \<Rightarrow> bool"
where
"subpath g v1 es v2 \<equiv> ces v1 es v2 \<and> v1 \<in> vertices g \<and> set es \<subseteq> edges g"
text \<open>Let @{term "es"} be a sub-path of @{term "g"} leading from @{term "v1"} to @{term "v2"}.
@{term "v1"} and @{term "v2"} are both vertices of @{term "g"}.\<close>
lemma lst_of_sp_is_vert :
assumes "subpath g v1 es v2"
shows "v2 \<in> vertices g"
using assms by (induction es arbitrary : v1, auto simp add: subpath_def vertices_def)
text \<open>The empty sequence of edges is a sub-path from @{term "v1"} to @{term "v2"} if and only if
they are equal and belong to the graph.\<close>
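(* a sketch of the corresponding lemma; it follows directly from the definitions *)
lemma sp_Nil :
"subpath g v1 [] v2 = (v1 = v2 \<and> v1 \<in> vertices g)"
by (auto simp add : subpath_def)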
text \<open>The empty sequence is a sub-path from the root of any rooted graph.\<close>
lemma
"subpath g (root g) [] (root g)"
by (auto simp add : vertices_def subpath_def)
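text \<open>In particular, the root of a graph is always one of its vertices (a quick sanity check; a sketch, not used later).\<close>
lemma root_in_vertices :
"root g \<in> vertices g"
by (simp add : vertices_def)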
text \<open>In the following, we will not always be interested in the final vertex of a sub-path. We
will use the abbreviation @{term "subpath_from"} whenever this final vertex has no importance, and
@{term subpath} otherwise.\<close>
abbreviation subpath_from ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v \<Rightarrow> 'v edge list \<Rightarrow> bool"
where
"subpath_from g v es \<equiv> \<exists> v'. subpath g v es v'"
abbreviation subpaths_from ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v \<Rightarrow> 'v edge list set"
where
"subpaths_from g v \<equiv> {es. subpath_from g v es}"
text \<open>A path is a sub-path starting at the root of the graph.\<close>
abbreviation path ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v edge list \<Rightarrow> 'v \<Rightarrow> bool"
where
"path g es v \<equiv> subpath g (root g) es v"
abbreviation paths ::
"('a,'b) rgraph_scheme \<Rightarrow> 'a edge list set"
where
"paths g \<equiv> {es. \<exists> v. path g es v}"
text \<open>The empty sequence is a path of any rooted graph.\<close>
lemma
"[] \<in> paths g"
by (auto simp add : subpath_def vertices_def)
text \<open>Some useful simplification lemmas for @{term "subpath"}.\<close>
lemma sp_one :
"subpath g v1 [e] v2 = (src e = v1 \<and> e \<in> edges g \<and> tgt e = v2)"
by (auto simp add : subpath_def vertices_def)
lemma sp_Cons :
"subpath g v1 (e#es) v2 = (src e = v1 \<and> e \<in> edges g \<and> subpath g (tgt e) es v2)"
by (auto simp add : subpath_def vertices_def)
lemma sp_append_one :
"subpath g v1 (es@[e]) v2 = (subpath g v1 es (src e) \<and> e \<in> edges g \<and> tgt e = v2)"
by (induct es arbitrary : v1, auto simp add : subpath_def vertices_def)
lemma sp_append :
"subpath g v1 (es1@es2) v2 = (\<exists> v. subpath g v1 es1 v \<and> subpath g v es2 v2)"
by (induct es1 arbitrary : v1)
((simp add : subpath_def, fast),
(auto simp add : fst_of_sp_is_vert sp_Cons))
text \<open>A sub-path leads to a unique vertex.\<close>
lemma sp_same_src_imp_same_tgt :
assumes "subpath g v es v1"
assumes "subpath g v es v2"
shows "v1 = v2"
using assms
by (induct es arbitrary : v)
(auto simp add : sp_Cons subpath_def vertices_def)
text \<open>In the following, we are interested in the evolution of the set of sub-paths of our symbolic
execution graph after symbolic execution of a transition from the LTS representation of the program
under analysis. Symbolic execution of a transition results in adding to the graph a new edge whose
source is already a vertex of this graph, but not its target. The following lemma describes
sub-paths ending in the target of such an edge.\<close>
text \<open>Let @{term "e"} be an edge whose target has no out-going edges. A sub-path @{term "es"}
containing @{term "e"} ends with @{term "e"}, and this occurrence of @{term "e"} is unique along
@{term "es"}.\<close>
lemma sp_through_de_decomp :
assumes "out_edges g (tgt e) = {}"
assumes "subpath g v1 es v2"
assumes "e \<in> set es"
shows "\<exists> es'. es = es' @ [e] \<and> e \<notin> set es'"
using assms(2,3)
proof (induction es arbitrary : v1)
case Nil thus ?case by simp
next
case (Cons e' es)
hence "e = e' \<or> (e \<noteq> e' \<and> e \<in> set es)" by auto
thus ?case
proof (elim disjE, goal_cases)
case 1 thus ?case
using assms(1) Cons
by (rule_tac ?x="[]" in exI) (cases es, auto simp add: sp_Cons)
next
case 2 thus ?case
using assms(1) Cons(1)[of "tgt e'"] Cons(2)
by (auto simp add : sp_Cons)
qed
qed
subsection \<open>Adding Edges\<close>
text \<open>This definition and the following lemma are here mainly to ease the definitions and proofs
in the next theories.\<close>
abbreviation add_edge ::
"('v,'x) rgraph_scheme \<Rightarrow> 'v edge \<Rightarrow> ('v,'x) rgraph_scheme"
where
"add_edge g e \<equiv> rgraph.edges_update (\<lambda> edges. edges \<union> {e}) g"
text \<open>Let @{term "es"} be a sub-path from a vertex other than the target of @{term "e"} in the
graph obtained from @{term "g"} by the addition of edge @{term "e"}. Moreover, assume that the
target of @{term "e"} is not a vertex of @{term "g"}. Then @{term "e"} is an element of
@{term "es"}.\<close>
lemma sp_ends_in_tgt_imp_mem :
assumes "tgt e \<notin> vertices g"
assumes "v \<noteq> tgt e"
assumes "subpath (add_edge g e) v es (tgt e)"
shows "e \<in> set es"
proof -
have "es \<noteq> []" using assms(2,3) by (auto simp add : subpath_def)
then obtain e' es' where "es = es' @ [e']" by (simp add : neq_Nil_conv2) blast
thus ?thesis using assms(1,3) by (auto simp add : sp_append_one vertices_def image_def)
qed
subsection \<open>Trees\<close>
text \<open>We define trees as rooted-graphs in which there exists a unique path leading to each vertex.\<close>
definition is_tree ::
"('v,'x) rgraph_scheme \<Rightarrow> bool"
where
"is_tree g \<equiv> \<forall> l \<in> Graph.vertices g. \<exists>! p. Graph.path g p l"
text \<open>The empty graph is thus a tree.\<close>
lemma empty_graph_is_tree :
assumes "edges g = {}"
shows "is_tree g"
using assms by (auto simp add : is_tree_def subpath_def vertices_def)
end
|
module Language.JSON.Decode
import Language.JSON
import Language.JSON.Data
%access export
%default total
||| Describes how to turn a `JSON` into a `v`.
||| Use with `decodeJSON` or `decodeString`.
Decoder : Type -> Type
Decoder = \v => JSON -> Either String v
private
error : String -> Decoder a
error expected actual =
Left $ "Expected " ++ expected ++ ", got: " ++ format 0 actual
||| Run a `Decoder` on some `JSON`.
decodeJSON : Decoder a -> JSON -> Either String a
decodeJSON decoder json =
decoder json
||| Parse a `String` to `JSON` and run a `Decoder` on it
decodeString : Decoder a -> String -> Either String a
decodeString decoder str =
case parse str of
Just json => decoder json
Nothing => Left "parse error"
||| Given a function, transform a decoder's result
map : (a -> b) -> Decoder a -> Decoder b
map f decoder =
\json =>
case decoder json of
Right x => Right (f x)
Left x => Left x
||| Always decode the given value. Alias of `pure`.
succeed : a -> Decoder a
succeed x _ =
Right x
||| Always fail with the given error string.
fail : String -> Decoder a
fail err _ =
Left err
||| Apply a function to the result of a decoder to get another decoder, then run that
andThen : (a -> Decoder b) -> Decoder a -> Decoder b
andThen f decoder json =
case decoder json of
Right x => (f x) json
Left x => Left x
||| Always decode the given value
pure : a -> Decoder a
pure x json =
Right x
||| Given a decoder of a function and a decoder of its argument, return a decoder of the result
ap : Decoder (a -> b) -> Decoder a -> Decoder b
ap df dx json =
case (df json, dx json) of
(Right f, Right x) => Right $ f x
(Right f, Left err) => Left err
(Left err, _ ) => Left err
||| Alias of `ap`
(<*>) : Decoder (a -> b) -> Decoder a -> Decoder b
(<*>) = ap
||| Alias of map
(<$>) : (a -> b) -> Decoder a -> Decoder b
(<$>) = map
||| Avoid infinite loops in recursive decoders by wrapping the recursive call with lazy
lazy : (() -> Decoder a) -> Decoder a
lazy thunk =
andThen thunk (succeed ())
||| Decode the given value when encountering JNull
null : a -> Decoder a
null x JNull = Right x
null _ json = error "null" json
||| Decode a JString
string : Decoder String
string (JString str) = Right str
string json = error "string" json
||| Decode JBoolean
bool : Decoder Bool
bool (JBoolean bool) = Right bool
bool json = error "bool" json
||| Decode a `JNumber` by casting it to `Int`
int : Decoder Int
int (JNumber x) =
if floor x == x
then Right (cast x)
else error {a = Int } "int" (JNumber x)
int json = error "int" json
||| Decode a `JNumber` as `Double`
float : Decoder Double
float (JNumber x) = Right x
float json = error "float" json
||| Given a list of decoders, use the first one that succeeds
oneOf : List (Decoder a) -> Decoder a
oneOf [] json = error "oneOf" json
oneOf (d :: ds) json =
case d json of
Right v => Right v
Left err => oneOf ds json
||| Decode a list of elements
list : Decoder a -> Decoder (List a)
list decoder json@(JArray lst) =
Prelude.Functor.map reverse $ foldr f (Right []) $ map decoder $ lst
where
f (Right v) (Right l) = Right (v :: l)
f (Left err) _ = Left err
f (Right v) (Left err) = Left err
list _ json = error "list" json
||| Decode a JObject as key-value pairs
keyValuePairs : Decoder a -> Decoder (List (String, a))
keyValuePairs decoder (JObject o) =
  foldr f (Right []) $ map (\( k, v ) => ( k, decoder v)) o -- order preserved by foldr
where
f : ( String, Either String a ) -> Either String (List ( String, a )) -> Either String (List ( String, a ))
f ( k, Right v ) (Right lst) = Right (( k, v ) :: lst)
f ( k, Right v) (Left err) = Left err
f ( k, Left err ) _ = Left err
keyValuePairs _ json = error "keyValuePairs" json
||| Decode the given field in a `JObject`
field : String -> Decoder a -> Decoder a
field key decoder json@(JObject fields) =
case filter ((== key) . fst) fields of
( _, v ) :: _ => decoder v
_ => error ("object with field \"" ++ key ++ "\"") json
field key _ json = error ("object with field \"" ++ key ++ "\"") json
||| Decode a nested `JObject` field
at : List String -> Decoder a -> Decoder a
at fields decoder =
foldr field decoder fields
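-- e.g. `at ["person", "name"] string` decodes `{"person": {"name": ...}}`
-- and is equivalent to `field "person" (field "name" string)`.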
||| Decode the `n`th element of a `JArray`
index : Int -> Decoder a -> Decoder a
index n decoder json@(JArray lst) =
case index' (cast n) lst of
Just j => decoder j
Nothing => error ("list of length > " ++ cast n) json
index n decoder json =
error "list" json
||| Decode `Just` a value if the given decoder succeeds, `Nothing` otherwise
maybe : Decoder a -> Decoder (Maybe a)
maybe decoder json =
Right $ either (const Nothing) Just (decoder json)
||| Decode `Just` a value or `Nothing` in case of `JNull`
nullable : Decoder a -> Decoder (Maybe a)
nullable decoder =
oneOf
[ null Nothing
, map Just decoder
]
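-- A minimal usage sketch (the `User` record below is an assumption for
-- illustration, not part of this module):
--
--     record User where
--       constructor MkUser
--       name : String
--       age  : Int
--
--     user : Decoder User
--     user = MkUser <$> field "name" string <*> field "age" int
--
--     result : Either String User
--     result = decodeString user "{\"name\": \"ada\", \"age\": 36}"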
|
/-
Copyright (c) 2021 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz
-/
import category_theory.sites.sheafification
import category_theory.sites.whiskering
/-!
In this file, we prove that the plus functor is compatible with functors which
preserve the correct limits and colimits.
See `category_theory/sites/compatible_sheafification` for the compatibility
of sheafification, which follows easily from the content in this file.
-/
namespace category_theory.grothendieck_topology
open category_theory
open category_theory.limits
open opposite
universes w₁ w₂ v u
variables {C : Type u} [category.{v} C] (J : grothendieck_topology C)
variables {D : Type w₁} [category.{max v u} D]
variables {E : Type w₂} [category.{max v u} E]
variables (F : D ⥤ E)
noncomputable theory
variables [∀ (α β : Type (max v u)) (fst snd : β → α),
has_limits_of_shape (walking_multicospan fst snd) D]
variables [∀ (α β : Type (max v u)) (fst snd : β → α),
has_limits_of_shape (walking_multicospan fst snd) E]
variables [∀ (X : C) (W : J.cover X) (P : Cᵒᵖ ⥤ D), preserves_limit (W.index P).multicospan F]
variables (P : Cᵒᵖ ⥤ D)
/-- The diagram used to define `P⁺`, composed with `F`, is isomorphic
to the diagram used to define `P ⋙ F`. -/
def diagram_comp_iso (X : C) : J.diagram P X ⋙ F ≅ J.diagram (P ⋙ F) X :=
nat_iso.of_components
(λ W, begin
refine _ ≪≫ has_limit.iso_of_nat_iso (W.unop.multicospan_comp _ _).symm,
refine (is_limit_of_preserves F (limit.is_limit _)).cone_point_unique_up_to_iso
(limit.is_limit _)
end) begin
intros A B f,
ext,
dsimp,
simp only [functor.map_cone_π_app, multiequalizer.multifork_π_app_left,
iso.symm_hom, multiequalizer.lift_ι, eq_to_hom_refl, category.comp_id,
limit.cone_point_unique_up_to_iso_hom_comp,
grothendieck_topology.cover.multicospan_comp_hom_inv_left,
has_limit.iso_of_nat_iso_hom_π, category.assoc],
simp only [← F.map_comp, multiequalizer.lift_ι],
end
@[simp, reassoc]
lemma diagram_comp_iso_hom_ι (X : C) (W : (J.cover X)ᵒᵖ) (i : W.unop.arrow) :
(J.diagram_comp_iso F P X).hom.app W ≫ multiequalizer.ι _ i =
F.map (multiequalizer.ι _ _) :=
begin
delta diagram_comp_iso,
dsimp,
simp,
end
variables [∀ (X : C), has_colimits_of_shape (J.cover X)ᵒᵖ D]
variables [∀ (X : C), has_colimits_of_shape (J.cover X)ᵒᵖ E]
variables [∀ (X : C), preserves_colimits_of_shape (J.cover X)ᵒᵖ F]
/-- The isomorphism between `P⁺ ⋙ F` and `(P ⋙ F)⁺`. -/
def plus_comp_iso : J.plus_obj P ⋙ F ≅ J.plus_obj (P ⋙ F) :=
nat_iso.of_components
(λ X, begin
refine _ ≪≫ has_colimit.iso_of_nat_iso (J.diagram_comp_iso F P X.unop),
refine (is_colimit_of_preserves F (colimit.is_colimit
(J.diagram P (unop X)))).cocone_point_unique_up_to_iso (colimit.is_colimit _)
end) begin
intros X Y f,
apply (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P X.unop))).hom_ext,
intros W,
dsimp [plus_obj, plus_map],
simp only [functor.map_comp, category.assoc],
slice_rhs 1 2
{ erw (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P X.unop))).fac },
slice_lhs 1 3
{ simp only [← F.map_comp],
dsimp [colim_map, is_colimit.map, colimit.pre],
simp only [colimit.ι_desc_assoc, colimit.ι_desc],
dsimp [cocones.precompose],
rw [category.assoc, colimit.ι_desc],
dsimp [cocone.whisker],
rw F.map_comp },
simp only [category.assoc],
slice_lhs 2 3
{ erw (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P Y.unop))).fac },
dsimp,
simp only [has_colimit.iso_of_nat_iso_ι_hom_assoc,
grothendieck_topology.diagram_pullback_app, colimit.ι_pre,
has_colimit.iso_of_nat_iso_ι_hom, ι_colim_map_assoc],
simp only [← category.assoc],
congr' 1,
ext,
dsimp,
simp only [category.assoc],
erw [multiequalizer.lift_ι, diagram_comp_iso_hom_ι, diagram_comp_iso_hom_ι,
← F.map_comp, multiequalizer.lift_ι],
end
@[simp, reassoc]
lemma ι_plus_comp_iso_hom (X) (W) : F.map (colimit.ι _ W) ≫ (J.plus_comp_iso F P).hom.app X =
(J.diagram_comp_iso F P X.unop).hom.app W ≫ colimit.ι _ W :=
begin
delta diagram_comp_iso plus_comp_iso,
dsimp [is_colimit.cocone_point_unique_up_to_iso],
simp only [← category.assoc],
erw (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P (unop X)))).fac,
dsimp,
simp,
end
@[simp, reassoc]
lemma plus_comp_iso_whisker_left {F G : D ⥤ E} (η : F ⟶ G) (P : Cᵒᵖ ⥤ D)
[∀ (X : C), preserves_colimits_of_shape (J.cover X)ᵒᵖ F]
[∀ (X : C) (W : J.cover X) (P : Cᵒᵖ ⥤ D), preserves_limit (W.index P).multicospan F]
[∀ (X : C), preserves_colimits_of_shape (J.cover X)ᵒᵖ G]
[∀ (X : C) (W : J.cover X) (P : Cᵒᵖ ⥤ D), preserves_limit (W.index P).multicospan G] :
whisker_left _ η ≫ (J.plus_comp_iso G P).hom =
(J.plus_comp_iso F P).hom ≫ J.plus_map (whisker_left _ η) :=
begin
ext X,
apply (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P X.unop))).hom_ext,
intros W,
dsimp [plus_obj, plus_map],
simp only [ι_plus_comp_iso_hom, ι_colim_map, whisker_left_app, ι_plus_comp_iso_hom_assoc,
nat_trans.naturality_assoc, grothendieck_topology.diagram_nat_trans_app],
simp only [← category.assoc],
congr' 1,
ext,
dsimp,
simpa,
end
/-- The isomorphism between `P⁺ ⋙ F` and `(P ⋙ F)⁺`, functorially in `F`. -/
@[simps hom_app inv_app]
def plus_functor_whisker_left_iso (P : Cᵒᵖ ⥤ D)
[∀ (F : D ⥤ E) (X : C), preserves_colimits_of_shape (J.cover X)ᵒᵖ F]
[∀ (F : D ⥤ E) (X : C) (W : J.cover X) (P : Cᵒᵖ ⥤ D),
preserves_limit (W.index P).multicospan F] :
(whiskering_left _ _ E).obj (J.plus_obj P) ≅
(whiskering_left _ _ _).obj P ⋙ J.plus_functor E :=
nat_iso.of_components
(λ X, plus_comp_iso _ _ _) $ λ F G η, plus_comp_iso_whisker_left _ _ _
@[simp, reassoc]
lemma plus_comp_iso_whisker_right {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) :
whisker_right (J.plus_map η) F ≫ (J.plus_comp_iso F Q).hom =
(J.plus_comp_iso F P).hom ≫ J.plus_map (whisker_right η F) :=
begin
ext X,
apply (is_colimit_of_preserves F (colimit.is_colimit (J.diagram P X.unop))).hom_ext,
intros W,
dsimp [plus_obj, plus_map],
simp only [ι_colim_map, whisker_right_app, ι_plus_comp_iso_hom_assoc,
grothendieck_topology.diagram_nat_trans_app],
simp only [← category.assoc, ← F.map_comp],
dsimp [colim_map, is_colimit.map],
simp only [colimit.ι_desc],
dsimp [cocones.precompose],
simp only [functor.map_comp, category.assoc, ι_plus_comp_iso_hom],
simp only [← category.assoc],
congr' 1,
ext,
dsimp,
simp only [diagram_comp_iso_hom_ι_assoc, multiequalizer.lift_ι,
diagram_comp_iso_hom_ι, category.assoc],
simp only [← F.map_comp, multiequalizer.lift_ι],
end
/-- The isomorphism between `P⁺ ⋙ F` and `(P ⋙ F)⁺`, functorially in `P`. -/
@[simps hom_app inv_app]
def plus_functor_whisker_right_iso : J.plus_functor D ⋙ (whiskering_right _ _ _).obj F ≅
(whiskering_right _ _ _).obj F ⋙ J.plus_functor E :=
nat_iso.of_components (λ P, J.plus_comp_iso _ _) $ λ P Q η, plus_comp_iso_whisker_right _ _ _
@[simp, reassoc]
lemma whisker_right_to_plus_comp_plus_comp_iso_hom :
whisker_right (J.to_plus _) _ ≫ (J.plus_comp_iso F P).hom = J.to_plus _ :=
begin
ext,
dsimp [to_plus],
simp only [ι_plus_comp_iso_hom, functor.map_comp, category.assoc],
simp only [← category.assoc],
congr' 1,
ext,
delta cover.to_multiequalizer,
simp only [diagram_comp_iso_hom_ι, category.assoc, ← F.map_comp],
erw [multiequalizer.lift_ι, multiequalizer.lift_ι],
refl,
end
@[simp]
lemma to_plus_comp_plus_comp_iso_inv : J.to_plus _ ≫ (J.plus_comp_iso F P).inv =
whisker_right (J.to_plus _) _ :=
by simp [iso.comp_inv_eq]
lemma plus_comp_iso_inv_eq_plus_lift (hP : presheaf.is_sheaf J ((J.plus_obj P) ⋙ F)) :
(J.plus_comp_iso F P).inv = J.plus_lift (whisker_right (J.to_plus _) _) hP :=
by { apply J.plus_lift_unique, simp [iso.comp_inv_eq] }
end category_theory.grothendieck_topology
|
function CreateAffinePanel(handles)
% Affine Panel
handles.affinePanel = uipanel('Parent',handles.toolPanel,'Units','normalized','FontSize',10,'Title','Registration Parameters',...
'Tag','affinePanel','Clipping','off',...
'Position',[0 0 1 1],...
'Visible','on');
dy = 160; % common vertical offset (in pixels) applied to all control positions
h312 = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','right',...
'Position',[9 124+dy 117 19],...
'String','Translation Scale:',...
'Style','text',...
'Tag','text62');
handles.affine_tscale = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','left',...
'Position',[129 122+dy 78 23],...
'String','0.001',...
'Style','edit',...
'Tag','affine_tscale');
handles.affine_minstep = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','left',...
'Position',[129 86+dy 78 24],...
'String','0.01',...
'Style','edit',...
'Tag','affine_minstep');
handles.affine_maxstep = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','left',...
'Position',[129 52+dy 78 23],...
'String','1.0',...
'Style','edit',...
'Tag','affine_maxstep');
h316 = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','right',...
'Position',[8 92+dy 118 17],...
'String','Minimum Step:',...
'Style','text',...
'Tag','text65');
h317 = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','right',...
'Position',[8 58+dy 118 17],...
'String','Maximum Step:',...
'Style','text',...
'Tag','text66');
h318 = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','right',...
'Position',[8 24+dy 118 17],...
'String','Iteration Number:',...
'Style','text',...
'Tag','text67');
handles.affine_iternum = uicontrol(...
'Parent',handles.affinePanel,...
'Units','pixel',...
'FontSize',10,...
'HorizontalAlignment','left',...
'Position',[129 19+dy 78 23],...
'String','200',...
'Style','edit',...
'Tag','affine_iternum');
guidata(handles.mainframe, handles);
end
|
-- ----------------------------------
-- Prove that
-- (s ∩ t) ∪ (s ∩ u) ⊆ s ∩ (t ∪ u)
-- ----------------------------------
import data.set.basic
open set
variable {α : Type}
variables s t u : set α
example : (s ∩ t) ∪ (s ∩ u) ⊆ s ∩ (t ∪ u) :=
sorry
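-- One possible proof, kept commented so the exercise stays open
-- (a sketch using `rintro`; other tactic proofs work as well):
--
-- example : (s ∩ t) ∪ (s ∩ u) ⊆ s ∩ (t ∪ u) :=
-- begin
--   rintro x (⟨xs, xt⟩ | ⟨xs, xu⟩),
--   { exact ⟨xs, or.inl xt⟩ },
--   { exact ⟨xs, or.inr xu⟩ }
-- end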
|
(* Title: HOL/Auth/n_g2kAbsAfter_lemma_inv__70_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_g2kAbsAfter Protocol Case Study*}
theory n_g2kAbsAfter_lemma_inv__70_on_rules imports n_g2kAbsAfter_lemma_on_inv__70
begin
section{*All lemmas on causal relation between inv__70*}
lemma lemma_inv__70_on_rules:
assumes b1: "r \<in> rules N" and b2: "(f=inv__70 )"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)\<or>
(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)\<or>
(r=n_n_SendReqS_j1 )\<or>
(r=n_n_SendReqEI_i1 )\<or>
(r=n_n_SendReqES_i1 )\<or>
(r=n_n_RecvReq_i1 )\<or>
(r=n_n_SendInvE_i1 )\<or>
(r=n_n_SendInvS_i1 )\<or>
(r=n_n_SendInvAck_i1 )\<or>
(r=n_n_RecvInvAck_i1 )\<or>
(r=n_n_SendGntS_i1 )\<or>
(r=n_n_SendGntE_i1 )\<or>
(r=n_n_RecvGntS_i1 )\<or>
(r=n_n_RecvGntE_i1 )\<or>
(r=n_n_ASendReqIS_j1 )\<or>
(r=n_n_ASendReqSE_j1 )\<or>
(r=n_n_ASendReqEI_i1 )\<or>
(r=n_n_ASendReqES_i1 )\<or>
(r=n_n_SendReqEE_i1 )\<or>
(r=n_n_ARecvReq_i1 )\<or>
(r=n_n_ASendInvE_i1 )\<or>
(r=n_n_ASendInvS_i1 )\<or>
(r=n_n_ASendInvAck_i1 )\<or>
(r=n_n_ARecvInvAck_i1 )\<or>
(r=n_n_ASendGntS_i1 )\<or>
(r=n_n_ASendGntE_i1 )\<or>
(r=n_n_ARecvGntS_i1 )\<or>
(r=n_n_ARecvGntE_i1 )"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_Store_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_Store_i1Vsinv__70) done
}
moreover {
assume d1: "(\<exists> d. d\<le>N\<and>r=n_n_AStore_i1 d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_AStore_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendReqS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqS_j1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEI_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqES_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_RecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvReq_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendInvAck_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_RecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvInvAck_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendGntE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_RecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_RecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_RecvGntE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendReqIS_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqIS_j1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendReqSE_j1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqSE_j1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendReqEI_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqEI_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendReqES_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendReqES_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_SendReqEE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_SendReqEE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ARecvReq_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvReq_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendInvE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendInvS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendInvAck_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ARecvInvAck_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvInvAck_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ASendGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ASendGntE_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ARecvGntS_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntS_i1Vsinv__70) done
}
moreover {
assume d1: "(r=n_n_ARecvGntE_i1 )"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_n_ARecvGntE_i1Vsinv__70) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
If $s$ is a component of $u$ and $s \subseteq t \subseteq u$, then $s$ is a component of $t$.
|
function [A, C, Z] = ldsPca(X, k, m)
% Subspace method for learning linear dynamic system.
% Input:
% X: d x n data matrix
% k: dimension of hidden variable
% m: stacking order for the Hankel matrix
% Output:
% A: k x k transition matrix
% C: k x d emission matrix
% Z: k x n latent variable
% Y: d x n reconstructed data
% reference: Bayesian Reasoning and Machine Learning (BRML) chapter 24.5.3 p.507
% Written by Mo Chen ([email protected]).
[d,n] = size(X);
H = reshape(X(:,hankel(1:m,m:n)),d*m,[]); % block-Hankel matrix: each column stacks m consecutive frames
[U,S,V] = svd(H,'econ');
C = U(1:d,1:k);
Z = S(1:k,1:k)*V(:,1:k)';
A = Z(:,2:end)/Z(:,1:end-1); % estimated transition
% Y = C*Z; % reconstructions
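% A minimal usage sketch (synthetic data; the sizes are illustrative only):
%   d = 5; n = 200; k = 2; m = 3;
%   X = randn(d, n);              % replace with real observations
%   [A, C, Z] = ldsPca(X, k, m);  % Z is k x (n-m+1)
%   Y = C*Z;                      % approximates X(:, 1:n-m+1)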
|
% Defines a parameter
%
% Author: Jonathan Karr
% Affiliation: Covert Lab, Department of Bioengineering, Stanford University
% Last updated: 11/17/2009
classdef Parameter < edu.stanford.covert.cell.kb.KnowledgeBaseObject
properties
process = edu.stanford.covert.cell.kb.Process.empty(0, 0);
state = edu.stanford.covert.cell.kb.State.empty(0, 0);
reactions = edu.stanford.covert.cell.kb.Reaction.empty(0, 0);
proteinMonomers = edu.stanford.covert.cell.kb.ProteinMonomer.empty(0, 0);
proteinComplexs = edu.stanford.covert.cell.kb.ProteinComplex.empty(0, 0);
end
properties %(SetAccess = protected)
index
defaultValue
units
experimentallyConstrained
end
methods
function this = Parameter(knowledgeBase, wid, wholeCellModelID, name, ...
index, defaultValue, units, experimentallyConstrained, ...
comments, crossReferences)
if nargin == 0; return; end;
this = edu.stanford.covert.cell.kb.Parameter.empty(size(wid, 1), 0);
this(size(wid, 1), 1) = edu.stanford.covert.cell.kb.Parameter;
for i = 1:size(wid, 1)
this(i, 1).idx = i;
this(i, 1).knowledgeBase = knowledgeBase;
this(i, 1).wid = wid(i);
this(i, 1).wholeCellModelID = wholeCellModelID{i};
this(i, 1).name = name{i};
this(i, 1).index = index{i};
this(i, 1).defaultValue = this.parseDefaultValue(defaultValue{i});
this(i, 1).units = units{i};
this(i, 1).experimentallyConstrained = experimentallyConstrained(i);
if exist('comments','var') && ~isempty(comments); this(i,1).comments = comments{i}; end;
if exist('crossReferences', 'var')
if size(crossReferences, 1) > 1
this(i, 1).crossReferences = crossReferences(i);
else
this(i, 1).crossReferences = struct;
fields = fieldnames(crossReferences);
for j = 1:size(fields,1)
values = crossReferences.(fields{j});
this(i, 1).crossReferences.(fields{j}) = values(i);
end
end
end
end
end
function serializeLinks(this)
for i = 1:numel(this)
this(i).process = this.serializeLinksHelper(this(i).process);
this(i).state = this.serializeLinksHelper(this(i).state);
this(i).reactions = this.serializeLinksHelper(this(i).reactions);
this(i).proteinMonomers = this.serializeLinksHelper(this(i).proteinMonomers);
this(i).proteinComplexs = this.serializeLinksHelper(this(i).proteinComplexs);
[email protected](this(i))
end
end
function deserializeLinks(this, kb)
for i = 1:numel(this)
this(i).process = this.deserializeLinksHelper(this(i).process, kb.processes);
this(i).state = this.deserializeLinksHelper(this(i).state, kb.states);
this(i).reactions = this.deserializeLinksHelper(this(i).reactions, kb.reactions);
this(i).proteinMonomers = this.deserializeLinksHelper(this(i).proteinMonomers, kb.proteinMonomers);
this(i).proteinComplexs = this.deserializeLinksHelper(this(i).proteinComplexs, kb.proteinComplexs);
[email protected](this(i), kb);
end
end
function deleteLinks(this)
for i = 1:numel(this)
this(i).process = [];
this(i).state = [];
this(i).reactions = [];
this(i).proteinMonomers = [];
this(i).proteinComplexs = [];
[email protected](this(i))
end
end
end
methods (Static = true)
function parsedValue = parseDefaultValue(rawValue)
if isempty(rawValue)
parsedValue = {};
return;
end
if iscell(rawValue) && numel(rawValue) == 1
rawValue = rawValue{1};
end
rawValue = strsplit(';', rawValue)';
parsedValue = zeros(size(rawValue));
for i = 1:length(rawValue)
parsedValue(i) = str2double(rawValue{i});
end
if any(isnan(parsedValue))
parsedValue = rawValue;
end
end
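% Example (sketch): parseDefaultValue('1.5;2') returns [1.5; 2], while a
% non-numeric string such as 'true;false' falls back to the split strings
% {'true'; 'false'}.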
end
end
|
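# For each line "<before> | <after>" of the file named by the last command-line
# argument, count positionwise character differences between the two fields and
# print a severity label (Done / Low / Medium / High / Critical).
# Note: only the first nchar(t[1]) positions are compared.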
cat(sapply(readLines(tail(commandArgs(), n=1)), function(s) {
t <- strsplit(s, " | ", fixed=TRUE)[[1]]
r <- 0
for (i in 1:nchar(t[1])) {
if (substr(t[1], i, i) != substr(t[2], i, i)) {
r <- r + 1
}
}
if (r == 0) {
"Done"
} else if (r <= 2) {
"Low"
} else if (r <= 4) {
"Medium"
} else if (r <= 6) {
"High"
} else {
"Critical"
}
}), sep="\n")
|