Dataset: AI4M
(* (c) Copyright 2006-2016 Microsoft Corporation and Inria.                  *)
(* Distributed under the terms of CeCILL-B.                                  *)
Require Import mathcomp.ssreflect.ssreflect.
From mathcomp Require Import ssrbool ssrfun eqtype ssrnat seq path div choice fintype.
From mathcomp Require Import tuple finfun bigop order prime ssralg poly finset center.
From mathcomp Require Import fingroup morphism perm automorphism quotient action finalg zmodp.
From mathcomp Require Import gfunctor gproduct cyclic commutator gseries nilpotent pgroup.
From mathcomp Require Import sylow hall abelian maximal frobenius.
From mathcomp Require Import matrix mxalgebra mxrepresentation mxabelem vector.
From odd_order Require Import BGsection1 BGsection3 BGsection7 BGsection15 BGsection16.
From mathcomp Require Import ssrnum algC classfun character integral_char inertia vcharacter.
From odd_order Require Import PFsection1 PFsection2 PFsection3 PFsection4.
From odd_order Require Import PFsection5 PFsection6 PFsection7 PFsection8 PFsection9.

(******************************************************************************)
(* This file covers Peterfalvi, Section 10: Maximal subgroups of Types III,  *)
(* IV and V. For defW : W1 \x W2 = W and MtypeP : of_typeP M U defW, and     *)
(* setting ptiW := FT_primeTI_hyp MtypeP, mu2_ i j := primeTIirr ptiW i j and *)
(* delta_ j := primeTIsign j, we define here, for M of type III-V:           *)
(*   FTtype345_TIirr_degree MtypeP == the common degree of the components of *)
(*     (locally) d          the images of characters of irr W that don't have *)
(*                          W2 in their kernel by the cyclicTI isometry to M. *)
(*                          Thus mu2_ i j 1%g = d%:R for all j != 0.         *)
(*   FTtype345_TIsign MtypeP == the common sign of the images of characters  *)
(*     (locally) delta      of characters of irr W that don't have W2 in     *)
(*                          their kernel by the cyclicTI isometry to M.      *)
(*                          Thus delta_ j = delta for all j != 0.            *)
(*   FTtype345_ratio MtypeP == the ratio (d - delta) / #|W1|. Even though it *)
(*     (locally) n          is always a positive integer we take n : algC.   *)
(*   FTtype345_bridge MtypeP s i j == a virtual character that can be used to *)
(*     (locally) alpha_ i j bridge coherence between the mu2_ i j and other  *)
(*                          irreducibles of M; here s should be the index of *)
(*                          an irreducible character of M induced from M^(1). *)
(*                          := mu2_ i j - delta *: mu2_ i 0 - n *: 'chi_s.   *)
(******************************************************************************)

Set Implicit Arguments.
Unset Strict Implicit.
Unset Printing Implicit Defensive.

Import GroupScope Order.TTheory GRing.Theory Num.Theory.

Section Ten.

Variable gT : minSimpleOddGroupType.
Local Notation G := (TheMinSimpleOddGroup gT).
Implicit Types (p q : nat) (x y z : gT).
Implicit Types H K L N P Q R S T U W : {group gT}.

Local Notation "#1" := (inord 1) (at level 0).

Section OneMaximal.

(* These assumptions correspond to Peterfalvi, Hypothesis (10.1).            *)
(* We also declare the group U_M, even though it is not used in this section, *)
(* because it is a parameter to the theorems and definitions of PFsection8   *)
(* and PFsection9.                                                            *)
Variables M U_M W W1 W2 : {group gT}.
Hypotheses (maxM : M \in 'M) (defW : W1 \x W2 = W).
Hypotheses (MtypeP : of_typeP M U_M defW) (notMtype2: FTtype M != 2).

Local Notation "` 'M'" := (gval M) (at level 0, only parsing) : group_scope.
Local Notation "` 'W1'" := (gval W1) (at level 0, only parsing) : group_scope.
Local Notation "` 'W2'" := (gval W2) (at level 0, only parsing) : group_scope.
Local Notation "` 'W'" := (gval W) (at level 0, only parsing) : group_scope.
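(* V below is the cyclic TI set W :\: (W1 :|: W2) attached to defW. *)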
Local Notation V := (cyclicTIset defW).

Local Notation M' := M^`(1)%G.
Local Notation "` 'M''" := `M^`(1) (at level 0) : group_scope.
Local Notation M'' := M^`(2)%G.
Local Notation "` 'M'''" := `M^`(2) (at level 0) : group_scope.

Let defM : M' ><| W1 = M. Proof. by have [[]] := MtypeP. Qed.
Let nsM''M' : M'' <| M'. Proof. exact: (der_normal 1 M'). Qed.
Let nsM'M : M' <| M. Proof. exact: (der_normal 1 M). Qed.
Let sM'M : M' \subset M. Proof. exact: der_sub. Qed.
Let nsM''M : M'' <| M. Proof. exact: der_normal 2 M. Qed.

Let notMtype1 : FTtype M != 1%N. Proof. exact: FTtypeP_neq1 MtypeP. Qed.

Let typeMgt2 : FTtype M > 2.
Proof. by move: (FTtype M) (FTtype_range M) notMtype1 notMtype2=> [|[|[]]]. Qed.

Let defA1 : 'A1(M) = M'^#. Proof. by rewrite /= -FTcore_eq_der1. Qed.
Let defA : 'A(M) = M'^#. Proof. by rewrite FTsupp_eq1 ?defA1. Qed.
Let defA0 : 'A0(M) = M'^# :|: class_support V M.
Proof. by rewrite -defA (FTtypeP_supp0_def _ MtypeP). Qed.
Let defMs : M`_\s :=: M'. Proof. exact: FTcore_type_gt2. Qed.

Let pddM := FT_prDade_hyp maxM MtypeP.
Let ptiWM : primeTI_hypothesis M M' defW := FT_primeTI_hyp MtypeP.
Let ctiWG : cyclicTI_hypothesis G defW := pddM.
Let ctiWM : cyclicTI_hypothesis M defW := prime_cycTIhyp ptiWM.

Let ntW1 : W1 :!=: 1. Proof. by have [[]] := MtypeP. Qed.
Let ntW2 : W2 :!=: 1. Proof. by have [_ _ _ []] := MtypeP. Qed.
Let cycW1 : cyclic W1. Proof. by have [[]] := MtypeP. Qed.
Let cycW2 : cyclic W2. Proof. by have [_ _ _ []] := MtypeP. Qed.

Let w1 := #|W1|.
Let w2 := #|W2|.

Let nirrW1 : #|Iirr W1| = w1. Proof. by rewrite card_Iirr_cyclic. Qed.
Let nirrW2 : #|Iirr W2| = w2. Proof. by rewrite card_Iirr_cyclic. Qed.
Let NirrW1 : Nirr W1 = w1. Proof. by rewrite -nirrW1 card_ord. Qed.
Let NirrW2 : Nirr W2 = w2. Proof. by rewrite -nirrW2 card_ord. Qed.

Let w1gt2 : w1 > 2. Proof. by rewrite odd_gt2 ?mFT_odd ?cardG_gt1. Qed.
Let w2gt2 : w2 > 2. Proof. by rewrite odd_gt2 ?mFT_odd ?cardG_gt1. Qed.

Let coM'w1 : coprime #|M'| w1.
Proof. by rewrite (coprime_sdprod_Hall_r defM); have [[]] := MtypeP. Qed.

(* This is used both in (10.2) and (10.8). *)
Let frobMbar : [Frobenius M / M'' = (M' / M'') ><| (W1 / M'')].
Proof.
have [[_ hallW1 _ _] _ _ [_ _ _ sW2M'' regM'W1 ] _] := MtypeP.
apply: Frobenius_coprime_quotient => //.
split=> [|w /regM'W1-> //]; apply: (sol_der1_proper (mmax_sol maxM)) => //.
by apply: subG1_contra ntW2; apply: subset_trans sW2M'' (der_sub 1 M').
Qed.

Local Open Scope ring_scope.

Let sigma := (cyclicTIiso ctiWG).
Let w_ i j := (cyclicTIirr defW i j).
Local Notation eta_ i j := (sigma (w_ i j)).

Local Notation Imu2 := (primeTI_Iirr ptiWM).
Let mu2_ i j := primeTIirr ptiWM i j.
Let mu_ := primeTIred ptiWM.
Local Notation chi_ j := (primeTIres ptiWM j).

Local Notation Idelta := (primeTI_Isign ptiWM).
Local Notation delta_ j := (primeTIsign ptiWM j).

Local Notation tau := (FT_Dade0 maxM).
Local Notation "chi ^\tau" := (tau chi).

Let calS0 := seqIndD M' M M`_\s 1.
Let rmR := FTtypeP_coh_base maxM MtypeP.
Let scohS0 : subcoherent calS0 tau rmR.
Proof. exact: FTtypeP_subcoherent MtypeP. Qed.

Let calS := seqIndD M' M M' 1.
Let sSS0 : cfConjC_subset calS calS0.
Proof. by apply: seqInd_conjC_subset1; rewrite /= ?defMs. Qed.

Let mem_calS s : ('Ind 'chi[M']_s \in calS) = (s != 0).
Proof.
rewrite mem_seqInd ?normal1 ?FTcore_normal //=.
by rewrite !inE sub1G subGcfker andbT.
Qed.

Let calSmu j : j != 0 -> mu_ j \in calS.
Proof.
move=> nz_j; rewrite -[mu_ j]cfInd_prTIres mem_calS -irr_eq1.
by rewrite -(prTIres0 ptiWM) (inj_eq irr_inj) (inj_eq (prTIres_inj _)).
Qed.
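(* Any virtual character in 'Z[calS, M'^#] is supported on 'A0(M), so the   *)
(* Dade isometry tau applies to differences of characters of calS.          *)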
Let tauM' : {subset 'Z[calS, M'^#] <= 'CF(M, 'A0(M))}.
Proof. by rewrite defA0 => phi /zchar_on/(cfun_onS (subsetUl _ _))->. Qed.

(* This is Peterfalvi (10.2). *)
(* Note that this result is also valid for type II groups. *)
Lemma FTtypeP_ref_irr :
  {zeta | [/\ zeta \in irr M, zeta \in calS & zeta 1%g = w1%:R]}.
Proof.
have [_ /has_nonprincipal_irr[s nz_s] _ _ _] := Frobenius_context frobMbar.
exists ('Ind 'chi_s %% M'')%CF; split.
- by rewrite cfMod_irr ?irr_induced_Frobenius_ker ?(FrobeniusWker frobMbar).
- by rewrite -cfIndMod ?normal_sub // -mod_IirrE // mem_calS mod_Iirr_eq0.
rewrite -cfIndMod ?cfInd1 ?normal_sub // -(index_sdprod defM) cfMod1.
by rewrite lin_char1 ?mulr1 //; apply/char_abelianP/sub_der1_abelian.
Qed.

(* This is Peterfalvi (10.3), first assertion. *)
Lemma FTtype345_core_prime : prime w2.
Proof.
have [S pairMS [xdefW [U StypeP]]] := FTtypeP_pair_witness maxM MtypeP.
have [[_ _ maxS] _] := pairMS; rewrite {1}(negPf notMtype2) /= => Stype2 _ _.
by have [[]] := compl_of_typeII maxS StypeP Stype2.
Qed.
Let w2_pr := FTtype345_core_prime.

Definition FTtype345_TIirr_degree := truncC (mu2_ 0 #1 1%g).
Definition FTtype345_TIsign := delta_ #1.
Local Notation d := FTtype345_TIirr_degree.
Local Notation delta := FTtype345_TIsign.
Definition FTtype345_ratio := (d%:R - delta) / w1%:R.
Local Notation n := FTtype345_ratio.

(* This is the remainder of Peterfalvi (10.3). *)
Lemma FTtype345_constants :
  [/\ forall i j, j != 0 -> mu2_ i j 1%g = d%:R,
      forall j, j != 0 -> delta_ j = delta,
      (d > 1)%N
    & n \in Cnat].
Proof.
have nz_j1 : #1 != 0 :> Iirr W2 by rewrite Iirr1_neq0.
have invj j: j != 0 -> mu2_ 0 j 1%g = d%:R /\ delta_ j = delta.
  move=> nz_j; have [k co_k_j1 Dj] := cfExp_prime_transitive w2_pr nz_j1 nz_j.
  rewrite -(cforder_dprodr defW) -dprod_IirrEr in co_k_j1.
  have{co_k_j1} [[u Dj1u] _] := cycTIiso_aut_exists ctiWM co_k_j1.
  rewrite dprod_IirrEr -rmorphX -Dj /= -!dprod_IirrEr -!/(w_ _ _) in Dj1u.
  rewrite truncCK ?Cnat_irr1 //.
  have: delta_ j *: mu2_ 0 j == cfAut u (delta_ #1 *: mu2_ 0 #1).
    by rewrite -!(cycTIiso_prTIirr pddM) -/ctiWM -Dj1u.
  rewrite raddfZsign /= -prTIirr_aut eq_scaled_irr signr_eq0 /= /mu2_.
  by case/andP=> /eqP-> /eqP->; rewrite prTIirr_aut cfunE aut_Cnat ?Cnat_irr1.
have d_gt1: (d > 1)%N.
  rewrite ltn_neqAle andbC -eqC_nat -ltC_nat truncCK ?Cnat_irr1 //.
  rewrite irr1_gt0 /= eq_sym; apply: contraNneq nz_j1 => mu2_lin.
  have: mu2_ 0 #1 \is a linear_char by rewrite qualifE irr_char /= mu2_lin.
  by rewrite lin_irr_der1 => /(prTIirr0P ptiWM)[i /irr_inj/prTIirr_inj[_ ->]].
split=> // [i j /invj[<- _] | _ /invj[//] | ]; first by rewrite prTIirr_1.
have: (d%:R == delta %[mod w1])%C by rewrite truncCK ?Cnat_irr1 ?prTIirr1_mod.
rewrite /eqCmod unfold_in -/n (negPf (neq0CG W1)) CnatEint => ->.
rewrite divr_ge0 ?ler0n // [delta]signrE opprB addrA -natrD subr_ge0 ler1n.
by rewrite -(subnKC d_gt1).
Qed.

Let o_mu2_irr zeta i j :
  zeta \in calS -> zeta \in irr M -> '[mu2_ i j, zeta] = 0.
Proof.
case/seqIndP=> s _ -> irr_sM; rewrite -cfdot_Res_l cfRes_prTIirr cfdot_irr.
rewrite (negPf (contraNneq _ (prTIred_not_irr ptiWM j))) // => Ds.
by rewrite -cfInd_prTIres Ds.
Qed.

Let ZmuBzeta zeta j :
    zeta \in calS -> zeta 1%g = w1%:R -> j != 0 ->
  mu_ j - d%:R *: zeta \in 'Z[calS, M'^#].
Proof.
move=> Szeta zeta1w1 nz_j; have [mu1 _ _ _] := FTtype345_constants.
rewrite -[d%:R](mulKf (neq0CiG M M')) mulrC -(mu1 0 j nz_j).
rewrite -(cfResE _ sM'M) // cfRes_prTIirr -cfInd1 // cfInd_prTIres.
by rewrite (seqInd_sub_lin_vchar _ Szeta) ?calSmu // -(index_sdprod defM).
Qed.
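(* mu_ 0 = w1%:R *: '1_M' and zeta 1%g = w1%:R, so the difference mu_ 0 -   *)
(* zeta vanishes at 1 and is supported on 'A(M) = M'^#.                     *)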
Let mu0Bzeta_on zeta : zeta \in calS -> zeta 1%g = w1%:R -> mu_ 0 - zeta \in 'CF(M, 'A(M)). Proof. move/seqInd_on=> M'zeta zeta1w1; rewrite [mu_ 0]prTIred0 defA cfun_onD1. rewrite !cfunE zeta1w1 cfuniE // group1 mulr1 subrr rpredB ?M'zeta //=. by rewrite rpredZ ?cfuni_on. Qed. (* We need to prove (10.5) - (10.7) for an arbitrary choice of zeta, to allow *) (* part of the proof of (10.5) to be reused in that of (11.8). *) Variable zeta : 'CF(M). Hypotheses (irr_zeta : zeta \in irr M) (Szeta : zeta \in calS). Hypothesis (zeta1w1 : zeta 1%g = w1%:R). Let o_mu2_zeta i j : '[mu2_ i j, zeta] = 0. Proof. exact: o_mu2_irr. Qed. Let o_mu_zeta j : '[mu_ j, zeta] = 0. Proof. by rewrite cfdot_suml big1 // => i _; apply: o_mu2_zeta. Qed. Definition FTtype345_bridge i j := mu2_ i j - delta *: mu2_ i 0 - n *: zeta. Local Notation alpha_ := FTtype345_bridge. (* This is the first part of Peterfalvi (10.5), which does not depend on the *) (* coherence assumption that will ultimately be refuted by (10.8). *) Lemma supp_FTtype345_bridge i j : j != 0 -> alpha_ i j \in 'CF(M, 'A0(M)). Proof. move=> nz_j; have [Dd Ddelta _ _] := FTtype345_constants. have Dmu2 := prTIirr_id pddM. have W1a0 x: x \in W1 -> alpha_ i j x = 0. move=> W1x; rewrite !cfunE; have [-> | ntx] := eqVneq x 1%g. by rewrite Dd // prTIirr0_1 mulr1 zeta1w1 divfK ?neq0CG ?subrr. have notM'x: x \notin M'. apply: contra ntx => M'x; have: x \in M' :&: W1 by apply/setIP. by rewrite coprime_TIg ?inE. have /sdprod_context[_ sW1W _ _ tiW21] := dprodWsdC defW. have abW2: abelian W2 := cyclic_abelian cycW2. have Wx: x \in W :\: W2. rewrite inE (contra _ ntx) ?(subsetP sW1W) // => W2x. by rewrite -in_set1 -set1gE -tiW21 inE W2x. rewrite !Dmu2 {Wx}// Ddelta // prTIsign0 scale1r !dprod_IirrE cfunE. rewrite -!(cfResE _ sW1W) ?cfDprodKl_abelian // subrr. have [s _ ->] := seqIndP Szeta. by rewrite (cfun_on0 (cfInd_normal _ _)) ?mulr0 ?subrr. apply/cfun_onP=> x; rewrite !inE defA notMtype1 /= => /norP[notM'x]. set pi := \pi(M'); have [Mx /= pi_x | /cfun0->//] := boolP (x \in M). have hallM': pi.-Hall(M) M' by rewrite Hall_pi -?(coprime_sdprod_Hall_l defM). have hallW1: pi^'.-Hall(M) W1 by rewrite -(compl_pHall _ hallM') sdprod_compl. have{pi_x} pi'x: pi^'.-elt x. apply: contraR notM'x => not_pi'x; rewrite !inE (mem_normal_Hall hallM') //. rewrite not_pi'x andbT negbK in pi_x. by rewrite (contraNneq _ not_pi'x) // => ->; apply: p_elt1. have [|y My] := Hall_subJ (mmax_sol maxM) hallW1 _ pi'x; rewrite cycle_subG //. by case/imsetP=> z Wz ->; rewrite cfunJ ?W1a0. Qed. Local Notation alpha_on := supp_FTtype345_bridge. Lemma vchar_FTtype345_bridge i j : alpha_ i j \in 'Z[irr M]. Proof. have [_ _ _ Nn] := FTtype345_constants. by rewrite !rpredB ?rpredZsign ?rpredZ_Cnat ?irr_vchar ?mem_zchar. Qed. Local Notation Zalpha := vchar_FTtype345_bridge. Local Hint Resolve Zalpha : core. Lemma vchar_Dade_FTtype345_bridge i j : j != 0 -> (alpha_ i j)^\tau \in 'Z[irr G]. Proof. by move=> nz_j; rewrite Dade_vchar // zchar_split Zalpha alpha_on. Qed. Local Notation Zalpha_tau := vchar_Dade_FTtype345_bridge. (* This covers the last paragraph in the proof of (10.5); it's isolated here *) (* because it is reused in the proof of (10.10) and (11.8). *) Lemma norm_FTtype345_bridge i j : j != 0 -> '[(alpha_ i j)^\tau] = 2%:R + n ^+ 2. Proof. move=> nz_j; rewrite Dade_isometry ?alpha_on // cfnormBd ?cfnormZ; last first. by rewrite cfdotZr cfdotBl cfdotZl !o_mu2_zeta !(mulr0, subr0). have [_ _ _ /Cnat_ge0 n_ge0] := FTtype345_constants. 
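(* Since n \in Cnat we have n >= 0, so the norm around n can be dropped. *)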
rewrite ger0_norm // cfnormBd ?cfnorm_sign ?cfnorm_irr ?irrWnorm ?mulr1 //. by rewrite cfdotZr (cfdot_prTIirr pddM) (negPf nz_j) andbF ?mulr0. Qed. Local Notation norm_alpha := norm_FTtype345_bridge. Implicit Type tau : {additive 'CF(M) -> 'CF(G)}. (* This exported version is adapted to its use in (11.8). *) Lemma FTtype345_bridge_coherence calS1 tau1 i j X Y : coherent_with calS1 M^# tau tau1 -> (alpha_ i j)^\tau = X + Y -> cfConjC_subset calS1 calS0 -> {subset calS1 <= irr M} -> j != 0 -> Y \in 'Z[map tau1 calS1] -> '[Y, X] = 0 -> '[Y] = n ^+ 2 -> X = delta *: (eta_ i j - eta_ i 0). Proof. move=> cohS1 Dalpha sS10 irrS1 nz_j S1_Y oYX nY_n2. have [[_ Ddelta _ Nn] [[Itau1 Ztau1] _]] := (FTtype345_constants, cohS1). have [|z Zz defY] := zchar_expansion _ S1_Y. rewrite map_inj_in_uniq; first by case: sS10. by apply: sub_in2 (Zisometry_inj Itau1); apply: mem_zchar. have nX_2: '[X] = 2%:R. apply: (addrI '[Y]); rewrite -cfnormDd // addrC -Dalpha norm_alpha //. by rewrite addrC nY_n2. have Z_X: X \in 'Z[irr G]. rewrite -[X](addrK Y) -Dalpha rpredB ?Zalpha_tau // defY big_map big_seq. by apply: rpred_sum => psi S1psi; rewrite rpredZ_Cint // Ztau1 ?mem_zchar. apply: eq_signed_sub_cTIiso => // y Vy; rewrite -[X](addrK Y) -Dalpha -/delta. rewrite !cfunE !cycTIiso_restrict //; set rhs := delta * _. rewrite Dade_id ?defA0 //; last by rewrite setUC inE mem_class_support. have notM'y: y \notin M'. by have:= subsetP (prDade_supp_disjoint pddM) y Vy; rewrite inE. have Wy: y \in W :\: W2 by move: Vy; rewrite !inE => /andP[/norP[_ ->]]. rewrite !cfunE 2?{1}prTIirr_id // prTIsign0 scale1r Ddelta // cfunE -mulrBr. rewrite -/rhs (cfun_on0 (seqInd_on _ Szeta)) // mulr0 subr0. rewrite (ortho_cycTIiso_vanish ctiWG) ?subr0 // -/sigma. apply/orthoPl=> _ /mapP[_ /(cycTIirrP defW)[i1 [j1 ->]] ->]. rewrite defY cfdot_suml big_map big1_seq //= => psi S1psi. by rewrite cfdotZl (coherent_ortho_cycTIiso MtypeP sS10) ?irrS1 ?mulr0. Qed. (* This is a specialization of the above, used in (10.5) and (10.10). *) Let def_tau_alpha calS1 tau1 i j : coherent_with calS1 M^# tau tau1 -> cfConjC_subset calS1 calS0 -> j != 0 -> zeta \in calS1 -> '[(alpha_ i j)^\tau, tau1 zeta] = - n -> (alpha_ i j)^\tau = delta *: (eta_ i j - eta_ i 0) - n *: tau1 zeta. Proof. move=> cohS1 [_ sS10 ccS1] nz_j S1zeta alpha_zeta_n. have [[_ _ _ Nn] [[Itau1 _] _]] := (FTtype345_constants, cohS1). set Y := - (n *: _); apply: canRL (addrK _) _; set X := _ + _. have Dalpha: (alpha_ i j)^\tau = X + Y by rewrite addrK. have nY_n2: '[Y] = n ^+ 2. by rewrite cfnormN cfnormZ norm_Cnat // Itau1 ?mem_zchar // irrWnorm ?mulr1. pose S2 := zeta :: zeta^*%CF; pose S2tau1 := map tau1 S2. have S2_Y: Y \in 'Z[S2tau1] by rewrite rpredN rpredZ_Cnat ?mem_zchar ?mem_head. have sS21: {subset S2 <= calS1} by apply/allP; rewrite /= ccS1 ?S1zeta. have cohS2 : coherent_with S2 M^# tau tau1 := subset_coherent_with sS21 cohS1. have irrS2: {subset S2 <= irr M} by apply/allP; rewrite /= cfAut_irr irr_zeta. rewrite (FTtype345_bridge_coherence cohS2 Dalpha) //; last first. rewrite -[X]opprK cfdotNr opprD cfdotDr nY_n2 cfdotNl cfdotNr opprK cfdotZl. by rewrite cfdotC alpha_zeta_n rmorphN conj_Cnat // mulrN addNr oppr0. split=> [|_ /sS21/sS10//|]; last first. by apply/allP; rewrite /= !inE cfConjCK !eqxx orbT. by rewrite /= inE eq_sym; have [[_ /hasPn-> //]] := scohS0; apply: sS10. Qed. Section NonCoherence. (* We will ultimately contradict these assumptions. *) (* Note that we do not need to export any lemma save the final contradiction. *) Variable tau1 : {additive 'CF(M) -> 'CF(G)}. 
Hypothesis cohS : coherent_with calS M^# tau tau1. Local Notation "mu ^\tau1" := (tau1 mu%CF) (at level 2, format "mu ^\tau1") : ring_scope. Let Dtau1 : {in 'Z[calS, M'^#], tau1 =1 tau}. Proof. by case: cohS => _; apply: sub_in1; apply: zchar_onS; apply: setSD. Qed. Let o_zeta_s: '[zeta, zeta^*] = 0. Proof. by rewrite (seqInd_conjC_ortho _ _ _ Szeta) ?mFT_odd /= ?defMs. Qed. Import ssrint rat. (* This is the second part of Peterfalvi (10.5). *) Let tau_alpha i j : j != 0 -> (alpha_ i j)^\tau = delta *: (eta_ i j - eta_ i 0) - n *: zeta^\tau1. Proof. move=> nz_j; set al_ij := alpha_ i j; have [[Itau1 Ztau1] _] := cohS. have [mu1 Ddelta d_gt1 Nn] := FTtype345_constants. pose a := '[al_ij^\tau, zeta^\tau1] + n. have al_ij_zeta_s: '[al_ij^\tau, zeta^*^\tau1] = a. apply: canRL (addNKr _) _; rewrite addrC -opprB -cfdotBr -raddfB. have M'dz: zeta - zeta^*%CF \in 'Z[calS, M'^#] by apply: seqInd_sub_aut_zchar. rewrite Dtau1 // Dade_isometry ?alpha_on ?tauM' //. rewrite cfdotBr opprB cfdotBl cfdot_conjCr rmorphB linearZ /=. rewrite -!prTIirr_aut !cfdotBl !cfdotZl !o_mu2_zeta o_zeta_s !mulr0. by rewrite opprB !(subr0, rmorph0) add0r irrWnorm ?mulr1. have Zal_ij: al_ij^\tau \in 'Z[irr G] by apply: Zalpha_tau. have Za: a \in Cint. by rewrite rpredD ?(Cint_Cnat Nn) ?Cint_cfdot_vchar ?Ztau1 ?(mem_zchar Szeta). have{al_ij_zeta_s} ub_da2: (d ^ 2)%:R * a ^+ 2 <= (2%:R + n ^+ 2) * w1%:R. have [k nz_k j'k]: exists2 k, k != 0 & k != j. have:= w2gt2; rewrite -nirrW2 (cardD1 0) (cardD1 j) !inE nz_j !ltnS lt0n. by case/pred0Pn=> k /and3P[]; exists k. have muk_1: mu_ k 1%g = (d * w1)%:R. by rewrite (prTIred_1 pddM) mu1 // mulrC -natrM. rewrite natrX -exprMn; have <-: '[al_ij^\tau, (mu_ k)^\tau1] = d%:R * a. rewrite mulrDr mulr_natl -raddfMn /=; apply: canRL (addNKr _) _. rewrite addrC -cfdotBr -raddfMn -raddfB -scaler_nat. rewrite Dtau1 ?Dade_isometry ?alpha_on ?tauM' ?ZmuBzeta // cfdotBr cfdotZr. rewrite rmorph_nat !cfdotBl !cfdotZl !o_mu2_zeta irrWnorm //. rewrite !(cfdot_prTIirr_red pddM) cfdotC o_mu_zeta conjC0 !mulr0 mulr1. by rewrite 2![_ == k](negPf _) 1?eq_sym // mulr0 -mulrN opprB !subr0 add0r. have ZSmuk: mu_ k \in 'Z[calS] by rewrite mem_zchar ?calSmu. have <-: '[al_ij^\tau] * '[(mu_ k)^\tau1] = (2%:R + n ^+ 2) * w1%:R. by rewrite Itau1 // cfdot_prTIred eqxx mul1n norm_alpha. by rewrite -Cint_normK ?cfCauchySchwarz // Cint_cfdot_vchar // Ztau1. suffices a0 : a = 0. by apply: (def_tau_alpha _ sSS0); rewrite // -sub0r -a0 addrK. apply: contraTeq (d_gt1) => /(sqr_Cint_ge1 Za) a2_ge1. suffices: n == 0. rewrite mulf_eq0 invr_eq0 orbC -implyNb neq0CG /= subr_eq0 => /eqP Dd. by rewrite -ltC_nat -(normr_nat _ d) Dd normr_sign ltxx. suffices: n ^+ 2 < n + 1. have d_dv_M: (d%:R %| #|M|)%C by rewrite -(mu1 0 j) // ?dvd_irr1_cardG. have{d_dv_M} d_odd: odd d by apply: dvdn_odd (mFT_odd M); rewrite -dvdC_nat. have: (2%N %| n * w1%:R)%C. rewrite divfK ?neq0CG // -signrN signrE addrA -(natrD _ d 1). by rewrite rpredB // dvdC_nat dvdn2 ?odd_double // oddD d_odd. rewrite -(truncCK Nn) -mulrSr -natrM -natrX ltC_nat (dvdC_nat 2) pnatr_eq0. rewrite dvdn2 oddM mFT_odd; case: (truncC n) => [|[|n1]] // _ /idPn[]. by rewrite -leqNgt (ltn_exp2l 1). apply: lt_le_trans (_ : n * - delta + 1 <= _); last first. have ->: n + 1 = n * `|- delta| + 1 by rewrite normrN normr_sign mulr1. rewrite ler_add2r ler_wpmul2l ?Cnat_ge0 ?real_ler_norm //. by rewrite rpredN ?rpred_sign. rewrite -(ltr_pmul2r (ltC_nat 0 2)) mulrDl mul1r -[rhs in rhs + _]mulrA. apply: le_lt_trans (_ : n ^+ 2 * (w1%:R - 1) < _). 
rewrite -(subnKC w1gt2) -(@natrB _ _ 1) // ler_wpmul2l ?leC_nat //. by rewrite Cnat_ge0 ?rpredX. rewrite -(ltr_pmul2l (gt0CG W1)) -/w1 2!mulrBr mulr1 mulrCA -exprMn. rewrite mulrDr ltr_subl_addl addrCA -mulrDr mulrCA mulrA -ltr_subl_addl. rewrite -mulrBr mulNr opprK divfK ?neq0CG // mulr_natr addrA subrK -subr_sqr. rewrite sqrr_sign mulrC [_ + 2%:R]addrC (lt_le_trans _ ub_da2) //. apply: lt_le_trans (ler_wpmul2l (ler0n _ _) a2_ge1). by rewrite mulr1 ltr_subl_addl -mulrS -natrX ltC_nat. Qed. (* This is the first part of Peterfalvi (10.6)(a). *) Let tau1mu j : j != 0 -> (mu_ j)^\tau1 = delta *: \sum_i eta_ i j. Proof. move=> nz_j; have [[[Itau1 _] _] Smu_j] := (cohS, calSmu nz_j). have eta_mu i: '[delta *: (eta_ i j - eta_ i 0), (mu_ j)^\tau1] = 1. have Szeta_s: zeta^*%CF \in calS by rewrite cfAut_seqInd. have o_zeta_s_w k: '[eta_ i k, d%:R *: zeta^*^\tau1] = 0. have o_S_eta_ := coherent_ortho_cycTIiso MtypeP sSS0 cohS. by rewrite cfdotZr cfdotC o_S_eta_ ?conjC0 ?mulr0 // cfAut_irr. pose psi := mu_ j - d%:R *: zeta^*%CF; rewrite (canRL (subrK _) (erefl psi)). rewrite (raddfD tau1) raddfZnat cfdotDr addrC cfdotZl cfdotBl !{}o_zeta_s_w. rewrite subr0 mulr0 add0r -(canLR (subrK _) (tau_alpha i nz_j)). have Zpsi: psi \in 'Z[calS, M'^#]. by rewrite ZmuBzeta // cfunE zeta1w1 rmorph_nat. rewrite cfdotDl cfdotZl Itau1 ?(zcharW Zpsi) ?mem_zchar // -cfdotZl Dtau1 //. rewrite Dade_isometry ?alpha_on ?tauM' {Zpsi}// -cfdotDl cfdotBr cfdotZr. rewrite subrK !cfdotBl !cfdotZl !cfdot_prTIirr_red eq_sym (negPf nz_j). by rewrite !o_mu2_irr ?cfAut_irr // !(mulr0, subr0) eqxx. have [_ Ddel _ _] := FTtype345_constants. have [[d1 k] Dtau1mu] := FTtypeP_coherent_TIred sSS0 cohS irr_zeta Szeta Smu_j. case=> [[Dd1 Dk] | [_ Dk _]]; first by rewrite Dtau1mu Dd1 Dk [_ ^+ _]Ddel. have /esym/eqP/idPn[] := eta_mu 0; rewrite Dtau1mu Dk /= cfdotZl cfdotZr. rewrite cfdot_sumr big1 ?mulr0 ?oner_eq0 // => i _; rewrite -/sigma -/(w_ i _). rewrite cfdotBl !(cfdot_cycTIiso pddM) !(eq_sym 0) conjC_Iirr_eq0 -!irr_eq1. rewrite (eq_sym j) -(inj_eq irr_inj) conjC_IirrE. by rewrite odd_eq_conj_irr1 ?mFT_odd ?subrr. Qed. (* This is the second part of Peterfalvi (10.6)(a). *) Let tau1mu0 : (mu_ 0 - zeta)^\tau = \sum_i eta_ i 0 - zeta^\tau1. Proof. have [j nz_j] := has_nonprincipal_irr ntW2. have sum_al: \sum_i alpha_ i j = mu_ j - d%:R *: zeta - delta *: (mu_ 0 - zeta). rewrite scalerBr opprD addrACA scaler_sumr !sumrB sumr_const; congr (_ + _). by rewrite -opprD -scalerBl nirrW1 -scaler_nat scalerA mulrC divfK ?neq0CG. have ->: mu_ 0 - zeta = delta *: (mu_ j - d%:R *: zeta - \sum_i alpha_ i j). by rewrite sum_al opprD addNKr opprK signrZK. rewrite linearZ linearB; apply: canLR (signrZK _) _; rewrite -/delta /=. rewrite linear_sum -Dtau1 ?ZmuBzeta //= raddfB raddfZnat addrAC scalerBr. rewrite (eq_bigr _ (fun i _ => tau_alpha i nz_j)) sumrB sumr_const nirrW1 opprD. rewrite -scaler_sumr sumrB scalerBr -tau1mu // opprD !opprK -!addrA addNKr. congr (_ + _); rewrite -scaler_nat scalerA mulrC divfK ?neq0CG //. by rewrite addrC -!scaleNr -scalerDl addKr. Qed. (* This is Peterfalvi (10.6)(b). *) Let zeta_tau1_coprime g : g \notin 'A~(M) -> coprime #[g] w1 -> `|zeta^\tau1 g| >= 1. Proof. move=> notAg co_g_w1; have Amu0zeta := mu0Bzeta_on Szeta zeta1w1. have mu0_zeta_g: (mu_ 0 - zeta)^\tau g = 0. have [ | ] := boolP (g \in 'A0~(M)); rewrite -FT_Dade0_supportE; last first. by apply: cfun_on0; apply: Dade_cfunS. case/bigcupP=> x A0x xRg; rewrite (DadeE _ A0x) // (cfun_on0 Amu0zeta) //. apply: contra notAg => Ax; apply/bigcupP; exists x => //. 
by rewrite -def_FTsignalizer0. have{mu0_zeta_g} zeta_g: zeta^\tau1 g = \sum_i eta_ i 0 g. by apply/esym/eqP; rewrite -subr_eq0 -{2}mu0_zeta_g tau1mu0 !cfunE sum_cfunE. have Zwg i: eta_ i 0 g \in Cint. have Lchi: 'chi_i \is a linear_char by apply: irr_cyclic_lin. rewrite Cint_cycTIiso_coprime // dprod_IirrE irr0 cfDprod_cfun1r. rewrite (coprime_dvdr _ co_g_w1) // dvdn_cforder. rewrite -rmorphX cfDprodl_eq1 -dvdn_cforder; apply/dvdn_cforderP=> x W1x. by rewrite -lin_charX // -expg_mod_order (eqnP (order_dvdG W1x)) lin_char1. have odd_zeta_g: (zeta^\tau1 g == 1 %[mod 2])%C. rewrite zeta_g (bigD1 0) //= [w_ 0 0]cycTIirr00 cycTIiso1 cfun1E inE. pose eW1 := [pred i : Iirr W1 | conjC_Iirr i < i]%N. rewrite (bigID eW1) (reindex_inj (can_inj (@conjC_IirrK _ _))) /=. set s1 := \sum_(i | _) _; set s2 := \sum_(i | _) _; suffices ->: s1 = s2. by rewrite -mulr2n addrC -(mulr_natr _ 2) eqCmod_addl_mul ?rpred_sum. apply/eq_big=> [i | i _]. rewrite (canF_eq (@conjC_IirrK _ _)) conjC_Iirr0 conjC_IirrK -leqNgt. rewrite ltn_neqAle val_eqE -irr_eq1 (eq_sym i) -(inj_eq irr_inj) andbA. by rewrite aut_IirrE odd_eq_conj_irr1 ?mFT_odd ?andbb. rewrite -{1}conjC_Iirr0 [w_ _ _]cycTIirr_aut -cfAut_cycTIiso. by rewrite cfunE conj_Cint ?Zwg. rewrite norm_Cint_ge1 //; first by rewrite zeta_g rpred_sum. apply: contraTneq odd_zeta_g => ->. by rewrite eqCmod_sym /eqCmod subr0 /= (dvdC_nat 2 1). Qed. (* This is Peterfalvi (10.7). *) Let Frob_der1_type2 S : S \in 'M -> FTtype S == 2%N -> [Frobenius S^`(1) with kernel S`_\F]. Proof. move: S => L maxL /eqP Ltype2. have [S pairMS [xdefW [U StypeP]]] := FTtypeP_pair_witness maxM MtypeP. have [[_ _ maxS] _] := pairMS; rewrite {1}(negPf notMtype2) /= => Stype2 _. move/(_ L maxL)/implyP; rewrite Ltype2 /= => /setUP[] /imsetP[x0 _ defL]. by case/eqP/idPn: Ltype2; rewrite defL FTtypeJ. pose H := (S`_\F)%G; pose HU := (S^`(1))%G. suffices{L Ltype2 maxL x0 defL}: [Frobenius HU = H ><| U]. by rewrite defL derJ FcoreJ FrobeniusJker; apply: FrobeniusWker. have sHHU: H \subset HU by have [_ [_ _ _ /sdprodW/mulG_sub[]]] := StypeP. pose calT := seqIndD HU S H 1; pose tauS := FT_Dade0 maxS. have DcalTs: calT = seqIndD HU S S`_\s 1. by congr (seqIndD _ _ _ _); apply: val_inj; rewrite /= FTcore_type2. have notFrobM: ~~ [Frobenius M with kernel M`_\F]. by apply/existsP=> [[U1 /Frobenius_of_typeF/(typePF_exclusion MtypeP)]]. have{notFrobM} notSsupportM: ~~ [exists x, FTsupports M (S :^ x)]. apply: contra notFrobM => /'exists_existsP[x [y /and3P[Ay not_sCyM sCySx]]]. have [_ [_ /(_ y)uMS] /(_ y)] := FTsupport_facts maxM. rewrite inE (subsetP (FTsupp_sub0 _)) //= in uMS *. rewrite -(eq_uniq_mmax (uMS not_sCyM) _ sCySx) ?mmaxJ // FTtypeJ. by case=> // _ _ _ [_ ->]. have{notSsupportM} tiA1M_AS: [disjoint 'A1~(M) & 'A~(S)]. have notMG_S: gval S \notin M :^: G. by apply: contraL Stype2 => /imsetP[x _ ->]; rewrite FTtypeJ. by apply: negbNE; have [_ <- _] := FT_Dade_support_disjoint maxM maxS notMG_S. pose pddS := FT_prDade_hypF maxS StypeP; pose nu := primeTIred pddS. have{tiA1M_AS} oST phi psi: phi \in 'Z[calS, M^#] -> psi \in 'Z[calT, S^#] -> '[phi^\tau, tauS psi] = 0. - rewrite zcharD1_seqInd // -[seqInd _ _]/calS => Sphi. rewrite zcharD1E => /andP[Tpsi psi1_0]. rewrite -FT_Dade1E ?defA1 ?(zchar_on Sphi) //. apply: cfdot_complement (Dade_cfunS _ _) _; rewrite FT_Dade1_supportE setTD. rewrite -[tauS _]FT_DadeE ?(cfun_onS _ (Dade_cfunS _ _)) ?FT_Dade_supportE //. by rewrite -disjoints_subset disjoint_sym. have /subsetD1P[_ /setU1K <-] := FTsupp_sub S; rewrite cfun_onD1 {}psi1_0. 
rewrite -Tpsi andbC -zchar_split {psi Tpsi}(zchar_trans_on _ Tpsi) //. move=> psi Tpsi; rewrite zchar_split mem_zchar //=. have [s /setDP[_ kerH's] ->] := seqIndP Tpsi. by rewrite inE in kerH's; rewrite (prDade_Ind_irr_on pddS). have notStype5: FTtype S != 5%N by rewrite (eqP Stype2). have [|[_ _ _ _ -> //]] := typeP_reducible_core_cases maxS StypeP notStype5. case=> t []; set lambda := 'chi_t => T0C'lam lam_1 _. have{T0C'lam} Tlam: lambda \in calT. by apply: seqIndS T0C'lam; rewrite Iirr_kerDS ?sub1G. have{lam_1} [r [nz_r Tnu_r nu_r_1]]: exists r, [/\ r != 0, nu r \in calT & nu r 1%g = lambda 1%g]. - have [_] := typeP_reducible_core_Ind maxS StypeP notStype5. set H0 := Ptype_Fcore_kernel _; set nuT := filter _ _; rewrite -/nu. case/hasP=> nu_r nuTr _ /(_ _ nuTr)/imageP[r nz_r Dr] /(_ _ nuTr)[nu_r1 _ _]. have{nuTr} Tnu_r := mem_subseq (filter_subseq _ _) nuTr. by exists r; rewrite -Dr nu_r1 (seqIndS _ Tnu_r) // Iirr_kerDS ?sub1G. pose T2 := [:: lambda; lambda^*; nu r; (nu r)^*]%CF. have [rmRS scohT]: exists rmRS, subcoherent calT tauS rmRS. move: (FTtypeP_coh_base _ _) (FTtypeP_subcoherent maxS StypeP) => RS scohT. by rewrite DcalTs; exists RS. have [lam_irr nu_red]: lambda \in irr S /\ nu r \notin irr S. by rewrite mem_irr prTIred_not_irr. have [lam'nu lams'nu]: lambda != nu r /\ lambda^*%CF != nu r. by rewrite -conjC_IirrE !(contraNneq _ nu_red) // => <-; apply: mem_irr. have [[_ nRT ccT] _ _ _ _] := scohT. have{ccT} sT2T: {subset T2 <= calT} by apply/allP; rewrite /= ?Tlam ?Tnu_r ?ccT. have{nRT} uccT2: cfConjC_subset T2 calT. split; last 1 [by [] | by apply/allP; rewrite /= !inE !cfConjCK !eqxx !orbT]. rewrite /uniq /T2 !inE !negb_or -!(inv_eq (@cfConjCK _ S)) !cfConjCK. by rewrite lam'nu lams'nu !(hasPn nRT). have scohT2 := subset_subcoherent scohT uccT2. have [tau2 cohT2]: coherent T2 S^# tauS. apply: (uniform_degree_coherence scohT2); rewrite /= !cfunE nu_r_1 eqxx. by rewrite conj_Cnat ?Cnat_irr1 ?eqxx. have [s nz_s] := has_nonprincipal_irr ntW2; have Smu_s := calSmu nz_s. pose alpha := mu_ s - d%:R *: zeta; pose beta := nu r - lambda. have Salpha: alpha \in 'Z[calS, M^#] by rewrite zcharD1_seqInd ?ZmuBzeta. have [T2lam T2nu_r]: lambda \in T2 /\ nu r \in T2 by rewrite !inE !eqxx !orbT. have Tbeta: beta \in 'Z[T2, S^#]. by rewrite zcharD1E rpredB ?mem_zchar //= !cfunE nu_r_1 subrr. have /eqP/idPn[] := oST _ _ Salpha (zchar_subset sT2T Tbeta). have [[_ <- //] [_ <- //]] := (cohS, cohT2). rewrite !raddfB raddfZnat /= subr_eq0 !cfdotBl !cfdotZl. have [|[dr r'] -> _] := FTtypeP_coherent_TIred _ cohT2 lam_irr T2lam T2nu_r. by rewrite -DcalTs. set sigS := cyclicTIiso _ => /=. have etaC i j: sigS (cyclicTIirr xdefW j i) = eta_ i j by apply: cycTIisoC. rewrite !cfdotZr addrC cfdot_sumr big1 => [|j _]; last first. by rewrite etaC (coherent_ortho_cycTIiso _ sSS0) ?mem_irr. rewrite !mulr0 oppr0 add0r rmorph_sign. have ->: '[zeta^\tau1, tau2 lambda] = 0. pose X1 := (zeta :: zeta^*)%CF; pose X2 := (lambda :: lambda^*)%CF. pose Y1 := map tau1 X1; pose Y2 := map tau2 X2; have [_ _ ccS] := sSS0. have [sX1S sX2T]: {subset X1 <= calS} /\ {subset X2 <= T2}. by split; apply/allP; rewrite /= ?inE ?eqxx ?orbT // Szeta ccS. have [/(sub_iso_to (zchar_subset sX1S) sub_refl)[Itau1 Ztau1] Dtau1L] := cohS. have [/(sub_iso_to (zchar_subset sX2T) sub_refl)[Itau2 Ztau2] Dtau2] := cohT2. have Z_Y12: {subset Y1 <= 'Z[irr G]} /\ {subset Y2 <= 'Z[irr G]}. by rewrite /Y1 /Y2; split=> ? /mapP[xi /mem_zchar] => [/Ztau1|/Ztau2] ? ->. have o1Y12: orthonormal Y1 && orthonormal Y2. rewrite !map_orthonormal //. 
by apply: seqInd_conjC_ortho2 Tlam; rewrite ?gFnormal ?mFT_odd. by apply: seqInd_conjC_ortho2 Szeta; rewrite ?gFnormal ?mFT_odd ?mem_irr. apply: orthonormal_vchar_diff_ortho Z_Y12 o1Y12 _; rewrite -2!raddfB. have SzetaBs: zeta - zeta^*%CF \in 'Z[calS, M^#]. by rewrite zcharD1_seqInd // seqInd_sub_aut_zchar. have T2lamBs: lambda - lambda^*%CF \in 'Z[T2, S^#]. rewrite sub_aut_zchar ?zchar_onG ?mem_zchar ?inE ?eqxx ?orbT //. by move=> xi /sT2T/seqInd_vcharW. by rewrite Dtau1L // Dtau2 // !Dade1 oST ?(zchar_subset sT2T) ?eqxx. have [[ds s'] /= -> _] := FTtypeP_coherent_TIred sSS0 cohS irr_zeta Szeta Smu_s. rewrite mulr0 subr0 !cfdotZl mulrA -signr_addb !cfdot_suml. rewrite (bigD1 r') //= cfdot_sumr (bigD1 s') //=. rewrite etaC cfdot_cycTIiso !eqxx big1 => [|j ne_s'_j]; last first. by rewrite etaC cfdot_cycTIiso andbC eq_sym (negPf ne_s'_j). rewrite big1 => [|i ne_i_r']; last first. rewrite cfdot_sumr big1 // => j _. by rewrite etaC cfdot_cycTIiso (negPf ne_i_r'). rewrite !addr0 mulr1 big1 ?mulr0 ?signr_eq0 // => i _. by rewrite -etaC cfdotC (coherent_ortho_cycTIiso _ _ cohT2) ?conjC0 -?DcalTs. Qed. (* This is the bulk of the proof of Peterfalvi (10.8); however the result *) (* will be restated below to avoid the quantification on zeta and tau1. *) Lemma FTtype345_noncoherence_main : False. Proof. have [S pairMS [xdefW [U StypeP]]] := FTtypeP_pair_witness maxM MtypeP. have [[_ _ maxS] _] := pairMS; rewrite {1}(negPf notMtype2) /= => Stype2 _ _. pose H := (S`_\F)%G; pose HU := (S^`(1))%G. have [[_ hallW2 _ defS] [_ _ nUW2 defHU] _ [_ _ sW1H _ _] _] := StypeP. have ntU: U :!=: 1%g by have [[]] := compl_of_typeII maxS StypeP Stype2. pose G01 := [set g : gT | coprime #[g] w1]. pose G0 := ~: 'A~(M) :&: G01; pose G1 := ~: 'A~(M) :\: G01. pose chi := zeta^\tau1; pose ddAM := FT_Dade_hyp maxM; pose rho := invDade ddAM. have Suzuki: #|G|%:R^-1 * (\sum_(g in ~: 'A~(M)) `|chi g| ^+ 2 - #|~: 'A~(M)|%:R) + '[rho chi] - #|'A(M)|%:R / #|M|%:R <= 0. - pose A_ (_ : 'I_1) := ddAM; pose Atau i := Dade_support (A_ i). have tiA i j : i != j -> [disjoint Atau i & Atau j] by rewrite !ord1. have Nchi1: '[chi] = 1 by have [[->]] := cohS; rewrite ?mem_zchar ?irrWnorm. have:= Dade_cover_inequality tiA Nchi1; rewrite /= !big_ord1 -/rho -addrA. by congr (_ * _ + _ <= 0); rewrite FT_Dade_supportE setTD. have{Suzuki} ub_rho: '[rho chi] <= #|'A(M)|%:R / #|M|%:R + #|G1|%:R / #|G|%:R. rewrite addrC -subr_le0 opprD addrCA (le_trans _ Suzuki) // -addrA. rewrite ler_add2r -(cardsID G01 (~: _)) (big_setID G01) -/G0 -/G1 /=. rewrite mulrC mulrBr ler_subr_addl -mulrBr natrD addrK. rewrite ler_wpmul2l ?invr_ge0 ?ler0n // -sumr_const ler_paddr //. by apply: sumr_ge0 => g; rewrite exprn_ge0 ?normr_ge0. apply: ler_sum => g; rewrite !inE => /andP[notAg] /(zeta_tau1_coprime notAg). by rewrite expr_ge1 ?normr_ge0. have lb_M'bar: (w1 * 2 <= #|M' / M''|%g.-1)%N. suffices ->: w1 = #|W1 / M''|%g. rewrite muln2 -ltnS prednK ?cardG_gt0 //. by rewrite (ltn_odd_Frobenius_ker frobMbar) ?quotient_odd ?mFT_odd. have [_ sW1M _ _ tiM'W1] := sdprod_context defM. apply/card_isog/quotient_isog; first exact: subset_trans (der_norm 2 M). by apply/trivgP; rewrite -tiM'W1 setSI ?normal_sub. have lb_rho: 1 - w1%:R / #|M'|%:R <= '[rho chi]. have cohS_A: coherent_with calS M^# (Dade ddAM) tau1. have [Itau1 _] := cohS; split=> // phi; rewrite zcharD1_seqInd // => Sphi. by rewrite Dtau1 // FT_DadeE // defA (zchar_on Sphi). rewrite {ub_rho}/rho [w1](index_sdprod defM); rewrite defA in (ddAM) cohS_A *. 
have [||_ [_ _ [] //]] := Dade_Ind1_sub_lin cohS_A _ irr_zeta Szeta. - by apply: seqInd_nontrivial Szeta; rewrite ?mem_irr ?mFT_odd. - by rewrite -(index_sdprod defM). rewrite -(index_sdprod defM) ler_pdivl_mulr ?ltr0n // -natrM. rewrite -leC_nat in lb_M'bar; apply: le_trans lb_M'bar _. rewrite ler_subr_addl -mulrS prednK ?cardG_gt0 // leC_nat. by rewrite dvdn_leq ?dvdn_quotient. have{lb_rho ub_rho}: 1 - #|G1|%:R / #|G|%:R - w1%:R^-1 < w1%:R / #|M'|%:R :> algC. - rewrite -addrA -opprD ltr_subl_addr -ltr_subl_addl. apply: le_lt_trans (le_trans lb_rho ub_rho) _; rewrite addrC ltr_add2l. rewrite ltr_pdivr_mulr ?gt0CG // mulrC -(sdprod_card defM) natrM. by rewrite mulfK ?neq0CG // defA ltC_nat (cardsD1 1%g M') group1. have frobHU: [Frobenius HU with kernel H] by apply: Frob_der1_type2. have tiH: normedTI H^# G S. by have [_ _] := FTtypeII_ker_TI maxS Stype2; rewrite FTsupp1_type2. have sG1_HVG: G1 \subset class_support H^# G :|: class_support V G. apply/subsetP=> x; rewrite !inE coprime_has_primes ?cardG_gt0 // negbK. case/andP=> /hasP[p W1p]; rewrite /= mem_primes => /and3P[p_pr _ p_dv_x] _. have [a x_a a_p] := Cauchy p_pr p_dv_x. have nta: a != 1%g by rewrite -order_gt1 a_p prime_gt1. have ntx: x != 1%g by apply: contraTneq x_a => ->; rewrite /= cycle1 inE. have cxa: a \in 'C[x] by rewrite -cent_cycle (subsetP (cycle_abelian x)). have hallH: \pi(H).-Hall(G) H by apply: Hall_pi; have [] := FTcore_facts maxS. have{a_p} p_a: p.-elt a by rewrite /p_elt a_p pnat_id. have piHp: p \in \pi(H) by rewrite (piSg _ W1p). have [y _ Hay] := Hall_pJsub hallH piHp (subsetT _) p_a. do [rewrite -cycleJ cycle_subG; set ay := (a ^ y)%g] in Hay. rewrite -[x](conjgK y); set xy := (x ^ y)%g. have caxy: xy \in 'C[ay] by rewrite cent1J memJ_conjg cent1C. have [ntxy ntay]: xy != 1%g /\ ay != 1%g by rewrite !conjg_eq1. have Sxy: xy \in S. have H1ay: ay \in H^# by apply/setD1P. by rewrite (subsetP (cent1_normedTI tiH H1ay)) ?setTI. have [HUxy | notHUxy] := boolP (xy \in HU). rewrite memJ_class_support ?inE ?ntxy //=. have [_ _ _ regHUH] := Frobenius_kerP frobHU. by rewrite (subsetP (regHUH ay _)) // inE ?HUxy // inE ntay. suffices /imset2P[xyz z Vxzy _ ->]: xy \in class_support V S. by rewrite -conjgM orbC memJ_class_support. rewrite /V setUC -(FTsupp0_typeP maxS StypeP) !inE Sxy. rewrite andb_orr andNb (contra (subsetP _ _) notHUxy) /=; last first. by apply/bigcupsP=> z _; rewrite (eqP Stype2) setDE -setIA subsetIl. have /Hall_pi hallHU: Hall S HU by rewrite (sdprod_Hall defS). rewrite (eqP Stype2) -(mem_normal_Hall hallHU) ?gFnormal // notHUxy. have /mulG_sub[sHHU _] := sdprodW defHU. rewrite (contra (fun p'xy => pi'_p'group p'xy (piSg sHHU piHp))) //. by rewrite pgroupE p'natE // cycleJ cardJg p_dv_x. have ub_G1: #|G1|%:R / #|G|%:R <= #|H|%:R / #|S|%:R + #|V|%:R / #|W|%:R :> algC. rewrite ler_pdivr_mulr ?ltr0n ?cardG_gt0 // mulrC mulrDr !mulrA. rewrite ![_ * _ / _]mulrAC -!natf_indexg ?subsetT //= -!natrM -natrD ler_nat. apply: leq_trans (subset_leq_card sG1_HVG) _. rewrite cardsU (leq_trans (leq_subr _ _)) //. have unifJG B C: C \in B :^: G -> #|C| = #|B|. by case/imsetP=> z _ ->; rewrite cardJg. have oTI := card_uniform_partition (unifJG _) (partition_class_support _ _). have{tiH} [ntH tiH /eqP defNH] := and3P tiH. have [_ _ /and3P[ntV tiV /eqP defNV]] := ctiWG. rewrite !oTI // !card_conjugates defNH defNV /= leq_add2r ?leq_mul //. by rewrite subset_leq_card ?subsetDl. rewrite le_gtF // addrAC ler_subr_addl -ler_subr_addr (le_trans ub_G1) //. rewrite -(sdprod_card defS) -(sdprod_card defHU) addrC. 
rewrite -mulnA !natrM invfM mulVKf ?natrG_neq0 // -/w1 -/w2. have sW12_W: W1 :|: W2 \subset W by rewrite -(dprodWY defW) sub_gen. rewrite cardsD (setIidPr sW12_W) natrB ?subset_leq_card // mulrBl. rewrite divff ?natrG_neq0 // -!addrA ler_add2l. rewrite cardsU -(dprod_card defW) -/w1 -/w2; have [_ _ _ ->] := dprodP defW. rewrite cards1 natrB ?addn_gt0 ?cardG_gt0 // addnC natrD -addrA mulrDl mulrBl. rewrite {1}mulnC !natrM !invfM !mulVKf ?natrG_neq0 // opprD -addrA ler_add2l. rewrite mul1r -{1}[_^-1]mul1r addrC ler_oppr [- _]opprB -!mulrBl. rewrite -addrA -opprD ler_pdivl_mulr; last by rewrite natrG_gt0. apply: le_trans (_ : 1 - (3%:R^-1 + 7%:R^-1) <= _); last first. rewrite ler_add2l ler_opp2. rewrite ler_add // lef_pinv ?qualifE ?gt0CG ?ltr0n ?ler_nat //. have notStype5: FTtype S != 5%N by rewrite (eqP Stype2). have frobUW2 := Ptype_compl_Frobenius maxS StypeP notStype5. apply: leq_ltn_trans (ltn_odd_Frobenius_ker frobUW2 (mFT_odd _)). by rewrite (leq_double 3). apply: le_trans (_ : 2%:R^-1 <= _); last by rewrite -!CratrE; compute. rewrite mulrAC ler_pdivr_mulr 1?gt0CG // ler_pdivl_mull ?ltr0n //. rewrite -!natrM ler_nat mulnA -(Lagrange (normal_sub nsM''M')) mulnC leq_mul //. by rewrite subset_leq_card //; have [_ _ _ []] := MtypeP. by rewrite -card_quotient ?normal_norm // mulnC -(prednK (cardG_gt0 _)) leqW. Qed. End NonCoherence. (* This is Peterfalvi (10.9). *) Lemma FTtype345_Dade_bridge0 : (w1 < w2)%N -> {chi | [/\ (mu_ 0 - zeta)^\tau = \sum_i eta_ i 0 - chi, chi \in 'Z[irr G], '[chi] = 1 & forall i j, '[chi, eta_ i j] = 0]}. Proof. move=> w1_lt_w2; set psi := mu_ 0 - zeta; pose Wsig := map sigma (irr W). have [X wsigX [chi [DpsiG _ o_chiW]]] := orthogonal_split Wsig psi^\tau. exists (- chi); rewrite opprK rpredN cfnormN. have o_chi_w i j: '[chi, eta_ i j] = 0. by rewrite (orthoPl o_chiW) ?map_f ?mem_irr. have [Isigma Zsigma] := cycTI_Zisometry ctiWG. have o1Wsig: orthonormal Wsig by rewrite map_orthonormal ?irr_orthonormal. have [a_ Da defX] := orthonormal_span o1Wsig wsigX. have{} Da i j: a_ (eta_ i j) = '[psi^\tau, eta_ i j]. by rewrite DpsiG cfdotDl o_chi_w addr0 Da. have sumX: X = \sum_i \sum_j a_ (eta_ i j) *: eta_ i j. rewrite pair_bigA defX big_map (big_nth 0) size_tuple big_mkord /=. rewrite (reindex (dprod_Iirr defW)) /=. by apply: eq_bigr => [[i j] /= _]; rewrite -tnth_nth. by exists (inv_dprod_Iirr defW) => ij; rewrite (inv_dprod_IirrK, dprod_IirrK). have Zpsi: psi \in 'Z[irr M]. by rewrite rpredB ?irr_vchar ?(mem_zchar irr_zeta) ?char_vchar ?prTIred_char. have{Zpsi} M'psi: psi \in 'Z[irr M, M'^#]. by rewrite -defA zchar_split Zpsi mu0Bzeta_on. have A0psi: psi \in 'CF(M, 'A0(M)). by apply: cfun_onS (zchar_on M'psi); rewrite defA0 subsetUl. have a_00: a_ (eta_ 0 0) = 1. rewrite Da [w_ 0 0](cycTIirr00 defW) [sigma 1]cycTIiso1. rewrite Dade_reciprocity // => [|x _ y _]; last by rewrite !cfun1E !inE. rewrite rmorph1 /= -(prTIirr00 ptiWM) -/(mu2_ 0 0) cfdotC. by rewrite cfdotBr o_mu2_zeta subr0 cfdot_prTIirr_red rmorph1. have n2psiG: '[psi^\tau] = w1.+1%:R. rewrite Dade_isometry // cfnormBd ?o_mu_zeta //. by rewrite cfnorm_prTIred irrWnorm // -/w1 mulrSr. have psiG_V0 x: x \in V -> psi^\tau x = 0. move=> Vx; rewrite Dade_id ?defA0; last first. by rewrite inE orbC mem_class_support. rewrite (cfun_on0 (zchar_on M'psi)) // -defA. suffices /setDP[]: x \in 'A0(M) :\: 'A(M) by []. by rewrite (FTsupp0_typeP maxM MtypeP) // mem_class_support. have ZpsiG: psi^\tau \in 'Z[irr G]. by rewrite Dade_vchar // zchar_split (zcharW M'psi). 
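(* Expand '[psi^\tau] over the orthonormal family (eta_ i j), plus the      *)
(* residual component chi, which is orthogonal to every eta_ i j.           *)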
have n2psiGsum: '[psi^\tau] = \sum_i \sum_j `|a_ (eta_ i j)| ^+ 2 + '[chi]. rewrite DpsiG addrC cfnormDd; last first. by rewrite (span_orthogonal o_chiW) ?memv_span1. rewrite addrC defX cfnorm_sum_orthonormal // big_map pair_bigA; congr (_ + _). rewrite big_tuple /= (reindex (dprod_Iirr defW)) //. by exists (inv_dprod_Iirr defW) => ij; rewrite (inv_dprod_IirrK, dprod_IirrK). have NCpsiG: (cyclicTI_NC ctiWG psi^\tau < 2 * minn w1 w2)%N. apply: (@leq_ltn_trans w1.+1); last first. by rewrite /minn w1_lt_w2 mul2n -addnn (leq_add2r w1 2) cardG_gt1. pose z_a := [pred ij | a_ (eta_ ij.1 ij.2) == 0]. have ->: cyclicTI_NC ctiWG psi^\tau = #|[predC z_a]|. by apply: eq_card => ij; rewrite !inE -Da. rewrite -leC_nat -n2psiG n2psiGsum ler_paddr ?cfnorm_ge0 // pair_bigA. rewrite (bigID z_a) big1 /= => [|ij /eqP->]; last by rewrite normCK mul0r. rewrite add0r -sumr_const ler_sum // => [[i j] nz_ij]. by rewrite expr_ge1 ?norm_Cint_ge1 // Da Cint_cfdot_vchar ?Zsigma ?irr_vchar. have nz_psiG00: '[psi^\tau, eta_ 0 0] != 0 by rewrite -Da a_00 oner_eq0. have [a_i|a_j] := small_cycTI_NC psiG_V0 NCpsiG nz_psiG00. have psiGi: psi^\tau = \sum_i eta_ i 0 + chi. rewrite DpsiG sumX; congr (_ + _); apply: eq_bigr => i _. rewrite big_ord_recl /= Da a_i -Da a_00 mul1r scale1r. by rewrite big1 ?addr0 // => j1 _; rewrite Da a_i mul0r scale0r. split=> // [||i j]; last by rewrite cfdotNl o_chi_w oppr0. rewrite -(canLR (addKr _) psiGi) rpredD // rpredN rpred_sum // => j _. by rewrite Zsigma ?irr_vchar. apply: (addrI w1%:R); rewrite -mulrSr -n2psiG n2psiGsum; congr (_ + _). rewrite -nirrW1 // -sumr_const; apply: eq_bigr => i _. rewrite big_ord_recl /= Da a_i -Da a_00 mul1r normr1. by rewrite expr1n big1 ?addr0 // => j1 _; rewrite Da a_i normCK !mul0r. suffices /idPn[]: '[psi^\tau] >= w2%:R. rewrite odd_geq /= ?uphalf_half mFT_odd //= in w1_lt_w2. by rewrite n2psiG leC_nat -ltnNge odd_geq ?mFT_odd. rewrite n2psiGsum exchange_big /= ler_paddr ?cfnorm_ge0 //. rewrite -nirrW2 -sumr_const; apply: ler_sum => i _. rewrite big_ord_recl /= Da a_j -Da a_00 mul1r normr1. by rewrite expr1n big1 ?addr0 // => j1 _; rewrite Da a_j normCK !mul0r. Qed. Local Notation H := M'. Local Notation "` 'H'" := `M' (at level 0) : group_scope. Local Notation H' := M''. Local Notation "` 'H''" := `M'' (at level 0) : group_scope. (* This is the bulk of the proof of Peterfalvi, Theorem (10.10); as with *) (* (10.8), it will be restated below in order to remove dependencies on zeta, *) (* U_M and W1. *) Lemma FTtype5_exclusion_main : FTtype M != 5%N. Proof. apply/negP=> Mtype5. suffices [tau1]: coherent calS M^# tau by case/FTtype345_noncoherence_main. have [[_ U_M_1] MtypeV] := compl_of_typeV maxM MtypeP Mtype5. have [_ [_ _ _ defH] _ [_ _ _ sW2H' _] _] := MtypeP. have{U_M_1 defH} defMF: M`_\F = H by rewrite /= -defH U_M_1 sdprodg1. have nilH := Fcore_nil M; rewrite defMF -/w1 in MtypeV nilH. without loss [p [pH not_cHH ubHbar not_w1_dv_p1]]: / exists p : nat, [/\ p.-group H, ~~ abelian H, #|H : H'| <= 4 * w1 ^ 2 + 1 & ~ w1 %| p.-1]%N. - have [isoH1 solH] := (quotient1_isog H, nilpotent_sol nilH). have /non_coherent_chief-IHcoh := subset_subcoherent scohS0 sSS0. apply: IHcoh (fun coh _ => coh) _ => // [|[[_ ubH] [p [pH ab'H] /negP-dv'p]]]. split; rewrite ?mFT_odd ?normal1 ?sub1G ?quotient_nil //. by rewrite joingG1 (FrobeniusWker frobMbar). apply; exists p; rewrite (isog_abelian isoH1) (isog_pgroup p isoH1) -subn1. by rewrite /= joingG1 -(index_sdprod defM) in ubH dv'p. have ntH: H :!=: 1%g by apply: contraNneq not_cHH => ->; apply: abelian1. 
have [sH'H nH'H] := andP nsM''M'; have sW2H := subset_trans sW2H' sH'H. have def_w2: w2 = p by apply/eqP; have:= pgroupS sW2H pH; rewrite pgroupE pnatE. have piHp q: q \in \pi(H) -> q = p. by rewrite /= -(part_pnat_id pH) pi_of_part // => /andP[_ /eqnP]. have [tiHG | [_ /piHp-> []//] | [_ /piHp-> [oH w1_dv_p1 _]]] := MtypeV. suffices [tau1 [Itau1 Dtau1]]: coherent (seqIndD H M H 1) M^# 'Ind[G]. exists tau1; split=> // phi Sphi; rewrite {}Dtau1 //. rewrite zcharD1_seqInd // -subG1 -setD_eq0 -defA in Sphi tiHG ntH. by have Aphi := zchar_on Sphi; rewrite -FT_DadeE // Dade_Ind. apply: (@Sibley_coherence _ [set:_] M H W1); first by rewrite mFT_odd. right; exists W2 => //; exists 'A0(M), W, defW. by rewrite -defA -{2}(group_inj defMs). have [p_pr _ _] := pgroup_pdiv pH ntH; rewrite (pcore_pgroup_id pH) in oH. have{not_cHH} esH: extraspecial H. by apply: (p3group_extraspecial pH); rewrite // oH pfactorK. have oH': #|H'| = p. by rewrite -(card_center_extraspecial pH esH); have [[_ <-]] := esH. have defW2: W2 :=: H' by apply/eqP; rewrite eqEcard sW2H' oH' -def_w2 /=. have iH'H: #|H : H'|%g = (p ^ 2)%N by rewrite -divgS // oH oH' mulKn ?prime_gt0. have w1_gt0: (0 < w1)%N by apply: cardG_gt0. (* This is step (10.10.1). *) have{ubHbar} [def_p_w1 w1_lt_w2]: (p = 2 * w1 - 1 /\ w1 < w2)%N. have /dvdnP[k def_p]: 2 * w1 %| p.+1. by rewrite Gauss_dvd ?coprime2n ?mFT_odd ?dvdn2 //= -{1}def_w2 mFT_odd. suffices k1: k = 1%N. rewrite k1 mul1n in def_p; rewrite -ltn_double -mul2n -def_p -addn1 addnK. by rewrite -addnS -addnn def_w2 leq_add2l prime_gt1. have [k0 | k_gt0] := posnP k; first by rewrite k0 in def_p. apply/eqP; rewrite eqn_leq k_gt0 andbT -ltnS -ltn_double -mul2n. rewrite -[(2 * k)%N]prednK ?muln_gt0 // ltnS -ltn_sqr 3?leqW //=. rewrite -subn1 sqrnB ?muln_gt0 // expnMn muln1 mulnA ltnS leq_subLR. rewrite addn1 addnS ltnS -mulnSr leq_pmul2l // -(leq_subLR _ 1). rewrite (leq_trans (leq_pmulr _ w1_gt0)) // -(leq_pmul2r w1_gt0). rewrite -mulnA mulnBl mul1n -2!leq_double -!mul2n mulnA mulnBr -!expnMn. rewrite -(expnMn 2 _ 2) mulnCA -def_p -addn1 leq_subLR sqrnD muln1. by rewrite (addnC p) mulnDr addnA leq_add2r addn1 addnS -iH'H. (* This is step (10.10.2). *) pose S1 := seqIndD H M H H'. have sS1S: {subset S1 <= calS} by apply: seqIndS; rewrite Iirr_kerDS ?sub1G. have irrS1: {subset S1 <= irr M}. move=> _ /seqIndP[s /setDP[kerH' ker'H] ->]; rewrite !inE in kerH' ker'H. rewrite -(quo_IirrK _ kerH') // mod_IirrE // cfIndMod // cfMod_irr //. rewrite (irr_induced_Frobenius_ker (FrobeniusWker frobMbar)) //. by rewrite quo_Iirr_eq0 // -subGcfker. have S1w1: {in S1, forall xi : 'CF(M), xi 1%g = w1%:R}. move=> _ /seqIndP[s /setDP[kerH' _] ->]; rewrite !inE in kerH'. by rewrite cfInd1 // -(index_sdprod defM) lin_char1 ?mulr1 // lin_irr_der1. have sS10: cfConjC_subset S1 calS0. by apply: seqInd_conjC_subset1; rewrite /= defMs. pose S2 := [seq mu_ j | j in predC1 0]. have szS2: size S2 = p.-1. by rewrite -def_w2 size_map -cardE cardC1 card_Iirr_abelian ?cyclic_abelian. have uS2: uniq S2 by apply/dinjectiveP; apply: in2W (prTIred_inj pddM). have redS2: {subset S2 <= [predC irr M]}. by move=> _ /imageP[j _ ->]; apply: (prTIred_not_irr pddM). have sS2S: {subset S2 <= calS} by move=> _ /imageP[j /calSmu Smu_j ->]. have S1'2: {subset S2 <= [predC S1]}. by move=> xi /redS2; apply: contra (irrS1 _). have w1_dv_p21: w1 %| p ^ 2 - 1 by rewrite (subn_sqr p 1) addn1 dvdn_mull. have [j nz_j] := has_nonprincipal_irr ntW2. have [Dmu2_1 Ddelta_ lt1d Nn] := FTtype345_constants. 
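(* Counting character degrees over the partition of Iirr shows that calS is *)
(* exactly S1 ++ S2 up to permutation, with d = p, delta = -1 and n = 2%:R. *)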
have{lt1d} [defS szS1 Dd Ddel Dn]: [/\ perm_eq calS (S1 ++ S2), size S1 = (p ^ 2 - 1) %/ w1, d = p, delta = -1 & n = 2%:R]. - pose X_ (S0 : seq 'CF(M)) := [set s | 'Ind[M, H] 'chi_s \in S0]. pose sumX_ cS0 := \sum_(s in X_ cS0) 'chi_s 1%g ^+ 2. have defX1: X_ S1 = Iirr_kerD H H H'. by apply/setP=> s; rewrite !inE mem_seqInd // !inE. have defX: X_ calS = Iirr_kerD H H 1%g. by apply/setP=> s; rewrite !inE mem_seqInd ?normal1 //= !inE. have sumX1: sumX_ S1 = (p ^ 2)%:R - 1. by rewrite /sumX_ defX1 sum_Iirr_kerD_square // iH'H indexgg mul1r. have ->: size S1 = (p ^ 2 - 1) %/ w1. apply/eqP; rewrite eqn_div // -eqC_nat mulnC [w1](index_sdprod defM). rewrite (size_irr_subseq_seqInd _ (subseq_refl S1)) //. rewrite natrB ?expn_gt0 ?prime_gt0 // -sumr_const -sumX1. apply/eqP/esym/eq_bigr => s. by rewrite defX1 !inE -lin_irr_der1 => /and3P[_ _ /eqP->]; rewrite expr1n. have oX2: #|X_ S2| = p.-1. by rewrite -(size_red_subseq_seqInd_typeP MtypeP uS2 sS2S). have sumX2: (p ^ 2 * p.-1)%:R <= sumX_ S2 ?= iff (d == p). rewrite /sumX_ (eq_bigr (fun _ => d%:R ^+ 2)) => [|s]; last first. rewrite inE => /imageP[j1 nz_j1 Dj1]; congr (_ ^+ 2). apply: (mulfI (neq0CiG M H)); rewrite -cfInd1 // -(index_sdprod defM). by rewrite Dj1 (prTIred_1 pddM) Dmu2_1. rewrite sumr_const oX2 mulrnA (mono_leif (ler_pmuln2r _)); last first. by rewrite -def_w2 -(subnKC w2gt2). rewrite natrX (mono_in_leif ler_sqr) ?rpred_nat // eq_sym leif_nat_r. apply/leqif_eq; rewrite dvdn_leq 1?ltnW //. have: (mu2_ 0 j 1%g %| (p ^ 3)%N)%C. by rewrite -(cfRes1 H) cfRes_prTIirr -oH dvd_irr1_cardG. rewrite Dmu2_1 // dvdC_nat => /dvdn_pfactor[//|[_ d1|e _ ->]]. by rewrite d1 in lt1d. by rewrite expnS dvdn_mulr. pose S3 := filter [predC S1 ++ S2] calS. have sumX3: 0 <= sumX_ S3 ?= iff nilp S3. rewrite /sumX_; apply/leifP. have [-> | ] := altP nilP; first by rewrite big_pred0 // => s; rewrite !inE. rewrite -lt0n -has_predT => /hasP[xi S3xi _]. have /seqIndP[s _ Dxi] := mem_subseq (filter_subseq _ _) S3xi. rewrite (bigD1 s) ?inE -?Dxi //= ltr_spaddl ?sumr_ge0 // => [|s1 _]. by rewrite exprn_gt0 ?irr1_gt0. by rewrite ltW ?exprn_gt0 ?irr1_gt0. have [_ /esym] := leif_add sumX2 sumX3. have /(canLR (addKr _)) <-: sumX_ calS = sumX_ S1 + (sumX_ S2 + sumX_ S3). rewrite [sumX_ _](big_setID (X_ S1)); congr (_ + _). by apply: eq_bigl => s; rewrite !inE andb_idl // => /sS1S. rewrite (big_setID (X_ S2)); congr (_ + _); apply: eq_bigl => s. by rewrite !inE andb_idl // => S2s; rewrite [~~ _]S1'2 ?sS2S. by rewrite !inE !mem_filter /= mem_cat orbC negb_or andbA. rewrite sumX1 /sumX_ defX sum_Iirr_kerD_square ?sub1G ?normal1 // indexgg. rewrite addr0 mul1r indexg1 oH opprD addrACA addNr addr0 addrC. rewrite (expnSr p 2) -[p in (_ ^ 2 * p)%:R - _]prednK ?prime_gt0 // mulnSr. rewrite natrD addrK eqxx => /andP[/eqP Dd /nilP S3nil]. have uS12: uniq (S1 ++ S2). by rewrite cat_uniq seqInd_uniq uS2 andbT; apply/hasPn. rewrite uniq_perm ?seqInd_uniq {uS12}// => [|xi]; last first. apply/idP/idP; apply: allP xi; last by rewrite all_cat !(introT allP _). by rewrite -(canLR negbK (has_predC _ _)) has_filter -/S3 S3nil. have: (w1 %| d%:R - delta)%C. by rewrite unfold_in pnatr_eq0 eqn0Ngt w1_gt0 rpred_Cnat. rewrite /n Dd def_p_w1 /delta; case: (Idelta _) => [_|/idPn[] /=]. by rewrite opprK -(natrD _ _ 1) subnK ?muln_gt0 // natrM mulfK ?neq0CG. rewrite mul2n -addnn -{1}(subnKC (ltnW w1gt2)) !addSn mulrSr addrK dvdC_nat. by rewrite add0n dvdn_addl // -(subnKC w1gt2) gtnNdvd // leqW. have scohS1 := subset_subcoherent scohS0 sS10. have o1S1: orthonormal S1. 
rewrite orthonormalE andbC; have [_ _ -> _ _] := scohS1. by apply/allP=> xi /irrS1/irrP[t ->]; rewrite /= cfnorm_irr. have [tau1 cohS1]: coherent S1 M^# tau. apply: uniform_degree_coherence scohS1 _; apply: all_pred1_constant w1%:R _ _. by rewrite all_map; apply/allP=> xi /S1w1/= ->. have [[Itau1 Ztau1] Dtau1] := cohS1. have o1S1tau: orthonormal (map tau1 S1) by apply: map_orthonormal. have S1zeta: zeta \in S1. by have:= Szeta; rewrite (perm_mem defS) mem_cat => /orP[//|/redS2/negP]. (* This is the main part of step 10.10.3; as the definition of alpha_ remains *) (* valid we do not need to reprove alpha_on. *) have Dalpha i (al_ij := alpha_ i j) : al_ij^\tau = delta *: (eta_ i j - eta_ i 0) - n *: tau1 zeta. - have [Y S1_Y [X [Dal_ij _ oXY]]] := orthogonal_split (map tau1 S1) al_ij^\tau. have [a_ Da_ defY] := orthonormal_span o1S1tau S1_Y. have oXS1 lam : lam \in S1 -> '[X, tau1 lam] = 0. by move=> S1lam; rewrite (orthoPl oXY) ?map_f. have{} Da_ lam : lam \in S1 -> a_ (tau1 lam) = '[al_ij^\tau, tau1 lam]. by move=> S1lam; rewrite Dal_ij cfdotDl oXS1 // addr0 Da_. pose a := n + a_ (tau1 zeta); have [_ oS1S1] := orthonormalP o1S1. have Da_z: a_ (tau1 zeta) = - n + a by rewrite addKr. have Za: a \in Cint. rewrite rpredD ?Dn ?rpred_nat // Da_ // Cint_cfdot_vchar ?Zalpha_tau //=. by rewrite Ztau1 ?mem_zchar. have Da_z' lam: lam \in S1 -> lam != zeta -> a_ (tau1 lam) = a. move=> S1lam zeta'lam; apply: canRL (subrK _) _. rewrite !Da_ // -cfdotBr -raddfB. have S1dlam: lam - zeta \in 'Z[S1, M^#]. by rewrite zcharD1E rpredB ?mem_zchar //= !cfunE !S1w1 ?subrr. rewrite Dtau1 // Dade_isometry ?alpha_on ?tauM' //; last first. by rewrite -zcharD1_seqInd ?(zchar_subset sS1S). have o_mu2_lam k: '[mu2_ i k, lam] = 0 by rewrite o_mu2_irr ?sS1S ?irrS1. rewrite !cfdotBl !cfdotZl !cfdotBr !o_mu2_lam !o_mu2_zeta !(subr0, mulr0). by rewrite irrWnorm ?oS1S1 // eq_sym (negPf zeta'lam) !add0r mulrN1 opprK. have lb_n2alij: (a - n) ^+ 2 + (size S1 - 1)%:R * a ^+ 2 <= '[al_ij^\tau]. rewrite Dal_ij cfnormDd; last first. by rewrite cfdotC (span_orthogonal oXY) ?rmorph0 // memv_span1. rewrite ler_paddr ?cfnorm_ge0 // defY cfnorm_sum_orthonormal //. rewrite (big_rem (tau1 zeta)) ?map_f //= le_eqVlt; apply/predU1P; left. congr (_ + _). by rewrite Da_z addrC Cint_normK 1?rpredD // rpredN Dn rpred_nat. rewrite (eq_big_seq (fun _ => a ^+ 2)) => [|tau1lam]; last first. rewrite rem_filter ?free_uniq ?orthonormal_free // filter_map. case/mapP=> lam; rewrite mem_filter /= andbC => /andP[S1lam]. rewrite (inj_in_eq (Zisometry_inj Itau1)) ?mem_zchar // => zeta'lam ->. by rewrite Da_z' // Cint_normK. rewrite big_tnth sumr_const card_ord size_rem ?map_f // size_map. by rewrite mulr_natl subn1. have{lb_n2alij} ub_a2: (size S1)%:R * a ^+ 2 <= 2%:R * a * n + 2%:R. rewrite norm_alpha // addrC sqrrB !addrA ler_add2r in lb_n2alij. rewrite mulr_natl -mulrSr ler_subl_addl subn1 in lb_n2alij. by rewrite -mulrA !mulr_natl; case: (S1) => // in S1zeta lb_n2alij *. have{ub_a2} ub_8a2: 8%:R * a ^+ 2 <= 4%:R * a + 2%:R. rewrite mulrAC Dn -natrM in ub_a2; apply: le_trans ub_a2. rewrite -Cint_normK // ler_wpmul2r ?exprn_ge0 ?normr_ge0 // leC_nat szS1. rewrite (subn_sqr p 1) def_p_w1 subnK ?muln_gt0 // mulnA mulnK // mulnC. by rewrite -subnDA -(mulnBr 2%N _ 1%N) mulnA (@leq_pmul2l 4 2) ?ltn_subRL. have Z_4a1: 4%:R * a - 1%:R \in Cint by rewrite rpredB ?rpredM ?rpred_nat. have{ub_8a2} ub_4a1: `|4%:R * a - 1| < 3%:R. rewrite -ltr_sqr ?rpred_nat ?qualifE ?normr_ge0 // -natrX Cint_normK //. 
rewrite sqrrB1 exprMn -natrX -mulrnAl -mulrnA (natrD _ 8 1) ltr_add2r. rewrite (natrM _ 2 4) (natrM _ 2 8) -!mulrA -mulrBr ltr_pmul2l ?ltr0n //. by rewrite ltr_subl_addl (le_lt_trans ub_8a2) // ltr_add2l ltr_nat. have{ub_4a1} a0: a = 0. apply: contraTeq ub_4a1 => a_nz; have:= norm_Cint_ge1 Za a_nz. rewrite real_ltr_norml ?real_ler_normr ?Creal_Cint //; apply: contraL. case/andP; rewrite ltr_subl_addr -(natrD _ 3 1) gtr_pmulr ?ltr0n //. rewrite ltr_oppl opprB -mulrN => /lt_le_trans/=/(_ _ (leC_nat 3 5)). by rewrite (natrD _ 1 4) ltr_add2l gtr_pmulr ?ltr0n //; do 2!move/lt_geF->. apply: (def_tau_alpha cohS1 sS10 nz_j S1zeta). by rewrite -Da_ // Da_z a0 addr0. have o_eta__zeta i j1: '[tau1 zeta, eta_ i j1] = 0. by rewrite (coherent_ortho_cycTIiso _ sS10 cohS1) ?mem_irr. (* This is step (10.4), the final one. *) have Dmu0zeta: (mu_ 0 - zeta)^\tau = \sum_i eta_ i 0 - tau1 zeta. have A0mu0tau: mu_ 0 - zeta \in 'CF(M, 'A0(M)). rewrite /'A0(M) defA; apply: (cfun_onS (subsetUl _ _)). rewrite cfun_onD1 [mu_ 0](prTIred0 pddM) !cfunE zeta1w1 cfuniE // group1. by rewrite mulr1 subrr rpredB ?rpredZnat ?cfuni_on ?(seqInd_on _ Szeta) /=. have [chi [Dmu0 Zchi n1chi o_chi_w]] := FTtype345_Dade_bridge0 w1_lt_w2. have dirr_chi: chi \in dirr G by rewrite dirrE Zchi n1chi /=. have dirr_zeta: tau1 zeta \in dirr G. by rewrite dirrE Ztau1 ?Itau1 ?mem_zchar //= irrWnorm. have: '[(alpha_ 0 j)^\tau, (mu_ 0 - zeta)^\tau] == - delta + n. rewrite Dade_isometry ?alpha_on // !cfdotBl !cfdotZl !cfdotBr. rewrite !o_mu2_zeta 2!cfdot_prTIirr_red (negPf nz_j) cfdotC o_mu_zeta. by rewrite eqxx irrWnorm // conjC0 !(subr0, add0r) mulr1 mulrN1 opprK. rewrite Dalpha // Dmu0 !{1}(cfdotBl, cfdotZl) !cfdotBr 2!{1}(cfdotC _ chi). rewrite !o_chi_w conjC0 !cfdot_sumr big1 => [|i]; first last. by rewrite (cfdot_cycTIiso pddM) (negPf nz_j) andbF. rewrite (bigD1 0) //= cfdot_cycTIiso big1 => [|i nz_i]; first last. by rewrite cfdot_cycTIiso eq_sym (negPf nz_i). rewrite big1 // !subr0 !add0r addr0 mulrN1 mulrN opprK (can_eq (addKr _)). rewrite {2}Dn -mulr_natl Dn (inj_eq (mulfI _)) ?pnatr_eq0 //. by rewrite cfdot_dirr_eq1 // => /eqP->. have [] := uniform_prTIred_coherent pddM nz_j; rewrite -/sigma. have ->: uniform_prTIred_seq pddM j = S2. congr (map _ _); apply: eq_enum => k; rewrite !inE -!/(mu_ _). by rewrite andb_idr // => nz_k; rewrite 2!{1}prTIred_1 2?Dmu2_1. case=> _ _ ccS2 _ _ [tau2 Dtau2 cohS2]. have{} cohS2: coherent_with S2 M^# tau tau2 by apply: cohS2. have sS20: cfConjC_subset S2 calS0. by split=> // xi /sS2S Sxi; have [_ ->] := sSS0. rewrite perm_sym perm_catC in defS; apply: perm_coherent defS _. suffices: (mu_ j - d%:R *: zeta)^\tau = tau2 (mu_ j) - tau1 (d%:R *: zeta). apply: (bridge_coherent scohS0 sS20 cohS2 sS10 cohS1) => [phi|]. by apply: contraL => /S1'2. rewrite cfunD1E !cfunE zeta1w1 prTIred_1 mulrC Dmu2_1 // subrr. by rewrite image_f // rpredZnat ?mem_zchar. have sumA: \sum_i alpha_ i j = mu_ j - delta *: mu_ 0 - (d%:R - delta) *: zeta. rewrite !sumrB sumr_const /= -scaler_sumr; congr (_ - _ - _). rewrite card_Iirr_abelian ?cyclic_abelian // -/w1 -scaler_nat. by rewrite scalerA mulrC divfK ?neq0CG. rewrite scalerBl opprD opprK addrACA in sumA. rewrite -{sumA}(canLR (addrK _) sumA) opprD opprK -scalerBr. rewrite linearD linearZ linear_sum /= Dmu0zeta scalerBr. rewrite (eq_bigr _ (fun i _ => Dalpha i)) sumrB sumr_const nirrW1. rewrite -!scaler_sumr sumrB addrAC !addrA scalerBr subrK -addrA -opprD. rewrite raddfZnat Dtau2 Ddelta_ //; congr (_ - _). 
by rewrite addrC -scaler_nat scalerA mulrC divfK ?neq0CG // -scalerDl subrK.
Qed.

End OneMaximal.

Implicit Type M : {group gT}.

(* This is the exported version of Peterfalvi, Theorem (10.8). *)
Theorem FTtype345_noncoherence M (M' := M^`(1)%G) (maxM : M \in 'M) :
  (FTtype M > 2)%N -> ~ coherent (seqIndD M' M M' 1) M^# (FT_Dade0 maxM).
Proof.
rewrite ltnNge 2!leq_eqVlt => /norP[notMtype2 /norP[notMtype1 _]] [tau1 cohS].
have [U W W1 W2 defW MtypeP] := FTtypeP_witness maxM notMtype1.
have [zeta [irr_zeta Szeta zeta1w1]] := FTtypeP_ref_irr maxM MtypeP.
exact: (FTtype345_noncoherence_main MtypeP _ irr_zeta Szeta zeta1w1 cohS).
Qed.

(* This is the exported version of Peterfalvi, Theorem (10.10). *)
Theorem FTtype5_exclusion M : M \in 'M -> FTtype M != 5.
Proof.
move=> maxM; apply: wlog_neg; rewrite negbK => Mtype5.
have notMtype2: FTtype M != 2 by rewrite (eqP Mtype5).
have [U W W1 W2 defW [[MtypeP _] _]] := FTtypeP 5 maxM Mtype5.
have [zeta [irr_zeta Szeta zeta1w1]] := FTtypeP_ref_irr maxM MtypeP.
exact: (FTtype5_exclusion_main _ MtypeP _ irr_zeta).
Qed.

(* This is the first assertion of Peterfalvi (10.11). *)
Lemma FTtypeP_pair_primes S T W W1 W2 (defW : W1 \x W2 = W) :
  typeP_pair S T defW -> prime #|W1| /\ prime #|W2|.
Proof.
move=> pairST; have [[_ maxS maxT] _ _ _ _] := pairST.
have type24 maxM := compl_of_typeII_IV maxM _ (FTtype5_exclusion maxM).
split; first by have [U /type24[]] := typeP_pairW pairST.
have xdefW: W2 \x W1 = W by rewrite dprodC.
by have [U /type24[]] := typeP_pairW (typeP_pair_sym xdefW pairST).
Qed.

Corollary FTtypeP_primes M U W W1 W2 (defW : W1 \x W2 = W) :
  M \in 'M -> of_typeP M U defW -> prime #|W1| /\ prime #|W2|.
Proof.
move=> maxM MtypeP; have [T pairMT _] := FTtypeP_pair_witness maxM MtypeP.
exact: FTtypeP_pair_primes pairMT.
Qed.

(* This is the remainder of Peterfalvi (10.11). *)
Lemma FTtypeII_prime_facts M U W W1 W2 (defW : W1 \x W2 = W) (maxM : M \in 'M) :
    of_typeP M U defW -> FTtype M == 2 ->
  let H := M`_\F%G in let HU := M^`(1)%G in
  let calS := seqIndD HU M H 1 in let tau := FT_Dade0 maxM in
  let p := #|W2| in let q := #|W1| in
  [/\ p.-abelem H, (#|H| = p ^ q)%N & coherent calS M^# tau].
Proof.
move=> MtypeP Mtype2 H HU calS tau p q.
have Mnot5: FTtype M != 5 by rewrite (eqP Mtype2).
have [_ cUU _ _ _] := compl_of_typeII maxM MtypeP Mtype2.
have [q_pr p_pr]: prime q /\ prime p := FTtypeP_primes maxM MtypeP.
have:= typeII_IV_core maxM MtypeP Mnot5; rewrite Mtype2 -/p -/q => [[_ oH]].
have [] := Ptype_Fcore_kernel_exists maxM MtypeP Mnot5.
have [_ _] := Ptype_Fcore_factor_facts maxM MtypeP Mnot5.
rewrite -/H; set H0 := Ptype_Fcore_kernel _; set Hbar := (H / H0)%G.
rewrite def_Ptype_factor_prime // -/p -/q => oHbar chiefHbar _.
have trivH0: H0 :=: 1%g.
  have [/maxgroupp/andP[/andP[sH0H _] nH0M] /andP[sHM _]] := andP chiefHbar.
  apply: card1_trivg; rewrite -(setIidPr sH0H) -divg_index.
  by rewrite -card_quotient ?(subset_trans sHM) // oHbar -oH divnn cardG_gt0.
have abelHbar: p.-abelem Hbar.
  have pHbar: p.-group Hbar by rewrite /pgroup oHbar pnatX pnat_id.
  by rewrite -is_abelem_pgroup // (sol_chief_abelem _ chiefHbar) ?mmax_sol.
rewrite /= trivH0 -(isog_abelem (quotient1_isog _)) in abelHbar.
have:= Ptype_core_coherence maxM MtypeP Mnot5; rewrite trivH0.
set C := _ MtypeP; have sCU: C \subset U by rewrite [C]unlock subsetIl.
by rewrite (derG1P (abelianS sCU cUU)) [(1 <*> 1)%G]join1G.
Qed.

End Ten.
State Before: a b : ℝ n : ℕ ⊢ (∫ (x : ℝ) in a..b, cos x) = sin b - sin a State After: case hderiv a b : ℝ n : ℕ ⊢ deriv sin = fun x => cos x case hdiff a b : ℝ n : ℕ ⊢ ∀ (x : ℝ), x ∈ [[a, b]] → DifferentiableAt ℝ sin x case hcont a b : ℝ n : ℕ ⊢ ContinuousOn (fun x => cos x) [[a, b]] Tactic: rw [integral_deriv_eq_sub'] State Before: case hderiv a b : ℝ n : ℕ ⊢ deriv sin = fun x => cos x State After: no goals Tactic: norm_num State Before: case hdiff a b : ℝ n : ℕ ⊢ ∀ (x : ℝ), x ∈ [[a, b]] → DifferentiableAt ℝ sin x State After: no goals Tactic: simp only [differentiableAt_sin, implies_true] State Before: case hcont a b : ℝ n : ℕ ⊢ ContinuousOn (fun x => cos x) [[a, b]] State After: no goals Tactic: exact continuousOn_cos
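The tactic trace above proves the standard fundamental-theorem computation for cosine. A minimal self-contained Lean restatement (in current mathlib this is available directly as `integral_cos`; the exact name may drift between versions):

import Mathlib

open Real MeasureTheory in
example (a b : ℝ) : (∫ x in a..b, cos x) = sin b - sin a :=
  integral_cos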
lemma smallomegaI_filterlim_at_infinity: assumes lim: "filterlim (\<lambda>x. f x / g x) at_infinity F" shows "f \<in> \<omega>[F](g)"
If $a \leq b$ and $x \leq y$, and $a, b, x, y$ are all nonnegative, then $a x \leq b y$.
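A Lean 4 / mathlib rendering of the same monotonicity fact (mirroring Isabelle's `mult_mono`; strictly, only the nonnegativity of $x$ and $b$ is needed):

import Mathlib

example (a b x y : ℝ) (hab : a ≤ b) (hxy : x ≤ y)
    (hx : 0 ≤ x) (hb : 0 ≤ b) : a * x ≤ b * y :=
  mul_le_mul hab hxy hx hb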
lemma contour_integral_eq: "(\<And>x. x \<in> path_image p \<Longrightarrow> f x = g x) \<Longrightarrow> contour_integral p f = contour_integral p g"
Require Import init. Require Import order_minmax. Require Import linear_extend. Require Export geometric_construct. Require Import geometric_grade. Require Import geometric_exterior_isomorphism. Require Import geometric_decomposition. Require Import exterior_involutions. Require Import geometric_involutions_grade. Require Import exterior_grade. (* begin hide *) Section GeometricOuter. (* end hide *) Context {F : CRingObj} {V : ModuleObj F}. (* begin hide *) Let UP := cring_plus F. Let UZ := cring_zero F. Let UN := cring_neg F. Let UPC := cring_plus_comm F. Let UPZ := cring_plus_lid F. Let UPN := cring_plus_linv F. Let UM := cring_mult F. Let UO := cring_one F. Let UMC := cring_mult_comm F. Existing Instances UP UZ UN UPC UPZ UPN UM UO UMC. Let VP := module_plus V. Let VS := module_scalar V. Existing Instances VP VS. (* end hide *) Context (B : set_type bilinear_form). (* begin hide *) Let GP := geo_plus B. Let GZ := geo_zero B. Let GN := geo_neg B. Let GPA := geo_plus_assoc B. Let GPC := geo_plus_comm B. Let GPZ := geo_plus_lid B. Let GPN := geo_plus_linv B. Let GM := geo_mult B. Let GO := geo_one B. Let GL := geo_ldist B. Let GR := geo_rdist B. Let GMA := geo_mult_assoc B. Let GML := geo_mult_lid B. Let GMR := geo_mult_rid B. Let GS := geo_scalar B. Let GSO := geo_scalar_id B. Let GSL := geo_scalar_ldist B. Let GSR := geo_scalar_rdist B. Let GSC := geo_scalar_comp B. Let GSML := geo_scalar_lmult B. Let GSMR := geo_scalar_rmult B. Let GG := geo_grade B. Existing Instances GP GZ GN GPA GPC GPZ GPN GM GO GL GR GMA GML GMR GS GSO GSL GSR GSC GSML GSMR GG. Local Notation "'φ'" := (vector_to_geo B). Local Notation "'σ'" := (scalar_to_geo B). Local Notation "'E'" := (geo_to_ext B). Local Notation "'G'" := (ext_to_geo B). Let EP := ext_plus V. Let EZ := ext_zero V. Let EN := ext_neg V. Let EPA := ext_plus_assoc V. Let EPC := ext_plus_comm V. Let EPZ := ext_plus_lid V. Let EPN := ext_plus_linv V. Let EM := ext_mult V. Let EO := ext_one V. Let EL := ext_ldist V. Let ER := ext_rdist V. Let EML := ext_mult_lid V. Let EMR := ext_mult_rid V. Let EMA := ext_mult_assoc V. Let ES := ext_scalar V. Let ESO := ext_scalar_id V. Let ESL := ext_scalar_ldist V. Let ESR := ext_scalar_rdist V. Let ESML := ext_scalar_lmult V. Let ESMR := ext_scalar_rmult V. Existing Instances EP EZ EN EPA EPC EPZ EPN EM EO EL ER EML EMR EMA ES ESO ESL ESR ESML ESMR. Local Open Scope geo_scope. Local Open Scope nat_scope. (* end hide *) Definition geo_outer_base i j (a b : geo B) (ai : of_grade i a) (bj : of_grade j b) := grade_project (a * b) (i + j) : geo B. Lemma geo_outer_ldist_base : bilinear_extend_ldist_base geo_outer_base. Proof. intros u v w i j ui vj wj. unfold geo_outer_base. rewrite ldist. apply grade_project_plus. Qed. Lemma geo_outer_rdist_base : bilinear_extend_rdist_base geo_outer_base. Proof. intros u v w i j ui vi wj. unfold geo_outer_base. rewrite rdist. apply grade_project_plus. Qed. Lemma geo_outer_lscalar_base : bilinear_extend_lscalar_base geo_outer_base. Proof. intros a u v i j ui vj. unfold geo_outer_base. rewrite scalar_lmult. apply grade_project_scalar. Qed. Lemma geo_outer_rscalar_base : bilinear_extend_rscalar_base geo_outer_base. Proof. intros a u v i j ui vj. unfold geo_outer_base. rewrite scalar_rmult. apply grade_project_scalar. Qed. Definition geo_outer := bilinear_extend geo_outer_base : geo B → geo B → geo B. (** Note: Because Coq already uses ∧ for logical and, this symbol is actually \bigwedge, not \wedge! *) (* begin show *) Local Infix "⋀" := geo_outer (at level 34, left associativity). 
(* end show *) Theorem outer_ldist : ∀ a b c, a ⋀ (b + c) = a ⋀ b + a ⋀ c. Proof. apply bilinear_extend_ldist. - exact geo_outer_ldist_base. - exact geo_outer_rscalar_base. Qed. Theorem outer_rdist : ∀ a b c, (a + b) ⋀ c = a ⋀ c + b ⋀ c. Proof. apply bilinear_extend_rdist. - exact geo_outer_rdist_base. - exact geo_outer_lscalar_base. Qed. Theorem outer_lscalar : ∀ a u v, (a · u) ⋀ v = a · (u ⋀ v). Proof. apply bilinear_extend_lscalar. - apply geo_outer_rdist_base. - apply geo_outer_lscalar_base. Qed. Theorem outer_rscalar : ∀ a u v, u ⋀ (a · v) = a · (u ⋀ v). Proof. apply bilinear_extend_rscalar. - apply geo_outer_ldist_base. - apply geo_outer_rscalar_base. Qed. Theorem outer_lanni : ∀ a, 0 ⋀ a = 0. Proof. intros a. rewrite <- (scalar_lanni 0) at 1. rewrite outer_lscalar. apply scalar_lanni. Qed. Theorem outer_ranni : ∀ a, a ⋀ 0 = 0. Proof. intros a. rewrite <- (scalar_lanni 0) at 1. rewrite outer_rscalar. apply scalar_lanni. Qed. Lemma outer_homo : ∀ i j u v (ui : of_grade i u) (vj : of_grade j v), u ⋀ v = geo_outer_base i j u v ui vj. Proof. intros i j u v ui vj. unfold geo_outer. apply bilinear_extend_homo. - exact geo_outer_ldist_base. - exact geo_outer_rdist_base. - exact geo_outer_lscalar_base. - exact geo_outer_rscalar_base. Qed. (* begin hide *) Let EG := exterior_grade V. Let EGA := exterior_grade_mult V. Existing Instances EG EGA. (* end hide *) Theorem outer_exterior : ∀ a b, a ⋀ b = G (E a * E b). Proof. intros a' b'. rewrite <- (ext_to_geo_to_ext B a') at 1. rewrite <- (ext_to_geo_to_ext B b') at 1. remember (E a') as a. remember (E b') as b. clear a' b' Heqa Heqb. induction b as [|b b' n bn b'n IHb] using grade_induction. { rewrite mult_ranni. rewrite ext_to_geo_zero. apply outer_ranni. } rewrite ldist. do 2 rewrite ext_to_geo_plus. rewrite outer_ldist. rewrite IHb; clear IHb. apply rplus; clear b' b'n. pose proof (ext_sum V a) as [l l_eq]; subst a. induction l as [|[α al] l] using ulist_induction. { rewrite ulist_image_end, ulist_sum_end. rewrite mult_lanni. rewrite ext_to_geo_zero. apply outer_lanni. } rewrite ulist_image_add, ulist_sum_add; cbn. rewrite rdist. do 2 rewrite ext_to_geo_plus. rewrite outer_rdist. rewrite IHl; clear IHl. apply rplus; clear l. rewrite scalar_lmult. do 2 rewrite ext_to_geo_scalar. rewrite outer_lscalar. apply lscalar; clear α. assert (of_grade (H9 := GG) (list_size al) (G (list_prod (list_image (vector_to_ext V) al)))) as al_grade. { exists (list_prod (list_image (vector_to_ext V) al)). split; [>|reflexivity]. apply ext_list_grade. } assert (of_grade (H9 := GG) n (G b)) as b_grade. { exists b. split; [>exact bn|reflexivity]. } rewrite (outer_homo _ _ _ _ al_grade b_grade). unfold geo_outer_base. clear al_grade. induction al as [|v al]. { cbn. rewrite ext_to_geo_one. do 2 rewrite mult_lid. rewrite plus_lid. apply grade_project_of_grade. exact b_grade. } do 2 rewrite list_image_add; cbn. do 2 rewrite list_prod_add. remember (list_prod (list_image (vector_to_ext V) al)) as a. rewrite list_size_add. remember (list_size al) as m. assert (of_grade m a) as a_grade. { rewrite Heqa, Heqm. apply ext_list_grade. } clear al Heqa Heqm. rewrite ext_to_geo_add. rewrite rdist. rewrite grade_project_plus. assert (of_grade (H9 := GG) m (G a)) as a_grade'. { exists a. split; [>exact a_grade|reflexivity]. } rewrite mult_lneg. rewrite grade_project_neg. assert (grade_project (geo_mult_inner B v (G a) * G b) (nat_suc m + n) = 0) as eq. { nat_destruct m. - apply ext_grade_zero_scalar in a_grade as [α α_eq]; subst a. rewrite ext_to_geo_of_scalar. 
rewrite geo_mult_inner_scalar. rewrite mult_lanni. apply grade_project_zero. - pose proof (geo_mult_inner_grade B v (G a) _ a_grade') as am. apply (geo_mult_project_bigger _ _ _ _ _ am b_grade). do 2 rewrite nat_plus_lsuc. apply (trans (nat_lt_suc _)). apply nat_lt_suc. } rewrite eq; clear eq. rewrite neg_zero, plus_lid. rewrite <- mult_assoc. rewrite <- (ext_to_geo_to_ext B (φ v * _)). rewrite geo_to_ext_add. rewrite ext_to_geo_plus. rewrite grade_project_plus. rewrite ext_to_geo_inner. rewrite ext_to_geo_to_ext. rewrite mult_inner_grade_add. assert (m + n < nat_suc (nat_suc m + n)) as ltq. { rewrite nat_plus_lsuc. apply (trans (nat_lt_suc _)). apply nat_lt_suc. } rewrite (geo_mult_project_bigger _ _ _ _ _ a_grade' b_grade _ ltq). rewrite geo_mult_inner_rzero. rewrite plus_lid. rewrite ext_to_geo_project. rewrite nat_plus_lsuc. rewrite exterior_grade_add. rewrite geo_to_ext_project. rewrite IHal. rewrite geo_to_ext_to_geo. rewrite mult_assoc. reflexivity. Qed. Theorem outer_assoc : ∀ a b c : geo B, a ⋀ (b ⋀ c) = (a ⋀ b) ⋀ c. Proof. intros a b c. do 4 rewrite outer_exterior. do 2 rewrite geo_to_ext_to_geo. rewrite mult_assoc. reflexivity. Qed. Theorem outer_lid : ∀ a : geo B, 1 ⋀ a = a. Proof. intros a. rewrite outer_exterior. rewrite geo_to_ext_one. rewrite mult_lid. apply ext_to_geo_to_ext. Qed. Theorem outer_rid : ∀ a : geo B, a ⋀ 1 = a. Proof. intros a. rewrite outer_exterior. rewrite geo_to_ext_one. rewrite mult_rid. apply ext_to_geo_to_ext. Qed. Theorem outer_alternating : ∀ v, φ v ⋀ φ v = 0. Proof. intros v. rewrite outer_exterior. rewrite geo_to_ext_vector. rewrite <- ext_alternating. apply ext_to_geo_zero. Qed. Theorem outer_anticomm : ∀ u v, φ u ⋀ φ v = -(φ v ⋀ φ u). Proof. intros u v. do 2 rewrite outer_exterior. do 2 rewrite geo_to_ext_vector. rewrite ext_anticomm. apply ext_to_geo_neg. Qed. Theorem outer_reverse : ∀ a b, (a ⋀ b)† = b† ⋀ a†. Proof. intros a b. do 2 rewrite outer_exterior. rewrite ext_to_geo_reverse. rewrite ext_reverse_mult. do 2 rewrite geo_to_ext_reverse. reflexivity. Qed. Theorem outer_involute : ∀ a b, (a ⋀ b)∗ = a∗ ⋀ b∗. Proof. intros a b. do 2 rewrite outer_exterior. rewrite ext_to_geo_involute. rewrite ext_involute_mult. do 2 rewrite geo_to_ext_involute. reflexivity. Qed. Theorem outer_involute_swap : ∀ a X, φ a ⋀ X = X∗ ⋀ φ a. Proof. intros a X. do 2 rewrite outer_exterior. rewrite geo_to_ext_vector. rewrite ext_involute_swap. rewrite geo_to_ext_involute. reflexivity. Qed. (* begin hide *) End GeometricOuter. (* end hide *) Infix "⋀" := (geo_outer _) (at level 34, left associativity) : geo_scope.
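In conventional notation, the main identities established by this file read as follows, with $E$ and $G$ the geometric-to-exterior isomorphism and its inverse, and $\varphi$ the embedding of vectors:

\[
  a \wedge b = G\bigl(E(a)\,E(b)\bigr), \qquad
  \varphi v \wedge \varphi v = 0, \qquad
  \varphi u \wedge \varphi v = -\,\varphi v \wedge \varphi u, \qquad
  (a \wedge b)^{\dagger} = b^{\dagger} \wedge a^{\dagger}.
\]

These correspond to `outer_exterior`, `outer_alternating`, `outer_anticomm` and `outer_reverse` above.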
{-# LANGUAGE BangPatterns #-} module BBrotRender(showCells, render) where import Codec.Picture import Control.Monad import Data.Aeson(decode) import Data.Array.IO import Data.Array.Unboxed import qualified Data.ByteString.Lazy as BS import Data.Complex import Data.Maybe import Data.List import Data.Word(Word32, Word8) import System.Console.CmdArgs(whenNormal) import System.Exit import System.IO import Text.Read(readMaybe) import BBrotCompute import BBrotConf import BBrotSelection xmin, xmax, ymin, ymax :: Double xmin = realPart loCorner xmax = realPart hiCorner ymin = imagPart loCorner ymax = imagPart hiCorner xrange, yrange :: Double xrange = xmax - xmin yrange = ymax - ymin orbs :: Complex Double -> [Complex Double] orbs !(x0 :+ y0) = unfoldr f (x0, y0) where f :: (Double, Double) -> Maybe (Complex Double, (Double, Double)) f (!x, !y) = let !x2 = x * x !y2 = y * y !newx = (x2 - y2 + x0) !newy = (2 * x * y + y0) in if x2 + y2 > 4 then Nothing else Just (newx :+ newy, (newx, newy)) inWindow :: Complex Double -> Bool inWindow z = x >= xmin && x < xmax && y >= ymin && y < ymax where x = realPart z y = imagPart z norm :: (Double -> Double) -> Word32 -> Word32 -> Word32 -> Word8 norm curveFunc minV maxV cnt = v where t = fromIntegral (cnt - minV) / fromIntegral (maxV - minV) u = curveFunc $ min 1.0 (2.0 * t) v = floor $ u * 255 :: Word8 gray :: Word8 -> PixelRGB8 gray x = PixelRGB8 x x x reddish :: Word8 -> PixelRGB8 reddish x = PixelRGB8 r g 0 where xr = min x 127 xg = min (x - xr) 127 r = 2 * xr g = 2 * xg flames :: Word8 -> PixelRGB8 flames x = PixelRGB8 r g b where xr = min 120 x xg = min 120 (x - xr) xb = min 15 (x - xr - xg) r = xr * 2 + xr `div` 10 g = xg * 2 + xg `div` 10 b = xb * 17 toPixel :: (Double -> Double) -> Word32 -> Word32 -> (Word8 -> PixelRGB8) -> Word32 -> PixelRGB8 toPixel curveFunc smallest biggest scheme = scheme . norm curveFunc smallest biggest plotPix :: IOUArray (Int, Int) Word32 -> (Int, Int) -> IO () plotPix !img (!x, !y) = do v <- readArray img (x, y) writeArray img (x, y) (v + 1) rel :: Double -> Double -> Double -> Double rel !lo !realrange !v = (v - lo) / realrange toPix :: Int -> Double -> Double -> Double -> Int toPix !pixrange !realmin !realrange !a = floor $ (fromIntegral pixrange) * (rel realmin realrange a) toImgCoords :: Int -> Int -> Complex Double -> (Int, Int) toImgCoords !xres !yres !z = (toPix xres xmin xrange $ realPart z, toPix yres ymin yrange $ imagPart z) toPlaneCoords :: Int -> Int -> Int -> Int -> Complex Double toPlaneCoords !xres !yres !i !j = x :+ y where x = xmin + xrange * fromIntegral i / fromIntegral xres y = ymin + yrange * fromIntegral j / fromIntegral yres emptyPS :: PointSelection emptyPS = PointSelection { pointList = [] , commandLine = Nothing , randGen = Nothing , timeStamp = Nothing } loadPointsJson :: String -> IO (Maybe PointSelection) loadPointsJson filepath = do contents <- BS.readFile filepath return (decode contents :: Maybe PointSelection) loadPointsComplex :: String -> IO (Maybe PointSelection) loadPointsComplex filepath = do contents <- readFile filepath let zs = concatMap g $ lines contents where g = maybeToList . readMaybe :: String -> [Complex Double] ps = map f zs where f (x :+ y) = BBPoint x y 1 return $ if null ps then Nothing else Just (emptyPS { pointList = ps }) render :: BBrotConf -> IO () render conf = do -- Load points of interest, render their orbits into a PNG image. whenNormal $ putStrLn $ "Loading cache " ++ icachepath conf ++ " ..." 
maybePS <- if isComplex conf then loadPointsComplex $ icachepath conf else loadPointsJson $ icachepath conf when (isNothing maybePS) $ do hPutStrLn stderr $ "failed to parse " ++ icachepath conf unless (isComplex conf) $ hPutStrLn stderr "try with flag -z" exitFailure let psel = pointList $ fromMaybe emptyPS maybePS selected = map (\(BBPoint x y _) -> x :+ y) psel orbits = concatMap orbs selected result = filter inWindow $ if dontRender conf then selected else orbits whenNormal $ putStrLn $ "selected points: " ++ show (length selected) let xres = xpixels conf yres = ypixels conf coords = map (toImgCoords xres yres) result img <- newArray ((0, 0), (xres - 1, yres - 1)) 0 :: IO (IOUArray (Int, Int) Word32) mapM_ (plotPix img) coords whenNormal $ putStrLn "done plotting" img2 <- freeze img :: IO (UArray (Int, Int) Word32) let values = elems img2 !v0 = head values (!total, !smallest, !biggest) = foldl' f (0, v0, v0) values where f (!x, !y, !z) !a = (a + x, min a y, max a z) whenNormal $ do putStrLn $ "img points: " ++ show total putStrLn $ "value range: " ++ show smallest ++ "-" ++ show biggest let outfile = fromMaybe defPath (imagepath conf) where defPath = icachepath conf ++ ".png" whenNormal $ putStrLn $ "Writing " ++ outfile ++ " ..." let colorScheme = case palette conf of Flames -> flames Gray -> gray Reddish -> reddish curveFunc = case curve conf of Line -> id Root -> sqrt Square -> (**2) pixFunc = toPixel curveFunc smallest biggest colorScheme renderer i j = pixFunc $ img2!(i,j) writePng outfile $ generateImage (flip $ renderer) yres xres showCells :: BBrotConf -> IO () showCells conf = do -- Compute selection of cells (squares in the complex plane) that -- are close to the edge of the Mandelbrot set, and render them as -- animated GIF. let step = gridStep conf bailout = maxIters conf xres = 1000 yres = 1000 !cells = selectCells step bailout whenNormal $ putStrLn $ "cell count: " ++ show (length cells) let red = PixelRGB8 255 0 0 black = PixelRGB8 0 0 0 grey = PixelRGB8 64 64 64 whenNormal $ putStrLn "rendering cells" cellMap <- newArray ((0, 0), (xres - 1, yres - 1)) False :: IO (IOUArray (Int, Int) Bool) forM_ cells $ \(x, y) -> do let cornerA = (x - step / 2) :+ (y - step / 2) cornerB = (x + step / 2) :+ (y + step / 2) (imin, jmin) = toImgCoords xres yres cornerA (imax, jmax) = toImgCoords xres yres cornerB coords = [ (i, j) | i <- [imin..imax], j <- [jmin..jmax], i >= 0 && i < xres, j >= 0 && j < yres ] forM_ coords $ \(i, j) -> writeArray cellMap (i, j) True whenNormal $ putStrLn "rendering mandel" let imgMandel = generateImage (flip $ mandelRenderer) yres xres inMandelbrotSet z = inSet 0 bailout z mandelRenderer i j = if inMandelbrotSet $ toPlaneCoords xres yres i j then grey else black cellMapPure <- freeze cellMap :: IO (UArray (Int, Int) Bool) let imgCells = generateImage (flip $ cellRenderer) yres xres cellRenderer i j = if cellMapPure!(i,j) then red else mandelRenderer i j whenNormal $ putStrLn $ "writing " ++ animpath conf ++ " ..." case writeGifAnimation (animpath conf) 100 LoopingForever [imgMandel, imgCells] of Left err -> putStrLn $ "error generating gif animation: " ++ err Right action -> action
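For orientation, a minimal Python sketch of the accumulation loop implemented by `orbs`, `inWindow`, `toImgCoords` and `plotPix` above (the window and resolution values here are illustrative, not the module's configuration):

import numpy as np

def orbit(c, max_iter=1000):
    # Mirror of `orbs`: iterate z -> z^2 + c starting from z0 = c,
    # emitting each new point, and stop once |z|^2 exceeds 4.
    z, pts = c, []
    for _ in range(max_iter):
        if z.real * z.real + z.imag * z.imag > 4:
            break
        z = z * z + c
        pts.append(z)
    return pts

def accumulate(samples, xres=1000, yres=1000, window=(-2.0, 1.0, -1.5, 1.5)):
    # Histogram every orbit point that falls inside the view window
    # (cf. `inWindow`, `toImgCoords` and `plotPix`).
    xmin, xmax, ymin, ymax = window
    img = np.zeros((xres, yres), dtype=np.uint32)
    for c in samples:
        for z in orbit(c):
            if xmin <= z.real < xmax and ymin <= z.imag < ymax:
                i = int(xres * (z.real - xmin) / (xmax - xmin))
                j = int(yres * (z.imag - ymin) / (ymax - ymin))
                img[i, j] += 1
    return img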
State Before: α✝ : Sort u β✝ : Sort v γ✝ : Sort w α : Type u_1 β : Type u_2 γ : Type u_3 s : Set α t : Set β u : Set γ ⊢ ↑(prodAssoc α β γ) ⁻¹' s ×ˢ t ×ˢ u = (s ×ˢ t) ×ˢ u State After: case h α✝ : Sort u β✝ : Sort v γ✝ : Sort w α : Type u_1 β : Type u_2 γ : Type u_3 s : Set α t : Set β u : Set γ x✝ : (α × β) × γ ⊢ x✝ ∈ ↑(prodAssoc α β γ) ⁻¹' s ×ˢ t ×ˢ u ↔ x✝ ∈ (s ×ˢ t) ×ˢ u Tactic: ext State Before: case h α✝ : Sort u β✝ : Sort v γ✝ : Sort w α : Type u_1 β : Type u_2 γ : Type u_3 s : Set α t : Set β u : Set γ x✝ : (α × β) × γ ⊢ x✝ ∈ ↑(prodAssoc α β γ) ⁻¹' s ×ˢ t ×ˢ u ↔ x✝ ∈ (s ×ˢ t) ×ˢ u State After: no goals Tactic: simp [and_assoc]
theory SetComprehension imports GZF_Base begin context GZF begin text \<open>Mapping a \<lambda>-function over a set\<close> definition RepFun :: "['a, 'a \<Rightarrow> 'a] \<Rightarrow> 'a" where "RepFun x F \<equiv> Repl x (\<lambda>a b. b = F a)" text \<open>Filtering a set using a predicate\<close> definition Collect :: "'a \<Rightarrow> ['a \<Rightarrow> bool] \<Rightarrow> 'a" where "Collect x P \<equiv> Repl x (\<lambda>a b. a = b \<and> P a)" end syntax "_RepFun" :: "['a, pttrn, 'a] => 'a" (\<open>(1{_ |/ _ \<in> _})\<close> [51,0,51]) "_Collect" :: "[pttrn, 'a, bool ] \<Rightarrow> 'a" (\<open>(1{_ \<in> _ |/ _})\<close>) translations "{c | b\<in>x}" \<rightleftharpoons> "CONST RepFun x (\<lambda>b. c)" "{b\<in>x | P}" \<rightleftharpoons> "CONST Collect x (\<lambda>b. P)" context GZF begin (*We should only have to know that F returns a SetMem when it's applied to a member of a given set, but this would require making ReplFun take an extra parameter for the set.*) (*Instead, it might be better to have a version of RepFun that restricts F to the domain of the set, which adds the proof obligation \<forall>a \<in> x. F a : SetMem to lemmas*) lemma any_replpred : "x : Set \<Longrightarrow> (\<lambda>a b. b = F a) : ReplPred x" by (rule replpredI, unfold tuniq_def Uniq_def, auto) lemma RepFun_typ_set : "RepFun : (\<Pi> x : Set. Any \<rightarrow> Set)" unfolding RepFun_def by (rule depfunI, rule funI, use any_replpred repl_set in auto) definition mem_fun_ty :: \<open>'a \<Rightarrow> ('a \<Rightarrow> bool) \<Rightarrow> (('a \<Rightarrow> 'a) \<Rightarrow> bool)\<close> (infixr \<open>\<leadsto>\<close> 50) where [typdef] : \<open>x \<leadsto> P \<equiv> MemOf x \<rightarrow> P\<close> lemma mem_funI : assumes "\<And>b. b \<in> x \<Longrightarrow> F b : P" shows "F : x \<leadsto> P" using assms unfolding mem_fun_ty_def MemOf_def fun_ty_def has_ty_def by auto lemma mem_funE : assumes "F : x \<leadsto> P" "b \<in> x" shows "F b : P" using assms unfolding mem_fun_ty_def MemOf_def fun_ty_def has_ty_def by auto lemma replfun_funpred : assumes x : "x : Set" and F : "F : x \<leadsto> \<beta>" shows "(\<lambda>a b. b = F a) : ReplFunPred x \<beta>" unfolding ReplFunPred_def by (rule intI, rule any_replpred[OF x], rule bpredI, use memofD mem_funE[OF F] in auto) (* lemma replfunI : assumes x : "x : Set" and setfun:"\<And>a. a \<in> x \<Longrightarrow> F a : SetMem" and funty:"\<And>a. a \<in> x \<Longrightarrow> a : \<alpha> \<Longrightarrow> F a : \<beta>" shows "F : ReplFun x \<alpha> \<beta>" unfolding ReplFun_def by (rule intI, rule mem_funI[OF setfun], auto, rule funI, drule intE, insert funty, unfold MemOf_def has_ty_def, auto) *) (* corollary setmap_replpred : "x : Set \<Longrightarrow> F : ReplFun x \<alpha> \<beta> \<Longrightarrow> (\<lambda>a b. b = F a) : ReplPred x" by (rule subtypE[OF funpred_replpred replfun_funpred]) corollary setmap_binpred : "x : Set \<Longrightarrow> F : ReplFun x \<alpha> \<beta> \<Longrightarrow> (\<lambda>a b. b = F a) : BinPred (\<alpha> \<bar> MemOf x) \<beta>" by (rule subtypE[OF funpred_binpred replfun_funpred]) *) lemma RepFun_typ : "RepFun : (\<Pi> x : Set. (x \<leadsto> \<beta>) \<rightarrow> SetOf \<beta>)" proof (rule depfunI, rule funI) fix x :: 'a and F :: "'a \<Rightarrow> 'a" assume "x : Set" "F : x \<leadsto> \<beta>" hence "(\<lambda>a b. 
b = F a) : ReplFunPred x \<beta>" using replfun_funpred by auto thus "RepFun x F : SetOf \<beta>" unfolding RepFun_def by (rule funE[OF depfunE[OF Repl_typ2 \<open>x : Set\<close>]]) qed lemmas repfun_setof = funE[OF depfunE[OF RepFun_typ]] lemmas repfun_set = funE[OF depfunE[OF RepFun_typ_set] anyI] lemma repfunI : assumes x : "x : Set" and b : "b \<in> x" and Fb : "F b : SetMem" shows "F b \<in> {F b | b \<in> x}" unfolding RepFun_def by (rule replaceI[OF x any_replpred[OF x]], use b Fb in auto) lemma repfun_eqI : assumes "x : Set" shows "\<lbrakk> c : SetMem ; c = F b ; b \<in> x \<rbrakk> \<Longrightarrow> c \<in> {F a | a \<in> x}" using repfunI[OF assms] by auto lemma repfunE : assumes x : "x : Set" and b : "b \<in> RepFun x F" obtains a where "a \<in> x" "b : SetMem" "b = F a" using b unfolding RepFun_def using replaceE[OF x any_replpred[OF x], of b F] by blast lemma repfun_cong : assumes x : "x : Set" and y : "y : Set" shows "\<lbrakk> x = y ; \<And>a. a \<in> y \<Longrightarrow> F a = G a \<rbrakk> \<Longrightarrow> RepFun x F = RepFun y G" unfolding RepFun_def using replace_cong[OF x any_replpred[OF x] y any_replpred[OF y]] by simp lemma repfun_iff : assumes "x : Set" shows "b \<in> RepFun x F \<longleftrightarrow> (\<exists>c \<in> x. b = F c) \<and> b : SetMem" using repfun_eqI[OF assms] repfunE[OF assms] by blast lemma repfun_union : assumes x : "x : Set" and F : "F : x \<leadsto> Set" shows "b \<in> \<Union> {F a | a \<in> x} \<longleftrightarrow> (\<exists>a \<in> x. b \<in> F a)" proof - have R:"RepFun x F : SetOf Set" by (rule funE[OF depfunE[OF RepFun_typ x] F]) show ?thesis unfolding union_iff[OF R] bex_def rex_def repfun_iff[OF \<open>x : Set\<close>] by (auto, use set_setmem[OF mem_funE[OF F]] in auto ) qed lemma repfun_union_subset : assumes x : "x : Set" and y : "y : Set" and F : "F : x \<leadsto> Set" shows "a \<in> x \<Longrightarrow> y \<subseteq> F a \<Longrightarrow> y \<subseteq> \<Union> {F a | a \<in> x}" proof - assume "a \<in> x" "y \<subseteq> F a" hence "F a : Set" using mem_funE[OF F] by auto have "\<Union> RepFun x F : Set" using union_set[OF repfun_setof[OF x F]] . from \<open>a \<in> x\<close> \<open>y \<subseteq> F a\<close> show ?thesis unfolding repfun_union[OF x F] subset_iff by auto qed subsection \<open>Rules for subset comprehension\<close> lemma collect_replpred : "x : Set \<Longrightarrow> (\<lambda>a b. a = b \<and> P a) : ReplPred x" using setmemI unfolding ReplPred_def tuniq_def Uniq_def has_ty_def by auto (* rule, rule, rule, drule setmemI[OF \<open>x : Set\<close>], unfold has_ty_def, auto *) lemma Collect_typ : "Collect : Set \<rightarrow> Any \<rightarrow> Set" proof (rule funI)+ fix x and P :: "'a => bool" assume "x : Set" moreover hence "(\<lambda>a b. 
a = b \<and> P a) : ReplPred x" by (rule collect_replpred) ultimately show "Collect x P : Set" unfolding Collect_def by (rule repl_set) qed lemmas collect_set = funE[OF funE[OF Collect_typ] anyI] lemma collect_iff : assumes x : "x : Set" shows "a \<in> { a \<in> x | P a} \<longleftrightarrow> a \<in> x \<and> P a" unfolding Collect_def replace_iff[OF \<open>x : Set\<close> collect_replpred[OF \<open>x : Set\<close>]] using setmemI[OF x] by auto lemma collectI : assumes "x : Set" shows "\<lbrakk> a \<in> x ; P a \<rbrakk> \<Longrightarrow> a \<in> { a \<in> x | P a }" using collect_iff[OF \<open>x : Set\<close>] by auto lemma collectE : assumes "x : Set" shows "\<lbrakk> a \<in> { a \<in> x | P a } ; \<lbrakk> a \<in> x ; P a \<rbrakk> \<Longrightarrow> R \<rbrakk> \<Longrightarrow> R" using collect_iff[OF assms] by auto lemma collectD1 : assumes "x : Set" shows "a \<in> { a\<in>x | P a } \<Longrightarrow> a \<in> x" using collectE[OF assms] by auto lemma collectD2 : assumes "x : Set" shows "a \<in> { a\<in>x | P a } \<Longrightarrow> P a" using collectE[OF assms] by auto lemma collect_subset : assumes "x : Set" shows "{ a\<in>x | P a } \<subseteq> x" proof (rule subsetI) fix a assume "a \<in> Collect x P" thus "a \<in> x" by (rule collectD1[OF \<open>x : Set\<close>]) qed lemma collect_typ_subset : assumes "x : SetOf \<alpha>" shows "{ a\<in>x | P a } : SetOf \<alpha>" proof (rule setof_subset[OF _ \<open>x : SetOf \<alpha>\<close>]) have "x : Set" by (rule setof_set[OF \<open>x : SetOf \<alpha>\<close>]) thus "Collect x P : Set" and "Collect x P \<subseteq> x" by (rule collect_set, rule collect_subset) qed lemma Collect_cong [cong]: assumes "x : Set" "y : Set" shows [cong]: "\<lbrakk> x = y; \<And>a. a \<in> y \<Longrightarrow> P a \<longleftrightarrow> Q a \<rbrakk> \<Longrightarrow> { a\<in>x | P a } = { a\<in>y | Q a }" unfolding Collect_def using collect_replpred[OF \<open>x : Set\<close>, of P] collect_replpred[of _ Q] assms by auto end end
import numpy as np import numpy.testing as npt import pandas as pd from unittest import TestCase, main from nimble import Events class EvTestCase(TestCase): @staticmethod def assertStartStops(events, vstarts, vstops): npt.assert_array_equal(events._starts, vstarts) npt.assert_array_equal(events._stops, vstops) class TestDebouncing(EvTestCase): def setUp(self): condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) self.cond = condarr > 0 def test_adeb(self): vstarts = np.array([2, 7]) vstops = np.array([4, 10]) events = Events(self.cond, period=1, adeb=2).find() self.assertStartStops(events, vstarts, vstops) def test_ddeb(self): vstarts = np.array([2, 7]) vstops = np.array([4, 12]) events = Events(self.cond, period=1, ddeb=2).find() self.assertStartStops(events, vstarts, vstops) def test_adeb_ddeb(self): vstarts = np.array([2]) vstops = np.array([12]) events = Events(self.cond, period=1, adeb=2, ddeb=3.1).find() self.assertStartStops(events, vstarts, vstops) def test_nonint_deb(self): vstarts = np.array([2, 7, 11]) vstops = np.array([4, 10, 12]) events = Events(self.cond, period=1, adeb=float(0.00000001), ddeb=float(0.99999999)).find() self.assertStartStops(events, vstarts, vstops) def test_period_100ms(self): vstarts = np.array([2, 7]) vstops = np.array([4, 12]) events = Events(self.cond, period=0.1, adeb=0.15, ddeb=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_period_120ms(self): vstarts = np.array([2, 7]) vstops = np.array([4, 12]) events = Events(self.cond, period=0.12, adeb=0.15, ddeb=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_no_events_found(self): vstarts = np.array([]) vstops = np.array([]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x > 0, period=1, adeb=0.15, ddeb=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_event_always_active(self): vstarts = np.array([0]) vstops = np.array([8]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x == 0, period=1, adeb=0.15, ddeb=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_end_conditions(self): vstarts = np.array([0, 6]) vstops = np.array([2, 8]) x = np.array([1, 1, 0, 0, 0, 0, 1, 1]) events = Events(x == 1, period=1, adeb=2, ddeb=2).find() self.assertStartStops(events, vstarts, vstops) class TestDurationFilter(EvTestCase): def setUp(self): condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) self.cond = condarr > 0 def test_mindur(self): vstarts = np.array([2, 7]) vstops = np.array([4, 10]) events = Events(self.cond, period=1, mindur=2).find() self.assertStartStops(events, vstarts, vstops) def test_maxdur(self): vstarts = np.array([2, 11]) vstops = np.array([4, 12]) events = Events(self.cond, period=1, maxdur=2).find() self.assertStartStops(events, vstarts, vstops) def test_mindur_maxdur(self): vstarts = np.array([2]) vstops = np.array([4]) events = Events(self.cond, period=1, mindur=2, maxdur=2.5).find() self.assertStartStops(events, vstarts, vstops) def test_nonint_durs(self): vstarts = np.array([2]) vstops = np.array([4]) events = Events(self.cond, period=1, mindur=float(1.00000001), maxdur=float(2.99999999)).find() self.assertStartStops(events, vstarts, vstops) def test_period_100ms(self): vstarts = np.array([2]) vstops = np.array([4]) events = Events(self.cond, period=0.1, mindur=0.15, maxdur=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_period_120ms(self): vstarts = np.array([2]) vstops = np.array([4]) events = Events(self.cond, period=0.12, mindur=0.15, maxdur=0.35).find() self.assertStartStops(events, 
vstarts, vstops) def test_no_events_found(self): vstarts = np.array([]) vstops = np.array([]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x > 0, period=1, mindur=0.15, maxdur=0.2).find() self.assertStartStops(events, vstarts, vstops) def test_event_always_active(self): vstarts = np.array([0]) vstops = np.array([8]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x == 0, period=1, mindur=0.15, maxdur=20).find() self.assertStartStops(events, vstarts, vstops) def test_end_conditions(self): vstarts = np.array([0, 6]) vstops = np.array([2, 8]) x = np.array([1, 1, 0, 0, 0, 0, 1, 1]) events = Events(x == 1, period=1, mindur=2, maxdur=2).find() self.assertStartStops(events, vstarts, vstops) class TestEventOffset(EvTestCase): def setUp(self): condarr = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1]) self.cond = condarr > 0 def test_startoffset(self): vstarts = np.array([1, 6, 10]) vstops = np.array([4, 10, 12]) events = Events(self.cond, period=1, startoffset=-1).find() self.assertStartStops(events, vstarts, vstops) def test_stopoffset(self): vstarts = np.array([2, 7, 11]) vstops = np.array([5, 11, 12]) events = Events(self.cond, period=1, stopoffset=1).find() self.assertStartStops(events, vstarts, vstops) def test_startoffset_stopoffset(self): vstarts = np.array([1, 6, 10]) vstops = np.array([5, 11, 12]) events = Events(self.cond, period=1, startoffset=-1, stopoffset=1).find() self.assertStartStops(events, vstarts, vstops) def test_period_100ms(self): vstarts = np.array([1, 6, 10]) vstops = np.array([5, 11, 12]) events = Events(self.cond, period=0.1, startoffset=-0.1, stopoffset=0.1).find() self.assertStartStops(events, vstarts, vstops) def test_period_120ms(self): vstarts = np.array([1, 6, 10]) vstops = np.array([5, 11, 12]) events = Events(self.cond, period=0.12, startoffset=-0.1, stopoffset=0.1).find() self.assertStartStops(events, vstarts, vstops) def test_no_events_found(self): vstarts = np.array([]) vstops = np.array([]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x > 0, period=1, startoffset=-1, stopoffset=1).find() self.assertStartStops(events, vstarts, vstops) def test_event_always_active(self): vstarts = np.array([0]) vstops = np.array([8]) x = np.array([0, 0, 0, 0, 0, 0, 0, 0]) events = Events(x == 0, period=1, startoffset=-1, stopoffset=1).find() self.assertStartStops(events, vstarts, vstops) def test_end_conditions(self): vstarts = np.array([0, 5]) vstops = np.array([3, 8]) x = np.array([1, 1, 0, 0, 0, 0, 1, 1]) events = Events(x == 1, period=1, startoffset=-1, stopoffset=1).find() self.assertStartStops(events, vstarts, vstops) class TestAsArrayMethod(TestCase): def setUp(self): conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]) condition = (conditional_array > 0) self.events = Events(condition, period=1).find() def test_default_parameters(self): """Test as_array() with default settings""" validation_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1]) npt.assert_array_equal(validation_array, self.events.as_array()) def test_as_array_false_value(self): """Test as_array() with low value""" validation_array = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1]) npt.assert_array_equal(validation_array, self.events.as_array( false_values=-1)) def test_as_array_true_value(self): """Test as_array() with high value""" validation_array = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5]) npt.assert_array_equal(validation_array, self.events.as_array( true_values=5)) def test_as_array_false_and_true_value(self): """Test as_array() with low and high 
values"""
        validation_array = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
        npt.assert_array_equal(validation_array, self.events.as_array(
            false_values=-1, true_values=5))

    def test_type(self):
        typ = type(self.events.as_array(false_values=-1, true_values=5))
        self.assertEqual(typ, np.ndarray)


class TestAsSeries(TestCase):
    def setUp(self):
        conditional_array = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        condition = (conditional_array > 0)
        self.events = Events(condition, period=1).find()

    def test_default_parameters(self):
        """Test as_series() with default settings"""
        validation_series = pd.Series([0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1])
        npt.assert_array_equal(validation_series, self.events.as_series())

    def test_as_series_false_value(self):
        """Test as_series() with low value"""
        validation_series = np.array([-1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1])
        npt.assert_array_equal(validation_series, self.events.as_series(
            false_values=-1))

    def test_as_series_true_value(self):
        """Test as_series() with high value"""
        validation_series = np.array([0, 5, 5, 5, 0, 0, 0, 5, 5, 0, 5, 5])
        npt.assert_array_equal(validation_series, self.events.as_series(
            true_values=5))

    def test_as_series_false_and_true_value(self):
        """Test as_series() with low and high values"""
        validation_series = np.array([-1, 5, 5, 5, -1, -1, -1, 5, 5, -1, 5, 5])
        npt.assert_array_equal(validation_series, self.events.as_series(
            false_values=-1, true_values=5))

    def test_type(self):
        typ = type(self.events.as_series(false_values=-1, true_values=5))
        self.assertEqual(typ, pd.core.series.Series)


class TestDurations(TestCase):
    def setUp(self):
        condition_array = np.array([1, 0, 1, 1, 1, 1, 0, 0, 1, 1,
                                    0, 0, 0, 1, 0, 0, 0, 1, 0, 0])
        condition = (condition_array > 0)
        self.events = Events(condition, period=1/3, adeb=0.5, ddeb=1).find()

    def test_durations(self):
        # validation_array = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
        #                              0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        validation_durations = [(8 / 3)]
        npt.assert_array_equal(validation_durations, self.events.durations)


class TestEventDetection(TestCase):
    def test_default_parameters(self):
        """Test event detection with only a supplied condition"""
        np.random.seed(10)
        # np.random.random_integers was deprecated and later removed from
        # NumPy; randint(0, 2) draws from the same {0, 1} values.
        validation_array = np.random.randint(0, 2, 100)
        condition = (validation_array > 0)
        events = Events(condition, period=1).find()
        npt.assert_array_equal(validation_array, events.as_array())

    def test_multi_input_condition_event(self):
        """Test arrays that have multi-input conditions"""
        x = np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 0])
        y = np.array([0, 0, 1, 1, 1, 0, 0, 1, 0, 1])
        validation_array = np.array([0, 0, 1, 1, 0, 0, 0, 1, 0, 0])
        condition = ((x > 0) & (y > 0))
        events = Events(condition, period=1).find()
        npt.assert_array_equal(validation_array, events.as_array())


class TestSpecialMethods(TestCase):
    def setUp(self):
        condition_array = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
        self.condition = (condition_array > 0)
        self.events = Events(self.condition, period=1).find()

    def test__len__(self):
        self.assertEqual(4, len(self.events))

    def test__eq__(self):
        other = Events(self.condition, period=1).find()
        self.assertEqual(self.events, other)


class TestAttributes(TestCase):
    def setUp(self):
        condition_array = np.array([1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1])
        self.condition = (condition_array > 0)

    def test_period(self):
        self.assertRaises(ValueError, Events, self.condition, period=0)

    def test_startoffset(self):
        self.assertRaises(ValueError, Events, self.condition, period=1,
                          startoffset=1)

    def test_stopoffset(self):
        self.assertRaises(ValueError, Events, self.condition,
period=1, stopoffset=-1)


class TestProperties(TestCase):
    def setUp(self):
        self.events = Events(np.array([False, False]), period=0.12,
                             adeb=1, ddeb=1, mindur=1, maxdur=1,
                             startoffset=-1, stopoffset=1)

    def test_adeb(self):
        self.assertEqual(self.events._adeb, 9)

    def test_ddeb(self):
        self.assertEqual(self.events._ddeb, 9)

    def test_mindur(self):
        self.assertEqual(self.events._mindur, 9)

    def test_maxdur(self):
        self.assertEqual(self.events._maxdur, 8)

    def test_startoffset(self):
        self.assertEqual(self.events._startoffset, -9)

    def test_stopoffset(self):
        self.assertEqual(self.events._stopoffset, 9)


if __name__ == '__main__':
    main()
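Read together, these tests document the `Events` API; a short usage sketch assembled only from calls exercised above (the `nimble` import path is taken from the test file, everything else is illustrative):

import numpy as np
from nimble import Events

signal = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0])
events = Events(signal > 0,  # boolean activation condition
                period=1.0,  # sample period in seconds
                adeb=2,      # activation debounce
                mindur=2).find()  # drop events shorter than 2 s
print(len(events), events.durations)
print(events.as_array(false_values=0, true_values=1))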
Formal statement is: lemma base_le: "a \<in> s \<Longrightarrow> base j \<le> a j" Informal statement is: If $a$ is an element of $s$, then $\mathrm{base}\,j \leq a\,j$ for every coordinate $j$.
lemma open_path_connected_component_set: fixes S :: "'a :: real_normed_vector set" shows "open S \<Longrightarrow> path_component_set S x = connected_component_set S x"
Require Import OrderedTypeEx FSetFacts FSetProperties. Set Implicit Arguments. Unset Strict Implicit. Unset Standard Proposition Elimination Names. Module UsualFacts (E:UsualOrderedType)(M:S with Module E:=E). Module ME := OrderedTypeFacts E. Module MF := FSetFacts.Facts M. Module MP := FSetProperties.Properties M. Import ME. Import MF. Import M. Import Logic. (* to unmask [eq] *) Import Peano. (* to unmask [lt] *) (** * Specifications written using equivalences *) Section IffSpec. Variable s s' s'' : t. Variable x y z : elt. Lemma singleton_iff : In y (singleton x) <-> x=y. Proof. apply singleton_iff. Qed. Lemma add_iff : In y (add x s) <-> x=y \/ In y s. Proof. apply add_iff. Qed. Lemma add_neq_iff : x<>y -> (In y (add x s) <-> In y s). Proof. apply add_neq_iff. Qed. Lemma remove_iff : In y (remove x s) <-> In y s /\ x<>y. Proof. apply remove_iff. Qed. Lemma remove_neq_iff : x<>y -> (In y (remove x s) <-> In y s). Proof. apply remove_neq_iff. Qed. Variable f : elt->bool. Lemma filter_iff : In x (filter f s) <-> In x s /\ f x = true. Proof. apply filter_iff; congruence. Qed. Lemma for_all_iff : For_all (fun x => f x = true) s <-> for_all f s = true. Proof. apply for_all_iff; congruence. Qed. Lemma exists_iff : Exists (fun x => f x = true) s <-> exists_ f s = true. Proof. apply exists_iff; congruence. Qed. Lemma elements_iff : In x s <-> List.In x (elements s). Proof. rewrite elements_iff. rewrite SetoidList.InA_alt. split; intros. destruct H as (u,(H1,H2)); compute in H1; subst; auto. exists x; split; compute; auto. Qed. End IffSpec. (** Useful tactic for simplifying expressions like [In y (add x (union s s'))] *) Ltac set_iff := repeat (progress ( rewrite add_iff || rewrite remove_iff || rewrite singleton_iff || rewrite union_iff || rewrite inter_iff || rewrite diff_iff || rewrite empty_iff)). (** Two equivalent sorted lists are in fact equal. *) Definition equivlist (l l':list elt) := forall x, List.In x l <-> List.In x l'. Lemma list_unique : forall l l', sort E.lt l -> sort E.lt l' -> equivlist l l' -> l=l'. Proof. induction l; destruct l'; simpl; auto; intros. elimtype False; rewrite (H1 t0); simpl; auto. elimtype False; rewrite <- (H1 a); simpl; auto. inversion_clear H; inversion_clear H0. assert (forall y, List.In y l -> E.lt a y). intros; eapply Sort_Inf_In with (l:=l); eauto. assert (forall y, List.In y l' -> E.lt t0 y). intros; eapply Sort_Inf_In with (l:=l'); eauto. clear H3 H4. assert (a=t0). destruct (H1 a). destruct (H1 t0). simpl in H3; destruct H3; auto. simpl in H7; destruct H7; auto. elimtype False; generalize (H5 _ H3) (H0 _ H7). ME.order. f_equal; auto. apply IHl; auto. split; intros. destruct (H1 x). destruct H6; simpl; auto. elimtype False; generalize (H0 _ H4); subst a; subst x; ME.order. destruct (H1 x). destruct H7; simpl; auto. elimtype False; generalize (H5 _ H4); subst a; subst x; ME.order. Qed. Lemma Equal_elements_equivlist : forall s s', s[=]s' <-> equivlist (elements s) (elements s'). Proof. unfold Equal; split; intros. red; intros. do 2 rewrite <- elements_iff; auto. do 2 rewrite elements_iff; auto. Qed. Lemma Equal_eq_elements : forall s s', s[=]s' <-> elements s = elements s'. Proof. split; intros. apply list_unique; auto with set. rewrite <- Equal_elements_equivlist; auto. red; intros; do 2 rewrite elements_iff; auto. rewrite H; split; auto. Qed. Lemma elements_min : forall s x, min_elt s = Some x -> elements s = x::elements (remove x s). Proof. intros. apply list_unique; auto with set. constructor; auto with set. rewrite Inf_alt; auto with set; intros. 
rewrite <- MF.elements_iff in H0.
assert (H1:=@min_elt_2 _ _ y H).
destruct (E.compare x y); auto.
elim (remove_1 e H0).
elim H1; auto.
eapply remove_3; eauto.
red; intros; simpl.
do 2 rewrite <- elements_iff; set_iff; intuition.
destruct (E.eq_dec x x0); unfold E.eq; intuition.
subst x0.
apply min_elt_1; auto.
Qed.

Lemma elements_max : forall s x,
 max_elt s = Some x -> elements s = elements (remove x s)++x::nil.
Proof.
intros.
apply list_unique; auto with set.
apply SortA_app with M.E.eq; try red; auto with *.
intros z y H0; revert y; rewrite <- Inf_alt; auto; constructor.
rewrite <- MF.elements_iff in H0.
assert (H1:=@max_elt_2 _ _ z H).
destruct (E.compare x z); auto.
elim H1; auto.
eapply remove_3; eauto.
elim (remove_1 e H0).
split; intros.
apply in_or_app; simpl.
intros; rewrite <- elements_iff in *.
destruct (E.eq_dec x x0); auto with set.
destruct (in_app_or _ _ _ H0); clear H0.
intros; rewrite <- elements_iff in *; eauto with set.
simpl in H1; inversion H1; subst; auto.
intros; rewrite <- elements_iff in *; auto with set.
contradiction.
Qed.

End UsualFacts.

Module FoldEquiv (E:UsualOrderedType)(M:S with Module E:=E).

Module UF := UsualFacts E M.
Import UF M.

Section Fold.
Variable A:Type.
Variable f:elt->A->A.

(** a few definitions equivalent to fold *)

Fixpoint fold_direct (n:nat)(s:t)(i:A) { struct n } : A :=
 match n with
  | O => i
  | S n => match max_elt s with
            | None => i
            | Some x => f x (fold_direct n (remove x s) i)
           end
 end.

Fixpoint fold_tail (n:nat)(s:t)(i:A) { struct n } : A :=
 match n with
  | O => i
  | S n => match min_elt s with
            | None => i
            | Some x => fold_tail n (remove x s) (f x i)
           end
 end.

Lemma fold_direct_1 :
 forall s i, fold_direct (cardinal s) s i = fold f s i.
Proof.
intros s; remember (cardinal s) as n; revert s Heqn; induction n; simpl; intros.
rewrite fold_1.
rewrite cardinal_1 in Heqn.
destruct (elements s); [clear Heqn|inversion Heqn]; simpl; auto.
rewrite fold_1.
case_eq (max_elt s); intros.
rewrite (elements_max H).
rewrite fold_left_app.
simpl; f_equal.
rewrite <- fold_1.
apply IHn.
assert (S (cardinal (remove e s)) = S n).
 rewrite Heqn.
 apply MP.remove_cardinal_1.
 apply max_elt_1; auto.
inversion H0; auto.
rewrite (MP.cardinal_1 (max_elt_3 H)) in Heqn; inversion Heqn.
Qed.

Lemma fold_tail_1_aux :
 forall n l s s' i, n = cardinal s' ->
 elements s = l++elements s' ->
 fold_tail n s' (fold_left (fun x y => f y x) l i) = fold f s i.
Proof.
induction n.
simpl.
intros; rewrite fold_1.
rewrite cardinal_1 in H.
destruct (elements s'); [clear H|inversion H]; simpl; auto.
rewrite <- app_nil_end in H0; subst l; auto.
simpl; intros.
case_eq (min_elt s'); intros.
rewrite (elements_min H1) in H0.
rewrite <- (IHn (l++e::nil) s (remove e s')).
rewrite fold_left_app; simpl; auto.
assert (S (cardinal (remove e s')) = S n).
 rewrite H.
 apply MP.remove_cardinal_1.
 apply min_elt_1; auto.
inversion H2; auto.
rewrite app_ass; simpl; auto.
rewrite (MP.cardinal_1 (min_elt_3 H1)) in H; inversion H.
Qed.

Lemma fold_tail_1 :
 forall s i, fold_tail (cardinal s) s i = fold f s i.
Proof.
intros; apply (@fold_tail_1_aux (cardinal s) nil s s i); auto.
Qed.

End Fold.
End FoldEquiv.

(* Beware: a Program version needs extensionality! *)
Require Import Program.
Import WfExtensionality.

Module FoldProgram (E:UsualOrderedType)(M:S with Module E:=E).

Module UF := UsualFacts E M.
Import UF M.

Section Fold.
Variable A:Type.
Variable f:elt->A->A.
Program Fixpoint fold_direct_prog (s:t)(i:A) { measure (cardinal s) } : A := match max_elt s with | None => i | Some x => f x (fold_direct_prog (remove x s) i) end. Next Obligation. symmetry in Heq_anonymous. rewrite <- (@MP.remove_cardinal_1 s x); auto with arith set. Qed. Program Fixpoint fold_tail_prog (s:t)(i:A) { measure (cardinal s) } : A := match min_elt s with | None => i | Some x => fold_tail_prog (remove x s) (f x i) end. Next Obligation. symmetry in Heq_anonymous. rewrite <- (@MP.remove_cardinal_1 s x); auto with arith set. Qed. Lemma fold_direct_prog_1 : forall s i, fold_direct_prog s i = fold f s i. Proof. intros s; remember (cardinal s) as n; revert s Heqn. induction n using Wf_nat.lt_wf_ind; intros. unfold fold_direct_prog, fold_direct_prog_func. rewrite fix_sub_eq_ext; auto. simpl. generalize (@max_elt_1 s) (@max_elt_2 s) (@max_elt_3 s) (@elements_max s). destruct (max_elt s); intros. change (f e (fold_direct_prog (remove e s) i) = fold f s i). rewrite fold_1. rewrite (H3 e); auto. rewrite fold_left_app; simpl; f_equal. rewrite <- fold_1. assert (S (cardinal (remove e s)) = n). rewrite Heqn. apply MP.remove_cardinal_1; auto. apply H with (cardinal (remove e s)); auto. rewrite <- H4; auto with arith. rewrite fold_1. generalize (cardinal_1 s). rewrite MP.cardinal_1; auto. destruct (elements s); simpl; auto; inversion 1. Qed. Lemma fold_tail_prog_1_aux : forall n l s s' i, n = cardinal s' -> elements s = l++elements s' -> fold_tail_prog s' (fold_left (fun x y => f y x) l i) = fold f s i. Proof. induction n using Wf_nat.lt_wf_ind; intros. unfold fold_tail_prog, fold_tail_prog_func. rewrite fix_sub_eq_ext; auto. simpl. generalize (@min_elt_1 s') (@min_elt_2 s') (@min_elt_3 s') (@elements_min s'). destruct (min_elt s'); intros. change (fold_tail_prog (remove e s') (f e (fold_left (fun x y => f y x) l i)) = fold f s i). rewrite (H5 e) in H1; auto. assert (S (cardinal (remove e s')) = n). rewrite H0. apply MP.remove_cardinal_1; auto. assert (cardinal (remove e s') < n). rewrite <- H6; auto. rewrite <- (@H _ H7 (l++e::nil) s (remove e s')); auto. rewrite fold_left_app; simpl; auto. rewrite app_ass; simpl; auto. rewrite fold_1. generalize (cardinal_1 s'). rewrite MP.cardinal_1; auto. destruct (elements s'); simpl; auto; inversion 1. rewrite <- app_nil_end in H1; subst l; auto. Qed. Lemma fold_tail_prog_1 : forall s i, fold_tail_prog s i = fold f s i. Proof. intros; apply (@fold_tail_prog_1_aux (cardinal s) nil s s i); auto. Qed. End Fold. End FoldProgram. Require Import Recdef. Module FoldFunction (E:UsualOrderedType)(M:S with Module E:=E). Module Import P := Properties M. Module Import F := FoldEquiv E M. Import M. Section Fold. Variable A:Type. Variable f:elt->A->A. Function fold_direct_fun (s:t)(i:A) { measure cardinal s } : A := match max_elt s with | None => i | Some x => f x (fold_direct_fun (remove x s) i) end. Proof. intros. rewrite <- (@P.remove_cardinal_1 s x); auto with arith set. Defined. Function fold_tail_fun (s:t)(i:A) { measure cardinal s } : A := match min_elt s with | None => i | Some x => fold_tail_fun (remove x s) (f x i) end. Proof. intros. rewrite <- (@P.remove_cardinal_1 s x); auto with arith set. Defined. Lemma fold_direct_fun_1 : forall s i, fold_direct_fun s i = fold f s i. Proof. intros s i. rewrite <- F.fold_direct_1. functional induction fold_direct_fun s i; simpl; auto. rewrite P.cardinal_1; simpl; auto with set. rewrite <- P.remove_cardinal_1 with s x; auto with set. simpl; auto. rewrite e; simpl. congruence. Qed. 
Lemma fold_tail_fun_1 :
 forall s i, fold_tail_fun s i = fold f s i.
Proof.
intros s i.
rewrite <- F.fold_tail_1.
functional induction fold_tail_fun s i; simpl; auto.
rewrite P.cardinal_1; simpl; auto with set.
rewrite <- P.remove_cardinal_1 with s x; auto with set.
simpl; auto.
rewrite e; simpl; auto.
Qed.

End Fold.
End FoldFunction.
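The three Coq formulations above (structural recursion on `max_elt`, accumulator-style recursion on `min_elt`, and the library `fold`) all compute the same value; a small executable Python sketch of the two recursion shapes, purely for orientation:

def fold_tail(s, f, acc):
    # Smallest-first, threading the accumulator
    # (cf. fold_tail / fold_tail_fun: recurse on remove (min_elt s)).
    while s:
        x = min(s)
        s = s - {x}
        acc = f(x, acc)
    return acc

def fold_direct(s, f, acc):
    # Recurse on the largest element, applying f on the way back out
    # (cf. fold_direct / fold_direct_fun: f x (fold (remove x s) i)).
    if not s:
        return acc
    x = max(s)
    return f(x, fold_direct(s - {x}, f, acc))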
MODULE WDecay
  USE ParamModule
  USE alfas_functions
CONTAINS
  FUNCTION SMWWidth(qcdord,qedord,finitemassin)
    IMPLICIT NONE
    ! (qcdord,qedord):
    ! (0,0): LO
    ! (1,0): NLO QCD
    ! (0,1): NLO QED
    ! (1,1): NLO QCD+QED
    INTEGER,INTENT(IN)::qcdord,qedord
    REAL(KIND(1d0))::SMWWidth
    LOGICAL,INTENT(IN),OPTIONAL::finitemassin
    LOGICAL::finitemass
    REAL(KIND(1d0))::WWidth_LOud,ALPHAS
    IF(.NOT.print_banner)THEN
       INCLUDE "banner.inc"
       print_banner=.TRUE.
    ENDIF
    IF(qcdord.LT.0.OR.qcdord.GT.1)THEN
       WRITE(*,*)"ERROR: Please specify the QCD corr. order to be 0 or 1"
       STOP
    ENDIF
    IF(qedord.LT.0.OR.qedord.GT.1)THEN
       WRITE(*,*)"ERROR: Please specify the QED corr. order to be 0 or 1"
       STOP
    ENDIF
    ! W > u d~
    WWidth_LOud=Decay_e**2*Decay_MW/(16d0*pi*Decay_R_SW2)
    ! W > u d~, c s~, e ve, mu vm, ta vt
    SMWWidth=WWidth_LOud*(2d0+1d0)
    ! include finite quark/lepton mass effect
    IF(PRESENT(finitemassin))THEN
       finitemass=finitemassin
    ELSE
       finitemass=.FALSE.
       ! it should be turned off with loop_qcd_qed_sm correspondingly
    END IF
    IF(finitemass)THEN
       SMWWidth=SMWWidth+SMWWidth_fmass(Finite_MC,Finite_MTAU,Finite_MMU)
    ENDIF
    IF(qcdord.EQ.0.AND.qedord.EQ.0)RETURN
    IF(qcdord.EQ.1)THEN
       NLOOP=2
       Decay_aS=ALPHAS_MCFM(Decay_MW)
       Decay_gs=DSQRT(4d0*pi*Decay_aS)
       SMWWidth=SMWWidth+WWidth_LOud*2d0*Decay_aS/pi
    ENDIF
    IF(qedord.EQ.1)THEN
       SMWWidth=SMWWidth+SMWWidth_EW(Decay_Scheme)
    ENDIF
    RETURN
  END FUNCTION SMWWidth

  FUNCTION SMWWidth_EW(iScheme)
    IMPLICIT NONE
    INTEGER,INTENT(IN)::iScheme ! 1: alpha(MZ) 2: Gmu
    REAL(KIND(1d0))::SMWWidth_EW
    REAL(KIND(1d0)),EXTERNAL::Width_W2ud_EW,Width_W2lv_EW
    ! W+ > u d~ and W+ > c s~
    SMWWidth_EW=2d0*Width_W2ud_EW(iScheme)
    ! W+ > ve e+, W+ > vm m+, W+ > vt tt+
    SMWWidth_EW=SMWWidth_EW+3d0*Width_W2lv_EW(iScheme)
    RETURN
  END FUNCTION SMWWidth_EW

  FUNCTION SMWWidth_fmass(mc,mtt,mm)
    IMPLICIT NONE
    REAL(KIND(1d0)),INTENT(IN)::mc,mtt,mm
    REAL(KIND(1d0))::SMWWidth_fmass
    REAL(KIND(1d0)),EXTERNAL::Width_W2cs_qmass,Width_W2lv_lmass
    SMWWidth_fmass=0d0
    IF(mc.GT.0d0)THEN
       SMWWidth_fmass=SMWWidth_fmass+Width_W2cs_qmass(mc,0d0)
    ENDIF
    IF(mtt.GT.0d0)THEN
       SMWWidth_fmass=SMWWidth_fmass+Width_W2lv_lmass(mtt)
    ENDIF
    IF(mm.GT.0d0)THEN
       SMWWidth_fmass=SMWWidth_fmass+Width_W2lv_lmass(mm)
    ENDIF
    RETURN
  END FUNCTION SMWWidth_fmass
END MODULE WDecay
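As a cross-check on `SMWWidth` (a standard leading-order result, stated here independently of this source): the partial width coded in `WWidth_LOud` is

\[
  \Gamma_{\mathrm{LO}}(W^+ \to u\bar d)
    = \frac{N_c\, e^2 M_W}{48\pi \sin^2\theta_W}
    = \frac{e^2 M_W}{16\pi \sin^2\theta_W},
\]

so two hadronic channels plus three lepton channels (each $1/N_c$ of a quark channel) give the overall factor $(2+1)$, and NLO QCD multiplies the hadronic part by $(1+\alpha_s/\pi)$, which is exactly the `WWidth_LOud*2d0*Decay_aS/pi` term added above.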
Formal statement is: proposition dim_hyperplane: fixes a :: "'a::euclidean_space" assumes "a \<noteq> 0" shows "dim {x. a \<bullet> x = 0} = DIM('a) - 1" Informal statement is: The dimension of the hyperplane $\{x \in \mathbb{R}^n \mid a \cdot x = 0\}$ is $n-1$.
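Proof sketch behind `dim_hyperplane`: the set $\{x \mid a \cdot x = 0\}$ is the kernel of the linear functional $x \mapsto a \cdot x$, which is surjective onto $\mathbb{R}$ when $a \neq 0$ (it sends $a$ to $|a|^2 > 0$), so rank-nullity gives

\[
  \dim \ker(x \mapsto a \cdot x) = \dim \mathbb{R}^n - 1 = n - 1.
\]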
[STATEMENT] lemma payload_rep_cpayload: "Re x \<in> cpayload \<longleftrightarrow> x \<in> payload" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (Re x \<in> cpayload) = (x \<in> payload) [PROOF STEP] by (auto simp add: payload_def abs_cpayload_rep)
lemma incseqD: "incseq f \<Longrightarrow> i \<le> j \<Longrightarrow> f i \<le> f j"
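The same fact in Lean 4 / mathlib terms, where Isabelle's `incseq` corresponds to `Monotone` for sequences:

import Mathlib

example (f : ℕ → ℝ) (hf : Monotone f) {i j : ℕ} (h : i ≤ j) :
    f i ≤ f j :=
  hf h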
import Control.Monad.State update : (stateType -> stateType) -> State stateType () update func = do currState <- get put (func currState) increase : Nat -> State Nat () increase diff = update (+diff)
[STATEMENT] lemma path_middle_edge: "path (xs @ v # w # ys) \<Longrightarrow> v \<rightarrow> w" [PROOF STATE] proof (prove) goal (1 subgoal): 1. path (xs @ v # w # ys) \<Longrightarrow> v \<rightarrow> w [PROOF STEP] using walk_middle_edge [PROOF STATE] proof (prove) using this: walk (?xs @ ?v # ?w # ?ys) \<Longrightarrow> ?v \<rightarrow> ?w goal (1 subgoal): 1. path (xs @ v # w # ys) \<Longrightarrow> v \<rightarrow> w [PROOF STEP] by blast
If $f$ is L-Lipschitz on $X$, then $f$ is continuous on $X$.
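Proof sketch for the Lipschitz statement: if $d(f(x), f(y)) \leq L\, d(x, y)$ on $X$, then for any $\varepsilon > 0$ the choice $\delta = \varepsilon/(L+1)$ gives

\[
  d(x, y) < \delta \;\Longrightarrow\; d(f(x), f(y)) \leq L\,\delta < \varepsilon,
\]

so $f$ is in fact uniformly continuous on $X$; the $L+1$ in the denominator covers the degenerate case $L = 0$.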
module Human.String where

open import Human.Bool
open import Human.List renaming ( length to llength )
open import Human.Char
open import Human.Nat

postulate String : Set
{-# BUILTIN STRING String #-}

primitive
  primStringToList   : String → List Char
  primStringFromList : List Char → String
  primStringAppend   : String → String → String
  primStringEquality : String → String → Bool
  primShowChar       : Char → String
  primShowString     : String → String

{-# COMPILE JS primStringToList = function(x) { return x.split(""); } #-}
{-# COMPILE JS primStringFromList = function(x) { return x.join(""); } #-}
{-# COMPILE JS primStringAppend = function(x) { return function(y) { return x+y; }; } #-}
{-# COMPILE JS primStringEquality = function(x) { return function(y) { return x===y; }; } #-}
{-# COMPILE JS primShowChar = function(x) { return JSON.stringify(x); } #-}
{-# COMPILE JS primShowString = function(x) { return JSON.stringify(x); } #-}

toList : String → List Char
toList = primStringToList

slength : String → Nat
slength s = llength (toList s)
{-# COMPILE JS slength = function(s) { return s.length; } #-}
function varargout = angleSort3d(pts, varargin)
%ANGLESORT3D Sort 3D coplanar points according to their angles in plane
%
%   PTS2 = angleSort3d(PTS);
%   Assumes all points are located on the same plane, and sorts them
%   according to their angle in that plane. PTS is a [Nx3] array. Note
%   that the result depends on the plane orientation: points can be in
%   reverse order compared to what is expected. The reference plane is
%   computed based on the first 3 points.
%
%   PTS2 = angleSort3d(PTS, PT0);
%   Computes angles between each point of PTS and PT0. By default, uses
%   the centroid of the points.
%
%   PTS2 = angleSort3d(PTS, PT0, PT1);
%   Specifies the point which will be used as a start.
%
%   [PTS2, I] = angleSort3d(...);
%   Also returns in I the indices of PTS, such that PTS2 = PTS(I, :);
%
%   See also:
%   points3d, angles3d, angleSort
%
%   ------
%   Author: David Legland
%   e-mail: [email protected]
%   Created: 2005-11-24
%   Copyright 2005 INRA - CEPIA Nantes - MIAJ (Jouy-en-Josas).
%

%   HISTORY :
%   04/01/2007: remove unused variables

% default values
pt0 = mean(pts, 1);
pt1 = pts(1,:);

if length(varargin)==1
    pt0 = varargin{1};
elseif length(varargin)==2
    pt0 = varargin{1};
    pt1 = varargin{2};
end

% create support plane
plane = createPlane(pts(1:3, :));

% project points onto the plane
pts2d = planePosition(pts, plane);
pt0 = planePosition(pt0, plane);
pt1 = planePosition(pt1, plane);

% compute origin angle
theta0 = atan2(pt1(2)-pt0(2), pt1(1)-pt0(1));
theta0 = mod(theta0 + 2*pi, 2*pi);

% translate to reference point
n = size(pts, 1);
pts2d = pts2d - repmat(pt0, [n 1]);

% compute angles
angle = atan2(pts2d(:,2), pts2d(:,1));
angle = mod(angle - theta0 + 4*pi, 2*pi);

% sort points according to angles
[angle, I] = sort(angle); %#ok<ASGLU>

% format output
if nargout<2
    varargout{1} = pts(I, :);
elseif nargout==2
    varargout{1} = pts(I, :);
    varargout{2} = I;
end
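A compact Python sketch of the same project-then-sort idea (assumes NumPy; unlike the MATLAB routine it fixes the origin at the centroid and omits the optional start point):

import numpy as np

def angle_sort_3d(pts):
    # Sort coplanar 3D points by angle in their common plane.
    pts = np.asarray(pts, dtype=float)
    # Orthonormal in-plane basis built from the first three points.
    u = pts[1] - pts[0]
    n = np.cross(u, pts[2] - pts[0])  # plane normal
    u = u / np.linalg.norm(u)
    v = np.cross(n, u)
    v = v / np.linalg.norm(v)
    rel = pts - pts.mean(axis=0)      # angles measured from the centroid
    ang = np.arctan2(rel @ v, rel @ u)
    order = np.argsort(ang)
    return pts[order], order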
#pragma once #include "../common.h" #include "../concepts.h" #include <boost/container/flat_set.hpp> #include <boost/outcome/result.hpp> #include <boost/outcome/try.hpp> #include <fmt/format.h> #include <gsl/gsl-lite.hpp> #include <string> #include <string_view> #include <yaml-cpp/yaml.h> namespace angonoka::validation { using boost::container::flat_set; namespace bo = boost::outcome_v2; using result = bo::result<void, std::string>; using namespace fmt::literals; template <typename T> concept Check = std::is_invocable_v<T, const YAML::Node&, gsl::czstring>; template <typename T> concept Attribute = std::is_convertible_v< decltype(std::declval<T>().name), std::string_view> && Check<T>; template <typename T> concept AttrOrStr = String<T> || Attribute<T>; /** YAML scalar. Example: hello: "world" ^ | Scalar scalar() Means that the value has to be a scalar and not a map or a sequence, etc. @return Check function */ constexpr Check auto scalar() { return [](const YAML::Node& node, std::string_view scope) -> result { if (!node || node.IsNull()) return R"("{}" can't be empty.)"_format(scope); if (!node.IsScalar()) return R"("{}" has invalid type.)"_format(scope); return bo::success(); }; } namespace detail { /** Helper class for required and optional YAML fields. @var name Parameter's name @var check Function to apply to the field */ template <Check T> struct functor { gsl::czstring name; T check; constexpr functor(gsl::czstring name, T check) : name{name} , check{check} { } explicit constexpr functor(gsl::czstring name) : functor{name, scalar()} { } }; /** Join YAML path parts. @param a Root path @param b New path @return Concatenation of parts separated by a ".". */ template <String T1, String T2> std::string join(T1&& a, T2&& b) { if (std::empty(a)) return std::forward<T2>(b); return "{}.{}"_format( std::forward<T1>(a), std::forward<T2>(b)); } } // namespace detail /** Required YAML field. Example: required("hello") Means that the field "hello" is required and it has to be a scalar value. @var name Parameter's name @var check Function to apply to the field */ template <Check T> struct required : detail::functor<T> { using detail::functor<T>::functor; result operator()(const YAML::Node& node, std::string_view scope) const { if (const auto n = node[this->name]) return this->check(n, detail::join(scope, this->name)); return R"("{}" is missing a "{}" attribute.)"_format( scope, this->name); } }; template <Check T> required(gsl::czstring, T) -> required<T>; required(gsl::czstring)->required<decltype(scalar())>; namespace detail { /** Extract the map attribute's name. If an attribute is a string literal, pass the argument as is. @param attr Either an attribute or a string literal @return Attribute name */ constexpr auto attr_name(gsl::czstring attr) { return attr; } constexpr auto attr_name(Attribute auto&& attr) { return attr.name; } /** Extract or construct an attribute check function. If an attribute is a string literal, construct the required attribute with the string literal as its name. @param attr Either an attribute or a string literal @return Check function */ constexpr auto attr_check(gsl::czstring attr) { return required(attr); } constexpr auto attr_check(Attribute auto&& attr) { return attr; } } // namespace detail /** Optional YAML field. Example: optional("hello") Means that the field "hello" is optional and if present, it has to be a scalar value.
@var name Parameter's name @var check Function to apply to the field */ template <Check T> struct optional : detail::functor<T> { using detail::functor<T>::functor; result operator()(const YAML::Node& node, std::string_view scope) const { if (const auto n = node[this->name]) return this->check(n, detail::join(scope, this->name)); return bo::success(); } }; template <Check T> optional(gsl::czstring, T) -> optional<T>; optional(gsl::czstring)->optional<decltype(scalar())>; /** YAML array. Validates each value of the array with the provided function. Example: sequence(scalar()) Means that the value has to be a sequence (array) of scalar values. @param check Function to apply to each item @return Check function */ constexpr Check auto sequence(Check auto check) { return [=](const YAML::Node& node, std::string_view scope) -> result { if (!node || !node.IsSequence()) { return R"("{}" is expected to be a sequence.)"_format( scope); } for (gsl::index i{0}; i < std::size(node); ++i) { BOOST_OUTCOME_TRY( check(node[i], "{}[{}]"_format(scope, i))); } return bo::success(); }; } constexpr Check auto sequence() { return sequence(scalar()); } /** YAML map. Matches specified parameters exactly, no extra fields permitted. Example: attributes("first", optional("second")) Means that the value has to be a map with a required field "first", which has to be a scalar, and an optional field "second" which also has to be a scalar. @param attrs Sequence of optional or required parameters @return Check function */ constexpr Check auto attributes(AttrOrStr auto... attrs) { return [=](const YAML::Node& node, std::string_view scope = {}) -> result { if (!node || node.IsScalar() || node.IsSequence()) return R"("{}" is expected to be a map.)"_format(scope); flat_set<std::string_view> unique_fields; for (auto&& n : node) { const auto& attr_name = n.first.Scalar(); if (attr_name.empty()) return R"(Empty attribute in "{}".)"_format(scope); if (!unique_fields.emplace(attr_name).second) { return R"(Duplicate attribute "{}" in "{}".)"_format( attr_name, scope); } if (!((attr_name == detail::attr_name(attrs)) || ...)) { return R"(Unexpected attribute "{}" in "{}".)"_format( attr_name, scope); } } result r = bo::success(); ((r = detail::attr_check(attrs)(node, scope)) && ...); return r; }; } /** YAML map. Validates each value of the map with the provided function. Used when the number of map fields may vary. Example: foo: bar1: 1 bar2: 2 bar3: 3 values(scalar()) @param check Function to apply to each value @return Check function */ constexpr Check auto values(Check auto check) { return [=](const YAML::Node& node, std::string_view scope) -> result { if (!node || !node.IsMap()) return R"("{}" is expected to be a map.)"_format(scope); for (auto&& n : node) { BOOST_OUTCOME_TRY(check( n.second, detail::join(scope, n.first.Scalar()))); } return bo::success(); }; } /** Match at least one of the validators. Example: required("example", any_of(scalar(), attributes("foo", "bar"))) Means the value has to be either a single scalar value or a map with 2 fields "foo" and "bar". example: "hello" or example: foo: 1 bar: 2 @param checks Functions to match @return Check function */ constexpr Check auto any_of(Check auto... checks) { return [=](const YAML::Node& node, std::string_view scope) -> result { result r = bo::success(); ((r = checks(node, scope)) || ...); return r; }; } } // namespace angonoka::validation
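For illustration, a hedged sketch of how these combinators compose; the schema and document below are hypothetical and use only the functions declared above:
// a toy schema: required scalar "name", optional scalar "version",
// and a required sequence of scalars "tasks"
using namespace angonoka::validation;
const auto schema = attributes("name", optional("version"), required("tasks", sequence()));
const auto doc = YAML::Load("name: demo\ntasks: [a, b]");
if (const auto r = schema(doc); !r) fmt::print("{}\n", r.error());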
/- Copyright (c) 2015 Microsoft Corporation. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Author: Leonardo de Moura, Mario Carneiro Type class for encodable Types. Note that every encodable Type is countable. -/ import Mathlib.PrePort import Mathlib.Lean3Lib.init.default import Mathlib.data.equiv.nat import Mathlib.order.rel_iso import Mathlib.order.directed import Mathlib.PostPort universes u_1 l u_2 u_3 namespace Mathlib /-- An encodable type is a "constructively countable" type. This is where we have an explicit injection `encode : α → nat` and a partial inverse `decode : nat → option α`. This makes the range of `encode` decidable, although it is not decidable if `α` is finite or not. -/ class encodable (α : Type u_1) where encode : α → ℕ decode : ℕ → Option α encodek : ∀ (a : α), decode (encode a) = some a namespace encodable theorem encode_injective {α : Type u_1} [encodable α] : function.injective encode := sorry /- This is not set as an instance because this is usually not the best way to infer decidability. -/ def decidable_eq_of_encodable (α : Type u_1) [encodable α] : DecidableEq α := sorry def of_left_injection {α : Type u_1} {β : Type u_2} [encodable α] (f : β → α) (finv : α → Option β) (linv : ∀ (b : β), finv (f b) = some b) : encodable β := mk (fun (b : β) => encode (f b)) (fun (n : ℕ) => option.bind (decode α n) finv) sorry def of_left_inverse {α : Type u_1} {β : Type u_2} [encodable α] (f : β → α) (finv : α → β) (linv : ∀ (b : β), finv (f b) = b) : encodable β := of_left_injection f (some ∘ finv) sorry /-- If `α` is encodable and `β ≃ α`, then so is `β` -/ def of_equiv {β : Type u_2} (α : Type u_1) [encodable α] (e : β ≃ α) : encodable β := of_left_inverse (⇑e) (⇑(equiv.symm e)) (equiv.left_inv e) @[simp] theorem encode_of_equiv {α : Type u_1} {β : Type u_2} [encodable α] (e : β ≃ α) (b : β) : encode b = encode (coe_fn e b) := rfl @[simp] theorem decode_of_equiv {α : Type u_1} {β : Type u_2} [encodable α] (e : β ≃ α) (n : ℕ) : decode β n = option.map (⇑(equiv.symm e)) (decode α n) := rfl protected instance nat : encodable ℕ := mk id some sorry @[simp] theorem encode_nat (n : ℕ) : encode n = n := rfl @[simp] theorem decode_nat (n : ℕ) : decode ℕ n = some n := rfl protected instance empty : encodable empty := mk (fun (a : empty) => empty.rec (fun (a : empty) => ℕ) a) (fun (n : ℕ) => none) sorry protected instance unit : encodable PUnit := mk (fun (_x : PUnit) => 0) (fun (n : ℕ) => nat.cases_on n (some PUnit.unit) fun (_x : ℕ) => none) sorry @[simp] theorem encode_star : encode PUnit.unit = 0 := rfl @[simp] theorem decode_unit_zero : decode PUnit 0 = some PUnit.unit := rfl @[simp] theorem decode_unit_succ (n : ℕ) : decode PUnit (Nat.succ n) = none := rfl protected instance option {α : Type u_1} [h : encodable α] : encodable (Option α) := mk (fun (o : Option α) => option.cases_on o 0 fun (a : α) => Nat.succ (encode a)) (fun (n : ℕ) => nat.cases_on n (some none) fun (m : ℕ) => option.map some (decode α m)) sorry @[simp] theorem encode_none {α : Type u_1} [encodable α] : encode none = 0 := rfl @[simp] theorem encode_some {α : Type u_1} [encodable α] (a : α) : encode (some a) = Nat.succ (encode a) := rfl @[simp] theorem decode_option_zero {α : Type u_1} [encodable α] : decode (Option α) 0 = some none := rfl @[simp] theorem decode_option_succ {α : Type u_1} [encodable α] (n : ℕ) : decode (Option α) (Nat.succ n) = option.map some (decode α n) := rfl def decode2 (α : Type u_1) [encodable α] (n : ℕ) : Option α := option.bind (decode α n) 
(option.guard fun (a : α) => encode a = n) theorem mem_decode2' {α : Type u_1} [encodable α] {n : ℕ} {a : α} : a ∈ decode2 α n ↔ a ∈ decode α n ∧ encode a = n := sorry theorem mem_decode2 {α : Type u_1} [encodable α] {n : ℕ} {a : α} : a ∈ decode2 α n ↔ encode a = n := iff.trans mem_decode2' (and_iff_right_of_imp fun (e : encode a = n) => e ▸ encodek a) theorem decode2_is_partial_inv {α : Type u_1} [encodable α] : function.is_partial_inv encode (decode2 α) := fun (a : α) (n : ℕ) => mem_decode2 theorem decode2_inj {α : Type u_1} [encodable α] {n : ℕ} {a₁ : α} {a₂ : α} (h₁ : a₁ ∈ decode2 α n) (h₂ : a₂ ∈ decode2 α n) : a₁ = a₂ := encode_injective (Eq.trans (iff.mp mem_decode2 h₁) (Eq.symm (iff.mp mem_decode2 h₂))) theorem encodek2 {α : Type u_1} [encodable α] (a : α) : decode2 α (encode a) = some a := iff.mpr mem_decode2 rfl def decidable_range_encode (α : Type u_1) [encodable α] : decidable_pred (set.range encode) := fun (x : ℕ) => decidable_of_iff ↥(option.is_some (decode2 α x)) sorry def equiv_range_encode (α : Type u_1) [encodable α] : α ≃ ↥(set.range encode) := equiv.mk (fun (a : α) => { val := encode a, property := sorry }) (fun (n : ↥(set.range encode)) => option.get sorry) sorry sorry def encode_sum {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] : α ⊕ β → ℕ := sorry def decode_sum {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (n : ℕ) : Option (α ⊕ β) := sorry protected instance sum {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] : encodable (α ⊕ β) := mk encode_sum decode_sum sorry @[simp] theorem encode_inl {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (a : α) : encode (sum.inl a) = bit0 (encode a) := rfl @[simp] theorem encode_inr {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (b : β) : encode (sum.inr b) = bit1 (encode b) := rfl @[simp] theorem decode_sum_val {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (n : ℕ) : decode (α ⊕ β) n = decode_sum n := rfl protected instance bool : encodable Bool := of_equiv (Unit ⊕ Unit) equiv.bool_equiv_punit_sum_punit @[simp] theorem encode_tt : encode tt = 1 := rfl @[simp] theorem encode_ff : encode false = 0 := rfl @[simp] theorem decode_zero : decode Bool 0 = some false := rfl @[simp] theorem decode_one : decode Bool 1 = some tt := rfl theorem decode_ge_two (n : ℕ) (h : bit0 1 ≤ n) : decode Bool n = none := sorry def encode_sigma {α : Type u_1} {γ : α → Type u_3} [encodable α] [(a : α) → encodable (γ a)] : sigma γ → ℕ := sorry def decode_sigma {α : Type u_1} {γ : α → Type u_3} [encodable α] [(a : α) → encodable (γ a)] (n : ℕ) : Option (sigma γ) := sorry protected instance sigma {α : Type u_1} {γ : α → Type u_3} [encodable α] [(a : α) → encodable (γ a)] : encodable (sigma γ) := mk encode_sigma decode_sigma sorry @[simp] theorem decode_sigma_val {α : Type u_1} {γ : α → Type u_3} [encodable α] [(a : α) → encodable (γ a)] (n : ℕ) : decode (sigma γ) n = option.bind (decode α (prod.fst (nat.unpair n))) fun (a : α) => option.map (sigma.mk a) (decode (γ a) (prod.snd (nat.unpair n))) := sorry @[simp] theorem encode_sigma_val {α : Type u_1} {γ : α → Type u_3} [encodable α] [(a : α) → encodable (γ a)] (a : α) (b : γ a) : encode (sigma.mk a b) = nat.mkpair (encode a) (encode b) := rfl protected instance prod {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] : encodable (α × β) := of_equiv (sigma fun (_x : α) => β) (equiv.symm (equiv.sigma_equiv_prod α β)) @[simp] theorem decode_prod_val {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (n : ℕ) : decode (α × β) n = option.bind 
(decode α (prod.fst (nat.unpair n))) fun (a : α) => option.map (Prod.mk a) (decode β (prod.snd (nat.unpair n))) := sorry @[simp] theorem encode_prod_val {α : Type u_1} {β : Type u_2} [encodable α] [encodable β] (a : α) (b : β) : encode (a, b) = nat.mkpair (encode a) (encode b) := rfl def encode_subtype {α : Type u_1} {P : α → Prop} [encA : encodable α] : (Subtype fun (a : α) => P a) → ℕ := sorry def decode_subtype {α : Type u_1} {P : α → Prop} [encA : encodable α] [decP : decidable_pred P] (v : ℕ) : Option (Subtype fun (a : α) => P a) := option.bind (decode α v) fun (a : α) => dite (P a) (fun (h : P a) => some { val := a, property := h }) fun (h : ¬P a) => none protected instance subtype {α : Type u_1} {P : α → Prop} [encA : encodable α] [decP : decidable_pred P] : encodable (Subtype fun (a : α) => P a) := mk encode_subtype decode_subtype sorry theorem subtype.encode_eq {α : Type u_1} {P : α → Prop} [encA : encodable α] [decP : decidable_pred P] (a : Subtype P) : encode a = encode (subtype.val a) := subtype.cases_on a fun (a_val : α) (a_property : P a_val) => Eq.refl (encode { val := a_val, property := a_property }) protected instance fin (n : ℕ) : encodable (fin n) := of_equiv (Subtype fun (m : ℕ) => m < n) (equiv.fin_equiv_subtype n) protected instance int : encodable ℤ := of_equiv ℕ equiv.int_equiv_nat protected instance ulift {α : Type u_1} [encodable α] : encodable (ulift α) := of_equiv α equiv.ulift protected instance plift {α : Type u_1} [encodable α] : encodable (plift α) := of_equiv α equiv.plift def of_inj {α : Type u_1} {β : Type u_2} [encodable β] (f : α → β) (hf : function.injective f) : encodable α := of_left_injection f (function.partial_inv f) sorry end encodable /-- `ulower α : Type 0` is an equivalent type in the lowest universe, given `encodable α`. -/ def ulower (α : Type u_1) [encodable α] := ↥(set.range encodable.encode) namespace ulower /-- The equivalence between the encodable type `α` and `ulower α : Type 0`. -/ def equiv (α : Type u_1) [encodable α] : α ≃ ulower α := encodable.equiv_range_encode α /-- Lowers an `a : α` into `ulower α`. -/ def down {α : Type u_1} [encodable α] (a : α) : ulower α := coe_fn (equiv α) a protected instance inhabited {α : Type u_1} [encodable α] [Inhabited α] : Inhabited (ulower α) := { default := down Inhabited.default } /-- Lifts an `a : ulower α` into `α`. -/ def up {α : Type u_1} [encodable α] (a : ulower α) : α := coe_fn (equiv.symm (equiv α)) a @[simp] theorem down_up {α : Type u_1} [encodable α] {a : ulower α} : down (up a) = a := equiv.right_inv (equiv α) a @[simp] theorem up_down {α : Type u_1} [encodable α] {a : α} : up (down a) = a := equiv.left_inv (equiv α) a @[simp] theorem up_eq_up {α : Type u_1} [encodable α] {a : ulower α} {b : ulower α} : up a = up b ↔ a = b := equiv.apply_eq_iff_eq (equiv.symm (equiv α)) @[simp] theorem down_eq_down {α : Type u_1} [encodable α] {a : α} {b : α} : down a = down b ↔ a = b := equiv.apply_eq_iff_eq (equiv α) protected theorem ext {α : Type u_1} [encodable α] {a : ulower α} {b : ulower α} : up a = up b → a = b := iff.mp up_eq_up end ulower /- Choice function for encodable types and decidable predicates. 
We provide the following API choose {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] : (∃ x, p x) → α := choose_spec {α : Type*} {p : α → Prop} [c : encodable α] [d : decidable_pred p] (ex : ∃ x, p x) : p (choose ex) := -/ namespace encodable def choose_x {α : Type u_1} {p : α → Prop} [encodable α] [decidable_pred p] (h : ∃ (x : α), p x) : Subtype fun (x : α) => p x := (fun (this : ∃ (n : ℕ), good p (decode α n)) => sorry) sorry def choose {α : Type u_1} {p : α → Prop} [encodable α] [decidable_pred p] (h : ∃ (x : α), p x) : α := subtype.val (choose_x h) theorem choose_spec {α : Type u_1} {p : α → Prop} [encodable α] [decidable_pred p] (h : ∃ (x : α), p x) : p (choose h) := subtype.property (choose_x h) theorem axiom_of_choice {α : Type u_1} {β : α → Type u_2} {R : (x : α) → β x → Prop} [(a : α) → encodable (β a)] [(x : α) → (y : β x) → Decidable (R x y)] (H : ∀ (x : α), ∃ (y : β x), R x y) : ∃ (f : (a : α) → β a), ∀ (x : α), R x (f x) := Exists.intro (fun (x : α) => choose (H x)) fun (x : α) => choose_spec (H x) theorem skolem {α : Type u_1} {β : α → Type u_2} {P : (x : α) → β x → Prop} [c : (a : α) → encodable (β a)] [d : (x : α) → (y : β x) → Decidable (P x y)] : (∀ (x : α), ∃ (y : β x), P x y) ↔ ∃ (f : (a : α) → β a), ∀ (x : α), P x (f x) := sorry /- There is a total ordering on the elements of an encodable type, induced by the map to ℕ. -/ /-- The `encode` function, viewed as an embedding. -/ def encode' (α : Type u_1) [encodable α] : α ↪ ℕ := function.embedding.mk encode encode_injective protected instance order.preimage.is_trans {α : Type u_1} [encodable α] : is_trans α (⇑(encode' α) ⁻¹'o LessEq) := rel_embedding.is_trans (rel_embedding.preimage (encode' α) LessEq) protected instance order.preimage.is_antisymm {α : Type u_1} [encodable α] : is_antisymm α (⇑(encode' α) ⁻¹'o LessEq) := rel_embedding.is_antisymm (rel_embedding.preimage (encode' α) LessEq) protected instance order.preimage.is_total {α : Type u_1} [encodable α] : is_total α (⇑(encode' α) ⁻¹'o LessEq) := rel_embedding.is_total (rel_embedding.preimage (encode' α) LessEq) end encodable namespace directed /-- Given a `directed r` function `f : α → β` defined on an encodable inhabited type, construct a noncomputable sequence such that `r (f (x n)) (f (x (n + 1)))` and `r (f a) (f (x (encode a + 1))`. -/ protected def sequence {α : Type u_1} {β : Type u_2} [encodable α] [Inhabited α] {r : β → β → Prop} (f : α → β) (hf : directed r f) : ℕ → α := sorry theorem sequence_mono_nat {α : Type u_1} {β : Type u_2} [encodable α] [Inhabited α] {r : β → β → Prop} {f : α → β} (hf : directed r f) (n : ℕ) : r (f (directed.sequence f hf n)) (f (directed.sequence f hf (n + 1))) := sorry theorem rel_sequence {α : Type u_1} {β : Type u_2} [encodable α] [Inhabited α] {r : β → β → Prop} {f : α → β} (hf : directed r f) (a : α) : r (f a) (f (directed.sequence f hf (encodable.encode a + 1))) := sorry theorem sequence_mono {α : Type u_1} {β : Type u_2} [encodable α] [Inhabited α] [preorder β] {f : α → β} (hf : directed LessEq f) : monotone (f ∘ directed.sequence f hf) := monotone_of_monotone_nat (sequence_mono_nat hf) theorem le_sequence {α : Type u_1} {β : Type u_2} [encodable α] [Inhabited α] [preorder β] {f : α → β} (hf : directed LessEq f) (a : α) : f a ≤ f (directed.sequence f hf (encodable.encode a + 1)) := rel_sequence hf a end directed /-- Representative of an equivalence class. This is a computable version of `quot.out` for a setoid on an encodable type. 
-/ def quotient.rep {α : Type u_1} {s : setoid α} [DecidableRel has_equiv.equiv] [encodable α] (q : quotient s) : α := encodable.choose (quotient.exists_rep q) theorem quotient.rep_spec {α : Type u_1} {s : setoid α} [DecidableRel has_equiv.equiv] [encodable α] (q : quotient s) : quotient.mk (quotient.rep q) = q := encodable.choose_spec (quotient.exists_rep q) /-- The quotient of an encodable space by a decidable equivalence relation is encodable. -/ def encodable_quotient {α : Type u_1} {s : setoid α} [DecidableRel has_equiv.equiv] [encodable α] : encodable (quotient s) := encodable.mk (fun (q : quotient s) => encodable.encode (quotient.rep q)) (fun (n : ℕ) => quotient.mk <$> encodable.decode α n) sorry
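As a quick sanity check, a sketch (assuming the ported instances above are visible to instance resolution in this environment):
example : encodable (ℕ × Bool) := encodable.prod
example : encodable (Option ℤ) := encodable.option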
Joy Damousi is Professor of History in the School of Historical and Philosophical Studies. She is the author of numerous books which include The Labour of Loss: Mourning, Memory and Wartime Bereavement in Australia (Cambridge, 1999); Living with the Aftermath: Trauma, Nostalgia and Grief in Post-war Australia (Cambridge, 2001); Freud in the Antipodes: A Cultural History of Psychoanalysis in Australia (UNSW Press, 2005; winner of the Ernest Scott Prize) and Colonial Voices: A Cultural History of English in Australia 1840-1940 (Cambridge 2010). With Philip Dwyer she is the general editor of a four volume World History of Violence, due to be published by Cambridge University Press in 2017. She is also currently the editor of the History series for Melbourne University Press. Her current research includes war, trauma and post-war Greek migration to Australia; sound and the two world wars; and child refugees and war.
[STATEMENT] lemma const[simp,code_unfold] : assumes C1 :"const X" shows "const(f X)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. const (f X) [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. const (f X) [PROOF STEP] have const_g : "const (\<lambda>\<tau>. g (X \<tau>))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. const (\<lambda>\<tau>. g (X \<tau>)) [PROOF STEP] by(insert C1, auto simp:const_def, metis) [PROOF STATE] proof (state) this: const (\<lambda>\<tau>. g (X \<tau>)) goal (1 subgoal): 1. const (f X) [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) goal (1 subgoal): 1. const (f X) [PROOF STEP] by(simp_all add : def_scheme const_ss C1 const_g) [PROOF STATE] proof (state) this: const (f X) goal: No subgoals! [PROOF STEP] qed
## plot kinship coefficients args<-commandArgs(TRUE) setwd(args[1]) dat<-read.table("king.kin0", header = TRUE, stringsAsFactors = FALSE) outPred<-read.csv("PredictedPopulations.csv", stringsAsFactors = FALSE) pdf("ScatterplotKinshipCoefficients.pdf") plot(dat$IBS0, dat$Kinship, pch = 16, col = "blue", main = "KING-Robust (Default)", xlab="Proportion of Zero IBS", ylab = "Estimated Kinship Coefficient", ylim = c(0,0.4)) abline(h = 0.3536, col = "black", lty = 3) abline(h = 0.1768, col = "black", lty = 3) abline(h = 0.0884, col = "black", lty = 3) abline(h = 0.0442, col = "black", lty = 3) abline(h = 0.0221, col = "black", lty = 3) dev.off() ## look for samples with higher than average IBD statistics sampleIDs<-unique(c(dat$FID1, dat$FID2)) mean.kin<-rep(NA, length(sampleIDs)) names(mean.kin)<-sampleIDs for(each in sampleIDs){ values<-c(dat$Kinship[which(dat$FID1 == each | dat$FID2 == each)]) mean.kin[each]<-mean(values, na.rm = TRUE) } mean.ibs<-rep(NA, length(sampleIDs)) names(mean.ibs)<-sampleIDs for(each in sampleIDs){ values<-c(dat$IBS0[which(dat$FID1 == each | dat$FID2 == each)]) mean.ibs[each]<-mean(values, na.rm = TRUE) } outPred<-outPred[match(names(mean.ibs), outPred$V1),] pdf("HistMeanKinshipCoefficients.pdf") par(mfrow = c(1,2)) hist(mean.kin, breaks = 20, xlab = "Mean kinship coefficient", main = "") mu<-mean(mean.kin) sigma<-sd(mean.kin) abline(v = mu, col = "red", lty = 2) for(i in 1:3){ abline(v = mu+i*sigma, col = "red") abline(v = mu-i*sigma, col = "red") } hist(mean.ibs, breaks = 20, xlab = "Mean IBS coefficient", main = "") mu<-mean(mean.ibs) sigma<-sd(mean.ibs) abline(v = mu, col = "red", lty = 2) for(i in 1:3){ abline(v = mu+i*sigma, col = "red") abline(v = mu-i*sigma, col = "red") } ## look for joint outliers ## colour by predicted super population par(mfrow = c(1,1)) plot(mean.ibs, mean.kin, pch = 16, col = rainbow(5)[as.factor(outPred$predPop)]) mu<-mean(mean.ibs) sigma<-sd(mean.ibs) abline(v = mu, col = "red", lty = 2) for(i in 1:3){ abline(v = mu+i*sigma, col = "red") abline(v = mu-i*sigma, col = "red") } mu<-mean(mean.kin) sigma<-sd(mean.kin) abline(h = mu, col = "red", lty = 2) for(i in 1:3){ abline(h = mu+i*sigma, col = "red") abline(h = mu-i*sigma, col = "red") } dev.off()
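A small follow-up sketch, using only the objects computed above, that lists the sample IDs falling more than 3 standard deviations from the mean on either statistic:
## flag joint outliers numerically as well as visually
kin.out <- names(mean.kin)[abs(mean.kin - mean(mean.kin)) > 3 * sd(mean.kin)]
ibs.out <- names(mean.ibs)[abs(mean.ibs - mean(mean.ibs)) > 3 * sd(mean.ibs)]
print(union(kin.out, ibs.out))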
module Main where import Options.Applicative import Pipes import qualified Pipes.Prelude as P import System.Random.MWC import Statistics.Quantile.Bench.Gen (sampleGaussian) data Params = Params Int Double Double deriving (Eq, Show) params = Params <$> argument auto (metavar "COUNT") <*> argument auto (metavar "MEAN") <*> argument auto (metavar "STDDEV") args = info (helper <*> params) ( fullDesc <> progDesc "Write COUNT real numbers drawn from a Gaussian." ) main :: IO () main = do ps <- execParser args gen <- createSystemRandom runEffect $ gauss gen ps >-> P.map show >-> P.stdoutLn where gauss gen (Params n mu sigma) = P.replicateM n $ sampleGaussian gen mu sigma
if ~exist('namecore','var') namecore = 'D10ptBMrandPSFgauss_StretchCoef2_N10_offset100_nc'; end chall=2*ones(length(iteration),length(ncvec),(ncvec(end)-1)^2); for ii=1:length(ncvec) for jj=1:length(iteration) % r=load (['D10ptBMrandPSFgauss_StretchCoef2_N10_offset100_nc' num2str(ncvec(ii)) '/results_iter' num2str(iteration(jj)) '.mat']); r=load ([namecore num2str(ncvec(ii)) '/results_iter' num2str(iteration(jj)) '.mat']); if isfield(r.res,'a') r.res.h=r.res.a; end % r=load (['results_updates_nmfclassic_nc' num2str(ncvec(ii)) '/results_iter' num2str(iteration) '.mat']); if isfield(r.peval,'data_dir') d = load(['~/' r.peval.data_path '/' r.peval.data_dir '/' r.peval.data_file]); elseif isfield (r.peval,'data_file') d=load(['~/' r.peval.data_path '/' r.peval.data_file]); else if exist('dpixc', 'var') d.dpixc = dpixc; else d = load ('dpixc'); end end resid = (r.res.w*r.res.h - reshape(d.dpixc, r.peval.nx*r.peval.ny, r.peval.nt)); % pixel correlations: cp = (corrcoef(resid')); cpmax(jj,ii) = max(cp(cp<1)); cpmin(jj,ii) = min(cp(cp<1)); cpmean(jj,ii) = mean(cp(cp<1)); cpmeanpos(jj,ii) = mean(cp(and(cp>0,cp<1))); cpmeanneg(jj,ii) = mean(cp(cp<0)); % intensity correlations: if size(r.res.h,1)>2 ch = corr(r.res.h(1:end-1,:)'); chall(jj,ii,1:numel(ch))=ch(:); chmax(jj,ii) = max(ch(ch<1)); % chmin(ii) = min(min(corr(r.res.h(1:end-1,:)')-eye(size(r.res.h,1)-1))); chmin(jj,ii) = min(ch(:)); chmean(jj,ii)=mean(ch(ch<1)); chmeanpos(jj,ii)=mean(ch(and(ch<1,ch>0))); chmeanneg(jj,ii)=mean(ch(ch<0)); end % h=hinton(ch); ll(jj,ii)=loglikelihoodPoisson(reshape(d.dpixc,r.peval.nx*r.peval.ny,r.peval.nt),r.res.w*r.res.h); if isfield(r.res, 'lb') lb(jj,ii) = r.res.lb; end end end % figure % plot(ncvec, chmax.*(abs(chmin)),'s-r')
(* Title: AWN_Term_Graph.thy License: BSD 2-Clause. See LICENSE. Author: Timothy Bourke *) theory AWN_Term_Graph imports AWN_Cterms begin datatype ('p, 'l) node = RootNode 'p | InternalNode 'l datatype ('p, 'l) link = ILink "('p, 'l) node" "('p, 'l) node" | ELink "('p, 'l) node" "('p, 'l) node" definition gseqp'_fails where "gseqp'_fails = []" declare [[code abort: gseqp'_fails]] fun gseqp' :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> ('s, 'm, 'p, 'l) seqp \<Rightarrow> ('p, 'l) node list" where "gseqp' \<Gamma> ({l}\<langle>_\<rangle> _) = [InternalNode l]" | "gseqp' \<Gamma> ({l}\<lbrakk>_\<rbrakk> _) = [InternalNode l]" | "gseqp' \<Gamma> ({l}unicast(_, _)._ \<triangleright> _) = [InternalNode l]" | "gseqp' \<Gamma> ({l}broadcast(_). _) = [InternalNode l]" | "gseqp' \<Gamma> ({l}groupcast(_, _). _) = [InternalNode l]" | "gseqp' \<Gamma> ({l}send(_)._) = [InternalNode l]" | "gseqp' \<Gamma> ({l}deliver(_)._) = [InternalNode l]" | "gseqp' \<Gamma> ({l}receive(_)._) = [InternalNode l]" | "gseqp' \<Gamma> (p1 \<oplus> p2) = gseqp' \<Gamma> p1 @ gseqp' \<Gamma> p2" | "gseqp' \<Gamma> (call(pn)) = gseqp'_fails" (* (* It would be better to define this function for all wellformed \<Gamma>, as shown below, but I can't get the code generator to work smoothly with the conditional simp rules. *) | "gseqp' \<Gamma> (call(pn)) = gseqp' \<Gamma> (\<Gamma> pn)" by pat_completeness auto lemma gseqp'_termination: assumes "wellformed \<Gamma>" shows "gseqp'_dom (\<Gamma>, p)" proof - have gseqp'_rel': "gseqp'_rel = (\<lambda>gq gp. (gq, gp) \<in> {((\<Gamma>, q), (\<Gamma>', p)). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q})" by (rule ext)+ (auto simp: gseqp'_rel.simps elim: microstep.cases) from assms have "\<forall>x. x \<in> acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}" unfolding wellformed_def by (simp add: wf_acc_iff) hence "p \<in> acc {(q, p). p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}" .. hence "(\<Gamma>, p) \<in> acc {((\<Gamma>, q), (\<Gamma>', p)). \<Gamma> = \<Gamma>' \<and> p \<leadsto>\<^bsub>\<Gamma>\<^esub> q}" by (rule acc_induct) (auto intro: accI) thus "gseqp'_dom (\<Gamma>, p)" unfolding gseqp'_rel' accp_acc_eq . qed declare gseqp'.psimps [simp, code del] lemmas gseqp'_psimps[simp] = gseqp'.psimps [OF gseqp'_termination] and gseqp'_pinduct = gseqp'.pinduct [OF gseqp'_termination] *) fun gseqp :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> ('s, 'm, 'p, 'l) seqp \<Rightarrow> ('p, 'l) node list * ('p, 'l) node list * ('p, 'l) link list" where "gseqp \<Gamma> ({l}\<langle>_\<rangle> p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ILink me) next @ links))" | "gseqp \<Gamma> ({l}\<lbrakk>_\<rbrakk> p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ILink me) next @ links))" | "gseqp \<Gamma> (p1 \<oplus> p2) = (let (next1, acc1, links1) = gseqp \<Gamma> p1 in let (next2, acc2, links2) = gseqp \<Gamma> p2 in (next1 @ next2, acc1 @ acc2, links1 @ links2))" | "gseqp \<Gamma> ({l}unicast(_, _).p \<triangleright> q) = (let me = InternalNode l in let (next1, acc1, links1) = gseqp \<Gamma> p in let (next2, acc2, links2) = gseqp \<Gamma> q in ([me], me # acc1 @ acc2, map (ELink me) (next1 @ next2) @ links1 @ links2))" | "gseqp \<Gamma> ({l}broadcast(_). p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ELink me) next @ links))" | "gseqp \<Gamma> ({l}groupcast(_, _). 
p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ELink me) next @ links))" | "gseqp \<Gamma> ({l}send(_).p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ELink me) next @ links))" | "gseqp \<Gamma> ({l}deliver(_).p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ELink me) next @ links))" | "gseqp \<Gamma> ({l}receive(_).p) = (let me = InternalNode l in let (next, acc, links) = gseqp \<Gamma> p in ([me], me # acc, map (ELink me) next @ links))" | "gseqp \<Gamma> (call(pn)) = (gseqp' \<Gamma> (\<Gamma> pn), [], [])" definition graph_of_other :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> (('p, 'l) node list * ('p, 'l) link list) \<Rightarrow> 'p \<Rightarrow> ('p, 'l) node list * ('p, 'l) link list" where "graph_of_other \<Gamma> r pn = (let (next, acc, links) = gseqp \<Gamma> (\<Gamma> pn) in (acc @ fst r, links @ snd r))" definition graph_of_root :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> (('p, 'l) node list * ('p, 'l) link list) \<Rightarrow> 'p \<Rightarrow> ('p, 'l) node list * ('p, 'l) link list" where "graph_of_root \<Gamma> r pn = (let me = RootNode pn in let (next, acc, links) = gseqp \<Gamma> (\<Gamma> pn) in (acc @ fst r @ [me], map (ILink me) next @ links @ snd r))" definition graph_of_seqp :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> 'p list \<Rightarrow> ('p, 'l) node list * ('p, 'l) link list" where "graph_of_seqp \<Gamma> pns = map_prod (rev \<circ> remdups) remdups (foldl (graph_of_other \<Gamma>) (graph_of_root \<Gamma> ([], []) (hd pns)) (tl pns))" definition graph_of_seqps :: "('s, 'm, 'p, 'l) seqp_env \<Rightarrow> 'p list \<Rightarrow> ('p, 'l) node list * ('p, 'l) link list" where "graph_of_seqps \<Gamma> pns = map_prod (rev \<circ> remdups) remdups (foldl (graph_of_root \<Gamma>) ([], []) (List.rev pns))" end
[STATEMENT] lemma domains_comp: assumes "seq g f" shows "domains (g \<cdot> f) = domains f" [PROOF STATE] proof (prove) goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] have "domains (g \<cdot> f) = {a. ide a \<and> seq (g \<cdot> f) a}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. domains (g \<cdot> f) = {a. ide a \<and> seq (g \<cdot> f) a} [PROOF STEP] using domains_def ext [PROOF STATE] proof (prove) using this: domains ?f \<equiv> {a. ide a \<and> ?f \<cdot> a \<noteq> null} ?g \<cdot> ?f \<noteq> null \<Longrightarrow> seq ?g ?f goal (1 subgoal): 1. domains (g \<cdot> f) = {a. ide a \<and> seq (g \<cdot> f) a} [PROOF STEP] by auto [PROOF STATE] proof (state) this: domains (g \<cdot> f) = {a. ide a \<and> seq (g \<cdot> f) a} goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] also [PROOF STATE] proof (state) this: domains (g \<cdot> f) = {a. ide a \<and> seq (g \<cdot> f) a} goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] have "... = {a. ide a \<and> seq f a}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. {a. ide a \<and> seq (g \<cdot> f) a} = {a. ide a \<and> seq f a} [PROOF STEP] using assms ide_def match_1 match_3 [PROOF STATE] proof (prove) using this: seq g f ide ?a \<equiv> ?a \<cdot> ?a \<noteq> null \<and> (\<forall>f. (f \<cdot> ?a \<noteq> null \<longrightarrow> f \<cdot> ?a = f) \<and> (?a \<cdot> f \<noteq> null \<longrightarrow> ?a \<cdot> f = f)) \<lbrakk>seq ?h ?g; seq (?h \<cdot> ?g) ?f\<rbrakk> \<Longrightarrow> seq ?g ?f \<lbrakk>seq ?g ?f; seq ?h ?g\<rbrakk> \<Longrightarrow> seq (?h \<cdot> ?g) ?f goal (1 subgoal): 1. {a. ide a \<and> seq (g \<cdot> f) a} = {a. ide a \<and> seq f a} [PROOF STEP] by meson [PROOF STATE] proof (state) this: {a. ide a \<and> seq (g \<cdot> f) a} = {a. ide a \<and> seq f a} goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] also [PROOF STATE] proof (state) this: {a. ide a \<and> seq (g \<cdot> f) a} = {a. ide a \<and> seq f a} goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] have "... = domains f" [PROOF STATE] proof (prove) goal (1 subgoal): 1. {a. ide a \<and> seq f a} = domains f [PROOF STEP] using domains_def ext [PROOF STATE] proof (prove) using this: domains ?f \<equiv> {a. ide a \<and> ?f \<cdot> a \<noteq> null} ?g \<cdot> ?f \<noteq> null \<Longrightarrow> seq ?g ?f goal (1 subgoal): 1. {a. ide a \<and> seq f a} = domains f [PROOF STEP] by auto [PROOF STATE] proof (state) this: {a. ide a \<and> seq f a} = domains f goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] finally [PROOF STATE] proof (chain) picking this: domains (g \<cdot> f) = domains f [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: domains (g \<cdot> f) = domains f goal (1 subgoal): 1. domains (g \<cdot> f) = domains f [PROOF STEP] by blast [PROOF STATE] proof (state) this: domains (g \<cdot> f) = domains f goal: No subgoals! [PROOF STEP] qed
[STATEMENT] lemma d_a_shunting: "d(x) * a(y) \<le> d(z) \<longleftrightarrow> d(x) \<le> d(z) \<squnion> d(y)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (d x * a y \<le> d z) = (d x \<le> d z \<squnion> d y) [PROOF STEP] using aa.sba_dual.shunting_right d_def [PROOF STATE] proof (prove) using this: (a ?z \<le> a ?x \<squnion> a ?y) = (a ?z * a (a ?y) \<le> a ?x) d ?x = a (a ?x) goal (1 subgoal): 1. (d x * a y \<le> d z) = (d x \<le> d z \<squnion> d y) [PROOF STEP] by auto
module Maybes using Foldable, Functor, Funs, Monad import Base.show import Base.eltype, Base.== import Base.serialize, Base.deserialize import Base.length, Base.start, Base.next, Base.done, Base.isempty import Foldable.freduce import Functor.fmap import Monad.tycon, Monad.valtype import Monad.munit, Monad.mjoin, Monad.mbind, Monad.mzero, Monad.mplus export Maybe export just export isjust, isnothing export maybe, fromjust, frommaybe export arrayToMaybe, maybeToArray export catMaybes, mapMaybe export show export eltype, == export serialize, deserialize export length, start, next, done, isempty export freduce export fmap export tycon, valtype export munit, mjoin, mbind, mzero, mplus immutable Maybe{T} isjust::Bool value::T Maybe() = new(false) Maybe(value) = new(true,value) end just{T}(value::T) = Maybe{T}(value) isjust(m::Maybe) = m.isjust isnothing(m::Maybe) = !isjust(m) maybe{T}(zero, f::Callable, m::Maybe{T}; R::Type=eltype(f)) = isjust(m) ? f(m.value)::R : zero::R fromjust(m::Maybe) = (@assert isjust(m); m.value) frommaybe{T}(zero, m::Maybe{T}) = isjust(m) ? m.value : zero::T arrayToMaybe{T}(xs::Array{T,1}) = isempty(xs) ? Maybe{T}() : just(xs[1]) maybeToArray{T}(m::Maybe{T}) = isnothing(m) ? T[] : T[m.value] function catMaybes{T}(xs::Array{Maybe{T},1}) rs = T[] for x in xs maybe(rs, (r->push!(rs,r)), x) end rs end function mapMaybe{MT}(f::Callable, xs::Array{MT,1}; R::Type=eltype(f)) rs = R[] for x in xs maybe(rs, r->push!(rs, f(r)), x) end rs end show(io::IO, m::Maybe) = print(io, maybe("(?)", v->"(?$v)", m)) eltype{T}(m::Maybe{T}) = T function =={T}(m1::Maybe{T}, m2::Maybe{T}) if isnothing(m1) && isnothing(m2) return true end if isnothing(m1) || isnothing(m2) return false end fromjust(m1) == fromjust(m2) end function serialize{T}(s::AbstractSerializer, m::Maybe{T}) Base.serialize_type(s, Maybe{T}) write(s.io, m.isjust) if m.isjust write(s.io, fromjust(m)) end end function deserialize{T}(s::AbstractSerializer, ::Type{Maybe{T}}) isjust = read(s.io, Bool) if isjust value = read(s.io, T) Maybe{T}(value) else Maybe{T}() end end length(m::Maybe) = Int(isjust(m)) start(m::Maybe) = isnothing(m) next(m::Maybe, i) = fromjust(m), true done(m::Maybe, i) = i isempty(m::Maybe) = isnothing(m) @generated function freduce(op::Callable, zero, m::Maybe, ns::Maybe...; R::Type=eltype(op)) quote tuple($([:(@assert isjust(ns[$i]) == isjust(m)) for i in 1:length(ns)]...)) if isnothing(m) return zero::R end op(zero, fromjust(m), $([:(fromjust(ns[$i])) for i in 1:length(ns)]...))::R end end @generated function fmap(f::Callable, m::Maybe, ns::Maybe...; R::Type=eltype(f)) quote tuple($([:(@assert isjust(ns[$i]) == isjust(m)) for i in 1:length(ns)]...)) if isnothing(m) return Maybe{R}() end Maybe{R}(f(fromjust(m), $([:(fromjust(ns[$i])) for i in 1:length(ns)]...))::R) end end tycon{T,R}(::Type{Maybe{T}}, ::Type{R}) = Maybe{R} valtype{T}(::Type{Maybe{T}}) = T munit{T}(::Type{Maybe{T}}, x) = Maybe{T}(x) mjoin{T}(xss::Maybe{Maybe{T}}) = frommaybe(Maybe{T}(), xss) mbind{T}(f::Callable, xs::Maybe{T}; R::Type=eltype(f)) = mjoin(fmap(R=R, f, xs)) mzero{T}(::Type{Maybe{T}}) = Maybe{T}() mplus{T}(xs::Maybe{T}) = xs mplus{T}(xs::Maybe{T}, ys::Maybe{T}, zss::Maybe{T}...) = isjust(xs) ? xs : mplus(ys, zss...) end
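A hedged usage sketch of the module above (assuming its sibling modules Foldable, Functor, Funs, and Monad load; R is passed explicitly since eltype of an anonymous function may not be derivable):
using Maybes
m = just(3)
fmap(x -> x + 1, m; R=Int)              # shows as (?4)
mbind(x -> just(2x), m; R=Maybe{Int})   # shows as (?6)
frommaybe(0, Maybe{Int}())              # 0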
Load LFindLoad. From lfind Require Import LFind. From QuickChick Require Import QuickChick. From adtind Require Import goal33. Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed. Lemma conj10synthconj4 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv1 (plus Zero lv0))). Admitted. QuickChick conj10synthconj4.
[STATEMENT] lemma route_tables_fresher: "paodv i \<TTurnstile>\<^sub>A (recvmsg rreq_rrep_sn \<rightarrow>) onll \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>((\<xi>, _), _, (\<xi>', _)). \<forall>dip\<in>kD(rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> rt \<xi>')" [PROOF STATE] proof (prove) goal (1 subgoal): 1. paodv i \<TTurnstile>\<^sub>A (recvmsg rreq_rrep_sn \<rightarrow>) onll \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V (\<lambda>((\<xi>, uu_), uu_, \<xi>', uu_). \<forall>dip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> rt \<xi>') [PROOF STEP] proof (inv_cterms inv add: onl_invariant_sterms [OF aodv_wf dests_vD_inc_sqn [THEN invariant_restrict_inD]] onl_invariant_sterms [OF aodv_wf hop_count_positive [THEN invariant_restrict_inD]] onl_invariant_sterms [OF aodv_wf osn_rreq] onl_invariant_sterms [OF aodv_wf dsn_rrep] onl_invariant_sterms [OF aodv_wf addpreRT_welldefined [THEN invariant_restrict_inD]]) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRreq-:2; ((\<xi>, {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRreq-:3 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> osn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRreq-:3; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) 2. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). 
Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] fix \<xi> pp p' [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRreq-:2; ((\<xi>, {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRreq-:3 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> osn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRreq-:3; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) 2. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. 
\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] assume "(\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn)" and "{PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp" and "Suc 0 \<le> osn \<xi>" and *: "\<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip)" [PROOF STATE] proof (state) this: (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn) {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp Suc 0 \<le> osn \<xi> \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip) goal (2 subgoals): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRreq-:2; ((\<xi>, {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRreq-:3 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> osn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRreq-:2}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRreq-:3; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) 2. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. 
\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] show "\<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] proof [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] fix ip [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] assume "ip\<in>kD (rt \<xi>)" [PROOF STATE] proof (state) this: ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] moreover [PROOF STATE] proof (state) this: ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] with * [PROOF STATE] proof (chain) picking this: \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip) ip \<in> kD (rt \<xi>) [PROOF STEP] have "1 \<le> the (dhops (rt \<xi>) ip)" [PROOF STATE] proof (prove) using this: \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip) ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. 1 \<le> the (dhops (rt \<xi>) ip) [PROOF STEP] by simp [PROOF STATE] proof (state) this: 1 \<le> the (dhops (rt \<xi>) ip) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] moreover [PROOF STATE] proof (state) this: 1 \<le> the (dhops (rt \<xi>) ip) goal (1 subgoal): 1. \<And>ip. 
ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] from \<open>Suc 0 \<le> osn \<xi>\<close> [PROOF STATE] proof (chain) picking this: Suc 0 \<le> osn \<xi> [PROOF STEP] have "update_arg_wf (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) using this: Suc 0 \<le> osn \<xi> goal (1 subgoal): 1. update_arg_wf (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] .. [PROOF STATE] proof (state) this: update_arg_wf (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: ip \<in> kD (rt \<xi>) 1 \<le> the (dhops (rt \<xi>) ip) update_arg_wf (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] show "rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) using this: ip \<in> kD (rt \<xi>) 1 \<le> the (dhops (rt \<xi>) ip) update_arg_wf (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal (1 subgoal): 1. rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] by (rule rt_fresher_update) [PROOF STATE] proof (state) this: rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: \<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (oip \<xi>) (osn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal (1 subgoal): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. 
\<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] fix \<xi> pp p' [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] assume "(\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn)" and "{PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp" and "Suc 0 \<le> dsn \<xi>" and *: "\<forall>ip\<in>kD (rt \<xi>). 
Suc 0 \<le> the (dhops (rt \<xi>) ip)" [PROOF STATE] proof (state) this: (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn) {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp Suc 0 \<le> dsn \<xi> \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip) goal (1 subgoal): 1. \<And>p l \<xi> a q l' \<xi>' pp p'. \<lbrakk>l = PRrep-:0; ((\<xi>, {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'), \<tau>\<^sub>s, \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> seqp_sos \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V; PRrep-:1 \<in> labels \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V p'; (\<xi>, pp) \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p' \<in> sterms \<Gamma>\<^sub>A\<^sub>O\<^sub>D\<^sub>V pp; (\<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>, p') \<in> reachable (paodv i) (recvmsg rreq_rrep_sn); Suc 0 \<le> dsn \<xi>; \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip); p = {PRrep-:0}\<lbrakk>\<lambda>\<xi>. \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>\<rbrakk> p'; l' = PRrep-:1; a = \<tau>\<^sub>s; \<xi>' = \<xi>\<lparr>rt := update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})\<rparr>; q = p'\<rbrakk> \<Longrightarrow> \<forall>dipa\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>dipa\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] show "\<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] proof [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] fix ip [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] assume "ip\<in>kD (rt \<xi>)" [PROOF STATE] proof (state) this: ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] moreover [PROOF STATE] proof (state) this: ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] with * [PROOF STATE] proof (chain) picking this: \<forall>ip\<in>kD (rt \<xi>). 
Suc 0 \<le> the (dhops (rt \<xi>) ip) ip \<in> kD (rt \<xi>) [PROOF STEP] have "1 \<le> the (dhops (rt \<xi>) ip)" [PROOF STATE] proof (prove) using this: \<forall>ip\<in>kD (rt \<xi>). Suc 0 \<le> the (dhops (rt \<xi>) ip) ip \<in> kD (rt \<xi>) goal (1 subgoal): 1. 1 \<le> the (dhops (rt \<xi>) ip) [PROOF STEP] by simp [PROOF STATE] proof (state) this: 1 \<le> the (dhops (rt \<xi>) ip) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] moreover [PROOF STATE] proof (state) this: 1 \<le> the (dhops (rt \<xi>) ip) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] from \<open>Suc 0 \<le> dsn \<xi>\<close> [PROOF STATE] proof (chain) picking this: Suc 0 \<le> dsn \<xi> [PROOF STEP] have "update_arg_wf (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) using this: Suc 0 \<le> dsn \<xi> goal (1 subgoal): 1. update_arg_wf (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] .. [PROOF STATE] proof (state) this: update_arg_wf (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal (1 subgoal): 1. \<And>ip. ip \<in> kD (rt \<xi>) \<Longrightarrow> rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: ip \<in> kD (rt \<xi>) 1 \<le> the (dhops (rt \<xi>) ip) update_arg_wf (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] show "rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {})" [PROOF STATE] proof (prove) using this: ip \<in> kD (rt \<xi>) 1 \<le> the (dhops (rt \<xi>) ip) update_arg_wf (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal (1 subgoal): 1. rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) [PROOF STEP] by (rule rt_fresher_update) [PROOF STATE] proof (state) this: rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: \<forall>ip\<in>kD (rt \<xi>). rt \<xi> \<sqsubseteq>\<^bsub>ip\<^esub> update (rt \<xi>) (dip \<xi>) (dsn \<xi>, kno, val, Suc (hops \<xi>), sip \<xi>, {}) goal: No subgoals! [PROOF STEP] qed
(* Copyright (C) 2017 M.A.L. Marques This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: gga_exc *) $define gga_c_pbe_params $include "gga_c_regtpss.mpl" scan_e0_g := (rs, z, t) -> (1 + 4*A(rs, z, t)*t^2)^(-1/4): f2 := (rs, z, t) -> mbeta(rs, t)*(1 - scan_e0_g(rs, z, t))/(mgamma*A(rs, z, t)):
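As a quick sanity check of the algebra above, here is a minimal Python sketch of scan_e0_g. The function A(rs, z, t) is a stand-in for the definition pulled in from gga_c_regtpss.mpl, which is not reproduced here; the placeholder value is for illustration only.

# Hypothetical stand-in: the real A(rs, z, t) is defined in gga_c_regtpss.mpl.
def A(rs, z, t):
    return 1.0  # placeholder value, illustration only

# Direct transcription of the Maple definition above.
def scan_e0_g(rs, z, t):
    return (1.0 + 4.0 * A(rs, z, t) * t**2) ** (-0.25)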
[GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a = 0 hb : b = 0 ⊢ a + b = 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a = 0 hb : b ≤ 0 ⊢ a + b ≤ 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a = 0 hb : b < 0 ⊢ a + b < 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a ≤ 0 hb : b = 0 ⊢ a + b ≤ 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a < 0 hb : b = 0 ⊢ a + b < 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 inst✝ : StrictOrderedRing α a b : α ha : a < 0 hb : 0 < b this : -b * a > 0 ⊢ 0 < -(b * a) [PROOFSTEP] simpa [GOAL] α : Type u_1 inst✝ : OrderedRing α a b : α ha : a ≤ 0 hb : 0 < b this : -b * a ≥ 0 ⊢ b * a ≤ 0 [PROOFSTEP] simpa -- used alongside `mul_neg` and `mul_nonpos`, so has the same argument pattern for uniformity [GOAL] α : Type u_1 inst✝ : OrderedSemiring α a b : α ha : a = 0 x✝ : 0 < b ⊢ b * a = 0 [PROOFSTEP] simp [*] [GOAL] α : Type u_1 R : α → α → Prop inst✝ : Semiring α a b : α x✝ : R a 0 h : b = 0 ⊢ a * b = 0 [PROOFSTEP] simp [h] -- used in the `nlinarith` normalization steps. The `_` argument is for uniformity. [GOAL] α : Type u_1 R : α → α → Prop inst✝ : Semiring α a b : α h : a = 0 x✝ : R b 0 ⊢ a * b = 0 [PROOFSTEP] simp [h]
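For readers skimming the proof states above: the `simpa` steps both reduce to the same elementary sign computation in an ordered ring, namely

\[
a < 0,\; 0 < b \;\Longrightarrow\; (-b)\,a > 0 \;\Longrightarrow\; -(b\,a) > 0 \;\Longrightarrow\; b\,a < 0,
\]

with the non-strict variant ($a \le 0 \Rightarrow b\,a \le 0$) proved the same way from $(-b)\,a \ge 0$.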
using BCTRNN using DiffEqSensitivity using OrdinaryDiffEq import DiffEqFlux: FastChain, FastDense import Flux: ClipValue, ADAM # Not in Project.toml using Plots gr() include("half_cheetah_data_loader.jl") function train_cheetah(epochs, solver=nothing; sensealg=nothing, T=Float32, model_size=5, batchsize=1, seq_len=32, normalise=true, kwargs...) train_dl, test_dl, _, _ = get_2d_dl(T; batchsize, seq_len, normalise=true) @show size(first(train_dl)[1]) @show size(first(train_dl)[1][1]) f_in = 17 f_out = 17 n_neurons = model_size n_sens = n_neurons n_out = n_neurons model = FastChain(BCTRNN.Mapper(f_in), BCTRNN.LTC(f_in, n_neurons, solver, sensealg; n_sens, n_out), FastDense(n_out, f_out)) cb = BCTRNN.MyCallback(T; ecb=mycb, nepochs=epochs, nsamples=length(train_dl)) #opt = GalacticOptim.Flux.Optimiser(ClipValue(0.5), ADAM(0.02)) opt = BCTRNN.ClampBoundOptim(BCTRNN.get_bounds(model,T)..., ClipValue(T(0.8)), ADAM(T(0.005))) BCTRNN.optimize(model, BCTRNN.loss_seq, cb, opt, train_dl, epochs, T), model end #1173.351351 seconds (1.02 G allocations: 65.414 GiB, 1.82% gc time, 0.51% compilation time) train_cheetah(30, Tsit5(); sensealg=InterpolatingAdjoint(autojacvec=ReverseDiffVJP(true)), model_size=8, batchsize=10, abstol=1e-4, reltol=1e-3 ) train_cheetah(30, AutoTsit5(Rosenbrock23(autodiff=false)); sensealg=InterpolatingAdjoint(autojacvec=ReverseDiffVJP(true)), model_size=6, batchsize=10, abstol=1e-4, reltol=1e-3 )
/- Copyright (c) 2018 Johannes Hölzl. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Johannes Hölzl, Floris van Doorn, Mario Carneiro -/ import tactic.core /-! # `choose` tactic Performs Skolemization, that is, given `h : ∀ a:α, ∃ b:β, p a b |- G` produces `f : α → β, hf: ∀ a, p a (f a) |- G`. -/ namespace tactic /-- Given `α : Sort u`, `nonemp : nonempty α`, `p : α → Prop`, a context of local variables `ctxt`, and a pair of an element `val : α` and `spec : p val`, `mk_sometimes u α nonemp p ctx (val, spec)` produces another pair `val', spec'` such that `val'` does not have any free variables from elements of `ctxt` whose types are propositions. This is done by applying `function.sometimes` to abstract over all the propositional arguments. -/ meta def mk_sometimes (u : level) (α nonemp p : expr) : list expr → expr × expr → tactic (expr × expr) | [] (val, spec) := pure (val, spec) | (e :: ctxt) (val, spec) := do (val, spec) ← mk_sometimes ctxt (val, spec), t ← infer_type e, b ← is_prop t, pure $ if b then let val' := expr.bind_lambda val e in (expr.const ``function.sometimes [level.zero, u] t α nonemp val', expr.const ``function.sometimes_spec [u] t α nonemp p val' e spec) else (val, spec) /-- Changes `(h : ∀xs, ∃a:α, p a) ⊢ g` to `(d : ∀xs, a) (s : ∀xs, p (d xs)) ⊢ g` and `(h : ∀xs, p xs ∧ q xs) ⊢ g` to `(d : ∀xs, p xs) (s : ∀xs, q xs) ⊢ g`. `choose1` returns a pair of the second local constant it introduces, and the error result (see below). If `nondep` is true and `α` is inhabited, then it will remove the dependency of `d` on all propositional assumptions in `xs`. For example if `ys` are propositions then `(h : ∀xs ys, ∃a:α, p a) ⊢ g` becomes `(d : ∀xs, a) (s : ∀xs ys, p (d xs)) ⊢ g`. The second value returned by `choose1` is the result of nondep elimination: * `none`: nondep elimination was not attempted or was not applicable * `some none`: nondep elimination was successful * ``some (some `(nonempty α))``: nondep elimination was unsuccessful because we could not find a `nonempty α` instance -/ meta def choose1 (nondep : bool) (h : expr) (data : name) (spec : name) : tactic (expr × option (option expr)) := do t ← infer_type h, (ctxt, t) ← whnf t >>= open_pis, t ← whnf t transparency.all, match t with | `(@Exists %%α %%p) := do α_t ← infer_type α, expr.sort u ← whnf α_t transparency.all, (ne_fail, nonemp) ← if nondep then do let ne := expr.const ``nonempty [u] α, nonemp ← try_core (mk_instance ne <|> retrieve' (do m ← mk_meta_var ne, set_goals [m], ctxt.mmap' (λ e, do b ← is_proof e, monad.unlessb b $ (mk_app ``nonempty.intro [e] >>= note_anon none) $> ()), unfreeze_local_instances >> apply_instance, instantiate_mvars m)), pure (some (option.guard (λ _, nonemp.is_none) ne), nonemp) else pure (none, none), ctxt' ← if nonemp.is_some then ctxt.mfilter (λ e, bnot <$> is_proof e) else pure ctxt, value ← mk_local_def data (α.pis ctxt'), t' ← head_beta (p.app (value.mk_app ctxt')), spec ← mk_local_def spec (t'.pis ctxt), (value_proof, spec_proof) ← nonemp.elim pure (λ nonemp, mk_sometimes u α nonemp p ctxt) (expr.const ``classical.some [u] α p (h.mk_app ctxt), expr.const ``classical.some_spec [u] α p (h.mk_app ctxt)), dependent_pose_core [(value, value_proof.lambdas ctxt'), (spec, spec_proof.lambdas ctxt)], try (tactic.clear h), intro1, e ← intro1, pure (e, ne_fail) | `(%%p ∧ %%q) := do mk_app ``and.elim_left [h.mk_app ctxt] >>= lambdas ctxt >>= note data none, hq ← mk_app ``and.elim_right [h.mk_app ctxt] >>= lambdas ctxt >>= note spec none, try 
(tactic.clear h), pure (hq, none) | _ := fail "expected a term of the shape `∀xs, ∃a, p xs a` or `∀xs, p xs ∧ q xs`" end /-- Changes `(h : ∀xs, ∃as, p as ∧ q as) ⊢ g` to a list of functions `as`, and a final hypothesis on `p as` and `q as`. If `nondep` is true then the functions will be made to not depend on propositional arguments, when possible. The last argument is an internal recursion variable, indicating whether nondep elimination has been useful so far. The tactic fails if `nondep` is true, and nondep elimination is attempted at least once, and it fails every time it is attempted, in which case it returns an error complaining about the first attempt. -/ meta def choose (nondep : bool) : expr → list name → opt_param (option (option expr)) none → tactic unit | h [] _ := fail "expect list of variables" | h [n] (some (some ne)) := do g ← mk_meta_var ne, set_goals [g], -- make a reasonable error state fail "choose: failed to synthesize nonempty instance" | h [n] _ := do cnt ← revert h, intro n, intron (cnt - 1), return () | h (n::ns) ne_fail₁ := do (v, ne_fail₂) ← get_unused_name >>= choose1 nondep h n, choose v ns $ match ne_fail₁, ne_fail₂ with | none, _ := ne_fail₂ | some none, _ := some none | _, some none := some none | _, _ := ne_fail₁ end namespace interactive setup_tactic_parser /-- `choose a b h h' using hyp` takes an hypothesis `hyp` of the form `∀ (x : X) (y : Y), ∃ (a : A) (b : B), P x y a b ∧ Q x y a b` for some `P Q : X → Y → A → B → Prop` and outputs into context a function `a : X → Y → A`, `b : X → Y → B` and two assumptions: `h : ∀ (x : X) (y : Y), P x y (a x y) (b x y)` and `h' : ∀ (x : X) (y : Y), Q x y (a x y) (b x y)`. It also works with dependent versions. `choose! a b h h' using hyp` does the same, except that it will remove dependency of the functions on propositional arguments if possible. For example if `Y` is a proposition and `A` and `B` are nonempty in the above example then we will instead get `a : X → A`, `b : X → B`, and the assumptions `h : ∀ (x : X) (y : Y), P x y (a x) (b x)` and `h' : ∀ (x : X) (y : Y), Q x y (a x) (b x)`. Examples: ```lean example (h : ∀n m : ℕ, ∃i j, m = n + i ∨ m + j = n) : true := begin choose i j h using h, guard_hyp i : ℕ → ℕ → ℕ, guard_hyp j : ℕ → ℕ → ℕ, guard_hyp h : ∀ (n m : ℕ), m = n + i n m ∨ m + j n m = n, trivial end ``` ```lean example (h : ∀ i : ℕ, i < 7 → ∃ j, i < j ∧ j < i+i) : true := begin choose! f h h' using h, guard_hyp f : ℕ → ℕ, guard_hyp h : ∀ (i : ℕ), i < 7 → i < f i, guard_hyp h' : ∀ (i : ℕ), i < 7 → f i < i + i, trivial, end ``` -/ meta def choose (nondep : parse (tk "!")?) (first : parse ident) (names : parse ident*) (tgt : parse (tk "using" *> texpr)?) : tactic unit := do tgt ← match tgt with | none := get_local `this | some e := tactic.i_to_expr_strict e end, tactic.choose nondep.is_some tgt (first :: names), try (interactive.simp none none tt [simp_arg_type.expr ``(exists_prop)] [] (loc.ns $ some <$> names)), try (tactic.clear tgt) add_tactic_doc { name := "choose", category := doc_category.tactic, decl_names := [`tactic.interactive.choose], tags := ["classical logic"] } end interactive end tactic
import numpy as np
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation, NMF, TruncatedSVD
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
import scipy.sparse as sp
import warnings
import jieba
import re


# Build the stopword list
def get_stopwords_list():
    stopwords = [line.strip() for line in open('stopwords.txt', encoding='UTF-8').readlines()]
    stopwords.append('(')
    stopwords.append(')')
    return stopwords


# Segment a sentence with the Chinese tokenizer
def seg_depart(sentence):
    sentence_depart = jieba.lcut(sentence.strip())
    return sentence_depart


def move_stopwords(sentence_list, stopwords_list):
    # Drop stopwords
    out_list = []
    for word in sentence_list:
        if word not in stopwords_list:
            if word != '\t':
                out_list.append(word)
    return ' '.join(out_list)


def get_cut_list(x):
    sentence_depart = seg_depart(x)
    sentence_depart = move_stopwords(sentence_depart, stopwords)
    return sentence_depart


warnings.filterwarnings('ignore')
stopwords = get_stopwords_list()
base = pd.read_csv('./data/train/base_info.csv')
label = pd.read_csv('./data/train/entprise_info.csv')
base = pd.merge(base, label, on=['id'], how='left')
base['oploc_list'] = base['oploc'].apply(lambda x: ' '.join([x[16 * i:16 * (i + 1)] for i in range(int(len(x) / 16))]))
base['dom_list'] = base['dom'].apply(lambda x: ' '.join([x[16 * i:16 * (i + 1)] for i in range(int(len(x) / 16))]))
base['opscope_word_list'] = base['opscope'].apply(get_cut_list)

oploc__tfidf_vector = TfidfVectorizer(min_df=30).fit(
    base['oploc_list'].tolist())
dom__tfidf_vector = TfidfVectorizer(min_df=30).fit(
    base['dom_list'].tolist())
opscope_tfidf_vector = TfidfVectorizer(min_df=30).fit(
    base['opscope_word_list'].tolist())

data = base[['id', 'oploc_list', 'dom_list', 'opscope_word_list', 'label']]


def create_csr_mat_input(oploc_list, dom_list, opscope_word_list):
    return sp.hstack((oploc__tfidf_vector.transform(oploc_list),
                      dom__tfidf_vector.transform(dom_list),
                      opscope_tfidf_vector.transform(opscope_word_list)), format='csr')


tfidf_input = create_csr_mat_input(data['oploc_list'], data['dom_list'], data['opscope_word_list'])

result = pd.DataFrame({'id': data['id']})

lda = LatentDirichletAllocation(n_jobs=-1, random_state=2020, n_components=16)
result[[
    f'lda_{i + 1}' for i in range(lda.n_components)
]] = pd.DataFrame(lda.fit_transform(
    tfidf_input), index=result.index)

nmf = NMF(random_state=2020, n_components=16)
result[[
    f'nmf_{i + 1}' for i in range(nmf.n_components)
]] = pd.DataFrame(nmf.fit_transform(
    tfidf_input), index=result.index)

svd = TruncatedSVD(random_state=2020, n_components=32)
result[[
    f'svd_{i + 1}' for i in range(svd.n_components)
]] = pd.DataFrame(svd.fit_transform(
    tfidf_input), index=result.index)

result.to_csv('tfidf_decomposition.csv', index=False)
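A short usage sketch, assuming the file layout above: the decomposition features written to tfidf_decomposition.csv can be joined back onto the base table by id for downstream modeling.

import pandas as pd

feats = pd.read_csv('tfidf_decomposition.csv')
base = pd.read_csv('./data/train/base_info.csv')
train = base.merge(feats, on='id', how='left')  # one feature row per enterprise id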
clear all; randn('seed',0); format compact; param.num_threads=-1; % all cores (-1 by default) param.verbose=false; % verbosity, false by default param.lambda=0.1; % regularization parameter param.it0=1; % frequency for duality gap computations param.max_it=100; % maximum number of iterations param.L0=0.1; param.tol=1e-5; param.intercept=false; param.pos=false; graph.eta_g=[1 1 1 1 1]; graph.groups=sparse([0 0 0 1 0; 0 0 0 0 0; 0 0 0 0 0; 0 0 0 0 0; 0 0 1 0 0]); % g5 is included in g3, and g2 is included in g4 graph.groups_var=sparse([1 0 0 0 0; 1 0 0 0 0; 1 0 0 0 0 ; 1 1 0 0 0; 0 1 0 1 0; 0 1 0 1 0; 0 1 0 0 1; 0 0 0 0 1; 0 0 0 0 1; 0 0 1 0 0]); % represents direct inclusion relations X=randn(100,10); param.verbose=true; %X=eye(10); X=X-repmat(mean(X),[size(X,1) 1]); X=mexNormalize(X); Y=randn(100,1); Y=Y-repmat(mean(Y),[size(Y,1) 1]); Y=mexNormalize(Y); W0=zeros(size(X,2),size(Y,2)); % Regression experiments % 100 regression problems with the same design matrix X. fprintf('\nVarious regression experiments\n'); param.compute_gram=true; fprintf('\nFISTA + Regression graph\n'); param.loss='square'; param.regul='graph'; tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); fprintf('\nADMM + Regression graph\n'); param.admm=true; param.lin_admm=true; param.c=1; param.delta=1; tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, stopping criterion: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); param.admm=false; param.max_it=5; param.it0=1; tic [W optim_info]=mexFistaGraph(Y,X,W,graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % works also with non graph-structured regularization. graph is ignored fprintf('\nFISTA + Regression Fused-Lasso\n'); param.regul='fused-lasso'; param.lambda2=0.01; param.lambda3=0.01; % tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),t,mean(optim_info(4,:))); fprintf('\nFISTA + Regression graph with intercept \n'); param.intercept=true; param.regul='graph'; tic [W optim_info]=mexFistaGraph(Y,[X ones(size(X,1),1)],[W0; zeros(1,size(W0,2))],graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); param.intercept=false; % Classification fprintf('\nOne classification experiment\n'); Y=2*double(randn(100,size(Y,2)) > 0)-1; fprintf('\nFISTA + Logistic + graph-linf\n'); param.regul='graph'; param.loss='logistic'; param.lambda=0.01; tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % can be used of course with other regularization functions, intercept,... 
% Multi-Class classification Y=double(ceil(5*rand(100,size(Y,2)))-1); param.loss='multi-logistic'; param.regul='graph'; fprintf('\nFISTA + Multi-Class Logistic + graph \n'); tic nclasses=max(Y(:))+1; W0=zeros(size(X,2),nclasses*size(Y,2)); [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % can be used of course with other regularization functions, intercept,... % Multi-Task regression Y=randn(100,size(Y,2)); Y=Y-repmat(mean(Y),[size(Y,1) 1]); Y=mexNormalize(Y); param.compute_gram=false; param.verbose=true; % verbosity, false by default W0=zeros(size(X,2),size(Y,2)); param.loss='square'; fprintf('\nFISTA + Regression multi-task-graph \n'); param.regul='multi-task-graph'; param.lambda2=0.01; tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); toc fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % Multi-Task Classification fprintf('\nFISTA + Logistic + multi-task-graph \n'); param.regul='multi-task-graph'; param.lambda2=0.01; param.loss='logistic'; Y=2*double(randn(100,size(Y,2)) > 0)-1; tic [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); toc fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % Multi-Class + Multi-Task Regularization param.verbose=false; fprintf('\nFISTA + Multi-Class Logistic +multi-task-graph \n'); Y=double(ceil(5*rand(100,size(Y,2)))-1); param.loss='multi-logistic'; param.regul='multi-task-graph'; tic nclasses=max(Y(:))+1; W0=zeros(size(X,2),nclasses*size(Y,2)); [W optim_info]=mexFistaGraph(Y,X,W0,graph,param); t=toc; fprintf('mean loss: %f, mean relative duality_gap: %f, time: %f, number of iterations: %f\n',mean(optim_info(1,:)),mean(optim_info(3,:)),t,mean(optim_info(4,:))); % can be used of course with other regularization functions, intercept,...
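For reference, the `graph` regularizer exercised throughout both scripts is, per the SPAMS documentation, a weighted sum of $\ell_\infty$ norms over the (possibly overlapping) groups encoded by graph.groups and graph.groups_var:

\[
\Omega(w) \;=\; \sum_{g \in \mathcal{G}} \eta_g \, \|w_g\|_\infty ,
\]

with the weights $\eta_g$ supplied in graph.eta_g.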
# Linear First Order System

This notebook demonstrates simulation of a linear first-order system in Pyomo using two distinct approaches. The first uses the `Simulator` class from Pyomo, which can employ the SciPy or CASADi integrators; the second discretizes the model with a `pyomo.dae` transformation and solves the resulting algebraic problem with an optimizer such as Ipopt (shown commented out in the cell below).

```
%%capture
!pip install -q pyomo
```

## First-Order Differential Equation with Initial Condition

The following cell implements a solution to a first-order linear model in the form

\begin{align}
\tau\frac{dy}{dt} + y & = K u(t) \\
\end{align}

where $\tau$ and $K$ are model parameters, and $u(t)$ is an external process input.

```
%matplotlib inline
from pyomo.environ import *
from pyomo.dae import *
import matplotlib.pyplot as plt

tf = 10
tau = 1
K = 5

# define u(t)
u = lambda t: 1

# create a model object
model = ConcreteModel()

# define the independent variable
model.t = ContinuousSet(bounds=(0, tf))

# define the dependent variables
model.y = Var(model.t)
model.dydt = DerivativeVar(model.y)

# fix the initial value of y
model.y[0].fix(0)

# define the differential equation as a constraint
model.ode = Constraint(model.t, rule=lambda model, t: tau*model.dydt[t] + model.y[t] == K*u(t))

# transform dae model to discrete optimization problem
#TransformationFactory('dae.finite_difference').apply_to(model, nfe=50, method='BACKWARD')

# solve the model
#SolverFactory('ipopt').solve(model).write()

tsim, profiles = Simulator(model, package='scipy').simulate(numpoints=100)
plt.plot(tsim, profiles)

# access elements of a ContinuousSet object
t = [t for t in model.t]

# access elements of a Var object
y = [model.y[t]() for t in model.y]

plt.plot(t,y)
plt.xlabel('time / sec')
plt.ylabel('response')
plt.title('Response of a linear first-order ODE')
```
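Since the model has a closed-form solution, the plot is easy to check analytically: for $y(0)=0$ and the constant input $u(t)\equiv 1$,

\begin{align}
y(t) = K\left(1 - e^{-t/\tau}\right),
\end{align}

so with $K=5$ and $\tau=1$ the simulated response should reach about $5\,(1-e^{-1}) \approx 3.16$ at $t=1$ and level off near $5$.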
Formal statement is: lemma mult_right [simp]: "f \<in> L F (g) \<Longrightarrow> (\<lambda>x. f x * h x) \<in> L F (\<lambda>x. g x * h x)" Informal statement is: If $f$ belongs to the Landau class $L_F(g)$ (a generic Landau symbol, such as big-O, taken along the filter $F$), then $f \cdot h$ belongs to $L_F(g \cdot h)$.
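A one-line proof sketch for the big-O instance of the generic Landau symbol: if $|f(x)| \le c\,|g(x)|$ eventually along $F$, then $|f(x)\,h(x)| = |f(x)|\,|h(x)| \le c\,|g(x)|\,|h(x)|$ eventually along $F$, so $f \cdot h \in O_F(g \cdot h)$.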
library(ggplot2) gDat <- read.delim("gapminderDataFiveYear.txt") str(gDat) ## create a tiny dataset, reorder country based on life expectancy, test various ## strategies for writing files ... maximum portability vs. retaining factor ## levels tinyDat <- subset(gDat, country %in% c("Cambodia", "Rwanda", "Poland") & year > 1995) tinyDat str(tinyDat) tinyDat <- droplevels(tinyDat) str(tinyDat) ggplot(tinyDat, aes(x = year, y = lifeExp, group = country)) + geom_line(aes(color = country)) aggregate(lifeExp ~ country, tinyDat, mean) with(tinyDat, reorder(country, lifeExp)) tinyDat$country <- with(tinyDat, reorder(country, -1 * lifeExp)) ggplot(tinyDat, aes(x = year, y = lifeExp, group = country)) + geom_line(aes(color = country)) ## write.table() ... read.table() levels(tinyDat$country) write.table(tinyDat, "tinyDat.txt", quote = FALSE, sep = "\t", row.names = FALSE) rm(tinyDat) tinyDat <- read.delim("tinyDat.txt") levels(tinyDat$country) tinyDat$country <- with(tinyDat, reorder(country, -1 * lifeExp)) ## dput() ... dget() levels(tinyDat$country) dput(tinyDat, "tinyDat-DPUT.txt") rm(tinyDat) tinyDat <- dget("tinyDat-DPUT.txt") levels(tinyDat$country) ## saveRDS() ... readRDS() levels(tinyDat$country) saveRDS(tinyDat, "tinyDat.rds") rm(tinyDat) tinyDat <- readRDS("tinyDat.rds") levels(tinyDat$country) ## writing figures to file pdf("tinyDat-plot.pdf") ggplot(tinyDat, aes(x = year, y = lifeExp, group = country)) + geom_line(aes(color = country)) dev.off() ggplot(tinyDat, aes(x = year, y = lifeExp, group = country)) + geom_line(aes(color = country)) dev.print(pdf, "tinyDat-plot.pdf") ggsave("tinyDat-plot.pdf")
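The same persistence pitfall exists outside R. A minimal pandas sketch of the analogous behaviour, with hypothetical file names: writing to CSV discards the ordered-category dtype, while a binary format such as pickle round-trips it, mirroring the write.table vs saveRDS contrast above.

import pandas as pd

df = pd.DataFrame({'country': pd.Categorical(
    ['Poland', 'Cambodia', 'Rwanda'],
    categories=['Poland', 'Cambodia', 'Rwanda'], ordered=True)})

df.to_csv('tinyDat.csv', index=False)   # CSV: category order is lost
back = pd.read_csv('tinyDat.csv')       # comes back as plain strings

df.to_pickle('tinyDat.pkl')             # binary: dtype and level order survive
back2 = pd.read_pickle('tinyDat.pkl')
assert back2['country'].cat.ordered     # still an ordered categorical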
# PAWS preprocessing - dataset sanitization # Lily Xu # December 2018 library(raster) outFilename <- paste(path, outputPath, "patrolObservationClean.csv", sep="") # note: dateFormat is an optional parameter sanitizeData <- function(rawDataFilename, outFilename, boundary, crs_in, startYear, endYear, dateFormat=NULL) { data <- read.csv(rawDataFilename) # transform boundary to CRS of input data boundary <- spTransform(boundary, CRS(crs_in)) bounds <- extent(boundary) # locate out-of-bounds data rowsToRemove <- (data$X == 0 & data$Y == 0) rowsToRemove <- (data$X < xmin(bounds) | data$X > xmax(bounds)) + rowsToRemove rowsToRemove <- (data$Y < ymin(bounds) | data$Y > ymax(bounds)) + rowsToRemove if (is.null(dateFormat) | missing(dateFormat)) { dateFormat <- c("%b %d, %Y", "%d-%b-%y", "%m/%d/%Y") } else { dateFormat <- c(dateFormat) } # format dates correctly data$Patrol.End.Date <- format(as.Date(data$Patrol.End.Date, tryFormats=dateFormat, tz=timezone), "%Y-%m-%d") data$Patrol.Start.Date <- format(as.Date(data$Patrol.Start.Date, tryFormats=dateFormat, tz=timezone), "%Y-%m-%d") data$Waypoint.Date <- format(as.Date(data$Waypoint.Date, tryFormats=dateFormat, tz=timezone), "%Y-%m-%d") # add year and month columns data$Year <- format(as.Date(data$Waypoint.Date, tz=timezone), format="%Y") data$Month <- format(as.Date(data$Waypoint.Date, tz=timezone), format="%m") # remove dates beyond start and end years rowsToRemove <- (data$Year < startYear | data$Year > endYear) + rowsToRemove # remove out-of-bounds data rowsToRemove <- as.logical(rowsToRemove) data <- subset(data, !rowsToRemove) # patrol IDs are given as strings, so we assign a unique numerical ID to each station patrolIDInt <- as.numeric(factor(data$Patrol.ID, levels=unique(data$Patrol.ID))) # patrol day is an int, representing the day number for that particular waypoint patrolDay <- anydate(data$Waypoint.Date) - anydate(data$Patrol.Start.Date) + 1 # ID_New is the unique IDs used in the trajectories data$ID_New <- as.numeric(paste(patrolIDInt, patrolDay, sep=".")) write.csv(data, outFilename, row.names=FALSE) return(data) } # read in data data <- sanitizeData(rawDataFilename, outFilename, boundary, crs_in, startYear, endYear, dateFormat)
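For comparison, a compact pandas sketch of the same sanitization steps, assuming the column names used above; the CRS transform and the patrol-ID bookkeeping are omitted.

import pandas as pd

def sanitize(df, xmin, xmax, ymin, ymax, start_year, end_year):
    # Drop (0, 0) placeholders and waypoints outside the boundary box.
    keep = df['X'].between(xmin, xmax) & df['Y'].between(ymin, ymax)
    keep &= ~((df['X'] == 0) & (df['Y'] == 0))
    out = df[keep].copy()
    # Normalize dates and derive year/month columns, as in the R version.
    wp = pd.to_datetime(out['Waypoint.Date'])
    out['Year'], out['Month'] = wp.dt.year, wp.dt.month
    return out[out['Year'].between(start_year, end_year)]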
ProHold Ball End L-Wrench with ProGuard finish holds the screw tight on the tool every time! The button is non-magnetic, and the rotational action is super smooth: the ProHold button neither interferes with the action nor compromises the tool's strength.
-- {-# OPTIONS --no-coverage-check #-} -- {-# OPTIONS -v tc.lhs:40 #-} module FlexibleFunArity where open import Common.Equality data Bool : Set where true false : Bool data Maybe (A : Set) : Set where nothing : Maybe A just : A → Maybe A Case : {A : Set} → Maybe A → Set → Set → Set Case nothing B C = B Case (just _) B C = C sample : {A : Set} (m : Maybe A) → Case m Bool (Maybe A → Bool) sample (just a) (just b) = true sample (just a) nothing = false sample nothing = true g : Bool -> Bool -> Bool g false true = false g true = \ x -> true g false false = true -- g true false = false -- Unreachable clause testg1 : ∀ {x} → g true x ≡ true testg1 = refl testg2 : g false true ≡ false testg2 = refl testg3 : g false false ≡ true testg3 = refl T : Bool -> Set T true = Bool T false = Bool -> Bool f : (b : Bool) -> T b f false true = false f false false = true f true = true testf1 : f true ≡ true testf1 = refl testf2 : f false true ≡ false testf2 = refl testf3 : f false false ≡ true testf3 = refl {- checking clause f false true starts with f (b : Bool) : T b splits on b f true -- no match, discard f false -- matches instantiate type f false : T false = Bool -> Bool extend clause f false (y : Bool) : Bool split on y f false true -- matches f false false -- no match, discard done -} {- coverage check starts with f (x : Bool) splits on x f true -- finds clause 1 f false NEW: remaing clauses have bigger arity, so expands to f false (y : Bool) splits on y f false true -- finds clause 2 f false false -- finds clause 3 done -}
#!/usr/bin/env python
# coding: utf-8
from sklearn.model_selection import train_test_split
from skimage import transform
from functools import partial
import tensorflow as tf
import cv2
import os
import numpy as np
import argparse
import keras
from keras.layers import Reshape, Activation, Dropout, Flatten, Dense,Conv2D,GlobalAveragePooling2D
from keras import Model
import json

# ## Define the Data Augmentation Transformation and Data Generator

affine_matrix1 = np.array([[1, 0, 0], [0.02, 1, -5], [0, 0, 1]])
affine_matrix2 = np.array([[1, 0, 0], [0.05, 1, -20], [0, 0, 1]])
affine_matrix3 = np.array([[1, 0.1, 0], [0, 1, 0], [0, 0, 1]])
affine_matrix4 = np.array([[1, 0.2, 0], [0, 1, 0], [0, 0, 1]])
affine_matrix5 = np.array([[1, -0.1, 0], [0, 1, 0], [0, 0, 1]])
affine_matrix6 = np.array([[1, -0.2, 0], [0, 1, 0], [0, 0, 1]])
affine_matrix7 = np.array([[1, 0, 0], [-0.02, 1, 5], [0, 0, 1]])
affine_matrix8 = np.array([[1, 0, 0], [-0.05, 1, 20], [0, 0, 1]])
affine_matrix9 = np.array([[1, -0.1, 0], [0.02, 1, -5], [0, 0, 1]])
affine_matrix10 = np.array([[1, 0.1, 0], [-0.02, 1, 5], [0, 0, 1]])
affine_matrix11 = np.array([[1, 0.2, 0], [-0.05, 1, 10], [0, 0, 1]])
affine_matrix12 = np.array([[1, -0.2, 0], [0.05, 1, -20], [0, 0, 1]])


class DataGenerator(keras.utils.Sequence):
    'Generates data for Keras'

    def __init__(self, X, y, batch_size=32, shuffle=True):
        'Initialization'
        self.X = X
        self.y = y
        self.batch_size = batch_size
        self.list_IDs = list(range(len(X)))
        self.n_channels = X.shape[-1]
        self.n_classes = len(y)
        self.shuffle = shuffle
        self.affine_matrice = [
            affine_matrix1, affine_matrix2, affine_matrix3, affine_matrix4,
            affine_matrix5, affine_matrix6, affine_matrix7, affine_matrix8,
            affine_matrix9, affine_matrix10, affine_matrix11, affine_matrix12
        ]
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        # Find list of IDs
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        # Generate data
        X_batch = self.X[list_IDs_temp]
        y_batch = self.y[list_IDs_temp]
        # Perform Augmentation
        affine_matrix = self.affine_matrice[index % len(self.affine_matrice)]
        affine_transform = partial(transform.warp, inverse_map=transform.AffineTransform(matrix=affine_matrix))
        affine_indice = np.random.permutation(self.batch_size)[:self.batch_size >> 1]
        for idx in affine_indice:
            X_batch[idx] = affine_transform(X_batch[idx])
        return X_batch, y_batch

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)


def SaveIdNameMap(target_names,output_path):
    id_name_map = dict(zip(range(len(target_names)), target_names))
    outputfile=os.path.join(output_path,'labeled_data','IdNameMap.json')
    with open(outputfile, 'w') as outfile:
        json.dump(id_name_map, outfile)
    print('writing id-name map to '+outputfile)
    return 1


def main(trainset,weight_path,output_path):
    base_dir = trainset
    clean_names = lambda x: [i for i in x if i[0]!='.']
    target_names = os.listdir(base_dir)
    target_names = clean_names(target_names)
    num,cls = 0,[]
    for target_name in target_names:
        num += len(os.listdir(os.path.join(base_dir,target_name)))
        cls.append(len(os.listdir(os.path.join(base_dir,target_name))))
    class_number = dict(zip(target_names, cls))
    SaveIdNameMap(target_names,output_path)
    print(f'There are {num} samples in the dataset. 
\n For each class, we have {class_number}') height,width = 80,800 X = np.zeros((num, height, width), dtype='float') y = np.zeros((num,), dtype='int') idx = 0 for cls_id, target_name in enumerate(target_names): for path in os.listdir(os.path.join(base_dir,target_name)): img = cv2.imread(os.path.join(base_dir,target_name,path), cv2.IMREAD_GRAYSCALE) #import pdb;pdb.set_trace() X[idx, :, :] = cv2.resize(img, (width, height)) y[idx] = cls_id idx += 1 X_train, X_val, y_train, y_val = train_test_split(X[...,None], np.eye(len(target_names))[y], test_size=0.15) train_generator = DataGenerator(X_train, y_train, 64) val_generator = DataGenerator(X_val, y_val, 64) #model base_model = keras.applications.mobilenet.MobileNet(input_shape=(height, width, 1), alpha=1.0, depth_multiplier=1, dropout=1e-2, include_top=True, weights=weight_path, classes=7) with tf.name_scope("output"): x = base_model.get_layer("conv_pw_13_relu").output x = GlobalAveragePooling2D(data_format=None)(x) x = Dropout(0.5)(x) predictions = Dense(len(target_names), activation='softmax')(x) model = Model(inputs=base_model.input, outputs=predictions) #for layer in base_model.layers: # layer.trainable = True model.compile(optimizer='rmsprop', loss="categorical_crossentropy", metrics=["accuracy"]) model.summary() #import pdb;pdb.set_trace() mc = keras.callbacks.ModelCheckpoint(output_path+'/models/weights{epoch:02d}.h5', save_weights_only=True, period=1) reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc', factor=0.0050, patience=6, mode='auto', cooldown=0, min_lr=0) model.fit_generator(train_generator, validation_data=val_generator, epochs=30, callbacks=[reduce_lr,mc]) if __name__ == '__main__': # construct the argument parse and parse the arguments parser = argparse.ArgumentParser(description='Train CNN model') parser.add_argument('--trainset', type=str) parser.add_argument('--weight_path',type=str) parser.add_argument('--output_path', type=str) parser.add_argument('--GPU_num', type=str) args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU_num main(args.trainset,args.weight_path,args.output_path)
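To visualize what DataGenerator does to half of each batch, here is a minimal sketch applying one of the shear matrices above to a dummy image, using the same skimage call as the generator; the random image stands in for a real training sample.

import numpy as np
from functools import partial
from skimage import transform

M = np.array([[1, 0.1, 0],   # same values as affine_matrix3 above
              [0, 1,   0],
              [0, 0,   1]])

img = np.random.rand(80, 800)  # the (height, width) used in main()
shear = partial(transform.warp,
                inverse_map=transform.AffineTransform(matrix=M))
augmented = shear(img)          # sheared copy, same shape as the input
assert augmented.shape == img.shape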
module Loops import Data.Strings import System %default total countdown : HasIO io => (secs : Nat) -> io () countdown Z = putStrLn "Lift-off!" countdown (S k) = do putStrLn (show (S k)) usleep 1000000 countdown k readNumber : HasIO io => io (Maybe Nat) readNumber = do ns <- getLine if all isDigit (unpack ns) then pure $ Just (stringToNatOrZ ns) else pure Nothing partial countdowns : HasIO io => io () countdowns = do putStr "Enter starting number: " Just num <- readNumber | Nothing => putStrLn "We accept only numbers, sorry!" countdown num putStr "Try again? [y/n]: " choice <- getLine if choice == "y" || choice == "yes" then countdowns else pure ()
proposition convex_hull_indexed: fixes S :: "'a::real_vector set" shows "convex hull S = {y. \<exists>k u x. (\<forall>i\<in>{1::nat .. k}. 0 \<le> u i \<and> x i \<in> S) \<and> (sum u {1..k} = 1) \<and> (\<Sum>i = 1..k. u i *\<^sub>R x i) = y}" (is "?xyz = ?hull")
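In conventional notation, the proposition characterizes the convex hull as the set of all finite convex combinations of points of $S$:

\[
\mathrm{conv}(S) \;=\; \Bigl\{\, \sum_{i=1}^{k} u_i\, x_i \;\Bigm|\; k \in \mathbb{N},\; x_i \in S,\; u_i \ge 0 \text{ for } 1 \le i \le k,\; \sum_{i=1}^{k} u_i = 1 \,\Bigr\}.
\]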
function [coord,R]=calcgrip(Tr,Tc)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Depending on where the box is and where the robot is, compute which
% side of the box is at the least distance from the current position,
% and return where the box should be grasped and with what orientation.
Tc(3,4)=Tc(3,4)+0.05;
Tc1=Tc(1:3,4);
Tc2=Tc(1:3,4);
Tc3=Tc(1:3,4);
Tc4=Tc(1:3,4);
Tc1(1)=Tc(1,4)+0.14;  % was Tc1(1,4), which would grow the 3x1 vector to 3x4
dist(1,1)=norm(Tc1-Tr(1:3,4));
Tc2(1)=Tc(1,4)-0.14;
dist(2,1)=norm(Tc2-Tr(1:3,4));
Tc3(2)=Tc(2,4)+0.10;  % y offset for the side faces, matching coord below
dist(3,1)=norm(Tc3-Tr(1:3,4));
Tc4(2)=Tc(2,4)-0.10;
dist(4,1)=norm(Tc4-Tr(1:3,4));
[M,I]=min(dist);
if I==1
coord=[0.14;0;0.05];
R=[0 0 1 ;0 1 0;1 0 0];
end
if I==2
coord=[-0.14;0;0.05];
R=[0 0 -1 ;0 1 0;-1 0 0];
end
if I==3
coord=[0;0.10;0.05];
R=[0 -1 0 ;0 0 1; -1 0 0];
end
if I==4
coord=[0;-0.10;0.05];
R=[0 1 0 ;0 0 -1; 1 0 0];
end
end
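A Python re-sketch of the same nearest-side selection, assuming Tr and Tc are 4x4 homogeneous transforms for the robot and the box; the grasp orientations are copied verbatim from the MATLAB rotation matrices above.

import numpy as np

# Side offsets and grasp orientations, in the same order as the MATLAB code.
OFFSETS = [np.array([0.14, 0.0, 0.0]), np.array([-0.14, 0.0, 0.0]),
           np.array([0.0, 0.10, 0.0]), np.array([0.0, -0.10, 0.0])]
ROTS = [np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]),
        np.array([[0, 0, -1], [0, 1, 0], [-1, 0, 0]]),
        np.array([[0, -1, 0], [0, 0, 1], [-1, 0, 0]]),
        np.array([[0, 1, 0], [0, 0, -1], [1, 0, 0]])]

def calc_grip(Tr, Tc):
    box = Tc[:3, 3] + np.array([0.0, 0.0, 0.05])   # approach 5 cm above the box
    robot = Tr[:3, 3]
    i = int(np.argmin([np.linalg.norm(box + o - robot) for o in OFFSETS]))
    coord = OFFSETS[i] + np.array([0.0, 0.0, 0.05])
    return coord, ROTS[i]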
{-# OPTIONS --without-K --rewriting #-} open import lib.Basics open import lib.Equivalence2 open import lib.Function2 open import lib.NType2 open import lib.types.Group open import lib.types.Pi open import lib.types.Subtype open import lib.types.Truncation open import lib.groups.Homomorphism open import lib.groups.SubgroupProp module lib.groups.Isomorphism where GroupStructureIso : ∀ {i j} {GEl : Type i} {HEl : Type j} (GS : GroupStructure GEl) (HS : GroupStructure HEl) → Type (lmax i j) GroupStructureIso GS HS = Σ (GroupStructureHom GS HS) (λ φ → is-equiv (GroupStructureHom.f φ)) infix 30 _≃ᴳˢ_ -- [ˢ] for structures _≃ᴳˢ_ = GroupStructureIso GroupIso : ∀ {i j} (G : Group i) (H : Group j) → Type (lmax i j) GroupIso G H = Σ (G →ᴳ H) (λ φ → is-equiv (GroupHom.f φ)) infix 30 _≃ᴳ_ _≃ᴳ_ = GroupIso ≃ᴳˢ-to-≃ᴳ : ∀ {i j} {G : Group i} {H : Group j} → (Group.group-struct G ≃ᴳˢ Group.group-struct H) → (G ≃ᴳ H) ≃ᴳˢ-to-≃ᴳ (φ , φ-is-equiv) = →ᴳˢ-to-→ᴳ φ , φ-is-equiv ≃-to-≃ᴳ : ∀ {i j} {G : Group i} {H : Group j} (e : Group.El G ≃ Group.El H) → preserves-comp (Group.comp G) (Group.comp H) (–> e) → G ≃ᴳ H ≃-to-≃ᴳ (f , f-is-equiv) pres-comp = group-hom f pres-comp , f-is-equiv ≃-to-≃ᴳˢ : ∀ {i j} {GEl : Type i} {HEl : Type j} {GS : GroupStructure GEl} {HS : GroupStructure HEl} (e : GEl ≃ HEl) → preserves-comp (GroupStructure.comp GS) (GroupStructure.comp HS) (–> e) → GS ≃ᴳˢ HS ≃-to-≃ᴳˢ (f , f-is-equiv) pres-comp = group-structure-hom f pres-comp , f-is-equiv private inverse-preserves-comp : ∀ {i j} {A : Type i} {B : Type j} (A-comp : A → A → A) (B-comp : B → B → B) {f : A → B} (f-ie : is-equiv f) → preserves-comp A-comp B-comp f → preserves-comp B-comp A-comp (is-equiv.g f-ie) inverse-preserves-comp Ac Bc ie pc b₁ b₂ = let open is-equiv ie in ap2 (λ w₁ w₂ → g (Bc w₁ w₂)) (! (f-g b₁)) (! (f-g b₂)) ∙ ! 
(ap g (pc (g b₁) (g b₂))) ∙ g-f (Ac (g b₁) (g b₂)) module GroupStructureIso {i j} {GEl : Type i} {HEl : Type j} {GS : GroupStructure GEl} {HS : GroupStructure HEl} (iso : GroupStructureIso GS HS) where f-shom : GS →ᴳˢ HS f-shom = fst iso open GroupStructureHom {GS = GS} {HS = HS} f-shom public f-is-equiv : is-equiv f f-is-equiv = snd iso open is-equiv f-is-equiv public f-equiv : GEl ≃ HEl f-equiv = f , f-is-equiv g-shom : HS →ᴳˢ GS g-shom = group-structure-hom g (inverse-preserves-comp (GroupStructure.comp GS) (GroupStructure.comp HS) f-is-equiv pres-comp) g-is-equiv : is-equiv g g-is-equiv = is-equiv-inverse f-is-equiv g-equiv : HEl ≃ GEl g-equiv = g , g-is-equiv module GroupIso {i j} {G : Group i} {H : Group j} (iso : GroupIso G H) where f-hom : G →ᴳ H f-hom = fst iso open GroupHom {G = G} {H = H} f-hom public f-is-equiv : is-equiv f f-is-equiv = snd iso open is-equiv f-is-equiv public f-equiv : Group.El G ≃ Group.El H f-equiv = f , f-is-equiv g-hom : H →ᴳ G g-hom = group-hom g (inverse-preserves-comp (Group.comp G) (Group.comp H) f-is-equiv pres-comp) g-is-equiv : is-equiv g g-is-equiv = is-equiv-inverse f-is-equiv g-equiv : Group.El H ≃ Group.El G g-equiv = g , g-is-equiv idiso : ∀ {i} (G : Group i) → (G ≃ᴳ G) idiso G = idhom G , idf-is-equiv _ idsiso : ∀ {i} {GEl : Type i} (GS : GroupStructure GEl) → (GS ≃ᴳˢ GS) idsiso GS = idshom GS , idf-is-equiv _ {- equality of isomomorphisms -} abstract group-hom=-to-iso= : ∀ {i j} {G : Group i} {H : Group j} {φ ψ : G ≃ᴳ H} → GroupIso.f-hom φ == GroupIso.f-hom ψ → φ == ψ group-hom=-to-iso= = Subtype=-out (is-equiv-prop ∘sub GroupHom.f) group-iso= : ∀ {i j} {G : Group i} {H : Group j} {φ ψ : G ≃ᴳ H} → GroupIso.f φ == GroupIso.f ψ → φ == ψ group-iso= {H = H} p = group-hom=-to-iso= $ group-hom= p {- compositions -} infixr 80 _∘eᴳˢ_ _∘eᴳ_ _∘eᴳˢ_ : ∀ {i j k} {GEl : Type i} {HEl : Type j} {KEl : Type k} {GS : GroupStructure GEl} {HS : GroupStructure HEl} {KS : GroupStructure KEl} → HS ≃ᴳˢ KS → GS ≃ᴳˢ HS → GS ≃ᴳˢ KS (φ₂ , ie₂) ∘eᴳˢ (φ₁ , ie₁) = (φ₂ ∘ᴳˢ φ₁ , ie₂ ∘ise ie₁) _∘eᴳ_ : ∀ {i j k} {G : Group i} {H : Group j} {K : Group k} → H ≃ᴳ K → G ≃ᴳ H → G ≃ᴳ K (φ₂ , ie₂) ∘eᴳ (φ₁ , ie₁) = (φ₂ ∘ᴳ φ₁ , ie₂ ∘ise ie₁) infixr 10 _≃ᴳˢ⟨_⟩_ _≃ᴳ⟨_⟩_ infix 15 _≃ᴳˢ∎ _≃ᴳ∎ _≃ᴳˢ⟨_⟩_ : ∀ {i j k} {GEl : Type i} {HEl : Type j} {KEl : Type k} (GS : GroupStructure GEl) {HS : GroupStructure HEl} {KS : GroupStructure KEl} → GS ≃ᴳˢ HS → HS ≃ᴳˢ KS → GS ≃ᴳˢ KS GS ≃ᴳˢ⟨ e₁ ⟩ e₂ = e₂ ∘eᴳˢ e₁ _≃ᴳ⟨_⟩_ : ∀ {i j k} (G : Group i) {H : Group j} {K : Group k} → G ≃ᴳ H → H ≃ᴳ K → G ≃ᴳ K G ≃ᴳ⟨ e₁ ⟩ e₂ = e₂ ∘eᴳ e₁ _≃ᴳˢ∎ : ∀ {i} {GEl : Type i} (GS : GroupStructure GEl) → (GS ≃ᴳˢ GS) _≃ᴳˢ∎ = idsiso _≃ᴳ∎ : ∀ {i} (G : Group i) → (G ≃ᴳ G) _≃ᴳ∎ = idiso infixl 120 _⁻¹ᴳˢ _⁻¹ᴳ _⁻¹ᴳˢ : ∀ {i j} {GEl : Type i} {HEl : Type j} {GS : GroupStructure GEl} {HS : GroupStructure HEl} → GS ≃ᴳˢ HS → HS ≃ᴳˢ GS _⁻¹ᴳˢ {GS = GS} {HS} (φ , ie) = GroupStructureIso.g-shom (φ , ie) , is-equiv-inverse ie _⁻¹ᴳ : ∀ {i j} {G : Group i} {H : Group j} → G ≃ᴳ H → H ≃ᴳ G _⁻¹ᴳ {G = G} {H = H} (φ , ie) = GroupIso.g-hom (φ , ie) , is-equiv-inverse ie {- mimicking notations for equivalences -} –>ᴳ : ∀ {i j} {G : Group i} {H : Group j} → (G ≃ᴳ H) → (G →ᴳ H) –>ᴳ = GroupIso.f-hom <–ᴳ : ∀ {i j} {G : Group i} {H : Group j} → (G ≃ᴳ H) → (H →ᴳ G) <–ᴳ = GroupIso.g-hom {- univalence -} module _ {i} {G H : Group i} (iso : GroupIso G H) where private module G = Group G module H = Group H open module φ = GroupIso {G = G} {H = H} iso El= = ua f-equiv private ap3-lemma : ∀ {i j k l} {C : Type i} {D : C → Type j} {E : C → Type k} {F : Type l} {c₁ c₂ 
: C} {d₁ : D c₁} {d₂ : D c₂} {e₁ : E c₁} {e₂ : E c₂} (f : (c : C) → D c → E c → F) (p : c₁ == c₂) → (d₁ == d₂ [ D ↓ p ]) → (e₁ == e₂ [ E ↓ p ]) → (f c₁ d₁ e₁ == f c₂ d₂ e₂) ap3-lemma f idp idp idp = idp ap3-lemma-El : ∀ {i} {G H : Group i} (p : Group.El G == Group.El H) (q : Group.El-level G == Group.El-level H [ _ ↓ p ]) (r : Group.group-struct G == Group.group-struct H [ _ ↓ p ]) → ap Group.El (ap3-lemma (λ a b c → group a {{b}} c) p q r) == p ap3-lemma-El idp idp idp = idp {- a homomorphism which is an equivalence gives a path between groups -} abstract uaᴳ : G == H uaᴳ = ap3-lemma (λ a b c → group a {{b}} c) El= prop-has-all-paths-↓ (↓-group-structure= El= ident= inv= comp=) where ident= : G.ident == H.ident [ (λ C → C) ↓ El= ] ident= = ↓-idf-ua-in _ pres-ident inv= : G.inv == H.inv [ (λ C → C → C) ↓ El= ] inv= = ↓-→-from-transp $ λ= λ a → transport (λ C → C) El= (G.inv a) =⟨ to-transp (↓-idf-ua-in _ idp) ⟩ f (G.inv a) =⟨ pres-inv a ⟩ H.inv (f a) =⟨ ap H.inv (! (to-transp (↓-idf-ua-in _ idp))) ⟩ H.inv (transport (λ C → C) El= a) =∎ comp=' : (a : G.El) → G.comp a == H.comp (f a) [ (λ C → C → C) ↓ El= ] comp=' a = ↓-→-from-transp $ λ= λ b → transport (λ C → C) El= (G.comp a b) =⟨ to-transp (↓-idf-ua-in _ idp) ⟩ f (G.comp a b) =⟨ pres-comp a b ⟩ H.comp (f a) (f b) =⟨ ! (to-transp (↓-idf-ua-in _ idp)) |in-ctx (λ w → H.comp (f a) w) ⟩ H.comp (f a) (transport (λ C → C) El= b) =∎ comp= : G.comp == H.comp [ (λ C → C → C → C) ↓ El= ] comp= = ↓-→-from-transp $ λ= λ a → transport (λ C → C → C) El= (G.comp a) =⟨ to-transp (comp=' a) ⟩ H.comp (f a) =⟨ ! (to-transp (↓-idf-ua-in _ idp)) |in-ctx (λ w → H.comp w) ⟩ H.comp (transport (λ C → C) El= a) =∎ -- XXX This stretches the naming convention a little bit. El=-β : ap Group.El uaᴳ == El= El=-β = ap3-lemma-El El= _ _ {- homomorphism from equality of groups -} abstract transp-El-pres-comp : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → preserves-comp (Group.comp (B a₁)) (Group.comp (B a₂)) (transport (Group.El ∘ B) p) transp-El-pres-comp B idp g₁ g₂ = idp transp!-El-pres-comp : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → preserves-comp (Group.comp (B a₂)) (Group.comp (B a₁)) (transport! (Group.El ∘ B) p) transp!-El-pres-comp B idp h₁ h₂ = idp transportᴳ : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → (B a₁ →ᴳ B a₂) transportᴳ B p = record {f = transport (Group.El ∘ B) p; pres-comp = transp-El-pres-comp B p} transport!ᴳ : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → (B a₂ →ᴳ B a₁) transport!ᴳ B p = record {f = transport! 
(Group.El ∘ B) p; pres-comp = transp!-El-pres-comp B p} abstract transpᴳ-is-iso : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → is-equiv (GroupHom.f (transportᴳ B p)) transpᴳ-is-iso B idp = idf-is-equiv _ transp!ᴳ-is-iso : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → is-equiv (GroupHom.f (transport!ᴳ B p)) transp!ᴳ-is-iso B idp = idf-is-equiv _ transportᴳ-iso : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → B a₁ ≃ᴳ B a₂ transportᴳ-iso B p = transportᴳ B p , transpᴳ-is-iso B p transport!ᴳ-iso : ∀ {i j} {A : Type i} (B : A → Group j) {a₁ a₂ : A} (p : a₁ == a₂) → B a₂ ≃ᴳ B a₁ transport!ᴳ-iso B p = transport!ᴳ B p , transp!ᴳ-is-iso B p coeᴳ : ∀ {i} {G H : Group i} → G == H → (G →ᴳ H) coeᴳ = transportᴳ (idf _) coe!ᴳ : ∀ {i} {G H : Group i} → G == H → (H →ᴳ G) coe!ᴳ = transport!ᴳ (idf _) coeᴳ-iso : ∀ {i} {G H : Group i} → G == H → G ≃ᴳ H coeᴳ-iso = transportᴳ-iso (idf _) coe!ᴳ-iso : ∀ {i} {G H : Group i} → G == H → H ≃ᴳ G coe!ᴳ-iso = transport!ᴳ-iso (idf _) abstract coeᴳ-β : ∀ {i} {G H : Group i} (iso : G ≃ᴳ H) → coeᴳ (uaᴳ iso) == GroupIso.f-hom iso coeᴳ-β iso = group-hom= $ ap coe (El=-β iso) ∙ λ= (coe-β (GroupIso.f-equiv iso)) -- triviality iso-preserves-trivial : ∀ {i j} {G : Group i} {H : Group j} → G ≃ᴳ H → is-trivialᴳ G → is-trivialᴳ H iso-preserves-trivial iso G-is-trivial h = ! (GroupIso.f-g iso h) ∙ ap (GroupIso.f iso) (G-is-trivial _) ∙ GroupIso.pres-ident iso iso-preserves'-trivial : ∀ {i j} {G : Group i} {H : Group j} → G ≃ᴳ H → is-trivialᴳ H → is-trivialᴳ G iso-preserves'-trivial iso H-is-trivial g = ! (GroupIso.g-f iso g) ∙ ap (GroupIso.g iso) (H-is-trivial _) ∙ GroupHom.pres-ident (GroupIso.g-hom iso) -- a surjective and injective homomorphism is an isomorphism module _ {i j} {G : Group i} {H : Group j} (φ : G →ᴳ H) (surj : is-surjᴳ φ) (inj : is-injᴳ φ) where private module G = Group G module H = Group H module φ = GroupHom φ abstract instance image-prop : (h : H.El) → is-prop (hfiber φ.f h) image-prop h = all-paths-is-prop λ {(g₁ , p₁) (g₂ , p₂) → pair= (inj g₁ g₂ (p₁ ∙ ! p₂)) prop-has-all-paths-↓} surjᴳ-and-injᴳ-is-equiv : is-equiv φ.f surjᴳ-and-injᴳ-is-equiv = contr-map-is-equiv (λ h → let (g₁ , p₁) = Trunc-rec (idf _) (surj h) in has-level-in ((g₁ , p₁) , (λ {(g₂ , p₂) → pair= (inj g₁ g₂ (p₁ ∙ ! p₂)) prop-has-all-paths-↓}))) surjᴳ-and-injᴳ-iso : G ≃ᴳ H surjᴳ-and-injᴳ-iso = φ , surjᴳ-and-injᴳ-is-equiv -- isomorphisms preserve abelianess. module _ {i} {G H : Group i} (iso : G ≃ᴳ H) (G-abelian : is-abelian G) where private module G = Group G module H = Group H open GroupIso iso abstract iso-preserves-abelian : is-abelian H iso-preserves-abelian h₁ h₂ = H.comp h₁ h₂ =⟨ ap2 H.comp (! $ f-g h₁) (! $ f-g h₂) ⟩ H.comp (f (g h₁)) (f (g h₂)) =⟨ ! 
$ pres-comp (g h₁) (g h₂) ⟩ f (G.comp (g h₁) (g h₂)) =⟨ G-abelian (g h₁) (g h₂) |in-ctx f ⟩ f (G.comp (g h₂) (g h₁)) =⟨ pres-comp (g h₂) (g h₁) ⟩ H.comp (f (g h₂)) (f (g h₁)) =⟨ ap2 H.comp (f-g h₂) (f-g h₁) ⟩ H.comp h₂ h₁ =∎ pre∘ᴳ-iso : ∀ {i j k} {G : Group i} {H : Group j} (K : AbGroup k) → (G ≃ᴳ H) → (hom-group H K ≃ᴳ hom-group G K) pre∘ᴳ-iso K iso = ≃-to-≃ᴳ (equiv to from to-from from-to) to-pres-comp where to = GroupHom.f (pre∘ᴳ-hom K (–>ᴳ iso)) to-pres-comp = GroupHom.pres-comp (pre∘ᴳ-hom K (–>ᴳ iso)) from = GroupHom.f (pre∘ᴳ-hom K (<–ᴳ iso)) abstract to-from : ∀ φ → to (from φ) == φ to-from φ = group-hom= $ λ= λ g → ap (GroupHom.f φ) (GroupIso.g-f iso g) from-to : ∀ φ → from (to φ) == φ from-to φ = group-hom= $ λ= λ h → ap (GroupHom.f φ) (GroupIso.f-g iso h) post∘ᴳ-iso : ∀ {i j k} (G : Group i) (H : AbGroup j) (K : AbGroup k) → (AbGroup.grp H ≃ᴳ AbGroup.grp K) → (hom-group G H ≃ᴳ hom-group G K) post∘ᴳ-iso G H K iso = ≃-to-≃ᴳ (equiv to from to-from from-to) to-pres-comp where to = GroupHom.f (post∘ᴳ-hom G H K (–>ᴳ iso)) to-pres-comp = GroupHom.pres-comp (post∘ᴳ-hom G H K(–>ᴳ iso)) from = GroupHom.f (post∘ᴳ-hom G K H (<–ᴳ iso)) abstract to-from : ∀ φ → to (from φ) == φ to-from φ = group-hom= $ λ= λ g → GroupIso.f-g iso (GroupHom.f φ g) from-to : ∀ φ → from (to φ) == φ from-to φ = group-hom= $ λ= λ h → GroupIso.g-f iso (GroupHom.f φ h)
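The key computation behind inverse-preserves-comp (and hence g-hom and g-shom) reads, in ordinary notation: for an isomorphism $\varphi$ with inverse $\varphi^{-1}$,

\[
\varphi^{-1}(b_1 b_2)
 = \varphi^{-1}\bigl(\varphi(\varphi^{-1} b_1)\,\varphi(\varphi^{-1} b_2)\bigr)
 = \varphi^{-1}\bigl(\varphi(\varphi^{-1} b_1 \cdot \varphi^{-1} b_2)\bigr)
 = \varphi^{-1} b_1 \cdot \varphi^{-1} b_2 ,
\]

using $\varphi \circ \varphi^{-1} = \mathrm{id}$ in the first step, the homomorphism law in the second, and $\varphi^{-1} \circ \varphi = \mathrm{id}$ in the last.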
[GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z ⊢ glueDist Φ Ψ ε (Sum.inl (Φ p)) (Sum.inr (Ψ p)) = ε [PROOFSTEP] have : ⨅ q, dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) = 0 := by have A : ∀ q, 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) := fun _ => add_nonneg dist_nonneg dist_nonneg refine' le_antisymm _ (le_ciInf A) have : 0 = dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) := by simp rw [this] exact ciInf_le ⟨0, forall_range_iff.2 A⟩ p [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z ⊢ ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) = 0 [PROOFSTEP] have A : ∀ q, 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) := fun _ => add_nonneg dist_nonneg dist_nonneg [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z A : ∀ (q : Z), 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ⊢ ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) = 0 [PROOFSTEP] refine' le_antisymm _ (le_ciInf A) [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z A : ∀ (q : Z), 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ⊢ ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ≤ 0 [PROOFSTEP] have : 0 = dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) := by simp [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z A : ∀ (q : Z), 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ⊢ 0 = dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) [PROOFSTEP] simp [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z A : ∀ (q : Z), 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) this : 0 = dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) ⊢ ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ≤ 0 [PROOFSTEP] rw [this] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z A : ∀ (q : Z), 0 ≤ dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) this : 0 = dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) ⊢ ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) ≤ dist (Φ p) (Φ p) + dist (Ψ p) (Ψ p) [PROOFSTEP] exact ciInf_le ⟨0, forall_range_iff.2 A⟩ p [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ p : Z this : ⨅ (q : Z), dist (Φ p) (Φ q) + dist (Ψ p) (Ψ q) = 0 ⊢ glueDist Φ Ψ ε (Sum.inl (Φ p)) (Sum.inr (Ψ p)) = ε [PROOFSTEP] simp only [glueDist, this, zero_add] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ Φ : Z → X Ψ : Z → Y ε : ℝ val✝¹ : X val✝ : Y ⊢ glueDist Ψ Φ ε (Sum.swap (Sum.inl val✝¹)) (Sum.swap (Sum.inr val✝)) = glueDist Φ Ψ ε (Sum.inl val✝¹) (Sum.inr val✝) [PROOFSTEP] simp only [glueDist, Sum.swap_inl, Sum.swap_inr, dist_comm, add_comm] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ Φ : Z → X Ψ : Z → Y ε : ℝ val✝¹ : Y val✝ : X ⊢ glueDist Ψ Φ ε (Sum.swap (Sum.inr val✝¹)) (Sum.swap (Sum.inl val✝)) = glueDist Φ Ψ ε (Sum.inr val✝¹) (Sum.inl val✝) [PROOFSTEP] simp only 
[glueDist, Sum.swap_inl, Sum.swap_inr, dist_comm, add_comm] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ Φ : Z → X Ψ : Z → Y ε : ℝ x : Y y : X ⊢ ε ≤ glueDist Φ Ψ ε (Sum.inr x) (Sum.inl y) [PROOFSTEP] rw [glueDist_comm] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ Φ : Z → X Ψ : Z → Y ε : ℝ x : Y y : X ⊢ ε ≤ glueDist Φ Ψ ε (Sum.inl y) (Sum.inr x) [PROOFSTEP] apply le_glueDist_inl_inr [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ x : X y z : Y ⊢ glueDist Φ Ψ ε (Sum.inl x) (Sum.inr z) ≤ glueDist Φ Ψ ε (Sum.inl x) (Sum.inr y) + glueDist Φ Ψ ε (Sum.inr y) (Sum.inr z) [PROOFSTEP] simp only [glueDist] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ x : X y z : Y ⊢ (⨅ (p : Z), dist x (Φ p) + dist z (Ψ p)) + ε ≤ (⨅ (p : Z), dist x (Φ p) + dist y (Ψ p)) + ε + dist y z [PROOFSTEP] rw [add_right_comm, add_le_add_iff_right] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ x : X y z : Y ⊢ ⨅ (p : Z), dist x (Φ p) + dist z (Ψ p) ≤ (⨅ (p : Z), dist x (Φ p) + dist y (Ψ p)) + dist y z [PROOFSTEP] refine le_ciInf_add fun p => ciInf_le_of_le ⟨0, ?_⟩ p ?_ [GOAL] case refine_1 X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ x : X y z : Y p : Z ⊢ 0 ∈ lowerBounds (range fun p => dist x (Φ p) + dist z (Ψ p)) [PROOFSTEP] exact forall_range_iff.2 fun _ => add_nonneg dist_nonneg dist_nonneg [GOAL] case refine_2 X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ x : X y z : Y p : Z ⊢ dist x (Φ p) + dist z (Ψ p) ≤ dist x (Φ p) + dist y (Ψ p) + dist y z [PROOFSTEP] linarith [dist_triangle_left z (Ψ p) y] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X ⊢ glueDist Φ Ψ ε (Sum.inl x) (Sum.inl z) ≤ glueDist Φ Ψ ε (Sum.inl x) (Sum.inr y) + glueDist Φ Ψ ε (Sum.inr y) (Sum.inl z) [PROOFSTEP] simp_rw [glueDist, add_add_add_comm _ ε, add_assoc] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X ⊢ dist x z ≤ (⨅ (p : Z), dist x (Φ p) + dist y (Ψ p)) + ((⨅ (p : Z), dist z (Φ p) + dist y (Ψ p)) + (ε + ε)) [PROOFSTEP] refine le_ciInf_add fun p => ?_ [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X p : Z ⊢ dist x z ≤ dist x (Φ p) + dist y (Ψ p) + ((⨅ (p : Z), dist z (Φ p) + dist y (Ψ p)) + (ε + ε)) [PROOFSTEP] rw [add_left_comm, add_assoc, ← two_mul] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : 
ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X p : Z ⊢ dist x z ≤ (⨅ (p : Z), dist z (Φ p) + dist y (Ψ p)) + (dist x (Φ p) + (dist y (Ψ p) + 2 * ε)) [PROOFSTEP] refine le_ciInf_add fun q => ?_ [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X p q : Z ⊢ dist x z ≤ dist z (Φ q) + dist y (Ψ q) + (dist x (Φ p) + (dist y (Ψ p) + 2 * ε)) [PROOFSTEP] rw [dist_comm z] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : X y : Y z : X p q : Z ⊢ dist x z ≤ dist (Φ q) z + dist y (Ψ q) + (dist x (Φ p) + (dist y (Ψ p) + 2 * ε)) [PROOFSTEP] linarith [dist_triangle4 x (Φ p) (Φ q) z, dist_triangle_left (Ψ p) (Ψ q) y, (abs_le.1 (H p q)).2] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : Y y z : X ⊢ glueDist Φ Ψ ε (Sum.inr x) (Sum.inl z) ≤ glueDist Φ Ψ ε (Sum.inr x) (Sum.inl y) + glueDist Φ Ψ ε (Sum.inl y) (Sum.inl z) [PROOFSTEP] simp only [← glueDist_swap Φ] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : Y y z : X ⊢ glueDist Ψ Φ ε (Sum.swap (Sum.inr x)) (Sum.swap (Sum.inl z)) ≤ glueDist Ψ Φ ε (Sum.swap (Sum.inr x)) (Sum.swap (Sum.inl y)) + glueDist Ψ Φ ε (Sum.swap (Sum.inl y)) (Sum.swap (Sum.inl z)) [PROOFSTEP] apply glueDist_triangle_inl_inr_inr [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x y : Y z : X ⊢ glueDist Φ Ψ ε (Sum.inr x) (Sum.inl z) ≤ glueDist Φ Ψ ε (Sum.inr x) (Sum.inr y) + glueDist Φ Ψ ε (Sum.inr y) (Sum.inl z) [PROOFSTEP] simpa only [glueDist_comm, add_comm] using glueDist_triangle_inl_inr_inr _ _ _ z y x [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x y : X z : Y ⊢ glueDist Φ Ψ ε (Sum.inl x) (Sum.inr z) ≤ glueDist Φ Ψ ε (Sum.inl x) (Sum.inl y) + glueDist Φ Ψ ε (Sum.inl y) (Sum.inr z) [PROOFSTEP] simpa only [← glueDist_swap Φ, glueDist_comm, add_comm, Sum.swap_inl, Sum.swap_inr] using glueDist_triangle_inl_inr_inr Ψ Φ ε z y x [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : Y y : X z : Y ⊢ glueDist Φ Ψ ε (Sum.inr x) (Sum.inr z) ≤ glueDist Φ Ψ ε (Sum.inr x) (Sum.inl y) + glueDist Φ Ψ ε (Sum.inl y) (Sum.inr z) [PROOFSTEP] simp only [← glueDist_swap Φ] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : Y y : X z : Y ⊢ glueDist Ψ Φ ε (Sum.swap (Sum.inr 
x)) (Sum.swap (Sum.inr z)) ≤ glueDist Ψ Φ ε (Sum.swap (Sum.inr x)) (Sum.swap (Sum.inl y)) + glueDist Ψ Φ ε (Sum.swap (Sum.inl y)) (Sum.swap (Sum.inr z)) [PROOFSTEP] apply glueDist_triangle_inl_inr_inl [GOAL] case H X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x : Y y : X z : Y ⊢ ∀ (p q : Z), |dist (Ψ p) (Ψ q) - dist (Φ p) (Φ q)| ≤ 2 * ε [PROOFSTEP] simpa only [abs_sub_comm] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x y : X h : glueDist Φ Ψ ε (Sum.inl x) (Sum.inl y) = 0 ⊢ Sum.inl x = Sum.inl y [PROOFSTEP] rw [eq_of_dist_eq_zero h] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x : X y : Y h : glueDist Φ Ψ ε (Sum.inl x) (Sum.inr y) = 0 ⊢ Sum.inl x = Sum.inr y [PROOFSTEP] exfalso [GOAL] case h X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x : X y : Y h : glueDist Φ Ψ ε (Sum.inl x) (Sum.inr y) = 0 ⊢ False [PROOFSTEP] linarith [le_glueDist_inl_inr Φ Ψ ε x y] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x : Y y : X h : glueDist Φ Ψ ε (Sum.inr x) (Sum.inl y) = 0 ⊢ Sum.inr x = Sum.inl y [PROOFSTEP] exfalso [GOAL] case h X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x : Y y : X h : glueDist Φ Ψ ε (Sum.inr x) (Sum.inl y) = 0 ⊢ False [PROOFSTEP] linarith [le_glueDist_inr_inl Φ Ψ ε x y] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε x y : Y h : glueDist Φ Ψ ε (Sum.inr x) (Sum.inr y) = 0 ⊢ Sum.inr x = Sum.inr y [PROOFSTEP] rw [eq_of_dist_eq_zero h] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ s ∈ 𝓤 (X ⊕ Y) ↔ ∃ δ, δ > 0 ∧ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < δ → (a, b) ∈ s [PROOFSTEP] simp only [Sum.uniformity, Filter.mem_sup, Filter.mem_map, mem_uniformity_dist, mem_preimage] [GOAL] X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ ((∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (Sum.inl a, Sum.inl b) ∈ s) ∧ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (Sum.inr a, Sum.inr b) ∈ s) ↔ ∃ δ, δ > 0 ∧ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < δ → (a, b) ∈ s [PROOFSTEP] constructor [GOAL] case mp X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ ((∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (Sum.inl a, Sum.inl b) ∈ s) ∧ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (Sum.inr a, Sum.inr b) ∈ s) → ∃ δ, δ > 0 ∧ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < δ → (a, b) ∈ s [PROOFSTEP] rintro ⟨⟨δX, δX0, hX⟩, δY, δY0, hY⟩ [GOAL] case mp.intro.intro.intro.intro.intro X : Type u Y : Type v Z : Type w 
inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s ⊢ ∃ δ, δ > 0 ∧ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < δ → (a, b) ∈ s [PROOFSTEP] refine ⟨min (min δX δY) ε, lt_min (lt_min δX0 δY0) hε, ?_⟩ [GOAL] case mp.intro.intro.intro.intro.intro X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s ⊢ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < min (min δX δY) ε → (a, b) ∈ s [PROOFSTEP] rintro (a | a) (b | b) h [GOAL] case mp.intro.intro.intro.intro.intro.inl.inl X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a b : X h : glueDist Φ Ψ ε (Sum.inl a) (Sum.inl b) < min (min δX δY) ε ⊢ (Sum.inl a, Sum.inl b) ∈ s [PROOFSTEP] simp only [lt_min_iff] at h [GOAL] case mp.intro.intro.intro.intro.intro.inl.inr X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a : X b : Y h : glueDist Φ Ψ ε (Sum.inl a) (Sum.inr b) < min (min δX δY) ε ⊢ (Sum.inl a, Sum.inr b) ∈ s [PROOFSTEP] simp only [lt_min_iff] at h [GOAL] case mp.intro.intro.intro.intro.intro.inr.inl X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a : Y b : X h : glueDist Φ Ψ ε (Sum.inr a) (Sum.inl b) < min (min δX δY) ε ⊢ (Sum.inr a, Sum.inl b) ∈ s [PROOFSTEP] simp only [lt_min_iff] at h [GOAL] case mp.intro.intro.intro.intro.intro.inr.inr X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a b : Y h : glueDist Φ Ψ ε (Sum.inr a) (Sum.inr b) < min (min δX δY) ε ⊢ (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] simp only [lt_min_iff] at h [GOAL] case mp.intro.intro.intro.intro.intro.inl.inl X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a b : X h : (glueDist Φ Ψ ε (Sum.inl a) (Sum.inl b) < δX ∧ glueDist Φ Ψ ε (Sum.inl a) (Sum.inl b) < δY) ∧ glueDist Φ Ψ ε (Sum.inl a) (Sum.inl b) < ε ⊢ (Sum.inl a, Sum.inl b) ∈ s 
[PROOFSTEP] exact hX h.1.1 [GOAL] case mp.intro.intro.intro.intro.intro.inl.inr X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a : X b : Y h : (glueDist Φ Ψ ε (Sum.inl a) (Sum.inr b) < δX ∧ glueDist Φ Ψ ε (Sum.inl a) (Sum.inr b) < δY) ∧ glueDist Φ Ψ ε (Sum.inl a) (Sum.inr b) < ε ⊢ (Sum.inl a, Sum.inr b) ∈ s [PROOFSTEP] exact absurd h.2 (le_glueDist_inl_inr _ _ _ _ _).not_lt [GOAL] case mp.intro.intro.intro.intro.intro.inr.inl X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a : Y b : X h : (glueDist Φ Ψ ε (Sum.inr a) (Sum.inl b) < δX ∧ glueDist Φ Ψ ε (Sum.inr a) (Sum.inl b) < δY) ∧ glueDist Φ Ψ ε (Sum.inr a) (Sum.inl b) < ε ⊢ (Sum.inr a, Sum.inl b) ∈ s [PROOFSTEP] exact absurd h.2 (le_glueDist_inr_inl _ _ _ _ _).not_lt [GOAL] case mp.intro.intro.intro.intro.intro.inr.inr X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) δX : ℝ δX0 : δX > 0 hX : ∀ {a b : X}, dist a b < δX → (Sum.inl a, Sum.inl b) ∈ s δY : ℝ δY0 : δY > 0 hY : ∀ {a b : Y}, dist a b < δY → (Sum.inr a, Sum.inr b) ∈ s a b : Y h : (glueDist Φ Ψ ε (Sum.inr a) (Sum.inr b) < δX ∧ glueDist Φ Ψ ε (Sum.inr a) (Sum.inr b) < δY) ∧ glueDist Φ Ψ ε (Sum.inr a) (Sum.inr b) < ε ⊢ (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] exact hY h.1.2 [GOAL] case mpr X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ inst✝ : Nonempty Z hε : 0 < ε s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ (∃ δ, δ > 0 ∧ ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε a b < δ → (a, b) ∈ s) → (∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (Sum.inl a, Sum.inl b) ∈ s) ∧ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] rintro ⟨ε, ε0, H⟩ [GOAL] case mpr.intro.intro X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z hε : 0 < ε✝ s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε✝ a b < ε → (a, b) ∈ s ⊢ (∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (Sum.inl a, Sum.inl b) ∈ s) ∧ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] constructor [GOAL] case mpr.intro.intro.left X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z hε : 0 < ε✝ s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε✝ a b < ε → (a, b) ∈ s ⊢ ∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (Sum.inl a, Sum.inl b) ∈ s [PROOFSTEP] exact ⟨ε, ε0, fun h => H _ _ h⟩ [GOAL] case mpr.intro.intro.right X : Type u Y : Type v Z : Type w inst✝² : MetricSpace X inst✝¹ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z hε : 0 < ε✝ s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), glueDist Φ Ψ ε✝ a b < ε → (a, b) ∈ s ⊢ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] exact ⟨ε, ε0, fun h => H _ _ h⟩ [GOAL] X : Type u Y : Type v Z : Type w inst✝² : 
MetricSpace X inst✝¹ : MetricSpace Y Φ✝ : Z → X Ψ✝ : Z → Y ε✝ : ℝ inst✝ : Nonempty Z Φ : Z → X Ψ : Z → Y ε : ℝ ε0 : 0 < ε H : ∀ (p q : Z), |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * ε x✝¹ x✝ : X ⊕ Y ⊢ (fun x y => ↑{ val := glueDist Φ Ψ ε x y, property := (_ : 0 ≤ glueDist Φ Ψ ε x y) }) x✝¹ x✝ = ENNReal.ofReal (dist x✝¹ x✝) [PROOFSTEP] exact ENNReal.coe_nnreal_eq _ [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p q : X ⊕ Y x : X y : Y ⊢ Sum.dist p q = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 p q [PROOFSTEP] cases p [GOAL] case inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y q : X ⊕ Y x : X y : Y val✝ : X ⊢ Sum.dist (Sum.inl val✝) q = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝) q [PROOFSTEP] cases q [GOAL] case inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y q : X ⊕ Y x : X y val✝ : Y ⊢ Sum.dist (Sum.inr val✝) q = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝) q [PROOFSTEP] cases q [GOAL] case inl.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y val✝¹ val✝ : X ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inl val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝¹) (Sum.inl val✝) [PROOFSTEP] first | rfl | simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inl.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y val✝¹ val✝ : X ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inl val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝¹) (Sum.inl val✝) [PROOFSTEP] rfl [GOAL] case inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y val✝¹ : X val✝ : Y ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inr val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝¹) (Sum.inr val✝) [PROOFSTEP] first | rfl | simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y val✝¹ : X val✝ : Y ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inr val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝¹) (Sum.inr val✝) [PROOFSTEP] rfl [GOAL] case inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y val✝¹ : X val✝ : Y ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inr val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl val✝¹) (Sum.inr val✝) [PROOFSTEP] simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y val✝¹ : Y val✝ : X ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inl val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝¹) (Sum.inl val✝) [PROOFSTEP] first | rfl | simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y val✝¹ : Y val✝ : X ⊢ Sum.dist (Sum.inr val✝¹) 
(Sum.inl val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝¹) (Sum.inl val✝) [PROOFSTEP] rfl [GOAL] case inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y val✝¹ : Y val✝ : X ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inl val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝¹) (Sum.inl val✝) [PROOFSTEP] simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y val✝¹ val✝ : Y ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inr val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝¹) (Sum.inr val✝) [PROOFSTEP] first | rfl | simp [Sum.dist, glueDist, dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y val✝¹ val✝ : Y ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inr val✝) = glueDist (fun x_1 => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr val✝¹) (Sum.inr val✝) [PROOFSTEP] rfl [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x y : X ⊕ Y ⊢ Sum.dist x y = Sum.dist y x [PROOFSTEP] cases x [GOAL] case inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y y : X ⊕ Y val✝ : X ⊢ Sum.dist (Sum.inl val✝) y = Sum.dist y (Sum.inl val✝) [PROOFSTEP] cases y [GOAL] case inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y y : X ⊕ Y val✝ : Y ⊢ Sum.dist (Sum.inr val✝) y = Sum.dist y (Sum.inr val✝) [PROOFSTEP] cases y [GOAL] case inl.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝¹ val✝ : X ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inl val✝) = Sum.dist (Sum.inl val✝) (Sum.inl val✝¹) [PROOFSTEP] simp [Sum.dist, _root_.dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝¹ : X val✝ : Y ⊢ Sum.dist (Sum.inl val✝¹) (Sum.inr val✝) = Sum.dist (Sum.inr val✝) (Sum.inl val✝¹) [PROOFSTEP] simp [Sum.dist, _root_.dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝¹ : Y val✝ : X ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inl val✝) = Sum.dist (Sum.inl val✝) (Sum.inr val✝¹) [PROOFSTEP] simp [Sum.dist, _root_.dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] case inr.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝¹ val✝ : Y ⊢ Sum.dist (Sum.inr val✝¹) (Sum.inr val✝) = Sum.dist (Sum.inr val✝) (Sum.inr val✝¹) [PROOFSTEP] simp [Sum.dist, _root_.dist_comm, add_comm, add_left_comm, add_assoc] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y ⊢ 1 ≤ Sum.dist (Sum.inr y) (Sum.inl x) [PROOFSTEP] rw [Sum.dist_comm] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X y : Y ⊢ 1 ≤ Sum.dist (Sum.inl x) (Sum.inr y) [PROOFSTEP] exact Sum.one_le_dist_inl_inr [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ s ∈ 𝓤 (X ⊕ Y) ↔ ∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s [PROOFSTEP] constructor [GOAL] case mp X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set 
((X ⊕ Y) × (X ⊕ Y)) ⊢ s ∈ 𝓤 (X ⊕ Y) → ∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s [PROOFSTEP] rintro ⟨hsX, hsY⟩ [GOAL] case mp.intro X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets ⊢ ∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s [PROOFSTEP] rcases mem_uniformity_dist.1 hsX with ⟨εX, εX0, hX⟩ [GOAL] case mp.intro.intro.intro X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s ⊢ ∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s [PROOFSTEP] rcases mem_uniformity_dist.1 hsY with ⟨εY, εY0, hY⟩ [GOAL] case mp.intro.intro.intro.intro.intro X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s ⊢ ∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s [PROOFSTEP] refine' ⟨min (min εX εY) 1, lt_min (lt_min εX0 εY0) zero_lt_one, _⟩ [GOAL] case mp.intro.intro.intro.intro.intro X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s ⊢ ∀ (a b : X ⊕ Y), Sum.dist a b < min (min εX εY) 1 → (a, b) ∈ s [PROOFSTEP] rintro (a | a) (b | b) h [GOAL] case mp.intro.intro.intro.intro.intro.inl.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s a b : X h : Sum.dist (Sum.inl a) (Sum.inl b) < min (min εX εY) 1 ⊢ (Sum.inl a, Sum.inl b) ∈ s [PROOFSTEP] exact hX (lt_of_lt_of_le h (le_trans (min_le_left _ _) (min_le_left _ _))) [GOAL] case mp.intro.intro.intro.intro.intro.inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ 
(fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s a : X b : Y h : Sum.dist (Sum.inl a) (Sum.inr b) < min (min εX εY) 1 ⊢ (Sum.inl a, Sum.inr b) ∈ s [PROOFSTEP] cases not_le_of_lt (lt_of_lt_of_le h (min_le_right _ _)) Sum.one_le_dist_inl_inr [GOAL] case mp.intro.intro.intro.intro.intro.inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s a : Y b : X h : Sum.dist (Sum.inr a) (Sum.inl b) < min (min εX εY) 1 ⊢ (Sum.inr a, Sum.inl b) ∈ s [PROOFSTEP] cases not_le_of_lt (lt_of_lt_of_le h (min_le_right _ _)) Sum.one_le_dist_inr_inl [GOAL] case mp.intro.intro.intro.intro.intro.inr.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) hsX : s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets hsY : s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets εX : ℝ εX0 : εX > 0 hX : ∀ {a b : X}, dist a b < εX → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s εY : ℝ εY0 : εY > 0 hY : ∀ {a b : Y}, dist a b < εY → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s a b : Y h : Sum.dist (Sum.inr a) (Sum.inr b) < min (min εX εY) 1 ⊢ (Sum.inr a, Sum.inr b) ∈ s [PROOFSTEP] exact hY (lt_of_lt_of_le h (le_trans (min_le_left _ _) (min_le_right _ _))) [GOAL] case mpr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ⊢ (∃ ε, ε > 0 ∧ ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s) → s ∈ 𝓤 (X ⊕ Y) [PROOFSTEP] rintro ⟨ε, ε0, H⟩ [GOAL] case mpr.intro.intro X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s ⊢ s ∈ 𝓤 (X ⊕ Y) [PROOFSTEP] constructor [GOAL] case mpr.intro.intro.left X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s ⊢ s ∈ (Filter.map (fun p => (Sum.inl p.fst, Sum.inl p.snd)) (𝓤 X)).sets [PROOFSTEP] rw [Filter.mem_sets, Filter.mem_map, mem_uniformity_dist] [GOAL] case mpr.intro.intro.right X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s ⊢ s ∈ (Filter.map (fun p => (Sum.inr p.fst, Sum.inr p.snd)) (𝓤 Y)).sets [PROOFSTEP] rw [Filter.mem_sets, Filter.mem_map, mem_uniformity_dist] [GOAL] case mpr.intro.intro.left X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s ⊢ ∃ ε, ε > 0 ∧ ∀ {a b : X}, dist a b < ε → (a, b) ∈ (fun p => (Sum.inl p.fst, Sum.inl p.snd)) ⁻¹' s [PROOFSTEP] exact ⟨ε, ε0, fun h => H _ _ h⟩ [GOAL] case mpr.intro.intro.right X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y s : Set ((X ⊕ Y) × (X ⊕ Y)) ε : ℝ ε0 : ε > 0 H : ∀ (a b : X ⊕ Y), Sum.dist a b < ε → (a, b) ∈ s ⊢ ∃ ε, ε > 0 ∧ ∀ {a b : Y}, dist a b < ε → (a, b) ∈ (fun p => (Sum.inr p.fst, Sum.inr p.snd)) ⁻¹' s [PROOFSTEP] exact ⟨ε, ε0, fun h => H _ _ h⟩ 
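The block that just ended characterizes the uniformity of the sum: a set is an entourage of X ⊕ Y exactly when it contains all pairs at distance below some δ, and the mixed inl/inr cases are discharged because opposite sides sit at distance at least 1 (Sum.one_le_dist_inl_inr and Sum.one_le_dist_inr_inl above). Per the Sum.dist_eq_glueDist goals, Sum.dist is the ε = 1 instance of the gluing distance along constant junction maps; concretely it has the following shape, where sumDist' is an illustrative reconstruction rather than Mathlib's literal definition.

import Mathlib.Topology.MetricSpace.Basic

variable {X Y : Type*} [MetricSpace X] [MetricSpace Y]

-- Same-side pairs keep their distance; a cross pair routes through arbitrary
-- basepoints (Nonempty.some ⟨a⟩ uses the point itself as the nonemptiness
-- witness) and pays a fixed toll of 1.
noncomputable def sumDist' : X ⊕ Y → X ⊕ Y → ℝ
  | .inl a, .inl a' => dist a a'
  | .inr b, .inr b' => dist b b'
  | .inl a, .inr b => dist a (Nonempty.some ⟨a⟩) + 1 + dist (Nonempty.some ⟨b⟩) b
  | .inr b, .inl a => dist b (Nonempty.some ⟨b⟩) + 1 + dist (Nonempty.some ⟨a⟩) a

-- The separation fact used to discard the mixed cases in the uniformity proof:
example (a : X) (b : Y) : (1 : ℝ) ≤ sumDist' (.inl a) (.inr b) := by
  show (1 : ℝ) ≤ dist a (Nonempty.some ⟨a⟩) + 1 + dist (Nonempty.some ⟨b⟩) b
  linarith [@dist_nonneg _ _ a (Nonempty.some ⟨a⟩), @dist_nonneg _ _ (Nonempty.some ⟨b⟩) b]

The goals that follow verify the remaining metric axioms for Sum.dist (dist_self, then the triangle inequality by reducing to the glueDist lemmas with the junction maps constant at the chosen basepoints) before the traces move on to sigma types.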
[GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x : X ⊕ Y ⊢ dist x x = 0 [PROOFSTEP] cases x [GOAL] case inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝ : X ⊢ dist (Sum.inl val✝) (Sum.inl val✝) = 0 [PROOFSTEP] simp only [Sum.dist, dist_self] [GOAL] case inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y val✝ : Y ⊢ dist (Sum.inr val✝) (Sum.inr val✝) = 0 [PROOFSTEP] simp only [Sum.dist, dist_self] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : X q : Y x✝ : X ⊕ Y ⊢ dist (Sum.inl p) x✝ ≤ dist (Sum.inl p) (Sum.inr q) + dist (Sum.inr q) x✝ [PROOFSTEP] simp only [Sum.dist_eq_glueDist p q] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : X q : Y x✝ : X ⊕ Y ⊢ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl p) x✝ ≤ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl p) (Sum.inr q) + glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr q) x✝ [PROOFSTEP] exact glueDist_triangle _ _ _ (by norm_num) _ _ _ [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : X q : Y x✝ : X ⊕ Y ⊢ Unit → Unit → |dist (Nonempty.some (_ : Nonempty X)) (Nonempty.some (_ : Nonempty X)) - dist (Nonempty.some (_ : Nonempty Y)) (Nonempty.some (_ : Nonempty Y))| ≤ 2 * 1 [PROOFSTEP] norm_num [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x✝ : X ⊕ Y q : X r : Y ⊢ dist x✝ (Sum.inr r) ≤ dist x✝ (Sum.inl q) + dist (Sum.inl q) (Sum.inr r) [PROOFSTEP] simp only [Sum.dist_eq_glueDist q r] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x✝ : X ⊕ Y q : X r : Y ⊢ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 x✝ (Sum.inr r) ≤ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 x✝ (Sum.inl q) + glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inl q) (Sum.inr r) [PROOFSTEP] exact glueDist_triangle _ _ _ (by norm_num) _ _ _ [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x✝ : X ⊕ Y q : X r : Y ⊢ Unit → Unit → |dist (Nonempty.some (_ : Nonempty X)) (Nonempty.some (_ : Nonempty X)) - dist (Nonempty.some (_ : Nonempty Y)) (Nonempty.some (_ : Nonempty Y))| ≤ 2 * 1 [PROOFSTEP] norm_num [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : Y x✝ : X ⊕ Y r : X ⊢ dist (Sum.inr p) (Sum.inl r) ≤ dist (Sum.inr p) x✝ + dist x✝ (Sum.inl r) [PROOFSTEP] simp only [Sum.dist_eq_glueDist r p] [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : Y x✝ : X ⊕ Y r : X ⊢ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr p) (Sum.inl r) ≤ glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 (Sum.inr p) x✝ + glueDist (fun x => Nonempty.some (_ : Nonempty X)) (fun x => Nonempty.some (_ : Nonempty Y)) 1 x✝ (Sum.inl r) [PROOFSTEP] exact glueDist_triangle _ _ _ (by norm_num) _ _ _ [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : Y x✝ : X ⊕ Y r : X ⊢ Unit → Unit → |dist (Nonempty.some (_ : Nonempty X)) (Nonempty.some (_ : Nonempty X)) - dist 
(Nonempty.some (_ : Nonempty Y)) (Nonempty.some (_ : Nonempty Y))| ≤ 2 * 1 [PROOFSTEP] norm_num [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y x✝¹ x✝ : X ⊕ Y ⊢ (fun x y => ↑{ val := Sum.dist x y, property := (_ : 0 ≤ Sum.dist x y) }) x✝¹ x✝ = ENNReal.ofReal (dist x✝¹ x✝) [PROOFSTEP] exact ENNReal.coe_nnreal_eq _ [GOAL] X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p q : X ⊕ Y h : dist p q = 0 ⊢ p = q [PROOFSTEP] cases' p with p p [GOAL] case inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y q : X ⊕ Y p : X h : dist (Sum.inl p) q = 0 ⊢ Sum.inl p = q [PROOFSTEP] cases' q with q q [GOAL] case inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y q : X ⊕ Y p : Y h : dist (Sum.inr p) q = 0 ⊢ Sum.inr p = q [PROOFSTEP] cases' q with q q [GOAL] case inl.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p q : X h : dist (Sum.inl p) (Sum.inl q) = 0 ⊢ Sum.inl p = Sum.inl q [PROOFSTEP] rw [eq_of_dist_eq_zero h] [GOAL] case inl.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : X q : Y h : dist (Sum.inl p) (Sum.inr q) = 0 ⊢ Sum.inl p = Sum.inr q [PROOFSTEP] exact eq_of_glueDist_eq_zero _ _ _ one_pos _ _ ((Sum.dist_eq_glueDist p q).symm.trans h) [GOAL] case inr.inl X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p : Y q : X h : dist (Sum.inr p) (Sum.inl q) = 0 ⊢ Sum.inr p = Sum.inl q [PROOFSTEP] exact eq_of_glueDist_eq_zero _ _ _ one_pos _ _ ((Sum.dist_eq_glueDist q p).symm.trans h) [GOAL] case inr.inr X : Type u Y : Type v Z : Type w inst✝¹ : MetricSpace X inst✝ : MetricSpace Y p q : Y h : dist (Sum.inr p) (Sum.inr q) = 0 ⊢ Sum.inr p = Sum.inr q [PROOFSTEP] rw [eq_of_dist_eq_zero h] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j h : i = j ⊢ E j = E i [PROOFSTEP] rw [h] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x y : E i ⊢ dist { fst := i, snd := x } { fst := i, snd := y } = dist x y [PROOFSTEP] simp [Dist.dist, Sigma.dist] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i j : ι h : i ≠ j x : E i y : E j ⊢ 1 ≤ dist { fst := i, snd := x } { fst := j, snd := y } [PROOFSTEP] rw [Sigma.dist_ne h x y] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i j : ι h : i ≠ j x : E i y : E j ⊢ 1 ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y [PROOFSTEP] linarith [@dist_nonneg _ _ x (Nonempty.some ⟨x⟩), @dist_nonneg _ _ (Nonempty.some ⟨y⟩) y] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) x y : (i : ι) × E i h : dist x y < 1 ⊢ x.fst = y.fst [PROOFSTEP] cases x [GOAL] case mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) y : (i : ι) × E i fst✝ : ι snd✝ : E fst✝ h : dist { fst := fst✝, snd := snd✝ } y < 1 ⊢ { fst := fst✝, snd := snd✝ }.fst = y.fst [PROOFSTEP] cases y [GOAL] case mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) fst✝¹ : ι snd✝¹ : E fst✝¹ fst✝ : ι snd✝ : E fst✝ h : dist { fst := fst✝¹, snd := snd✝¹ } { fst := fst✝, snd := snd✝ } < 1 ⊢ { fst := fst✝¹, snd := snd✝¹ }.fst = { fst := fst✝, snd := snd✝ }.fst [PROOFSTEP] contrapose! 
h [GOAL] case mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) fst✝¹ : ι snd✝¹ : E fst✝¹ fst✝ : ι snd✝ : E fst✝ h : fst✝¹ ≠ fst✝ ⊢ 1 ≤ dist { fst := fst✝¹, snd := snd✝¹ } { fst := fst✝, snd := snd✝ } [PROOFSTEP] apply one_le_dist_of_ne h [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) x y z : (i : ι) × E i ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] rcases x with ⟨i, x⟩ [GOAL] case mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) y z : (i : ι) × E i i : ι x : E i ⊢ dist { fst := i, snd := x } z ≤ dist { fst := i, snd := x } y + dist y z [PROOFSTEP] rcases y with ⟨j, y⟩ [GOAL] case mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) z : (i : ι) × E i i : ι x : E i j : ι y : E j ⊢ dist { fst := i, snd := x } z ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } z [PROOFSTEP] rcases z with ⟨k, z⟩ [GOAL] case mk.mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k ⊢ dist { fst := i, snd := x } { fst := k, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := k, snd := z } [PROOFSTEP] rcases eq_or_ne i k with (rfl | hik) [GOAL] case mk.mk.mk.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j z : E i ⊢ dist { fst := i, snd := x } { fst := i, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := i, snd := z } [PROOFSTEP] rcases eq_or_ne i j with (rfl | hij) [GOAL] case mk.mk.mk.inl.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x z y : E i ⊢ dist { fst := i, snd := x } { fst := i, snd := z } ≤ dist { fst := i, snd := x } { fst := i, snd := y } + dist { fst := i, snd := y } { fst := i, snd := z } [PROOFSTEP] simpa using dist_triangle x y z [GOAL] case mk.mk.mk.inl.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j z : E i hij : i ≠ j ⊢ dist { fst := i, snd := x } { fst := i, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := i, snd := z } [PROOFSTEP] simp only [Sigma.dist_same, Sigma.dist_ne hij, Sigma.dist_ne hij.symm] [GOAL] case mk.mk.mk.inl.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j z : E i hij : i ≠ j ⊢ dist x z ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + (dist y (Nonempty.some (_ : Nonempty (E j))) + 1 + dist (Nonempty.some (_ : Nonempty (E i))) z) [PROOFSTEP] calc dist x z ≤ dist x (Nonempty.some ⟨x⟩) + 0 + 0 + (0 + 0 + dist (Nonempty.some ⟨z⟩) z) := by simpa only [zero_add, add_zero] using dist_triangle _ _ _ _ ≤ _ := by apply_rules [add_le_add, le_rfl, dist_nonneg, zero_le_one] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j z : E i hij : i ≠ j ⊢ dist x z ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 0 + 0 + (0 + 0 + dist (Nonempty.some (_ : Nonempty (E i))) z) [PROOFSTEP] simpa only [zero_add, add_zero] using dist_triangle _ _ _ [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j z : E i hij : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 0 + 0 + (0 + 0 + dist (Nonempty.some (_ : Nonempty (E i))) z) ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + (dist y (Nonempty.some (_ : 
Nonempty (E j))) + 1 + dist (Nonempty.some (_ : Nonempty (E i))) z) [PROOFSTEP] apply_rules [add_le_add, le_rfl, dist_nonneg, zero_le_one] [GOAL] case mk.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k ⊢ dist { fst := i, snd := x } { fst := k, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := k, snd := z } [PROOFSTEP] rcases eq_or_ne i j with (rfl | hij) [GOAL] case mk.mk.mk.inr.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i k : ι z : E k hik : i ≠ k y : E i ⊢ dist { fst := i, snd := x } { fst := k, snd := z } ≤ dist { fst := i, snd := x } { fst := i, snd := y } + dist { fst := i, snd := y } { fst := k, snd := z } [PROOFSTEP] simp only [Sigma.dist_ne hik, Sigma.dist_same] [GOAL] case mk.mk.mk.inr.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i k : ι z : E k hik : i ≠ k y : E i ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z ≤ dist x y + (dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] calc dist x (Nonempty.some ⟨x⟩) + 1 + dist (Nonempty.some ⟨z⟩) z ≤ dist x y + dist y (Nonempty.some ⟨y⟩) + 1 + dist (Nonempty.some ⟨z⟩) z := by apply_rules [add_le_add, le_rfl, dist_triangle] _ = _ := by abel [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i k : ι z : E k hik : i ≠ k y : E i ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z ≤ dist x y + dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z [PROOFSTEP] apply_rules [add_le_add, le_rfl, dist_triangle] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i k : ι z : E k hik : i ≠ k y : E i ⊢ dist x y + dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z = dist x y + (dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] abel [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i k : ι z : E k hik : i ≠ k y : E i ⊢ dist x y + dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z = dist x y + (dist y (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] abel [GOAL] case mk.mk.mk.inr.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k hij : i ≠ j ⊢ dist { fst := i, snd := x } { fst := k, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := k, snd := z } [PROOFSTEP] rcases eq_or_ne j k with (rfl | hjk) [GOAL] case mk.mk.mk.inr.inr.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j z : E j hik : i ≠ j ⊢ dist { fst := i, snd := x } { fst := j, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := j, snd := z } [PROOFSTEP] simp only [Sigma.dist_ne hij, Sigma.dist_same] [GOAL] case mk.mk.mk.inr.inr.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j z : E j hik : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) z ≤ dist x (Nonempty.some (_ : Nonempty 
(E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z [PROOFSTEP] calc dist x (Nonempty.some ⟨x⟩) + 1 + dist (Nonempty.some ⟨z⟩) z ≤ dist x (Nonempty.some ⟨x⟩) + 1 + (dist (Nonempty.some ⟨z⟩) y + dist y z) := by apply_rules [add_le_add, le_rfl, dist_triangle] _ = _ := by abel [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j z : E j hik : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) z ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + (dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z) [PROOFSTEP] apply_rules [add_le_add, le_rfl, dist_triangle] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j z : E j hik : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + (dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z) = dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z [PROOFSTEP] abel [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j z : E j hik : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + (dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z) = dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + dist y z [PROOFSTEP] abel [GOAL] case mk.mk.mk.inr.inr.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k hij : i ≠ j hjk : j ≠ k ⊢ dist { fst := i, snd := x } { fst := k, snd := z } ≤ dist { fst := i, snd := x } { fst := j, snd := y } + dist { fst := j, snd := y } { fst := k, snd := z } [PROOFSTEP] simp only [hik, hij, hjk, Sigma.dist_ne, Ne.def, not_false_iff] [GOAL] case mk.mk.mk.inr.inr.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k hij : i ≠ j hjk : j ≠ k ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + (dist y (Nonempty.some (_ : Nonempty (E j))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] calc dist x (Nonempty.some ⟨x⟩) + 1 + dist (Nonempty.some ⟨z⟩) z = dist x (Nonempty.some ⟨x⟩) + 1 + 0 + (0 + 0 + dist (Nonempty.some ⟨z⟩) z) := by simp only [add_zero, zero_add] _ ≤ _ := by apply_rules [add_le_add, zero_le_one, dist_nonneg, le_rfl] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k hij : i ≠ j hjk : j ≠ k ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z = dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + 0 + (0 + 0 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] simp only [add_zero, zero_add] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j k : ι z : E k hik : i ≠ k hij : i ≠ j hjk : j ≠ k ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + 0 + (0 + 0 + dist (Nonempty.some (_ : Nonempty (E k))) z) ≤ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist (Nonempty.some (_ : Nonempty (E j))) y + (dist y (Nonempty.some (_ : Nonempty (E j))) + 1 + dist (Nonempty.some (_ : Nonempty (E k))) z) [PROOFSTEP] apply_rules [add_le_add, zero_le_one, dist_nonneg, le_rfl] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace 
(E i) s : Set ((i : ι) × E i) ⊢ IsOpen s ↔ ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s [PROOFSTEP] constructor [GOAL] case mp ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) ⊢ IsOpen s → ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s [PROOFSTEP] rintro hs ⟨i, x⟩ hx [GOAL] case mp.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ⊢ ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist { fst := i, snd := x } y < ε → y ∈ s [PROOFSTEP] obtain ⟨ε, εpos, hε⟩ : ∃ ε > 0, ball x ε ⊆ Sigma.mk i ⁻¹' s := Metric.isOpen_iff.1 (isOpen_sigma_iff.1 hs i) x hx [GOAL] case mp.mk.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s ⊢ ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist { fst := i, snd := x } y < ε → y ∈ s [PROOFSTEP] refine' ⟨min ε 1, lt_min εpos zero_lt_one, _⟩ [GOAL] case mp.mk.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s ⊢ ∀ (y : (i : ι) × E i), dist { fst := i, snd := x } y < min ε 1 → y ∈ s [PROOFSTEP] rintro ⟨j, y⟩ hy [GOAL] case mp.mk.intro.intro.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s j : ι y : E j hy : dist { fst := i, snd := x } { fst := j, snd := y } < min ε 1 ⊢ { fst := j, snd := y } ∈ s [PROOFSTEP] rcases eq_or_ne i j with (rfl | hij) [GOAL] case mp.mk.intro.intro.mk.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s y : E i hy : dist { fst := i, snd := x } { fst := i, snd := y } < min ε 1 ⊢ { fst := i, snd := y } ∈ s [PROOFSTEP] simp only [Sigma.dist_same, lt_min_iff] at hy [GOAL] case mp.mk.intro.intro.mk.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s y : E i hy : dist x y < ε ∧ dist x y < 1 ⊢ { fst := i, snd := y } ∈ s [PROOFSTEP] exact hε (mem_ball'.2 hy.1) [GOAL] case mp.mk.intro.intro.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s j : ι y : E j hy : dist { fst := i, snd := x } { fst := j, snd := y } < min ε 1 hij : i ≠ j ⊢ { fst := j, snd := y } ∈ s [PROOFSTEP] apply (lt_irrefl (1 : ℝ) _).elim [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) hs : IsOpen s i : ι x : E i hx : { fst := i, snd := x } ∈ s ε : ℝ εpos : ε > 0 hε : ball x ε ⊆ Sigma.mk i ⁻¹' s j : ι y : E j hy : dist { fst := i, snd := x } { fst := j, snd := y } < min ε 1 hij : i ≠ j ⊢ 1 < 1 [PROOFSTEP] calc 1 ≤ Sigma.dist ⟨i, x⟩ ⟨j, y⟩ := Sigma.one_le_dist_of_ne hij _ _ _ < 1 := hy.trans_le (min_le_right _ _) [GOAL] case mpr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) 
⊢ (∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s) → IsOpen s [PROOFSTEP] refine fun H => isOpen_sigma_iff.2 fun i => Metric.isOpen_iff.2 fun x hx => ?_ [GOAL] case mpr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) H : ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s i : ι x : E i hx : x ∈ Sigma.mk i ⁻¹' s ⊢ ∃ ε, ε > 0 ∧ ball x ε ⊆ Sigma.mk i ⁻¹' s [PROOFSTEP] obtain ⟨ε, εpos, hε⟩ : ∃ ε > 0, ∀ y, dist (⟨i, x⟩ : Σ j, E j) y < ε → y ∈ s := H ⟨i, x⟩ hx [GOAL] case mpr.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) H : ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s i : ι x : E i hx : x ∈ Sigma.mk i ⁻¹' s ε : ℝ εpos : ε > 0 hε : ∀ (y : (j : ι) × E j), dist { fst := i, snd := x } y < ε → y ∈ s ⊢ ∃ ε, ε > 0 ∧ ball x ε ⊆ Sigma.mk i ⁻¹' s [PROOFSTEP] refine' ⟨ε, εpos, fun y hy => _⟩ [GOAL] case mpr.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) H : ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s i : ι x : E i hx : x ∈ Sigma.mk i ⁻¹' s ε : ℝ εpos : ε > 0 hε : ∀ (y : (j : ι) × E j), dist { fst := i, snd := x } y < ε → y ∈ s y : E i hy : y ∈ ball x ε ⊢ y ∈ Sigma.mk i ⁻¹' s [PROOFSTEP] apply hε ⟨i, y⟩ [GOAL] case mpr.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) H : ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s i : ι x : E i hx : x ∈ Sigma.mk i ⁻¹' s ε : ℝ εpos : ε > 0 hε : ∀ (y : (j : ι) × E j), dist { fst := i, snd := x } y < ε → y ∈ s y : E i hy : y ∈ ball x ε ⊢ dist { fst := i, snd := x } { fst := i, snd := y } < ε [PROOFSTEP] rw [Sigma.dist_same] [GOAL] case mpr.intro.intro ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) s : Set ((i : ι) × E i) H : ∀ (x : (i : ι) × E i), x ∈ s → ∃ ε, ε > 0 ∧ ∀ (y : (i : ι) × E i), dist x y < ε → y ∈ s i : ι x : E i hx : x ∈ Sigma.mk i ⁻¹' s ε : ℝ εpos : ε > 0 hε : ∀ (y : (j : ι) × E j), dist { fst := i, snd := x } y < ε → y ∈ s y : E i hy : y ∈ ball x ε ⊢ dist x y < ε [PROOFSTEP] exact mem_ball'.1 hy [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) ⊢ MetricSpace ((i : ι) × E i) [PROOFSTEP] refine' MetricSpace.ofDistTopology Sigma.dist _ _ Sigma.dist_triangle Sigma.isOpen_iff _ [GOAL] case refine'_1 ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) ⊢ ∀ (x : (i : ι) × E i), Sigma.dist x x = 0 [PROOFSTEP] rintro ⟨i, x⟩ [GOAL] case refine'_1.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i ⊢ Sigma.dist { fst := i, snd := x } { fst := i, snd := x } = 0 [PROOFSTEP] simp [Sigma.dist] [GOAL] case refine'_2 ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) ⊢ ∀ (x y : (i : ι) × E i), Sigma.dist x y = Sigma.dist y x [PROOFSTEP] rintro ⟨i, x⟩ ⟨j, y⟩ [GOAL] case refine'_2.mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j ⊢ Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = Sigma.dist { fst := j, snd := y } { fst := i, snd := x } [PROOFSTEP] rcases eq_or_ne i j with (rfl | h) [GOAL] case refine'_2.mk.mk.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x y : E i ⊢ Sigma.dist { fst := i, snd := x } { fst := i, snd := y } = Sigma.dist { fst := i, snd := y } { fst := i, snd := x } 
[PROOFSTEP] simp [Sigma.dist, dist_comm] [GOAL] case refine'_2.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j h : i ≠ j ⊢ Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = Sigma.dist { fst := j, snd := y } { fst := i, snd := x } [PROOFSTEP] simp only [Sigma.dist, dist_comm, h, h.symm, not_false_iff, dif_neg] [GOAL] case refine'_2.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j h : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist y (Nonempty.some (_ : Nonempty (E j))) = dist y (Nonempty.some (_ : Nonempty (E j))) + 1 + dist x (Nonempty.some (_ : Nonempty (E i))) [PROOFSTEP] abel [GOAL] case refine'_2.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j h : i ≠ j ⊢ dist x (Nonempty.some (_ : Nonempty (E i))) + 1 + dist y (Nonempty.some (_ : Nonempty (E j))) = dist y (Nonempty.some (_ : Nonempty (E j))) + 1 + dist x (Nonempty.some (_ : Nonempty (E i))) [PROOFSTEP] abel [GOAL] case refine'_3 ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) ⊢ ∀ (x y : (i : ι) × E i), Sigma.dist x y = 0 → x = y [PROOFSTEP] rintro ⟨i, x⟩ ⟨j, y⟩ [GOAL] case refine'_3.mk.mk ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j ⊢ Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 → { fst := i, snd := x } = { fst := j, snd := y } [PROOFSTEP] rcases eq_or_ne i j with (rfl | hij) [GOAL] case refine'_3.mk.mk.inl ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x y : E i ⊢ Sigma.dist { fst := i, snd := x } { fst := i, snd := y } = 0 → { fst := i, snd := x } = { fst := i, snd := y } [PROOFSTEP] simp [Sigma.dist] [GOAL] case refine'_3.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j ⊢ Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 → { fst := i, snd := x } = { fst := j, snd := y } [PROOFSTEP] intro h [GOAL] case refine'_3.mk.mk.inr ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j h : Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 ⊢ { fst := i, snd := x } = { fst := j, snd := y } [PROOFSTEP] apply (lt_irrefl (1 : ℝ) _).elim [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j h : Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 ⊢ 1 < 1 [PROOFSTEP] calc 1 ≤ Sigma.dist (⟨i, x⟩ : Σ k, E k) ⟨j, y⟩ := Sigma.one_le_dist_of_ne hij _ _ _ < 1 := by rw [h]; exact zero_lt_one [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j h : Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 ⊢ Sigma.dist { fst := i, snd := x } { fst := j, snd := y } < 1 [PROOFSTEP] rw [h] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x : E i j : ι y : E j hij : i ≠ j h : Sigma.dist { fst := i, snd := x } { fst := j, snd := y } = 0 ⊢ 0 < 1 [PROOFSTEP] exact zero_lt_one [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝ : (i : ι) → MetricSpace (E i) i : ι x y : E i ⊢ dist { fst := i, snd := x } { fst := i, snd := y } = dist x y [PROOFSTEP] simp [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) ⊢ CompleteSpace ((i : ι) × E i) [PROOFSTEP] set s : ι → Set (Σ i, E i) := fun i => Sigma.fst ⁻¹' { i } [GOAL] ι : Type u_1 E : 
ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} ⊢ CompleteSpace ((i : ι) × E i) [PROOFSTEP] set U := {p : (Σ k, E k) × Σ k, E k | dist p.1 p.2 < 1} [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} ⊢ CompleteSpace ((i : ι) × E i) [PROOFSTEP] have hc : ∀ i, IsComplete (s i) := fun i => by simp only [← range_sigmaMk] exact (isometry_mk i).uniformInducing.isComplete_range [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} i : ι ⊢ IsComplete (s i) [PROOFSTEP] simp only [← range_sigmaMk] [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} i : ι ⊢ IsComplete (range (Sigma.mk i)) [PROOFSTEP] exact (isometry_mk i).uniformInducing.isComplete_range [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} hc : ∀ (i : ι), IsComplete (s i) ⊢ CompleteSpace ((i : ι) × E i) [PROOFSTEP] have hd : ∀ (i j), ∀ x ∈ s i, ∀ y ∈ s j, (x, y) ∈ U → i = j := fun i j x hx y hy hxy => (Eq.symm hx).trans ((fst_eq_of_dist_lt_one _ _ hxy).trans hy) [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} hc : ∀ (i : ι), IsComplete (s i) hd : ∀ (i j : ι) (x : (i : ι) × E i), x ∈ s i → ∀ (y : (i : ι) × E i), y ∈ s j → (x, y) ∈ U → i = j ⊢ CompleteSpace ((i : ι) × E i) [PROOFSTEP] refine' completeSpace_of_isComplete_univ _ [GOAL] ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} hc : ∀ (i : ι), IsComplete (s i) hd : ∀ (i j : ι) (x : (i : ι) × E i), x ∈ s i → ∀ (y : (i : ι) × E i), y ∈ s j → (x, y) ∈ U → i = j ⊢ IsComplete univ [PROOFSTEP] convert isComplete_iUnion_separated hc (dist_mem_uniformity zero_lt_one) hd [GOAL] case h.e'_3 ι : Type u_1 E : ι → Type u_2 inst✝¹ : (i : ι) → MetricSpace (E i) inst✝ : ∀ (i : ι), CompleteSpace (E i) s : ι → Set ((i : ι) × E i) := fun i => Sigma.fst ⁻¹' {i} U : Set (((k : ι) × E k) × (k : ι) × E k) := {p | dist p.fst p.snd < 1} hc : ∀ (i : ι), IsComplete (s i) hd : ∀ (i j : ι) (x : (i : ι) × E i), x ∈ s i → ∀ (y : (i : ι) × E i), y ∈ s j → (x, y) ∈ U → i = j ⊢ univ = ⋃ (i : ι), s i [PROOFSTEP] simp only [← preimage_iUnion, iUnion_of_singleton, preimage_univ] [GOAL] X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ p q : Z ⊢ |dist (Φ p) (Φ q) - dist (Ψ p) (Ψ q)| ≤ 2 * 0 [PROOFSTEP] rw [hΦ.dist_eq, hΨ.dist_eq] [GOAL] X : Type u Y : Type v Z : Type w 
inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ p q : Z ⊢ |dist p q - dist p q| ≤ 2 * 0 [PROOFSTEP] simp [GOAL] X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ x✝¹ x✝ : X ⊕ Y ⊢ (fun x y => ↑{ val := glueDist Φ Ψ 0 x y, property := (_ : 0 ≤ glueDist Φ Ψ 0 x y) }) x✝¹ x✝ = ENNReal.ofReal (dist x✝¹ x✝) [PROOFSTEP] exact ENNReal.coe_nnreal_eq _ [GOAL] X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ ⊢ toGlueL hΦ hΨ ∘ Φ = toGlueR hΦ hΨ ∘ Ψ [PROOFSTEP] let i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ [GOAL] X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ ⊢ toGlueL hΦ hΨ ∘ Φ = toGlueR hΦ hΨ ∘ Ψ [PROOFSTEP] let _ := i.toUniformSpace [GOAL] X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ x✝ : UniformSpace (X ⊕ Y) := PseudoMetricSpace.toUniformSpace ⊢ toGlueL hΦ hΨ ∘ Φ = toGlueR hΦ hΨ ∘ Ψ [PROOFSTEP] funext [GOAL] case h X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ x✝¹ : UniformSpace (X ⊕ Y) := PseudoMetricSpace.toUniformSpace x✝ : Z ⊢ (toGlueL hΦ hΨ ∘ Φ) x✝ = (toGlueR hΦ hΨ ∘ Ψ) x✝ [PROOFSTEP] simp only [comp, toGlueL, toGlueR] [GOAL] case h X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ x✝¹ : UniformSpace (X ⊕ Y) := PseudoMetricSpace.toUniformSpace x✝ : Z ⊢ Quotient.mk'' (Sum.inl (Φ x✝)) = Quotient.mk'' (Sum.inr (Ψ x✝)) [PROOFSTEP] refine' UniformSpace.SeparationQuotient.mk_eq_mk.2 (Metric.inseparable_iff.2 _) [GOAL] case h X : Type u Y : Type v Z : Type w inst✝³ : Nonempty Z inst✝² : MetricSpace Z inst✝¹ : MetricSpace X inst✝ : MetricSpace Y Φ : Z → X Ψ : Z → Y ε : ℝ hΦ : Isometry Φ hΨ : Isometry Ψ i : PseudoMetricSpace (X ⊕ Y) := gluePremetric hΦ hΨ x✝¹ : UniformSpace (X ⊕ Y) := PseudoMetricSpace.toUniformSpace x✝ : Z ⊢ dist (Sum.inl (Φ x✝)) (Sum.inr (Ψ x✝)) = 0 [PROOFSTEP] exact glueDist_glued_points Φ Ψ 0 _ [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n hx : x.fst ≤ 0 hy : y.fst ≤ 0 ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] cases' x with i x [GOAL] case mk X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) y : (n : ℕ) × X n hy : y.fst ≤ 0 i : ℕ x : X i hx : { fst := i, snd := x }.fst ≤ 0 ⊢ inductiveLimitDist f { fst := i, snd := x } y = dist (leRecOn hx (fun {k} => f k) { fst := i, snd := x }.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] cases' y with j y [GOAL] case mk.mk X : ℕ 
→ Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) i : ℕ x : X i hx : { fst := i, snd := x }.fst ≤ 0 j : ℕ y : X j hy : { fst := j, snd := y }.fst ≤ 0 ⊢ inductiveLimitDist f { fst := i, snd := x } { fst := j, snd := y } = dist (leRecOn hx (fun {k} => f k) { fst := i, snd := x }.snd) (leRecOn hy (fun {k} => f k) { fst := j, snd := y }.snd) [PROOFSTEP] obtain rfl : i = 0 := nonpos_iff_eq_zero.1 hx [GOAL] case mk.mk X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) j : ℕ y : X j hy : { fst := j, snd := y }.fst ≤ 0 x : X 0 hx : { fst := 0, snd := x }.fst ≤ 0 ⊢ inductiveLimitDist f { fst := 0, snd := x } { fst := j, snd := y } = dist (leRecOn hx (fun {k} => f k) { fst := 0, snd := x }.snd) (leRecOn hy (fun {k} => f k) { fst := j, snd := y }.snd) [PROOFSTEP] obtain rfl : j = 0 := nonpos_iff_eq_zero.1 hy [GOAL] case mk.mk X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x : X 0 hx : { fst := 0, snd := x }.fst ≤ 0 y : X 0 hy : { fst := 0, snd := y }.fst ≤ 0 ⊢ inductiveLimitDist f { fst := 0, snd := x } { fst := 0, snd := y } = dist (leRecOn hx (fun {k} => f k) { fst := 0, snd := x }.snd) (leRecOn hy (fun {k} => f k) { fst := 0, snd := y }.snd) [PROOFSTEP] rfl [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] by_cases h : max x.1 y.1 = (m + 1) [GOAL] case pos X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : max x.fst y.fst = m + 1 ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] generalize m + 1 = m' at * [GOAL] case pos X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m m' : ℕ hx : x.fst ≤ m' hy : y.fst ≤ m' h : max x.fst y.fst = m' ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] subst m' [GOAL] case pos X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ max x.fst y.fst hy : y.fst ≤ max x.fst y.fst ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] rfl [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] have : max x.1 y.1 ≤ succ m := by simp [hx, hy] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 ⊢ max x.fst y.fst ≤ succ m [PROOFSTEP] simp [hx, hy] [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ 
m + 1 h : ¬max x.fst y.fst = m + 1 this : max x.fst y.fst ≤ succ m ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] have : max x.1 y.1 ≤ m := by simpa [h] using of_le_succ this [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 this : max x.fst y.fst ≤ succ m ⊢ max x.fst y.fst ≤ m [PROOFSTEP] simpa [h] using of_le_succ this [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 this✝ : max x.fst y.fst ≤ succ m this : max x.fst y.fst ≤ m ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] have xm : x.1 ≤ m := le_trans (le_max_left _ _) this [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 this✝ : max x.fst y.fst ≤ succ m this : max x.fst y.fst ≤ m xm : x.fst ≤ m ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] have ym : y.1 ≤ m := le_trans (le_max_right _ _) this [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 this✝ : max x.fst y.fst ≤ succ m this : max x.fst y.fst ≤ m xm : x.fst ≤ m ym : y.fst ≤ m ⊢ inductiveLimitDist f x y = dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) [PROOFSTEP] rw [leRecOn_succ xm, leRecOn_succ ym, (I m).dist_eq] [GOAL] case neg X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ hx : x.fst ≤ m + 1 hy : y.fst ≤ m + 1 h : ¬max x.fst y.fst = m + 1 this✝ : max x.fst y.fst ≤ succ m this : max x.fst y.fst ≤ m xm : x.fst ≤ m ym : y.fst ≤ m ⊢ inductiveLimitDist f x y = dist (leRecOn xm (fun {k} => f k) x.snd) (leRecOn ym (fun {k} => f k) y.snd) [PROOFSTEP] exact inductiveLimitDist_eq_dist I x y m xm ym [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x : (n : ℕ) × X n ⊢ dist x x = 0 [PROOFSTEP] simp [dist, inductiveLimitDist] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n ⊢ dist x y = dist y x [PROOFSTEP] let m := max x.1 y.1 [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ := max x.fst y.fst ⊢ dist x y = dist y x [PROOFSTEP] have hx : x.1 ≤ m := le_max_left _ _ [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ := max x.fst y.fst hx : x.fst ≤ m ⊢ dist x y = dist y x [PROOFSTEP] have hy : y.1 ≤ m := le_max_right _ _ [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ := max x.fst y.fst hx : x.fst ≤ m hy : y.fst ≤ m ⊢ dist x y = dist y x [PROOFSTEP] unfold dist [GOAL] X : ℕ → 
Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ := max x.fst y.fst hx : x.fst ≤ m hy : y.fst ≤ m ⊢ { dist := inductiveLimitDist f }.1 x y = { dist := inductiveLimitDist f }.1 y x [PROOFSTEP] simp only [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y : (n : ℕ) × X n m : ℕ := max x.fst y.fst hx : x.fst ≤ m hy : y.fst ≤ m ⊢ inductiveLimitDist f x y = inductiveLimitDist f y x [PROOFSTEP] rw [inductiveLimitDist_eq_dist I x y m hx hy, inductiveLimitDist_eq_dist I y x m hy hx, dist_comm] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] let m := max (max x.1 y.1) z.1 [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n m : ℕ := max (max x.fst y.fst) z.fst ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] have hx : x.1 ≤ m := le_trans (le_max_left _ _) (le_max_left _ _) [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n m : ℕ := max (max x.fst y.fst) z.fst hx : x.fst ≤ m ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] have hy : y.1 ≤ m := le_trans (le_max_right _ _) (le_max_left _ _) [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n m : ℕ := max (max x.fst y.fst) z.fst hx : x.fst ≤ m hy : y.fst ≤ m ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] have hz : z.1 ≤ m := le_max_right _ _ [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n m : ℕ := max (max x.fst y.fst) z.fst hx : x.fst ≤ m hy : y.fst ≤ m hz : z.fst ≤ m ⊢ dist x z ≤ dist x y + dist y z [PROOFSTEP] calc inductiveLimitDist f x z = dist (leRecOn hx (f _) x.2 : X m) (leRecOn hz (f _) z.2 : X m) := inductiveLimitDist_eq_dist I x z m hx hz _ ≤ dist (leRecOn hx (f _) x.2 : X m) (leRecOn hy (f _) y.2 : X m) + dist (leRecOn hy (f _) y.2 : X m) (leRecOn hz (f _) z.2 : X m) := (dist_triangle _ _ _) _ = inductiveLimitDist f x y + inductiveLimitDist f y z := by rw [inductiveLimitDist_eq_dist I x y m hx hy, inductiveLimitDist_eq_dist I y z m hy hz] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x y z : (n : ℕ) × X n m : ℕ := max (max x.fst y.fst) z.fst hx : x.fst ≤ m hy : y.fst ≤ m hz : z.fst ≤ m ⊢ dist (leRecOn hx (fun {k} => f k) x.snd) (leRecOn hy (fun {k} => f k) y.snd) + dist (leRecOn hy (fun {k} => f k) y.snd) (leRecOn hz (fun {k} => f k) z.snd) = inductiveLimitDist f x y + inductiveLimitDist f y z [PROOFSTEP] rw [inductiveLimitDist_eq_dist I x y m hx hy, inductiveLimitDist_eq_dist I y z m hy hz] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) x✝¹ x✝ : (n : ℕ) × X n ⊢ (fun x y => ↑{ val := inductiveLimitDist f x y, property := (_ : 0 ≤ inductiveLimitDist f x y) }) x✝¹ x✝ = ENNReal.ofReal (dist x✝¹ x✝) [PROOFSTEP] exact ENNReal.coe_nnreal_eq _ [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x y : X n ⊢ dist (toInductiveLimit I n x) (toInductiveLimit I n y) = dist x y [PROOFSTEP] change inductiveLimitDist f ⟨n, x⟩ 
⟨n, y⟩ = dist x y [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x y : X n ⊢ inductiveLimitDist f { fst := n, snd := x } { fst := n, snd := y } = dist x y [PROOFSTEP] rw [inductiveLimitDist_eq_dist I ⟨n, x⟩ ⟨n, y⟩ n (le_refl n) (le_refl n), leRecOn_self, leRecOn_self] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ ⊢ toInductiveLimit I (succ n) ∘ f n = toInductiveLimit I n [PROOFSTEP] let _ := inductivePremetric I [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I ⊢ toInductiveLimit I (succ n) ∘ f n = toInductiveLimit I n [PROOFSTEP] funext x [GOAL] case h X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ (toInductiveLimit I (succ n) ∘ f n) x = toInductiveLimit I n x [PROOFSTEP] simp only [comp, toInductiveLimit] [GOAL] case h X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ Quotient.mk'' { fst := succ n, snd := f n x } = Quotient.mk'' { fst := n, snd := x } [PROOFSTEP] refine' UniformSpace.SeparationQuotient.mk_eq_mk.2 (Metric.inseparable_iff.2 _) [GOAL] case h X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ dist { fst := succ n, snd := f n x } { fst := n, snd := x } = 0 [PROOFSTEP] show inductiveLimitDist f ⟨n.succ, f n x⟩ ⟨n, x⟩ = 0 [GOAL] case h X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ inductiveLimitDist f { fst := succ n, snd := f n x } { fst := n, snd := x } = 0 [PROOFSTEP] rw [inductiveLimitDist_eq_dist I ⟨n.succ, f n x⟩ ⟨n, x⟩ n.succ, leRecOn_self, leRecOn_succ, leRecOn_self, dist_self] [GOAL] X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ { fst := n, snd := x }.fst ≤ { fst := n, snd := x }.fst case h.h2 X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ { fst := n, snd := x }.fst ≤ n + 1 X : ℕ → Type u inst✝ : (n : ℕ) → MetricSpace (X n) f : (n : ℕ) → X n → X (n + 1) I : ∀ (n : ℕ), Isometry (f n) n : ℕ x✝ : PseudoMetricSpace ((n : ℕ) × X n) := inductivePremetric I x : X n ⊢ { fst := succ n, snd := f n x }.fst ≤ { fst := succ n, snd := f n x }.fst [PROOFSTEP] exacts [le_rfl, le_succ _, le_rfl]
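Putting the goals above together (a reconstruction from the displayed proof states, not a quotation of the Mathlib source), the distance on the sigma type $\Sigma\, i,\ E\ i$ that these steps manipulate is, with $p_i$ the chosen basepoint `Nonempty.some _ : E i`:

$$
\operatorname{dist}\big(\langle i, x\rangle, \langle j, y\rangle\big) =
\begin{cases}
\operatorname{dist}(x, y), & i = j,\\
\operatorname{dist}(x, p_i) + 1 + \operatorname{dist}(y, p_j), & i \neq j.
\end{cases}
$$

Points in distinct fibers are therefore at distance at least $1$; this is exactly what the `Sigma.one_le_dist_of_ne` step asserts, and it is what makes the fibers `s i = Sigma.fst ⁻¹' {i}` uniformly separated with respect to the entourage `dist < 1` in the completeness proof via `isComplete_iUnion_separated`.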
lemma deformation_retract_of_space: "S \<subseteq> topspace X \<and> (\<exists>r. homotopic_with (\<lambda>x. True) X X id r \<and> retraction_maps X (subtopology X S) r id) \<longleftrightarrow> S retract_of_space X \<and> (\<exists>f. homotopic_with (\<lambda>x. True) X X id f \<and> f ` (topspace X) \<subseteq> S)"
If $S$ is a set of pairwise orthogonal vectors in a Euclidean space, and $x$ is a vector in the span of $S$, then $x$ is orthogonal to $a - \sum_{b \in S} \frac{b \cdot a}{b \cdot b} b$.
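A short worked check of this statement (a sketch from the statement alone, reading the sum over a finite $S$): fix $b' \in S$. Pairwise orthogonality kills every cross term $b' \cdot b$ with $b \neq b'$, so

$$
b' \cdot \Big( a - \sum_{b \in S} \frac{b \cdot a}{b \cdot b}\, b \Big)
= b' \cdot a - \frac{b' \cdot a}{b' \cdot b'}\,(b' \cdot b') = 0,
$$

where for $b' = 0$ both terms vanish outright. Since $x \in \operatorname{span} S$ is a linear combination of the elements of $S$ and the inner product is linear in each argument, $x$ is orthogonal to $a - \sum_{b \in S} \frac{b \cdot a}{b \cdot b}\, b$ as claimed.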
lemma closure_approachableD: assumes "x \<in> closure S" "e>0" shows "\<exists>y\<in>S. dist x y < e"
lemma starlike_imp_connected: fixes S :: "'a::real_normed_vector set" shows "starlike S \<Longrightarrow> connected S"
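The underlying argument, sketched (the standard one, not taken verbatim from any source here): starlikeness of $S$ provides a center $a \in S$ such that the closed segment from $a$ to every $x \in S$ lies in $S$. The straight-line path

$$
\gamma_x(t) = (1 - t)\,x + t\,a, \qquad t \in [0, 1],
$$

stays inside $S$ and joins $x$ to $a$, so $S$ is path-connected, and path-connected subsets of a real normed vector space are connected.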
struct Svmda
    fm
    lev::AbstractVector
    ni::AbstractVector
end

"""
    svmda(X, y; kern = "krbf", gamma = 1. / size(X, 2),
        degree = 3, coef0 = 0., cost = 1.)

Support vector machine for discrimination (C-SVC).
* `X` : X-data.
* `y` : y-data (univariate).
* `kern` : Type of kernel used to compute the Gram matrices.
    Possible values are "krbf", "kpol", "klin" or "ktanh".
* `gamma` : See below.
* `degree` : See below.
* `coef0` : See below.
* `cost` : Cost of constraints violation (the C parameter).

Kernel types:
* "krbf" -- radial basis function: exp(-gamma * |x - y|^2)
* "kpol" -- polynomial: (gamma * x' * y + coef0)^degree
* "klin" -- linear: x' * y
* "ktanh" -- sigmoid: tanh(gamma * x' * y + coef0)

The function uses LIBSVM.jl (https://github.com/JuliaML/LIBSVM.jl),
which is an interface to the LIBSVM library (Chang & Lin 2001).

## References

Julia package LIBSVM.jl: https://github.com/JuliaML/LIBSVM.jl

Chang, C.-C. & Lin, C.-J. (2001). LIBSVM: a library for support vector
machines. Software available at http://www.csie.ntu.edu.tw/~cjlin/libsvm.
Detailed documentation (algorithms, formulae, ...) can be found in
http://www.csie.ntu.edu.tw/~cjlin/papers/libsvm.ps.gz

Chih-Chung Chang and Chih-Jen Lin, LIBSVM: a library for support vector
machines. ACM Transactions on Intelligent Systems and Technology,
2:27:1--27:27, 2011. Software available at
http://www.csie.ntu.edu.tw/~cjlin/libsvm

Schölkopf, B., Smola, A.J., 2002. Learning with kernels: support vector
machines, regularization, optimization, and beyond. Adaptive computation
and machine learning. MIT Press, Cambridge, Mass.
"""
function svmda(X, y; kern = "krbf", gamma = 1. / size(X, 2),
        degree = 3, coef0 = 0., cost = 1.)
    gamma = Float64(gamma); degree = Int64(degree)
    coef0 = Float64(coef0); cost = Float64(cost)
    X = ensure_mat(X)
    y = vec(y)
    ztab = tab(y)
    if kern == "krbf"
        fkern = LIBSVM.Kernel.RadialBasis
    elseif kern == "kpol"
        fkern = LIBSVM.Kernel.Polynomial
    elseif kern == "klin"
        fkern = LIBSVM.Kernel.Linear
    elseif kern == "ktanh"
        fkern = LIBSVM.Kernel.Sigmoid
    end
    nt = 0
    fm = svmtrain(X', y;
        svmtype = SVC, kernel = fkern, gamma = gamma,
        coef0 = coef0, degree = degree, cost = cost, nt = nt)
    Svmda(fm, ztab.keys, ztab.vals)
end

"""
    predict(object::Svmda, X)

Compute y-predictions from a fitted model.
* `object` : The fitted model.
* `X` : X-data for which predictions are computed.
"""
function predict(object::Svmda, X)
    pred = svmpredict(object.fm, X')[1]
    n = length(pred)
    pred = reshape(pred, n, 1)
    (pred = pred,)
end
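A minimal usage sketch of the two functions above. The data are made up, and it assumes the enclosing package (which also provides the `ensure_mat` and `tab` helpers that `svmda` calls) is loaded together with LIBSVM.jl:

```julia
# Hypothetical data; labels may be any label type accepted by LIBSVM.jl.
X = rand(100, 10)                       # 100 observations, 10 variables
y = rand(["a", "b", "c"], 100)          # univariate class membership

fm = svmda(X, y; kern = "krbf", gamma = 1 / 10, cost = 10.)  # fit a C-SVC
res = predict(fm, X)                    # named tuple (pred = ...,)
res.pred                                # 100 x 1 matrix of predicted classes
```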
#
# This file is part of the Actors.jl Julia package,
# MIT license, part of https://github.com/JuliaActors
#

"""
```
become!(lk::Link, func, args1...; kwargs...)
become!(name::Symbol, ....)
```
Cause an actor to change behavior.

# Arguments
- actor `lk::Link` (or `name::Symbol` if registered),
- `func`: a callable object,
- `args1...`: (partial) arguments to `func`,
- `kwargs...`: keyword arguments to `func`.
"""
function become!(lk::Link, func, args...; kwargs...)
    isempty(args) && isempty(kwargs) ?
        send(lk, Become(func)) :
        send(lk, Become(Bhv(func, args...; kwargs...)))
end
become!(name::Symbol, args...; kwargs...) = become!(whereis(name), args...; kwargs...)

"""
```
call(lk::Link, [from::Link,] args2...; timeout::Real=5.0)
call(name::Symbol, ....)
```
Call an actor to execute its behavior and to send a
[`Response`](@ref) with the result.

# Arguments
- actor `lk::Link` (or `name::Symbol` if registered),
- `from::Link`: sender link,
- `args2...`: remaining arguments to the actor.
- `timeout::Real=5.0`: timeout in seconds.

**Note:** If `from` is omitted, `call` blocks and returns the result.
"""
call(lk::Link, from::Link, args...) = send(lk, Call(args, from))
call(lk::Link, args...; timeout::Real=5.0) = request(lk, Call, args...; timeout)
call(name::Symbol, args...; kwargs...) = call(whereis(name), args...; kwargs...)

"""
```
cast(lk::Link, args2...)
cast(name::Symbol, args2...)
```
Cast `args2...` to the actor `lk` (or `name` if registered) to
execute its behavior with `args2...` without sending a response.

**Note:** you can retrieve the returned value with [`query`](@ref).
"""
cast(lk::Link, args...) = send(lk, Cast(args))
cast(name::Symbol, args...) = cast(whereis(name), args...)

"""
```
exec(lk::Link, from::Link, f, args...; kwargs...)
exec(lk::Link, f, args...; timeout::Real=5.0, kwargs...)
exec(name::Symbol, ....)
```
Ask an actor `lk` (or `name` if registered) to execute an
arbitrary function and to send the returned value as
[`Response`](@ref).

# Arguments
- actor `lk::Link` or `name::Symbol` if registered,
- `from::Link`: the link a `Response` should be sent to.
- `f`: a callable object,
- `args...; kwargs...`: arguments and keyword arguments to it,
- `timeout::Real=5.0`: timeout in seconds. Set `timeout=Inf`
    if you don't want to timeout.

**Note:** If `from` is omitted, `exec` blocks, waits and returns
the result (with a `timeout`).
"""
function exec(lk::Link, from::Link, f, args...; kwargs...)
    isempty(args) && isempty(kwargs) ?
        send(lk, Exec(f, from)) :
        send(lk, Exec(Bhv(f, args...; kwargs...), from))
end
function exec(lk::Link, f, args...; timeout::Real=5.0, kwargs...)
    isempty(args) && isempty(kwargs) ?
        request(lk, Exec, f; timeout=timeout) :
        request(lk, Exec, Bhv(f, args...; kwargs...); timeout)
end
exec(name::Symbol, args...; kwargs...) = exec(whereis(name), args...; kwargs...)

"""
```
exit!(lk::Link, reason=:normal)
exit!(name::Symbol, ....)
```
Tell an actor `lk` (or `name` if registered) to stop. If it
has a [`term`](@ref _ACT) function, it calls that with
`reason` as last argument.
"""
exit!(lk::Link, reason=:normal) = send(lk, Exit(reason, fill(nothing, 3)...))
exit!(name::Symbol, reason=:normal) = exit!(whereis(name), reason)

"""
```
init!(lk::Link, f, args...; kwargs...)
init!(name::Symbol, ....)
```
Tell an actor `lk` to save the callable object `f` with the given
arguments as an `init` object in its [`_ACT`](@ref) variable. The
`init` object will be called by a supervisor at actor restart.

# Arguments
- actor `lk::Link` or `name::Symbol` if registered,
- `f`: callable object,
- `args...`: arguments to `f`,
- `kwargs...`: keyword arguments to `f`.
"""
init!(lk::Link, f, args...; kwargs...) = send(lk, Init(Bhv(f, args...; kwargs...)))
init!(name::Symbol, args...; kwargs...) = init!(whereis(name), args...; kwargs...)

"""
```
query(lk::Link, [from::Link,] s::Symbol; timeout::Real=5.0)
query(name::Symbol, ....)
```
Query an actor about an internal state variable `s`.

# Parameters
- actor `lk::Link` or `name::Symbol` if registered,
- `from::Link`: sender link,
- `s::Symbol` one of `:mode`,`:bhv`,`:res`,`:sta`,`:usr`.
- `timeout::Real=5.0`: timeout in seconds.

**Note:** If `from` is omitted, `query` blocks and returns the response.
In that case there is a `timeout`.

# Examples
```julia
julia> f(x, y; u=0, v=0) = x+y+u+v  # implement a behavior
f (generic function with 1 method)

julia> fact = spawn(Bhv(f, 1))      # start an actor with it
Link{Channel{Any}}(Channel{Any}(sz_max:32,sz_curr:0), 1, :default)

julia> query(fact, :mode)           # query the mode
:default

julia> cast(fact, 1)                # cast a 2nd argument to it
Actors.Cast((1,))

julia> query(fact, :res)            # query the result
2

julia> query(fact, :sta)            # query the state

julia> query(fact, :bhv)            # query the behavior
Bhv(f, (1,), Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}(), Actors.var"#2#4"{Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}},typeof(f),Tuple{Int64}}(Base.Iterators.Pairs{Union{},Union{},Tuple{},NamedTuple{(),Tuple{}}}(), f, (1,)))
```
"""
query(lk::Link, from::Link, s::Symbol=:sta) = send(lk, Query(s, from))
query(lk::Link, s::Symbol=:sta; timeout::Real=5.0) = request(lk, Query, s; timeout)
query(name::Symbol, args...; kwargs...) = query(whereis(name), args...; kwargs...)

"""
```
term!(lk::Link, f, args1...; kwargs...)
term!(name::Symbol, ....)
```
Tell an actor `lk` (or `name::Symbol` if registered) to execute `f`
with the given partial arguments and an exit reason when it
terminates. The exit reason is added by the actor to `args1...`
when it exits.
"""
term!(lk::Link, f, args...; kwargs...) = send(lk, Term(Bhv(f, args...; kwargs...)))
term!(name::Symbol, args...; kwargs...) = term!(whereis(name), args...; kwargs...)

"""
    trapExit(lk::Link=self(), mode=:sticky)

Change the mode of an actor. A `:sticky` actor does not exit if it
receives an [`Exit`](@ref) signal from a connected actor and does
not propagate it further. Instead it reports the failure and saves
a link to the failed actor.

See [`diag`](@ref) for getting links to failed actors from a
`:sticky` actor.
"""
trapExit(lk::Link=self(), mode=:sticky) = send(lk, Update(:mode, mode))

"""
```
update!(lk::Link, x; s::Symbol=:sta)
update!(lk::Link, arg::Args)
update!(name::Symbol, ....)
```
Update an actor's internal state `s` with `x`.

# Arguments
- actor `lk::Link` or `name::Symbol` if registered,
- `x`: value/variable to update the chosen state with,
- `arg::Args`: arguments to update,
- `s::Symbol`: one of `:arg`, `:mode`, `:name`, `:self`, `:sta`, `:usr`.

*Note:* If you want to update the stored arguments to the behavior
function with `s=:arg`, you must pass an [`Args`](@ref) to `arg`. If
`Args` has keyword arguments, they are merged with existing keyword
arguments to the behavior function.

# Example
```julia
julia> update!(fact, 5)       # update the state variable
Actors.Update(:sta, 5)

julia> query(fact, :sta)      # query it
5

julia> update!(fact, Args(0, u=5, v=5));  # update arguments to the behavior

julia> call(fact, 0)          # call the actor with 0
10
```
"""
update!(lk::Link, x; s::Symbol=:sta) = send(lk, Update(s, x))
update!(lk::Link, arg::Args) = send(lk, Update(:arg, arg))
update!(name::Symbol, args...; kwargs...) = update!(whereis(name), args...; kwargs...)
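Only `query` and `update!` carry examples in the docstrings above, so here is a hypothetical end-to-end session sketching the remaining calls (it assumes `using Actors` and the `spawn`/`Bhv` exports seen in the `query` example; the `greet` function is made up):

```julia
using Actors

greet(greeting, name) = "$greeting, $name!"   # a behavior function

act = spawn(Bhv(greet, "Hello"))   # actor with partial argument "Hello"
call(act, "World")                 # blocking call -> "Hello, World!"

become!(act, greet, "Goodbye")     # swap in a new (partial) behavior
call(act, "World")                 # -> "Goodbye, World!"

exit!(act)                         # stop the actor
```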
/* matrix/gsl_matrix_ushort.h * * Copyright (C) 1996, 1997, 1998, 1999, 2000 Gerard Jungman, Brian Gough * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #ifndef __GSL_MATRIX_USHORT_H__ #define __GSL_MATRIX_USHORT_H__ #include <stdlib.h> #include <gsl/gsl_types.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_check_range.h> #include <gsl/gsl_vector_ushort.h> #undef __BEGIN_DECLS #undef __END_DECLS #ifdef __cplusplus # define __BEGIN_DECLS extern "C" { # define __END_DECLS } #else # define __BEGIN_DECLS /* empty */ # define __END_DECLS /* empty */ #endif __BEGIN_DECLS typedef struct { size_t size1; size_t size2; size_t tda; unsigned short * data; gsl_block_ushort * block; int owner; } gsl_matrix_ushort; typedef struct { gsl_matrix_ushort matrix; } _gsl_matrix_ushort_view; typedef _gsl_matrix_ushort_view gsl_matrix_ushort_view; typedef struct { gsl_matrix_ushort matrix; } _gsl_matrix_ushort_const_view; typedef const _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_view; /* Allocation */ GSL_EXPORT gsl_matrix_ushort * gsl_matrix_ushort_alloc (const size_t n1, const size_t n2); GSL_EXPORT gsl_matrix_ushort * gsl_matrix_ushort_calloc (const size_t n1, const size_t n2); GSL_EXPORT gsl_matrix_ushort * gsl_matrix_ushort_alloc_from_block (gsl_block_ushort * b, const size_t offset, const size_t n1, const size_t n2, const size_t d2); GSL_EXPORT gsl_matrix_ushort * gsl_matrix_ushort_alloc_from_matrix (gsl_matrix_ushort * m, const size_t k1, const size_t k2, const size_t n1, const size_t n2); GSL_EXPORT gsl_vector_ushort * gsl_vector_ushort_alloc_row_from_matrix (gsl_matrix_ushort * m, const size_t i); GSL_EXPORT gsl_vector_ushort * gsl_vector_ushort_alloc_col_from_matrix (gsl_matrix_ushort * m, const size_t j); GSL_EXPORT void gsl_matrix_ushort_free (gsl_matrix_ushort * m); /* Views */ GSL_EXPORT _gsl_matrix_ushort_view gsl_matrix_ushort_submatrix (gsl_matrix_ushort * m, const size_t i, const size_t j, const size_t n1, const size_t n2); GSL_EXPORT _gsl_vector_ushort_view gsl_matrix_ushort_row (gsl_matrix_ushort * m, const size_t i); GSL_EXPORT _gsl_vector_ushort_view gsl_matrix_ushort_column (gsl_matrix_ushort * m, const size_t j); GSL_EXPORT _gsl_vector_ushort_view gsl_matrix_ushort_diagonal (gsl_matrix_ushort * m); GSL_EXPORT _gsl_vector_ushort_view gsl_matrix_ushort_subdiagonal (gsl_matrix_ushort * m, const size_t k); GSL_EXPORT _gsl_vector_ushort_view gsl_matrix_ushort_superdiagonal (gsl_matrix_ushort * m, const size_t k); GSL_EXPORT _gsl_matrix_ushort_view gsl_matrix_ushort_view_array (unsigned short * base, const size_t n1, const size_t n2); GSL_EXPORT _gsl_matrix_ushort_view gsl_matrix_ushort_view_array_with_tda (unsigned short * base, const size_t n1, const size_t n2, const size_t tda); GSL_EXPORT _gsl_matrix_ushort_view gsl_matrix_ushort_view_vector (gsl_vector_ushort * v, const size_t n1, const size_t n2); GSL_EXPORT _gsl_matrix_ushort_view 
gsl_matrix_ushort_view_vector_with_tda (gsl_vector_ushort * v, const size_t n1, const size_t n2, const size_t tda); GSL_EXPORT _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_submatrix (const gsl_matrix_ushort * m, const size_t i, const size_t j, const size_t n1, const size_t n2); GSL_EXPORT _gsl_vector_ushort_const_view gsl_matrix_ushort_const_row (const gsl_matrix_ushort * m, const size_t i); GSL_EXPORT _gsl_vector_ushort_const_view gsl_matrix_ushort_const_column (const gsl_matrix_ushort * m, const size_t j); GSL_EXPORT _gsl_vector_ushort_const_view gsl_matrix_ushort_const_diagonal (const gsl_matrix_ushort * m); GSL_EXPORT _gsl_vector_ushort_const_view gsl_matrix_ushort_const_subdiagonal (const gsl_matrix_ushort * m, const size_t k); GSL_EXPORT _gsl_vector_ushort_const_view gsl_matrix_ushort_const_superdiagonal (const gsl_matrix_ushort * m, const size_t k); GSL_EXPORT _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_view_array (const unsigned short * base, const size_t n1, const size_t n2); GSL_EXPORT _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_view_array_with_tda (const unsigned short * base, const size_t n1, const size_t n2, const size_t tda); GSL_EXPORT _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_view_vector (const gsl_vector_ushort * v, const size_t n1, const size_t n2); GSL_EXPORT _gsl_matrix_ushort_const_view gsl_matrix_ushort_const_view_vector_with_tda (const gsl_vector_ushort * v, const size_t n1, const size_t n2, const size_t tda); /* Operations */ GSL_EXPORT unsigned short gsl_matrix_ushort_get(const gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT void gsl_matrix_ushort_set(gsl_matrix_ushort * m, const size_t i, const size_t j, const unsigned short x); GSL_EXPORT unsigned short * gsl_matrix_ushort_ptr(gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT const unsigned short * gsl_matrix_ushort_const_ptr(const gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT void gsl_matrix_ushort_set_zero (gsl_matrix_ushort * m); GSL_EXPORT void gsl_matrix_ushort_set_identity (gsl_matrix_ushort * m); GSL_EXPORT void gsl_matrix_ushort_set_all (gsl_matrix_ushort * m, unsigned short x); GSL_EXPORT int gsl_matrix_ushort_fread (FILE * stream, gsl_matrix_ushort * m) ; GSL_EXPORT int gsl_matrix_ushort_fwrite (FILE * stream, const gsl_matrix_ushort * m) ; GSL_EXPORT int gsl_matrix_ushort_fscanf (FILE * stream, gsl_matrix_ushort * m); GSL_EXPORT int gsl_matrix_ushort_fprintf (FILE * stream, const gsl_matrix_ushort * m, const char * format); GSL_EXPORT int gsl_matrix_ushort_memcpy(gsl_matrix_ushort * dest, const gsl_matrix_ushort * src); GSL_EXPORT int gsl_matrix_ushort_swap(gsl_matrix_ushort * m1, gsl_matrix_ushort * m2); GSL_EXPORT int gsl_matrix_ushort_swap_rows(gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT int gsl_matrix_ushort_swap_columns(gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT int gsl_matrix_ushort_swap_rowcol(gsl_matrix_ushort * m, const size_t i, const size_t j); GSL_EXPORT int gsl_matrix_ushort_transpose (gsl_matrix_ushort * m); GSL_EXPORT int gsl_matrix_ushort_transpose_memcpy (gsl_matrix_ushort * dest, const gsl_matrix_ushort * src); GSL_EXPORT unsigned short gsl_matrix_ushort_max (const gsl_matrix_ushort * m); GSL_EXPORT unsigned short gsl_matrix_ushort_min (const gsl_matrix_ushort * m); GSL_EXPORT void gsl_matrix_ushort_minmax (const gsl_matrix_ushort * m, unsigned short * min_out, unsigned short * max_out); GSL_EXPORT void gsl_matrix_ushort_max_index (const 
gsl_matrix_ushort * m, size_t * imax, size_t *jmax); GSL_EXPORT void gsl_matrix_ushort_min_index (const gsl_matrix_ushort * m, size_t * imin, size_t *jmin); GSL_EXPORT void gsl_matrix_ushort_minmax_index (const gsl_matrix_ushort * m, size_t * imin, size_t * jmin, size_t * imax, size_t * jmax); GSL_EXPORT int gsl_matrix_ushort_isnull (const gsl_matrix_ushort * m); GSL_EXPORT int gsl_matrix_ushort_add (gsl_matrix_ushort * a, const gsl_matrix_ushort * b); GSL_EXPORT int gsl_matrix_ushort_sub (gsl_matrix_ushort * a, const gsl_matrix_ushort * b); GSL_EXPORT int gsl_matrix_ushort_mul_elements (gsl_matrix_ushort * a, const gsl_matrix_ushort * b); GSL_EXPORT int gsl_matrix_ushort_div_elements (gsl_matrix_ushort * a, const gsl_matrix_ushort * b); GSL_EXPORT int gsl_matrix_ushort_scale (gsl_matrix_ushort * a, const double x); GSL_EXPORT int gsl_matrix_ushort_add_constant (gsl_matrix_ushort * a, const double x); GSL_EXPORT int gsl_matrix_ushort_add_diagonal (gsl_matrix_ushort * a, const double x); /***********************************************************************/ /* The functions below are obsolete */ /***********************************************************************/ GSL_EXPORT int gsl_matrix_ushort_get_row(gsl_vector_ushort * v, const gsl_matrix_ushort * m, const size_t i); GSL_EXPORT int gsl_matrix_ushort_get_col(gsl_vector_ushort * v, const gsl_matrix_ushort * m, const size_t j); GSL_EXPORT int gsl_matrix_ushort_set_row(gsl_matrix_ushort * m, const size_t i, const gsl_vector_ushort * v); GSL_EXPORT int gsl_matrix_ushort_set_col(gsl_matrix_ushort * m, const size_t j, const gsl_vector_ushort * v); /* inline functions if you are using GCC */ #ifdef HAVE_INLINE extern inline unsigned short gsl_matrix_ushort_get(const gsl_matrix_ushort * m, const size_t i, const size_t j) { #if GSL_RANGE_CHECK if (i >= m->size1) { GSL_ERROR_VAL("first index out of range", GSL_EINVAL, 0) ; } else if (j >= m->size2) { GSL_ERROR_VAL("second index out of range", GSL_EINVAL, 0) ; } #endif return m->data[i * m->tda + j] ; } extern inline void gsl_matrix_ushort_set(gsl_matrix_ushort * m, const size_t i, const size_t j, const unsigned short x) { #if GSL_RANGE_CHECK if (i >= m->size1) { GSL_ERROR_VOID("first index out of range", GSL_EINVAL) ; } else if (j >= m->size2) { GSL_ERROR_VOID("second index out of range", GSL_EINVAL) ; } #endif m->data[i * m->tda + j] = x ; } extern inline unsigned short * gsl_matrix_ushort_ptr(gsl_matrix_ushort * m, const size_t i, const size_t j) { #if GSL_RANGE_CHECK if (i >= m->size1) { GSL_ERROR_NULL("first index out of range", GSL_EINVAL) ; } else if (j >= m->size2) { GSL_ERROR_NULL("second index out of range", GSL_EINVAL) ; } #endif return (unsigned short *) (m->data + (i * m->tda + j)) ; } extern inline const unsigned short * gsl_matrix_ushort_const_ptr(const gsl_matrix_ushort * m, const size_t i, const size_t j) { #if GSL_RANGE_CHECK if (i >= m->size1) { GSL_ERROR_NULL("first index out of range", GSL_EINVAL) ; } else if (j >= m->size2) { GSL_ERROR_NULL("second index out of range", GSL_EINVAL) ; } #endif return (const unsigned short *) (m->data + (i * m->tda + j)) ; } #endif __END_DECLS #endif /* __GSL_MATRIX_USHORT_H__ */
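A small usage sketch of the API declared in the header above, following the usual GSL allocate/set/get/free cycle. Only functions declared above are called; the `.vector` member of the row view comes from `gsl_vector_ushort.h`, so treat the snippet as illustrative rather than canonical:

```c
#include <stdio.h>
#include <gsl/gsl_matrix_ushort.h>

int main(void)
{
    /* 3x3 matrix of unsigned shorts, zero-initialized */
    gsl_matrix_ushort *m = gsl_matrix_ushort_calloc(3, 3);

    /* fill: m(i,j) = 10*i + j */
    for (size_t i = 0; i < m->size1; i++)
        for (size_t j = 0; j < m->size2; j++)
            gsl_matrix_ushort_set(m, i, j, (unsigned short)(10 * i + j));

    printf("m(1,2) = %hu\n", gsl_matrix_ushort_get(m, 1, 2));  /* 12 */

    /* a view of row 1; views share storage with m, no separate free */
    _gsl_vector_ushort_view row = gsl_matrix_ushort_row(m, 1);
    printf("row 1 has %zu elements\n", row.vector.size);

    gsl_matrix_ushort_free(m);
    return 0;
}
```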
#ifndef PARMCB_SPTREES_HPP_ #define PARMCB_SPTREES_HPP_ // Copyright (C) Dimitrios Michail 2019 - 2021. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // https://www.boost.org/LICENSE_1_0.txt) #include <iostream> #include <boost/throw_exception.hpp> #include <boost/property_map/property_map.hpp> #include <boost/property_map/function_property_map.hpp> #include <boost/graph/graph_traits.hpp> #include <boost/graph/graph_concepts.hpp> #include <boost/graph/graph_utility.hpp> #include <boost/graph/adjacency_list.hpp> #include <boost/serialization/vector.hpp> #include <parmcb/detail/lex_dijkstra.hpp> #include <parmcb/detail/util.hpp> #include <parmcb/forestindex.hpp> #include <parmcb/spvecgf2.hpp> #include <memory> #include <stack> #include <functional> namespace parmcb { template<class Graph, class WeightMap> class SPNode; template<class Graph, class WeightMap> class SPTree; template<class Graph, class WeightMap, class T> struct SPSubtree; template<class Graph, class WeightMap, bool ParallelUsingTBB> class SPTrees; template<class Graph, class WeightMap> class CandidateCycle; template<class Graph> struct SerializableCandidateCycle; template<class Graph, class WeightMap> struct SerializableMinOddCycle; template<class Graph, class WeightMap> struct SerializableMinOddCycleMinOp; template<class Graph, class WeightMap> class SPNode { public: typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex; typedef typename boost::graph_traits<Graph>::edge_descriptor Edge; typedef typename boost::property_traits<WeightMap>::value_type WeightType; SPNode() : _vertex(), _parity(false), _weight(WeightType()), _pred(), _has_pred(false) { } SPNode(Vertex vertex, WeightType weight) : _vertex(vertex), _parity(false), _weight(WeightType()), _pred(), _has_pred(false) { } SPNode(Vertex vertex, WeightType weight, const Edge &pred) : _vertex(vertex), _parity(false), _weight(weight), _pred(pred), _has_pred(true) { } void add_child(std::shared_ptr<SPNode<Graph, WeightMap>> c) { _children.push_back(c); } std::vector<std::shared_ptr<SPNode<Graph, WeightMap>>>& children() { return _children; } Vertex& vertex() { return _vertex; } bool& parity() { return _parity; } WeightType& weight() { return _weight; } const Edge& pred() { return _pred; } bool has_pred() { return _has_pred; } private: Vertex _vertex; bool _parity; WeightType _weight; Edge _pred; bool _has_pred; std::vector<std::shared_ptr<SPNode<Graph, WeightMap>>> _children; }; template<class Graph, class WeightMap, class T> struct SPSubtree { T info; std::shared_ptr<SPNode<Graph, WeightMap>> root; SPSubtree(T info, std::shared_ptr<SPNode<Graph, WeightMap>> root) : info(info), root(root) { } }; template<class Graph, class WeightMap> class SPTree { public: typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex; typedef typename boost::graph_traits<Graph>::vertex_iterator VertexIt; typedef typename boost::property_map<Graph, boost::vertex_index_t>::type VertexIndexMapType; typedef typename boost::graph_traits<Graph>::edge_descriptor Edge; typedef typename boost::property_traits<WeightMap>::value_type WeightType; SPTree(std::size_t id, const Graph &g, const VertexIndexMapType& index_map, const WeightMap &weight_map, const Vertex &source) : _id(id), _g(g), _weight_map(weight_map), _index_map(index_map), _source( source), _tree_node_map(boost::num_vertices(g)), _first_in_path(boost::num_vertices(g)) { initialize(); } void update_parities(const std::set<Edge> &edges) { 
std::stack<SPSubtree<Graph, WeightMap, bool>> stack; stack.emplace(false, _root); while (!stack.empty()) { SPSubtree<Graph, WeightMap, bool> r = stack.top(); stack.pop(); r.root->parity() = r.info; for (auto c : r.root->children()) { bool is_signed = edges.find(c->pred()) != edges.end(); stack.emplace(SPSubtree<Graph, WeightMap, bool> { static_cast<bool>(r.info ^ is_signed), c }); } } } std::shared_ptr<SPNode<Graph, WeightMap>> node(const Vertex &v) const { return _tree_node_map[_index_map[v]]; } const Vertex& source() const { return _source; } const Graph& graph() const { return _g; } const std::size_t id() const { return _id; } const Vertex first(const Vertex &v) { auto vindex = _index_map[v]; return _first_in_path[vindex]; } template<class EdgeIterator> std::vector<CandidateCycle<Graph, WeightMap>> create_candidate_cycles(EdgeIterator begin, EdgeIterator end) const { // collect tree edges std::set<Edge> tree_edges; VertexIt vi, viend; for (boost::tie(vi, viend) = boost::vertices(_g); vi != viend; ++vi) { auto v = *vi; auto vindex = _index_map[v]; std::shared_ptr<SPNode<Graph, WeightMap>> n = _tree_node_map[vindex]; if (n != nullptr && n->has_pred()) { tree_edges.insert(n->pred()); } } // loop over (non-tree) provided edges and create candidate cycles std::vector<CandidateCycle<Graph, WeightMap>> cycles; for (EdgeIterator it = begin; it != end; it++) { Edge e = *it; if (tree_edges.find(e) != tree_edges.end()) { continue; } // non-tree edge std::shared_ptr<SPNode<Graph, WeightMap>> v = node(boost::source(e, _g)); if (v == nullptr) { continue; } std::shared_ptr<SPNode<Graph, WeightMap>> u = node(boost::target(e, _g)); if (u == nullptr) { continue; } if (_first_in_path[_index_map[v->vertex()]] == _first_in_path[_index_map[u->vertex()]]) { // shortest paths start with the same vertex, discard continue; } WeightType cycle_weight = boost::get(_weight_map, e) + v->weight() + u->weight(); cycles.emplace_back(_id, e, cycle_weight); } return cycles; } std::vector<CandidateCycle<Graph, WeightMap>> create_candidate_cycles() const { auto itPair = boost::edges(_g); return create_candidate_cycles(itPair.first, itPair.second); } std::vector<SerializableCandidateCycle<Graph>> create_serializable_candidate_cycles( const ForestIndex<Graph> &forest_index) { // collect tree edges std::set<Edge> tree_edges; VertexIt vi, viend; for (boost::tie(vi, viend) = boost::vertices(_g); vi != viend; ++vi) { auto v = *vi; auto vindex = _index_map[v]; std::shared_ptr<SPNode<Graph, WeightMap>> n = _tree_node_map[vindex]; if (n != nullptr && n->has_pred()) { tree_edges.insert(n->pred()); } } // loop over all non-tree edges and create candidate cycles std::vector<SerializableCandidateCycle<Graph>> cycles; for (const auto &e : boost::make_iterator_range(boost::edges(_g))) { if (tree_edges.find(e) != tree_edges.end()) { continue; } // non-tree edge std::shared_ptr<SPNode<Graph, WeightMap>> v = node(boost::source(e, _g)); if (v == nullptr) { continue; } std::shared_ptr<SPNode<Graph, WeightMap>> u = node(boost::target(e, _g)); if (u == nullptr) { continue; } if (_first_in_path[_index_map[v->vertex()]] == _first_in_path[_index_map[u->vertex()]]) { // shortest paths start with the same vertex, discard continue; } cycles.emplace_back(_source, forest_index(e)); } return cycles; } private: const std::size_t _id; const Graph &_g; const WeightMap &_weight_map; const VertexIndexMapType &_index_map; const Vertex _source; /* * Shortest path tree root */ std::shared_ptr<SPNode<Graph, WeightMap>> _root; /* * Map from vertex to shortest path 
tree node */ std::vector<std::shared_ptr<SPNode<Graph, WeightMap>>> _tree_node_map; /* * First vertex in shortest path from root to a vertex. */ std::vector<Vertex> _first_in_path; void initialize() { // run shortest path std::vector<WeightType> dist(boost::num_vertices(_g), (std::numeric_limits<WeightType>::max)()); boost::function_property_map<parmcb::detail::VertexIndexFunctor<Graph, WeightType>, Vertex, WeightType&> dist_map( parmcb::detail::VertexIndexFunctor<Graph, WeightType>(dist, _index_map)); std::vector<std::tuple<bool, Edge>> pred(boost::num_vertices(_g), std::make_tuple(false, Edge())); boost::function_property_map<parmcb::detail::VertexIndexFunctor<Graph, std::tuple<bool, Edge>>, Vertex, std::tuple<bool, Edge>&> pred_map( parmcb::detail::VertexIndexFunctor<Graph, std::tuple<bool, Edge> >(pred, _index_map)); lex_dijkstra(_g, _weight_map, _source, dist_map, pred_map); // create tree nodes and mapping VertexIt vi, viend; for (boost::tie(vi, viend) = boost::vertices(_g); vi != viend; ++vi) { auto v = *vi; auto vindex = _index_map[v]; auto p = boost::get(pred_map, v); if (v == _source) { _tree_node_map[vindex] = std::shared_ptr<SPNode<Graph, WeightMap>>( new SPNode<Graph, WeightMap>(v, dist[vindex])); _root = _tree_node_map[vindex]; } else if (std::get<0>(p)) { Edge e = std::get<1>(p); _tree_node_map[vindex] = std::shared_ptr<SPNode<Graph, WeightMap>>( new SPNode<Graph, WeightMap>(v, dist[vindex], e)); } } // link tree nodes for (boost::tie(vi, viend) = boost::vertices(_g); vi != viend; ++vi) { auto v = *vi; auto p = boost::get(pred_map, v); if (std::get<0>(p)) { auto e = std::get<1>(p); auto u = boost::opposite(e, v, _g); auto vindex = _index_map[v]; auto uindex = _index_map[u]; _tree_node_map[uindex]->add_child(_tree_node_map[vindex]); } } // compute first in path compute_first_in_path(); } void compute_first_in_path() { std::stack<SPSubtree<Graph, WeightMap, Vertex>> stack; stack.emplace(_source, _root); while (!stack.empty()) { SPSubtree<Graph, WeightMap, Vertex> r = stack.top(); stack.pop(); if (r.root == _root) { auto v = r.root->vertex(); auto vindex = _index_map[v]; _first_in_path[vindex] = v; for (auto c : r.root->children()) { stack.emplace(SPSubtree<Graph, WeightMap, Vertex> { static_cast<Vertex>(c->vertex()), c }); } } else { auto v = r.root->vertex(); auto vindex = _index_map[v]; _first_in_path[vindex] = r.info; for (auto c : r.root->children()) { stack.emplace(SPSubtree<Graph, WeightMap, Vertex> { static_cast<Vertex>(r.info), c }); } } } } }; template<class Graph, class WeightMap> class CandidateCycle { public: typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex; typedef typename boost::graph_traits<Graph>::edge_descriptor Edge; typedef typename boost::property_traits<WeightMap>::value_type WeightType; CandidateCycle(std::size_t tree, const Edge &e, WeightType weight) : _tree(tree), _e(e), _weight(weight) { } CandidateCycle(const CandidateCycle &c) : _tree(c._tree), _e(c._e), _weight(c._weight) { } CandidateCycle& operator=(const CandidateCycle &other) { if (this != &other) { _tree = other._tree; _e = other._e; _weight = other._weight; } return *this; } std::size_t tree() const { return _tree; } const Edge& edge() const { return _e; } const WeightType& weight() const { return _weight; } private: std::size_t _tree; Edge _e; WeightType _weight; }; template<class Graph> struct SerializableCandidateCycle { typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex; typedef typename ForestIndex<Graph>::size_type Edge; 
SerializableCandidateCycle() { } SerializableCandidateCycle(Vertex v, Edge e) : v(v), e(e) { } template<typename Archive> void serialize(Archive &ar, const unsigned) { ar & v; ar & e; } Vertex v; Edge e; }; template<class Graph, class WeightMap> struct SerializableMinOddCycle { typedef typename ForestIndex<Graph>::size_type Edge; typedef typename boost::property_traits<WeightMap>::value_type WeightType; SerializableMinOddCycle() : exists(false) { } SerializableMinOddCycle(std::vector<Edge> edges, WeightType weight, bool exists) : edges(edges), weight(weight), exists(exists) { } SerializableMinOddCycle(const SerializableMinOddCycle<Graph, WeightMap> &c) : edges(c.edges), weight(c.weight), exists(c.exists) { } SerializableMinOddCycle<Graph, WeightMap>& operator=(const SerializableMinOddCycle<Graph, WeightMap> &other) { if (this != &other) { edges = other.edges; weight = other.weight; exists = other.exists; } return *this; } template<typename Archive> void serialize(Archive &ar, const unsigned) { ar & edges; ar & weight; ar & exists; } std::vector<Edge> edges; WeightType weight; bool exists; }; template<class Graph, class WeightMap> struct SerializableMinOddCycleMinOp { const SerializableMinOddCycle<Graph, WeightMap>& operator()( const SerializableMinOddCycle<Graph, WeightMap> &lhs, const SerializableMinOddCycle<Graph, WeightMap> &rhs) const { if (!lhs.exists || !rhs.exists) { if (lhs.exists) { return lhs; } else { return rhs; } } // both valid, compare if (lhs.weight < rhs.weight) { return lhs; } return rhs; } }; template<class Graph, class WeightMap> class CandidateCycleToSerializableConverter { public: CandidateCycleToSerializableConverter(const std::vector<parmcb::SPTree<Graph, WeightMap>> &trees, const ForestIndex<Graph> &forest_index) : trees(trees), forest_index(forest_index) { } SerializableCandidateCycle<Graph> operator()(const CandidateCycle<Graph, WeightMap> &cycle) const { return SerializableCandidateCycle<Graph>(trees.at(cycle.tree()).source(), forest_index(cycle.edge())); } private: const std::vector<parmcb::SPTree<Graph, WeightMap>> &trees; const ForestIndex<Graph> &forest_index; }; template<class Graph, class WeightMap> class CandidateCycleBuilder { public: typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex; typedef typename boost::graph_traits<Graph>::edge_descriptor Edge; typedef typename boost::property_traits<WeightMap>::value_type WeightType; CandidateCycleBuilder(const Graph &g, const WeightMap &weight_map) : g(g), weight_map(weight_map) { } std::tuple<std::set<Edge>, WeightType, bool> operator()(const std::vector<parmcb::SPTree<Graph, WeightMap>> &trees, const CandidateCycle<Graph, WeightMap> &c, const std::set<Edge> &signed_edges, bool use_weight_limit, WeightType weight_limit) const { std::shared_ptr<SPNode<Graph, WeightMap>> v = trees[c.tree()].node(boost::source(c.edge(), g)); std::shared_ptr<SPNode<Graph, WeightMap>> u = trees[c.tree()].node(boost::target(c.edge(), g)); Edge e = c.edge(); if (v->parity() ^ u->parity() ^ (signed_edges.find(e) != signed_edges.end())) { // odd cycle, validate bool valid = true; WeightType cycle_weight = boost::get(weight_map, e); std::set<Edge> result; result.insert(e); if (use_weight_limit && cycle_weight > weight_limit) { return std::make_tuple(std::set<Edge> { }, 0.0, false); } // first part Vertex w = boost::source(c.edge(), g); std::shared_ptr<SPNode<Graph, WeightMap>> ws = trees[c.tree()].node(w); while (ws->has_pred()) { Edge a = ws->pred(); if (result.insert(a).second == false) { valid = false; break; } 
            cycle_weight += boost::get(weight_map, a);
            if (use_weight_limit && cycle_weight > weight_limit) {
                valid = false;
                break;
            }
            w = boost::opposite(a, w, g);
            ws = trees[c.tree()].node(w);
        }
        if (!valid) {
            return std::make_tuple(std::set<Edge> { }, 0.0, false);
        }

        // second part
        w = boost::target(c.edge(), g);
        ws = trees[c.tree()].node(w);
        while (ws->has_pred()) {
            Edge a = ws->pred();
            if (result.insert(a).second == false) {
                valid = false;
                break;
            }
            cycle_weight += boost::get(weight_map, a);
            if (use_weight_limit && cycle_weight > weight_limit) {
                valid = false;
                break;
            }
            w = boost::opposite(a, w, g);
            ws = trees[c.tree()].node(w);
        }
        if (!valid) {
            return std::make_tuple(std::set<Edge> { }, 0.0, false);
        }
        return std::make_tuple(result, cycle_weight, true);
    }
    return std::make_tuple(std::set<Edge> { }, 0.0, false);
}

private:
    const Graph &g;
    const WeightMap &weight_map;
};

template<class Graph, class WeightMap, bool ParallelUsingTBB>
class ShortestOddCycleLookup {
public:
    typedef typename boost::graph_traits<Graph>::vertex_descriptor Vertex;
    typedef typename boost::graph_traits<Graph>::edge_descriptor Edge;
    typedef typename boost::property_traits<WeightMap>::value_type WeightType;

    ShortestOddCycleLookup(const Graph &g, const WeightMap &weight_map,
            std::vector<parmcb::SPTree<Graph, WeightMap>> &trees,
            std::vector<parmcb::CandidateCycle<Graph, WeightMap>> &cycles,
            bool sorted_cycles) :
            g(g), weight_map(weight_map), candidate_cycle_builder(g, weight_map),
            trees(trees), cycles(cycles), sorted_cycles(sorted_cycles) {
    }

    std::tuple<std::set<Edge>, WeightType, bool> operator()(const std::set<Edge> &edges) {
        return compute_shortest_odd_cycle(edges);
    }

private:
    // Sequential version: update parities on every tree, then scan the
    // candidate cycles keeping the best valid one. If the candidates are
    // sorted by weight, the first valid cycle is already the minimum.
    template<bool is_tbb_enabled = ParallelUsingTBB>
    std::tuple<std::set<Edge>, WeightType, bool> compute_shortest_odd_cycle(
            const std::set<Edge> &edges,
            typename std::enable_if<!is_tbb_enabled>::type* = 0) {
        for (std::size_t i = 0; i < trees.size(); i++) {
            trees[i].update_parities(edges);
        }
        std::tuple<std::set<Edge>, WeightType, bool> min;
        for (CandidateCycle<Graph, WeightMap> c : cycles) {
            std::tuple<std::set<Edge>, WeightType, bool> cc = candidate_cycle_builder(trees, c,
                    edges, std::get<2>(min), std::get<1>(min));
            if (std::get<2>(cc)) {
                if (sorted_cycles) {
                    return cc;
                }
                if (!std::get<2>(min)) {
                    min = cc;
                } else {
                    if (std::get<1>(cc) < std::get<1>(min)) {
                        min = cc;
                    }
                }
            }
        }
        return min;
    }

    // Parallel version (TBB): parities are updated in parallel, then the
    // minimum-weight valid cycle is found with a parallel reduction.
    template<bool is_tbb_enabled = ParallelUsingTBB>
    std::tuple<std::set<Edge>, WeightType, bool> compute_shortest_odd_cycle(
            const std::set<Edge> &edges,
            typename std::enable_if<is_tbb_enabled>::type* = 0) {
        tbb::parallel_for(tbb::blocked_range<std::size_t>(0, trees.size()),
                [&](const tbb::blocked_range<std::size_t> &r) {
                    for (std::size_t i = r.begin(); i != r.end(); ++i) {
                        trees[i].update_parities(edges);
                    }
                });

        std::less<WeightType> compare = std::less<WeightType>();
        typedef std::tuple<std::set<Edge>, WeightType, bool> cycle_t;
        // Combiner: prefer a valid cycle over an invalid one; among valid
        // cycles, prefer the smaller weight.
        auto cycle_min = [compare](const cycle_t &c1, const cycle_t &c2) {
            if (!std::get<2>(c1) || !std::get<2>(c2)) {
                if (std::get<2>(c1)) {
                    return c1;
                } else {
                    return c2;
                }
            }
            // both valid, compare
            if (!compare(std::get<1>(c2), std::get<1>(c1))) {
                return c1;
            }
            return c2;
        };

        return tbb::parallel_reduce(tbb::blocked_range<std::size_t>(0, cycles.size()),
                std::make_tuple(std::set<Edge>(), (std::numeric_limits<WeightType>::max)(), false),
                [&](tbb::blocked_range<std::size_t> r, auto running_min) {
                    for (std::size_t i = r.begin(); i < r.end(); i++) {
                        auto c = cycles[i];
                        auto cc = candidate_cycle_builder(trees, c, edges,
                                std::get<2>(running_min), std::get<1>(running_min));
                        if (std::get<2>(cc)) {
                            if (!std::get<2>(running_min)
                                    || compare(std::get<1>(cc), std::get<1>(running_min))) {
                                running_min = cc;
                            }
                        }
                    }
                    return running_min;
                },
                cycle_min);
    }

    const Graph &g;
    const WeightMap &weight_map;
    const CandidateCycleBuilder<Graph, WeightMap> candidate_cycle_builder;
    std::vector<parmcb::SPTree<Graph, WeightMap>> &trees;
    std::vector<parmcb::CandidateCycle<Graph, WeightMap>> &cycles;
    bool sorted_cycles;
};

} // parmcb

#endif
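The TBB branch above reduces all candidate cycles to a single minimum with tbb::parallel_reduce. Below is a minimal, self-contained sketch of that reduction pattern; entry_t and min_valid are hypothetical stand-ins for parmcb's (edge set, weight, valid) tuples and are not part of the library. The identity element plays the role of the "no cycle found yet" tuple, and the combiner applies the same policy as cycle_min: prefer valid entries and, among valid ones, the smaller weight.

#include <tbb/parallel_reduce.h>
#include <tbb/blocked_range.h>
#include <iostream>
#include <limits>
#include <utility>
#include <vector>

using entry_t = std::pair<double, bool>; // (weight, valid)

entry_t min_valid(const std::vector<entry_t> &entries) {
    // Prefer a valid entry over an invalid one; among valid entries,
    // prefer the smaller weight.
    auto combine = [](const entry_t &a, const entry_t &b) -> entry_t {
        if (!a.second || !b.second)
            return a.second ? a : b;
        return a.first <= b.first ? a : b;
    };
    return tbb::parallel_reduce(
        tbb::blocked_range<std::size_t>(0, entries.size()),
        entry_t{(std::numeric_limits<double>::max)(), false}, // nothing found yet
        [&](const tbb::blocked_range<std::size_t> &r, entry_t running) {
            for (std::size_t i = r.begin(); i != r.end(); ++i)
                running = combine(running, entries[i]);
            return running;
        },
        combine);
}

int main() {
    std::vector<entry_t> entries{{3.5, true}, {1.25, false}, {2.0, true}};
    entry_t best = min_valid(entries);
    std::cout << best.first << '\n'; // prints 2: the 1.25 entry is invalid
    return 0;
}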
State Before: α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f ⊢ 0 ≤ setToSimpleFunc T f State After: α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i Tactic: refine' sum_nonneg fun i hi => _ State Before: α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i State After: case pos α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : i = 0 ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ 
(s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : ¬i = 0 ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i Tactic: by_cases h0 : i = 0 State Before: case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : ¬i = 0 ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i State After: case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : ¬i = 0 ⊢ 0 ≤ i Tactic: refine' hT_nonneg _ (measurableSet_fiber _ _) (measure_preimage_lt_top_of_integrable _ hfi h0) i _ State Before: case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : ¬i = 0 ⊢ 0 ≤ i State After: case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 ⊢ 0 ≤ i Tactic: rw [mem_range] at hi State Before: case neg α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ 
: NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 ⊢ 0 ≤ i State After: case neg.intro α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 y : α hy : ↑f y = i ⊢ 0 ≤ i Tactic: obtain ⟨y, hy⟩ := Set.mem_range.mp hi State Before: case neg.intro α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 y : α hy : ↑f y = i ⊢ 0 ≤ i State After: case neg.intro α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 y : α hy : ↑f y = i ⊢ 0 ≤ ↑f y Tactic: rw [← hy] State Before: case neg.intro α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f 
: α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ Set.range ↑f h0 : ¬i = 0 y : α hy : ↑f y = i ⊢ 0 ≤ ↑f y State After: no goals Tactic: convert hf y State Before: case pos α : Type u_1 E : Type ?u.361138 F : Type ?u.361141 F' : Type ?u.361144 G : Type ?u.361147 𝕜 : Type ?u.361150 p : ℝ≥0∞ inst✝¹⁰ : NormedAddCommGroup E inst✝⁹ : NormedSpace ℝ E inst✝⁸ : NormedAddCommGroup F inst✝⁷ : NormedSpace ℝ F inst✝⁶ : NormedAddCommGroup F' inst✝⁵ : NormedSpace ℝ F' inst✝⁴ : NormedAddCommGroup G m : MeasurableSpace α μ : Measure α G' : Type u_2 G'' : Type u_3 inst✝³ : NormedLatticeAddCommGroup G'' inst✝² : NormedSpace ℝ G'' inst✝¹ : NormedLatticeAddCommGroup G' inst✝ : NormedSpace ℝ G' T : Set α → G' →L[ℝ] G'' hT_nonneg : ∀ (s : Set α), MeasurableSet s → ↑↑μ s < ⊤ → ∀ (x : G'), 0 ≤ x → 0 ≤ ↑(T s) x f : α →ₛ G' hf : 0 ≤ f hfi : Integrable ↑f i : G' hi : i ∈ SimpleFunc.range f h0 : i = 0 ⊢ 0 ≤ ↑(T (↑f ⁻¹' {i})) i State After: no goals Tactic: simp [h0]
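For readability, the goal states above compress into the following tactic script. This is a sketch only: the statement is condensed (instance binders and the coercions ↑↑μ and ↑(T s) are elided), while the tactic steps and hypothesis names are exactly those appearing in the trace.

-- Condensed sketch of the proof traced above.
theorem setToSimpleFunc_nonneg (hT_nonneg : ∀ s, MeasurableSet s → μ s < ⊤ →
      ∀ x : G', 0 ≤ x → 0 ≤ T s x)
    (f : α →ₛ G') (hf : 0 ≤ f) (hfi : Integrable ↑f μ) :
    0 ≤ setToSimpleFunc T f := by
  refine' sum_nonneg fun i hi => _
  by_cases h0 : i = 0
  · simp [h0]  -- the fiber of 0 contributes T _ 0 = 0
  · -- nonzero values: apply hT_nonneg on the finite-measure fiber ↑f ⁻¹' {i}
    refine' hT_nonneg _ (measurableSet_fiber _ _)
      (measure_preimage_lt_top_of_integrable _ hfi h0) i _
    rw [mem_range] at hi
    obtain ⟨y, hy⟩ := Set.mem_range.mp hi
    rw [← hy]
    convert hf y  -- 0 ≤ f gives 0 ≤ f y pointwise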
[STATEMENT] lemma deg_of_strict_mono[simp]: "deg_of x < deg_of y \<longleftrightarrow> x < y" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (deg_of x < deg_of y) = (x < y) [PROOF STEP] using rad_of_strict_monoI [PROOF STATE] proof (prove) using this: ?x < ?y \<Longrightarrow> rad_of ?x < rad_of ?y goal (1 subgoal): 1. (deg_of x < deg_of y) = (x < y) [PROOF STEP] by (fastforce intro!: deg_of_strict_monoI)
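For context, deg_of and rad_of are mutually inverse affine rescalings (degrees vs.\ radians), so strict monotonicity transfers in both directions. A LaTeX sketch, assuming the usual definitions (they are not shown in this excerpt):

\[
  \mathrm{rad\_of}\,x = \frac{\pi x}{180}, \qquad
  \mathrm{deg\_of}\,x = \frac{180\,x}{\pi}, \qquad
  \mathrm{deg\_of}\,x < \mathrm{deg\_of}\,y \iff x < y,
\]
since multiplication by the positive constant $180/\pi$ both preserves and reflects $<$.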
lemma homeomorphic_locally: fixes S:: "'a::metric_space set" and T:: "'b::metric_space set" assumes hom: "S homeomorphic T" and iff: "\<And>X Y. X homeomorphic Y \<Longrightarrow> (P X \<longleftrightarrow> Q Y)" shows "locally P S \<longleftrightarrow> locally Q T"
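No proof accompanies the lemma in this excerpt; the underlying argument is the standard transfer of local properties along a homeomorphism. A sketch in LaTeX (our paraphrase, not the Isabelle proof script):

Let $h : S \to T$ be the homeomorphism. Given \emph{locally} $P$ on $S$ and a
neighbourhood $V \subseteq T$ of a point $y$, pull back to $U = h^{-1}(V)$,
choose a smaller neighbourhood $U'$ of $h^{-1}(y)$ with $P\,U'$, and push
forward: $h(U')$ is a neighbourhood of $y$ inside $V$, and $U'$ homeomorphic
$h(U')$ yields $Q\,(h(U'))$ by the hypothesis \texttt{iff}. The converse
direction runs the same argument through $h^{-1}$.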
Formal statement is: lemma filterlim_at_split:
  "filterlim f F (at x) \<longleftrightarrow> filterlim f F (at_left x) \<and> filterlim f F (at_right x)"
  for x :: "'a::linorder_topology"
Informal statement is: A function $f$ tends to a filter $F$ at a point $x$ if and only if it tends to $F$ both from the left and from the right at $x$.
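In conventional notation (a LaTeX transcription of the statement; the arrow shorthand is ours, not Isabelle's):

\[
  f \longrightarrow F \ \text{at}\ x
  \;\iff\;
  \bigl(f \longrightarrow F \ \text{at}\ x^{-}\bigr)
  \ \wedge\
  \bigl(f \longrightarrow F \ \text{at}\ x^{+}\bigr),
\]
where $x$ ranges over a linearly ordered topological space and $x^{-}$, $x^{+}$ denote the punctured left and right neighbourhood filters \texttt{at\_left x} and \texttt{at\_right x}.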
#
# Read("~/Workspace/groupsSB/epi/group.gi");
#
#
# needs:
# type:="A";
# rank:=2;
# nr_pos_roots:=3;
#

ZZ:=Integers;

avarnames:=List([1..100],i->Concatenation("a_{",String(i),"}"));
bvarnames:=List([1..100],i->Concatenation("b_{",String(i),"}"));
cvarnames:=List([1..100],i->Concatenation("c_{",String(i),"}"));
xvarnames:=List([1..100],i->Concatenation("x_{",String(i),"}"));
varnames:=Concatenation(avarnames,bvarnames,cvarnames,xvarnames);

APR:=PolynomialRing(ZZ,varnames);
vars:=IndeterminatesOfPolynomialRing(APR);
xvars:=vars{[301..400]};

sla:=SimpleLieAlgebraTypeA_G(type,rank,APR);
cb:=CanonicalBasis(sla);
e:=cb[1];

id_mat:=DiagonalMat(List([1..2*nr_pos_roots+rank],i->1));

# matrix of the adjoint action ad(e) with respect to the canonical basis cb
ade:=function(e)
  local result,v;
  result:=[];
  for v in cb do
    Append(result,[Coefficients(cb,e*v)]);
  od;
  result:=TransposedMat(result);
  return result;
end;

# one-parameter root group element exp(t*ad(cb[index])); the loop stops once
# every entry of the current power tmp is equal, i.e. tmp is the zero matrix
root_group:=function(index,t)
  local ee,tmp,result,i;
  ee:=ade(cb[index]);
  tmp:=ee;
  result:=One(APR)*tmp^0;
  i:=1;
  while Length(Set(Concatenation(tmp)))<>1 do
    result:=result+t^i*tmp/Factorial(i);
    i:=i+1;
    tmp:=tmp*ee;
  od;
  return result;
end;

#u1a1:=root_group(1,vars[1]);

pos_root_groups:=function(start_a_index)
  return List([1..nr_pos_roots],i->root_group(i,vars[start_a_index+i]));
end;

# product of all positive root group elements, with parameters taken from
# vars starting at the given offset
generic_U:=function(start_a_index)
  local Uas;
  Uas:=pos_root_groups(start_a_index);
  return Product(Uas);
end;

Ua:=generic_U(0);
Ub:=generic_U(10);
Uc:=generic_U(20);

#
#
#

# substitute the given (indeterminate, value) pairs entry-wise into the matrix u
evaluate_U:=function(u,vals)
  local i,j,result,v;
  result:=[];
  for i in [1..Length(u)] do
    Append(result,[[1..Length(u)]]);
    for j in [1..Length(u)] do
      result[i][j]:=u[i][j];
    od;
  od;
  Print(result);
  for i in [1..Length(u)] do
    for j in [1..Length(u)] do
      for v in vals do
        result[i][j]:=One(APR)*Value(One(APR)*result[i][j],v[1],v[2]);
      od;
    od;
  od;
  return result;
end;

# substitute the given (indeterminate, value) pairs into a list of relations
evaluate_rels:=function(rels,vals)
  local i,result,v;
  result:=List([1..Length(rels)],i->rels[i]);
  for i in [1..Length(rels)] do
    for v in vals do
      #Print(Length(rels),": ",rels[i],"\n");
      result[i]:=One(APR)*Value(result[i],v[1],v[2]);
      #nn[i]:=One(APR)*Value(nn[i],v[1],v[2]);
    od;
  od;
  return result;
end;
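For reference, root_group above computes a truncated matrix exponential of the adjoint action: since $\mathrm{ad}(e_i)$ is nilpotent for a root element, the series is finite, which is exactly why the while loop terminates. In LaTeX (a standard identity, not quoted from the file):

\[
  x_i(t) \;=\; \exp\bigl(t\,\mathrm{ad}(e_i)\bigr)
         \;=\; \sum_{k=0}^{N} \frac{t^k}{k!}\,\mathrm{ad}(e_i)^k ,
  \qquad \mathrm{ad}(e_i)^{N+1} = 0 .
\]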
\documentclass{acm_proc_article-sp}
\usepackage[utf8]{inputenc}
\renewcommand{\paragraph}[1]{\vskip 6pt\noindent\textbf{#1 }}
\usepackage{hyperref}
\usepackage{graphicx}
\usepackage{url}
\providecommand{\tightlist}{%
  \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}

\title{ShinyPET: A Predictive, Exploratory and Text RShiny Application using Airbnb data}

% Add image handling
\numberofauthors{3}
\author{
\alignauthor Ang Su Yiin \\
\affaddr{Singapore Management University}\\
\email{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}}
\and
\alignauthor Joey Chua \\
\affaddr{Singapore Management University}\\
\email{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}}
\and
\alignauthor Kevin Gunawan Albindo \\
\affaddr{Singapore Management University}\\
\email{\href{mailto:[email protected]}{\nolinkurl{[email protected]}}}
}
\date{}

% Remove copyright block
\permission{}
\conferenceinfo{} {}
\CopyrightYear{}
\crdata{}

% Pandoc syntax highlighting
% Pandoc citation processing
\newlength{\csllabelwidth}
\setlength{\csllabelwidth}{3em}
\newlength{\cslhangindent}
\setlength{\cslhangindent}{1.5em}
% for Pandoc 2.8 to 2.10.1
\newenvironment{cslreferences}%
  {}%
  {\par}
% For Pandoc 2.11+
\newenvironment{CSLReferences}[3] % #1 hanging-ident, #2 entry spacing
 {% don't indent paragraphs
  \setlength{\parindent}{0pt}
  % turn on hanging indent if param 1 is 1
  \ifodd #1 \everypar{\setlength{\hangindent}{\cslhangindent}}\ignorespaces\fi
  % set entry spacing
  \ifnum #2 > 0 \setlength{\parskip}{#2\baselineskip} \fi
 }%
 {}
\usepackage{calc} % for calculating minipage widths
\newcommand{\CSLBlock}[1]{#1\hfill\break}
\newcommand{\CSLLeftMargin}[1]{\parbox[t]{\csllabelwidth}{#1}}
\newcommand{\CSLRightInline}[1]{\parbox[t]{\linewidth - \csllabelwidth}{#1}}
\newcommand{\CSLIndent}[1]{\hspace{\cslhangindent}#1}

\usepackage{float}
\usepackage{caption}
\captionsetup{skip=1pt}
\setlength{\textfloatsep}{2pt}
\setlength{\intextsep}{2pt}

\begin{document}
\maketitle

\begin{abstract}
The increasing availability of data has resulted in increased demand for data-driven decisions. Although there is an extensive range of commercial statistical tools, they are often subscription-based and demand good technical knowledge to mine and draw insights from. Therefore, they may not appeal to the average user. Using a collection of available R packages, ShinyPET, an R Shiny application, was developed for the average user to perform exploratory and confirmatory analysis, text mining and predictive analysis, as well as to formulate insights and make data-driven decisions. Airbnb data provides a baseline for this application as the data generated is rich in information, consisting of structured, unstructured, and location data. This paper discusses the design framework, use case and future work of the ShinyPET dashboard.
\end{abstract}

\emph{Keywords} - Airbnb, Exploratory Analysis, Confirmatory Analysis, Text Mining, Predictive Analytics, Decision Making, R Shiny, Interactive Data Visualisation.

\hypertarget{introduction}{%
\section{Introduction}\label{introduction}}

With increasingly affordable data storage and processing technologies, the demand for data-driven decision-making (DDDM)\footnote{DDDM refers to the systematic analysis, examination and integration of data to make strategic decisions, rather than basing them on intuition or observation alone (Mandinach, 2012) {[}\protect\hyperlink{ref-doi:10.1080ux2f00461520.2012.667064}{5}{]}} has increased significantly.
As Geoffrey Moore opines, ``Without big data analytics, companies are blind and deaf, wandering out onto the Web like deer on a freeway.'' With the use of data-driven decision making through analytics tools, firms' performance would improve (Yasmin, M. et al., 2020) {[}\protect\hyperlink{ref-https:ux2fux2fdoi.orgux2f10.1016ux2fj.jbusres.2020.03.028}{8}{]}.

Airbnb is an online vacation rental marketplace servicing a community of hosts and travellers. By 2020, Airbnb had millions of listings in over 220 countries and regions across 100,000 cities {[}\protect\hyperlink{ref-airbnb2021}{1}{]}. The data generated provides rich information, including structured data, e.g.~price and location, as well as unstructured data, e.g.~reviews and listing descriptions. Thus, Airbnb provides a good use case and baseline for exploratory and confirmatory analysis, text mining, and predictive modeling as presented in our ShinyPET dashboard.

\hypertarget{motivation-of-the-application}{%
\section{Motivation of the application}\label{motivation-of-the-application}}

The motivation of this project stems from two main issues - the proliferation of data and the lack of user-friendly open source tools to make data-driven decisions. According to Harris (2014) {[}\protect\hyperlink{ref-harris_2014}{2}{]}, data is impractical without the ability to analyse it. Although there is a wide range of commercial statistics and analytics tools, these tools are often subscription-based and require technical knowledge to mine and draw insights from. On the other hand, while open source tools such as Python and R allow for data visualisation, users would require an extensive programming background to generate such insights. Hence, this project aims to develop an interface which is concise, interactive, and user-friendly using R Shiny. With this interface, data-based decisions can be made from the interactive GUI.

The R Shiny app covers 3 modules:
\begin{enumerate}
\tightlist
\item Exploratory - users are able to discover interesting patterns based on selected variables, augmented by statistical tests on the chosen variables.
\item Text - users are able to perform analysis on textual data such as reviews to generate quantitative insights.
\item Predictive - users are able to prepare and build a variety of prediction models without needing an in-depth understanding of predictive models and their algorithms.
\end{enumerate}

This application can be extended to Airbnb data from other countries, and also to other datasets.

\hypertarget{review-and-critic-on-past-works}{%
\section{Review and critique of past works}\label{review-and-critic-on-past-works}}

The Radiant application {[}\protect\hyperlink{ref-radiant2019}{6}{]}, an open-source platform-independent browser-based interface for business analytics in R, illustrates the robustness of R Shiny for web-based applications. Developed to promote quick and reproducible data analytics, the application provides interactivity and flexibility in performing visualisation, statistical and predictive analysis. However, there are limitations to the application. First, in terms of exploratory data analysis, most of the plots produced are static in nature, which can be enhanced by wrapping plotly around them. Secondly, for statistical testing, users are expected to have a basic understanding of statistical testing methods as they are first required to select their testing method; this can be further enhanced by automating the choice of testing method based on the inputs.
In addition, newer packages such as visNetwork can be applied for interactive tree visualisation, which in turn improves the assessment of decision tree models. Lastly, the statistical testing and charts are placed in separate tabs. In terms of visualisation, a single-page view would enhance the aesthetics and usability.

The Text Mining with R book {[}\protect\hyperlink{ref-robinson}{7}{]} by Silge and Robinson presents a comprehensive approach to handling text. First, the book is content-heavy, which may not appeal to typical users. Second, the tidytext package used for data wrangling and visualisation is widely adopted, thus allowing users to apply such methods easily. However, these tools require technical skills from users and are not interactive. To allow easy usage and enhance interactivity, packages such as plotly and highcharter can be used. Highcharter has various themes and features such as tooltips which greatly enhance visualisation.

Tidymodels {[}\protect\hyperlink{ref-tidymodels2020}{3}{]} has gained interest by providing a framework for predictive modeling and machine learning. It is aligned with the tidyverse principles, which leads to a tidier and more consistent grammar in the predictive analytics process. The models offered in the Radiant package are also available for implementation in the Tidymodels framework, which is why our application leverages Tidymodels as the main framework to conduct predictive analytics on Airbnb data.

Lu, Y., Garcia, R., Hansen, B. et al.~(2017) {[}\protect\hyperlink{ref-https:ux2fux2fdoi.orgux2f10.1111ux2fcgf.13210}{4}{]} provide a comprehensive summary of research on Predictive Visual Analytics. The paper discusses how visual analytics systems are implemented to support predictive analytics processes such as feature selection, incremental learning, model comparison and result exploration. The overall goal of visual analytics is to support explanation in each step of the predictive analytics exercise, which is also our motivation in developing this application.

\hypertarget{design-framework}{%
\section{Design framework}\label{design-framework}}

The design of ShinyPET is based on ensuring comprehensive data analysis coupled with aesthetics. Taking the user's point of view into account, 3 main principles are adopted, namely user-friendliness, interactivity and ease of understanding.

To get started, the introduction page provides an overview of the application. This allows users to understand the case that they will be exploring.

In the exploratory module (section 4.1), a data summary is presented in tabular form to aid the user's understanding of the data. In the explore sub tab, users are able to visualise the provided data based on various variables. This user-friendliness and interactivity provide flexibility and ease of use without needing any technical knowledge.

In the text module (section 4.2), various text mining techniques are presented. Each sub tab utilises visualisation tools like word clouds and topic modeling to simplify concepts in natural language processing. This aids the understanding of the unstructured data provided.

In the predictive module (section 4.3), various predictive models are available for user selection. The sub tabs guide users through the process from data sampling to final model selection. Visualisation and interactivity are embedded through graphs and user input menus to provide an organised workflow of predictive analytics.
Aesthetically, the application's colour scheme should be based on the theme of the topic. In our Airbnb case, the official colours are Rausch, Babu and Foggy (a type of gray).

The combination of the 3 principles is consistently incorporated into the various steps of the data analysis in the 3 modules, hence providing users an easy and comprehensive way to make data-driven decisions.

\hypertarget{exploratory-module}{%
\subsection{Exploratory module}\label{exploratory-module}}

The exploratory module enables users to perform Exploratory Data Analysis (EDA) and Confirmatory Data Analysis (CDA) on selected variables to identify interesting patterns. There are three sections in this module - observe, map and confirm \& explore.

\vspace{-5truemm}

\hypertarget{observe-submodule}{%
\subsubsection{Observe submodule}\label{observe-submodule}}

In Figure 1, the Observe section provides a summary of the data to help users understand and form questions about the data.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/design_observe} }
\caption{Interface and components of Observe section}\label{fig:unnamed-chunk-1}
\end{figure}

2 main components are presented in this section. The first component is the 4 boxes at the top of the page, which present an overview of the data by showing the number of variables, observations and data types. The second component is the summary of each variable according to its data type in a tabular format. Users are able to use the search boxes to filter data, and use the arrow icons to sort data. Hence, these components incorporate both ease of understanding and interactivity.

\hypertarget{map-submodule}{%
\subsubsection{Map submodule}\label{map-submodule}}

Figure 2 shows the Map section that allows users to explore geographic patterns through thematic maps. The maps are designed based on the three principles stated above and partially on Shneiderman's interactive dynamics principle of ``overview, zoom and filter, then details on demand.'' The `zoom and filter' portion was not used as it is not applicable to this dataset.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/design_map} }
\caption{Interface and components of Map section}\label{fig:unnamed-chunk-2}
\end{figure}

There are 2 main components to this submodule - the map, which consists of a bubble map and a choropleth map, and the table, which provides details of the map. These maps were chosen as they fit the understanding of the data. For instance, price and review scores are mapped with the intensity of the colour showing the spread of the price and score ranges.

\hypertarget{explore-and-confirm-submodule}{%
\subsubsection{Explore and confirm submodule}\label{explore-and-confirm-submodule}}

Figure 3 shows the EDA and CDA submodule for users to explore and perform inferential statistics based on sections 4.1.1 and 4.1.2.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/design_explore1} }
\caption{Interface and components of Explore and Confirm section}\label{fig:unnamed-chunk-3}
\end{figure}

There are 3 main components - the selection input on the left, the statistical results and the chart. The selection input with drop-down lists allows users to customise the charts shown. The application provides 4 types of charts, namely: distribution, mosaic, boxplot and scatter plot. Based on the selection input, the drop-down menus for variables will be altered accordingly.
For example, if the `Distribution' chart is selected, only the x-variable drop-down input is shown. The charts were designed based on Shneiderman's interactive dynamics of highlight, filter or manipulate. These graphs allow users to manipulate views by selecting an object in a plot, highlighting selected records and defining a region on the graph. Furthermore, the plotted charts can be downloaded for users to communicate their findings. An example of the output is shown in Figure 4.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/design_explore2} }
\caption{Graph's manipulation function of the Explore and Confirm section}\label{fig:unnamed-chunk-4}
\end{figure}

As the application is tailored towards users who are not well-versed in statistics, the statistical tests - whose test methods and results are automated based on the selected variables - are easy to understand. An interactive slider is also provided for users to easily adjust the statistical results.

\hypertarget{text-module}{%
\subsection{Text module}\label{text-module}}

The text module utilises various text mining techniques to transform unstructured text, i.e.~reviews, into a structured format to identify patterns and bring about meaningful insights. Prior to the application of text mining techniques, text preprocessing has to be carried out. This involves the use of tokenisation, stemming and lemmatisation. Tokenisation is the process of splitting a column of reviews into tokens such that they are flattened into the format of one-token-per-row. Stemming is the process of separating the prefixes and suffixes from words to derive the root word form and meaning. Stemming algorithms work by cutting off the end or the beginning of the word, taking into account a list of common prefixes and suffixes that can be found in an inflected word. Lemmatisation, on the other hand, takes into consideration the morphological analysis of the words.

\hypertarget{token-frequency-submodule}{%
\subsubsection{Token frequency submodule}\label{token-frequency-submodule}}

To visualise token frequency, a wordcloud is used. A wordcloud provides an easy way to show how frequently a word appears in a corpus: the size of a word indicates how often it appears in a given text. Other than considering words as individual units, ``n-grams'' are also used to tokenise pairs of adjacent words. N-grams provide context in sentiment analysis. For instance, while the word ``happy'' can be positive, a sentence which contains the words ``not happy'' would mean otherwise. Hence, performing sentiment analysis on bigrams allows us to examine sentiment-associated words.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/tokenfrequency} }
\caption{Token Frequency}\label{fig:unnamed-chunk-5}
\end{figure}

There are two components: on the left is the wordcloud, and on the right is the bar chart that ranks the frequency of words in descending order. From the chart, it can be observed that the words ``clean,'' ``stay,'' ``location,'' and ``nice'' occurred most frequently. When ``bi-gram'' is chosen, the wordcloud and bar chart change accordingly. A tooltip showing the number of times a word occurred when hovered over allows users to gain an understanding of the data. Hence, ease of understanding and interactivity are prominent.

\hypertarget{sentiment-analysis-submodule}{%
\subsubsection{Sentiment analysis submodule}\label{sentiment-analysis-submodule}}

In this submodule, 3 lexicons are used to plot wordclouds that show both frequency and sentiment. The AFINN lexicon measures sentiment with a numeric score between -5 and 5, BING categorises words as either positive or negative, and NRC categorises words into 8 basic emotions (anger, fear, anticipation, trust, surprise, sadness, joy, and disgust) and 2 sentiments (negative and positive).

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/bingsentiment} }
\caption{Sentiment Analysis}\label{fig:unnamed-chunk-6}
\end{figure}

Users can select the various lexicons to view the wordcloud and related charts such as bar charts and radial charts. For AFINN and BING, bar charts are plotted to show the spread and weightage of sentiments. For NRC, a radial plot shows the emotions that the reviews tend to lean towards.

\hypertarget{topic-modelling-submodule}{%
\subsubsection{Topic modelling submodule}\label{topic-modelling-submodule}}

Latent Dirichlet allocation (LDA) is an example of a topic modeling algorithm, based on 2 principles: first, every document is a mixture of topics; second, every topic is a mixture of words.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/topicmodelling} }
\caption{Topic Modelling}\label{fig:unnamed-chunk-7}
\end{figure}

For flexibility, a slider is incorporated to allow users to choose the number of topics and the top terms per topic. As the loading time is long, a ``Go'' button is included for users to proceed. Subsequently, 2 components are shown. First, the intertopic distance map uses a multidimensional scaling algorithm to plot the topics that have words in common. Second, the bar chart on the right shows the salient terms; the bars exhibit the total frequency of each term.

\hypertarget{correlation-network-submodule}{%
\subsubsection{Correlation network submodule}\label{correlation-network-submodule}}

Word occurrences and correlations are commonly used to identify families of words.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/correlationnetwork} }
\caption{Network Analysis}\label{fig:unnamed-chunk-8}
\end{figure}

There are 2 options. First is the bi-directional network graph. Second is the correlation network graph. These graphs show how words relate to each other.

\hypertarget{predictive-module}{%
\subsection{Predictive module}\label{predictive-module}}

The predictive module's design follows the Tidymodels framework for data pre-processing, model training, tuning, and validation. On top of that, feature selection is supported by other R packages such as ggcorrplot (for the correlation matrix) and ranger and Boruta (for feature importance). Visualisation and interactivity are embedded in each step of the predictive analytics process, as explained below.

\hypertarget{data-sampling-submodule}{%
\subsubsection{Data sampling submodule}\label{data-sampling-submodule}}

In this submodule, the selection of the training-test split proportion provides users with flexibility in deciding how to spend their data budget in the model development process. The displayed distribution plot comparing the training and test sets highlights potential biases in the training data set.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/datasplit} }
\caption{Data sampling and distribution plot}\label{fig:unnamed-chunk-9}
\end{figure}

\hypertarget{feature-selection-submodule}{%
\subsubsection{Feature selection submodule}\label{feature-selection-submodule}}

To support users with the selection of variables, a correlation matrix with customisable correlation type and p-value criteria is provided. In addition, variable importance scores from 2 different methods highlight useful predictors for the response variable.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/featselect} }
\caption{Correlation matrix and variable importance}\label{fig:unnamed-chunk-10}
\end{figure}

\hypertarget{data-transformation-submodule}{%
\subsubsection{Data transformation submodule}\label{data-transformation-submodule}}

Prior to training, transformation steps are performed using the recipe package in Tidymodels. Following the transformation steps, a plot comparing the pre- and post-processing data is added to make users aware of which transformation steps are performed and on which variables.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/recipetrf} }
\caption{Data transformation steps}\label{fig:unnamed-chunk-11}
\end{figure}

\hypertarget{model-training-submodule}{%
\subsubsection{Model training submodule}\label{model-training-submodule}}

In this submodule, users can select from 5 different types of predictive models for training. For the linear regression model, coefficient estimates are shown with an option to filter important variables based on p-value. For decision tree training results, the visNetwork package is used for its decision tree plot to improve result evaluation.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/mdltrn} }
\caption{Training result evaluation}\label{fig:unnamed-chunk-12}
\end{figure}

Next, the trained model is assessed using the test set (unseen data) by plotting the actual and predicted values on an R-squared plot. A table of performance metrics such as root mean squared error, mean absolute error, and R-squared is also displayed.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/mdleval} }
\caption{Validation result evaluation}\label{fig:unnamed-chunk-13}
\end{figure}

For the linear regression model, users have an option to further evaluate cases with high prediction error. The training set distribution plot is overlaid with predicted values, which may highlight outliers in the predictors. Users can also choose to display the top N predictors based on p-value.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/prederror} }
\caption{Prediction error assessment}\label{fig:unnamed-chunk-14}
\end{figure}

Tree-based models and the generalised linear model use a cross-validation training set to tune the model's hyper-parameters. Plots of model performance under different hyper-parameter settings are available for users to understand the change in performance.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/hypartune} }
\caption{Hyper-parameter tuning result}\label{fig:unnamed-chunk-15}
\end{figure}

\hypertarget{model-selection-submodule}{%
\subsubsection{Model selection submodule}\label{model-selection-submodule}}

In the final submodule, all trained and validated models are gathered and their metrics are compared for users to choose the final model.
Once a model is selected, users are able to provide new input values for each predictor and obtain the response variable using the selected model.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/mdlcompare} }
\caption{Models performance comparison}\label{fig:unnamed-chunk-16}
\end{figure}

The combination of these three modules, along with their interactivity and usability, empowers users to make data-driven decisions based on the insights generated.

\hypertarget{case-study-airbnb-singapore}{%
\section{Case Study : Airbnb Singapore}\label{case-study-airbnb-singapore}}

InsideAirbnb provides tools and data for users to explore Airbnb. 2 files dated 27 January 2021 were used: (1) listing.csv.gz, which consists of 74 variables and 4256 data points; (2) reviews.csv.gz, which provides 6 variables and 52368 data points. Our application can be used from the perspectives of both hosts and guests.

\textbf{Hosts}: In 2014, Airbnb launched the Superhost programme to reward hosts with outstanding hospitality. A Superhost has better earnings, more visibility, and is able to earn exclusive rewards. To become a Superhost, these criteria must be met:

\begin{itemize}
\tightlist
\item a 4.8 or higher overall rating based on reviews
\item completed at least 10 stays in the past year or 100 nights over at least 3 completed stays
\item less than 1\% cancellation rate, not including extenuating circumstances
\item responds to 90\% of new messages within 24 hours
\end{itemize}

\textbf{Guests}: With over 60,000 members and 6,000 properties listed on the Airbnb website, the dilemma of choosing the right space might be of concern to users. The various modules in our dashboard allow both types of users to analyse Airbnb data according to their needs.

In order to reduce the loading time of the application, the datasets were preprocessed and only the cleaned datasets were loaded. In addition, redundant variables, such as listing id and url id, were removed.

\hypertarget{geographical-distribution-of-airbnbs}{%
\subsection{Geographical distribution of Airbnbs}\label{geographical-distribution-of-airbnbs}}

The point symbol map reveals that the distribution of Airbnb listings in Singapore is highly concentrated in the central regions. The 4 distinct hotspots are (1) Geylang/Kallang, (2) Lavender/Rochor/Bugis, (3) Orchard and (4) Chinatown. Areas (3) and (4) are mainly tourist areas - (3) Orchard is the main shopping belt of Singapore and (4) Chinatown retains significant historical and cultural landmarks. Areas (1) and (2) are popular given their low prices per person (see the choropleth map on the right) while staying relatively close to the central region. Additionally, Airbnb listings tend to be located along MRT lines, which could signify that public transport is generally more popular among Airbnb guests.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/usecase_explore} }
\caption{Point symbol map on the left, choropleth on the right}\label{fig:unnamed-chunk-17}
\end{figure}

With this information, potential Airbnb investors can identify highly saturated areas and the average price per person in those areas, which could help in estimating their investment yield prior to committing to an investment.

\hypertarget{distribution-of-review-score-rating}{%
\subsection{Distribution of review score rating}\label{distribution-of-review-score-rating}}

The overall Airbnb listing review score, `review\_scores\_rating,' is capped at 100 and has a left-skewed distribution. This suggests that most guests tend to post positive reviews, or that listings with low ratings tend to be delisted or exit the Airbnb market.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/usecase_explore2} }
\caption{Distribution of review score rating}\label{fig:unnamed-chunk-18}
\end{figure}

\hypertarget{confirmatory-analysis-of-superhost-and-review-scores-hosting}{%
\subsection{Confirmatory analysis of superhost and review scores}\label{confirmatory-analysis-of-superhost-and-review-scores-hosting}}

The chart below suggests that listings with Superhost status tend to have higher review scores, as indicated by the statistical test results, where the p-value is less than the alpha of 0.05.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/usecase_explore4} }
\caption{Exploratory and Confirmatory Analysis on superhost status and review scores}\label{fig:unnamed-chunk-19}
\end{figure}

\hypertarget{observing-correlation-among-variables}{%
\subsection{Observing correlation among variables}\label{observing-correlation-among-variables}}

Datasets like Airbnb's are rich, with large numbers of variables. However, multicollinearity among variables is known to affect predictive model performance. The correlation matrix helps to identify multicollinearity by highlighting variables with high correlation values. In our example below, we observe correlations within the rating score components, the listing availability period, and the review components. With this information, the selection of variables can be done properly.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/corrcase} }
\caption{Correlation among variables}\label{fig:unnamed-chunk-20}
\end{figure}

\hypertarget{sentiment-analysis}{%
\subsection{Sentiment Analysis}\label{sentiment-analysis}}

From the wordclouds and polarity clouds, the cleanliness of rooms seems to be mentioned most frequently. Additionally, the distance to various areas and to transport - based on words like ``minute walk,'' ``bus stop,'' and ``public transport'' - seems to be an important factor in ratings and reviews. Across the various lexicons, there is a common result of sentiments being skewed towards the positive side.

\hypertarget{model-explanation}{%
\subsection{Model Explanation}\label{model-explanation}}

In predicting listing price using the linear model, the plot of coefficient estimates helps to explain the trained model. In the example below, our interface allows sorting of variables based on p-value, where the variables with the lowest p-values are located on top. The property type which falls under the ``Others'' category (those with counts of less than 5\% in the data set) has the lowest p-value and a positive estimate, which may represent unique property types (e.g.~boat, campsite, chalet, villa) where the listing price is above the average price of common property types like apartments and condominiums (as shown in the boxplot from the exploratory module). Amenities and beds are also among the top 5 predictors, correlating positively with listing price. However, the error bar is wider for the ``Others'' property type than for amenities and beds, representing more uncertainty in the estimate.

\begin{figure}[H]
{\centering \includegraphics[width=1\linewidth]{images/LMcoeff} }
\caption{Coefficient estimate and boxplot from exploratory module}\label{fig:unnamed-chunk-21}
\end{figure}

\hypertarget{discussion}{%
\section{Discussion}\label{discussion}}

In this project, we have developed ShinyPET, an interactive web-based application to promote exploratory, text and predictive analytics on the Airbnb data set. The interactivity and functionality of our application provide evidence of the robustness of R Shiny as a framework for developing web applications, along with the variety of available R packages that serve as building blocks for each module in our application. On top of the functionality, the user interface is also carefully designed, with the arrangement of submodules for each analytics task easing usage for non-technical users.

With the integration of the main analytics tasks (exploratory, textual, and predictive), our application enables users to assess and make use of a dataset from different perspectives in a single platform. The modularisation of analytics tasks allows users to quickly navigate between modules when necessary. An example of this benefit was discussed previously, where the exploratory module aids the understanding of the predictive model.

\hypertarget{future-work}{%
\section{Future Work}\label{future-work}}

ShinyPET was built around Singapore's Airbnb dataset as a use case. It enables users to perform exploratory and confirmatory analysis, text mining, and predictive modelling without needing extensive programming or statistical knowledge. The application could be further enhanced by including data loading and wrangling functions to accommodate different datasets.

The application currently supports only 4 types of charts, each with a parametric statistical test. Other charts, such as violin and bar charts, could be incorporated. Additional hypothesis testing methods, such as non-parametric tests for the median and pairwise statistical tests, could also be added.

The current application supports two types of maps, leaving room for additions such as kernel density maps and navigation maps. Additionally, token frequencies and sentiment analysis allow the selection of n-grams and lexicons respectively; further interactivity, such as choices of neighbourhood and rating scores, could be added. Moreover, the explore and text submodules could be connected, with views coordinated and linked, in order to provide multi-dimensional exploration.

The current predictive module is limited to 5 types of predictive models. More predictive models, e.g.~neural networks, could be added to provide users with a wider model selection. In hyper-parameter tuning, parameters could be made available for user input to provide more flexibility in developing predictive models. In-depth statistical analysis in model training, such as residual analysis, is currently not available and would be a good additional tool to improve our application.

\hypertarget{acknowledgement}{%
\section{Acknowledgement}\label{acknowledgement}}

The authors wish to thank Professor Kam Tin Seong of Singapore Management University for his extensive guidance and support during this project.

\hypertarget{references}{%
\section*{References}\label{references}}
\addcontentsline{toc}{section}{References}

\hypertarget{refs}{}
\begin{CSLReferences}{0}{0}
\leavevmode\hypertarget{ref-airbnb2021}{}%
\CSLLeftMargin{{[}1{]} }
\CSLRightInline{Curry, D. 2021.
\emph{Airbnb revenue and usage statistics}.}

\leavevmode\hypertarget{ref-harris_2014}{}%
\CSLLeftMargin{{[}2{]} }
\CSLRightInline{Harris, J. 2014. Data is useless without the skills to analyze it. \emph{Harvard Business Review}.}

\leavevmode\hypertarget{ref-tidymodels2020}{}%
\CSLLeftMargin{{[}3{]} }
\CSLRightInline{Kuhn, M. and Wickham, H. 2020. \emph{Tidymodels: A collection of packages for modeling and machine learning using tidyverse principles.}}

\leavevmode\hypertarget{ref-https:ux2fux2fdoi.orgux2f10.1111ux2fcgf.13210}{}%
\CSLLeftMargin{{[}4{]} }
\CSLRightInline{Lu, Y. et al. 2017. The state-of-the-art in predictive visual analytics. \emph{Computer Graphics Forum}. 36, 3 (2017), 539--562.}

\leavevmode\hypertarget{ref-doi:10.1080ux2f00461520.2012.667064}{}%
\CSLLeftMargin{{[}5{]} }
\CSLRightInline{Mandinach, E.B. 2012. A perfect time for data use: Using data-driven decision making to inform practice. \emph{Educational Psychologist}. 47, 2 (2012), 71--85.}

\leavevmode\hypertarget{ref-radiant2019}{}%
\CSLLeftMargin{{[}6{]} }
\CSLRightInline{Nijs, V. 2019. \emph{Radiant -- business analytics using R and Shiny}.}

\leavevmode\hypertarget{ref-robinson}{}%
\CSLLeftMargin{{[}7{]} }
\CSLRightInline{Silge, J. and Robinson, D. Text mining with R: A tidy approach. \emph{Welcome to Text Mining with R \textbar{} Text Mining with R}.}

\leavevmode\hypertarget{ref-https:ux2fux2fdoi.orgux2f10.1016ux2fj.jbusres.2020.03.028}{}%
\CSLLeftMargin{{[}8{]} }
\CSLRightInline{Yasmin, M. et al. 2020. Big data analytics capabilities and firm performance: An integrated MCDM approach. \emph{Business Research}. 114, (2020), 1--15.}

\end{CSLReferences}

\setlength{\parindent}{0in}

\end{document}
-- Andreas, 2013-10-26, reported by Jesper Cockx {-# OPTIONS --cubical-compatible #-} module Issue920a where import Common.Level open import Common.Equality record ⊥ : Set where data Bool : Set where true false : Bool -- Standard eliminator for ≡ J : ∀ {a b} {A : Set a} {x : A} {Φ : (y : A) → x ≡ y → Set b} → Φ x refl → {y : A} → (e : x ≡ y) → Φ y e J φ refl = φ -- A kind of heterogeneous equality _≃_ : {A : Set} (x : A) {A' : Set} → A' → Set _ _≃_ {A} x {A'} x' = (E : A ≡ A') → J x E ≡ x' -- It shouldn't be possible to define this without K ≃refl : {A : Set} {x : A} → x ≃ x ≃refl {x = x} = λ E → J {Φ = λ A' E' → J x E' ≡ _} refl E -- These can be given using univalence postulate Swap : Bool ≡ Bool postulate swap : true ≡ J {Φ = λ A _ → A} false Swap -- Univalence and ≃refl don't play nice together right : (true ≡ false) → ⊥ right () wrong : true ≡ false wrong = trans swap (≃refl Swap) madness : ⊥ madness = right wrong
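Why `madness` type-checks, in brief (our gloss; `ua` and `not` below are the usual univalence map and boolean negation, which are not defined in the file):

\[
  \mathrm{true}
  \;\xrightarrow{\;\mathrm{swap}\;}\;
  J\,\mathrm{false}\,\mathrm{Swap}
  \;\xrightarrow{\;\simeq\mathrm{refl}\ \mathrm{Swap}\;}\;
  \mathrm{false},
\]
so \(\mathrm{trans}\) composes the two proofs into \(\mathrm{wrong} : \mathrm{true} \equiv \mathrm{false}\). Under univalence one may take \(\mathrm{Swap} = \mathrm{ua}(\mathrm{not})\), which justifies the postulate \(\mathrm{swap}\) (transporting \(\mathrm{false}\) along \(\mathrm{Swap}\) gives \(\mathrm{true}\)), while \(\simeq\mathrm{refl}\) is only derivable with K; combining the two yields \(\bot\).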
The Landau symbols $\mathcal{O}$, $o$, $\Omega$, $\omega$, and $\Theta$ can be transferred from the real numbers to the natural numbers.
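Concretely, the transfer works because each symbol is defined by an eventually-true inequality, and "eventually" along $\mathbb{R}$ restricts to "eventually" along $\mathbb{N}$. A LaTeX sketch of the defining condition for $\mathcal{O}$ (the standard definition, not quoted from the source):

\[
  f \in \mathcal{O}(g)
  \iff
  \exists C > 0.\ \exists N.\ \forall n \ge N.\ \lvert f(n) \rvert \le C\,\lvert g(n) \rvert,
\]
with $\Omega$ reversing the inequality, $\Theta$ requiring both bounds, and $o$, $\omega$ replacing ``some $C$'' by ``every $\varepsilon > 0$''.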
(************************************************************************) (* * The Coq Proof Assistant / The Coq Development Team *) (* v * INRIA, CNRS and contributors - Copyright 1999-2018 *) (* <O___,, * (see CREDITS file for the list of authors) *) (* \VV/ **************************************************************) (* // * This file is distributed under the terms of the *) (* * GNU Lesser General Public License Version 2.1 *) (* * (see LICENSE file for the text of the license) *) (************************************************************************) (** * Finite sets library *) (** This module implements bridges (as functors) from dependent to/from non-dependent set signature. *) Require Export FSetInterface. Set Implicit Arguments. Unset Strict Implicit. Set Firstorder Depth 2. (** * From non-dependent signature [S] to dependent signature [Sdep]. *) Module DepOfNodep (Import M: S) <: Sdep with Module E := M.E. Definition empty : {s : t | Empty s}. Proof. exists empty; auto with set. Qed. Definition is_empty : forall s : t, {Empty s} + {~ Empty s}. Proof. intros; generalize (is_empty_1 (s:=s)) (is_empty_2 (s:=s)). case (is_empty s); intuition. Qed. Definition mem : forall (x : elt) (s : t), {In x s} + {~ In x s}. Proof. intros; generalize (mem_1 (s:=s) (x:=x)) (mem_2 (s:=s) (x:=x)). case (mem x s); intuition. Qed. Definition Add (x : elt) (s s' : t) := forall y : elt, In y s' <-> E.eq x y \/ In y s. Definition add : forall (x : elt) (s : t), {s' : t | Add x s s'}. Proof. intros; exists (add x s); auto. unfold Add; intuition. elim (E.eq_dec x y); auto. intros; right. eapply add_3; eauto. Qed. Definition singleton : forall x : elt, {s : t | forall y : elt, In y s <-> E.eq x y}. Proof. intros; exists (singleton x); intuition. Qed. Definition remove : forall (x : elt) (s : t), {s' : t | forall y : elt, In y s' <-> ~ E.eq x y /\ In y s}. Proof. intros; exists (remove x s); intuition. absurd (In x (remove x s)); auto with set. apply In_1 with y; auto. elim (E.eq_dec x y); intros; auto. absurd (In x (remove x s)); auto with set. apply In_1 with y; auto. eauto with set. Qed. Definition union : forall s s' : t, {s'' : t | forall x : elt, In x s'' <-> In x s \/ In x s'}. Proof. intros; exists (union s s'); intuition. Qed. Definition inter : forall s s' : t, {s'' : t | forall x : elt, In x s'' <-> In x s /\ In x s'}. Proof. intros; exists (inter s s'); intuition; eauto with set. Qed. Definition diff : forall s s' : t, {s'' : t | forall x : elt, In x s'' <-> In x s /\ ~ In x s'}. Proof. intros; exists (diff s s'); intuition; eauto with set. absurd (In x s'); eauto with set. Qed. Definition equal : forall s s' : t, {Equal s s'} + {~ Equal s s'}. Proof. intros. generalize (equal_1 (s:=s) (s':=s')) (equal_2 (s:=s) (s':=s')). case (equal s s'); intuition. Qed. Definition subset : forall s s' : t, {Subset s s'} + {~Subset s s'}. Proof. intros. generalize (subset_1 (s:=s) (s':=s')) (subset_2 (s:=s) (s':=s')). case (subset s s'); intuition. Qed. Definition elements : forall s : t, {l : list elt | sort E.lt l /\ (forall x : elt, In x s <-> InA E.eq x l)}. Proof. intros; exists (elements s); intuition. Defined. Definition fold : forall (A : Type) (f : elt -> A -> A) (s : t) (i : A), {r : A | let (l,_) := elements s in r = fold_left (fun a e => f e a) l i}. Proof. intros; exists (fold (A:=A) f s i); exact (fold_1 s i f). Qed. Definition cardinal : forall s : t, {r : nat | let (l,_) := elements s in r = length l }. Proof. intros; exists (cardinal s); exact (cardinal_1 s). Qed. 
Definition fdec (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}) (x : elt) := if Pdec x then true else false. Lemma compat_P_aux : forall (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}), compat_P E.eq P -> compat_bool E.eq (fdec Pdec). Proof. unfold compat_P, compat_bool, Proper, respectful, fdec; intros. generalize (E.eq_sym H0); case (Pdec x); case (Pdec y); firstorder. Qed. Hint Resolve compat_P_aux. Definition filter : forall (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}) (s : t), {s' : t | compat_P E.eq P -> forall x : elt, In x s' <-> In x s /\ P x}. Proof. intros. exists (filter (fdec Pdec) s). intro H; assert (compat_bool E.eq (fdec Pdec)); auto. intuition. eauto with set. generalize (filter_2 H0 H1). unfold fdec. case (Pdec x); intuition. inversion H2. apply filter_3; auto. unfold fdec; simpl. case (Pdec x); intuition. Qed. Definition for_all : forall (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}) (s : t), {compat_P E.eq P -> For_all P s} + {compat_P E.eq P -> ~ For_all P s}. Proof. intros. generalize (for_all_1 (s:=s) (f:=fdec Pdec)) (for_all_2 (s:=s) (f:=fdec Pdec)). case (for_all (fdec Pdec) s); unfold For_all; [ left | right ]; intros. assert (compat_bool E.eq (fdec Pdec)); auto. generalize (H0 H3 Logic.eq_refl _ H2). unfold fdec. case (Pdec x); intuition. inversion H4. intuition. absurd (false = true); [ auto with bool | apply H; auto ]. intro. unfold fdec. case (Pdec x); intuition. Qed. Definition exists_ : forall (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}) (s : t), {compat_P E.eq P -> Exists P s} + {compat_P E.eq P -> ~ Exists P s}. Proof. intros. generalize (exists_1 (s:=s) (f:=fdec Pdec)) (exists_2 (s:=s) (f:=fdec Pdec)). case (exists_ (fdec Pdec) s); unfold Exists; [ left | right ]; intros. elim H0; auto; intros. exists x; intuition. generalize H4. unfold fdec. case (Pdec x); intuition. inversion H2. intuition. elim H2; intros. absurd (false = true); [ auto with bool | apply H; auto ]. exists x; intuition. unfold fdec. case (Pdec x); intuition. Qed. Definition partition : forall (P : elt -> Prop) (Pdec : forall x : elt, {P x} + {~ P x}) (s : t), {partition : t * t | let (s1, s2) := partition in compat_P E.eq P -> For_all P s1 /\ For_all (fun x => ~ P x) s2 /\ (forall x : elt, In x s <-> In x s1 \/ In x s2)}. Proof. intros. exists (partition (fdec Pdec) s). generalize (partition_1 s (f:=fdec Pdec)) (partition_2 s (f:=fdec Pdec)). case (partition (fdec Pdec) s). intros s1 s2; simpl. intros; assert (compat_bool E.eq (fdec Pdec)); auto. intros; assert (compat_bool E.eq (fun x => negb (fdec Pdec x))). generalize H2; unfold compat_bool, Proper, respectful; intuition; apply (f_equal negb); auto. intuition. generalize H4; unfold For_all, Equal; intuition. elim (H0 x); intros. assert (fdec Pdec x = true). eapply filter_2; eauto with set. generalize H8; unfold fdec; case (Pdec x); intuition. inversion H9. generalize H; unfold For_all, Equal; intuition. elim (H0 x); intros. cut ((fun x => negb (fdec Pdec x)) x = true). unfold fdec; case (Pdec x); intuition. change ((fun x => negb (fdec Pdec x)) x = true). apply (filter_2 (s:=s) (x:=x)); auto. set (b := fdec Pdec x) in *; generalize (Logic.eq_refl b); pattern b at -1; case b; unfold b; [ left | right ]. elim (H4 x); intros _ B; apply B; auto with set. elim (H x); intros _ B; apply B; auto with set. apply filter_3; auto. rewrite H5; auto. eapply (filter_1 (s:=s) (x:=x) H2); elim (H4 x); intros B _; apply B; auto. 
eapply (filter_1 (s:=s) (x:=x) H3); elim (H x); intros B _; apply B; auto. Qed. Definition choose_aux: forall s : t, { x : elt | M.choose s = Some x } + { M.choose s = None }. Proof. intros. destruct (M.choose s); [left | right]; auto. exists e; auto. Qed. Definition choose : forall s : t, {x : elt | In x s} + {Empty s}. Proof. intros; destruct (choose_aux s) as [(x,Hx)|H]. left; exists x; apply choose_1; auto. right; apply choose_2; auto. Defined. Lemma choose_ok1 : forall s x, M.choose s = Some x <-> exists H:In x s, choose s = inleft _ (exist (fun x => In x s) x H). Proof. intros s x. unfold choose; split; intros. destruct (choose_aux s) as [(y,Hy)|H']; try congruence. replace x with y in * by congruence. exists (choose_1 Hy); auto. destruct H. destruct (choose_aux s) as [(y,Hy)|H']; congruence. Qed. Lemma choose_ok2 : forall s, M.choose s = None <-> exists H:Empty s, choose s = inright _ H. Proof. intros s. unfold choose; split; intros. destruct (choose_aux s) as [(y,Hy)|H']; try congruence. exists (choose_2 H'); auto. destruct H. destruct (choose_aux s) as [(y,Hy)|H']; congruence. Qed. Lemma choose_equal : forall s s', Equal s s' -> match choose s, choose s' with | inleft (exist _ x _), inleft (exist _ x' _) => E.eq x x' | inright _, inright _ => True | _, _ => False end. Proof. intros. generalize (@M.choose_1 s)(@M.choose_2 s) (@M.choose_1 s')(@M.choose_2 s')(@M.choose_3 s s') (choose_ok1 s)(choose_ok2 s)(choose_ok1 s')(choose_ok2 s'). destruct (choose s) as [(x,Hx)|Hx]; destruct (choose s') as [(x',Hx')|Hx']; auto; intros. apply H4; auto. rewrite H5; exists Hx; auto. rewrite H7; exists Hx'; auto. apply Hx' with x; unfold Equal in H; rewrite <-H; auto. apply Hx with x'; unfold Equal in H; rewrite H; auto. Qed. Definition min_elt : forall s : t, {x : elt | In x s /\ For_all (fun y => ~ E.lt y x) s} + {Empty s}. Proof. intros; generalize (min_elt_1 (s:=s)) (min_elt_2 (s:=s)) (min_elt_3 (s:=s)). case (min_elt s); [ left | right ]; auto. exists e; unfold For_all; eauto. Qed. Definition max_elt : forall s : t, {x : elt | In x s /\ For_all (fun y => ~ E.lt x y) s} + {Empty s}. Proof. intros; generalize (max_elt_1 (s:=s)) (max_elt_2 (s:=s)) (max_elt_3 (s:=s)). case (max_elt s); [ left | right ]; auto. exists e; unfold For_all; eauto. Qed. Definition elt := elt. Definition t := t. Definition In := In. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) (s : t) := forall x : elt, In x s -> P x. Definition Exists (P : elt -> Prop) (s : t) := exists x : elt, In x s /\ P x. Definition eq_In := In_1. Definition eq := Equal. Definition lt := lt. Definition eq_refl := eq_refl. Definition eq_sym := eq_sym. Definition eq_trans := eq_trans. Definition lt_trans := lt_trans. Definition lt_not_eq := lt_not_eq. Definition compare := compare. Module E := E. End DepOfNodep. (** * From dependent signature [Sdep] to non-dependent signature [S]. *) Module NodepOfDep (M: Sdep) <: S with Module E := M.E. Import M. Module ME := OrderedTypeFacts E. Definition empty : t := let (s, _) := empty in s. Lemma empty_1 : Empty empty. Proof. unfold empty; case M.empty; auto. Qed. Definition is_empty (s : t) : bool := if is_empty s then true else false. Lemma is_empty_1 : forall s : t, Empty s -> is_empty s = true. Proof. intros; unfold is_empty; case (M.is_empty s); auto. Qed. Lemma is_empty_2 : forall s : t, is_empty s = true -> Empty s. Proof. 
intro s; unfold is_empty; case (M.is_empty s); auto. intros; discriminate H. Qed. Definition mem (x : elt) (s : t) : bool := if mem x s then true else false. Lemma mem_1 : forall (s : t) (x : elt), In x s -> mem x s = true. Proof. intros; unfold mem; case (M.mem x s); auto. Qed. Lemma mem_2 : forall (s : t) (x : elt), mem x s = true -> In x s. Proof. intros s x; unfold mem; case (M.mem x s); auto. intros; discriminate H. Qed. Definition eq_dec := equal. Definition equal (s s' : t) : bool := if equal s s' then true else false. Lemma equal_1 : forall s s' : t, Equal s s' -> equal s s' = true. Proof. intros; unfold equal; case M.equal; intuition. Qed. Lemma equal_2 : forall s s' : t, equal s s' = true -> Equal s s'. Proof. intros s s'; unfold equal; case (M.equal s s'); intuition; inversion H. Qed. Definition subset (s s' : t) : bool := if subset s s' then true else false. Lemma subset_1 : forall s s' : t, Subset s s' -> subset s s' = true. Proof. intros; unfold subset; case M.subset; intuition. Qed. Lemma subset_2 : forall s s' : t, subset s s' = true -> Subset s s'. Proof. intros s s'; unfold subset; case (M.subset s s'); intuition; inversion H. Qed. Definition choose (s : t) : option elt := match choose s with | inleft (exist _ x _) => Some x | inright _ => None end. Lemma choose_1 : forall (s : t) (x : elt), choose s = Some x -> In x s. Proof. intros s x; unfold choose; case (M.choose s). simple destruct s0; intros; injection H; intros; subst; auto. intros; discriminate H. Qed. Lemma choose_2 : forall s : t, choose s = None -> Empty s. Proof. intro s; unfold choose; case (M.choose s); auto. simple destruct s0; intros; discriminate H. Qed. Lemma choose_3 : forall s s' x x', choose s = Some x -> choose s' = Some x' -> Equal s s' -> E.eq x x'. Proof. unfold choose; intros. generalize (M.choose_equal H1); clear H1. destruct (M.choose s) as [(?,?)|?]; destruct (M.choose s') as [(?,?)|?]; simpl; auto; congruence. Qed. Definition elements (s : t) : list elt := let (l, _) := elements s in l. Lemma elements_1 : forall (s : t) (x : elt), In x s -> InA E.eq x (elements s). Proof. intros; unfold elements; case (M.elements s); firstorder. Qed. Lemma elements_2 : forall (s : t) (x : elt), InA E.eq x (elements s) -> In x s. Proof. intros s x; unfold elements; case (M.elements s); firstorder. Qed. Lemma elements_3 : forall s : t, sort E.lt (elements s). Proof. intros; unfold elements; case (M.elements s); firstorder. Qed. Hint Resolve elements_3. Lemma elements_3w : forall s : t, NoDupA E.eq (elements s). Proof. auto. Qed. Definition min_elt (s : t) : option elt := match min_elt s with | inleft (exist _ x _) => Some x | inright _ => None end. Lemma min_elt_1 : forall (s : t) (x : elt), min_elt s = Some x -> In x s. Proof. intros s x; unfold min_elt; case (M.min_elt s). simple destruct s0; intros; injection H; intros; subst; intuition. intros; discriminate H. Qed. Lemma min_elt_2 : forall (s : t) (x y : elt), min_elt s = Some x -> In y s -> ~ E.lt y x. Proof. intros s x y; unfold min_elt; case (M.min_elt s). unfold For_all; simple destruct s0; intros; injection H; intros; subst; firstorder. intros; discriminate H. Qed. Lemma min_elt_3 : forall s : t, min_elt s = None -> Empty s. Proof. intros s; unfold min_elt; case (M.min_elt s); auto. simple destruct s0; intros; discriminate H. Qed. Definition max_elt (s : t) : option elt := match max_elt s with | inleft (exist _ x _) => Some x | inright _ => None end. Lemma max_elt_1 : forall (s : t) (x : elt), max_elt s = Some x -> In x s. Proof. 
intros s x; unfold max_elt; case (M.max_elt s). simple destruct s0; intros; injection H; intros; subst; intuition. intros; discriminate H. Qed. Lemma max_elt_2 : forall (s : t) (x y : elt), max_elt s = Some x -> In y s -> ~ E.lt x y. Proof. intros s x y; unfold max_elt; case (M.max_elt s). unfold For_all; simple destruct s0; intros; injection H; intros; subst; firstorder. intros; discriminate H. Qed. Lemma max_elt_3 : forall s : t, max_elt s = None -> Empty s. Proof. intros s; unfold max_elt; case (M.max_elt s); auto. simple destruct s0; intros; discriminate H. Qed. Definition add (x : elt) (s : t) : t := let (s', _) := add x s in s'. Lemma add_1 : forall (s : t) (x y : elt), E.eq x y -> In y (add x s). Proof. intros; unfold add; case (M.add x s); unfold Add; firstorder. Qed. Lemma add_2 : forall (s : t) (x y : elt), In y s -> In y (add x s). Proof. intros; unfold add; case (M.add x s); unfold Add; firstorder. Qed. Lemma add_3 : forall (s : t) (x y : elt), ~ E.eq x y -> In y (add x s) -> In y s. Proof. intros s x y; unfold add; case (M.add x s); unfold Add; firstorder. Qed. Definition remove (x : elt) (s : t) : t := let (s', _) := remove x s in s'. Lemma remove_1 : forall (s : t) (x y : elt), E.eq x y -> ~ In y (remove x s). Proof. intros; unfold remove; case (M.remove x s); firstorder. Qed. Lemma remove_2 : forall (s : t) (x y : elt), ~ E.eq x y -> In y s -> In y (remove x s). Proof. intros; unfold remove; case (M.remove x s); firstorder. Qed. Lemma remove_3 : forall (s : t) (x y : elt), In y (remove x s) -> In y s. Proof. intros s x y; unfold remove; case (M.remove x s); firstorder. Qed. Definition singleton (x : elt) : t := let (s, _) := singleton x in s. Lemma singleton_1 : forall x y : elt, In y (singleton x) -> E.eq x y. Proof. intros x y; unfold singleton; case (M.singleton x); firstorder. Qed. Lemma singleton_2 : forall x y : elt, E.eq x y -> In y (singleton x). Proof. intros x y; unfold singleton; case (M.singleton x); firstorder. Qed. Definition union (s s' : t) : t := let (s'', _) := union s s' in s''. Lemma union_1 : forall (s s' : t) (x : elt), In x (union s s') -> In x s \/ In x s'. Proof. intros s s' x; unfold union; case (M.union s s'); firstorder. Qed. Lemma union_2 : forall (s s' : t) (x : elt), In x s -> In x (union s s'). Proof. intros s s' x; unfold union; case (M.union s s'); firstorder. Qed. Lemma union_3 : forall (s s' : t) (x : elt), In x s' -> In x (union s s'). Proof. intros s s' x; unfold union; case (M.union s s'); firstorder. Qed. Definition inter (s s' : t) : t := let (s'', _) := inter s s' in s''. Lemma inter_1 : forall (s s' : t) (x : elt), In x (inter s s') -> In x s. Proof. intros s s' x; unfold inter; case (M.inter s s'); firstorder. Qed. Lemma inter_2 : forall (s s' : t) (x : elt), In x (inter s s') -> In x s'. Proof. intros s s' x; unfold inter; case (M.inter s s'); firstorder. Qed. Lemma inter_3 : forall (s s' : t) (x : elt), In x s -> In x s' -> In x (inter s s'). Proof. intros s s' x; unfold inter; case (M.inter s s'); firstorder. Qed. Definition diff (s s' : t) : t := let (s'', _) := diff s s' in s''. Lemma diff_1 : forall (s s' : t) (x : elt), In x (diff s s') -> In x s. Proof. intros s s' x; unfold diff; case (M.diff s s'); firstorder. Qed. Lemma diff_2 : forall (s s' : t) (x : elt), In x (diff s s') -> ~ In x s'. Proof. intros s s' x; unfold diff; case (M.diff s s'); firstorder. Qed. Lemma diff_3 : forall (s s' : t) (x : elt), In x s -> ~ In x s' -> In x (diff s s'). Proof. intros s s' x; unfold diff; case (M.diff s s'); firstorder. Qed. 
Definition cardinal (s : t) : nat := let (f, _) := cardinal s in f. Lemma cardinal_1 : forall s, cardinal s = length (elements s). Proof. intros; unfold cardinal; case (M.cardinal s); unfold elements in *; destruct (M.elements s); auto. Qed. Definition fold (B : Type) (f : elt -> B -> B) (i : t) (s : B) : B := let (fold, _) := fold f i s in fold. Lemma fold_1 : forall (s : t) (A : Type) (i : A) (f : elt -> A -> A), fold f s i = fold_left (fun a e => f e a) (elements s) i. Proof. intros; unfold fold; case (M.fold f s i); unfold elements in *; destruct (M.elements s); auto. Qed. Definition f_dec : forall (f : elt -> bool) (x : elt), {f x = true} + {f x <> true}. Proof. intros; case (f x); auto with bool. Defined. Lemma compat_P_aux : forall f : elt -> bool, compat_bool E.eq f -> compat_P E.eq (fun x => f x = true). Proof. unfold compat_bool, compat_P, Proper, respectful, impl; intros; rewrite <- H1; firstorder. Qed. Hint Resolve compat_P_aux. Definition filter (f : elt -> bool) (s : t) : t := let (s', _) := filter (P:=fun x => f x = true) (f_dec f) s in s'. Lemma filter_1 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool E.eq f -> In x (filter f s) -> In x s. Proof. intros s x f; unfold filter; case M.filter as (x0,Hiff); intuition. generalize (Hiff (compat_P_aux H)); firstorder. Qed. Lemma filter_2 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool E.eq f -> In x (filter f s) -> f x = true. Proof. intros s x f; unfold filter; case M.filter as (x0,Hiff); intuition. generalize (Hiff (compat_P_aux H)); firstorder. Qed. Lemma filter_3 : forall (s : t) (x : elt) (f : elt -> bool), compat_bool E.eq f -> In x s -> f x = true -> In x (filter f s). Proof. intros s x f; unfold filter; case M.filter as (x0,Hiff); intuition. generalize (Hiff (compat_P_aux H)); firstorder. Qed. Definition for_all (f : elt -> bool) (s : t) : bool := if for_all (P:=fun x => f x = true) (f_dec f) s then true else false. Lemma for_all_1 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> For_all (fun x => f x = true) s -> for_all f s = true. Proof. intros s f; unfold for_all; case M.for_all; intuition; elim n; auto. Qed. Lemma for_all_2 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> for_all f s = true -> For_all (fun x => f x = true) s. Proof. intros s f; unfold for_all; case M.for_all; intuition; inversion H0. Qed. Definition exists_ (f : elt -> bool) (s : t) : bool := if exists_ (P:=fun x => f x = true) (f_dec f) s then true else false. Lemma exists_1 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> Exists (fun x => f x = true) s -> exists_ f s = true. Proof. intros s f; unfold exists_; case M.exists_; intuition; elim n; auto. Qed. Lemma exists_2 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> exists_ f s = true -> Exists (fun x => f x = true) s. Proof. intros s f; unfold exists_; case M.exists_; intuition; inversion H0. Qed. Definition partition (f : elt -> bool) (s : t) : t * t := let (p, _) := partition (P:=fun x => f x = true) (f_dec f) s in p. Lemma partition_1 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> Equal (fst (partition f s)) (filter f s). Proof. intros s f; unfold partition; case M.partition. intro p; case p; clear p; intros s1 s2 H C. generalize (H (compat_P_aux C)); clear H; intro H. simpl; unfold Equal; intuition. apply filter_3; firstorder. elim (H2 a); intros. assert (In a s). eapply filter_1; eauto. elim H3; intros; auto. absurd (f a = true). exact (H a H6). eapply filter_2; eauto. Qed. 
Lemma partition_2 : forall (s : t) (f : elt -> bool), compat_bool E.eq f -> Equal (snd (partition f s)) (filter (fun x => negb (f x)) s). Proof. intros s f; unfold partition; case M.partition. intro p; case p; clear p; intros s1 s2 H C. generalize (H (compat_P_aux C)); clear H; intro H. assert (D : compat_bool E.eq (fun x => negb (f x))). generalize C; unfold compat_bool, Proper, respectful; intros; apply (f_equal negb); auto. simpl; unfold Equal; intuition. apply filter_3; firstorder. elim (H2 a); intros. assert (In a s). eapply filter_1; eauto. elim H3; intros; auto. absurd (f a = true). intro. generalize (filter_2 D H1). rewrite H7; intros H8; inversion H8. exact (H0 a H6). Qed. Definition elt := elt. Definition t := t. Definition In := In. Definition Equal s s' := forall a : elt, In a s <-> In a s'. Definition Subset s s' := forall a : elt, In a s -> In a s'. Definition Add (x : elt) (s s' : t) := forall y : elt, In y s' <-> E.eq y x \/ In y s. Definition Empty s := forall a : elt, ~ In a s. Definition For_all (P : elt -> Prop) (s : t) := forall x : elt, In x s -> P x. Definition Exists (P : elt -> Prop) (s : t) := exists x : elt, In x s /\ P x. Definition In_1 := eq_In. Definition eq := Equal. Definition lt := lt. Definition eq_refl := eq_refl. Definition eq_sym := eq_sym. Definition eq_trans := eq_trans. Definition lt_trans := lt_trans. Definition lt_not_eq := lt_not_eq. Definition compare := compare. Module E := E. End NodepOfDep.
<h3>Mathematical Simulation 2018</h3>
<div style="background-color:#0099cc;">
<font color = white>
<ul>
<li>Lázaro Alonso </li>
<li>Email: `[email protected], [email protected]`</li>
</ul>
</font>
</div>

### Please click on the following link. It is an easy way to send me your questions, and it lets all of us keep track of the problems, both homework and in-class.

https://join.slack.com/t/sm-grupo/shared_invite/enQtMzcxMzcxMjY1NzgyLWE5ZDlhYjg4OGJhMGE2ZmY2ZGUzZWIxYzQzOTUxZWU2ZGM5YjUyYWMyZGUzNzZjMDE5ZDIxYTA4YTI2ZWQ1NTU

```python
import sympy as sym; sym.init_printing(use_latex='mathjax')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```

### Maxima and minima

##### 1. Find the local maximum and minimum values of the function $$g(x) = x + 2 \sin x$$

Elements your answer must contain:
- A plot of the function $g(x)$
- Plots of the first and second derivatives ($g'(x)$, $g''(x)$)
- The maxima and minima marked on the plot (_use plt.scatter_)

```python
sym.var('x')
g = x + 2*sym.sin(x)
g_num = sym.lambdify([x], g, 'numpy')
x_vec = np.linspace(-4, 4, 100)
plt.figure(figsize = (4,4))
plt.plot(x_vec, g_num(x_vec), "-.")
plt.xlabel('$x$', fontsize = 18)
plt.ylabel('$g(x)$', fontsize = 18)
plt.savefig("fx_primero.jpeg", bbox_inches='tight')
plt.show()
```

##### 2. Discuss the curve $f(x) = x^4 - 4x^3$: inflection points, maxima and minima. Your answer must include the same elements as the previous case.

```python
sym.var('x')
f = x**4 - 4*x**3
f_num = sym.lambdify([x], f, 'numpy')
x_vec = np.linspace(-2, 4, 100)
plt.figure(figsize = (4,4))
plt.plot(x_vec, f_num(x_vec), "k-.")
plt.xlabel('$x$', fontsize = 18)
plt.ylabel('$f(x)$', fontsize = 18)
plt.xlim(xmax = 3.5, xmin = -1.5)
plt.ylim(ymax = 10, ymin= -32)
plt.scatter([3],[-27], s= 55, c = "r", label = "Global min")
plt.text(2.2, -30, "Global min", fontsize = 10)
plt.legend()
plt.savefig("fx_segundo.jpeg", bbox_inches='tight')
plt.show()
```

##### 3. A can is to be manufactured that must hold 1 L of oil. Find the dimensions of the can that minimize the cost of the metal used to make it.

The solution must include the following elements (a worked sketch follows the drawing code below):
- The equation of the system
- The critical numbers
- A drawing of the can

```python
from matplotlib.patches import Circle, PathPatch
from mpl_toolkits.mplot3d import Axes3D
import mpl_toolkits.mplot3d.art3d as art3d
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline

fig = plt.figure(figsize=(4,5))
ax = Axes3D(fig)
x = np.linspace(-1, 1, 100)
z = np.linspace(0, 4, 100)
X, Z = np.meshgrid(x, z)
Y = np.sqrt(1 - X**2)

# Draw parameters
rstride = 4
cstride = 4

### Cylinder
ax.plot_surface(X, Y, Z, alpha=0.5, cmap = "Blues", rstride=rstride, cstride=cstride)
ax.plot_surface(X, -Y, Z, alpha=0.5, cmap = "Blues", rstride=rstride, cstride=cstride)

# Top cap
up = Circle((0, 0), 1, alpha = .5, fc = "blue")
ax.add_patch(up)
art3d.pathpatch_2d_to_3d(up, z=4.1)
# Bottom cap
down = Circle((0, 0), 1, alpha = .5, fc = "orange")
ax.add_patch(down)
art3d.pathpatch_2d_to_3d(down, z=-.1)

# Styling
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_xticks([-1,1])
ax.set_yticks([-1,1])
ax.set_zticks([-2,2])
ax.set_facecolor("white")
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.axis("off");
plt.savefig("lata.jpeg", bbox_inches='tight');
```
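As a worked sketch of the optimization (my own solution outline, not part of the original assignment statement): with the volume constraint $V = \pi r^2 h = 1000\ \mathrm{cm}^3$, the height is $h = 1000/(\pi r^2)$ and the metal area to minimize is $A(r) = 2\pi r^2 + 2000/r$. Setting $A'(r) = 4\pi r - 2000/r^2 = 0$ gives the single positive critical number $r = (500/\pi)^{1/3}$, and since $A''(r) = 4\pi + 4000/r^3 > 0$ this is a minimum. The same computation in sympy, in the style of the cells above:

```python
import sympy as sym

r = sym.symbols('r', positive=True)
V = 1000                               # 1 L = 1000 cm^3
h = V / (sym.pi * r**2)                # height from the volume constraint
A = 2*sym.pi*r**2 + 2*sym.pi*r*h       # both caps plus the lateral surface

r_c = sym.solve(sym.diff(A, r), r)[0]  # the only positive critical number
h_c = h.subs(r, r_c)
print(sym.simplify(h_c / r_c))         # 2 -> the optimal can has h = 2r
print(r_c.evalf(4), h_c.evalf(4))      # r ~ 5.419 cm, h ~ 10.84 cm
```

So the cheapest can is exactly as tall as it is wide: $h = 2r \approx 10.84$ cm with $r \approx 5.42$ cm.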
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Lázaro Alonso.
<Strong> Copyright: </Strong> Public Domain as in [CC](https://creativecommons.org/licenses/by/2.0/) (Except where otherwise noted)
</footer>
function p = cs_nd (A) %CS_ND generalized nested dissection ordering. % p = cs_nd(A) computes the nested dissection ordering of a matrix. Small % submatrices (order 500 or less) are ordered via cs_amd. A must be sparse % and symmetric (use p = cs_nd(A|A') if it is not symmetric). % % Example: % A = delsq (numgrid ('L', 300)) ; % matrix used in 'bench' % p = cs_nd (A) ; % cspy (A (p,p)) ; % % See also CS_AMD, CS_SEP, CS_ESEP, CS_NSEP, AMD. % Copyright 2006-2007, Timothy A. Davis. % http://www.cise.ufl.edu/research/sparse n = size (A,1) ; if (n == 1) p = 1 ; elseif (n < 500) p = cs_amd (A) ; % use cs_amd on small graphs else [s a b] = cs_nsep (A) ; % find a node separator a = a (cs_nd (A (a,a))) ; % order A(a,a) recursively b = b (cs_nd (A (b,b))) ; % order A(b,b) recursively p = [a b s] ; % concatenate to obtain the final ordering end
/********************************************************************* * Software License Agreement (BSD License) * * Copyright (c) 2013, Willow Garage, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. *********************************************************************/ /* Author: Suat Gedikli */ #include <moveit/mesh_filter/mesh_filter_base.h> #include <moveit/mesh_filter/gl_mesh.h> #include <moveit/mesh_filter/filter_job.h> #include <geometric_shapes/shapes.h> #include <geometric_shapes/shape_operations.h> #include <Eigen/Eigen> #include <stdexcept> #include <sstream> #include <ros/console.h> // include SSE headers #ifdef HAVE_SSE_EXTENSIONS #include <xmmintrin.h> #endif mesh_filter::MeshFilterBase::MeshFilterBase(const TransformCallback& transform_callback, const SensorModel::Parameters& sensor_parameters, const std::string& render_vertex_shader, const std::string& render_fragment_shader, const std::string& filter_vertex_shader, const std::string& filter_fragment_shader) : sensor_parameters_(sensor_parameters.clone()) , next_handle_(FirstLabel) // 0 and 1 are reserved! 
, min_handle_(FirstLabel) , stop_(false) , transform_callback_(transform_callback) , padding_scale_(1.0) , padding_offset_(0.01) , shadow_threshold_(0.5) { filter_thread_ = std::thread(std::bind(&MeshFilterBase::run, this, render_vertex_shader, render_fragment_shader, filter_vertex_shader, filter_fragment_shader)); } void mesh_filter::MeshFilterBase::initialize(const std::string& render_vertex_shader, const std::string& render_fragment_shader, const std::string& filter_vertex_shader, const std::string& filter_fragment_shader) { mesh_renderer_.reset(new GLRenderer(sensor_parameters_->getWidth(), sensor_parameters_->getHeight(), sensor_parameters_->getNearClippingPlaneDistance(), sensor_parameters_->getFarClippingPlaneDistance())); depth_filter_.reset(new GLRenderer(sensor_parameters_->getWidth(), sensor_parameters_->getHeight(), sensor_parameters_->getNearClippingPlaneDistance(), sensor_parameters_->getFarClippingPlaneDistance())); mesh_renderer_->setShadersFromString(render_vertex_shader, render_fragment_shader); depth_filter_->setShadersFromString(filter_vertex_shader, filter_fragment_shader); depth_filter_->begin(); glGenTextures(1, &sensor_depth_texture_); glUniform1i(glGetUniformLocation(depth_filter_->getProgramID(), "sensor"), 0); glUniform1i(glGetUniformLocation(depth_filter_->getProgramID(), "depth"), 2); glUniform1i(glGetUniformLocation(depth_filter_->getProgramID(), "label"), 4); shadow_threshold_location_ = glGetUniformLocation(depth_filter_->getProgramID(), "shadow_threshold"); depth_filter_->end(); canvas_ = glGenLists(1); glNewList(canvas_, GL_COMPILE); glBegin(GL_QUADS); glColor3f(1, 1, 1); glTexCoord2f(0, 0); glVertex3f(-1, -1, 1); glTexCoord2f(1, 0); glVertex3f(1, -1, 1); glTexCoord2f(1, 1); glVertex3f(1, 1, 1); glTexCoord2f(0, 1); glVertex3f(-1, 1, 1); glEnd(); glEndList(); } mesh_filter::MeshFilterBase::~MeshFilterBase() { { std::unique_lock<std::mutex> lock(jobs_mutex_); stop_ = true; while (!jobs_queue_.empty()) { jobs_queue_.front()->cancel(); jobs_queue_.pop(); } } jobs_condition_.notify_one(); filter_thread_.join(); } void mesh_filter::MeshFilterBase::addJob(const JobPtr& job) const { { std::unique_lock<std::mutex> _(jobs_mutex_); jobs_queue_.push(job); } jobs_condition_.notify_one(); } void mesh_filter::MeshFilterBase::deInitialize() { glDeleteLists(canvas_, 1); glDeleteTextures(1, &sensor_depth_texture_); meshes_.clear(); mesh_renderer_.reset(); depth_filter_.reset(); } void mesh_filter::MeshFilterBase::setSize(unsigned int width, unsigned int height) { mesh_renderer_->setBufferSize(width, height); mesh_renderer_->setCameraParameters(width, width, width >> 1, height >> 1); depth_filter_->setBufferSize(width, height); depth_filter_->setCameraParameters(width, width, width >> 1, height >> 1); } void mesh_filter::MeshFilterBase::setTransformCallback(const TransformCallback& transform_callback) { std::unique_lock<std::mutex> _(transform_callback_mutex_); transform_callback_ = transform_callback; } mesh_filter::MeshHandle mesh_filter::MeshFilterBase::addMesh(const shapes::Mesh& mesh) { std::unique_lock<std::mutex> _(meshes_mutex_); JobPtr job(new FilterJob<void>(std::bind(&MeshFilterBase::addMeshHelper, this, next_handle_, &mesh))); addJob(job); job->wait(); mesh_filter::MeshHandle ret = next_handle_; const std::size_t sz = min_handle_ + meshes_.size() + 1; for (std::size_t i = min_handle_; i < sz; ++i) if (meshes_.find(i) == meshes_.end()) { next_handle_ = i; break; } min_handle_ = next_handle_; return ret; } void mesh_filter::MeshFilterBase::addMeshHelper(MeshHandle 
handle, const shapes::Mesh* cmesh) { meshes_[handle] = GLMeshPtr(new GLMesh(*cmesh, handle)); } void mesh_filter::MeshFilterBase::removeMesh(MeshHandle handle) { std::unique_lock<std::mutex> _(meshes_mutex_); FilterJob<bool>* remover = new FilterJob<bool>(std::bind(&MeshFilterBase::removeMeshHelper, this, handle)); JobPtr job(remover); addJob(job); job->wait(); if (!remover->getResult()) throw std::runtime_error("Could not remove mesh. Mesh not found!"); min_handle_ = std::min(handle, min_handle_); } bool mesh_filter::MeshFilterBase::removeMeshHelper(MeshHandle handle) { std::size_t erased = meshes_.erase(handle); return (erased != 0); } void mesh_filter::MeshFilterBase::setShadowThreshold(float threshold) { shadow_threshold_ = threshold; } void mesh_filter::MeshFilterBase::getModelLabels(LabelType* labels) const { JobPtr job(new FilterJob<void>(std::bind(&GLRenderer::getColorBuffer, mesh_renderer_.get(), (unsigned char*)labels))); addJob(job); job->wait(); } void mesh_filter::MeshFilterBase::getModelDepth(float* depth) const { JobPtr job1(new FilterJob<void>(std::bind(&GLRenderer::getDepthBuffer, mesh_renderer_.get(), depth))); JobPtr job2(new FilterJob<void>( std::bind(&SensorModel::Parameters::transformModelDepthToMetricDepth, sensor_parameters_.get(), depth))); { std::unique_lock<std::mutex> lock(jobs_mutex_); jobs_queue_.push(job1); jobs_queue_.push(job2); } jobs_condition_.notify_one(); job1->wait(); job2->wait(); } void mesh_filter::MeshFilterBase::getFilteredDepth(float* depth) const { JobPtr job1(new FilterJob<void>(std::bind(&GLRenderer::getDepthBuffer, depth_filter_.get(), depth))); JobPtr job2(new FilterJob<void>( std::bind(&SensorModel::Parameters::transformFilteredDepthToMetricDepth, sensor_parameters_.get(), depth))); { std::unique_lock<std::mutex> lock(jobs_mutex_); jobs_queue_.push(job1); jobs_queue_.push(job2); } jobs_condition_.notify_one(); job1->wait(); job2->wait(); } void mesh_filter::MeshFilterBase::getFilteredLabels(LabelType* labels) const { JobPtr job(new FilterJob<void>(std::bind(&GLRenderer::getColorBuffer, depth_filter_.get(), (unsigned char*)labels))); addJob(job); job->wait(); } void mesh_filter::MeshFilterBase::run(const std::string& render_vertex_shader, const std::string& render_fragment_shader, const std::string& filter_vertex_shader, const std::string& filter_fragment_shader) { initialize(render_vertex_shader, render_fragment_shader, filter_vertex_shader, filter_fragment_shader); while (!stop_) { std::unique_lock<std::mutex> lock(jobs_mutex_); // check if we have new sensor data to be processed. If not, wait until we get notified. if (jobs_queue_.empty()) jobs_condition_.wait(lock); if (!jobs_queue_.empty()) { JobPtr job = jobs_queue_.front(); jobs_queue_.pop(); lock.unlock(); job->execute(); lock.lock(); } } deInitialize(); } void mesh_filter::MeshFilterBase::filter(const void* sensor_data, GLushort type, bool wait) const { if (type != GL_FLOAT && type != GL_UNSIGNED_SHORT) { std::stringstream msg; msg << "unknown type \"" << type << "\". 
Allowed values are GL_FLOAT or GL_UNSIGNED_SHORT."; throw std::runtime_error(msg.str()); } JobPtr job(new FilterJob<void>(std::bind(&MeshFilterBase::doFilter, this, sensor_data, type))); addJob(job); if (wait) job->wait(); } void mesh_filter::MeshFilterBase::doFilter(const void* sensor_data, const int encoding) const { std::unique_lock<std::mutex> _(transform_callback_mutex_); mesh_renderer_->begin(); sensor_parameters_->setRenderParameters(*mesh_renderer_); glEnable(GL_TEXTURE_2D); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_LESS); glEnable(GL_CULL_FACE); glCullFace(GL_FRONT); glDisable(GL_ALPHA_TEST); glDisable(GL_BLEND); GLuint padding_coefficients_id = glGetUniformLocation(mesh_renderer_->getProgramID(), "padding_coefficients"); Eigen::Vector3f padding_coefficients = sensor_parameters_->getPaddingCoefficients() * padding_scale_ + Eigen::Vector3f(0, 0, padding_offset_); glUniform3f(padding_coefficients_id, padding_coefficients[0], padding_coefficients[1], padding_coefficients[2]); Eigen::Isometry3d transform; for (std::map<MeshHandle, GLMeshPtr>::const_iterator mesh_it = meshes_.begin(); mesh_it != meshes_.end(); ++mesh_it) if (transform_callback_(mesh_it->first, transform)) mesh_it->second->render(transform); mesh_renderer_->end(); // now filter the depth_map with the second rendering stage // depth_filter_.setBufferSize (width, height); // depth_filter_.setCameraParameters (fx, fy, cx, cy); depth_filter_->begin(); sensor_parameters_->setFilterParameters(*depth_filter_); glEnable(GL_TEXTURE_2D); glEnable(GL_DEPTH_TEST); glDepthFunc(GL_ALWAYS); glDisable(GL_CULL_FACE); glDisable(GL_ALPHA_TEST); glDisable(GL_BLEND); // glUniform1f (near_location_, depth_filter_.getNearClippingDistance ()); // glUniform1f (far_location_, depth_filter_.getFarClippingDistance ()); glUniform1f(shadow_threshold_location_, shadow_threshold_); GLuint depth_texture = mesh_renderer_->getDepthTexture(); GLuint color_texture = mesh_renderer_->getColorTexture(); // bind sensor depth glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, sensor_depth_texture_); float scale = 1.0 / (sensor_parameters_->getFarClippingPlaneDistance() - sensor_parameters_->getNearClippingPlaneDistance()); if (encoding == GL_UNSIGNED_SHORT) // unsigned shorts will be mapped to the range 0-1 during transfer. Afterwards we can apply another scale + offset to map the values between near and far clipping plane to 0 - 1.
-> scale = (65535 * depth - near ) / (far - near) // we have: [0 - 65535] -> [0 - 1] // we want: [near - far] -> [0 - 1] glPixelTransferf(GL_DEPTH_SCALE, scale * 65.535); else glPixelTransferf(GL_DEPTH_SCALE, scale); glPixelTransferf(GL_DEPTH_BIAS, -scale * sensor_parameters_->getNearClippingPlaneDistance()); glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, sensor_parameters_->getWidth(), sensor_parameters_->getHeight(), 0, GL_DEPTH_COMPONENT, encoding, sensor_data); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // bind depth map glActiveTexture(GL_TEXTURE2); glBindTexture(GL_TEXTURE_2D, depth_texture); // bind labels glActiveTexture(GL_TEXTURE4); glBindTexture(GL_TEXTURE_2D, color_texture); glCallList(canvas_); depth_filter_->end(); } void mesh_filter::MeshFilterBase::setPaddingOffset(float offset) { padding_offset_ = offset; } void mesh_filter::MeshFilterBase::setPaddingScale(float scale) { padding_scale_ = scale; }
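To make the scale/bias arithmetic in `doFilter` above concrete, the pixel-transfer math can be replayed standalone. A minimal sketch, assuming raw `GL_UNSIGNED_SHORT` depths are in millimeters (which is what the `65.535 = 65535/1000` factor implies, though the source does not state it) and using made-up clipping planes:

```python
# Replaying the GL_DEPTH_SCALE / GL_DEPTH_BIAS arithmetic from doFilter().
# Assumptions: raw unsigned-short depths are millimeters; near/far are
# arbitrary example values, not taken from the source.
near, far = 0.4, 5.0                          # clipping planes in meters
scale = 1.0 / (far - near)

def transferred_depth(raw_u16):
    t = raw_u16 / 65535.0                     # GL maps unsigned shorts to [0, 1] on upload
    return t * (scale * 65.535) - scale * near  # DEPTH_SCALE * t + DEPTH_BIAS

for raw_mm in (400, 2700, 5000):              # near plane, midpoint, far plane (in mm)
    print(raw_mm, round(transferred_depth(raw_mm), 6))
# -> 0.0, 0.5, 1.0: metric depth in [near, far] lands in [0, 1], as the comment claims
```

The `GL_FLOAT` branch is the same computation with the 65.535 factor dropped, assuming float depths arrive already in meters.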
This spell allows the caster to pick up an object, concentrate, and gain a “vision” of what happened in connection with that object. Essentially, objects “record” events that have happened to them; a sort of psychic imprint is left on the object. The object acts as a camera, in a sense, showing the caster a mental image of what happened around it. The image will not show anything beyond the object. For example, if the caster casts the spell on a dagger found on the floor and picks it up, he can see whom the dagger stabbed, how it was dropped, or perhaps even see the dagger being carried by a person from one location to another. The spell only gives vision from the perspective of the dagger and cannot show the whole contents of a room, for example. It is almost as if the dagger had a small camera attached to it. No sound is provided with this spell: only visuals. This spell is very much up to the game master’s discretion, and it can be useful for letting players in on clues connected to an adventure. It can also be used to feed them useless information or to distract them from the real course of a story. The base MP cost of this spell is 4. It can be reduced by increments to as low as 2 MP.
!------------------------------------------------------------------- ! Overloaded definitions for (+): ! ELEMENTAL FUNCTION add_rpe (x) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var) :: z z%sbits = significand_bits(x) z = +(x%val) END FUNCTION add_rpe ELEMENTAL FUNCTION add_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val + y%val END FUNCTION add_rpe_rpe ELEMENTAL FUNCTION add_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val + y END FUNCTION add_rpe_integer ELEMENTAL FUNCTION add_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val + y END FUNCTION add_rpe_long ELEMENTAL FUNCTION add_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val + y END FUNCTION add_rpe_real ELEMENTAL FUNCTION add_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val + y END FUNCTION add_rpe_realalt ELEMENTAL FUNCTION add_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x + y%val END FUNCTION add_integer_rpe ELEMENTAL FUNCTION add_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x + y%val END FUNCTION add_long_rpe ELEMENTAL FUNCTION add_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x + y%val END FUNCTION add_real_rpe ELEMENTAL FUNCTION add_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x + y%val END FUNCTION add_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (-): ! 
ELEMENTAL FUNCTION sub_rpe (x) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var) :: z z%sbits = significand_bits(x) z = -(x%val) END FUNCTION sub_rpe ELEMENTAL FUNCTION sub_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val - y%val END FUNCTION sub_rpe_rpe ELEMENTAL FUNCTION sub_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val - y END FUNCTION sub_rpe_integer ELEMENTAL FUNCTION sub_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val - y END FUNCTION sub_rpe_long ELEMENTAL FUNCTION sub_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val - y END FUNCTION sub_rpe_real ELEMENTAL FUNCTION sub_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val - y END FUNCTION sub_rpe_realalt ELEMENTAL FUNCTION sub_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x - y%val END FUNCTION sub_integer_rpe ELEMENTAL FUNCTION sub_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x - y%val END FUNCTION sub_long_rpe ELEMENTAL FUNCTION sub_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x - y%val END FUNCTION sub_real_rpe ELEMENTAL FUNCTION sub_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x - y%val END FUNCTION sub_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (*): ! 
ELEMENTAL FUNCTION mul_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val * y%val END FUNCTION mul_rpe_rpe ELEMENTAL FUNCTION mul_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val * y END FUNCTION mul_rpe_integer ELEMENTAL FUNCTION mul_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val * y END FUNCTION mul_rpe_long ELEMENTAL FUNCTION mul_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val * y END FUNCTION mul_rpe_real ELEMENTAL FUNCTION mul_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val * y END FUNCTION mul_rpe_realalt ELEMENTAL FUNCTION mul_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x * y%val END FUNCTION mul_integer_rpe ELEMENTAL FUNCTION mul_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x * y%val END FUNCTION mul_long_rpe ELEMENTAL FUNCTION mul_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x * y%val END FUNCTION mul_real_rpe ELEMENTAL FUNCTION mul_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x * y%val END FUNCTION mul_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (/): ! 
ELEMENTAL FUNCTION div_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val / y%val END FUNCTION div_rpe_rpe ELEMENTAL FUNCTION div_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val / y END FUNCTION div_rpe_integer ELEMENTAL FUNCTION div_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val / y END FUNCTION div_rpe_long ELEMENTAL FUNCTION div_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val / y END FUNCTION div_rpe_real ELEMENTAL FUNCTION div_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val / y END FUNCTION div_rpe_realalt ELEMENTAL FUNCTION div_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x / y%val END FUNCTION div_integer_rpe ELEMENTAL FUNCTION div_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x / y%val END FUNCTION div_long_rpe ELEMENTAL FUNCTION div_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x / y%val END FUNCTION div_real_rpe ELEMENTAL FUNCTION div_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x / y%val END FUNCTION div_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (.GE.): ! ELEMENTAL FUNCTION ge_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val .GE. y%val END FUNCTION ge_rpe_rpe ELEMENTAL FUNCTION ge_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val .GE. y END FUNCTION ge_rpe_integer ELEMENTAL FUNCTION ge_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val .GE. y END FUNCTION ge_rpe_long ELEMENTAL FUNCTION ge_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .GE. y END FUNCTION ge_rpe_real ELEMENTAL FUNCTION ge_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .GE. y END FUNCTION ge_rpe_realalt ELEMENTAL FUNCTION ge_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GE. y%val END FUNCTION ge_integer_rpe ELEMENTAL FUNCTION ge_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GE. 
y%val END FUNCTION ge_long_rpe ELEMENTAL FUNCTION ge_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GE. y%val END FUNCTION ge_real_rpe ELEMENTAL FUNCTION ge_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GE. y%val END FUNCTION ge_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (.LE.): ! ELEMENTAL FUNCTION le_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val .LE. y%val END FUNCTION le_rpe_rpe ELEMENTAL FUNCTION le_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val .LE. y END FUNCTION le_rpe_integer ELEMENTAL FUNCTION le_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val .LE. y END FUNCTION le_rpe_long ELEMENTAL FUNCTION le_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .LE. y END FUNCTION le_rpe_real ELEMENTAL FUNCTION le_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .LE. y END FUNCTION le_rpe_realalt ELEMENTAL FUNCTION le_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LE. y%val END FUNCTION le_integer_rpe ELEMENTAL FUNCTION le_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LE. y%val END FUNCTION le_long_rpe ELEMENTAL FUNCTION le_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LE. y%val END FUNCTION le_real_rpe ELEMENTAL FUNCTION le_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LE. y%val END FUNCTION le_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (.GT.): ! ELEMENTAL FUNCTION gt_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val .GT. y%val END FUNCTION gt_rpe_rpe ELEMENTAL FUNCTION gt_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val .GT. y END FUNCTION gt_rpe_integer ELEMENTAL FUNCTION gt_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val .GT. y END FUNCTION gt_rpe_long ELEMENTAL FUNCTION gt_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .GT. y END FUNCTION gt_rpe_real ELEMENTAL FUNCTION gt_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .GT. y END FUNCTION gt_rpe_realalt ELEMENTAL FUNCTION gt_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GT. y%val END FUNCTION gt_integer_rpe ELEMENTAL FUNCTION gt_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GT. y%val END FUNCTION gt_long_rpe ELEMENTAL FUNCTION gt_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GT. 
y%val END FUNCTION gt_real_rpe ELEMENTAL FUNCTION gt_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .GT. y%val END FUNCTION gt_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (.LT.): ! ELEMENTAL FUNCTION lt_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val .LT. y%val END FUNCTION lt_rpe_rpe ELEMENTAL FUNCTION lt_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val .LT. y END FUNCTION lt_rpe_integer ELEMENTAL FUNCTION lt_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val .LT. y END FUNCTION lt_rpe_long ELEMENTAL FUNCTION lt_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .LT. y END FUNCTION lt_rpe_real ELEMENTAL FUNCTION lt_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val .LT. y END FUNCTION lt_rpe_realalt ELEMENTAL FUNCTION lt_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LT. y%val END FUNCTION lt_integer_rpe ELEMENTAL FUNCTION lt_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LT. y%val END FUNCTION lt_long_rpe ELEMENTAL FUNCTION lt_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LT. y%val END FUNCTION lt_real_rpe ELEMENTAL FUNCTION lt_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x .LT. y%val END FUNCTION lt_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (==): ! 
ELEMENTAL FUNCTION eq_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val == y%val END FUNCTION eq_rpe_rpe ELEMENTAL FUNCTION eq_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val == y END FUNCTION eq_rpe_integer ELEMENTAL FUNCTION eq_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val == y END FUNCTION eq_rpe_long ELEMENTAL FUNCTION eq_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val == y END FUNCTION eq_rpe_real ELEMENTAL FUNCTION eq_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val == y END FUNCTION eq_rpe_realalt ELEMENTAL FUNCTION eq_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x == y%val END FUNCTION eq_integer_rpe ELEMENTAL FUNCTION eq_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x == y%val END FUNCTION eq_long_rpe ELEMENTAL FUNCTION eq_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x == y%val END FUNCTION eq_real_rpe ELEMENTAL FUNCTION eq_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x == y%val END FUNCTION eq_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (/=): ! ELEMENTAL FUNCTION ne_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x%val /= y%val END FUNCTION ne_rpe_rpe ELEMENTAL FUNCTION ne_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y LOGICAL :: z z = x%val /= y END FUNCTION ne_rpe_integer ELEMENTAL FUNCTION ne_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y LOGICAL :: z z = x%val /= y END FUNCTION ne_rpe_long ELEMENTAL FUNCTION ne_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val /= y END FUNCTION ne_rpe_real ELEMENTAL FUNCTION ne_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y LOGICAL :: z z = x%val /= y END FUNCTION ne_rpe_realalt ELEMENTAL FUNCTION ne_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x /= y%val END FUNCTION ne_integer_rpe ELEMENTAL FUNCTION ne_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x /= y%val END FUNCTION ne_long_rpe ELEMENTAL FUNCTION ne_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x /= y%val END FUNCTION ne_real_rpe ELEMENTAL FUNCTION ne_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y LOGICAL :: z z = x /= y%val END FUNCTION ne_realalt_rpe !------------------------------------------------------------------- ! Overloaded definitions for (**): ! 
ELEMENTAL FUNCTION pow_rpe_rpe (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val ** y%val END FUNCTION pow_rpe_rpe ELEMENTAL FUNCTION pow_rpe_integer (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER, INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val ** y END FUNCTION pow_rpe_integer ELEMENTAL FUNCTION pow_rpe_long (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x INTEGER(KIND=8), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val ** y END FUNCTION pow_rpe_long ELEMENTAL FUNCTION pow_rpe_real (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val ** y END FUNCTION pow_rpe_real ELEMENTAL FUNCTION pow_rpe_realalt (x, y) RESULT (z) TYPE(rpe_var), INTENT(IN) :: x REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x%val ** y END FUNCTION pow_rpe_realalt ELEMENTAL FUNCTION pow_integer_rpe (x, y) RESULT (z) INTEGER, INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x ** y%val END FUNCTION pow_integer_rpe ELEMENTAL FUNCTION pow_long_rpe (x, y) RESULT (z) INTEGER(KIND=8), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x ** y%val END FUNCTION pow_long_rpe ELEMENTAL FUNCTION pow_real_rpe (x, y) RESULT (z) REAL(KIND=RPE_REAL_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x ** y%val END FUNCTION pow_real_rpe ELEMENTAL FUNCTION pow_realalt_rpe (x, y) RESULT (z) REAL(KIND=RPE_ALTERNATE_KIND), INTENT(IN) :: x TYPE(rpe_var), INTENT(IN) :: y TYPE(rpe_var) :: z z%sbits = MAX(significand_bits(x), significand_bits(y)) z = x ** y%val END FUNCTION pow_realalt_rpe
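All of the arithmetic overloads above share one pattern: the result's `sbits` is set to the `MAX` of the operands' significand widths, and the subsequent overloaded assignment rounds the full-precision value down to that width. Purely as an illustration of that pattern (not part of the Fortran library), here is the same idea in Python, with `RpeVar` as a hypothetical stand-in for `rpe_var`:

```python
import math

# Hypothetical Python analogue of the rpe_var pattern above: results carry
# MAX(significand_bits(x), significand_bits(y)) and are rounded to that width.
class RpeVar:
    def __init__(self, val, sbits=52):
        self.sbits = sbits                  # significand bits to retain
        self.val = self._reduce(val)        # mimic the overloaded assignment

    def _reduce(self, v):
        # Crude rounding of the significand to `sbits` bits.
        if v == 0:
            return 0.0
        m, e = math.frexp(v)                # v = m * 2**e with 0.5 <= |m| < 1
        return math.ldexp(round(m * 2**self.sbits) / 2**self.sbits, e)

    def __add__(self, other):
        o_val = other.val if isinstance(other, RpeVar) else other
        o_sbits = other.sbits if isinstance(other, RpeVar) else 52
        return RpeVar(self.val + o_val, max(self.sbits, o_sbits))

x = RpeVar(1.0, sbits=10)
y = RpeVar(1.0 / 3.0, sbits=23)
print((x + y).sbits)                        # 23, like MAX(significand_bits(x), significand_bits(y))
```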
import torch import torch.nn as nn import torch.nn.functional as F # import torch.optim as optim from torch.autograd import Variable class VoxelNNet(nn.Module): def __init__(self, game, args): # game params self.board_x, self.board_y, self.board_z = game.getBoardSize() self.input_shape = (1, self.board_x, self.board_y, self.board_z) self.action_size = game.getActionSize() self.args = args super(VoxelNNet, self).__init__() self.features = self._make_feature_layers() in_channels = self._get_conv_output(self.input_shape) fc1_channels = 512 fc2_channels = 512 self.fc1 = nn.Linear(in_channels, fc1_channels) self.fc_bn1 = nn.BatchNorm1d(fc1_channels) self.fc2 = nn.Linear(fc1_channels, fc2_channels) self.fc_bn2 = nn.BatchNorm1d(fc2_channels) self.fc3 = nn.Linear(fc2_channels, self.action_size) self.fc4 = nn.Linear(fc2_channels, 1) def _make_feature_layers(self): layers = [] conv1_channels = 32 conv2_channels = 64 conv3_channels = 128 conv4_channels = 256 in_channels = 1 conv1 = [nn.Conv3d(in_channels, out_channels=conv1_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm3d(conv1_channels), nn.ReLU(inplace=True)] conv2 = [nn.Conv3d(conv1_channels, out_channels=conv2_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm3d(conv2_channels), nn.ReLU(inplace=True)] conv3 = [nn.Conv3d(conv2_channels, out_channels=conv3_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm3d(conv3_channels), nn.ReLU(inplace=True)] conv4 = [nn.Conv3d(conv3_channels, out_channels=conv4_channels, kernel_size=3, stride=1, padding=1), nn.BatchNorm3d(conv4_channels), nn.ReLU(inplace=True)] # pool1 = [nn.MaxPool3d(kernel_size=2, stride=2)] layers += conv1 layers += conv2 layers += conv3 layers += conv4 # layers += pool1 return nn.Sequential(*layers) def forward(self, x): out = self.features(x) s = out.view(out.size(0), -1) s = F.dropout(F.relu(self.fc_bn1(self.fc1(s))), p=self.args.dropout, training=self.training) # batch_size x 1024 s = F.dropout(F.relu(self.fc_bn2(self.fc2(s))), p=self.args.dropout, training=self.training) # batch_size x 512 pi = self.fc3(s) # batch_size x action_size v = self.fc4(s) # batch_size x 1 return F.log_softmax(pi, dim=1), F.tanh(v) def _get_conv_output(self, shape): bs = 1 input_ = Variable(torch.rand(bs, *shape)) output_feat = self.features(input_) n_size = output_feat.data.view(bs, -1).size(1) return n_size if __name__ == '__main__': import copy import sys import numpy as np sys.path.append('..') from VoxelLogic import Board from VoxelGame import VoxelGame class dotdict(dict): def __getattr__(self, name): return self[name] n = 20 x = 9 y = 3 z = 6 args = dotdict({ 'lr': 0.001, 'dropout': 0.3, 'epochs': 15, 'batch_size': 64, 'cuda': True, #torch.cuda.is_available(), 'num_channels': 512, }) b = Board(x, y, z, n) g = VoxelGame(x, y, z, n) nnet = VoxelNNet(g, args) if args.cuda: nnet.cuda() board = b.pieces.copy() board = torch.FloatTensor(board.astype(np.float64)) if args.cuda: board = board.contiguous().cuda() board = Variable(board, volatile=True) board = board.view(1, 1, nnet.board_x, nnet.board_y, nnet.board_z) nnet.eval() nnet.forward(board)
import math import warnings import errno import glob import matplotlib import matplotlib.pyplot as plt import numpy as np import os with warnings.catch_warnings(): # noqa # N.B. We must suppress this to appease `all_test`. # TODO(eric.cousineau): Remove this once all supported platform ships # `scipy>=1.0.0` by default. warnings.simplefilter("ignore", ImportWarning) import scipy as sp from scipy import spatial from drake import lcmt_viewer_load_robot from pydrake.common.eigen_geometry import Quaternion from pydrake.common.value import AbstractValue from pydrake.geometry import DrakeVisualizer, ReadObjToSurfaceMesh from pydrake.lcm import DrakeLcm, Subscriber from pydrake.math import RigidTransform, RotationMatrix from pydrake.systems.pyplot_visualizer import PyPlotVisualizer from pydrake.systems.rendering import PoseBundle class PlanarSceneGraphVisualizer(PyPlotVisualizer): """ Given a SceneGraph and a view plane, provides a view of the robot by projecting all geometry onto the view plane. This is intended to be used for robots that operate in the plane, but should render any robot approximately correctly. It has the following caveats: - z-ordering of objects is done based on the object centroid, which is not perfect for non-planar scenes. - Object geometry is projected onto the view plane, then a chull is taken, and finally that chull is drawn as a patch. Nonconvex geometry will thus be drawn incorrectly, and geometry with many vertices will slow down the visualizer. Specifics on view setup: T_VW specifies the 3x4 view projection matrix. For planar orthographic projection, use: [ <x axis select> x_axis_shift <y axis select> y_axis_shift 0, 0, 0, 1] % homogenizer e.g. [ 1 0 0 0.5 0 1 0 0 0 0 0 1]. would give a top-down view (i.e squashing the z axis), and would shift things in the x axis positively by 0.5. T_VW can be any valid view projection matrix. If the bottom row is [0, 0, 0, 1], the view projection will be an orthographic projection. xlim and ylim don't technically provide extra functionality, but it's easier to keep handling scaling with xlim, ylim, and view plane selection and *maybe* offsetting with the projection matrix. """ def __init__(self, scene_graph, draw_period=1./30, T_VW=np.array([[1., 0., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]]), xlim=[-1., 1], ylim=[-1, 1], facecolor=[1, 1, 1], use_random_colors=False, substitute_collocated_mesh_files=True, ax=None, show=None): """ Args: scene_graph: A SceneGraph object. draw_period: The rate at which this class publishes to the visualizer. T_VW: The view projection matrix from world to view coordinates. xlim: View limit into the scene. ylim: View limit into the scene. facecolor: Passed through to figure() and sets background color. Both color name strings and RGB triplets are allowed. Defaults to white. use_random_colors: If set to True, will render each body with a different color. (Multiple visual elements on the same body will be the same color.) substitute_collocated_mesh_files: If True, then a mesh file specified with an unsupported filename extension may be replaced by a file of the same base name in the same directory, but with a supported filename extension. Currently only .obj files are supported. ax: If supplied, the visualizer will draw onto those axes instead of creating a new set of axes. The visualizer will still change the view range and figure size of those axes. show: Opens a window during initialization / publish iff True. Default is None, which implies show=True unless matplotlib.get_backend() is 'template'. 
""" default_size = matplotlib.rcParams['figure.figsize'] scalefactor = (ylim[1]-ylim[0]) / (xlim[1]-xlim[0]) figsize = (default_size[0], default_size[0]*scalefactor) PyPlotVisualizer.__init__(self, facecolor=facecolor, figsize=figsize, ax=ax, draw_period=draw_period, show=show) self.set_name('planar_scenegraph_visualizer') self._scene_graph = scene_graph self._T_VW = T_VW # Pose bundle (from SceneGraph) input port. # TODO(tehbelinda): Rename the `lcm_visualization` port to match # SceneGraph once its output port has been updated. See #12214. self._pose_bundle_port = self.DeclareAbstractInputPort( "lcm_visualization", AbstractValue.Make(PoseBundle(0))) self.ax.axis('equal') self.ax.axis('off') # Achieve the desired view limits. self.ax.set_xlim(xlim) self.ax.set_ylim(ylim) default_size = self.fig.get_size_inches() self.fig.set_size_inches(figsize[0], figsize[1]) # Populate body patches. self._build_body_patches(use_random_colors, substitute_collocated_mesh_files) # Populate the body fill list -- which requires doing most of a draw # pass, but with an ax.fill() command to initialize the draw patches. # After initialization, we can then use in-place replacement of vertex # positions. The body fill list stores the ax patch objects in the # order they were spawned (i.e. by body, and then by order of view_ # patches). Drawing the tree should update them by iterating over # bodies and patches in the same order. self._body_fill_dict = {} X_WB_initial = RigidTransform.Identity() for full_name in self._patch_Blist.keys(): patch_Wlist, view_colors = self._get_view_patches(full_name, X_WB_initial) self._body_fill_dict[full_name] = [] for patch_W, color in zip(patch_Wlist, view_colors): # Project the full patch the first time, to initialize a vertex # list with enough space for any possible convex hull of this # vertex set. patch_V = self._project_patch(patch_W) body_fill = self.ax.fill( patch_V[0, :], patch_V[1, :], zorder=0, edgecolor='k', facecolor=color, closed=True)[0] self._body_fill_dict[full_name].append(body_fill) # Then update the vertices for a more accurate initial draw. self._update_body_fill_verts(body_fill, patch_V) def _build_body_patches(self, use_random_colors, substitute_collocated_mesh_files): """ Generates body patches. self._patch_Blist stores a list of patches for each body (starting at body id 1). A body patch is a list of all 3D vertices of a piece of visual geometry. """ self._patch_Blist = {} self._patch_Blist_colors = {} memq_lcm = DrakeLcm("memq://") memq_lcm_subscriber = Subscriber(lcm=memq_lcm, channel="DRAKE_VIEWER_LOAD_ROBOT", lcm_type=lcmt_viewer_load_robot) # TODO(SeanCurtis-TRI): Use SceneGraph inspection instead of mocking # LCM and inspecting the generated message. DrakeVisualizer.DispatchLoadMessage(self._scene_graph, memq_lcm) memq_lcm.HandleSubscriptions(0) assert memq_lcm_subscriber.count > 0 load_robot_msg = memq_lcm_subscriber.message # Spawn a random color generator, in case we need to pick random colors # for some bodies. Each body will be given a unique color when using # this random generator, with each visual element of the body colored # the same. color = iter(plt.cm.rainbow(np.linspace(0, 1, load_robot_msg.num_links))) for i in range(load_robot_msg.num_links): link = load_robot_msg.link[i] this_body_patches = [] this_body_colors = [] this_color = next(color) for j in range(link.num_geom): geom = link.geom[j] # MultibodyPlant currently sets alpha=0 to make collision # geometry "invisible". Ignore those geometries here. 
if geom.color[3] == 0: continue # Short-circuit if the geometry scale is invalid. # (All uses of float data should be strictly positive: # edge lengths for boxes, radius and length for # spheres and cylinders, and scaling for meshes.) if not all([x > 0 for x in geom.float_data]): continue X_BG = RigidTransform( RotationMatrix(Quaternion(geom.quaternion)), geom.position) if geom.type == geom.BOX: assert geom.num_float_data == 3 # Draw a bounding box. patch_G = np.vstack(( geom.float_data[0]/2.*np.array( [-1, -1, 1, 1, -1, -1, 1, 1]), geom.float_data[1]/2.*np.array( [-1, 1, -1, 1, -1, 1, -1, 1]), geom.float_data[2]/2.*np.array( [-1, -1, -1, -1, 1, 1, 1, 1]))) elif geom.type == geom.SPHERE: assert geom.num_float_data == 1 radius = geom.float_data[0] lati, longi = np.meshgrid(np.arange(0., 2.*math.pi, 0.5), np.arange(0., 2.*math.pi, 0.5)) lati = lati.ravel() longi = longi.ravel() patch_G = np.vstack([ np.sin(lati)*np.cos(longi), np.sin(lati)*np.sin(longi), np.cos(lati)]) patch_G *= radius elif geom.type == geom.CYLINDER: assert geom.num_float_data == 2 radius = geom.float_data[0] length = geom.float_data[1] # In the lcm geometry, cylinders are along +z # https://github.com/RobotLocomotion/drake/blob/last_sha_with_original_matlab/drake/matlab/systems/plants/RigidBodyCylinder.m # Two circles: one at bottom, one at top. sample_pts = np.arange(0., 2.*math.pi, 0.25) patch_G = np.hstack( [np.array([ [radius*math.cos(pt), radius*math.sin(pt), -length/2.], [radius*math.cos(pt), radius*math.sin(pt), length/2.]]).T for pt in sample_pts]) elif geom.type == geom.MESH: filename = geom.string_data base, ext = os.path.splitext(filename) if (ext.lower() != ".obj" and substitute_collocated_mesh_files): # Check for a co-located .obj file (case insensitive). for f in glob.glob(base + '.*'): if f[-4:].lower() == '.obj': filename = f break if filename[-4:].lower() != '.obj': raise RuntimeError( f"The given file {filename} is not " f"supported and no alternate {base}" ".obj could be found.") if not os.path.exists(filename): raise FileNotFoundError(errno.ENOENT, os.strerror( errno.ENOENT), filename) # Get mesh scaling. scale = geom.float_data[0] mesh = ReadObjToSurfaceMesh(filename, scale) patch_G = np.vstack([v.r_MV() for v in mesh.vertices()]) # Only store the vertices of the (3D) convex hull of the # mesh, as any interior vertices will still be interior # vertices after projection, and will therefore be removed # in _update_body_fill_verts(). hull = spatial.ConvexHull(patch_G) patch_G = np.vstack( [patch_G[v, :] for v in hull.vertices]).T else: print("UNSUPPORTED GEOMETRY TYPE {} IGNORED".format( geom.type)) continue # Compute pose in body. patch_B = X_BG @ patch_G # Close path if not closed. if (patch_B[:, -1] != patch_B[:, 0]).any(): patch_B = np.hstack((patch_B, patch_B[:, 0][np.newaxis].T)) this_body_patches.append(patch_B) if use_random_colors: this_body_colors.append(this_color) else: this_body_colors.append(geom.color) self._patch_Blist[link.name] = this_body_patches self._patch_Blist_colors[link.name] = this_body_colors def _get_view_patches(self, full_name, X_WB): """ Pulls out the view patch verts for the given body index after applying the appropriate transform, X_WB. X_WB needs to be a RigidTransform. """ patch_Wlist = [] for patch_B in self._patch_Blist[full_name]: patch_W = X_WB @ patch_B # Add homogeneous row. 
patch_W = np.vstack((patch_W, np.ones((1, patch_W.shape[1])))) patch_Wlist.append(patch_W) colors = self._patch_Blist_colors[full_name] return (patch_Wlist, colors) def _project_patch(self, patch_W): """ Project the object vertices from 3d in world frame W to 2d in view frame V. """ patch_V = self._T_VW @ patch_W # Applies normalization in the perspective transformation # to make each projected point have z = 1. If the bottom row # of T_VW is [0, 0, 0, 1], this will result in an # orthographic projection. patch_V[0, :] /= patch_V[2, :] patch_V[1, :] /= patch_V[2, :] # Cut patch_V down to 2xN. patch_V = patch_V[:2, :] return patch_V def _update_body_fill_verts(self, body_fill, patch_V): """ Takes a convex hull if necessary and uses in-place replacement of vertices to update the fill. """ # Take a convex hull to get an accurate shape for drawing, with verts # coming out in ccw order. if patch_V.shape[1] > 3: hull = spatial.ConvexHull(patch_V.T) patch_V = np.vstack([patch_V[:, v] for v in hull.vertices]).T # Update the verts, padding out to the appropriate full # of verts by # replicating the final vertex. n_verts = body_fill.get_path().vertices.shape[0] patch_V = np.pad( patch_V, ((0, 0), (0, n_verts - patch_V.shape[1])), mode="edge") body_fill.get_path().vertices[:, :] = patch_V.T def draw(self, context): """Overrides base with the implementation.""" pose_bundle = self._pose_bundle_port.Eval(context) view_dir = np.cross(self._T_VW[0, :3], self._T_VW[1, :3]) for frame_i in range(pose_bundle.get_num_poses()): # SceneGraph currently sets the name in PoseBundle as # "get_source_name::frame_name". full_name = pose_bundle.get_name(frame_i) model_id = pose_bundle.get_model_instance_id(frame_i) X_WB = pose_bundle.get_transform(frame_i) patch_Wlist, _ = self._get_view_patches(full_name, X_WB) for i, patch_W in enumerate(patch_Wlist): # Project the object vertices from 3d in world frame W to 2d in # view frame V (keeps homogeneous portion, removing it later). patch_V = self._project_patch(patch_W) body_fill = self._body_fill_dict[full_name][i] # Use the latest vertices to update the body_fill. self._update_body_fill_verts(body_fill, patch_V) body_fill.zorder = X_WB.translation() @ view_dir self.ax.set_title('t = {:.1f}'.format(context.get_time())) def ConnectPlanarSceneGraphVisualizer(builder, scene_graph, output_port=None, **kwargs): """Creates an instance of PlanarSceneGraphVisualizer, adds it to the diagram, and wires the scene_graph pose bundle output port to the input port of the visualizer. Provides an interface comparable to DrakeVisualizer.AddToBuilder. Args: builder: The diagram builder used to construct the Diagram. scene_graph: The SceneGraph in builder containing the geometry to be visualized. output_port: (optional) If not None, then output_port will be connected to the visualizer input port instead of the scene_graph. get_pose_bundle_output_port(). This is required, for instance, when the SceneGraph is inside a Diagram, and we must connect the exposed port to the visualizer instead of the original SceneGraph port. Additional kwargs are passed through to the PlanarSceneGraphVisualizer constructor. Returns: The newly created PlanarSceneGraphVisualizer object. """ if output_port is None: output_port = scene_graph.get_pose_bundle_output_port() visualizer = builder.AddSystem( PlanarSceneGraphVisualizer(scene_graph, **kwargs)) builder.Connect(output_port, visualizer.get_input_port(0)) return visualizer
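The view projection performed by `_project_patch` is easy to check in isolation. A minimal sketch (plain numpy, no Drake required; it uses the default `T_VW` from the constructor, which views the world x-z plane):

```python
# Illustrative sketch (not part of the Drake source above): how T_VW acts
# on a homogeneous world point for the default x-z orthographic view.
import numpy as np

T_VW = np.array([[1., 0., 0., 0.],   # view x <- world x
                 [0., 0., 1., 0.],   # view y <- world z (world y is squashed)
                 [0., 0., 0., 1.]])  # bottom row [0,0,0,1] -> orthographic

p_W = np.array([0.3, 0.7, 1.2, 1.0])  # homogeneous world-frame point
p_V = T_VW @ p_W
p_V = p_V[:2] / p_V[2]                # normalize, keep the 2D view coords
print(p_V)                            # -> [0.3 1.2]
```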
/* multifit_nlinear/fdfvv.c * * Copyright (C) 2015 Patrick Alken * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <config.h> #include <gsl/gsl_math.h> #include <gsl/gsl_multifit_nlinear.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_blas.h> /* fdfvv() Compute approximate second directional derivative using finite differences. See Eq. 19 of: M. K. Transtrum, J. P. Sethna, Improvements to the Levenberg Marquardt algorithm for nonlinear least-squares minimization, arXiv:1201.5885, 2012. Inputs: h - step size for finite difference x - parameter vector, size p v - geodesic velocity, size p f - vector of function values f_i(x), size n J - Jacobian matrix J(x), n-by-p swts - data weights fdf - fdf struct fvv - (output) approximate second directional derivative vector D_v^2 f(x) work - workspace, size p Return: success or error */ static int fdfvv(const double h, const gsl_vector *x, const gsl_vector *v, const gsl_vector *f, const gsl_matrix *J, const gsl_vector *swts, gsl_multifit_nlinear_fdf *fdf, gsl_vector *fvv, gsl_vector *work) { int status; const size_t n = fdf->n; const size_t p = fdf->p; const double hinv = 1.0 / h; size_t i; /* compute work = x + h*v */ for (i = 0; i < p; ++i) { double xi = gsl_vector_get(x, i); double vi = gsl_vector_get(v, i); gsl_vector_set(work, i, xi + h * vi); } /* compute f(x + h*v) */ status = gsl_multifit_nlinear_eval_f (fdf, work, swts, fvv); if (status) return status; for (i = 0; i < n; ++i) { double fi = gsl_vector_get(f, i); /* f_i(x) */ double fip = gsl_vector_get(fvv, i); /* f_i(x + h*v) */ gsl_vector_const_view row = gsl_matrix_const_row(J, i); double u, fvvi; /* compute u = sum_{ij} J_{ij} D v_j */ gsl_blas_ddot(&row.vector, v, &u); fvvi = (2.0 * hinv) * ((fip - fi) * hinv - u); gsl_vector_set(fvv, i, fvvi); } return status; } /* gsl_multifit_nlinear_fdfvv() Compute approximate second directional derivative using finite differences Inputs: h - step size for finite difference x - parameter vector, size p v - geodesic velocity, size p f - function values f_i(x), size n J - Jacobian matrix J(x), n-by-p swts - sqrt data weights (set to NULL if not needed) fdf - fdf fvv - (output) approximate (weighted) second directional derivative vector, size n, sqrt(W) fvv work - workspace, size p Return: success or error */ int gsl_multifit_nlinear_fdfvv(const double h, const gsl_vector *x, const gsl_vector *v, const gsl_vector *f, const gsl_matrix *J, const gsl_vector *swts, gsl_multifit_nlinear_fdf *fdf, gsl_vector *fvv, gsl_vector *work) { return fdfvv(h, x, v, f, J, swts, fdf, fvv, work); }
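The finite-difference formula implemented by `fdfvv()` above, fvv_i ≈ (2/h)((f_i(x+hv) - f_i(x))/h - (Jv)_i), approximates the second directional derivative v^T H_i v. A small numerical check (an illustrative numpy sketch, not GSL code; the test function `f` and the point `x` are made up for the check):

```python
# Verify: (2/h) * ((f(x + h v) - f(x)) / h - J v) ~= v^T H_i v, per component.
import numpy as np

def f(x):
    return np.array([x[0]**2 + x[0]*x[1], x[1]**3])

def J(x):  # analytic Jacobian of f
    return np.array([[2*x[0] + x[1], x[0]],
                     [0.0,           3*x[1]**2]])

x = np.array([1.0, 2.0])
v = np.array([0.5, -1.0])
h = 1e-4

fvv = (2.0 / h) * ((f(x + h*v) - f(x)) / h - J(x) @ v)

# Exact second directional derivatives: H_1 = [[2,1],[1,0]], H_2 = [[0,0],[0,6*x2]]
exact = np.array([2*v[0]**2 + 2*v[0]*v[1], 6*x[1]*v[1]**2])
print(fvv, exact)  # agree to O(h)
```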
module Order %default total %access public export data InclusiveEither : (typeLeft : Type) -> (typRight : Type) -> Type where LeftInc : typLeft -> Not typRight -> InclusiveEither typLeft typRight RightInc : Not typLeft -> typRight -> InclusiveEither typLeft typRight Both : typLeft -> typRight -> InclusiveEither typLeft typRight data ExclusiveEither : (typLeft : Type) -> (typRight : Type) -> Type where LeftExc : typLeft -> Not typRight -> ExclusiveEither typLeft typRight RightExc : Not typLeft -> typRight -> ExclusiveEither typLeft typRight |||Type of subset of a set subset : Type -> Type subset typ = (typ -> Bool) |||Type of proof that an element x belongs to a subset isIn : {typ : Type} -> (subset typ) -> (x : typ) -> Type isIn {typ} subSet x = ((subSet x) = True) |||Type of proof that two relations are equal relEqual : {typ : Type} -> (typ -> typ -> Type) -> (typ -> typ -> Type) -> Type relEqual {typ} r1 r2 = (a : typ) -> (b : typ) -> ((r1 a b) -> (r2 a b), (r2 a b) -> (r1 a b)) |||Induces a strict relation from a relation toStrictRelation : {typ : Type} -> (r : (typ -> typ -> Type)) -> (typ -> typ -> Type) toStrictRelation r = (\a => (\b => (r a b, Not (a = b)))) |||Induces a reverse relation from a relation toReverseRelation : {typ : Type} -> (r : (typ -> typ -> Type)) -> (typ -> typ -> Type) toReverseRelation r = (\a => (\b => (r b a))) |||Induces a strict reverse relation from a relation toStrictReverseRelation : {typ : Type} -> (r : (typ -> typ -> Type)) -> (typ -> typ -> Type) toStrictReverseRelation r = (\a => (\b => (r b a, Not (b = a)))) |||Type of proof that a relation is reflexive isReflexive : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isReflexive {typ} r = {a : typ} -> (r a a) |||Type of proof that a relation is symmetric isSymmetric : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isSymmetric {typ} r = {a : typ} -> {b : typ} -> (r a b) -> (r b a) |||Type of proof that a relation is anti-symmetric isAntiSymmetric : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isAntiSymmetric {typ} r = {a : typ} -> {b : typ} -> (r a b) -> (r b a) -> (a = b) |||Type of proof that a relation is transitive isTransitive : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isTransitive {typ} r = {a : typ} -> {b : typ} -> {c : typ} -> (r a b) -> (r b c) -> (r a c) |||Type of proof that a relation is an equivalence isEquivalence : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isEquivalence {typ} r = (isReflexive r, isSymmetric r, isTransitive r) |||Type of proof that a relation is a partial order isPartialOrder : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isPartialOrder {typ} r = (isReflexive r, isAntiSymmetric r, isTransitive r) |||Type of proof that a relation is a total order isTotalOrder : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isTotalOrder {typ} r = (isPartialOrder r, (a : typ) -> (b : typ) -> (InclusiveEither (r a b) ((toReverseRelation r) a b))) |||Type of proof that a relation is a well-order isWellOrder : {typ : Type} -> (r : (typ -> typ -> Type)) -> Type isWellOrder {typ} r = (isPartialOrder r, (subSet : (subset typ)) -> (a : typ ** (isIn subSet a, ((x : typ) -> (isIn subSet x) -> (r a x))))) |||Proof that the reverse of the reverse of the relation is the relation reverseIdempotent : {typ : Type} -> {r : (typ -> typ -> Type)} -> (relEqual r (toReverseRelation (toReverseRelation r))) reverseIdempotent {typ} {r} a b = (id, id) |||Proof that the reverse of a reflexive relation is reflexive reversePreservesRefl : {typ : Type} -> {r : (typ -> typ -> 
Type)} -> (isReflexive r) -> (isReflexive (toReverseRelation r)) reversePreservesRefl {typ} {r} rIsRefl = rIsRefl |||Proof that the reverse of a symmetric relation is symmetric reversePreservesSymm : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isSymmetric r) -> (isSymmetric (toReverseRelation r)) reversePreservesSymm {typ} {r} rIsSymm = rIsSymm |||Proof that the reverse of an anti-symmetric relation is anti-symmetric reversePreservesAntiSymm : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isAntiSymmetric r) -> (isAntiSymmetric (toReverseRelation r)) reversePreservesAntiSymm {typ} {r} rIsAntiSymm relLeft relRight = rIsAntiSymm relRight relLeft |||Proof that the reverse of a transitive relation is transitive reversePreservesTrans : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isTransitive r) -> (isTransitive (toReverseRelation r)) reversePreservesTrans {typ} {r} rIsTrans relLeft relRight = rIsTrans relRight relLeft |||Proof that a reverse order of a symmetric relation is the relation itself reverseSymmEq : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isSymmetric r) -> (relEqual r (toReverseRelation r)) reverseSymmEq {typ} {r} rIsSymmetric a b = (rIsSymmetric, rIsSymmetric) |||Proof that a reverse order of a partial order is a partial order reversePOrderIsPOrder : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isPartialOrder r) -> (isPartialOrder (toReverseRelation r)) reversePOrderIsPOrder {typ} {r} (rIsRefl, rIsAntiSym, rIsTrans) = (rIsRefl, reversePreservesAntiSymm rIsAntiSym, reversePreservesTrans {typ} {r} rIsTrans) |||Proof that !(b <= a) implies a != b notSymmImpliesNotEq : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isReflexive r) -> (Not ((toReverseRelation r) a b)) -> (Not (a = b)) notSymmImpliesNotEq {typ} {r} rIsRefl notLTE Refl = void(notLTE rIsRefl) |||Proof that a total order leads to a strict order toStrictOrder : {typ : Type} -> {r : (typ -> typ -> Type)} -> (isTotalOrder r) -> ((a : typ) -> (b : typ) -> (Either (a = b) (ExclusiveEither ((toStrictRelation r) a b) ((toStrictReverseRelation r) a b)))) toStrictOrder {typ} {r} rIsTotalOrder a b = case rIsTotalOrder of (rIsPartialOrder, rIsTotal) => case (rIsPartialOrder) of (rIsRefl, rIsAntiSymm, rIsTrans) => case (rIsTotal a b) of (Both leftOrder rightOrder) => Left (rIsAntiSymm leftOrder rightOrder) (LeftInc leftOrder notRightOrder) => Right (LeftExc (leftOrder, notSymmImpliesNotEq rIsRefl notRightOrder) (\rightOrder => notRightOrder (fst rightOrder))) (RightInc notLeftOrder rightOrder) => Right (RightExc (\leftOrder => notLeftOrder (fst leftOrder)) (rightOrder, notSymmImpliesNotEq rIsRefl notLeftOrder))
module Distributed export MultiArch, child_architecture, reconstruct_global_grid, HaloCommunication, HaloCommunicationBC, inject_halo_communication_boundary_conditions, DistributedFFTBasedPoissonSolver using MPI using Oceananigans.Utils using Oceananigans.Grids include("distributed_utils.jl") include("multi_architectures.jl") include("partition_assemble.jl") include("distributed_grids.jl") include("distributed_kernel_launching.jl") include("halo_communication_bcs.jl") include("halo_communication.jl") include("distributed_apply_flux_bcs.jl") include("distributed_fields.jl") include("distributed_fft_based_poisson_solver.jl") end # module
.onAttach <- function(libname, pkgname){
  path = tempdir()
  if(!dir.exists(paste0(path, "/js"))){
    dir.create(paste0(path, "/js"))
  }

  ## Save echarts.js
  if(!file.exists(paste0(path, "/js/echarts.js"))){
    file.copy(from = system.file("JS/echarts.js", package = 'REmap'),
              to = paste0(path, "/js"))
  }
  ## Save echarts-all.js
  if(!file.exists(paste0(path, "/js/echarts-all.js"))){
    file.copy(from = system.file("JS/echarts-all.js", package = 'REmap'),
              to = paste0(path, "/js"))
  }
  ## Save main.js
  if(!file.exists(paste0(path, "/js/main.js"))){
    file.copy(from = system.file("JS/main.js", package = 'REmap'),
              to = paste0(path, "/js"))
  }
  ## Save jquery.min.js
  if(!file.exists(paste0(path, "/js/jquery.min.js"))){
    file.copy(from = system.file("JS/jquery.min.js", package = 'REmap'),
              to = paste0(path, "/js"))
  }

  options(remap.js.dir = "./js")
  options(remap.js.web = FALSE)
  options(remap.ak = "q9U1lWgCK1aBGVC1DVWrgWa7")
}

setClass("remap",
         representation(
           id = "character",
           maptype = "character",
           option = "character",
           theme = "list",
           content = "character"
         ))

setMethod("show",
          signature = "remap",
          definition = function(object){
            plot.remap(object, path = "")
          })

setMethod("summary",
          signature = "remap",
          definition = function(object){
            cat("Object ID:\n")
            cat(object@id)
            cat("\nMap Type:\n")
            cat(object@maptype)
            cat("\nFile Position:\n")
            cat(paste0("~/", object@id, ".html"))  # was: cat(file_name = ...), an invalid cat() call
            cat("\nLength of Content:\n")
            cat(length(strsplit(object@content, "\\n")[[1]]))  # was: out@content, an undefined object
          })
#define _SCL_SECURE_NO_WARNINGS #include "CppSystem.hpp" #include "CommonSystemImpl_p.hpp" #include "Variable_p.hpp" #include <memory> #include <boost/algorithm/string.hpp> #include <vector> #include <string> #include <iostream> using std::string; using std::shared_ptr; namespace pysim { CppSystem::CppSystem(){ } CppSystem::~CppSystem() { } /////////////////////////////////////// // // Cpp Interface // /////////////////////////////////////// void CppSystem::par(std::vector<double>* var, const char* name, const char* description) { //TODO:: Make parameter string str(name); boost::algorithm::trim(str); d_ptr->par_vectors[str] = var; d_ptr->par_descriptions[str] = string(description); } void CppSystem::par(std::vector<std::vector<double>>* var, const char* name, const char* description) { //TODO:: Make parameter string str(name); boost::algorithm::trim(str); d_ptr->par_matrices[str] = var; d_ptr->par_descriptions[str] = string(description); } void CppSystem::par(string* var, const char* name, const char* description) { string str(name); boost::algorithm::trim(str); d_ptr->par_strings[str] = var; d_ptr->par_descriptions[str] = string(description); } void CppSystem::par(std::map<string, double>* var, const char* name, const char* description) { string str(name); boost::algorithm::trim(str); d_ptr->par_maps[str] = var; d_ptr->par_descriptions[str] = string(description); } void CppSystem::par(std::map<string, std::vector<double>>* var, const char* name, const char* description) { string str(name); boost::algorithm::trim(str); d_ptr->par_vector_maps[str] = var; d_ptr->par_descriptions[str] = string(description); } void CppSystem::setDiscrete(bool d) { d_ptr->isDiscrete = d; } void CppSystem::setNextUpdate(double t){ d_ptr->nextUpdateTime = t; } }
# Variational Auto Encoder in pyTorch for MNIST dataset

## Overview

The basic idea of a Variational Auto Encoder (VAE) is shown below:

Hopefully, this cartoon conveys a couple of important ideas:
1. the VAE is composed of a (probabilistic) Encoder and a (probabilistic) Decoder which do NOT need to have the same architecture (represented by boxes of different colors and shapes)
2. the output of the VAE is probabilistic, so it is often useful to visualize the expected (i.e. averaged) output.
3. the input and the expected output of the VAE are NOT identical (the difference between input and output is greatly exaggerated just to make the point clear)

The encoder compresses the input $x$ into a latent variable $z$ which lives in a low-dimensional latent space. The decoder tries to recover the original image from the latent-space representation.

We now zoom in on the central part of the figure and clarify what we mean by **probabilistic encoder**. The important feature is that the latent variable $z=\mu+\sigma \epsilon$ is a random variable, since $\epsilon$ is itself a random variable distributed according to a normal distribution $p(\epsilon)=N(\epsilon|0,I)$. This means that each input vector $x$ can be mapped to many different values of $z$, i.e. we have a **probabilistic encoder**. Fortunately, these values of $z$ are all close to each other (i.e. they are centered around $\mu$ and have a spread of size $\sigma$).

From the figure above we can easily read off the core of the pyTorch implementation of the VAE:

    mu_z, sigma_z = encoder(x_input)
    ep = np.random.normal(0, 1, size=sigma_z.size())
    z = mu_z + sigma_z * ep
    x_output = decoder(z)

We will train the VAE by demanding that the reconstruction error is small (i.e. the input $x$ and output $\tilde{x}$ are close to each other according to some proper metric). This requirement, together with the use of a **probabilistic encoder**, imposes a nice structure on the latent space. To understand this, imagine that two inputs $x_1$ and $x_2$ representing two different digits (for example a $0$ and a $7$) were mapped to similar values of $\mu$ and $\sigma$ by the encoder. Then we would calculate:
$$z_1=\mu_1+\sigma_1 \epsilon_1,\quad\text{and}\quad z_2=\mu_2+\sigma_2 \epsilon_2$$
Since $\epsilon\sim N(0,I)$ is a random variable, it is possible that $z_1=z_2$ (or, more precisely, that $z_1$ and $z_2$ are arbitrarily close to each other). When we feed this into the decoder, it is then impossible for it to decide whether to generate a $0$ or a $7$, and the reconstruction error would be (on average) very large. The only way to avoid this problem is for the encoder to learn how to map different inputs (for example a $0$ and a $7$) into different regions of the latent space and, conversely, to map similar inputs (for example two different versions of a $0$) into close-by regions of the latent space.

We will now describe in detail how this works. As an example we will use the MNIST dataset of handwritten digits. The VAE will take as input 28 x 28 gray-scale images and will simultaneously learn:
1. to compress the input vector $x$ in $D_x=784$ dimensions (obtained by flattening the $28 \times 28$ images) into a hidden variable $z$ in a low-dimensional space, for example $D_z=2$.
2. to generate a $D_x$ dimensional output vector $\tilde{x}$ for each particular value of the hidden variable $z$.

As we have already seen, the VAE is composed of a **stochastic encoder** which maps $x\to z$ and a **stochastic decoder** which maps $z \to \tilde{x}$.
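As a concrete (toy) illustration of the pseudocode above, here is a runnable sketch in which the encoder and decoder are stand-in linear maps (the real networks are built later in this notebook):

```python
# Toy, runnable version of the pseudocode above; W_enc and W_dec are
# stand-ins for the encoder/decoder networks defined later.
import numpy as np

rng = np.random.default_rng(0)
D_x, D_z = 4, 2
W_enc = rng.normal(size=(2 * D_z, D_x))  # produces [mu_z, log_sigma_z] stacked
W_dec = rng.normal(size=(D_x, D_z))

x_input = rng.normal(size=D_x)
mu_z, log_sigma_z = np.split(W_enc @ x_input, 2)
sigma_z = np.exp(log_sigma_z)        # ensures sigma_z > 0

ep = rng.normal(size=D_z)            # ep ~ N(0, I)
z = mu_z + sigma_z * ep              # the probabilistic encoding of x_input
x_output = W_dec @ z                 # (deterministic) decoding of z
print(z, x_output)
```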
We will train the VAE by:
1. demanding that the reconstruction error is small, i.e. $\tilde{x}$ and $x$ are similar to each other (according to a proper metric)
2. enforcing a suitable structure in the latent space. This is important since we eventually want to sample the latent space and use the decoder to generate new (never seen before) images. This is only possible if we know how to meaningfully sample the latent space, which in turn requires that the latent space has some "structure".

## Mathematical Preliminaries

We start by showing three mathematical identities:
\begin{equation}
\log p(X)=\int dZ q(Z)\log p(X) = \int dZ q(Z) \log \frac{p(X,Z)}{p(Z|X)} \\
= \int dZ q(Z) \log \frac{p(X,Z)}{q(Z)} + \int dZ q(Z) \log \frac{q(Z)}{p(Z|X)} \\
= ELBO + KL(q(Z)\,||\,p(Z|X))
\end{equation}
where $KL(a(.)\,||\,b(.))$ is the KL divergence between the two distributions $a(.)$ and $b(.)$, which is always non-negative and becomes zero only when $a(.)=b(.)$.

From equation (1) and the non-negativity of the KL divergence it immediately follows that:
\begin{equation}
\log p(X) \ge ELBO
\end{equation}

We can rewrite the ELBO in two ways:
\begin{equation}
ELBO \equiv \int \,dZ \,q(Z)\,\log \frac{p(X,Z)}{q(Z)} = E_{q(Z)} [\log p(X,Z) - \log q(Z)] \\
ELBO \equiv \int \,dZ \,q(Z)\,\log \frac{p(X|Z)p(Z)}{q(Z)} = E_{q(Z)} [\log p(X|Z)] - KL(q(Z)\,||\,p(Z))
\end{equation}

At this point, equations 1, 2, 3 are not very useful and might seem a little arbitrary. Interestingly, these equations remain true even when the distributions depend on some parameters. When this is the case, they can be used:
1. to find the value of the parameters which maximizes the probability of observing the data (this is known as the Expectation-Maximization algorithm), or
2. to approximate the true posterior distribution in terms of a variational family of distributions (Bayesian Variational Inference).

## Probabilistic Decoder (i.e. $z\to \tilde{x}$)

The task of the probabilistic decoder is to take a hidden variable $z$ in $D_z$ dimensions and transform it into an output vector $\tilde{x}$ in $D_x$ dimensions. In mathematical terms the probabilistic decoder is defined by:
\begin{equation}
p_\theta(\tilde{x},z)=p_\theta(\tilde{x}|z)p(z)
\end{equation}
where $p(z)$ is the prior for the hidden variable $z$, which is chosen to be a multidimensional normal distribution with zero mean and unit variance:
\begin{equation}
p(z)=N(z|0,I),\quad\text{where $I$ is the identity matrix in $D_z$ dimensions}
\end{equation}
and $p_\theta(\tilde{x}|z)$ is the likelihood, which describes the probability, under the current model, that the hidden variable $z$ generates the output $\tilde{x}$. This probability depends on the model under consideration. For us the model is a Neural Network (NN). The parameters of the NN are collectively indicated by $\theta$. Integrating the joint distribution above over $z$ gives the EVIDENCE, $p_\theta(\tilde{x})=\int dz\, p_\theta(\tilde{x}|z)p(z)$, which is the probability of observing the data under the current model. Clearly we should optimize the parameters $\theta$ so that the EVIDENCE is maximized (more on this later).

To be concrete, we will use a NN with a single hidden layer. Therefore $p_\theta(\tilde{x}|z)$ is built explicitly in the following way:
\begin{equation}
a_2 = relu(W_1 z + b_1) \\
a_3 = sigmoid(W_2 a_2 + b_2) \\
p(\tilde{x}|a_3) = a_3^\tilde{x}(1-a_3)^{(1-\tilde{x})}
\end{equation}
In other words, the latent variable $z$ is fed into a NN with two dense layers. The output of the NN is a vector $a_3$ containing $D_x$ probabilities (i.e.
the dimensionality of the MNIST images). Each entry in $a_3$ describes the probability that a particular pixel in the image $\tilde{x}$ is black ($\tilde{x}_i=0$) or white ($\tilde{x}_i=1$). Clearly this is a Bernoulli distribution, therefore the expected value of $\tilde{x}$ is:
$$ E[\tilde{x}] = \sum_{\tilde{x}=0,1} \tilde{x} p(\tilde{x}|a_3) = 0\times(1-a_3)+1\times a_3 = a_3 $$
Therefore, to visualize the average output we can simply plot the probabilities $a_3$ as an image. The parameters of the decoder NN are $\theta=\{W_1,b_1,W_2,b_2\}$.

As an aside, we note that:
\begin{equation}
\log p(\tilde{x}|a_3) = \sum_i \tilde{x}_i \log (a_3)_i + (1-\tilde{x}_i) \log (1-(a_3)_i)
\end{equation}
is just (minus) the binary cross entropy between $\tilde{x}$ and $a_3$.

## Probabilistic Encoder (i.e. $x\to z$)

The task of the probabilistic encoder is to take an input vector $x$ in $D_x$ dimensions and compress it into the hidden variable $z$ in $D_z$ dimensions. Mathematically the probabilistic encoder is defined by the posterior $p_\theta(z|x)$, which describes the probability that an input vector $x$ is mapped to a particular value $z$ in the latent space. In principle the posterior can be learned using the Bayesian framework:
\begin{equation}
p_\theta(z|x)=\frac{p_\theta(x|z)p(z)}{p_\theta(x)}
\end{equation}
where all the terms on the RHS have been introduced when discussing the probabilistic decoder. The expression above is the mathematical way of expressing the fact that the encoder and decoder are complementary to each other.

For the problem at hand, the equation above is intractable, both analytically and computationally. It is analytically intractable because we do not have an analytical form for $p_\theta(x|z)$, since this is encoded in a NN. It is computationally intractable since the EVIDENCE ($p_\theta(x)=\int dz p_\theta(x|z)p(z)$), which appears in the denominator, is exponentially hard to compute in the dimension of the hidden space $D_z$. We are therefore forced to use Approximate Bayesian Inference.

We approximate the true posterior $p_\theta(z|x)$ by a variational family $q_\phi(z|x)$. Here we restrict ourselves to the family of Gaussian posteriors $q(z|x)=N(z\,|\,\mu(x),diag(\sigma(x)))$. In principle, for each $x$ we would need to learn $\mu(x)$ and $\sigma(x)$. However, in this case the number of parameters scales with the dataset size, which is a BAD idea. Instead we use an inference network, described by the parameters $\phi$, which will learn how to map $x$ to $\mu(x)$ and $\sigma(x)$. This is called **amortization**.

To be concrete, we choose the inference network to be a NN with a single hidden layer:
\begin{equation}
a_x=relu(W_x x + b_x) \\
\mu_z = W_\mu a_x + b_\mu \\
\log\sigma_z = W_\sigma a_x + b_\sigma
\end{equation}
In other words, the input variable $x$ is fed into a NN with two dense layers. The output of the NN consists of two vectors of dimension $D_z$: one contains the mean and the other the variance (actually the log variance) of the Normal distribution. The parameters of this inference network are $\phi=\{W_x,b_x,W_\mu,b_\mu,W_\sigma,b_\sigma\}$.
The output of the inference network are $\mu_\phi(x),\sigma_\phi(x)$, which are used to build the approximate posterior:
\begin{equation}
q_\phi(z|x)=N(z\,|\,\mu_\phi(x),diag(\sigma_\phi(x)))
\end{equation}

## Loss Function

We now rewrite the initial three equations, making sure to keep track of all the parameters we have introduced so far:
\begin{equation}
\log p_\theta(x)= ELBO(\theta,\phi) + KL(q_\phi(z|x)\,||\,p_\theta(z|x))
\end{equation}
\begin{equation}
\log p_\theta(x) \ge ELBO(\theta,\phi)
\end{equation}
\begin{equation}
ELBO(\theta,\phi) = E_{q_\phi(z|x)} [\log p_\theta(x|z)] - KL(q_\phi(z|x)\,||\,p(z))
\end{equation}

As a recap:
1. $p(z)=N(z | 0,I)$ is the prior for the hidden variables
2. $p_\theta(x|z)$ and $p_\theta(x)$ are the "likelihood" and "evidence", which depend on the parameters $\theta=\{W_1,b_1,W_2,b_2\}$ of the decoder NN
3. $p_\theta(z|x)$ is the true posterior, which is not accessible to us (i.e. the problem is intractable)
4. $q_\phi(z|x)$ is the approximate posterior, given by the Gaussian family $q_\phi(z|x)=N(z\,|\,\mu_\phi(x),\sigma_\phi(x))$, where $\mu_\phi(x)$ and $\sigma_\phi(x)$ are the output of the inference NN and depend on the parameters $\phi=\{W_x,b_x,W_\mu,b_\mu,W_\sigma,b_\sigma\}$.

Our goal is now to maximize the evidence (or, equivalently, the log-evidence, since the log is a monotonic function), i.e. maximize the probability of observing the data under the current model. Unfortunately the log-evidence depends on the true posterior (see Eq. 11), which is inaccessible to us. Fortunately, we have shown that the ELBO is a lower bound for the evidence (this justifies its name), so we maximize the ELBO as a proxy for the maximization of the evidence.

The ELBO depends on the parameters $\theta$ of the decoder NN and $\phi$ of the inference NN. Note that:
1. when we maximize the ELBO w.r.t. $\phi$ we are reducing $KL(q_\phi(z|x)\,||\,p_\theta(z|x))$ (this can be seen by noticing that the LHS of Eq. 11 is independent of $\phi$, therefore an increase of the ELBO must be accompanied by a reduction of the KL term) and therefore making the approximate posterior more similar to the true posterior
2. when we maximize the ELBO w.r.t. $\theta$ we are most likely pushing the evidence higher and therefore making the model more tuned to the observed data.

Therefore maximizing the ELBO with respect to both $\theta$ and $\phi$ is a very desirable thing to do. In a Machine Learning framework we usually minimize a loss function, so if we are interested in maximizing the ELBO we can simply set:
\begin{equation}
Loss=-ELBO(\theta,\phi)=Loss_1+Loss_2
\end{equation}
where $Loss_1$ and $Loss_2$ correspond to the two terms (the KL divergence and the Expected Log Likelihood) in the ELBO.

Conceptually, we are done. Practically, we still need to "massage" the ELBO expression a bit in order to write it as a simple analytical function of the outputs of the decoder NN and the inference NN. This is a necessary step if we want to use modern ML frameworks (such as Keras, pyTorch, etc.), which need to be able to automatically compute the derivatives of the Loss function and backpropagate to update the parameters $\phi,\theta$. Let us start with the KL divergence term in the ELBO.
Since both the prior $p(z)=N(z\,|\,0,I)$ and the approximate posterior $q_\phi(z|x)=N(z\,|\,\mu_\phi(x),\sigma_\phi(x))$ are Gaussian, the KL term can be computed analytically:
$$ Loss_1 = KL(q_\phi(Z|X)\,||\,p(Z)) = -\frac{1}{2} \sum_{k=1}^{D_z} \left(1+\log\sigma^2(x)_k- \mu(x)^2_k - \sigma^2(x)_k \right) $$
Note that this expression is a simple (analytical) function of the outputs of the inference network ($\mu_\phi(x),\sigma_\phi(x)$), and is therefore ready for use in ML frameworks based on backpropagation. **This part of the Loss function encourages the posterior distribution to be close to the prior and can be interpreted as a regularization term which prevents the parameters $\mu_\phi(x)$ and $\sigma_\phi(x)$ from becoming too different from $0$ and $1$ respectively.**

The second term in the ELBO is the Expected Log Likelihood (ELL):
$$Loss_2 = - ELL(\theta,\phi) = - E_{q_\phi(z|x)} [\log p_\theta(x|z)] = - \int dz\,q_\phi(z|x)\,\log p_\theta(x|z) $$
This expression is **not** simple enough to be used directly for backpropagation: it involves an integral over the distribution $q_\phi(z|x)$, which depends on the parameters $\phi$ which we are trying to optimize. Interestingly, the expression above can be rewritten exactly as:
$$ Loss_2= - \int dz\,q_\phi(z|x)\,\log p_\theta(x|z) = - \int d\epsilon\,N(\epsilon|0,I)\,\log p_\theta(x|\mu_\phi(x)+\sigma_\phi(x) \epsilon)$$

The equivalence between the two expressions can be seen by thinking about the "Monte-Carlo way" of computing these integrals. The first expression can be computed by:
1. drawing multiple $z$ from the distribution $q_\phi(z|x)=N(z\,|\,\mu_\phi(x),\sigma_\phi(x))$
2. then using those values to compute $\log p_\theta(x|z)$.

The second expression can be computed by:
1. drawing multiple $\epsilon$ from the distribution $N(0,I)$
2. using those values of $\epsilon$ to compute $z=\mu_\phi(x)+\sigma_\phi(x) \epsilon$
3. computing $\log p_\theta(x|z)$

It is easy to see that, if $\epsilon\sim N(0,I)$, then $z=\mu_\phi(x)+\sigma_\phi(x)\epsilon$ is drawn from $N(z\,|\,\mu_\phi(x),\sigma_\phi(x))=q_\phi(z|x)$, and therefore steps 1 and 2 are equivalent to drawing $z$ from the distribution $q_\phi(z|x)$. This means that the two expressions for $Loss_2$ are equivalent.

The final step is to approximate the integral over $d\epsilon$ with a single Monte Carlo sample, i.e.:
$$ Loss_2 \approx - \log p_\theta\left(x|\mu_\phi(x)+\sigma_\phi(x) \epsilon\right),\quad\text{where}\quad \epsilon\sim N(0,I)$$
We have finally reached an expression simple enough to be used in ML frameworks based on backpropagation. **The ELL term in the Loss function is the reconstruction error. It encourages the encoder and the decoder to be inverses of each other.** To see this, imagine that a region in the latent space is mapped to a region in the original space, i.e. $p_\theta(x|z)$ is high between those two regions. Then $Loss_2$ will be small if $q_\phi(z|x)$ is also high between those same two regions. So we can say that, at the end of training, the mapping $z\to x$ described by $p_\theta(x|z)$ is the inverse of the mapping $x\to z$ described by $q_\phi(z|x)$.

### Reparametrization Trick

More formally, the manipulation that we have done on the ELL is known as the reparametrization trick.
The reparameterization trick is a straightforward change of variables that expresses the random variable $z\sim q_\phi(z|x)$ as a deterministic transformation $g_\phi(x,\epsilon)$ (with parameters $\phi$) of the input $x$ and another random variable $\epsilon$:
$$ z=g_\phi(x,\epsilon),\quad \epsilon\sim p(\epsilon) $$
Note that $p(\epsilon)$ is parameter-free and independent of $x$ and $\phi$. Then, for any function $f(x,z)$, we have:
$$ E_{q_\phi(z|x)} \left[f(x,z)\right] = E_{\epsilon} \left[f\left(x,g_\phi(x,\epsilon)\right)\right] \approx f\left(x,g_\phi(x,\epsilon)\right),\quad\text{with}\quad \epsilon\sim p(\epsilon)$$
The advantage of this reparametrization is that the parameters $\phi$ now appear in the integrand but not in the distribution with respect to which the integrand is averaged. In the final step we have replaced the expectation value with a single Monte Carlo draw. **Note that, in general, the change of variables from $z$ to $\epsilon$ leads to the introduction of a Jacobian.**

## Implementation of VAE using pyTorch

The equations for the encoder are:
$$ a_x=relu(W_x x + b_x) \\ \mu_z = W_\mu a_x + b_\mu \\ \log\sigma_z = W_\sigma a_x + b_\sigma $$
where $x$ has dimension $D_x$, $a_x$ has dimension $D_\text{hidden_encoder}$ and $z$ has dimension $D_z$.

The equations for the decoder are:
$$ a_2 = relu(W_1 z + b_1) \\ a_3 = sigmoid(W_2 a_2 + b_2) \\ p(\tilde{x}|a_3) = Bern(\tilde{x},a_3) = a_3^{\tilde{x}} (1-a_3)^{(1-\tilde{x})} $$
where $z$ has dimension $D_z$, $a_2$ has dimension $D_\text{hidden_decoder}$ and $a_3$ has dimension $D_x$.

The way the encoder and decoder are coupled together is:
$$ \mu_z, \log\sigma_z = \text{encoder}(x_\text{input}) \\ \sigma_z = \exp\left[\log\sigma_z\right] \\ \epsilon \sim N(0,I) \\ z = \mu_z + \sigma_z * \epsilon \\ x_\text{output} = \text{decoder}(z) $$

Let us implement this in pyTorch now!
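Before diving into the notebook, a quick numerical sanity check (a sketch added here, not part of the derivation above) that the closed-form $Loss_1$ agrees with PyTorch's built-in KL divergence for diagonal Gaussians:

```python
# Sanity check: closed-form Loss_1 vs torch.distributions for the KL term.
import torch
from torch.distributions import Normal, kl_divergence

mu = torch.tensor([0.3, -1.2])
log_sigma2 = torch.tensor([0.1, -0.5])
sigma = torch.exp(0.5 * log_sigma2)

kl_closed_form = -0.5 * torch.sum(1 + log_sigma2 - mu.pow(2) - log_sigma2.exp())
kl_library = kl_divergence(Normal(mu, sigma), Normal(torch.zeros(2), torch.ones(2))).sum()
print(kl_closed_form.item(), kl_library.item())  # the two numbers agree
```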
### Let's start by importing some packages and defining helper functions


```python
import torch
import numpy as np
import torch.nn.functional as F
import torchvision
from torchvision import transforms
from torchvision.utils import save_image
import torch.optim as optim
from torch import nn
import matplotlib.pyplot as plt
%matplotlib inline

# Decide if the system has GPUs
if torch.cuda.is_available():
    device = torch.device('cuda')
    dtype = torch.cuda.FloatTensor
else:
    device = torch.device('cpu')
    dtype = torch.FloatTensor
print(device)
print(dtype)

def save_everything(model_object, optimizer_object, filepath):
    """ Save all the quantities related to model and optimizer """
    state = model_object.save_model()
    state['optimizer'] = optimizer_object.state_dict()
    torch.save(state, filepath)

def load_everything(model_object, optimizer_object, filepath):
    """ Load all the quantities related to the object from file """
    state = torch.load(filepath)
    model_object.load_model(state)
    optimizer_object.load_state_dict(state['optimizer'])

def show_batch(images):
    """ Simple function to show the images """
    im = torchvision.utils.make_grid(images)
    plt.imshow(np.transpose(im.numpy(), (1, 2, 0)))
```

    cuda
    <class 'torch.cuda.FloatTensor'>


## Define the Encoder, Decoder and VAE classes


```python
# Define the encoder class by specifying its layers and the connections between layers
class Encoder(torch.nn.Module):
    def __init__(self, DIM_x, DIM_hidden_encoder, DIM_z):
        super().__init__()
        self.linear1 = torch.nn.Linear(DIM_x, DIM_hidden_encoder)
        self.z_mu = torch.nn.Linear(DIM_hidden_encoder, DIM_z)
        self.z_log_sigma2 = torch.nn.Linear(DIM_hidden_encoder, DIM_z)

    def forward(self, x):
        x = F.relu(self.linear1(x))
        z_mu = self.z_mu(x)
        z_log_sigma2 = self.z_log_sigma2(x)
        return z_mu, z_log_sigma2

# Define the decoder class by specifying its layers and the connections between layers
class Decoder(torch.nn.Module):
    def __init__(self, DIM_z, DIM_hidden_decoder, DIM_x):
        super().__init__()
        self.linear1 = torch.nn.Linear(DIM_z, DIM_hidden_decoder)
        self.linear2 = torch.nn.Linear(DIM_hidden_decoder, DIM_x)

    def forward(self, z):
        z = F.relu(self.linear1(z))
        a3 = torch.sigmoid(self.linear2(z))
        return a3

# Define the VAE class by combining encoder and decoder
class VAE(torch.nn.Module):
    def __init__(self, encoder, decoder):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.loss_history = []
        self.epoch = 0
        self.training_time = 0
        self.BCE_loss = torch.nn.BCELoss(reduction='sum')

    def save_model(self):
        state = {
            'state_dict': self.state_dict(),
            'loss_history': self.loss_history,
            'epoch': self.epoch,
            'training_time': self.training_time
        }
        return state

    def load_model(self, state):
        self.load_state_dict(state['state_dict'])
        self.loss_history = state['loss_history']
        self.epoch = state['epoch']
        self.training_time = state['training_time']

    def forward(self, x_input):
        # encoder
        z_mu, z_log_sigma2 = self.encoder(x_input)
        # connection between encoder and decoder
        z_sigma = torch.exp(0.5 * z_log_sigma2)
        # epsilon ~ N(0, I), created on the same device/dtype as z_sigma
        # (randn_like replaces the manual cpu/cuda branching)
        epsilon = torch.randn_like(z_sigma)
        z = z_mu + z_sigma * epsilon  # Reparameterization trick
        # decoder
        x_output = self.decoder(z)
        return x_output, z_mu, z_log_sigma2

    def compute_loss(self, x_input, x_output, z_mu, z_log_sigma2):
        # binary cross entropy
        BCE = self.BCE_loss(x_output, x_input)
        # Binary cross entropy between output and target
        # KL divergence
        z_sigma2 = torch.exp(z_log_sigma2)
        KLD = 0.5 * torch.sum(z_sigma2 + z_mu.pow(2) - 1 - z_log_sigma2)
        # print("BCE, KLD", BCE.item(), KLD.item())
        return BCE + KLD

    def train_one_epoch(self, trainloader):
        tmp = []
        for i, data in enumerate(trainloader, 0):  # loop over minibatches
            # Get images and flatten them into vectors
            images_in, labels = data
            images_in = images_in.to(device)  # put the data on GPU if necessary
            x_input = images_in.view(-1, DIM_x)

            # Run the VAE forward
            x_output, z_mu, z_log_sigma2 = self.forward(x_input)

            # Compute the loss
            loss = self.compute_loss(x_input, x_output, z_mu, z_log_sigma2)

            # compute the average loss in this epoch
            tmp.append(loss.item())  # add the loss to the tmp list

            # For each minibatch set the gradient to zero
            optimizer.zero_grad()
            loss.backward()   # do backprop and compute all the gradients
            optimizer.step()  # update the parameters

        # Save the average loss observed during the epoch
        self.loss_history.append(np.mean(tmp))
        self.epoch += 1

    def train(self, trainloader, DELTA_EPOCHS):
        import timeit
        tic = timeit.default_timer()
        for nn in range(DELTA_EPOCHS):
            self.train_one_epoch(trainloader)
            print("EPOCH loss=", self.epoch, self.loss_history[-1])
        toc = timeit.default_timer()
        self.training_time += toc - tic

    def evaluate_one_batch(self, trainloader):
        with torch.no_grad():
            images_in, labels = next(iter(trainloader))
            x_input = images_in.view(-1, DIM_x).to(device)
            x_output, z_mu, z_log_sigma = self.forward(x_input)
            out_reshaped = x_output.view(-1, 1, height, width)
            images_out = out_reshaped.cpu()
        return images_in, images_out
```

### Run the model


```python
if __name__ == '__main__':
    BATCH_SIZE = 64

    # torchvision.datasets.MNIST outputs a set of PIL images
    # We transform them to tensors
    transform = transforms.ToTensor()
    # transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])

    # Load and transform data
    trainset = torchvision.datasets.MNIST('./data', train=True, download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
    testset = torchvision.datasets.MNIST('./data', train=False, download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)

    # Inspect the data and extract the image size
    images, labels = next(iter(trainloader))
    _, _, height, width = images.size()

    # The other dimensions are arbitrary and chosen by the user
    DIM_x = height * width
    DIM_z = 2
    DIM_hidden_encoder = 256
    DIM_hidden_decoder = 128

    # Save the same set of test images for testing
    images_test_in, labels_test = next(iter(testloader))
    x_test_one_batch = images_test_in.view(-1, DIM_x).to(device)

    # Create instances of Encoder, Decoder and VAE on either GPU or CPU
    encoder = Encoder(DIM_x, DIM_hidden_encoder, DIM_z).to(device)
    decoder = Decoder(DIM_z, DIM_hidden_decoder, DIM_x).to(device)
    vae = VAE(encoder, decoder).to(device)  # note: the model lives on either gpu or cpu

    # Select the optimizer
    # optimizer = optim.Adam(vae.parameters(), lr=0.0001)
    optimizer = optim.RMSprop(vae.parameters())
```


```python
for k in range(10, 11):
    if (k > 0):
        load_everything(vae, optimizer, './vae_' + str(k - 1) + '.pth')
    vae.train(trainloader, 10)  # train the model for 10 EPOCHS
    save_everything(vae, optimizer, './vae_' + str(k) + '.pth')
    with torch.no_grad():
        x_test_out_one_batch, _, _ = vae(x_test_one_batch)
        images_in = x_test_one_batch.view(-1, 1, height, width).cpu()
        images_out = x_test_out_one_batch.view(-1, 1, height, width).cpu()
    if (k == 0):
        save_image(images_in, './input.png')
    save_image(images_out, './output_' + str(vae.epoch) + '.png')
```


```python
plt.semilogy(vae.loss_history[10:])
```


```python
plt.plot(vae.loss_history[10:])
images_in, images_out = vae.evaluate_one_batch(trainloader)
```


```python
show_batch(images_out)
```


```python
show_batch(images_in)
```


```python
z1 = list()
z2 = list()
mu_z1 = list()
mu_z2 = list()
labs = list()
with torch.no_grad():
    for i, data in enumerate(testloader, 0):
        # use the current batch; the original next(iter(testloader)) here
        # re-read the same first batch at every iteration
        images_in, labels = data
        x_input = images_in.view(-1, DIM_x).to(device)
        z_mu, z_log_sigma2 = vae.encoder(x_input)
        z_sigma = torch.exp(0.5 * z_log_sigma2)
        epsilon = torch.randn_like(z_sigma)
        z = z_mu + z_sigma * epsilon
        for a, b in z_mu.cpu().numpy():
            mu_z1.append(a)
            mu_z2.append(b)
        for c, d in z.cpu().numpy():
            z1.append(c)
            z2.append(d)
        for ll in labels.cpu().numpy():
            labs.append(ll)
```


```python
plt.scatter(z1, z2, c=labs, cmap='nipy_spectral')
plt.colorbar()
plt.xlabel('$z_1$')
plt.ylabel('$z_2$')
plt.savefig('./VAE_MNIST_latent_z.pdf')
plt.show()
```


```python
plt.scatter(mu_z1, mu_z2, c=labs, cmap='nipy_spectral')
plt.colorbar()
plt.xlabel(r'$\mu_{z1}$')
plt.ylabel(r'$\mu_{z2}$')
plt.savefig('./VAE_MNIST_latent_mu_z.pdf')
plt.show()
```


```python
from IPython.display import Image, display

listOfImageNames = ['./input.png',
                    './output_10.png',
                    './output_20.png',
                    './output_30.png',
                    './output_40.png',
                    './output_50.png',
                    './output_60.png',
                    './output_70.png',
                    './output_80.png',
                    './output_90.png',
                    './output_100.png',
                    './output_110.png']
for imageName in listOfImageNames:
    display(Image(filename=imageName))
```
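Finally, since the structured latent space is what makes generation possible, here is a short illustrative cell (added as a sketch; it assumes the trained `vae`, `device`, `height` and `width` from above) that decodes a regular grid of latent points into digits:

```python
# Decode a regular grid of latent points z = (z1, z2) into digit images.
n = 15
grid = np.linspace(-3, 3, n)
canvas = np.zeros((n * height, n * width))
with torch.no_grad():
    for i, z1 in enumerate(grid):
        for j, z2 in enumerate(grid):
            z = torch.tensor([[z1, z2]], dtype=torch.float32, device=device)
            a3 = vae.decoder(z)  # expected output E[x_tilde] = a_3
            canvas[i * height:(i + 1) * height,
                   j * width:(j + 1) * width] = a3.view(height, width).cpu().numpy()
plt.imshow(canvas, cmap='gray')
plt.axis('off')
plt.show()
```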
# -*- coding: utf-8 -*-

"""
-------------------------------------------------
   File Name:     TextRNN.py
   Description :  TextRNN + Attention implementation
   Author :       charlesXu
   date:          2019/1/9
-------------------------------------------------
   Change Activity: 2019/1/9:
-------------------------------------------------
"""

import datetime
import os
import pickle

from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np

import Text_preprocessing

logger = Text_preprocessing.PrintLog("TextRNN.log")


class TextRNN(object):
    """
    RNN with Attention mechanism for text classification
    """

    def __init__(self, vocab_size, embedding_size, rnn_size, num_layers, attention_size, num_classes,
                 learning_rate, grad_clip):
        '''
        :param vocab_size: vocabulary size
        :param embedding_size: word embedding dimension
        :param sequence_length: sequence length after sentence padding, UNUSED
        :param rnn_size: hidden layer dimension
        :param num_layers: number of rnn layers
        :param attention_size: attention layer dimension
        :param num_classes: number of target labels
        :param learning_rate: initial learning rate
        :param grad_clip: gradient clipping threshold
        '''
        self.input_x = tf.placeholder(tf.int32, shape=[None, None], name='input_x')
        self.input_y = tf.placeholder(tf.float32, shape=[None, num_classes], name='input_y')
        self.seq_len = tf.placeholder(tf.int32, shape=[None], name='seq_len')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        # Define Basic RNN Cell
        def basic_rnn_cell(rnn_size):
            return tf.contrib.rnn.GRUCell(rnn_size)
            # return tf.contrib.rnn.LSTMCell(rnn_size)

        # Define Forward RNN Cell
        with tf.name_scope('fw_rnn'):
            fw_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_rnn_cell(rnn_size) for _ in range(num_layers)])
            fw_rnn_cell = tf.contrib.rnn.DropoutWrapper(fw_rnn_cell, output_keep_prob=self.keep_prob)

        # Define Backward RNN Cell
        with tf.name_scope('bw_rnn'):
            bw_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_rnn_cell(rnn_size) for _ in range(num_layers)])
            bw_rnn_cell = tf.contrib.rnn.DropoutWrapper(bw_rnn_cell, output_keep_prob=self.keep_prob)

        # Embedding layer
        with tf.name_scope('embedding'):
            self.embedding = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                                         trainable=True, name='embeddings')
            # self.input_x shape: (batch_size, sequence_length)
            embedding_inputs = tf.nn.embedding_lookup(self.embedding, self.input_x)

        with tf.name_scope('bi_rnn'):
            # embedding_inputs shape: (batch_size, sequence_length, embedding_size)
            # rnn_output, _ = tf.nn.dynamic_rnn(fw_rnn_cell, inputs=embedding_inputs, sequence_length=self.seq_len, dtype=tf.float32)
            rnn_output, _ = tf.nn.bidirectional_dynamic_rnn(fw_rnn_cell, bw_rnn_cell, inputs=embedding_inputs,
                                                            sequence_length=self.seq_len, dtype=tf.float32)

        # In case of Bi-RNN, concatenate the forward and the backward RNN outputs
        if isinstance(rnn_output, tuple):
            rnn_output = tf.concat(rnn_output, 2)

        # BahdanauAttention Layer
        with tf.name_scope('attention'):
            hidden_size = rnn_output.shape[2].value
            attention_w = tf.Variable(tf.truncated_normal([hidden_size, attention_size], stddev=0.1), name='attention_w')
            attention_b = tf.Variable(tf.constant(0.1, shape=[attention_size]), name='attention_b')
            attention_u = tf.Variable(tf.truncated_normal([attention_size], stddev=0.1), name='attention_u')
            v = tf.tanh(tf.tensordot(rnn_output, attention_w, axes=1) + attention_b)
            vu = tf.tensordot(v, attention_u, axes=1, name='vu')
            alphas = tf.nn.softmax(vu, name='alphas')
            attention_output = tf.reduce_sum(rnn_output * tf.expand_dims(alphas, -1), 1)

        # Add dropout
        with tf.name_scope('dropout'):
            # attention_output shape: (batch_size, hidden_size)
            self.final_output = tf.nn.dropout(attention_output, self.keep_prob)

        # Fully connected layer
        with tf.name_scope('output'):
            fc_w = tf.Variable(tf.truncated_normal([hidden_size, num_classes], stddev=0.1), name='fc_w')
            fc_b = tf.Variable(tf.zeros([num_classes]), name='fc_b')
            self.logits = tf.matmul(self.final_output, fc_w) + fc_b
            self.logits_softmax = tf.nn.softmax(self.logits)
            self.predictions = tf.argmax(self.logits, 1, name='predictions')

        # Calculate cross-entropy loss
        with tf.name_scope('loss'):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)  # TODO: add params loss

        # Create optimizer
        with tf.name_scope('optimization'):
            optimizer = tf.train.AdamOptimizer(learning_rate)
            gradients, variables = zip(*optimizer.compute_gradients(self.loss))
            gradients, _ = tf.clip_by_global_norm(gradients, grad_clip)
            self.train_op = optimizer.apply_gradients(zip(gradients, variables), global_step=self.global_step)

        # Calculate accuracy
        with tf.name_scope('accuracy'):
            correct_pred = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))


def train_rnn(X, y, batch_size, num_epoch, output_dir, val_X=None, val_y=None):
    with tf.Graph().as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            rnn = TextRNN(
                # TODO: tune params and put these params to train_rnn params
                vocab_size=100000,
                embedding_size=128,
                rnn_size=128,
                num_layers=2,
                attention_size=50,
                num_classes=3,
                learning_rate=1e-3,
                grad_clip=5
            )

            tf.summary.scalar("loss", rnn.loss)
            tf.summary.scalar("accuracy", rnn.accuracy)
            merged_summary = tf.summary.merge_all()

            print("Writing to {}...\n".format(output_dir))
            train_summary_dir = os.path.join(output_dir, "summaries", "train")
            val_summary_dir = os.path.join(output_dir, "summaries", "val")
            train_summaries_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
            val_summaries_writer = tf.summary.FileWriter(val_summary_dir, sess.graph)

            # Checkpoint directory, will not create itself
            checkpoint_dir = os.path.join(output_dir, "checkpoints")
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists(checkpoint_dir):
                os.makedirs(checkpoint_dir)
            saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)  # maximum number of checkpoints to keep

            # Initialize all variables
            sess.run(tf.global_variables_initializer())

            batches = Text_preprocessing.batch_iter(X, y, batch_size, num_epoch, shuffle=True)
            for x_batch, seq_lens, y_batch in batches:
                feed_dict = {rnn.input_x: x_batch, rnn.input_y: y_batch, rnn.seq_len: seq_lens, rnn.keep_prob: 0.5}
                _, global_step, train_summaries, train_loss, train_accuracy = sess.run(
                    [rnn.train_op, rnn.global_step, merged_summary, rnn.loss, rnn.accuracy],
                    feed_dict=feed_dict
                )
                time_str = datetime.datetime.now().isoformat()
                print("{}: step {}, train loss {:g}, train acc {:g}".format(time_str, global_step, train_loss, train_accuracy))
                train_summaries_writer.add_summary(summary=train_summaries, global_step=global_step)

                if global_step % 100 == 0:
                    path = saver.save(sess=sess, save_path=checkpoint_prefix, global_step=global_step)
                    print("Save model checkpoint to {}\n".format(path))

                if val_X is not None:
                    if global_step % 100 == 0:
                        x_val, val_seq_lens = Text_preprocessing.pad_sentence(val_X)
                        feed_dict = {rnn.input_x: x_val, rnn.input_y: val_y, rnn.seq_len: val_seq_lens, rnn.keep_prob: 1.0}
                        val_summaries, val_loss, val_accuracy = sess.run(
                            [merged_summary, rnn.loss, rnn.accuracy],
                            feed_dict=feed_dict
                        )
                        val_summaries_writer.add_summary(val_summaries, global_step=global_step)
                        print("global_step: {}, val loss: {:g}, val acc: {:g}".format(global_step, val_loss, val_accuracy))


def predict(x_test, y_test=None, checkpoint_dir=None):
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir)
    print(checkpoint_file)
    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False
        )
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)

            # Get the placeholders from graph by name
            input_x = graph.get_operation_by_name("input_x").outputs[0]
            seq_len = graph.get_operation_by_name("seq_len").outputs[0]
            keep_prob = graph.get_operation_by_name("keep_prob").outputs[0]

            # Tensor we want to evaluate
            predictions = graph.get_operation_by_name("output/predictions").outputs[0]

            # Collect the predictions
            all_predictions = np.array([], dtype=np.int64)
            if y_test is not None:
                batches = Text_preprocessing.batch_iter(x_test, y_test, 64, 1, shuffle=False)
                for x_batch, seq_lens, _ in batches:  # _ stands for y_batch
                    feed_dict = {input_x: x_batch, seq_len: seq_lens, keep_prob: 1.0}
                    batch_prediction = sess.run(
                        predictions,
                        feed_dict=feed_dict
                    )
                    all_predictions = np.concatenate([all_predictions, batch_prediction])
                print("all predictions: ", all_predictions)
                correct_predictions = np.sum(all_predictions == np.argmax(y_test, axis=1))
                accuracy = correct_predictions / len(y_test)
                print("test data accuracy: ", accuracy)
            else:
                for i in range(0, len(x_test), 64):  # set batch_size to 64
                    padded_sentences, seq_lens = Text_preprocessing.pad_sentence(x_test[i:i + 64])
                    feed_dict = {input_x: padded_sentences, seq_len: seq_lens, keep_prob: 1.0}
                    batch_prediction = sess.run(
                        predictions,
                        feed_dict=feed_dict
                    )
                    all_predictions = np.concatenate([all_predictions, batch_prediction])
    return all_predictions


if __name__ == '__main__':
    with open("label_array.txt", 'rb') as f:
        labels = pickle.load(f)
    with open("indexed_words.txt", 'rb') as f:
        text_list = pickle.load(f)
    x_train, x_test, y_train, y_test = train_test_split(text_list, labels, train_size=0.8, random_state=1)
    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size=0.9, random_state=1)
    train_rnn(x_train, y_train, 64, 5, "rnn", val_X=x_val, val_y=y_val)
    # model = TextRNN(vocab_size=8000, embedding_size=150, rnn_size=100, num_layers=2,
    #                 attention_size=50, num_classes=30, learning_rate=0.001, grad_clip=5.0)
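The attention block in `TextRNN.__init__` above computes one soft weight per time step and then a weighted sum over the RNN outputs. The following NumPy sketch mirrors the same tensordot / softmax / weighted-sum sequence with made-up sizes (batch 2, sequence 5, hidden 8, attention 4); it is a shape-only illustration, not part of the model code.

```python
import numpy as np

batch, seq_len, hidden, att = 2, 5, 8, 4  # made-up sizes for illustration

rnn_output = np.random.randn(batch, seq_len, hidden)
attention_w = np.random.randn(hidden, att)
attention_b = np.zeros(att)
attention_u = np.random.randn(att)

v = np.tanh(np.tensordot(rnn_output, attention_w, axes=1) + attention_b)  # (batch, seq_len, att)
vu = np.tensordot(v, attention_u, axes=1)                                 # (batch, seq_len)
alphas = np.exp(vu) / np.exp(vu).sum(axis=1, keepdims=True)               # softmax over time steps
attention_output = (rnn_output * alphas[..., None]).sum(axis=1)           # (batch, hidden)

print(attention_output.shape)  # -> (2, 8)
```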
module Main

%default total

%logging "declare.data.parameters" 20
%logging "eval.eta" 10

-- explicit
data Value : (value : Nat -> Type) -> Type where
  EmptyV : {0 value : Nat -> Type} -> Value (\ n => value n)

data TValue : Nat -> Type where
  MkTupleV : Value (\n => TValue n) -> TValue n
# Characterization of Systems in the Time Domain *This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelors module Signals and Systems, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [[email protected]](mailto:[email protected]).* ## Eigenfunctions An [eigenfunction](https://en.wikipedia.org/wiki/Eigenfunction) of a system is defined as the input signal $x(t)$ which produces the output signal $y(t) = \mathcal{H}\{ x(t) \} = \lambda \cdot x(t)$ with $\lambda \in \mathbb{C}$. The weight $\lambda$ associated with $x(t)$ is known as scalar eigenvalue of the system. Hence, besides a weighting factor, an eigenfunction is not modified by passing through the system. [Complex exponential signals](../continuous_signals/standard_signals.ipynb#Complex-Exponential-Signal) $e^{s t}$ with $s \in \mathbb{C}$ are eigenfunctions of linear time-invariant (LTI) systems. This can be proven by applying the properties of LTI systems. Lets assume a generic LTI system with input signal $x(t) = e^{s t}$ and output signal $y(t) = \mathcal{H}\{ x(t) \}$. The response of the LTI system to the shifted input signal $x(t-\tau) = e^{s (t-\tau)}$ reads \begin{equation} y(t - \tau) = \mathcal{H}\{ x(t-\tau) \} = \mathcal{H}\{ e^{-s \tau} \cdot e^{s t} \} \end{equation} due to the implied shift-invariance. Now considering the implied linearity this can be reformulated as \begin{equation} y(t - \tau) = e^{-s \tau} \cdot \mathcal{H}\{ e^{s t} \} = e^{-s \tau} \cdot y(t) \end{equation} It is straightforward to show that $y(t) = \lambda e^{st}$ fulfills above equation \begin{equation} \lambda e^{s t} e^{-s \tau} = e^{-s \tau} \lambda e^{st} \end{equation} **Example** An LTI system whose input/output relation is given by the following inhomogeneous linear ordinary differential equation (ODE) with constant coefficients is investigated \begin{equation} a_0 y(t) + a_1 \frac{d y(t)}{dt} + a_2 \frac{d^2 y(t)}{dt^2} = x(t) \end{equation} with $a_i \in \mathbb{R} \quad \forall i$. In the remainder, the output signal $y(t)$ of the system is computed by explicit solution of the ODE for $x(t) = e^{s t}$ as input signal. Integration constants are discarded for ease of illustration. ```python import sympy as sym %matplotlib inline sym.init_printing() t, s, a0, a1, a2 = sym.symbols('t s a:3') x = sym.exp(s * t) y = sym.Function('y')(t) ode = sym.Eq(a0*y + a1*y.diff(t) + a2*y.diff(t, 2), x) solution = sym.dsolve(ode) solution.subs({'C1': 0, 'C2': 0}) ``` **Exercises** * Is the complex exponential signal an eigenfunction of the system? * Introduce $x(t) = e^{s t}$ and $y(t) = \lambda \cdot e^{s t}$ into the ODE and solve manually for the eigenvalue $\lambda$. How is the result related to above result derived by solving the ODE? * Can you generalize your findings to an ODE of arbitrary order? **Example** The following inhomogeneous linear ODE with time-dependent coefficient is considered as an example for a **time-variant** but linear system \begin{equation} t \cdot \frac{d y(t)}{dt} = x(t) \end{equation} The output signal $y(t)$ of the system for a complex exponential signal at the input $x(t) = e^{st}$ is computed by explicit solution of the ODE. Again integration constants are discarded. ```python ode = sym.Eq(t*y.diff(t), x) solution = sym.dsolve(ode) solution.subs('C1', 0) ``` Note, $\text{Ei}(\cdot)$ denotes the [exponential integral](http://docs.sympy.org/latest/modules/functions/special.html#sympy.functions.special.error_functions.Ei). 
The response $y(t)$ of the time-variant system is not equal to a weighted complex exponential signal $\lambda \cdot e^{s t}$. It can be concluded that complex exponentials are no eigenfunctions of this particular time-variant system. **Example** A final example considers the following non-linear inhomogeneous ODE with constant coefficients \begin{equation} \left( \frac{d y(t)}{dt} \right)^2 = x(t) \end{equation} as example for a **non-linear** but time-invariant system. Again, the output signal $y(t)$ of the system for a complex exponential signal at the input $x(t) = e^{st}$ is computed by explicit solution of the ODE. As before, integration constants are discarded. ```python ode = sym.Eq(y.diff(t)**2, x) solution = sym.dsolve(ode) [si.subs('C1', 0) for si in solution] ``` Obviously for this non-linear system complex exponential signals are no eigenfunctions. ## Transfer Function The complex eigenvalue $\lambda$ constitutes the weight of a complex exponential signal $e^{st}$ (using complex frequency $s$) experiences when passing through an LTI system. It is commonly termed as [*transfer function*](https://en.wikipedia.org/wiki/Transfer_function) and is denoted by $H(s)=\lambda(s)$. Using this definition, the output signal $y(t)$ of an LTI system for a complex exponential signal at the input reads \begin{equation} y(t) = \mathcal{H} \{ e^{st} \} = H(s) \cdot e^{st} \end{equation} Note that the concept of the transfer function is directly linked to the linearity and time-invariance of a system. Only in this case, complex exponential signals are eigenfunctions of the system and $H(s)$ describes the properties of an LTI system with respect to these. Above equation can be rewritten in terms of the magnitude $| H(s) |$ and phase $\varphi(s) = \arg \{ H(s) \}$ of the complex transfer function $H(s)$ \begin{equation} y(t) = | H(s) | \cdot e^{s t + j \varphi(s)} \end{equation} The magnitude $| H(s) |$ provides the frequency dependent attenuation/amplification of the eigenfunction $e^{st}$ by the system, while $\varphi(s)$ provides the introduced phase-shift. ## Link between Transfer Function and Impulse Response In order to establish a link between the transfer function $H(s)$ and the impulse response $h(t)$, the output signal $y(t) = \mathcal{H} \{ x(t) \}$ of an LTI system with input signal $x(t)$ is considered. It is given by convolving the input signal with the impulse response \begin{equation} y(t) = x(t) * h(t) = \int_{-\infty}^{\infty} x(t-\tau) \cdot h(\tau) \; d\tau \end{equation} For a complex exponential signal as input $x(t) = e^{st}$, the output of an LTI system is given as $y(t) = \mathcal{H} \{ e^{st} \} = H(s) \cdot e^{st}$. Introducing both signals into the convolution integral yields \begin{equation} H(s) \cdot e^{st} = \int_{-\infty}^{\infty} e^{st} e^{-s \tau} \cdot h(\tau) \; d\tau \end{equation} which after canceling $e^{s t}$ (the integral depends not on $t$) results in \begin{equation} H(s) = \int_{-\infty}^{\infty} h(\tau) \cdot e^{-s \tau} \; d\tau \end{equation} under the assumption that the integral converges. The transfer function $H(s)$ can be computed from the impulse response $h(t)$ by integrating over the impulse response multiplied with the complex exponential function $e^{- s \tau}$. This constitutes an integral transformation, which is later introduced in more detail as [Laplace transform](https://en.wikipedia.org/wiki/Laplace_transform). 
Usually the temporal variable $t$ is then used \begin{equation} H(s) = \int_{-\infty}^{\infty} h(t) \cdot e^{-s t} \; d t \end{equation} rather than $\tau$ which remained from the convolution integral calculus. **Copyright** This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Sascha Spors, Continuous- and Discrete-Time Signals and Systems - Theory and Computational Examples*.
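**Example**

As a quick symbolic check of the last relation, consider the hypothetical impulse response $h(t) = e^{-t}$ for $t \geq 0$ (and zero otherwise). For $\text{Re}\{s\} > -1$ the integral converges and yields $H(s) = \frac{1}{s + 1}$; the `conds='none'` argument tells SymPy to drop the convergence conditions from the result:

```python
import sympy as sym

t, s = sym.symbols('t s')
H = sym.integrate(sym.exp(-t) * sym.exp(-s * t), (t, 0, sym.oo), conds='none')
sym.simplify(H)
```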
import unittest

import torch
import numpy as np

from networks import DDPGActor


class NetworkTest(unittest.TestCase):
    def test_DDPGActor(self):
        batch_size = 8
        state_size = 32
        action_size = 4
        net = DDPGActor([[None, state_size]], [[None, action_size]])
        fake_states = np.random.uniform(-1, 1, batch_size * state_size).reshape([batch_size, state_size]).astype(np.float32)
        fake_states = torch.from_numpy(fake_states)
        actions = net.act(fake_states)
        self.assertEqual(actions.shape[0], batch_size)
        self.assertEqual(actions.shape[1], action_size)
        numpy_values = actions.data.numpy()
        self.assertTrue(np.all(numpy_values <= 1.))
        self.assertTrue(np.all(numpy_values >= -1.))
        loss = actions.sum()
        loss.backward()
        self.assertTrue(net.fc1.weight.grad is not None)
        net.zero_grad()
        clone = net.clone()
        for a, b in zip(net.parameters(), clone.parameters()):
            self.assertTrue(np.all(a.data.numpy() == b.data.numpy()))
        with torch.no_grad():
            for a in net.parameters():
                a.copy_(a + 1.)
        for a, b in zip(net.parameters(), clone.parameters()):
            self.assertTrue(np.all(a.data.numpy() != b.data.numpy()))
section \<open>Basics\<close> theory Basic imports Main begin subsection \<open>Miscellaneous\<close> abbreviation (input) "const x \<equiv> \<lambda> _. x" lemmas [simp] = map_prod.id map_prod.comp[symmetric] lemma prod_UNIV[iff]: "A \<times> B = UNIV \<longleftrightarrow> A = UNIV \<and> B = UNIV" by auto lemma prod_singleton: "fst ` A = {x} \<Longrightarrow> A = fst ` A \<times> snd ` A" "snd ` A = {y} \<Longrightarrow> A = fst ` A \<times> snd ` A" by force+ lemma infinite_subset[trans]: "infinite A \<Longrightarrow> A \<subseteq> B \<Longrightarrow> infinite B" using infinite_super by this lemma finite_subset[trans]: "A \<subseteq> B \<Longrightarrow> finite B \<Longrightarrow> finite A" using finite_subset by this declare infinite_coinduct[case_names infinite, coinduct pred: infinite] lemma infinite_psubset_coinduct[case_names infinite, consumes 1]: assumes "R A" assumes "\<And> A. R A \<Longrightarrow> \<exists> B \<subset> A. R B" shows "infinite A" proof show "False" if "finite A" using that assms by (induct rule: finite_psubset_induct) (auto) qed (* TODO: why are there two copies of this theorem? *) thm inj_on_subset subset_inj_on lemma inj_inj_on[dest]: "inj f \<Longrightarrow> inj_on f S" using inj_on_subset by auto end
function fibo(n::Int)
    if n < 2
        return n
    end
    fibo(n - 1) + fibo(n - 2)
end

println(fibo(45))
{ -- MF/TIN CORE (always include) functions -- Copyright (c) 2003 by Jeff Massung } \ inlined peek and poke values into GBA memory icode-thumb peek 0 ( a -- h ) tos 0@ tos ldrh, end-code icode-thumb peekb 0 ( a -- h ) tos 0@ tos ldrb, end-code icode-thumb peekw 0 ( a -- h ) tos 0@ tos ldr, end-code \ divide and modulo :i / ( n1 n2 -- n3 ) swap a! 7 swi ; :i mod ( n1 n2 -- n3 ) swap a! 7 swi drop a@ ; code-thumb iwram 10/ ( n -- q ) v0 v5 mov, 15 ## tos v0 lsr, __recip eq? b, 10 ## a mov, 6 swi, v5 v0 mov, ret l: __recip 3277 v0 LITERAL tos v0 mul, 15 ## v0 v0 lsr, 10 ## a mov, v0 a mul, a tos a sub, v0 tos mov, v5 v0 mov, ret end-code \ conditionals :n = ( n1 n2 -- flag ) - 0= ; :n <> ( n1 n2 -- flag ) - 0= com ; :i < ( n1 n2 -- flag ) - 0< ; :i > ( n1 n2 -- flag ) swap - 0< ; :i <= ( n1 n2 -- flag ) swap - 0< com ; :i >= ( n1 n2 -- flag ) - 0< com ; \ fixed point math operations :i f* ( n1 n2 -- n3 ) * 8 # a/ ; :n f/ ( n1 n2 -- n3 ) a! 8 # n* 6 swi ; \ the restore data pointer variable .idata \ transfer from data pointer to local address register :i >a ( -- ) .idata @ a! ; :i a> ( -- ) a@ .idata ! ; \ allocate bytes of data on the return stack code-thumb r-alloc ( u -- a ) rsp w mov, \ allocate space tos rsp rsp sub, rsp tos mov, 4 ## rsp sub, rsp 0@ w str, \ save old rsp \ save new returning adress 4 ## rsp sub, rsp 0@ u str, pc u mov, 3 ## u add, \ 2 to skip this insn and 1 for Thumb ret \ this is called on next return rsp ia! u ldm, rsp 0@ rsp ldr, u bx, end-code \ copy bytes from one address to another code-thumb copy ( to from u -- ) v0 v1 pop tos tos tst, __end eq? b, \ loop l: __copy \ transfer v0 0@ w ldrh, 2 ## v0 add, v1 0@ w strh, 2 ## v1 add, \ decrement and loop 2 ## tos sub, \ subs, actually \ if > 0 then continue __copy gt? b, l: __end tos pop ret end-code \ erase bytes at an address code-thumb erase ( a u -- ) v0 pop w w eor, tos tos tst, \ if <= 0 then return __exit le? b, \ loop l: __erase \ erase v0 0@ w str, 4 ## v0 add, \ decrement and loop 4 ## tos sub, \ subs, actually __erase gt? b, l: __exit tos pop ret end-code \ return the current scanline icode-thumb scanline 4 ( -- n ) tos push $40 ## tos mov, 20 ## tos tos lsl, \ REGISTERS tos 6 #( tos ldrh, end-code
x <= a or '0';
While we may be a newer real estate business in Marfa, we are not new to Marfa or to the business. Our Broker Bobbie Meader and our Salesperson Lauren Meader together have over 40 years of experience in residential and commercial sales, property management and vacation rental management. As a full-service brokerage with offices in Marfa, we offer the highest level of professionalism, whether marketing your estate or developing your vacation home into an income-producing property. Contact us to find out how we are different!

Before you travel to Marfa, plan ahead! Reservations for lodging and dining are a must. On Sunday and Monday almost everything is closed, and daily from 2-5 p.m. no restaurants are open except Dairy Queen and Subway.
With the rise of beauty tools and devices, I thought I'd share with you my two favorite products. I have to be honest and say I was really skeptical about them initially. Could a roller reaaaaally help with wrinkles and bloating? And does this microcurrent device ACTUALLY contract my muscles and help manage skin elasticity and wrinkles?? Well, to my pleasant surprise, I think they actually do. I feel like I've noticed a dramatic improvement in my skin with regular use. The key word is regular. I've been using the devices for months now and I can tell the difference between regular and occasional use.

I know the NuFace Microcurrent Device is on the higher price point, ranging from $199 to $325. Shiz cost money. The way I see it: it saves me the money I'd spend going to an aesthetician to receive the same treatment. I bought the mini size because 1. cheaper 2. way more compact and easier to travel with. The self-treatment can range from 5 minutes to 20, all depending on how detailed you want to be. It's also recommended to use the device 5 times a week for best results. I would argue this to be true. The device delivers such a low current that frequent use is likely what delivers the max benefits.

I was skeptical about the effectiveness of this product when I first bought it. I knew about microcurrent treatments for wound care but wasn't sure how it would work for healthy tissues. So I was really surprised when I could feel the current and mild muscle contractions when I used the tool. Maybe it's just psychological but I totally felt assured that it was working because of that feeling. Being that it is an electrical device, I was really paranoid about shocking myself. I applied WAYYY too much gel the first time and it took me forever to wash my face after. You do need to make sure to have gel on all treatment spots or else it will sting, but a thin to moderate layer is sufficient.

There are a kajillion different types of jade rollers out there. Sifting through Amazon for the best one is an extremely overwhelming process. But after scouring the website and different beauty sites for jade rollers...I realized that it's all the freaking same. Maybe the roller shape and handle size are a little different, but otherwise I didn't see a major difference between an expensive and a cheap roller. I bought this one for myself and a friend. It's a low price point and the product is good. I will say the metal does rust...So store your roller in a dry area. I kept mine in my bathroom and the condensation has rusted the metal portion.

Using this roller has been a game changer for me. When I travel to hot/humid areas, I keep it in the fridge so I can use it to manage my eczema. It came in extremely handy when I was in Kauai. The bloating and eczema game was STRONG...So using the roller combined with a daily face ice massage kept my rashes at bay for the most part. I love using this to push in my facial products at the very end and to decrease the morning bloat. ALSO! I found that it works really well to target particular trouble areas for wrinkles. I had deep creasing between the brows and using the roller for 5 minutes twice a day has actually smoothed out that wrinkle.

I honestly think these two tools are worth the investment. I'm likely going to buy a couple more of the jade rollers to keep in my travel bags. Using these two devices has made such a big difference in skin texture, quality, and firmness. You obviously can't fight genetics but I feel like using these two has at least kept things together.
At the least, I would highly recommend the facial roller. It's easy to use, doesn't need anything extra. And it's the cheapest. The NuFace is worth the splurge but can wait. I've been using these at the end of the day as a way to unwind from the crazy of the hours before. I think it's important to develop good self-care habits and I have used these guys as a part of that process. You can obviously use them at any point of the day but I've found that it's easiest to incorporate them during my nighttime skincare routine.
lemma const_in_iff: "c \<noteq> 0 \<Longrightarrow> (\<lambda>_. c) \<in> L F (f) \<longleftrightarrow> (\<lambda>_. 1) \<in> L F (f)"
/* * mkSVG.cpp * MonkSVG * * Created by Micah Pearlman on 8/2/10. * Copyright 2010 Zero Vision. All rights reserved. * */ #include "mkSVG.h" #include "tinyxml.h" #include <map> #include <iterator> #include <boost/foreach.hpp> #include <boost/tokenizer.hpp> #include <boost/regex.hpp> using namespace boost; namespace MonkSVG { bool SVG::initialize( ISVGHandler::SmartPtr handler ) { _handler = handler; return true; } bool SVG::read( const char* data ) { TiXmlDocument doc; doc.Parse( data ); if (doc.Error()) { return false; } TiXmlElement* root = doc.FirstChild( "svg" )->ToElement(); recursive_parse( root ); // get bounds information from the svg file, ignoring non-pixel values string numberWithUnitString; regex numberWithUnitPattern( "^(-?\\d+)(px)?$" ); _handler->_minX = 0.0f; if ( root->QueryStringAttribute( "x", &numberWithUnitString ) == TIXML_SUCCESS ) { match_results<string::const_iterator> matches; if ( regex_search( numberWithUnitString, matches, numberWithUnitPattern ) ) { _handler->_minX = ::atof( matches[1].str().c_str() ); } } _handler->_minY = 0.0f; if ( root->QueryStringAttribute( "y", &numberWithUnitString ) == TIXML_SUCCESS ) { match_results<string::const_iterator> matches; if ( regex_search( numberWithUnitString, matches, numberWithUnitPattern ) ) { _handler->_minY = ::atof( matches[1].str().c_str() ); } } _handler->_width = 0.0f; if ( root->QueryStringAttribute( "width", &numberWithUnitString ) == TIXML_SUCCESS ) { match_results<string::const_iterator> matches; if ( regex_search( numberWithUnitString, matches, numberWithUnitPattern ) ) { _handler->_width = ::atof( matches[1].str().c_str() ); } } _handler->_height = 0.0f; if ( root->QueryStringAttribute( "height", &numberWithUnitString ) == TIXML_SUCCESS ) { match_results<string::const_iterator> matches; if ( regex_search( numberWithUnitString, matches, numberWithUnitPattern ) ) { _handler->_height = ::atof( matches[1].str().c_str() ); } } return true; } bool SVG::read( string& data ) { return read( data.c_str() ); } void SVG::recursive_parse( TiXmlElement* element ) { if ( element ) { for ( TiXmlElement* sibbling = element; sibbling != 0; sibbling = sibbling->NextSiblingElement() ) { handle_xml_element( sibbling ); } } if ( element ) { TiXmlElement* child = element->FirstChildElement(); // if we don't handle the element recursively go into it if( handle_xml_element( child ) == false ) { recursive_parse( child ); } } } bool SVG::handle_xml_element( TiXmlElement* element ) { if( !element ) return false; string type = element->Value(); if ( type == "g" ) { handle_group( element ); return true; } else if ( type == "path" ) { handle_path( element ); return true; } else if ( type == "rect" ) { handle_rect( element ); return true; } else if ( type == "polygon" ) { handle_polygon ( element ); return true; } else if( type == "symbol" ) { string id; if ( element->QueryStringAttribute( "id", &id ) == TIXML_SUCCESS ) { _symbols[id] = (TiXmlElement*)element->Clone(); } return true; } else if ( type == "use" ) { string href; if ( element->QueryStringAttribute( "xlink:href", &href ) == TIXML_SUCCESS ) { string id = href.substr( 1 ); // skip the # _handler->onUseBegin(); // handle transform and other parameters handle_general_parameter( element ); recursive_parse( _symbols[id] ); _handler->onUseEnd(); } return true; } return false; } void SVG::handle_group( TiXmlElement* pathElement ) { string id_; // if ( pathElement->QueryStringAttribute( "id", &id_) == TIXML_SUCCESS ) { // //_handler->onId( id_ ); // cout << "group begin: " << id_ << 
endl; // } _handler->onGroupBegin(); // handle transform and other parameters handle_general_parameter( pathElement ); // go through all the children TiXmlElement* children = pathElement->FirstChildElement(); for ( TiXmlElement* child = children; child != 0; child = child->NextSiblingElement() ) { string type = child->Value(); handle_xml_element( child ); } _handler->onGroupEnd(); // if ( pathElement->QueryStringAttribute( "id", &id_) == TIXML_SUCCESS ) { // //_handler->onId( id_ ); // cout << "group end: " << id_ << endl; // } } void SVG::handle_path( TiXmlElement* pathElement ) { // string id_; // if ( pathElement->QueryStringAttribute( "id", &id_) == TIXML_SUCCESS ) { // //_handler->onId( id_ ); // cout << "path: " << id_ << endl; // } _handler->onPathBegin(); string d; if ( pathElement->QueryStringAttribute( "d", &d ) == TIXML_SUCCESS ) { parse_path_d( d ); } handle_general_parameter( pathElement ); _handler->onPathEnd(); } void SVG::handle_rect( TiXmlElement* pathElement ) { _handler->onPathBegin(); float pos[2] = { 0, 0 }; if ( pathElement->QueryFloatAttribute( "x", &pos[0] ) == TIXML_SUCCESS ) { //parse_path_d( d ); } if ( pathElement->QueryFloatAttribute( "y", &pos[1] ) == TIXML_SUCCESS ) { //parse_path_d( d ); } float sz[2] = { 0, 0 }; if ( pathElement->QueryFloatAttribute( "width", &sz[0] ) == TIXML_SUCCESS ) { //parse_path_d( d ); } if ( pathElement->QueryFloatAttribute( "height", &sz[1] ) == TIXML_SUCCESS ) { //parse_path_d( d ); } _handler->onPathRect( pos[0], pos[1], sz[0], sz[1] ); handle_general_parameter( pathElement ); _handler->onPathEnd(); } void SVG::handle_polygon( TiXmlElement* pathElement ) { _handler->onPathBegin(); string points; if ( pathElement->QueryStringAttribute( "points", &points ) == TIXML_SUCCESS ) { parse_points( points ); } handle_general_parameter( pathElement ); _handler->onPathEnd(); } void SVG::handle_general_parameter( TiXmlElement* pathElement ) { string fill; if ( pathElement->QueryStringAttribute( "fill", &fill ) == TIXML_SUCCESS ) { _handler->onPathFillColor( string_hex_color_to_uint( fill ) ); } string stroke; if ( pathElement->QueryStringAttribute( "stroke", &stroke) == TIXML_SUCCESS ) { _handler->onPathStrokeColor( string_hex_color_to_uint( stroke ) ); } string stroke_width; if ( pathElement->QueryStringAttribute( "stroke-width", &stroke_width) == TIXML_SUCCESS ) { float width = atof( stroke_width.c_str() ); _handler->onPathStrokeWidth( width ); } string style; if ( pathElement->QueryStringAttribute( "style", &style) == TIXML_SUCCESS ) { parse_path_style( style ); } string transform; if ( pathElement->QueryStringAttribute( "transform", &transform) == TIXML_SUCCESS ) { parse_path_transform( transform ); } string id_; if ( pathElement->QueryStringAttribute( "id", &id_) == TIXML_SUCCESS ) { _handler->onId( id_ ); //cout << id_ << endl; } string opacity; if ( pathElement->QueryStringAttribute( "opacity", &opacity) == TIXML_SUCCESS ) { float o = atof( opacity.c_str() ); _handler->onPathFillOpacity( o ); // TODO: ??? stroke opacity??? 
} if ( pathElement->QueryStringAttribute( "fill-opacity", &opacity) == TIXML_SUCCESS ) { float o = atof( opacity.c_str() ); _handler->onPathFillOpacity( o ); } string fillrule; if ( pathElement->QueryStringAttribute( "fill-rule", &fillrule) == TIXML_SUCCESS ) { _handler->onPathFillRule( fillrule ); } } float SVG::d_string_to_float( char *c, char** str ) { while ( isspace(*c) ) { c++; (*str)++; } while ( *c == ',' ) { c++; (*str)++; } return strtof( c, str ); } int SVG::d_string_to_int( char *c, char **str ) { while ( isspace(*c) ) { c++; (*str)++; } while ( *c == ',' ) { c++; (*str)++; } return (int)strtol( c, str, 10); } uint32_t SVG::string_hex_color_to_uint( string& hexstring ) { uint32_t color = (uint32_t)strtol( hexstring.c_str() + 1, 0, 16 ); if ( hexstring.length() == 7 ) { // fix up to rgba if the color is only rgb color = color << 8; color |= 0x000000ff; } return color; } void SVG::nextState( char** c, char* state ) { if ( **c == '\0') { *state = 'e'; return; } while ( isspace(**c) ) { if ( **c == '\0') { *state = 'e'; return; } (*c)++; } if ( isalpha( **c ) ) { *state = **c; (*c)++; // if ( **c == '\0') { // *state = 'e'; // return; // } if ( islower(*state) ) { // if lower case then relative coords (see SVG spec) _handler->setRelative( true ); } else { _handler->setRelative( false ); } } //cout << "state: " << *state << endl; } void SVG::parse_path_transform( string& tr ) { size_t p = tr.find( "translate" ); if ( p != string::npos ) { size_t left = tr.find( "(" ); size_t right = tr.find( ")" ); string values = tr.substr( left+1, right-1 ); char* c = const_cast<char*>( values.c_str() ); float x = d_string_to_float( c, &c ); float y = d_string_to_float( c, &c ); _handler->onTransformTranslate( x, y ); } else if ( tr.find( "rotate" ) != string::npos ) { size_t left = tr.find( "(" ); size_t right = tr.find( ")" ); string values = tr.substr( left+1, right-1 ); char* c = const_cast<char*>( values.c_str() ); float a = d_string_to_float( c, &c ); _handler->onTransformRotate( a ); // ??? radians or degrees ?? 
} else if ( tr.find( "matrix" ) != string::npos ) { size_t left = tr.find( "(" ); size_t right = tr.find( ")" ); string values = tr.substr( left+1, right-1 ); char* cc = const_cast<char*>( values.c_str() ); float a = d_string_to_float( cc, &cc ); float b = d_string_to_float( cc, &cc ); float c = d_string_to_float( cc, &cc ); float d = d_string_to_float( cc, &cc ); float e = d_string_to_float( cc, &cc ); float f = d_string_to_float( cc, &cc ); _handler->onTransformMatrix( a, b, c, d, e, f ); } } void SVG::parse_path_d( string& d ) { char* c = const_cast<char*>( d.c_str() ); char state = *c; nextState( &c, &state ); while ( /**c &&*/ state != 'e' ) { switch ( state ) { case 'm': case 'M': { //c++; float x = d_string_to_float( c, &c ); float y = d_string_to_float( c, &c ); _handler->onPathMoveTo( x, y ); nextState(&c, &state); } break; case 'l': case 'L': { //c++; float x = d_string_to_float( c, &c ); float y = d_string_to_float( c, &c ); _handler->onPathLineTo( x, y ); nextState(&c, &state); } break; case 'h': case 'H': { float x = d_string_to_float( c, &c ); _handler->onPathHorizontalLine( x ); nextState(&c, &state); } break; case 'v': case 'V': { float y = d_string_to_float( c, &c ); _handler->onPathVerticalLine( y ); nextState(&c, &state); } break; case 'c': case 'C': { float x1 = d_string_to_float( c, &c ); float y1 = d_string_to_float( c, &c ); float x2 = d_string_to_float( c, &c ); float y2 = d_string_to_float( c, &c ); float x3 = d_string_to_float( c, &c ); float y3 = d_string_to_float( c, &c ); _handler->onPathCubic(x1, y1, x2, y2, x3, y3); nextState(&c, &state); } break; case 's': case 'S': { float x2 = d_string_to_float( c, &c ); float y2 = d_string_to_float( c, &c ); float x3 = d_string_to_float( c, &c ); float y3 = d_string_to_float( c, &c ); _handler->onPathSCubic(x2, y2, x3, y3); nextState(&c, &state); } break; case 'a': case 'A': { float rx = d_string_to_float( c, &c ); float ry = d_string_to_float( c, &c ); float x_axis_rotation = d_string_to_float( c, &c ); int large_arc_flag = d_string_to_int( c, &c ); int sweep_flag = d_string_to_int( c, &c ); float x = d_string_to_float( c, &c );; float y = d_string_to_float( c, &c ); _handler->onPathArc( rx, ry, x_axis_rotation, large_arc_flag, sweep_flag, x, y ); nextState(&c, &state); } break; case 'z': case 'Z': { //c++; _handler->onPathClose(); nextState(&c, &state); } break; case 'q': case 'Q': { float x1 = d_string_to_float(c, &c); float y1 = d_string_to_float(c, &c); float x2 = d_string_to_float(c, &c); float y2 = d_string_to_float(c, &c); _handler->onPathQuad(x1, y1, x2, y2); nextState(&c, &state); } break; default: // BUGBUG: can get stuck here! // TODO: figure out the next state if we don't handle a particular state or just dummy handle a state! 
c++; break; } } } // semicolon-separated property declarations of the form "name : value" within the ‘style’ attribute void SVG::parse_path_style( string& ps ) { map< string, string > style_key_values; char_separator<char> values_seperator(";"); char_separator<char> key_value_seperator(":"); tokenizer<char_separator<char> > values_tokens( ps, values_seperator ); BOOST_FOREACH( string values, values_tokens ) { tokenizer<char_separator<char> > key_value_tokens( values, key_value_seperator ); tokenizer<char_separator<char> >::iterator k = key_value_tokens.begin(); tokenizer<char_separator<char> >::iterator v = k; v++; //cout << *k << ":" << *v << endl; style_key_values[*k] = *v; } map<string, string>::iterator kv = style_key_values.find( string("fill") ); if ( kv != style_key_values.end() ) { if( kv->second != "none" ) _handler->onPathFillColor( string_hex_color_to_uint( kv->second ) ); } kv = style_key_values.find( "stroke" ); if ( kv != style_key_values.end() ) { if( kv->second != "none" ) _handler->onPathStrokeColor( string_hex_color_to_uint( kv->second ) ); } kv = style_key_values.find( "stroke-width" ); if ( kv != style_key_values.end() ) { float width = atof( kv->second.c_str() ); _handler->onPathStrokeWidth( width ); } kv = style_key_values.find( "fill-rule" ); if ( kv != style_key_values.end() ) { _handler->onPathFillRule( kv->second ); } kv = style_key_values.find( "fill-opacity" ); if ( kv != style_key_values.end() ) { float o = atof( kv->second.c_str() ); _handler->onPathFillOpacity( o ); } kv = style_key_values.find( "opacity" ); if ( kv != style_key_values.end() ) { float o = atof( kv->second.c_str() ); _handler->onPathFillOpacity( o ); // ?? TODO: stroke Opacity??? } kv = style_key_values.find( "stroke-opacity" ); if ( kv != style_key_values.end() ) { float o = atof( kv->second.c_str() ); _handler->onPathStrokeOpacity( o ); } } void SVG::parse_points( string& points ) { char_separator<char> sep(", \t"); tokenizer<char_separator<char>> tokens(points,sep); float xy[2]; int xy_offset = 0; // 0:x, 1:y bool first = true; _handler->setRelative(false); BOOST_FOREACH( string p, tokens ) { xy[xy_offset++] = (float)atof(p.c_str()); if (xy_offset == 2) { xy_offset = 0; if (first) { _handler->onPathMoveTo( xy[0], xy[1] ); first = false; } else _handler->onPathLineTo( xy[0], xy[1] ); } } _handler->onPathClose(); } };
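The scanning strategy of `parse_path_d` above (a one-letter command selects a state, then numbers are consumed until the next letter appears) can be restated compactly. The following Python sketch is a simplified, hypothetical restatement covering only the absolute `M`/`L`/`Z` commands; the real parser above also handles `H`, `V`, `C`, `S`, `Q`, `A` and relative (lower-case) commands, and it is not part of the MonkSVG API.

```python
import re

# Tokens are single command letters or (possibly signed, decimal) numbers.
TOKEN = re.compile(r"[MmLlZz]|-?\d+(?:\.\d+)?")

def parse_path_d(d):
    state = None
    nums = []
    for tok in TOKEN.findall(d):
        if tok.isalpha():
            state = tok.upper()  # a lower-case letter would mean relative coordinates
            if state == "Z":
                print("closePath")
            continue
        nums.append(float(tok))
        if state in ("M", "L") and len(nums) == 2:
            print("moveTo" if state == "M" else "lineTo", tuple(nums))
            nums = []

parse_path_d("M 10 20 L 30 40 50 60 Z")  # repeated coordinates yield two lineTo calls
```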
module Client.Action.BuildDeps import Client.Action.CacheDeps import Data.List import Data.SortedMap as Map import Data.SortedSet as Set import Data.String import Fmt import Inigo.Async.Base import Inigo.Async.Package import Inigo.Async.Promise import Inigo.Package.Package import Inigo.Paths import Inigo.PkgTree import SemVar buildIPkg : String -> Promise () buildIPkg path = do log (fmt "Compiling %s" (path </> inigoIPkgPath)) debugLog "[\{path}]$ idris2 --build \{inigoIPkgPath}" 0 <- system "idris2" ["--build", inigoIPkgPath] (Just path) False True | errno => reject "idris2 build error: \{show errno}" log (fmt "Compiled %s" (path </> inigoIPkgPath)) export buildDeps : Bool -> Promise () buildDeps dev = do pkgs <- readDepCache pkg <- currPackage let ctxt = fromList pkgs log $ "Generating build order" ++ if dev then " with dev dependencies" else "" debugLog "Packages to build: \{show $ Set.toList $ keySet ctxt}" debugLog "Package context: \{show $ Map.toList $ getDeps {name=String} dev <$> ctxt}" build <- liftEither $ getBuildOrder dev ctxt (keySet ctxt) traverse_ buildIPkg build
function nmt_peaksearch_helper
% simplifies Callback for peak search functionality in GUI

global st

cfg=[];
cfg.searchradius = [str2num(get(st.nmt.gui.searchradius1,'String')) str2num(get(st.nmt.gui.searchradius2,'String'))];
cfg.peaktype=get(st.nmt.gui.peaktype,'string');
cfg.peaktype=cfg.peaktype{get(st.nmt.gui.peaktype,'Value')};
peakdomain=get(st.nmt.gui.peakdomain,'string');
peakdomain=peakdomain{get(st.nmt.gui.peakdomain,'Value')};

switch(peakdomain)
    case 'spatial'
        cfg.time = st.nmt.cfg.time_idx;
    case 'temporal'
        cfg.vox = st.nmt.cfg.vox_idx;
    case 'spatiotemporal'
        % nothing to do, this is default behavior
    otherwise
        error('well this is unexpected...')
end

[v,t]=nmt_peaksearch(cfg);
nmt_repos(v,t);
theory sort_NMSortTDIsSort imports Main "$HIPSTER_HOME/IsaHipster" begin datatype 'a list = Nil2 | Cons2 "'a" "'a list" datatype Nat = Z | S "Nat" fun take :: "Nat => 'a list => 'a list" where "take (Z) y = Nil2" | "take (S z) (Nil2) = Nil2" | "take (S z) (Cons2 x2 x3) = Cons2 x2 (take z x3)" fun lmerge :: "int list => int list => int list" where "lmerge (Nil2) y = y" | "lmerge (Cons2 z x2) (Nil2) = Cons2 z x2" | "lmerge (Cons2 z x2) (Cons2 x3 x4) = (if z <= x3 then Cons2 z (lmerge x2 (Cons2 x3 x4)) else Cons2 x3 (lmerge (Cons2 z x2) x4))" fun length :: "'t list => Nat" where "length (Nil2) = Z" | "length (Cons2 y xs) = S (length xs)" fun insert2 :: "int => int list => int list" where "insert2 x (Nil2) = Cons2 x (Nil2)" | "insert2 x (Cons2 z xs) = (if x <= z then Cons2 x (Cons2 z xs) else Cons2 z (insert2 x xs))" fun isort :: "int list => int list" where "isort (Nil2) = Nil2" | "isort (Cons2 y xs) = insert2 y (isort xs)" fun half :: "Nat => Nat" where "half (Z) = Z" | "half (S (Z)) = Z" | "half (S (S n)) = S (half n)" fun drop :: "Nat => 'a list => 'a list" where "drop (Z) y = y" | "drop (S z) (Nil2) = Nil2" | "drop (S z) (Cons2 x2 x3) = drop z x3" fun nmsorttd :: "int list => int list" where "nmsorttd (Nil2) = Nil2" | "nmsorttd (Cons2 y (Nil2)) = Cons2 y (Nil2)" | "nmsorttd (Cons2 y (Cons2 x2 x3)) = lmerge (nmsorttd (take (half (length (Cons2 y (Cons2 x2 x3)))) (Cons2 y (Cons2 x2 x3)))) (nmsorttd (drop (half (length (Cons2 y (Cons2 x2 x3)))) (Cons2 y (Cons2 x2 x3))))" (*hipster take lmerge length insert2 isort half drop nmsorttd *) theorem x0 : "!! (x :: int list) . (nmsorttd x) = (isort x)" by (tactic \<open>Subgoal.FOCUS_PARAMS (K (Tactic_Data.hard_tac @{context})) @{context} 1\<close>) end
\section{Unification}

First of all we describe the data structures and algorithms of the unification algorithm. All classes used for this algorithm can be found in the package \texttt{de.renew.unify}.

\subsection{Motivation}

For improved usability, the inscription language of a Petri net formalism should support some sort of tuples to facilitate the easy retrieval of matching values. Tuples are already a classic area in which unification algorithms have been applied, and the unification of tokens at places with arc inscriptions requires at least a matching algorithm.

There are, however, aspects of the simulation algorithm for which a unification algorithm is useful in different ways. Petri net formalisms often require the consideration of many different transition inscriptions when firing a transition. Mostly, these inscriptions are given without a particular order, so their effect should not depend on the order of their evaluation. This coincides with a characteristic of unification algorithms, namely that the sequence of unifications is irrelevant.

For synchronous channels, no direction of the information flow is prescribed: during the search for enabled bindings, the values and variables on both sides must be unified, as a simple assignment is generally not possible. First of all, a parameter of a synchronous channel could itself again be a tuple expression, if we want to keep the orthogonality of the language definitions. Further, a synchronous channel might have to be handled even before the last variable of the initiating side is bound. This is especially important when the channel transfers values in both directions.

The question is whether more unifiable objects, besides tuples, should be supported. Lists were identified as a reasonable extension of a net formalism. Although lists can be represented as nested tuples, for the sake of better usability this way of implementation should not be externally visible. However, we cannot modify the representation of tuples, as nested tuples do not always represent a list. Hence a special category of lists was created which allows a more suitable representation and prohibits the visibility of their internal structure as well. In the following, we will not deal with this class often, since nothing changes in the actual unification algorithm and all interesting effects can already be observed with tuples.

\subsection{Unknowns}

Each variable has a value. This can be a normal Java object or a unifiable object. When a new variable is generated, its value is unknown at first, because the variable is completely unbound. In order to be able to indicate a value nonetheless, special objects of the class \texttt{Unknown} have been introduced. These objects become important during unification. After two unassigned variables \texttt{x} and \texttt{y} have been unified, their value is still unknown. The unification, however, is visible in that both variables would return the same unknown as their value. We note that, after the unification of \texttt{x} and \texttt{y}, it is not specified which of the two unknowns will form the later value of \texttt{x} and \texttt{y}. In fact, this is irrelevant for further unifications.

Outside the unification algorithm, the class \texttt{Unknown} need not be known anyway, because variables should be queried for their valuation only after they have become completely bound, i.e., when no unknown is part of their value, not even nested within a tuple.
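To make the role of unknowns concrete, consider the following Python sketch (a hypothetical illustration; the actual implementation in \texttt{de.renew.unify} is written in Java and differs in detail). After \texttt{unify}, both variables report the identical unknown, and which of the two unknowns survives is deliberately left unspecified:

\begin{verbatim}
class Unknown:
    """Placeholder value of a still-unbound variable."""

class Variable:
    def __init__(self):
        self.value = Unknown()   # a fresh variable is completely unbound

def unify(x, y):
    # Simplified: only the case of two unbound variables is shown.
    if isinstance(x.value, Unknown) and isinstance(y.value, Unknown):
        y.value = x.value        # both variables now share one unknown

x, y = Variable(), Variable()
unify(x, y)
assert x.value is y.value
\end{verbatim}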
\subsection{Backtracking}

For the implementation of a unification algorithm one can choose one of two ways. Either unification creates a new binding list that assigns the appropriate values to the variables, whereby the original binding list is preserved; or, alternatively, the old binding information is overwritten and thus no longer available. Here, the second way, which modifies the existing objects, has been chosen, so that unnecessary copying can be avoided.

In a Petri net simulator, however, it is necessary to be able to reset to the old state as required, for example when a proposed binding of an arc variable to a token value did not lead to an activated transition and alternative bindings should be tried out. Therefore all modifications that the unification algorithm executes are noted in a central object, which belongs to the \texttt{StateRecorder} class. For all modifying accesses to unifiable objects a recorder must then be specified. If the special value \texttt{null} is used, then the corresponding operation cannot be undone. Otherwise, all attributes of the object that are about to be modified are stored in the recorder. It is important that no modification is made before it has been recorded.

In order to keep the recording of the modifications as flexible as possible, the state recorder does not prescribe a format for the information that must be recorded. Instead, an object of the type \texttt{StateRestorer} is transmitted. This object possesses only one method, \texttt{restore()}, and stores all necessary information. For each type of modification a subclass of \texttt{StateRestorer} is created. Several reset points can be specified for a recorder, so that a partial resetting is possible. Modifications are always undone in the reverse order in which they were made.

It will be shown that almost the entire state of the Petri net simulator is stored in unifiable objects, and that almost the entire backtracking can be performed by this algorithm. If information is not connected to unifiable objects, special subclasses of \texttt{StateRestorer} can handle these cases, too.

\singlediagram{unifdata}{Unifiable objects}

\subsection{Unifiability and Java Equality}

The unifiability of Java objects was implemented on the basis of the method \texttt{equals(Object)}. Thus we follow the decision of the programmers of the container classes, where this way is also always chosen to access the contents of a container. It is a procedure already embodied in the language definition, which can be adapted sufficiently flexibly to individual needs.

The substantial problems stem from the fact that the Java definition of equality allows \texttt{equals(Object)} to vary over time. In most cases the Java API follows the rule that only for unchangeable objects equality may be coarser than identity, but there are exceptions, for instance \texttt{java.awt.Point} or the container classes from Java 1.2. This can be tolerated when Java's equality concept is used in the usual way. It becomes a serious problem for unification, however, because a check for equality may occur in many unexpected places.

Now the question arises how unifiable objects themselves deal with equality tests. Tuples are very easy to handle, because for them equality is defined by the equality of all components. For unknowns, however, a certain problem results, because they can still take any value by unification, so that the outcome of a comparison with other objects is not at all defined.
Therefore a comparison attempt on unknowns throws an exception, which interrupts the normal program flow and is normally reported as an error. Indeed a comparison should not occur, because unknowns should be used only under the control of the Petri net simulator. To other program sections the simulator should only pass completely bound tuples, so that unknowns should not be accessible from normal Java code. Since backtracking operations can again introduce unknown components into already fully unified tuples, it would be possible that an appropriate method stores a reference to a complete tuple and later, after the backtracking, accesses the tuple again and retrieves an unknown. Since those methods that are not invoked from within actions should not have any side effects, this effect will only occur in actions. In that case, however, all tuples will first be copied, so that they are not subject to backtracking any more.

A hash code must be assigned to each Java object. For tuples this is calculated using a simple polynomial derived from the hash codes of the tuple components. For unknowns the query of the hash code throws an exception, because an unknown should not be stored in a hashed data structure.

\subsection{Occurrence check}

If a tuple could contain itself directly or indirectly, then one could describe certain infinite data structures quite easily. But such things are not easy to handle in the mathematical theory and cause problems during the implementation, too. With unification further problems occur. In particular, attention would have to be paid to avoiding endless loops. Also, unification is founded on term unification from predicate logic, where infinite terms are not allowed. Thus there is the task to ensure cycle-freeness during the unification: the so-called occurrence check, which checks for the occurrence of a tuple within another tuple.

The occurrence check is sometimes not implemented in the area of logic programming, and one leaves the behavior in the case of recursive tuples unspecified, because a check in that context would be very expensive. In Petri net simulators the principal complexity is due to other algorithms, so that an occurrence check does not slow down the simulator considerably.

\subsection{Calculations}

In the formalism of the reference nets it is possible to carry out certain calculations only during the execution of a transition, which is noted with the keyword \texttt{action}. The results of the calculations are not known to the unification algorithm, yet the algorithm should as far as possible be able to deal with these calculations. Especially, late calculations should be effectively executable during the transition's firing. This leads to certain requirements:

\begin{itemize}
\item Cyclic dependencies shall be detected. This applies also to complex multi-level dependencies. For example, the call \texttt{action x=[1,a.method(x)]} should fail before the firing of the transition, because here \texttt{x} depends on itself indirectly through a method invocation and a tuple.
\item As far as possible, the result type of a later calculation should be represented by the unification algorithm, so that no preventable type errors may occur.
\end{itemize}

A calculation is represented by special unifiable objects of the type \texttt{Calculator}. A calculator is unifiable only with unknowns and with itself, but not with tuples, values or other calculators. In particular, equality reduces to identity for calculations.
Calculator objects reference exactly one other object, which can serve as an argument for a calculation. If more arguments are required, this can be implemented by a calculator object that references a tuple object.

Occasionally a variable's value must be of a certain type in order to be a valid assignment for the variable. This is ensured by the class \texttt{TypeConstrainer}. Such an object monitors an arbitrary value. As soon as the value is no longer an unknown, the type of the new value is checked. This might be possible before the value is completely bound. For example, a tuple may be type checked before all its components are bound.

\singlediagram{tupleindex}{A tuple index}

In order to be able to provide type checking for late calculations, all calculator objects carry the predicted type of their result. If a \texttt{TypeConstrainer} detects a calculator object as value, the predicted type is used instead of the type \texttt{Calculator}.

In Fig.~\ref{fig:unifdata} we summarize the main classes involved in the representation of unifiable data structures. You can see how every implementation of \texttt{Referer} is assisted by an instance of \texttt{Reference}. Similarly, every \texttt{Referable} is augmented by a \texttt{BacklinkSet}. A backlink set collects information about all those references that reference its owner. A reference makes sure to insert itself into the backlink set of its referenced object.

Using a \texttt{CalculationChecker} object a program can require that certain variables must be bound or that they must be complete. A value is complete if it contains no unknowns, not even nested within a multitude of unifiable objects. A complete value is bound if it contains no calculators.

\subsection{Tuple Index}

The tuple index is a specialized data structure that allows one to select, among a set of tuples, some candidates that might fit a given pattern. The given pattern is itself a possibly nested tuple, which might be incomplete, i.e., it might contain unknowns in some substructures. The tuple index provides an upper bound for the set of matching values based on exactly one component of the tuple or one component of a subcomponent of the tuple. The tuple index will try all complete subcomponents of the tuple during lookup and select the best estimate among these. It will not, however, consider more than one component. E.g., with the set \texttt{[0,0,0]}, \texttt{[0,1,0]}, \texttt{[0,1,1]}, and \texttt{[1,0,0]} of values and the pattern \verb:[0,0,_]:, the only matching value is \texttt{[0,0,0]}, but the optimal guess based on the second component contains \texttt{[0,0,0]} and \texttt{[1,0,0]}.
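Returning to the backtracking mechanism described above, the following Python sketch summarizes the interplay of \texttt{StateRecorder} and \texttt{StateRestorer}. The class names follow the text, but the method names \texttt{checkpoint} and \texttt{restore\_to} are illustrative assumptions, not the actual Java interface of \texttt{de.renew.unify}:

\begin{verbatim}
class AttributeRestorer:
    # One subclass of StateRestorer per kind of modification;
    # this one undoes a plain attribute assignment.
    def __init__(self, obj, attr, old_value):
        self.obj, self.attr, self.old_value = obj, attr, old_value
    def restore(self):
        setattr(self.obj, self.attr, self.old_value)

class StateRecorder:
    def __init__(self):
        self.log = []
    def record(self, restorer):
        # Must be called *before* the modification is carried out.
        self.log.append(restorer)
    def checkpoint(self):
        # Several reset points may be taken for partial resetting.
        return len(self.log)
    def restore_to(self, mark):
        # Modifications are undone in reverse order of recording.
        while len(self.log) > mark:
            self.log.pop().restore()
\end{verbatim}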
The image of the path $\theta \mapsto z + r e^{i \theta}$ for $\theta \in [s, t]$ is the set of points $\{z + r e^{i \theta} \mid \theta \in [s, t]\}$.