using DelimitedFiles

crabs = vec(readdlm("data_7.dat", ',', Int))
max_pos = maximum(crabs)
min_pos = minimum(crabs)
fuels = Int[]
for p in min_pos:max_pos
    dists = abs.(crabs .- p)
    fuel = 0
    for d in dists
        # Moving a distance d costs 1 + 2 + ... + d = d*(d+1)/2 fuel (a triangular
        # number); use integer division so the total stays an Int.
        fuel += d * (d + 1) ÷ 2
    end
    push!(fuels, fuel)
end
println(minimum(fuels))
|
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Properties of machine operations.
*)
theory Machine_AI
imports "../Bits_AI"
begin
definition
"no_irq f \<equiv> \<forall>P. \<lbrace>\<lambda>s. P (irq_masks s)\<rbrace> f \<lbrace>\<lambda>_ s. P (irq_masks s)\<rbrace>"
lemma wpc_helper_no_irq:
"no_irq f \<Longrightarrow> wpc_helper (P, P') (Q, Q') (no_irq f)"
by (simp add: wpc_helper_def)
wpc_setup "\<lambda>m. no_irq m" wpc_helper_no_irq
ML \<open>
structure CrunchNoIrqInstance : CrunchInstance =
struct
val name = "no_irq";
type extra = unit;
val eq_extra = op =;
fun parse_extra ctxt extra
= case extra of
"" => (Syntax.parse_term ctxt "%_. True", ())
| _ => error "no_irq does not need a precondition";
val has_preconds = false;
fun mk_term _ body _ =
(Syntax.parse_term @{context} "no_irq") $ body;
fun dest_term (Const (@{const_name no_irq}, _) $ body)
= SOME (Term.dummy, body, ())
| dest_term _ = NONE;
fun put_precond _ _ = error "crunch no_irq should not be calling put_precond";
val pre_thms = [];
val wpc_tactic = wp_cases_tactic_weak;
fun wps_tactic _ _ _ = no_tac;
val magic = Syntax.parse_term @{context}
"\<lambda>mapp_lambda_ignore. no_irq mapp_lambda_ignore";
val get_monad_state_type = get_nondet_monad_state_type;
end;
structure CrunchNoIrq : CRUNCH = Crunch(CrunchNoIrqInstance);
\<close>
setup \<open>
add_crunch_instance "no_irq" (CrunchNoIrq.crunch_x, CrunchNoIrq.crunch_ignore_add_del)
\<close>
crunch_ignore (no_irq) (add:
NonDetMonad.bind return "when" get gets fail
assert put modify unless select
alternative assert_opt gets_the
returnOk throwError lift bindE
liftE whenE unlessE throw_opt
assertE liftM liftME sequence_x
zipWithM_x mapM_x sequence mapM sequenceE_x
mapME_x catch select_f
handleE' handleE handle_elseE forM forM_x
zipWithM ignore_failure)
context Arch begin
lemma det_getRegister: "det (getRegister x)"
by (simp add: getRegister_def)
lemma det_setRegister: "det (setRegister x w)"
by (simp add: setRegister_def det_def modify_def get_def put_def bind_def)
lemma det_getRestartPC: "det getRestartPC"
by (simp add: getRestartPC_def det_getRegister)
lemma det_setNextPC: "det (setNextPC p)"
by (simp add: setNextPC_def det_setRegister)
lemma ef_loadWord: "empty_fail (loadWord x)"
by (simp add: loadWord_def)
lemma ef_storeWord: "empty_fail (storeWord x y)"
by (simp add: storeWord_def)
lemma no_fail_getRestartPC: "no_fail \<top> getRestartPC"
by (simp add: getRestartPC_def getRegister_def)
lemma no_fail_loadWord [wp]: "no_fail (\<lambda>_. is_aligned p 3) (loadWord p)"
apply (simp add: loadWord_def is_aligned_mask [symmetric])
apply (rule no_fail_pre)
apply wp
apply simp
done
lemma no_fail_storeWord: "no_fail (\<lambda>_. is_aligned p 3) (storeWord p w)"
apply (simp add: storeWord_def is_aligned_mask [symmetric])
apply (rule no_fail_pre)
apply (wp)
apply simp
done
lemma no_fail_machine_op_lift [simp]:
"no_fail \<top> (machine_op_lift f)"
by (simp add: machine_op_lift_def)
lemma ef_machine_op_lift [simp]:
"empty_fail (machine_op_lift f)"
by (simp add: machine_op_lift_def)
lemma no_fail_setNextPC: "no_fail \<top> (setNextPC pc)"
by (simp add: setNextPC_def setRegister_def)
lemma no_fail_initL2Cache: "no_fail \<top> initL2Cache"
by (simp add: initL2Cache_def)
lemma no_fail_resetTimer[wp]: "no_fail \<top> resetTimer"
by (simp add: resetTimer_def)
lemma loadWord_inv: "\<lbrace>P\<rbrace> loadWord x \<lbrace>\<lambda>x. P\<rbrace>"
apply (simp add: loadWord_def)
apply wp
apply simp
done
lemma getRestartPC_inv: "\<lbrace>P\<rbrace> getRestartPC \<lbrace>\<lambda>rv. P\<rbrace>"
by (simp add: getRestartPC_def getRegister_def)
lemma no_fail_clearMemory[simp, wp]:
"no_fail (\<lambda>_. is_aligned p 3) (clearMemory p b)"
apply (simp add: clearMemory_def mapM_x_mapM)
apply (rule no_fail_pre)
apply (wp no_fail_mapM' no_fail_storeWord )
apply (clarsimp simp: upto_enum_step_def)
apply (erule aligned_add_aligned)
apply (simp add: word_size_def)
apply (rule is_aligned_mult_triv2 [where n = 3, simplified])
apply simp
done
lemma no_fail_freeMemory[simp, wp]:
"no_fail (\<lambda>_. is_aligned p 3) (freeMemory p b)"
apply (simp add: freeMemory_def mapM_x_mapM)
apply (rule no_fail_pre)
apply (wp no_fail_mapM' no_fail_storeWord)
apply (clarsimp simp: upto_enum_step_def)
apply (erule aligned_add_aligned)
apply (simp add: word_size_def)
apply (rule is_aligned_mult_triv2 [where n = 3, simplified])
apply simp
done
lemma no_fail_getActiveIRQ[wp]:
"no_fail \<top> (getActiveIRQ in_kernel)"
apply (simp add: getActiveIRQ_def)
apply (rule no_fail_pre)
apply (wp non_fail_select)
apply simp
done
definition "irq_state_independent P \<equiv> \<forall>f s. P s \<longrightarrow> P (irq_state_update f s)"
lemma getActiveIRQ_inv [wp]:
"\<lbrakk>irq_state_independent P\<rbrakk> \<Longrightarrow> \<lbrace>P\<rbrace> getActiveIRQ in_kernel \<lbrace>\<lambda>rv. P\<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply (simp add: irq_state_independent_def)
done
lemma no_fail_ackInterrupt[wp]: "no_fail \<top> (ackInterrupt irq)"
by (simp add: ackInterrupt_def)
lemma no_fail_maskInterrupt[wp]: "no_fail \<top> (maskInterrupt irq bool)"
by (simp add: maskInterrupt_def)
lemma no_irq_use:
"\<lbrakk> no_irq f; (rv,s') \<in> fst (f s) \<rbrakk> \<Longrightarrow> irq_masks s' = irq_masks s"
apply (simp add: no_irq_def valid_def)
apply (erule_tac x="\<lambda>x. x = irq_masks s" in allE)
apply fastforce
done
lemma machine_rest_lift_no_irq:
"no_irq (machine_rest_lift f)"
apply (clarsimp simp: no_irq_def machine_rest_lift_def split_def)
apply wp
apply simp
done
crunch (no_irq) no_irq[wp]: machine_op_lift
declare machine_op_lift_no_irq[simp] (* avoids crunch warning *)
lemma no_irq:
"no_irq f \<Longrightarrow> \<lbrace>\<lambda>s. P (irq_masks s)\<rbrace> f \<lbrace>\<lambda>_ s. P (irq_masks s)\<rbrace>"
by (simp add: no_irq_def)
lemma no_irq_initL2Cache: "no_irq initL2Cache"
by (simp add: initL2Cache_def)
lemma no_irq_gets [simp]:
"no_irq (gets f)"
by (simp add: no_irq_def)
lemma no_irq_resetTimer: "no_irq resetTimer"
by (simp add: resetTimer_def)
lemma no_irq_debugPrint: "no_irq (debugPrint $ xs)"
by (simp add: no_irq_def)
context notes no_irq[wp] begin
lemma no_irq_ackInterrupt: "no_irq (ackInterrupt irq)"
by (wp | clarsimp simp: no_irq_def ackInterrupt_def)+
lemma no_irq_setIRQTrigger: "no_irq (setIRQTrigger irq bool)"
by (wp | clarsimp simp: no_irq_def setIRQTrigger_def)+
lemma no_irq_loadWord: "no_irq (loadWord x)"
apply (clarsimp simp: no_irq_def)
apply (rule loadWord_inv)
done
lemma no_irq_getActiveIRQ: "no_irq (getActiveIRQ in_kernel)"
apply (clarsimp simp: no_irq_def)
apply (rule getActiveIRQ_inv)
apply (simp add: irq_state_independent_def)
done
lemma no_irq_mapM:
"(\<And>x. x \<in> set xs \<Longrightarrow> no_irq (f x)) \<Longrightarrow> no_irq (mapM f xs)"
apply (subst no_irq_def)
apply clarify
apply (rule mapM_wp)
prefer 2
apply (rule order_refl)
apply (wp; simp)
done
lemma no_irq_mapM_x:
"(\<And>x. x \<in> set xs \<Longrightarrow> no_irq (f x)) \<Longrightarrow> no_irq (mapM_x f xs)"
apply (subst no_irq_def)
apply clarify
apply (rule mapM_x_wp)
prefer 2
apply (rule order_refl)
apply (wp; simp)
done
lemma no_irq_swp:
"no_irq (f y x) \<Longrightarrow> no_irq (swp f x y)"
by (simp add: swp_def)
lemma no_irq_seq [wp]:
"\<lbrakk> no_irq f; \<And>x. no_irq (g x) \<rbrakk> \<Longrightarrow> no_irq (f >>= g)"
apply (subst no_irq_def)
apply clarsimp
apply (rule hoare_seq_ext)
apply (wp|simp)+
done
lemma no_irq_return [simp, wp]: "no_irq (return v)"
unfolding no_irq_def return_def
by (rule allI, simp add: valid_def)
lemma no_irq_fail [simp, wp]: "no_irq fail"
unfolding no_irq_def fail_def
by (rule allI, simp add: valid_def)
lemma no_irq_assert [simp, wp]: "no_irq (assert P)"
unfolding assert_def by simp
lemma no_irq_modify:
"(\<And>s. irq_masks (f s) = irq_masks s) \<Longrightarrow> no_irq (modify f)"
unfolding modify_def no_irq_def
apply (rule allI, simp add: valid_def put_def get_def)
apply (clarsimp simp: in_monad)
done
lemma no_irq_storeWord: "no_irq (storeWord w p)"
apply (simp add: storeWord_def)
apply (wp no_irq_modify)
apply simp
done
lemma no_irq_when:
"\<lbrakk>P \<Longrightarrow> no_irq f\<rbrakk> \<Longrightarrow> no_irq (when P f)"
by (simp add: when_def)
lemma no_irq_clearMemory: "no_irq (clearMemory a b)"
apply (simp add: clearMemory_def)
apply (wp no_irq_mapM_x no_irq_storeWord)
done
lemma getActiveIRQ_le_maxIRQ':
"\<lbrace>\<lambda>s. \<forall>irq > maxIRQ. irq_masks s irq\<rbrace>
getActiveIRQ in_kernel
\<lbrace>\<lambda>rv s. \<forall>x. rv = Some x \<longrightarrow> x \<le> maxIRQ\<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply clarsimp
apply (rule ccontr)
apply (simp add: linorder_not_le)
done
lemma getActiveIRQ_neq_Some0xFF':
"\<lbrace>\<top>\<rbrace> getActiveIRQ in_kernel \<lbrace>\<lambda>rv s. rv \<noteq> Some 0xFF\<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply simp
done
lemma getActiveIRQ_neq_non_kernel:
"\<lbrace>\<top>\<rbrace> getActiveIRQ True \<lbrace>\<lambda>rv s. rv \<notin> Some ` non_kernel_IRQs \<rbrace>"
apply (simp add: getActiveIRQ_def)
apply (wp alternative_wp select_wp)
apply auto
done
lemma dmo_getActiveIRQ_non_kernel[wp]:
"\<lbrace>\<top>\<rbrace> do_machine_op (getActiveIRQ True)
\<lbrace>\<lambda>rv s. \<forall>irq. rv = Some irq \<longrightarrow> irq \<in> non_kernel_IRQs \<longrightarrow> P irq s\<rbrace>"
unfolding do_machine_op_def
apply wpsimp
apply (drule use_valid, rule getActiveIRQ_neq_non_kernel, rule TrueI)
apply clarsimp
done
lemma empty_fail_initL2Cache: "empty_fail initL2Cache"
by (simp add: initL2Cache_def)
lemma empty_fail_clearMemory [simp, intro!]:
"\<And>a b. empty_fail (clearMemory a b)"
by (simp add: clearMemory_def mapM_x_mapM ef_storeWord)
end
end
end
|
/-
Copyright (c) 2022 Andrew Yang. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Andrew Yang
! This file was ported from Lean 3 source module ring_theory.ideal.cotangent
! leanprover-community/mathlib commit 70fd9563a21e7b963887c9360bd29b2393e6225a
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.RingTheory.Ideal.Operations
import Mathbin.Algebra.Module.Torsion
import Mathbin.Algebra.Ring.Idempotents
import Mathbin.LinearAlgebra.FiniteDimensional
import Mathbin.RingTheory.Ideal.LocalRing
/-!
# The module `I ⧸ I ^ 2`
In this file, we provide special API support for the module `I ⧸ I ^ 2`. The official
definition is a quotient module of `I`, but the alternative definition as an ideal of `R ⧸ I ^ 2` is
also given, and the two are `R`-equivalent as in `ideal.cotangent_equiv_ideal`.
Additional support is also given to the cotangent space `m ⧸ m ^ 2` of a local ring.
-/
namespace Ideal
variable {R S S' : Type _} [CommRing R] [CommSemiring S] [Algebra S R]
variable [CommSemiring S'] [Algebra S' R] [Algebra S S'] [IsScalarTower S S' R] (I : Ideal R)
/- ./././Mathport/Syntax/Translate/Command.lean:42:9: unsupported derive handler module[module] «expr ⧸ »(R, I) -/
/-- `I ⧸ I ^ 2` as a quotient of `I`. -/
def Cotangent : Type _ :=
I ⧸ (I • ⊤ : Submodule R I)
deriving AddCommGroup
-- ./././Mathport/Syntax/Translate/Command.lean:42:9: unsupported derive handler module[module] «expr ⧸ »(R, I)
#align ideal.cotangent Ideal.Cotangent
instance : Inhabited I.Cotangent :=
⟨0⟩
instance Cotangent.moduleOfTower : Module S I.Cotangent :=
Submodule.Quotient.module' _
#align ideal.cotangent.module_of_tower Ideal.Cotangent.moduleOfTower
instance : IsScalarTower S S' I.Cotangent :=
by
delta cotangent
constructor
intro s s' x
rw [← @IsScalarTower.algebraMap_smul S' R, ← @IsScalarTower.algebraMap_smul S' R, ← smul_assoc, ←
IsScalarTower.toAlgHom_apply S S' R, map_smul]
rfl
instance [IsNoetherian R I] : IsNoetherian R I.Cotangent :=
by
delta cotangent
infer_instance
/-- The quotient map from `I` to `I ⧸ I ^ 2`. -/
@[simps (config := lemmasOnly) apply]
def toCotangent : I →ₗ[R] I.Cotangent :=
Submodule.mkQ _
#align ideal.to_cotangent Ideal.toCotangent
theorem map_toCotangent_ker : I.toCotangent.ker.map I.Subtype = I ^ 2 := by
simp [Ideal.toCotangent, Submodule.map_smul'', pow_two]
#align ideal.map_to_cotangent_ker Ideal.map_toCotangent_ker
theorem mem_toCotangent_ker {x : I} : x ∈ I.toCotangent.ker ↔ (x : R) ∈ I ^ 2 :=
by
rw [← I.map_to_cotangent_ker]
simp
#align ideal.mem_to_cotangent_ker Ideal.mem_toCotangent_ker
theorem toCotangent_eq {x y : I} : I.toCotangent x = I.toCotangent y ↔ (x - y : R) ∈ I ^ 2 :=
by
rw [← sub_eq_zero, ← map_sub]
exact I.mem_to_cotangent_ker
#align ideal.to_cotangent_eq Ideal.toCotangent_eq
theorem toCotangent_eq_zero (x : I) : I.toCotangent x = 0 ↔ (x : R) ∈ I ^ 2 :=
I.mem_toCotangent_ker
#align ideal.to_cotangent_eq_zero Ideal.toCotangent_eq_zero
theorem toCotangent_surjective : Function.Surjective I.toCotangent :=
Submodule.mkQ_surjective _
#align ideal.to_cotangent_surjective Ideal.toCotangent_surjective
theorem toCotangent_range : I.toCotangent.range = ⊤ :=
Submodule.range_mkQ _
#align ideal.to_cotangent_range Ideal.toCotangent_range
theorem cotangent_subsingleton_iff : Subsingleton I.Cotangent ↔ IsIdempotentElem I :=
by
constructor
· intro H
refine' (pow_two I).symm.trans (le_antisymm (Ideal.pow_le_self two_ne_zero) _)
exact fun x hx => (I.to_cotangent_eq_zero ⟨x, hx⟩).mp (Subsingleton.elim _ _)
·
exact fun e =>
⟨fun x y =>
Quotient.inductionOn₂' x y fun x y =>
I.to_cotangent_eq.mpr <| ((pow_two I).trans e).symm ▸ I.sub_mem x.Prop y.Prop⟩
#align ideal.cotangent_subsingleton_iff Ideal.cotangent_subsingleton_iff
/-- The inclusion map `I ⧸ I ^ 2` to `R ⧸ I ^ 2`. -/
def cotangentToQuotientSquare : I.Cotangent →ₗ[R] R ⧸ I ^ 2 :=
Submodule.mapQ (I • ⊤) (I ^ 2) I.Subtype
(by
rw [← Submodule.map_le_iff_le_comap, Submodule.map_smul'', Submodule.map_top,
Submodule.range_subtype, smul_eq_mul, pow_two]
exact rfl.le)
#align ideal.cotangent_to_quotient_square Ideal.cotangentToQuotientSquare
theorem to_quotient_square_comp_toCotangent :
I.cotangentToQuotientSquare.comp I.toCotangent = (I ^ 2).mkQ.comp (Submodule.subtype I) :=
LinearMap.ext fun _ => rfl
#align ideal.to_quotient_square_comp_to_cotangent Ideal.to_quotient_square_comp_toCotangent
@[simp]
theorem toCotangent_to_quotient_square (x : I) :
I.cotangentToQuotientSquare (I.toCotangent x) = (I ^ 2).mkQ x :=
rfl
#align ideal.to_cotangent_to_quotient_square Ideal.toCotangent_to_quotient_square
/-- `I ⧸ I ^ 2` as an ideal of `R ⧸ I ^ 2`. -/
def cotangentIdeal (I : Ideal R) : Ideal (R ⧸ I ^ 2) :=
by
haveI : @RingHomSurjective R (R ⧸ I ^ 2) _ _ _ := ⟨Ideal.Quotient.mk_surjective⟩
let rq := (I ^ 2).Quotient.mk
exact Submodule.map rq.to_semilinear_map I
#align ideal.cotangent_ideal Ideal.cotangentIdeal
theorem cotangentIdeal_square (I : Ideal R) : I.cotangentIdeal ^ 2 = ⊥ :=
by
rw [eq_bot_iff, pow_two I.cotangent_ideal, ← smul_eq_mul]
intro x hx
apply Submodule.smul_induction_on hx
· rintro _ ⟨x, hx, rfl⟩ _ ⟨y, hy, rfl⟩
apply (Submodule.Quotient.eq _).mpr _
rw [sub_zero, pow_two]
exact Ideal.mul_mem_mul hx hy
· intro x y hx hy
exact add_mem hx hy
#align ideal.cotangent_ideal_square Ideal.cotangentIdeal_square
theorem to_quotient_square_range :
I.cotangentToQuotientSquare.range = I.cotangentIdeal.restrictScalars R :=
by
trans (I.cotangent_to_quotient_square.comp I.to_cotangent).range
· rw [LinearMap.range_comp, I.to_cotangent_range, Submodule.map_top]
· rw [to_quotient_square_comp_to_cotangent, LinearMap.range_comp, I.range_subtype]
ext
rfl
#align ideal.to_quotient_square_range Ideal.to_quotient_square_range
/-- The equivalence of the two definitions of `I / I ^ 2`, either as the quotient of `I` or the
ideal of `R / I ^ 2`. -/
noncomputable def cotangentEquivIdeal : I.Cotangent ≃ₗ[R] I.cotangentIdeal :=
by
refine'
{
I.cotangent_to_quotient_square.cod_restrict (I.cotangent_ideal.restrict_scalars R) fun x =>
by
rw [← to_quotient_square_range]
exact LinearMap.mem_range_self _ _,
Equiv.ofBijective _ ⟨_, _⟩ with }
· rintro x y e
replace e := congr_arg Subtype.val e
obtain ⟨x, rfl⟩ := I.to_cotangent_surjective x
obtain ⟨y, rfl⟩ := I.to_cotangent_surjective y
rw [I.to_cotangent_eq]
dsimp only [to_cotangent_to_quotient_square, Submodule.mkQ_apply] at e
rwa [Submodule.Quotient.eq] at e
· rintro ⟨_, x, hx, rfl⟩
refine' ⟨I.to_cotangent ⟨x, hx⟩, Subtype.ext rfl⟩
#align ideal.cotangent_equiv_ideal Ideal.cotangentEquivIdeal
@[simp, nolint simp_nf]
theorem cotangentEquivIdeal_apply (x : I.Cotangent) :
↑(I.cotangentEquivIdeal x) = I.cotangentToQuotientSquare x :=
rfl
#align ideal.cotangent_equiv_ideal_apply Ideal.cotangentEquivIdeal_apply
theorem cotangentEquivIdeal_symm_apply (x : R) (hx : x ∈ I) :
I.cotangentEquivIdeal.symm ⟨(I ^ 2).mkQ x, Submodule.mem_map_of_mem hx⟩ =
I.toCotangent ⟨x, hx⟩ :=
by
apply I.cotangent_equiv_ideal.injective
rw [I.cotangent_equiv_ideal.apply_symm_apply]
ext
rfl
#align ideal.cotangent_equiv_ideal_symm_apply Ideal.cotangentEquivIdeal_symm_apply
variable {A B : Type _} [CommRing A] [CommRing B] [Algebra R A] [Algebra R B]
/-- The lift of `f : A →ₐ[R] B` to `A ⧸ J ^ 2 →ₐ[R] B` with `J` being the kernel of `f`. -/
def AlgHom.kerSquareLift (f : A →ₐ[R] B) : A ⧸ f.toRingHom.ker ^ 2 →ₐ[R] B :=
by
refine' { Ideal.Quotient.lift (f.to_ring_hom.ker ^ 2) f.to_ring_hom _ with commutes' := _ }
· intro a ha
exact Ideal.pow_le_self two_ne_zero ha
· intro r
rw [IsScalarTower.algebraMap_apply R A, RingHom.toFun_eq_coe, Ideal.Quotient.algebraMap_eq,
Ideal.Quotient.lift_mk]
exact f.map_algebra_map r
#align alg_hom.ker_square_lift AlgHom.kerSquareLift
theorem AlgHom.ker_ker_sqare_lift (f : A →ₐ[R] B) :
f.kerSquareLift.toRingHom.ker = f.toRingHom.ker.cotangentIdeal :=
by
apply le_antisymm
· intro x hx
obtain ⟨x, rfl⟩ := Ideal.Quotient.mk_surjective x
exact ⟨x, hx, rfl⟩
· rintro _ ⟨x, hx, rfl⟩
exact hx
#align alg_hom.ker_ker_sqare_lift AlgHom.ker_ker_sqare_lift
/-- The quotient ring of `I ⧸ I ^ 2` is `R ⧸ I`. -/
def quotCotangent : (R ⧸ I ^ 2) ⧸ I.cotangentIdeal ≃+* R ⧸ I :=
by
refine' (Ideal.quotEquivOfEq (Ideal.map_eq_submodule_map _ _).symm).trans _
refine' (DoubleQuot.quotQuotEquivQuotSup _ _).trans _
exact Ideal.quotEquivOfEq (sup_eq_right.mpr <| Ideal.pow_le_self two_ne_zero)
#align ideal.quot_cotangent Ideal.quotCotangent
end Ideal
namespace LocalRing
variable (R : Type _) [CommRing R] [LocalRing R]
/-- The `A ⧸ I`-vector space `I ⧸ I ^ 2`. -/
@[reducible]
def CotangentSpace : Type _ :=
(maximalIdeal R).Cotangent
#align local_ring.cotangent_space LocalRing.CotangentSpace
instance : Module (ResidueField R) (CotangentSpace R) :=
Ideal.Cotangent.module _
instance : IsScalarTower R (ResidueField R) (CotangentSpace R) :=
Module.IsTorsionBySet.isScalarTower _
instance [IsNoetherianRing R] : FiniteDimensional (ResidueField R) (CotangentSpace R) :=
Module.Finite.of_restrictScalars_finite R _ _
end LocalRing
|
program main
use readingfile
use update
use writetofile
use printti
implicit none
!running the simulation
call run()
contains
!for running the simulation
subroutine run()
implicit none
REAL(rk) :: t1, t2 !to calculate the duration of the simulation
INTEGER(ik) :: tofile=0 !datapoints saved to the file
INTEGER(ik) :: i
CHARACTER(MAXBUFF) :: filename !filename where datapoints are stored
CALL readfile() !extracting the data of the objects from the input file
!needed for simulation
call convert() !all the data are stored in the objectarray
nos=totalen/deltaT !calculating how many total steps in the simulation
call GET_COMMAND_ARGUMENT(2, filename) !get command line argument for the filename of the output file
call CPU_TIME(t1)
do i=1, nos
!saving datapoints
savefile: if ((i==1).or.(mod(i, savelen)==0)) then
call writetofilesub(i, filename, tofile)
end if savefile
call CPU_TIME(t2)
!printing datapoints to the terminal
tulostus: if ((i==1).or.(i==nos).or.(mod(i, printlen)==0)) then
call printing(t2-t1, i, tofile)
end if tulostus
!updating the data of the objects using the algorithm
call updatesub()
end do
!closing the file; note that after the loop completes the index i equals nos+1
if (i==nos+1) then
call writetofilesub(nos, filename, tofile, .True.)
end if
end subroutine run
end program main
|
lemma is_pole_basic': assumes "f holomorphic_on A" "open A" "0 \<in> A" "f 0 \<noteq> 0" "n > 0" shows "is_pole (\<lambda>w. f w / w ^ n) 0" |
[STATEMENT]
lemma vimage_pair[simp]: "Pair x -` {p} = (if x = fst p then {snd p} else {})"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Pair x -` {p} = (if x = fst p then {snd p} else {})
[PROOF STEP]
by auto |
import topology.metric_space.basic
/-!
# Pseudometric spaces
A pseudometric on a set (or type) X is a distance function obeying
all the axioms of a metric except possibly the axiom d(x,y) = 0 ↔ x = y.
-/
/-- A function d : X^2 → ℝ is a *pseudometric* if it satisfies the axioms
for a metric space apart from possibly the axiom saying d(x,y)=0 -> x=y -/
class is_pseudometric {X : Type} (d : X → X → ℝ) :=
(d_self : ∀ x : X, d x x = 0)
(d_comm : ∀ x y : X, d x y = d y x)
(d_triangle : ∀ x y z : X, d x z ≤ d x y + d y z)
-- ignore this boilerplate code: it's restating the lemmas to make them easier to use
variable {X : Type}
lemma d_self (d : X → X → ℝ) [is_pseudometric d] :
∀ x : X, d x x = 0 := @is_pseudometric.d_self X d _
lemma d_comm (d : X → X → ℝ) [is_pseudometric d] :
∀ x y : X, d x y = d y x := @is_pseudometric.d_comm X d _
lemma d_triangle (d : X → X → ℝ) [is_pseudometric d] :
∀ x y z : X, d x z ≤ d x y + d y z := @is_pseudometric.d_triangle X d _
-- fun fact: we never included the axiom that d x y ≥ 0, because it follows
-- from the other axioms!
variables (d : X → X → ℝ) [is_pseudometric d]
theorem d_nonneg {x y : X} : 0 ≤ d x y :=
begin
-- First note 0 = d(x,x) ≤ d(x,y)+d(y,x) = 2d(x,y)
have h2 : 0 ≤ 2 * d x y,
calc 0 = d x x : by rw d_self d
... ≤ d x y + d y x : by refine d_triangle d _ _ _
... = d x y + d x y : by rw d_comm d
... = 2 * d x y : by ring,
-- and now the result is obvious
linarith
end
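-- A minimal sanity check (an illustrative addition, not from the original file):
-- the constant-zero function satisfies all three axioms, so it gives the
-- trivial pseudometric on any type; each field reduces to `0 = 0` or `0 ≤ 0 + 0`.
example : is_pseudometric (λ x y : X, (0 : ℝ)) :=
{ d_self := λ x, rfl,
  d_comm := λ x y, rfl,
  d_triangle := λ x y z, by norm_num }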
|
State Before: ⊢ bernoulli 0 = 1 State After: no goals Tactic: simp [bernoulli] |
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\pagestyle{plain}
%Russian-specific packages
\usepackage[T2A]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage[russian]{babel}
\begin{document}
\title{A Review of Publications on Medical Robotics for the Period 2019-2021}
\author{\IEEEauthorblockN{Mark Eremenko}
\IEEEauthorblockA{\textit{School of Computer Science and Robotics} \\
\textit{Tomsk Polytechnic University}\\
Tomsk, Russia \\
[email protected]}
\and
\IEEEauthorblockN{Rostislav Yavorskiy}
\IEEEauthorblockA{\textit{School of Computer Science and Robotics} \\
\textit{Tomsk Polytechnic University}\\
Tomsk, Russia \\
[email protected]}
}
\maketitle
\begin{abstract}
This review of publications on medical robotics for the period 2019-2021 aggregates information on more than 150 of the most relevant publications on this topic in the English-language scientific literature.
\end{abstract}
\begin{IEEEkeywords}
medical robotics, healthcare robotics, remote surgery
\end{IEEEkeywords}
\medskip
\subsubsection{Erin, Onder, Mustafa Boyvat, Mehmet Efe Tiryaki, Martin Phelan, and Metin Sitti. "Magnetic resonance imaging system–driven medical robotics." Advanced Intelligent Systems 2, no. 2 (2020): 1900110.}
see \cite{erin2020magnetic}
Magnetic resonance imaging (MRI) system–driven medical robotics is an emerging field that aims to use clinical MRI systems not only for medical imaging but also for actuation, localization, and control of medical robots. Submillimeter scale resolution of MR images for soft tissues combined with the electromagnetic gradient coil–based magnetic actuation available inside MR scanners can enable theranostic applications of medical robots for precise image-guided minimally invasive interventions. MRI-driven robotics typically does not introduce new MRI instrumentation for actuation but instead focuses on converting already available instrumentation for robotic purposes. To use the advantages of this technology, various medical devices such as untethered mobile magnetic robots and tethered active catheters have been designed to be powered magnetically inside MRI systems. Herein, the state-of-the-art progress, challenges, and future directions of MRI-driven medical robotic systems are reviewed.
\medskip
\subsubsection{Strydom, Mario, Artur Banach, Liao Wu, Anjali Jaiprakash, Ross Crawford, and Jonathan Roberts. "Anatomical joint measurement with application to medical robotics." IEEE Access 8 (2020): 118510-118524.}
see \cite{strydom2020anatomical}
Robotic-assisted orthopaedic procedures demand accurate spatial joint measurements. Tracking of human joint motion is challenging in many applications, such as in sport motion analyses. In orthopaedic surgery, these challenges are even more prevalent, where small errors may cause iatrogenic damage in patients - highlighting the need for robust and precise joint and instrument tracking methods. In this study, we present a novel kinematic modelling approach to track any anatomical points on the femur and / or tibia by exploiting optical tracking measurements combined with a priori computed tomography information. The framework supports simultaneous tracking of anatomical positions, from which we calculate the pose of the leg (joint angles and translations of both the hip and knee joints) and of each of the surgical instruments. Experimental validation on cadaveric data shows that our method is capable of measuring these anatomical regions with sub-millimetre accuracy, with a maximum joint angle uncertainty of ±0.47°. This study is a fundamental step in robotic orthopaedic research, which can be used as a ground-truth for future research such as automating leg manipulation in orthopaedic procedures.
\medskip
\subsubsection{von Haxthausen, Felix, Sven Böttger, Daniel Wulff, Jannis Hagenah, Verónica García-Vázquez, and Svenja Ipsen. "Medical robotics for ultrasound imaging: current systems and future trends." Current Robotics Reports (2021): 1-17.}
see \cite{von2021medical}
Purpose of Review
This review provides an overview of the most recent robotic ultrasound systems that have contemporary emerged over the past five years, highlighting their status and future directions. The systems are categorized based on their level of robot autonomy (LORA).
Recent Findings
Teleoperating systems show the highest level of technical maturity. Collaborative assisting and autonomous systems are still in the research phase, with a focus on ultrasound image processing and force adaptation strategies. However, missing key factors are clinical studies and appropriate safety strategies. Future research will likely focus on artificial intelligence and virtual/augmented reality to improve image understanding and ergonomics.
Summary
A review on robotic ultrasound systems is presented in which first technical specifications are outlined. Hereafter, the literature of the past five years is subdivided into teleoperation, collaborative assistance, or autonomous systems based on LORA. Finally, future trends for robotic ultrasound systems are reviewed with a focus on artificial intelligence and virtual/augmented reality.
\medskip
\subsubsection{Li, Kun, and Joel W. Burdick. "Human motion analysis in medical robotics via high-dimensional inverse reinforcement learning." The International Journal of Robotics Research 39, no. 5 (2020): 568-585.}
see \cite{li2020human}
This work develops a novel high-dimensional inverse reinforcement learning (IRL) algorithm for human motion analysis in medical, clinical, and robotics applications. The method is based on the assumption that a surgical robot operators’ skill or a patient’s motor skill is encoded into the innate reward function during motion planning and recovered by an IRL algorithm from motion demonstrations. This class of applications is characterized by high-dimensional sensory data, which is computationally prohibitive for most existing IRL algorithms. We propose a novel function approximation framework and reformulate the Bellman optimality equation to handle high-dimensional state spaces efficiently. We compare different function approximators in simulated environments, and adopt a deep neural network as the function approximator. The technique is applied to evaluating human patients with spinal cord injuries under spinal stimulation, and the skill levels of surgical robot operators. The results demonstrate the efficiency and effectiveness of the proposed method.
\medskip
\subsubsection{Fotouhi, Javad, Tianyu Song, Arian Mehrfard, Giacomo Taylor, Qiaochu Wang, Fengfan Xian, Alejandro Martin-Gomez et al. "Reflective-ar display: An interaction methodology for virtual-to-real alignment in medical robotics." IEEE Robotics and Automation Letters 5, no. 2 (2020): 2722-2729.}
see \cite{fotouhi2020reflective}
Robot-assisted minimally invasive surgery has shown to improve patient outcomes, as well as reduce complications and recovery time for several clinical applications. While increasingly configurable robotic arms can maximize reach and avoid collisions in cluttered environments, positioning them appropriately during surgery is complicated because safety regulations prevent automatic driving. We propose a head-mounted display (HMD) based augmented reality (AR) system designed to guide optimal surgical arm set up. The staff equipped with HMD aligns the robot with its planned virtual counterpart. In this user-centric setting, the main challenge is the perspective ambiguities hindering such collaborative robotic solution. To overcome this challenge, we introduce a novel registration concept for intuitive alignment of AR content to its physical counterpart by providing a multi-view AR experience via reflective-AR displays that simultaneously show the augmentations from multiple viewpoints. Using this system, users can visualize different perspectives while actively adjusting the pose to determine the registration transformation that most closely superimposes the virtual onto the real. The experimental results demonstrate improvement in the interactive alignment a virtual and real robot when using a reflective-AR display. We also present measurements from configuring a robotic manipulator in a simulated trocar placement surgery using the AR guidance methodology.
\medskip
\subsubsection{Ozmen, M. Mahir, Asutay Ozmen, and Çetin Kaya Koç. "Artificial Intelligence for Next-Generation Medical Robotics." In Digital Surgery, pp. 25-36. Springer, Cham, 2021.}
see \cite{ozmen2021artificial}
Technology-based advancements have the potential to empower every surgeon with the ability to improve the quality of global surgical care. Innovation in robotic surgery will continue to parallel advancements in technology, especially with the considerable progress in computer science and artificial intelligence (AI). It is also known that high-quality surgical techniques and skill sets correlate positively with patient outcomes. AI could help pool this surgical experience to standardize decision-making, thus creating a global consensus in operating theaters worldwide. Next-generation surgical robots will be integral in augmenting a surgeon’s skills effectively to achieve accuracy and high precision during complex procedures. The next level of surgery will be achieved by surgical robotics which likely evolve to include AI and machine learning.
\medskip
\subsubsection{Zheng, Jia, Shuangyi Wang, James Housden, Zeng-Guang Hou, Davinder Singh, and Kawal Rhode. "A Safety Joint with Passive Compliant and Manual Override Mechanisms for Medical Robotics." In 2021 IEEE International Conference on Intelligence and Safety for Robotics (ISR), pp. 1-4. IEEE, 2021.}
see \cite{zheng2021safety}
Force and collision control is a primary concern to guarantee the safe use of medical robots as such systems normally need to interact with clinicians and patients, while at the same time cooperate with other devices. Among different strategies, passive features working with intrinsically safe components are treated as one of the most effective approaches and therefore deserve in-depth study. In this study, we focus on the design of a novel back-drivable safety joint that incorporates a torque limiter with passive compliance and a manual override mechanism to disconnect the robotic joint from its drive train. The design and working principle of the proposed joint are explained, followed by the mathematical analysis of its performance and the impacts of parameters. An example of the design was manufactured and tested experimentally to validate the working concepts. It is concluded that the proposed multi-functional safety joint provides more versatility and customization to the design of bespoke medical robots and would limit the maximum torque that can be exerted onto the patient, allow the clinician to push the joint back, and enable the operator to switch back to manual override.
\medskip
\subsubsection{Starszak, Krzysztof, Michał Smoczok, and Weronika Starszak. "New technologies in health care—medical robotics and innovations during the COVID-19 pandemic, considering Polish achievements." Chirurgia Polska (2021).}
see \cite{starszak2021new}
In March 2020, the WHO declared a state of a pandemic, which encompassed the whole world. During
the pandemic, numerous new solutions have been introduced and some of the already existing ones
have been improved to increase the safety, both of patients and healthcare professionals. The publication
aims to present the achievements in the field of innovations with the effects of the Covid-19 pandemic,
considering the activities of Polish scientists. The literature and current data were reviewed and useful in
the topic of research were selected. The pandemic period showed the interdisciplinary nature of medical
robots, both for surgical and diagnostic purposes. Robots are widely used in cleaning and disinfecting
rooms. Patient psychological care systems also deserve attention - during the pandemic, the number of
those in need suffering from mental diseases increased. Medical robotics should be developed and used
more and more commonly.
\medskip
\subsubsection{Makhataeva, Zhanat, and Huseyin Atakan Varol. "Augmented reality for robotics: a review." Robotics 9, no. 2 (2020): 21.}
see \cite{makhataeva2020augmented}
Augmented reality (AR) is used to enhance the perception of the real world by integrating virtual objects to an image sequence acquired from various camera technologies. Numerous AR applications in robotics have been developed in recent years. The aim of this paper is to provide an overview of AR research in robotics during the five year period from 2015 to 2019. We classified these works in terms of application areas into four categories: (1) Medical robotics: Robot-Assisted surgery (RAS), prosthetics, rehabilitation, and training systems; (2) Motion planning and control: trajectory generation, robot programming, simulation, and manipulation; (3) Human-robot interaction (HRI): teleoperation, collaborative interfaces, wearable robots, haptic interfaces, brain-computer interfaces (BCIs), and gaming; (4) Multi-agent systems: use of visual feedback to remotely control drones, robot swarms, and robots with shared workspace. Recent developments in AR technology are discussed followed by the challenges met in AR due to issues of camera localization, environment mapping, and registration. We explore AR applications in terms of how AR was integrated and which improvements it introduced to corresponding fields of robotics. In addition, we summarize the major limitations of the presented applications in each category. Finally, we conclude our review with future directions of AR research in robotics. The survey covers over 100 research works published over the last five years.
\medskip
\subsubsection{Scimeca, Luca, Fumiya Iida, Perla Maiolino, and Thrishantha Nanayakkara. "Human-Robot Medical Interaction." In Companion of the 2020 ACM/IEEE International Conference on Human-Robot Interaction, pp. 660-661. 2020.}
see \cite{scimeca2020human}
Advances in Soft Robotics, Haptics, AI and simulation have changed the medical robotics field, allowing robotics technologies to be deployed in medical environments. In this context, the relationship between doctors, robotics devices, and patients is fundamental, as only with the synergetic collaboration of the three parties results in medical robotics can be achieved. This workshop focuses on the use of soft robotics technologies, sensing, AI and Simulation, to further improve medical practitioner training, as well as the creation of new tools for diagnosis and healthcare through the medical interaction of humans and robots. The Robo-patient is more specifically the idea behind the creation of sensorised robotic patient with controllable organs to present a given set of physiological conditions. This is both to investigate the embodied nature of haptic interaction in physical examination, as well as the doctor-patient relationship to further improve medical practice through robotics technologies. The Robo-doctor aspect is also relevant, with robotics prototypes performing, or helping to perform, medical diagnosis. In the workshop, key technologies as well as future views in the field will be discussed both by expert and new upcoming researchers.
\medskip
\subsubsection{DCRUST, Murthal, Jasbir Singh Saini, and Sanjeev Kumar. "Internet of Medical Things." Patron in Chief: 145.}
see \cite{dcrustinternet}
Internet of Medical Things (IoMT) enables machine to machine interaction, real time
intervention, better affordability and more reliability in future of healthcare. IoMT has
applications in chronic disease management, tele-health, lifestyle assessment, remote
intervention, drug management, medical robotics, etc. The various technologies used for
implementing IoMT involve the macro-level IoT architecture comprising layers to enable
healthcare solutions. These smart medical devices when connected to other medical devices
allow patient management, surgical intervention, etc. The key players in the market of smart
medical things dominate the Intellectual field worldwide. Keeping in view the benefits and
related challenges, IoMT appears to be a valuable solution to benefit healthcare monitoring,
diagnosis and treatment procedures. We have incorporated the analysis of such applications in
this paper.
\medskip
\subsubsection{Goel, Rahul. "Role of Robotics in Health Care of the Future." Journal of Medical Academics 3, no. 1 (2020): 28.}
see \cite{goel2020role}
As the demands on medical professionals and healthcare infrastructure increase, the introduction of automation via robotics is inevitable.
Robotics originated in science fiction literature and from there industrial robotic arms, and more recently surgical robotic devices have been
created. In this article, we examine the types of robots, their development, and upcoming projects.
\medskip
\subsubsection{Dixit, Pooja, Manju Payal, Nidhi Goyal, and Vishal Dutt. "Robotics, AI and IoT in Medical and Healthcare Applications." AI and IoT‐Based Intelligent Automation in Robotics (2021): 53-73.}
see \cite{dixit2021robotics}
Today, the vital role of robotics, AI and IOT technologies have recast healthcare. Healthcare apps enabled by these technologies help manage the health of consumers, thus ensuring their health. The main focus of this chapter is to study the applications for the techniques that make the healthcare system more affordable, provide better outcomes and also access patients' records in order to provide better solutions. Thus, when these technologies merge, there is a chance that they will be capable of better operational efficiency for tracking and monitoring patients, with automation making more optimistic solutions possible.
\medskip
\subsubsection{Niu, Guojun, Bo Pan, Yili Fu, and Cuicui Qu. "Development of a new medical robot system for minimally invasive surgery." IEEE Access 8 (2020): 144136-144155.}
see \cite{niu2020development}
This article presents the development of a new medical robot system comprising a spherical remote center motion (RCM) mechanism with modular design and two mechanical decoupling methods for Minimally Invasive Surgery (MIS). We achieved excellent comprehensive performance indices through a novel multi-objective optimization model comprising four optimization objective functions, three constrained conditions and two optimization variables. In order to enhance the manipulability, remove the coupling between motors, and reduce the control difficulty, two new decoupling mechanism means were proposed to remove coupling motion between the wrist and pincers, coupling motion between the translational joint of mobile platform and four interface disks of surgical instrument as a results of rear drive motor, respectively. The control system architecture is designed to include intuitive motion control, incremental motion control, and proportional motion control. Master-slave attitude registration and surgical instrument replacement strategies improve the master-slave control efficiency. We tested the spherical RCM mechanism performance indices and developed two mechanical decoupling methods and a master-slave control algorithm. Our experimental test results validated that fixing point accuracy, the coupling motions, the positioning and repeated positioning accuracy of the MIS robot, and master-slave control algorithm meet the requirements of MIS. Successful animal experiments confirmed effectiveness of the novel MIS robot system.
\medskip
\subsubsection{Begishev, Ildar, Zarina Khisamova, and Vitaly Vasyukov. "Technological, Ethical, Environmental and Legal Aspects of Robotics." In E3S Web of Conferences, vol. 244, p. 12028. EDP Sciences, 2021.}
see \cite{begishev2021technological}
Robotics is considered by modern researchers from various positions. The most common technical approach to the study of this concept, which examines the current state and achievements in the field of robotics, as well as the prospects for its development. Also, quite often in recent years, legal experts have begun to address problems related to the development of robotics, focusing on issues related to the legal personality of robots and artificial intelligence, as well as the responsibility of AI for causing harm. A separate direction in the field of robotics research is the analysis of this concept and the relations associated with it, from the standpoint of morality, ethics and technologies.
\medskip
\subsubsection{Boiadjiev, Tony, George Boiadjiev, Kamen Delchev, Ivan Chavdarov, and Roumen Kastelov. "Orthopedic Bone Drilling Robot ODRO: Basic Characteristics and Areas of Applications." In Medical Robotics. IntechOpen, 2021.}
see \cite{boiadjiev2021orthopedic}
The orthopedic manipulation “bone drilling” is the most executed one in the orthopedic surgery concerning the operative treatment of bone fractures. The drilling process is characterized by a number of input and output parameters. The most important input parameters are the feed rate [mm/s] and the drill speed [rpm]. They play significant role for the final result (the output parameters): thermal and mechanical damages of the bone tissue as well as hole quality. During the manual drilling these parameters are controlled by the surgeon on the base of his practical skills. But the optimal results of the manipulations can be assured only when the input parameters are under control during an automatic execution of the drilling process. This work presents the functional characteristics of the handheld robotized system ODRO (Orthopedic Drilling Robot) for automatic bone drilling. Some experimental results are also shown. A comparison is made between the similar systems which are known in the literature, some of which are available on the market. The application areas of ODRO in the orthopedic surgery practice are underlined.
\medskip
\subsubsection{Ginoya, Tirth, Yaser Maddahi, and Kourosh Zareinia. "A historical review of medical robotic platforms." Journal of Robotics 2021 (2021).}
see \cite{ginoya2021historical}
This paper provides a brief history of medical robotic systems. Since the first use of robots in medical procedures, there have been countless companies competing to developed robotic systems in hopes to dominate a field. Many companies have succeeded, and many have failed. This review paper shows the timeline history of some of the old and most successful medical robots and new robotic systems. As the patents of the most successful system, i.e., Da Vinci® Surgical System, have expired or are expiring soon, this paper can provide some insights for new designers and manufacturers to explore new opportunities in this field.
\medskip
\subsubsection{Gruionu, Lucian Gheorghe, Catalin Constantinescu, Andreea Iacob, and Gabriel Gruionu. "Robotic System for Catheter Navigation during Medical Procedures." In Applied Mechanics and Materials, vol. 896, pp. 211-217. Trans Tech Publications Ltd, 2020.}
see \cite{gruionu2020robotic}
Lung cancer is the most common cancer globally with over 2 million new cases diagnosed every year. Fortunately, if caught early, the likelihood of survival is greatly improved. If diagnosed in Stage I, survival rates are > 75 \% over 5 years, vs. just 1 \% if diagnosed in Stage IV. Early diagnosis requires finding and sampling (biopsy) small, peripheral nodules that are located in the parenchyma of the lung and predominately outside small airways. Currently, for early diagnosis a bronchoscope is inserted into the lung airway but due to large size it cannot reach the small airways. Therefore, the doctor has to advance a sharp biopsy needle blindly from the tip of the bronchoscope and into the lung tissue in the approximate direction of the nodule. This blind procedure has low accuracy and carries a high risk of misdiagnosis. Currently, to improve the accuracy, real time x-ray (fluoroscopy) is used, which causes exposure of the patient and physician to harmful radiation. Computer and image assisted surgery and medical robotics present viable solutions but are not optimal at present. The scope of our research was to develop a robotic solution for increased precision and accuracy of early diagnosis and treatment of lung cancer, to increase procedure success rate, decrease patient radiation and stress exposure, and reduce the procedure cost. For this purpose, we developed an advanced prototype of a robotic system which is small in size, easy to use and effective. To demonstrate its effectiveness in navigating to peripheral small size lung cancer lesions, we performed laboratory tests on a realistic lung airway model. The preliminary tests of a novel medical robot using a complex lung airway model proved that our catheter driving robotic system is working as designed and allows navigation, through a complex 3D channels structure like the bronchial tree, in both manipulator and robotic modes without fluoroscopy scanning. The robotic system is more precise and stable, and can avoid patient injury and instrument damage due to accidental impact with the airway wall. Because it could be controlled from a different room via the software platform, using this robotic system can drastically reduce radiation exposure of the patient and totally avoid the exposure of the doctor. Another benefit of the proposed robotic system is that it uses currently available catheters in which a reusable electromagnetic guide wire is temporarily inserted to guide the tip of the catheter towards hard to reach targets. After the target is confirmed, the sensor can be retracted and the catheter can be used for its routine function such as biopsy collection. Future development will include placement of a force sensor at the tip of the catheter to “feel” the wall and adapt the speed of insertion in order to avoid wall damage and an improved algorithm to increase the speed in the automatic mode.
\medskip
\subsubsection{Tian, Xiumei, and Yan Xu. "Low Delay Control Algorithm of Robot Arm for Minimally Invasive Medical Surgery." IEEE Access 8 (2020): 93548-93560.}
see \cite{tian2020low}
Minimally invasive surgical robots have received more and more attention from medical patients because of their higher surgical accuracy and higher safety than doctors. Minimally invasive surgery is rapidly revolutionizing the treatment of traditional surgery. In order to solve the problem that the surgical robot has a redundant degree of freedom, which makes the kinematics solution more complicated, this paper analyzes the kinematics of the coordinate system block. Aiming at the problem that the strategy search algorithm needs to re-learn when the target pose changes, a convolutional neural network control strategy is studied and constructed. By designing the structure of the convolutional neural network visual layer and motor control layer, the loss function and sampling of the training process are established. Aiming at the problem of long training time of convolutional neural network, an effective pre-training method is proposed to shorten the training time of the neural network. At the same time, the effectiveness of the above method and the end-to-end control of the convolutional neural network strategy are verified through simulation experiments. The physical structure of the manipulator body is analyzed, and the forward and inverse kinematic equations of the manipulator are established by the D-H method. Monte Carlo method was used to analyze the working space of the manipulator, and low-latency control and simulation experiments were carried out on the movement trajectory of the manipulator in joint space and Cartesian space. The results show that the low-latency control algorithm in this paper is effective to control the mechanical arm of the minimally invasive medical surgery robot.
\medskip
\subsubsection{Kennedy-Metz, Lauren R., Pietro Mascagni, Antonio Torralba, Roger D. Dias, Pietro Perona, Julie A. Shah, Nicolas Padoy, and Marco A. Zenati. "Computer Vision in the Operating Room: Opportunities and Caveats." IEEE transactions on medical robotics and bionics 3, no. 1 (2020): 2-10.}
see \cite{kennedy2020computer}
Effectiveness of computer vision techniques has been demonstrated through a number of applications, both within and outside healthcare. The operating room environment specifically is a setting with rich data sources compatible with computational approaches and high potential for direct patient benefit. The aim of this review is to summarize major topics in computer vision for surgical domains. The major capabilities of computer vision are described as an aid to surgical teams to improve performance and contribute to enhanced patient safety. Literature was identified through leading experts in the fields of surgery, computational analysis and modeling in medicine, and computer vision in healthcare. The literature supports the application of computer vision principles to surgery. Potential applications within surgery include operating room vigilance, endoscopic vigilance, and individual and team-wide behavioral analysis. To advance the field, we recommend collecting and publishing carefully annotated datasets. Doing so will enable the surgery community to collectively define well-specified common objectives for automated systems, spur academic research, mobilize industry, and provide benchmarks with which we can track progress. Leveraging computer vision approaches through interdisciplinary collaboration and advanced approaches to data acquisition, modeling, interpretation, and integration promises a powerful impact on patient safety, public health, and financial costs.
\medskip
\subsubsection{Maibaum, Arne, Andreas Bischof, Jannis Hergesell, and Benjamin Lipp. "A critique of robotics in health care." AI \& SOCIETY (2021): 1-11.}
see \cite{maibaum2021critique}
When the social relevance of robotic applications is addressed today, the use of assistive technology in care settings is almost always the first example. So-called care robots are presented as a solution to the nursing crisis, despite doubts about their technological readiness and the lack of concrete usage scenarios in everyday nursing practice. We inquire into this interconnection of social robotics and care. We show how both are made available for each other in three arenas: innovation policy, care organization, and robotic engineering. First, we analyze the discursive “logics” of care robotics within European innovation policy, second, we disclose how care robotics is encountering a historically grown conflict within health care organization, and third we show how care scenarios are being used in robotic engineering. From this diagnosis, we derive a threefold critique of robotics in healthcare, which calls attention to the politics, historicity, and social situatedness of care robotics in elderly care.
\medskip
\subsubsection{Kyrarini, Maria, Fotios Lygerakis, Akilesh Rajavenkatanarayanan, Christos Sevastopoulos, Harish Ram Nambiappan, Kodur Krishna Chaitanya, Ashwin Ramesh Babu, Joanne Mathew, and Fillia Makedon. "A survey of robots in healthcare." Technologies 9, no. 1 (2021): 8.}
see \cite{kyrarini2021survey}
In recent years, with the current advancements in Robotics and Artificial Intelligence (AI), robots have the potential to support the field of healthcare. Robotic systems are often introduced in the care of the elderly, children, and persons with disabilities, in hospitals, in rehabilitation and walking assistance, and other healthcare situations. In this survey paper, the recent advances in robotic technology applied in the healthcare domain are discussed. The paper provides detailed information about state-of-the-art research in care, hospital, assistive, rehabilitation, and walking assisting robots. The paper also discusses the open challenges healthcare robots face to be integrated into our society.
\medskip
\subsubsection{Tsigie, Sisay Ebabye, and Gizealew Alazie Dagnaw. "The Role of Robotics Technology and Internet of Things for Industry 4.0 Realization." International Journal 10, no. 2 (2021).}
see \cite{tsigie2021role}
Robotic systems can already proactively monitor and adapt to changes in a production line. Nowadays, internet of things and robotic systems are key drivers of technological innovation trends. Major companies are now making investments in machine learning-powered approaches to improve in principle all aspects of manufacturing. Connected devices, sensors, and similar advancements allow people and companies to do things they wouldn't even dream of in earlier eras. To realize this, a time series feature extraction approach is selected. Industrial internet of things solutions are poised to transform many industry verticals including healthcare, retail, automotive, and transport. For many industries, the industrial internet of things has significantly improved reliability, production, and customer satisfaction. The internet of things and robotics are coming together to create the internet of robotic things. The industrial internet of things is a subset of industry 4.0. It can encourage smartness at a bigger level in industrial robots.
\medskip
\subsubsection{Daghottra, Ankita, and Divya Jain. "From Humans to Robots: Machine Learning for Healthcare." (2021).}
see \cite{daghottra2021humans}
Machine learning is a branch of artificial intelligence (AI) through which identification of patterns in data is done and with help of these patterns, useful outcomes or conclusions are predicted. One of the most prominent or frequently studied applications of machine learning is the surgical phase or robotic surgery. This makes machine learning an important part of research in robotics. The implementation of this technology in the field of healthcare aims in improving medical practices resulting in more precise and advanced surgical assessments. This paper aims in outlining the implementation and applications of machine learning related to robotics in the field of healthcare. Machine learning aims in generating positive outcomes with assumptions. The objective of this paper is to bring light on how these technologies have become an important part of providing more effective and comprehensive strategies which eventually add to positive patient outcomes and more advanced healthcare practices.
\medskip
\subsubsection{Porkodi, S., and D. Kesavaraja. "Healthcare Robots Enabled with IoT and Artificial Intelligence for Elderly Patients." AI and IoT‐Based Intelligent Automation in Robotics (2021): 87-108.}
see \cite{porkodi2021healthcare}
As the demand for doctors is increasing day by day, a need has arisen to provide personalized healthcare for elderly patients and those with chronic conditions in addition to taking necessary actions during emergency situations. So, healthcare in the digital era is experimenting with adopting robotics to provide personal healthcare to patients in need. In this chapter, the needs of elderly patients are identified and solutions are provided with a personalized robot. Emergency situations can also be predicted more quickly with the vital information provided by IoT devices and necessary action can be suggested using artificial intelligence. IoT-based wearable devices are used to obtain necessary health data from patients. These data are processed and decision-making is carried out by AI, whereas the required action is taken by the designed robot. Humanoid robots can be designed for providing healthcare and physical assistance to elderly patients and those with chronic conditions. Animal-like robots can also be designed that act like pets as a solution for those with psychosocial issues. The major goal is to review robots to develop a robot in the future that can prevent interventions, perform multiple functions, provide motivational interaction style, provide better educational data, and alert an ambulance in case of an emergency.
\medskip
\subsubsection{Luo, Shi, Xi Zhou, Xinyue Tang, Jialu Li, Dacheng Wei, Guojun Tai, Zongyong Chen et al. "Microconformal electrode-dielectric integration for flexible ultrasensitive robotic tactile sensing." Nano Energy 80 (2021): 105580.}
see \cite{luo2021microconformal}
Flexible pressure sensors have attracted a lot of interest because of their widespread applications in healthcare, robotics, wearable smart devices, and human-machine interfaces. While microstructuring both the electrodes and dielectrics has been proven to significantly improve the sensitivity and response speed of piezocapacitive sensors, the synergetic influence of microstructured electrodes and dielectrics has not been discussed yet. Herein, a flexible piezocapacitive sensor is demonstrated with a microstructured graphene nanowalls (GNWs) electrode and a conformally microstructured dielectric layer that consists of polydimethylsiloxane (PDMS) and a piezoelectric enhancer of zinc oxide (ZnO). Such a microstructured assembly with piezoelectric film constructs a microconformal GNWs/PDMS/ZnO electrode-dielectric integration (MEDI), which can effectively enhance the sensitivity and the pressure-response range. The piezocapacitive sensor exhibits an ultra-high sensitivity (22.3 kPa$^{-1}$), fast response speed (25 ms), and broad pressure range (22 kPa). The finite element analysis indicates that the polarized electric field caused by the ZnO film's piezoelectric effect greatly enhances the capacitance of the sensor. Moreover, the integration of the electrode and dielectric layer can eliminate the slippage between contiguous layers, which effectively increases the mechanical stability. Benefitting from the outstanding comprehensive performance, the potential application in robotic tactile perception has been successfully demonstrated, including object grabbing, braille recognition, and roughness detection. The MEDI structure in capacitive sensors provides a new approach to achieve high-performance E-skin, which delivers great potential for applications in next-generation robotic tactile sensing.
\medskip
\subsubsection{Lestingi, Livia, Mehrnoosh Askarpour, Marcello M. Bersani, and Matteo Rossi. "Formal verification of human-robot interaction in healthcare scenarios." In International Conference on Software Engineering and Formal Methods, pp. 303-324. Springer, Cham, 2020.}
see \cite{lestingi2020formal}
We present a model-driven approach for the creation of formally verified scenarios involving human-robot interaction in healthcare settings. The work offers an innovative take on the application of formal methods to human modeling, as it incorporates physiology-related aspects. The model, based on the formalism of Hybrid Automata, includes a stochastic component to capture the variability of human behavior, which makes it suitable for Statistical Model Checking. The toolchain is meant to be accessible to a wide range of professional figures. Therefore, we have laid out a user-friendly representation format for the scenario, from which the full formal model is automatically generated and verified through the Uppaal tool. The outcome is an estimation of the probability of success of the mission, based on which the user can refine the model if the result is not satisfactory.
\medskip
\subsubsection{Cifuentes, Carlos A., Maria J. Pinto, Nathalia Céspedes, and Marcela Múnera. "Social robots in therapy and care." Current Robotics Reports (2020): 1-16.}
see \cite{cifuentes2020social}
Purpose of Review
This work presents a comprehensive overview of social robots in therapy and the healthcare of children, adults, and elderly populations. According to recent evidence in this field, the primary outcomes and limitations are highlighted. This review points out the implications and requirements for the proper deployment of social robots in therapy and healthcare scenarios.
Recent Findings
Social robots are a current trend that is being studied in different healthcare services. Evidence highlights the potential and favorable results due to the support and assistance provided by social robots. However, some side effects and limitations are still under research.
Summary
Social robots can play various roles in the area of health and well-being. However, further studies regarding the acceptability and perception are still required. There are challenges to be addressed, such as improvements in the functionality and robustness of these robotic systems.
\medskip
\subsubsection{James, Jesin, B. T. Balamurali, Catherine I. Watson, and Bruce MacDonald. "Empathetic Speech Synthesis and Testing for Healthcare Robots." International Journal of Social Robotics (2020): 1-19.}
see \cite{james2020empathetic}
One of the major factors that affect the acceptance of robots in Human-Robot Interaction applications is the type of voice with which they interact with humans. The robot's voice can be used to express empathy, which is an affective response of the robot to the human user. In this study, the aim is to find out if social robots with an empathetic voice are acceptable for users in healthcare applications. A pilot study using an empathetic voice spoken by a voice actor was conducted. Only prosody in speech is used to express empathy here, without any visual cues. Also, the emotions needed for an empathetic voice are identified. It was found that the emotions needed are not only the stronger primary emotions, but also the nuanced secondary emotions. These emotions are then synthesised using prosody modelling. A second study, replicating the pilot test, is conducted using the synthesised voices to investigate if empathy is perceived from the synthetic voice as well. This paper reports the modelling and synthesis of an empathetic voice, and experimentally shows that people prefer an empathetic voice for healthcare robots. The results can be further used to develop empathetic social robots that can improve people's acceptance of social robots.
\medskip
\subsubsection{Attanasio, Aleks, Bruno Scaglioni, Elena De Momi, Paolo Fiorini, and Pietro Valdastri. "Autonomy in surgical robotics." Annual Review of Control, Robotics, and Autonomous Systems 4 (2021): 651-679.}
see \cite{attanasio2021autonomy}
This review examines the dichotomy between automatic and autonomous behaviors in surgical robots, maps the possible levels of autonomy of these robots, and describes the primary enabling technologies that are driving research in this field. It is organized in five main sections that cover increasing levels of autonomy. At level 0, where the bulk of commercial platforms are, the robot has no decision autonomy. At level 1, the robot can provide cognitive and physical assistance to the surgeon, while at level 2, it can autonomously perform a surgical task. Level 3 comes with conditional autonomy, enabling the robot to plan a task and update planning during execution. Finally, robots at level 4 can plan and execute a sequence of surgical tasks autonomously.
\medskip
\subsubsection{Narejo, Ghous Bakhsh. "Robotics and Machine Learning." In Privacy Vulnerabilities and Data Security Challenges in the IoT, pp. 183-198. CRC Press, 2020.}
see \cite{narejo2020robotics}
Robotics can be described as an interdisciplinary branch of engineering that may include mechanical, electronics, information, computer, and other engineering faculties. The main purpose of robotics is to deal with the design, construction, operation, and use of robots, as well as computer devices for their control, feedback, and the processing of data.
\medskip
\subsubsection{Rahangdale, Swapnil, Dezi Maind, Sakshi Amle, Komal Yadav, Pradip Dhore, Pooja Gajbhiye, and Vaibhav Rasekar. "Meher (Medical Help Robo)." Annals of the Romanian Society for Cell Biology (2020): 298-307.}
see \cite{rahangdale2020meher}
The main objective of this project is to fabricate a robotic trolley for material handling in industries. In this project a robotic vehicle is fabricated which runs like a car, carrying necessary tools from one place to another. The motor is connected to the wheel. When the trolley is loaded with a tool or other goods, it can easily be moved where needed by means of a wireless remote controller. It can be used in industries, hospitals, etc. This paper describes the evolving role of robotics in healthcare and allied areas, with special concern for the management and control of the spread of viral or contagious diseases. The prime utilization of such robots is to minimize person-to-person contact and to ensure cleaning, sterilization, and support in hospitals and similar facilities such as quarantine. This will minimize the life threat to medical staff and doctors taking an active role in the management of such diseases. The intention of the present research is to highlight the importance of medical robotics in general and then to connect its utilization with the perspective of viral disease management, so that hospital management can direct themselves to maximize the use of medical robots for various medical procedures. This is despite the popularity of telemedicine, which is also effective in similar situations.
\medskip
\subsubsection{Ruby, J., Susan Daenke, Xianpei Li, J. Tisa, William Harry, J. Nedumaan, Mingmin Pan, J. Lepika, Thomas Binford, and PS Jagadeesh Kumar. "Integrating medical robots for brain surgical applications." J Med Surg 5, no. 1 (2020): 1-14.}
see \cite{ruby2020integrating}
Neurosurgery has customarily been at the cutting edge of advancing technologies, adopting new techniques and devices effectively in an effort to increase the safety and efficacy of brain surgery procedures. Among these adoptions is surgical robot technology. This paper features some of the more promising systems in neurosurgical robotics, covering brain surgical robots both in use and in development. The purpose of this paper is twofold: to address the most promising models for neurosurgical applications, and to examine some of the pitfalls of robotic neurosurgery given the exceptional framework of the brain. The utilization of robotic assistance and feedback guidance in surgical operations could improve the specialization of experts during the initial phase of the learning curve.
\medskip
\subsubsection{Petersen, Inga Lypunova, Weronika Nowakowska, Christian Ulrich, and Lotte NS Andreasen Struijk. "A Novel sEMG Triggered FES-Hybrid Robotic Lower Limb Rehabilitation System for Stroke Patients." IEEE Transactions on Medical Robotics and Bionics 2, no. 4 (2020): 631-638.}
see \cite{petersen2020novel}
Stroke is a leading cause of acquired disability among adults. Current rehabilitation programs result in only partial recovery of motor ability for the patients, which has resulted in an ongoing search for methods to improve the rehabilitation approaches. Therefore, this study presents a novel method for early onset of active rehabilitation by combining an end effector robot with surface electromyography (sEMG) triggered functional electrical stimulation (FES) of rectus femoris and tibialis anterior muscles. This rehabilitation system was demonstrated in 10 able-bodied experimental participants. Defining a successful exercise repetition as a fully completed exercise, from start point to end point followed by a return to start point, when FES onset is triggered by the EMG threshold, the results showed that 97 \% of the exercise repetitions were successful for a leg press exercise and 100 \% for a dorsiflexion exercise. Furthermore, an FES stimulation current amplitude of 20–53 mA was required for the leg press exercise and 10–30 mA for the dorsiflexion exercise.
\medskip
\subsubsection{Atallah, Asa B., and Sam Atallah. "Cloud Computing for Robotics and Surgery." In Digital Surgery, pp. 37-58. Springer, Cham, 2021.}
see \cite{atallah2021cloud}
This chapter is intended to be an introduction to cloud computing for surgeons and noncomputer scientists. In addition to presenting a modern history of the cloud, it explores theoretical concepts of applying cloud computer systems to next-generation medical robots and operating room infrastructures. It explains how the cloud is suited for high-scale computational tasks necessary for the integration of artificial intelligence and machine learning into tomorrow’s surgical suite and how it will provide a framework for digital surgery. Machine learning via the cloud versus single machine learning is also addressed.
\medskip
\subsubsection{Mathis-Ullrich, F., and P. M. Scheikl. "Robots in the operating room-(co) operation during surgery." Der Gastroenterologe: Zeitschrift fur Gastroenterologie und Hepatologie (2020): 1-8.}
see \cite{mathis2020robots}
Background
Medical robotics has the potential to improve surgical and endoluminal procedures by enabling high-precision movements and superhuman perception.
Objectives
To present historical, existing and future robotic assistants for surgery and to highlight their characteristics and advantages for keyhole surgery and endoscopy.
Methods
In particular, historical medical robots and conventional telemanipulators are presented and compared with minimally invasive continuum robots and novel robotic concepts from practice and research. In addition, a perspective for future generations of surgical and endoluminal robots is offered.
Conclusion
Robot-assisted medicine offers great added value for quality of intervention as well as safety for surgeons and patients. In the future, more surgical steps will be performed (semi-)autonomously and in cooperation with the experts.
\medskip
\subsubsection{Dey, Sharmita, Takashi Yoshida, Robert H. Foerster, Michael Ernst, Thomas Schmalz, and Arndt F. Schilling. "Continuous Prediction of Joint Angular Positions and Moments: A Potential Control Strategy for Active Knee-Ankle Prostheses." IEEE Transactions on Medical Robotics and Bionics 2, no. 3 (2020): 347-355.}
see \cite{dey2020continuous}
Transfemoral amputation substantially impairs locomotion. To restore the lost locomotive capability, amputees rely on knee-ankle prostheses. Theoretically, active knee-ankle prostheses may better support natural gait than their passive counterparts by replacing the missing muscle function. The control algorithms of such active devices need to comprehend the user's locomotive intention and convert it into control commands for actuating the prosthesis. For an active knee-ankle prosthesis, the gait variables to be controlled to allow the desired locomotion could be the knee angle, knee moment, ankle angle, and ankle moment. In this paper, a random forest regression model is employed for the continuous prediction of these gait variables for level-ground walking at a self-selected normal speed. Experimentally obtained thigh kinematics were the input to the random forest model. The proposed method could predict the angles and moments of the knee and ankle with high accuracy (mean $R^2$ values of 0.97 for ankle angle, 0.98 for ankle moment, 0.99 for knee angle, and 0.95 for knee moment across four able-bodied subjects). The proposed strategy shows potential for continuously controlling an active knee-ankle prosthesis for transfemoral amputees, whose thigh angular motion can be used to infer the required prosthetic moments or angles.
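The regression setup summarized above can be sketched with a few lines of scikit-learn; the arrays below are synthetic placeholders standing in for the thigh-kinematics inputs and the four knee/ankle targets, not the authors' data or their exact pipeline.
\begin{verbatim}
# Minimal sketch of multi-output random forest regression of gait variables
# from thigh kinematics, in the spirit of Dey et al. (2020).
# All data shapes and names are illustrative placeholders.
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score

rng = np.random.default_rng(0)
n = 2000
# Placeholder inputs, e.g. thigh angle, angular velocity, angular acceleration
X = rng.normal(size=(n, 3))
# Placeholder targets: knee angle, knee moment, ankle angle, ankle moment
Y = X @ rng.normal(size=(3, 4)) + 0.05 * rng.normal(size=(n, 4))

X_tr, X_te, Y_tr, Y_te = train_test_split(X, Y, test_size=0.25, random_state=0)
model = RandomForestRegressor(n_estimators=200, random_state=0)
model.fit(X_tr, Y_tr)
print("per-target R^2:", r2_score(Y_te, model.predict(X_te),
                                  multioutput="raw_values"))
\end{verbatim}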
\medskip
\subsubsection{Leporini, Alice, Elettra Oleari, Carmela Landolfo, Alberto Sanna, Alessandro Larcher, Giorgio Gandaglia, Nicola Fossati et al. "Technical and functional validation of a teleoperated multirobots platform for minimally invasive surgery." IEEE Transactions on Medical Robotics and Bionics 2, no. 2 (2020): 148-156.}
see \cite{leporini2020technical}
Nowadays, robotic-assisted minimally invasive surgeries (R-MIS) are the procedures of choice for treatments that demand high accuracy and minimal invasiveness, thanks to their ability to empower surgeons' dexterity and skills. Research on a new Multi-Robots Surgery (MRS) platform is central to the development of the new SARAS surgical robotic platform, which aims at autonomously carrying out the assistant's tasks during R-MIS procedures. In this work, we present the SARAS MRS platform validation protocol, framed in order to assess: (i) its technical performance in purely dexterity-based exercises, and (ii) its functional performance. The results obtained show a prototype able to put users in the condition of accomplishing the requested tasks (both dexterity- and surgery-related), albeit with reasonably lower performance with respect to the industrial standard. The main aspects requiring further improvement are the stability of the end effectors, depth perception, and the vision systems, which are to be enriched with dedicated virtual fixtures. SARAS aims to reduce the main surgeon's workload through the automation of assistive tasks, which would benefit both surgeons and patients by facilitating the surgery and reducing the operation time.
\medskip
\subsubsection{Perez-Guagnelli, Eduardo, Joanna Jones, Ahmet H. Tokel, Nicolas Herzig, Bryn Jones, Shuhei Miyashita, and Dana D. Damian. "Characterization, simulation and control of a soft helical pneumatic implantable robot for tissue regeneration." IEEE Transactions on Medical Robotics and Bionics 2, no. 1 (2020): 94-103.}
see \cite{perez2020characterization}
Therapies for tissue repair and regeneration have remained sub-optimal, with limited approaches investigated to improve their effectiveness, dynamic and control response. We introduce a Soft Pneumatic Helically-Interlayered Actuator (SoPHIA) for tissue repair and regeneration of tubular tissues. The actuator features shape configurability in two and three dimensions for minimal or non-invasive in vivo implantation; multi-modal therapy to apply mechanical stimulation axially and radially, in accordance to the anatomy of tubular tissues; and anti-buckling structural strength. We present a model and characteristics of this soft actuator. SoPHIA reaches up to 36.3 \% of elongation with respect to its initial height and up to 7 N of force when pressurized at 38 kPa against anatomically-realistic spatial constraints. Furthermore, we introduce the capabilities of a physical in vivo simulator of biological tissue stiffness and growth, for the evaluation of the soft actuator in physiologically-relevant conditions. Lastly, we propose a model-based multi-stage control of the axial elongation of the actuator according to the tissue's physiological response. SoPHIA has the potential to reduce the invasiveness of surgical interventions and increase the effectiveness in growing tissue due to its mechanically compliant, configurable and multi-modal design.
\medskip
\subsubsection{Trefry, Elizabeth, and Tennille Gifford. "The Implications of Robots in Health Care."}
see \cite{trefryimplications}
This paper will discuss the historical differences in education for healthcare staff and the evolution of training and preparing the healthcare worker using technology in today's healthcare environment. The trainee no longer has to rely on their ability to pretend certain circumstances are occurring; the scenario can be created using technology, artificial intelligence, and virtual reality. The conclusion of the paper will review the benefits and risks of relying on technology versus skills learned “in the real world”.
\medskip
\subsubsection{Bayro-Corrochano, Eduardo. "Geometric Computing for Minimal Invasive Surgery." In Geometric Algebra Applications Vol. II, pp. 565-583. Springer, Cham, 2020.}
see \cite{bayro2020geometric}
In this chapter, we show the treatment of a variety of tasks in medical robotics handled using a powerful, non-redundant coefficient geometric language. This chapter is based on our previous works [1, 2]. You will see how we can treat representation and modeling using geometric primitives like points, lines, and spheres. Screws and motors are used for interpolation, grasping, holding, object manipulation, and surgical maneuvering. We use geometric algebra algorithms in three scenarios: the virtual world for surgical planning, the haptic interface to command the robot arms, and the visually guided robot arm system for ultrasound scanning and surgical operation. Note that in this work we do not present a complete system for computer-aided surgery; here we illustrate the application of geometric algebra algorithms for some relevant tasks in minimally invasive surgery.
\medskip
\subsubsection{Sharon, Yarden, Anthony M. Jarc, Thomas S. Lendvay, and Ilana Nisky. "Rate of Orientation Change as a New Metric for Robot-Assisted and Open Surgical Skill Evaluation." IEEE Transactions on Medical Robotics and Bionics 3, no. 2 (2021): 414-425.}
see \cite{sharon2021rate}
Surgeons’ technical skill directly impacts patient outcomes. To date, the angular motion of the instruments has been largely overlooked in objective skill evaluation. To fill this gap, we have developed metrics for surgical skill evaluation that are based on the orientation of surgical instruments. We tested our new metrics on two datasets with different conditions: (1) a dataset of experienced robotic surgeons and nonmedical users performing needle-driving on a dry lab model, and (2) a small dataset of suturing movements performed by surgeons training on a porcine model. We evaluated the performance of our new metrics (angular displacement and the rate of orientation change) alongside the performances of classical metrics (task time and path length). We calculated each metric on different segments of the movement. Our results highlighted the importance of segmentation rather than calculating the metrics on the entire movement. Our new metric, the rate of orientation change, showed statistically significant differences between experienced surgeons and nonmedical users / novice surgeons, which were consistent with the classical task time metric. The rate of orientation change captures technical aspects that are taught during surgeons’ training, and together with classical metrics can lead to a more comprehensive discrimination of skills.
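The exact definition of the rate-of-orientation-change metric belongs to the paper; the sketch below only illustrates one plausible way to compute an orientation-change rate from a sequence of instrument orientations, with the sampling rate and the orientation trajectory invented for illustration.
\begin{verbatim}
# Illustrative orientation-change rate from a sequence of instrument
# orientations (unit quaternions) sampled at a fixed rate.
# This is a plausible reading of "rate of orientation change",
# not the authors' exact metric.
import numpy as np
from scipy.spatial.transform import Rotation as R

fs = 50.0  # assumed sampling frequency [Hz]
rng = np.random.default_rng(1)
# Placeholder trajectory: small random rotations accumulated over time
steps = R.from_rotvec(0.01 * rng.normal(size=(500, 3)))
poses = [R.identity()]
for step in steps:
    poses.append(step * poses[-1])

# Relative rotation angle between consecutive samples [rad]
rel = np.array([(poses[i + 1] * poses[i].inv()).magnitude()
                for i in range(len(poses) - 1)])
angular_displacement = rel.sum()      # total angular path [rad]
rate_of_change = rel.mean() * fs      # mean angular speed [rad/s]
print(angular_displacement, rate_of_change)
\end{verbatim}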
\medskip
\subsubsection{Boehler, Quentin, David S. Gage, Phyllis Hofmann, Alexandra Gehring, Christophe Chautems, Donat R. Spahn, Peter Biro, and Bradley J. Nelson. "REALITI: A robotic endoscope automated via laryngeal imaging for tracheal intubation." IEEE Transactions on Medical Robotics and Bionics 2, no. 2 (2020): 157-164.}
see \cite{boehler2020realiti}
Tracheal intubation is considered the gold standard to secure the airway of patients in need of respiratory assistance, yet this procedure relies on the dexterity and experience of the physician to correctly place a tracheal tube into the patient's trachea. Such a complex procedure may greatly benefit from robotic assistance in order to make the intubation safer and more efficient. We developed the first device to provide such assistance, the REALITI, which stands for Robotic Endoscope Automated via Laryngeal Imaging for Tracheal Intubation. This device combines automated detection of key anatomical features in an endoscopic image with robotic steering toward the recognized features in the task of guiding the tracheal tube into its correct position. The pre-clinical prototype presented in this paper has been developed to perform in vitro tracheal intubation on a standard airway management training manikin. We performed robust detection of anatomical features to steer the endoscope in a visual servoing fashion. Our prototype has been successfully used to perform automated and manual insertions into the trachea of an airway manikin.
\medskip
\subsubsection{Langlois, Kevin, David Rodriguez-Cianca, Ben Serrien, Joris De Winter, Tom Verstraten, Carlos Rodriguez-Guerrero, Bram Vanderborght, and Dirk Lefeber. "Investigating the Effects of Strapping Pressure on Human-Robot Interface Dynamics Using a Soft Robotic Cuff." IEEE Transactions on Medical Robotics and Bionics 3, no. 1 (2020): 146-155.}
see \cite{langlois2020investigating}
Physical human-robot interfaces are a challenging aspect of exoskeleton design, mainly due to the fact that interfaces tend to migrate relative to the body, leading to discomfort and power losses. Therefore, the key is to develop interfaces that optimize attachment stiffness, i.e., reduce relative motion, without compromising comfort. To that end, we propose a method to obtain the optimal attachment pressure in terms of connection stiffness and comfort. The method is based on a soft robotic interface capable of actively controlling strapping pressure, which is coupled to a cobot. Hereby the effects of strapping pressure on energetic losses, connection stiffness, and perceived comfort are analyzed. Results indicate a trade-off between connection stiffness and perceived comfort for this type of interface. An optimal strapping pressure was found in the 50 to 80 mmHg range. Connection stiffness was found to increase linearly over a pressure range from 0 mmHg (stiffness of 1139 N/m) to 100 mmHg (stiffness of 2232 N/m), and energetic losses were reduced by 42\% by increasing connection stiffness. This research highlights the importance of strapping pressure when attaching an exoskeleton to a human and introduces a new adaptive interface to improve the coupling of an exoskeleton to an individual.
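Taking the two endpoint stiffness values quoted above and the stated linear trend at face value, the implied slope is roughly
\[
\frac{\Delta k}{\Delta p} \approx \frac{(2232 - 1139)\ \text{N/m}}{100\ \text{mmHg}} \approx 10.9\ \text{N/m per mmHg},
\]
so the reported 50–80 mmHg optimum would correspond to roughly 1.7–2.0 kN/m of connection stiffness under the same fit; this back-of-the-envelope figure is only an illustration derived from the abstract's numbers.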
\medskip
\subsubsection{Wang, Chao, Hao Zhang, Lu Zhang, Meng Kong, Kai Zhu, Chuan‐li Zhou, and Xue‐xiao Ma. "Accuracy and deviation analysis of robot‐assisted spinal implants: A retrospective overview of 105 cases and preliminary comparison to open freehand surgery in lumbar spondylolisthesis." The International Journal of Medical Robotics and Computer Assisted Surgery (2021): e2273.}
see \cite{wang2021accuracy}
Background
Whether the accuracy of robot-assisted spinal screw placement is significantly higher than that of freehand and the source of robotic deviation remain unclear.
Methods
Clinical data of 105 patients who underwent robot-assisted spinal surgery was collected, and screw accuracy was evaluated by computed tomography according to the modified Gertzbein–Robbins classification. Patients were grouped by percutaneous and open surgery. Intergroup comparisons of clinical and screw accuracy parameters were performed. Reasons for deviation were determined. Thirty-one patients with lumbar spondylolisthesis undergoing open robot-assisted surgery and the same number of patients treated by open freehand surgery were compared for screw accuracy.
Results
Screw accuracy was not significantly different between the percutaneous and open groups in both intra- and postoperative evaluations. Tool skiving was identified as the main cause of deviation. The proportion of malpositioned screws (grade B + C + D) was significantly higher in the freehand group than in the robot-assisted group. However, remarkably malpositioned (grade C + D) screws showed no significant differences between the groups. No revision surgery was necessary.
Conclusions
Robot-assisted spinal instrumentation manifests high accuracy and low incidence of nerve injury. Tool skiving is a major cause of implant deviation.
\medskip
\subsubsection{Kastritsi, Theodora, and Zoe Doulgeri. "A Controller to Impose a RCM for Hands-on Robotic-Assisted Minimally Invasive Surgery." IEEE Transactions on Medical Robotics and Bionics 3, no. 2 (2021): 392-401.}
see \cite{kastritsi2021controller}
In Robotic-Assisted Minimally Invasive Surgery a long and thin instrument attached to the robotic arm enters the human body through a tiny incision. To ensure that no injury occurs when the surgeon is manipulating the instrument, the incision point must be a remote center of motion (RCM) for the instrument. For this purpose, a novel target admittance model is designed in the joint space for hands-on procedures that can be applied to all commercially available general-purpose manipulators with six or more degrees of freedom. It is proved that the joint reference trajectories generated by the proposed target admittance model under the exertion of a human force are stable and satisfy the RCM constraint. Only measurements of the human force and the robot's forward kinematic model are required. Its use spans all hands-on surgical procedures. The proposed model can be easily extended to achieve additional objectives. Simulation results validate the theoretical findings, and experimental results utilizing a KUKA LWR4+ demonstrate that trocar displacements are less than 1 mm.
\medskip
\subsubsection{Pradhan, Bikash, Deepti Bharti, Sumit Chakravarty, Sirsendu S. Ray, Vera V. Voinova, Anton P. Bonartsev, and Kunal Pal. "Internet of Things and Robotics in Transforming Current-Day Healthcare Services." Journal of Healthcare Engineering 2021 (2021).}
see \cite{pradhan2021internet}
Technology has become an integral part of everyday life. Recent years have witnessed advancements in technology with a wide range of applications in healthcare. However, the use of the Internet of Things (IoT) and robotics is yet to see substantial growth in terms of its acceptability in healthcare applications. The current study discusses the role of these technologies in transforming healthcare services. The study also presents various functionalities of the ideal IoT-aided robotic system and their importance in healthcare applications. Furthermore, the study focuses on the application of the IoT and robotics in providing healthcare services such as rehabilitation, assistive surgery, elderly care, and prosthetics. Recent developments, current status, limitations, and challenges in this area are presented in detail. The study also discusses the role and applications of these technologies in managing the current COVID-19 pandemic. Comprehensive knowledge is provided on the prospective functionality, applications, challenges, and future scope of IoT-aided robotic systems in healthcare services. This will help future researchers form a comprehensive view of the use of this technology to improve healthcare services in the future.
\medskip
\subsubsection{Xu, Yongkang, Hongqiang Zhao, Xuanyi Zhou, Salih Ertug Ovur, and Ting Xia. "Energy Management for Medical Rescue Robot." In 2020 5th International Conference on Advanced Robotics and Mechatronics (ICARM), pp. 44-49. IEEE, 2020.}
see \cite{xu2020energy}
This paper mainly centers on the energy management of a medical rescue mobile robot with different payloads in uncertain road conditions. Efficient energy usage is a crucial issue because energy consumption will increase with the application and expansion of robotics. The power consumption of the robot system is affected by the task and its operating environment. This paper designs a six-wheel-drive medical rescue mobile robot using a hybrid system. Aiming at the wheel driving conditions of mobile robots, a power-adaptation control strategy is proposed, and its structure and driving form are introduced. Firstly, based on the power-adaptation control strategy, a nonlinear model predictive control (NMPC) algorithm is proposed to optimize the power-adaptation control strategy. Nonlinear model predictive control uses a nonlinear model to represent the medical rescue robot model and external characteristic constraints. Secondly, the AVL CRUISE module is used to build a dynamic model of the medical rescue mobile robot, and it is jointly simulated with MATLAB/Simulink to simulate the energy consumption of the medical rescue robot under Urban Driving Cycle (UDC) standard operating conditions. Finally, the NMPC method is used to solve the system and is compared with the power-adaptation control strategy. The simulation results show that, compared with the power-adaptation control strategy, the NMPC reduces the fuel consumption of the combustion engine by 26.4\%.
\medskip
\subsubsection{Su, Baiquan, Shi Yu, Xintong Li, Yi Gong, Han Li, Zifeng Ren, Yijing Xia et al. "Autonomous Robot for Removing Superficial Traumatic Blood." IEEE Journal of Translational Engineering in Health and Medicine 9 (2021): 1-9.}
see \cite{su2021autonomous}
Objective: Removing blood from an incision and finding the incision spot is a key task during surgery; otherwise, excessive discharge of blood will endanger a patient's life. However, repetitive manual blood removal involves a substantial workload, contributing to surgeon fatigue. Thus, it is valuable to design a robotic system which can automatically remove blood from the incision surface. Methods: In this paper, we design a robotic system to fulfill the surgical task of blood removal. The system consists of a pair of dual cameras, a 6-DoF robotic arm, an aspirator whose handle is fixed to the robotic arm, and a pump connected to the aspirator. Further, a path-planning algorithm is designed to generate a path which the aspirator tip should follow to remove blood. Results: In a group of simulated bleeding experiments on ex vivo porcine tissue, the contour of the blood region is detected, and the reconstructed spatial coordinates of the detected blood contour are obtained afterward. The BRR robot thoroughly cleans the blood running out of the incision. Conclusions: This study contributes the first result on designing an autonomous blood-removal medical robot. The skill of surgical blood removal, which is performed manually by surgeons nowadays, is thus acquired by the proposed BRR medical robot.
\medskip
\subsubsection{Avila-Tomás, J. F., M. A. Mayer-Pujadas, and V. J. Quesada-Varela. "Artificial intelligence and its applications in medicine I: introductory background to AI and robotics." Atencion Primaria 52, no. 10 (2020): 778-784.}
see \cite{avila2020artificial}
Technology and medicine have followed parallel paths over the last decades. Technological advances are changing the concept of health, and health needs are influencing the development of technology. Artificial intelligence (AI) is made up of a series of sufficiently trained logical algorithms from which machines are capable of making decisions for specific cases based on general rules. This technology has applications in the diagnosis and follow-up of patients, with an individualized prognostic evaluation of them. Furthermore, if we combine this technology with robotics, we can create intelligent machines that make more efficient diagnostic proposals in their work. Therefore, AI is going to be a technology present in our daily work through machines or computer programs, which, in a more or less transparent way for the user, will become a daily reality in health processes. Health professionals have to know this technology, its advantages and disadvantages, because it will be an integral part of our work. In these two articles we intend to give a basic vision of this technology adapted to doctors, with a review of its history and evolution, its real applications at the present time, and a vision of a future in which AI and Big Data will shape the personalized medicine that will characterize the 21st century.
\medskip
\subsubsection{Maglio, S., C. Park, S. Tognarelli, A. Menciassi, and E. T. Roche. "High fidelity physical organ simulators: from artificial to bio hybrid solutions." IEEE Transactions on Medical Robotics and Bionics (2021).}
see \cite{maglio2021high}
Over the past decade, there has been growing interest in high-fidelity simulation for medical applications leading to huge research efforts towards physical organ simulators with realistic representations of human organs. As this is a relatively young research field, this review aims to provide an insight into the current state of the art in high-fidelity physical organ simulators that are used for training purposes, as educational tools, for biomechanical studies, and for preclinical device testing. Motivated by a paucity of clear definitions and categorization of various simulators, we describe high-fidelity physical organ simulators in terms of their degree of representation of the anatomy, material properties, and physiological behavior of the target organs in the context of their applications. We highlight the traditional approaches for static organ simulators using synthetic materials, and diverse approaches for dynamic organ simulators including soft robotic, ex vivo, and biohybrid strategies to meet the ever-increasing demand for realistic anthropomorphic organ models. Finally, we discuss challenges and potential future avenues in the field of high-fidelity physical organ simulators.
\medskip
\subsubsection{Xia, Runzhi, Zhicheng Tong, Yi Hu, Keyu Kong, Xiulin Wu, and Huiwu Li. "“Skywalker” surgical robot for total knee arthroplasty: An experimental sawbone study." The International Journal of Medical Robotics and Computer Assisted Surgery (2021): e2292.}
see \cite{xia2021skywalker}
Background
Currently, robot-assisted surgical systems are used to reduce the error range of total knee arthroplasty (TKA) osteotomy and component positioning.
Methods
We used 20 sawbone models of the femur and 20 sawbone models of the tibia and fibula to evaluate the osteotomy effect of ‘Skywalker’ robot-assisted TKA.
Results
The maximal movement of the cutting jig was less than 0.25 mm at each osteotomy plane. The mean and standard deviation values of the angle deviation between the planned osteotomy plane and the actual osteotomy plane at each osteotomy plane were not more than 1.03° and 0.55°, respectively. The mean and standard deviation values of absolute error of resection thickness at each osteotomy position were less than 0.78 and 0.71 mm, respectively.
Conclusions
The ‘Skywalker’ system has good osteotomy accuracy, can achieve the planned osteotomy well and is expected to assist surgeons in performing accurate TKA in clinical applications in future.
\medskip
\subsubsection{Lan, Ning, Manzhao Hao, Chuanxin M. Niu, He Cui, Yu Wang, Ting Zhang, Peng Fang, and Chih-hong Chou. "Next-generation prosthetic hand: from biomimetic to biorealistic." Research 2021 (2021).}
see \cite{lan2021next}
Integrating a prosthetic hand with amputees with seamless neural compatibility has presented a grand challenge to neuroscientists and neural engineers for more than half a century. Mimicking the anatomical structure or appearance of the human hand does not lead to improved neural connectivity to the sensorimotor system of amputees. The functions of modern prosthetic hands do not match the dexterity of the human hand, due primarily to a lack of sensory awareness and compliant actuation. Lately, progress in restoring sensory feedback has marked a significant step forward in improving the neural continuity of sensory information from prosthetic hands to amputees. However, little effort has been made to replicate the compliant property of biological muscle when actuating prosthetic hands. Furthermore, a full-fledged biorealistic approach to designing prosthetic hands has not been contemplated in neuroprosthetic research. In this perspective article, we advance a novel view that a prosthetic hand can be integrated harmoniously with amputees only if neural compatibility with the sensorimotor system is achieved. Our ongoing research supports that the next-generation prosthetic hand must incorporate biologically realistic actuation, sensing, and reflex functions in order to fully attain neural compatibility.
\medskip
\subsubsection{Kunz, Christian, Michal Hlaváč, Max Schneider, Andrej Pala, Pit Henrich, Birgit Jickeli, Heinz Wörn, Björn Hein, Rainer Wirtz, and Franziska Mathis-Ullrich. "Autonomous Planning and Intraoperative Augmented Reality Navigation for Neurosurgery." IEEE Transactions on Medical Robotics and Bionics (2021).}
see \cite{kunz2021autonomous}
Neurosurgical interventions in the brain are challenging due to delicate anatomical structures. During surgery, precise navigation of surgical instruments supports surgeons and allows prevention of adverse events. Here, an augmented reality-based navigation aid with automated segmentation of risk structures and path planning is presented. Superimposed patient models are visualized during neurosurgical interventions on the example of the ventricular puncture. The proposed system is experimentally validated in a realistic operating room scenario with expert neurosurgeons to determine its quality of support as well as its potential for clinical translation. The automated segmentation reaches an F1-score of 95-99\%. Paths are planned correctly in 93.4\% of cases. The entire process enables navigation aid in under five minutes. Validation shows that the system allows for a puncture success rate of 81.7\% with a mean accuracy of 4.8 ± 2.5 mm. A control group who performed the standard-of-care procedure reached a rate of 71.7\% with 6.5 ± 2.4 mm accuracy. Acceptability analysis shows that 85.7\% of the participating surgeons approve of the system's convenience and 92.9\% expect accuracy improvement. The presented navigation aid for ventricular puncture enables automated surgical planning and may improve the accuracy and success rates of neurosurgical interventions.
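For reference, the F1-score quoted for the automated segmentation is the standard harmonic mean of precision and recall,
\[
F_1 = \frac{2 \cdot \mathrm{precision} \cdot \mathrm{recall}}{\mathrm{precision} + \mathrm{recall}},
\]
which is high only when precision and recall are both high, so the reported 95-99\% range indicates that the segmented risk structures closely match the reference annotations.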
\medskip
\subsubsection{Satale, Kavita, Tanmayi Bhave, Chirag Chandak, and S. A. Patil. "Nursing Robot." (2020).}
see \cite{satale2020nursing}
This paper highlights the role of robotics in healthcare. The paper also focuses on areas of hospital management and control of the spread of the novel coronavirus disease 2019 (COVID-19). The main intention of such robots is to minimize person-to-person contact and to ensure cleaning, sterilization, and support in hospitals and similar facilities such as quarantine. This will be useful for reducing the life threat to doctors and medical staff taking an active role in the management of the COVID-19 pandemic. The purpose of the present research is to highlight the importance of medical robotics in general, and then to connect its utilization with COVID-19 management, so that hospital staff can direct themselves to maximize the use of medical robots for various medical procedures. This is despite the popularity of telemedicine robots, which are also effective in similar situations. Our proposed system will help nurses and doctors to supply medicines as well as food to infected patients.
\medskip
\subsubsection{Zhang, Jing, Jiahui Qian, Han Zhang, Ling He, Bin Li, Jing Qin, Hongning Dai, Wei Tang, and Weidong Tian. "Maxillofacial surgical simulation system with haptic feedback." Journal of Industrial \& Management Optimization (2020).}
see \cite{zhang2020maxillofacial}
Due to the complexity of the maxillofacial surgery, the novice should be sufficiently trained before one is qualified to carry on the surgery. To reduce the training costs and improve the training efficiency, a virtual mandible surgical system with haptic feedback is proposed. This surgical simulation system offers users the haptic feedback while simulating maxillofacial surgery. An integrated model is introduced to optimize the system simulation process, which includes force output to a six-degree-of-freedom haptic device. Based on the anatomy structure of the bone tissue, a two-layer mechanism model is designed to balance the requirement of real-time response and the force feedback accuracy. Collision detection, force rendering, and grinding function are studied to simulate some essential operations: open reduction, osteotomy, and palate fixation. The proposed simulation platform can assist in the training and planning of these oral and maxillofacial surgeries. The fast response feature enables surgeons to design a patient-specific guide plate in real-time. Ten stomatology surgeons evaluated this surgical simulation system from the following four indexes: the level of immersion, user-friendliness, stability, and the effect of surgical training. The evaluation score is eight out of ten.
\medskip
\subsubsection{Caccianiga, Guido, Andrea Mariani, Elena De Momi, Gabriela Cantarero, and Jeremy D. Brown. "An evaluation of inanimate and virtual reality training for psychomotor skill development in robot-assisted minimally invasive surgery." IEEE Transactions on Medical Robotics and Bionics 2, no. 2 (2020): 118-129.}
see \cite{caccianiga2020evaluation}
Robot-assisted minimally invasive surgery (RAMIS) is gaining widespread adoption in many surgical specialties, despite the lack of a standardized training curriculum. Current training approaches rely heavily on virtual reality simulators, in particular for basic psychomotor and visuomotor skill development. It is not clear, however, whether training in virtual reality is equivalent to inanimate model training. In this manuscript, we seek to compare virtual reality training to inanimate model training, with regard to skill learning and skill transfer. Using a custom-developed needle-driving training task with inanimate and virtual analogs, we investigated the extent to which N=18 participants improved their skill on a given platform post-training, and transferred that skill to the opposite platform. Results indicate that the two approaches are not equivalent, with more salient skill transfer after inanimate training than virtual training. These findings support the claim that training with real physical models is the gold standard, and suggest more inanimate model training be incorporated into training curricula for early psychomotor skill development.
\medskip
\subsubsection{Su, Hang, Yunus Schmirander, Sarah Elena Valderrama-Hincapie, Jairo Pinedo, Xuanyi Zhou, Jiehao Li, Longbin Zhang, Yingbai Hu, Giancarlo Ferrigno, and Elena De Momi. "Asymmetric bimanual control of dual-arm serial manipulator for robot-assisted minimally invasive surgeries." (2020): 1223-1233.}
see \cite{su2020asymmetric}
Robotic assistance is promising for improving minimally invasive surgery (MIS). This work presents asymmetric bimanual control of a dual-arm serial robot with two remote centers of motion (RCMs) constraints for MIS. In our previous works, general null space controllers to guarantee the fixed RCM constraint have been proposed. However, an incision on a patient’s abdominal wall is not fixed owing to the respiration of the patient, which generates an uncertain disturbance at the joints of robotic manipulators. To improve accuracy, a radial basis function neural network is implemented to adapt to these disturbances and control the end-effector position. Finally, the adaptive bimanual control strategy is validated through simulations based on clinical data. The proposed control shows improved accuracy in the end effector position for all the designed surgical tasks. In future works, the algorithm will be validated on an actual dual-arm serial robot making use of a body phantom.
\medskip
\subsubsection{De Rossi, Giacomo, Marco Minelli, Serena Roin, Fabio Falezza, Alessio Sozzi, Federica Ferraguti, Francesco Setti, Marcello Bonfè, Cristian Secchi, and Riccardo Muradore. "A First Evaluation of a Multi-Modal Learning System to Control Surgical Assistant Robots via Action Segmentation." IEEE Transactions on Medical Robotics and Bionics (2021).}
see \cite{de2021first}
The next stage for robotics development is to introduce autonomy and cooperation with human agents in tasks that require high levels of precision and/or that exert considerable physical strain. To guarantee the highest possible safety standards, the best approach is to devise a deterministic automaton that performs identically for each operation. Clearly, such approach inevitably fails to adapt itself to changing environments or different human companions. In a surgical scenario, the highest variability happens for the timing of different actions performed within the same phases. This paper presents a cognitive control architecture that uses a multi-modal neural network trained on a cooperative task performed by human surgeons and produces an action segmentation that provides the required timing for actions while maintaining full phase execution control via a deterministic Supervisory Controller and full execution safety by a velocity-constrained Model-Predictive Controller.
\medskip
\subsubsection{Thai, Mai Thanh, Phuoc Thien Phan, Trung Thien Hoang, Shing Wong, Nigel H. Lovell, and Thanh Nho Do. "Advanced intelligent systems for surgical robotics." Advanced Intelligent Systems 2, no. 8 (2020): 1900138.}
see \cite{thai2020advanced}
Surgical robots have had clinical use since the mid-1990s. Robot-assisted surgeries offer many benefits over the conventional approach including lower risk of infection and blood loss, shorter recovery, and an overall safer procedure for patients. The past few decades have shown many emerging surgical robotic platforms that can work in complex and confined channels of the internal human organs and improve the cognitive and physical skills of the surgeons during the operation. Advanced technologies for sensing, actuation, and intelligent control have enabled multiple surgical devices to simultaneously operate within the human body at low cost and with more efficiency. Despite advances, current surgical intervention systems are not able to execute autonomous tasks and make cognitive decisions that are analogous to those of humans. Herein, the historical development of surgery from conventional open to robotic-assisted approaches with discussion on the capabilities of advanced intelligent systems and devices that are currently implemented in existing surgical robotic systems is reviewed. Also, available autonomous surgical platforms are comprehensively discussed with comments on the essential technologies, existing challenges, and suggestions for the future development of intelligent robotic-assisted surgical systems toward the achievement of fully autonomous operation.
\medskip
\subsubsection{Watson, Connor, and Tania K. Morimoto. "Permanent magnet-based localization for growing robots in medical applications." IEEE Robotics and Automation Letters 5, no. 2 (2020): 2666-2673.}
see \cite{watson2020permanent}
Growing robots, which achieve locomotion by extending from their tip, are inherently compliant and can safely navigate through constrained environments that prove challenging for traditional robots. However, the same compliance and tip-extension mechanism that enables this ability also leads directly to challenges in their shape estimation and control. In this letter, we present a low-cost, wireless, permanent magnet-based method for localizing the tip of these robots. A permanent magnet is placed at the robot tip, and an array of magneto-inductive sensors is used to measure the change in magnetic field as the robot moves through its workspace. We develop an approach to localization that combines analytical and machine learning techniques and show that it outperforms existing methods. We also measure the position error over a 500 mm × 500 mm workspace with different magnet sizes to show that this approach can accommodate growing robots of different scales. Lastly, we show that our localization method is suitable for tracking the tip of a growing robot by deploying a 12 mm robot through different, constrained environments. Our method achieves position and orientation errors of 3.0 ± 1.1 mm and 6.5 ± 5.4° in the planar case and 4.3 ± 2.3 mm, 3.9 ± 3.0°, and 3.8 ± 3.5° in the 5-DOF setting.
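The analytical half of such a localization scheme can be sketched as a nonlinear least-squares fit of a point-dipole field model to the array of field measurements; the sensor layout, magnetic moment, and noise level below are invented for illustration, and the paper's actual method additionally combines this kind of model with machine learning, which is not reproduced here.
\begin{verbatim}
# Hedged sketch: localize a permanent magnet by fitting a point-dipole
# model to three-axis field measurements from a sensor array.
# Sensor geometry, magnet moment, and noise are illustrative only.
import numpy as np
from scipy.optimize import least_squares

MU0 = 4e-7 * np.pi

def dipole_field(p_mag, m_vec, sensors):
    # Point-dipole field at each sensor for a magnet at position p_mag
    r = sensors - p_mag
    r_norm = np.linalg.norm(r, axis=1, keepdims=True)
    r_hat = r / r_norm
    term = 3.0 * (r_hat @ m_vec)[:, None] * r_hat - m_vec
    return MU0 / (4.0 * np.pi) * term / r_norm**3

# Illustrative 4x4 sensor grid on the z = 0 plane (metres)
xs, ys = np.meshgrid(np.linspace(-0.1, 0.1, 4), np.linspace(-0.1, 0.1, 4))
sensors = np.column_stack([xs.ravel(), ys.ravel(), np.zeros(xs.size)])

m_vec = np.array([0.0, 0.0, 0.5])       # assumed magnetic moment [A m^2]
p_true = np.array([0.03, -0.02, 0.08])  # "unknown" magnet position
rng = np.random.default_rng(0)
noise = 1e-8 * rng.normal(size=sensors.shape)
b_meas = dipole_field(p_true, m_vec, sensors) + noise

residual = lambda p: (dipole_field(p, m_vec, sensors) - b_meas).ravel()
fit = least_squares(residual, x0=np.array([0.0, 0.0, 0.05]))
print("estimated position [m]:", fit.x)
\end{verbatim}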
\medskip
\subsubsection{Nawrat, Zbigniew. "MIS AI-artificial intelligence application in minimally invasive surgery." Mini-invasive Surgery 4 (2020).}
see \cite{nawrat2020mis}
This chapter is devoted towards analyzing the progress and barriers to the development of artificial intelligence (AI) and medical robotics in minimally-invasive surgery. The less invasive the surgical intervention and the further the surgeon is from the operating table, the greater the roles of decision support systems (AI) and performance of specific tasks (by medical robots).
\medskip
\subsubsection{Li, Mi, Ning Xi, Yuechao Wang, and Lianqing Liu. "Progress in nanorobotics for advancing biomedicine." IEEE Transactions on Biomedical Engineering 68, no. 1 (2020): 130-147.}
see \cite{li2020progress}
Nanorobotics, which has long been a fantasy in the realm of science fiction, is now a reality due to the considerable developments in diverse fields including chemistry, materials, physics, information and nanotechnology in the past decades. Not only have different prototypes of nanoscale nanorobots been invented for various biomedical applications, but robotic nanomanipulators able to handle nano-objects have also achieved substantial results for applications in biomedicine. The outstanding achievements in nanorobotics have significantly expanded the field of medical robotics and yielded novel insights into the underlying mechanisms guiding life activities, showing an emerging and promising way of advancing diagnosis \& treatment in the coming era of personalized precision medicine. In this review, the recent advances in nanorobotics (nanorobots, nanorobotic manipulations) for biomedical applications are summarized from several facets (including molecular machines, nanomotors, DNA nanorobotics, and robotic nanomanipulators), and future perspectives are also presented.
\medskip
\subsubsection{Giesen, Luuk, Laurie Bax, and Jurgen Riedl. "Automated real-time 3D ultrasound mapping of vessels [3D-ULTRAMAN]."}
see \cite{giesenautomated}
The 3D Ultraman project is a collaboration between the medical robotics company Vitestro and the clinical laboratory Result Laboratorium. During the project, real-time detection of arteries was developed, including a clinical ultrasound-force study as well as training and evaluation of deep learning architectures. These promising algorithms will be further developed and integrated in an ATTRACT phase 2 project. This technology could be the basis for robotic devices autonomously performing a variety of vascular access procedures, enabling new levels of automation in medicine.
\medskip
\subsubsection{Cortesão, Rui, and Luís Santos. "Noise Effects on Quaternion and Axis-Angle Representations in Robotics." IEEE Robotics and Automation Letters 6, no. 1 (2020): 64-71.}
see \cite{cortesao2020noise}
This letter provides a methodology to analyze the noise sensitivity of quaternion and axis-angle representations in the presence of joint measurement noise. A general formulation based on the trace of the rotation matrix is proposed, making it possible to compute noise sensitivity as a function of robot postures and noise variances. Additionally, noise sensitivity as a function of the orientation angle is derived, making it possible to identify regions with different sensitivities. The theoretical findings are general and are not tied to any particular noise distribution. Simulation results with zero-mean Gaussian noise and real experiments are provided, corroborating the theoretical findings.
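The trace-based formulation referenced above rests on the standard relation between a rotation matrix $R \in SO(3)$ and its rotation angle $\theta$,
\[
\operatorname{tr}(R) = 1 + 2\cos\theta
\quad\Longrightarrow\quad
\theta = \arccos\!\left(\frac{\operatorname{tr}(R) - 1}{2}\right),
\qquad
\frac{d\theta}{d\operatorname{tr}(R)} = -\frac{1}{2\sin\theta},
\]
so small perturbations of the trace map to large angle errors near $\theta \approx 0$ and $\theta \approx \pi$, which is consistent with the regions of differing sensitivity identified in the letter.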
\medskip
\subsubsection{Yang, Su, Hyuck‐Jun Yoon, Seyed Jamaleddin Mostafavi Yazdi, and Jong‐Ha Lee. "A novel automated lumen segmentation and classification algorithm for detection of irregular protrusion after stents deployment." The International Journal of Medical Robotics and Computer Assisted Surgery 16, no. 1 (2020): e2033.}
see \cite{yang2020novel}
Background
Clinically, irregular protrusions and blockages after stent deployment can lead to significant adverse outcomes such as thrombotic reocclusion or restenosis. In this study, we propose a novel fully automated method for irregular lumen segmentation and normal/abnormal lumen classification.
Methods
The proposed method consists of lumen segmentation, feature extraction, and lumen classification. In total, 92 features were extracted to classify normal/abnormal lumens. The lumen classification method combines a supervised learning algorithm with feature selection based on a partition-membership filter method.
Results
Our proposed lumen segmentation method achieved an average Dice similarity coefficient (DSC) of 97.6\%, and the proposed features with a random forest (RF) classifier achieved an accuracy of 98.2\% for normal/abnormal lumen classification.
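For reference, the Dice similarity coefficient reported here is the standard overlap measure between a predicted segmentation $A$ and its ground truth $B$ (a general definition, not specific to this paper):
\[
\mathrm{DSC}(A,B) = \frac{2\,\lvert A \cap B \rvert}{\lvert A \rvert + \lvert B \rvert},
\]
ranging from 0 (no overlap) to 1 (perfect agreement).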
Conclusions
The proposed method can therefore lead to a better understanding of overall vascular status and help support cardiovascular diagnosis.
\medskip
\subsubsection{Hwang, Minho, Daniel Seita, Brijen Thananjeyan, Jeffrey Ichnowski, Samuel Paradis, Danyal Fer, Thomas Low, and Ken Goldberg. "Applying depth-sensing to automated surgical manipulation with a Da Vinci Robot." In 2020 International Symposium on Medical Robotics (ISMR), pp. 22-29. IEEE, 2020.}
see \cite{hwang2020applying}
Recent advances in depth-sensing have significantly increased accuracy, resolution, and frame rate, as shown by the 1920$\times$1200-resolution, 13 frames per second Zivid RGBD camera. In this study, we explore the potential of depth sensing for efficient and reliable automation of surgical subtasks. We consider a monochrome (all red) version of the peg transfer task from the Fundamentals of Laparoscopic Surgery training suite implemented with the da Vinci Research Kit (dVRK). We use calibration techniques that allow the imprecise, cable-driven da Vinci to reduce error from 4-5 mm to 1-2 mm in the task space. We report experimental results for a handover-free version of the peg transfer task, performing 20 and 5 physical episodes with single- and bilateral-arm setups, respectively. Results over 236 and 49 total block transfer attempts for the single- and bilateral-arm peg transfer cases suggest that per-block reliabilities of 86.9\% and 78.0\% can be attained, with respective block transfer times of 10.02 and 5.72 seconds. Supplementary material is available at https://sites.google.com/view/peg-transfer.
\medskip
\subsubsection{Sheft, Maxina, Priya Kulkarni, Jiawei Ge, Hamed Saeidi, Justin D. Opfermann, Arjun Joshi, Martin Schnermann, and Axel Krieger. "Development and Error Analysis of a Novel Robotic System for Photodynamic Therapy." In 2020 International Symposium on Medical Robotics (ISMR), pp. 166-172. IEEE, 2020.}
see \cite{sheft2020development}
Photodynamic therapy has the potential not only to treat tumors directly but also to reduce incidental damage caused by large surgical margins and radiation therapy. In this study, a novel robotic light-delivery system was developed using a Cartesian robot. Human input was limited to computer input, and no physical positioning of the light delivery system was required during testing. Error analysis was conducted to ensure the system's applicability to a clinical environment. Error involved in both the outlining and coverage of the targeted areas was examined. The average outlining error and standard deviation were 0.23 $\pm$ 0.16 mm, and the coverage time error was below 4\%. These results indicate that a robotic light delivery system for photodynamic therapy can consistently provide light delivery with sub-millimeter errors when tested with ex-vivo phantoms.
\medskip
\subsubsection{Li, Junyu, Yanming Fang, Zhao Jin, Yuchen Wang, and Miao Yu. "The impact of robot‐assisted spine surgeries on clinical outcomes: A systemic review and meta‐analysis." The International Journal of Medical Robotics and Computer Assisted Surgery 16, no. 6 (2020): 1-14.}
see \cite{li2020impact}
Background
Medical robotics has enabled a significant advancement in the field of modern spine surgery, especially in pedicle screw fixation. A plethora of studies focused on the accuracy of pedicle fixation in robotic-assisted (RA) technology. However, it is not clear whether RA techniques can improve patients' clinical outcomes.
Methods
We retrieved relevant studies that compare the differences between RA and freehand (FH) techniques in spine surgeries from the following databases: PubMed, Embase, Cochrane Library and Web of Science. The perioperative outcomes of this technology were measured with parameters including radiation exposure, operative time, the length of hospital stay, complication rates and revision rates. Two reviewers independently reviewed the studies in our sample, assessed their validity and extracted relevant data.
Conclusions
This study suggests that RA spine surgeries would result in fewer complications, a lower revision rate and shorter length of hospital stay. As the technology continues to evolve, we may expect more applications of robotic systems in spine surgeries.
\medskip
\subsubsection{Avinash, Apeksha, Alaa Eldin Abdelaal, and Septimiu E. Salcudean. "Evaluation of Increasing Camera Baseline on Depth Perception in Surgical Robotics." In 2020 IEEE International Conference on Robotics and Automation (ICRA), pp. 5509-5515. IEEE, 2020.}
see \cite{avinash2020evaluation}
In this paper, we evaluate the effect of increasing camera baselines on depth perception in robot-assisted surgery. Restricted by the diameter of the surgical trocar through which they are inserted, current clinical stereo endoscopes have a fixed baseline of 5.5 mm. To overcome this restriction, we propose using a stereoscopic "pickup" camera with a side-firing design that allows for larger baselines. We conducted a user study with baselines of 10 mm, 15 mm, 20 mm, and 30 mm to evaluate the effect of increasing baseline on depth perception when used with the da Vinci surgical system. Subjects (N=28) were recruited and asked to rank differently sized poles, mounted at a distance of 200 mm from the cameras, according to their increasing order of height when viewed under different baseline conditions. The results showed that subjects performed better as the baseline was increased with the best performance at a 20 mm baseline. This preliminary proof-of-concept study shows that there is opportunity to improve depth perception in robot-assisted surgical systems with a change in endoscope design philosophy. In this paper, we present this change with our side-firing "pickup" camera and its flexible baseline design. Ultimately, this serves as the first step towards an adaptive baseline camera design that maximizes depth perception in surgery.
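As background for why a longer baseline helps (standard stereo geometry rather than a result of the paper), the depth of a point seen by a rectified stereo pair with focal length $f$, baseline $b$ and disparity $d$ is
\[
Z = \frac{f\,b}{d},
\qquad
\delta Z \approx \frac{Z^{2}}{f\,b}\,\delta d ,
\]
so for a fixed disparity uncertainty $\delta d$ the depth error at a working distance of roughly 200 mm shrinks approximately in proportion to $1/b$, which helps explain the improved height discrimination observed as the baseline was increased.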
\medskip
\subsubsection{Rosero, Hermes Fabian Vargas. "Robotics in surgery and neurosurgery, applications and challenges, a review." Scientia et Technica 25, no. 3 (2020): 478-490.}
see \cite{rosero2020robotics}
The integration of robots in operating rooms aims to improve the performance and efficiency of various procedures, since it offers remarkable advantages over conventional procedures, in particular precision, hand-tremor filtering and the possibility of executing complex tasks; however, considerable challenges still prevail, affecting widespread adoption and maneuverability on the part of surgeons. In the present work, a review of the current state of robotic surgery, its challenges and trends is carried out. Specifically, the need for optimal force feedback mechanisms is evidenced, as well as dynamic visualization through augmented reality or virtual reality. It is not yet possible to state that robotic surgery has reached established standards; however, the integration of alternative technologies will allow surgeons to improve not only the efficiency of the robot, but also its operation by the surgeon.
\medskip
\subsubsection{Sun, Y., J. A. Kim, M. Keshavarz, and A. Thompson. "Microrobots for Precision Medicine."}
see \cite{sunmicrorobots}
The project has visualised and simulated the microrobots' behaviour in response to an external magnetic field, and a 3-D printed phantom will be used as a replica of the targeted organs. Preliminary results have also been demonstrated in this poster.
\medskip
\subsubsection{Avgousti, Sotiris, Eftychios G. Christoforou, Andreas S. Panayides, Panicos Masouras, Pierre Vieyres, and Constantinos S. Pattichis. "Robotic systems in current clinical practice." In 2020 IEEE 20th Mediterranean Electrotechnical Conference (MELECON), pp. 269-274. IEEE, 2020.}
see \cite{avgousti2020robotic}
Medical robotic systems are successfully employed in various surgical specialties today. Yet, a substantial number of remarkable systems that have been developed and piloted, have failed to reach commercialization and thus adoption in clinical practice. This is partly due to the strict regulatory requirements, which typically occupy a significant amount of the development time while incurring additional costs. Pertinent to regulatory approvals is the field of Human Factors, which plays a central role in the design of safe and efficient medical devices. This study briefly introduces the FDA regulatory approval process, discusses the role of human factors in the design process and highlights specific robotic systems that have obtained approval for clinical use. The purpose is to show the status of robotic technologies in relation to the current clinical practice.
\medskip
\subsubsection{Zhang, Jian, Weishi Li, Lei Hu, Yu Zhao, and Tianmiao Wang. "A robotic system for spine surgery positioning and pedicle screw placement." The International Journal of Medical Robotics and Computer Assisted Surgery (2021): e2262.}
see \cite{zhang2021robotic}
Background
In recent years, surgeons have explored minimally invasive methods of percutaneous pedicle screw implantation which can effectively reduce surgical trauma. This article presents an accurate and efficient positioning method and robot system for percutaneous needle placement under C-arm fluoroscopy.
Methods
A simple five-degree-of-freedom (DOF) robot with a unique end-effector is designed to perform perspective calibration and image space registration. The principle of pedicle standard axis positioning is adopted to make the axis of the pedicle overlap with the X-ray axis of the C-arm.
Results
Clinical operations were carried out to verify the feasibility of the designed robot and positioning method. The experimental results show that a total of 26 pedicle screws were accurately implanted, with a Grade A accuracy rate of 96.15\%. The positioning time for a single guide pin is about 154.77 s, and three X-ray images need to be taken on average.
Conclusions
Positioning accuracy is increased by using the present method. In addition, the method is simple to operate, short in operation time, and low in X-ray exposure.
\medskip
\subsubsection{Yang, Bo, Jian Huang, Xinxing Chen, Caihua Xiong, and Yasuhisa Hasegawa. "Supernumerary Robotic Limbs: A Review and Future Outlook." IEEE Transactions on Medical Robotics and Bionics (2021).}
see \cite{yang2021supernumerary}
Wearable robots have become a prevalent method in the field of human augmentation and medical rehabilitation. Typical wearable robots mainly include exoskeletons and prostheses. However, their functions are limited due to dedicated design. In recent years, Supernumerary Robotic Limbs (SRLs) have become a hot spot in the field of wearable robots. Different from exoskeletons and prostheses, SRLs compensate for and strengthen human abilities by providing extra limbs. This advantage allows SRLs to assist users in a novel way, rather than substituting for missing limbs or enhancing existing limbs. However, finding a trade-off between the wearability, efficiency, and usability of those SRLs is still an issue. This paper presents the state of the art in SRLs and discusses some open questions about SRLs’ design and control for further research. This review covers the following areas: (1) Basic concepts and classifications of SRLs; (2) The literature retrieval methodology; (3) Design functions of different types of SRLs, including their positive and negative aspects; (4) Different control strategies of SRLs, including positive and negative aspects, and some improvement methods in applying SRLs; (5) The impact on human body schema while using SRLs; (6) Open challenges and suggestions for future development. This review will help researchers understand the current state of SRLs and provide a comprehensive knowledge foundation for them.
\medskip
\subsubsection{Lacava, G., A. Marotta, F. Martinelli, A. Saracino, A. La Marra, E. Gil-Uriarte, and V. Mayoral Vilches. Current research issues on cyber security in robotics. Technical report, 2020.}
see \cite{lacava2020current}
Cyber Security in Robotics is a rapidly developing area which draws attention from practitioners and researchers. In this paper we provided an overview of the key issues arising in the robotic cyber security landscape and the threats affecting this sector. We also analyzed the scientific approaches to managing cyber attacks in robotics. Finally, we proposed directions for further advances in this area.
\medskip
\subsubsection{Farokh Atashzar, S., Mahdi Tavakoli, Dario Farina, and Rajni V. Patel. "Autonomy and Intelligence in Neurorehabilitation Robotic and Prosthetic Technologies." (2020): 2002001.}
see \cite{farokh2020autonomy}
Neurorehabilitation robotic technologies and powered assistive prosthetic devices have shown great potential for accelerating motor recovery or compensating for the lost motor functions of disabled users. The functioning of these technologies relies on a highly-interactive bidirectional flow of information and physical energy between a human user and a robotic system. Thus, key factors are integrity, intelligence and quality of the interaction loops. As a result, research in this field has focused on (a) enhancing the quality and safety of the physical interaction between disabled users and robotic systems while providing a high level of intelligence and adaptability for generating assistive and therapeutic force fields; (b) detecting the user’s motor intention with high spatiotemporal resolution to provide bidirectional human–machine interfacing; (c) promoting mental engagement through designing multimodal interactive interfaces and various sensory manipulation strategies. This Special Issue has collected papers that contribute to these three research areas, highlighting the importance of different aspects in human–robot interaction loops for augmenting the performance of neurorehabilitation robotic systems and prosthetic devices.
\medskip
\subsubsection{Oetgen, Matthew E., Jody Litrenta, Bamshad Azizi Koutenaei, and Kevin R. Cleary. "A novel surgical navigation technology for placement of implants in slipped capital femoral epiphysis." The International Journal of Medical Robotics and Computer Assisted Surgery 16, no. 1 (2020): e2070.}
see \cite{oetgen2020novel}
Background
Fixation with a single screw is the recommended treatment for slipped capital femoral epiphysis (SCFE). Achieving optimal implant positioning can be difficult owing to the complex geometry of the proximal femur in SCFE. We assessed a novel navigation technology incorporating an inertial measurement unit to facilitate implant placement in an SCFE model.
Methods
Guidewires were placed into 30 SCFE models, using a navigation system that displayed the surgeon's projected implant trajectory simultaneously in multiple planes. The accuracy and the precision of the system were assessed as was the time to perform the procedure.
Results
Implants were placed an average of 5.3 mm from the femoral head center, with a system precision of 0.94 mm. The actual trajectory of the implant deviated from the planned trajectory by an average of 4.9° ± 2.2°. The total average procedure time was 97 seconds.
Conclusion
The use of computer-based navigation in a SCFE model demonstrated good accuracy and precision in terms of both implant trajectory and placement in the center of the femoral head.
\medskip
\subsubsection{Tolu, Gheorghe, Daniel Ghiculescu, and Miron Zapciu. "THE NONCONVENTIONAL SURGICAL SYSTEM DA VINCI." Revista de Tehnologii Neconventionale 24, no. 1 (2020): 39-43.}
see \cite{tolu2020nonconventional}
Intuitive Surgical is the pioneer and a global technology leader in robotic-assisted, minimally invasive surgery. The company develops, manufactures and markets the da Vinci Surgical System, the most complex device used in medical robotics. The product is called "da Vinci" because Leonardo da Vinci is credited with designing the first robot, and his works excel in anatomical detail. The da Vinci Surgical System is a robotic platform that allows complex surgery through incisions of 1-2 cm. So far, hundreds of thousands of surgeries have been performed. The system reproduces the surgeon's movements in real time. The advantages of using the da Vinci Surgical System are greater precision, much better imaging and the possibility of remote surgery. The disadvantages compared with traditional techniques are that it cannot be programmed and cannot make decisions on its own to perform a surgical move or an operation without the surgeon's command. The da Vinci Surgical System is currently used in major medical centers around the world.
\medskip
\subsubsection{Martinez, Daniel Enrique, Waiman Meinhold, John Oshinski, Ai-Ping Hu, and Jun Ueda. "Resolution-Enhanced MRI-Guided Navigation of Spinal Cellular Injection Robot." In 2020 International Symposium on Medical Robotics (ISMR), pp. 83-88. IEEE, 2020.}
see \cite{martinez2020resolution}
This paper presents a method of navigating a surgical robot beyond the resolution of magnetic resonance imaging (MRI) by using a resolution enhancement technique enabled by high-precision piezoelectric actuation. The surgical robot was specifically designed for injecting stem cells into the spinal cord. This particular therapy can be performed in a shorter time by using an MRI-compatible robotic platform than by using a manual needle positioning platform. The imaging resolution of fiducial markers attached to the needle guide tubing was enhanced by reconstructing a high-resolution image from multiple images taken with sub-pixel movements of the robot. The parallel-plane direct-drive needle positioning mechanism positioned the needle guide with a spatial precision two orders of magnitude finer than the typical MRI resolution of up to 1 mm. Reconstructed resolution-enhanced images were used to navigate the robot with a precision that would not have been possible using standard MRI. Experiments were conducted to verify the effectiveness of the proposed enhanced-resolution image-guided intervention.
\medskip
\subsubsection{Uslu, Tuğrul, Erkin Gezgin, Seda Özbek, Didem Güzin, Fatih Cemal Can, and Levent Çetin. "Utilization of Low Cost Motion Capture Cameras for Virtual Navigation Procedures: Performance Evaluation for Surgical Navigation." Measurement (2021): 109624.}
see \cite{uslu2021utilization}
Thanks to recent advances in medical robotics, various traditional surgical procedures have started to be carried out with the help of robot manipulators. To enhance visual feedback and operation efficiency in these scenarios, the use of virtual navigation techniques with specialized hardware has become a widespread choice. On the other hand, relatively high equipment costs risk slowing down research and mass adoption in the field. In light of this, the current study presents a performance evaluation of low cost motion capture cameras in a scenario that demonstrates a robotic-surgery-like operation on a target patient through virtual navigation. Throughout the study, a least squares point based registration technique was used to correlate the different reference frames with each other. A new approach was proposed for the calibration between the robot manipulator and the motion capture system, allowing operation without markers on the manipulator side. An innovative patient mockup design with precisely formed landmark points was also introduced to verify the performance of the low cost hardware. At the end of the study, hardware verification results showed the possibility of sub-millimeter precision in the demonstrated navigation procedures.
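As an illustration of the least squares point based registration step mentioned above, the following minimal sketch (our own illustration, not code from the paper; the point arrays, variable names and SVD-based approach are assumptions of this example) estimates the rigid transform between two sets of corresponding landmark points:
\begin{verbatim}
import numpy as np

def rigid_register(P, Q):
    """Least-squares rigid registration of corresponding 3D points.

    P, Q: (N, 3) arrays of matching landmarks in the source and target
    frames. Returns R (3x3) and t (3,) such that R @ P[i] + t ~ Q[i].
    """
    p_mean, q_mean = P.mean(axis=0), Q.mean(axis=0)
    H = (P - p_mean).T @ (Q - q_mean)      # cross-covariance matrix
    U, _, Vt = np.linalg.svd(H)
    # Reflection guard keeps the result a proper rotation (det = +1)
    D = np.diag([1.0, 1.0, np.sign(np.linalg.det(Vt.T @ U.T))])
    R = Vt.T @ D @ U.T
    t = q_mean - R @ p_mean
    return R, t

# Example: mean residual over landmarks measured in both frames
# R, t = rigid_register(P_camera, P_robot)
# err = np.linalg.norm(P_camera @ R.T + t - P_robot, axis=1).mean()
\end{verbatim}
A mean residual of this kind, computed over the mockup landmark points, is one way the sub-millimeter precision reported in the study could be quantified.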
\medskip
\subsubsection{Mehrdad, Sarmad, Fei Liu, Minh Tu Pham, Arnaud Lelevé, and S. Farokh Atashzar. "Review of advanced medical telerobots." Applied Sciences 11, no. 1 (2021): 209.}
see \cite{mehrdad2021review}
The advent of telerobotic systems has revolutionized various aspects of industry and human life. This technology is designed to augment human sensorimotor capabilities and extend them beyond natural competence. Classic examples are space and underwater applications, where distance and access are the two major physical barriers to be overcome with this technology. In modern examples, telerobotic systems have been used in several clinical applications, including teleoperated surgery and telerehabilitation. In this regard, there has been a significant amount of research and development due to the major benefits in terms of medical outcomes. Recently, telerobotic systems have been combined with advanced artificial intelligence modules to better share agency with the operator and open new doors to medical automation. In this review paper, we provide a comprehensive analysis of the literature considering various topologies of telerobotic systems in the medical domain while shedding light on different levels of autonomy for this technology, starting from direct control and going up to command-tracking autonomous telerobots. Existing challenges, including instrumentation, transparency, autonomy, stochastic communication delays, and stability, are discussed, together with the current directions of research on the benefits to telemedicine and medical automation and the future vision of this technology.
\medskip
\subsubsection{Liu, Yajun, Peihao Jin, Wenyong Liu, and Wei Tian. "Basic Principle of Robot-Assisted Orthopedic Surgery." In Navigation Assisted Robotics in Spine and Trauma Surgery, pp. 5-10. Springer, Singapore, 2020.}
see \cite{liu2020basic}
The collaboration between the robot and the medical environment (including medical staff) plays a critical role throughout the entire procedure of robot-assisted orthopedic surgery. From the aspects of surgical informatization and interactivity, this chapter introduces the functional configuration (workflow and basic setup) and the human–robot interaction modes in the orthopedic operating room. Suggestions for improving the performance and the clinical acceptability of the robot system are also briefly discussed.
\medskip
\subsubsection{Yang, Jianxing, Yan Xiong, Xiaohong Chen, Yuanxi Sun, Wensheng Hou, Rui Chen, Shandeng Huang, and Long Bai. "Bone Fracture Reduction Surgery-aimed Bone Connection Robotic Hand." Journal of Bionic Engineering 18, no. 2 (2021): 333-345.}
see \cite{yang2021bone}
Bone connection with a robot is an important topic in research on robot-assisted fracture reduction surgery. With the methods used to achieve bone-robot connection in current robots, the requirements of reliability and low trauma cannot be satisfied at the same time. In this paper, the design, manufacturing, and experiments of a novel Bone Connection Robotic Hand (BCRH) with variable stiffness capability are carried out through bionics research on the human hand and the principle of particle jamming. BCRH’s variable stiffness characteristic is a special connection between “hard connection” and “soft connection”, which is different from existing research. It maximizes the reliability of the bone-robot connection while minimizing trauma, meets the axial load requirement in clinical practice, and effectively shortens the operating time to less than 40 s (for mode 1) or 2 min (for mode 2). Meanwhile, a theoretical analysis of bone-robot connection failure based on particle jamming is carried out to provide a reference for the research in this paper and other related studies.
\medskip
\subsubsection{Zhang, Wu, Haiyuan Li, Linlin Cui, Haiyang Li, Xiangyan Zhang, Shanxiang Fang, and Qinjian Zhang. "Research progress and development trend of surgical robot and surgical instrument arm." The International Journal of Medical Robotics and Computer Assisted Surgery (2021): e2309.}
see \cite{zhang2021research}
Background
In recent years, surgical robots have become an indispensable part of the medical field. Surgical robots are increasingly being used in the areas of gynaecological surgery, urological surgery, orthopaedic surgery, general surgery and so forth. In this paper, the development of surgical robots in different operations is reviewed and analysed. In master–slave surgical robotic systems, the robotic surgical instrument arms are located at the execution terminal of the surgical robot system as one of its core components, and they are in direct contact with the patient during the operation, playing an important role in the efficiency and safety of the operation. In clinical practice, arm function and design vary across systems. Further, the current research progress of robotic surgical instrument arms used in different operations is analysed and summarised. Finally, the challenges and trends are outlined.
Methods
According to the classification of surgical types, the development of surgical robots for laparoscopic surgery, neurosurgery, orthopaedics and microsurgery are analysed and summarised. Then, focusing on the research of robotic surgical instrument arms, according to structure type, the research and application of straight-rod surgical instrument arm, joint surgical instrument arm and continuous surgical instrument arm are analysed respectively.
Results
According to the discussion and summary of the characteristics of the existing surgical robots and instrument arms, it is concluded that they still have a lot of room for development in the future. Therefore, the development trends of the surgical robot and instrument arm are discussed and analysed in the five aspects of structural materials, modularisation, telemedicine, intelligence and human–machine collaboration.
Conclusion
Surgical robots have shown a development trend of miniaturisation, intelligence, autonomy and dexterity. Therefore, in the field of science and technology, research on the next generation of minimally invasive surgical robots will usher in a peak period of development.
\medskip
\subsubsection{Yang, Geng, Zhibo Pang, M. Jamal Deen, Mianxiong Dong, Yuan-Ting Zhang, Nigel Lovell, and Amir M. Rahmani. "Homecare robotic systems for healthcare 4.0: visions and enabling technologies." IEEE journal of biomedical and health informatics 24, no. 9 (2020): 2535-2549.}
see \cite{yang2020homecare}
Powered by the technologies that have originated from manufacturing, the fourth revolution of healthcare technologies is happening (Healthcare 4.0). As an example of such revolution, new generation homecare robotic systems (HRS) based on the cyber-physical systems (CPS) with higher speed and more intelligent execution are emerging. In this article, the new visions and features of the CPS-based HRS are proposed. The latest progress in related enabling technologies is reviewed, including artificial intelligence, sensing fundamentals, materials and machines, cloud computing and communication, as well as motion capture and mapping. Finally, the future perspectives of the CPS-based HRS and the technical challenges faced in each technical area are discussed.
\medskip
\subsubsection{Zhang, Zhuangzhuang, Qixin Cao, Xiaoxiao Zhu, Yiqi Yang, and Nan Luan. "External Force Estimation on a Robotic Surgical Instrument." In 2020 5th International Conference on Advanced Robotics and Mechatronics (ICARM), pp. 263-268. IEEE, 2020.}
see \cite{zhang2020external}
In this paper, a novel force/torque estimation algorithm is proposed for an in-house developed instrument in a robotic-assisted arthroscopic surgery system. This surgical robot system consists of two parts: a 7 degree-of-freedom (DOF) Franka Emika robot providing a 4-DOF remote centre of motion (RCM) about the incision trocar, and an instrument performing bone grinding. The method uses neural networks (NN) in Cartesian space to estimate the external forces acting on the instrument. The instrument is a rigid-link mechanism attached to the end of the Franka robot through a 6-DOF wrist force sensor. With the proposed method it is possible to obtain force and torque estimates in Cartesian space for any rigid-link wrist mechanism under RCM constraints. Several experiments are performed on an actual robotic system prototype, and the results show the efficacy of the proposed method.
\medskip
\subsubsection{Bhandari, Mahendra, Trevor Zeffiro, and Madhu Reddiboina. "Artificial intelligence and robotic surgery: current perspective and future directions." Current opinion in urology 30, no. 1 (2020): 48-54.}
see \cite{bhandari2020artificial}
Purpose of review
This review aims to draw a road-map to the use of artificial intelligence in an era of robotic surgery and highlight the challenges inherent to this process.
Recent findings
Conventional mechanical robots function by transmitting the actions of the surgeon's hands to the surgical target through the tremor-filtered movements of surgical instruments. Similarly, the next iteration of surgical robots conforms human-initiated actions to a personalized surgical plan, leveraging 3D digital segmentation generated prior to surgery. Advancements in cloud computing, big data analytics, and artificial intelligence have led to increased research and development of intelligent robots in all walks of human life. Inspired by the successful application of deep learning, several surgical companies are joining hands with tech giants to develop intelligent surgical robots. We hereby highlight key steps in the handling and analysis of big data to build, define, and deploy deep-learning models for building autonomous robots.
Summary
Despite tremendous growth of autonomous robotics, their entry into the operating room remains elusive. It is time that surgeons actively collaborate for the development of the next generation of intelligent robotic surgery.
\medskip
\subsubsection{Prokhorenko, Leonid, Daniil Klimov, Denis Mishchenkov, and Yuri Poduraev. "Surgeon–robot interface development framework." Computers in biology and medicine 120 (2020): 103717.}
see \cite{prokhorenko2020surgeon}
The progress of robotic medicine leads to the emergence of an increasing number of highly specialized automated systems based on specialized software. In any such system, there is the task of translating the surgeon’s requests into the process of automated procedure execution. The hardware and software system that provides the translation is the interface between the surgeon and the robot. This paper proposes a generalized framework architecture for the development of such software — the surgeon–robot interface. Existing implementations of such an interface are considered, solutions for the internal structure design of the framework are proposed. Experiments were performed using a prototype of the proposed framework. Such a development framework will allow one to effectively implement the surgeon–robot interfaces at all stages of the robotization of medical procedures, from prototype to final use in the operating room.
\medskip
\subsubsection{Song, Mi Ok, and Yong Jin Cho. "The Present and Future of Medical Robots: Focused on Surgical Robots." Journal of Digital Convergence 19, no. 4 (2021): 349-353.}
see \cite{song2021present}
This review study attempts to analyze the current situation of surgical robots, based on previous research on surgical robots in the era of the 4th industrial revolution, and to forecast the future direction of surgical robots. Surgical robots have made steady progress since the launch of the da Vinci, and surgical robots currently either support the surgeon's operation or operate in a master-slave fashion reflecting the surgeon's intention. Recently, technologies are being developed to combine artificial intelligence and big data with surgical robots and to commercialize a universal platform rather than a platform dedicated to a single type of surgery. Moreover, technologies for automating surgical robots are being developed by generating 3D image data based on diagnostic image data, providing real-time images, and integrating image data into one system. For the development of surgical robots, cooperation between clinicians and engineers, safety management of surgical robots, and institutional support for the use of surgical robots will be required.
\medskip
\subsubsection{Kadkhodamohammadi, Abdolrahim, Nachappa Sivanesan Uthraraj, Petros Giataganas, Gauthier Gras, Karen Kerr, Imanol Luengo, Sam Oussedik, and Danail Stoyanov. "Towards video-based surgical workflow understanding in open orthopaedic surgery." Computer Methods in Biomechanics and Biomedical Engineering: Imaging \& Visualization (2020): 1-8.}
see \cite{kadkhodamohammadi2020towards}
Safe and efficient surgical training and workflow management play a critical role in clinical competency and ultimately, patient outcomes. Video data in minimally invasive surgery (MIS) have enabled opportunities for vision-based artificial intelligence (AI) systems to improve surgical skills training and assurance through post-operative video analysis and development of real-time computer-assisted interventions (CAI). Despite the availability of mounted cameras for the operating room (OR), similar capabilities are much more complex to develop for recording open surgery procedures, which has resulted in a shortage of exemplar video-based training materials. In this paper, we present a potential solution to record open surgical procedures using head-mounted cameras. Recorded videos were anonymised to remove patient and staff identifiable information using a machine learning algorithm that achieves state-of-the-art results on the OR Face dataset. We then propose a CNN-LSTM-based model to automatically segment videos into different surgical phases, which has never been previously demonstrated in open procedures. The redacted videos, along with the automatically predicted phases, are then available for surgeons and their teams for post-operative review and analysis. To our knowledge, this is the first demonstration of the feasibility of deploying camera recording systems and developing machine learning-based workflow analysis solutions for open surgery, particularly in orthopaedics.
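As a rough illustration of the CNN-LSTM idea described above (a generic sketch with assumed hyper-parameters and a ResNet-18 backbone, not the architecture of the cited paper), a per-frame CNN encoder can be combined with an LSTM over the frame sequence to predict a surgical phase for every frame:
\begin{verbatim}
import torch
import torch.nn as nn
from torchvision import models

class PhaseSegmenter(nn.Module):
    """Generic CNN-LSTM sketch for per-frame surgical phase labels."""
    def __init__(self, num_phases, hidden=256):
        super().__init__()
        backbone = models.resnet18(weights=None)
        feat_dim = backbone.fc.in_features
        backbone.fc = nn.Identity()        # keep the frame feature vector
        self.cnn = backbone
        self.lstm = nn.LSTM(feat_dim, hidden, batch_first=True)
        self.head = nn.Linear(hidden, num_phases)

    def forward(self, frames):             # frames: (B, T, 3, H, W)
        b, t = frames.shape[:2]
        feats = self.cnn(frames.flatten(0, 1)).view(b, t, -1)
        out, _ = self.lstm(feats)          # temporal context across frames
        return self.head(out)              # (B, T, num_phases) phase logits

# model = PhaseSegmenter(num_phases=8)
# logits = model(torch.randn(2, 16, 3, 224, 224))
\end{verbatim}
Training such a model against frame-level phase annotations is the usual recipe; the backbone, sequence length and number of phases would of course follow the dataset at hand.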
\medskip
\subsubsection{Li, Changsheng, Xiaoyi Gu, Xiao Xiao, Chwee Ming Lim, Xingguang Duan, and Hongliang Ren. "A flexible transoral robot towards covid-19 swab sampling." Frontiers in Robotics and AI 8 (2021): 51.}
see \cite{li2021flexible}
There are high risks of infection for surgeons during face-to-face COVID-19 swab sampling due to the novel coronavirus’s infectivity. To address this issue, we propose a flexible transoral robot with a teleoperated configuration for swab sampling. The robot comprises a flexible manipulator, an endoscope with a monitor, and a master device. A 3-prismatic-universal (3-PU) flexible parallel mechanism with 3 degrees of freedom (DOF) is used to realize the manipulator’s movements. The flexibility of the manipulator improves the safety of testees. Besides, the master device is similar in structure to the manipulator, making it easy for operators to use. Under the guidance of the vision from the endoscope, the surgeon can operate the master device to control the motion of the swab attached to the manipulator for sampling. In this paper, the robotic system, the workspace, and the operation procedure are described in detail. The tongue depressor, which is used to prevent the tongue’s interference during the sampling, is also tested. The accuracy of the manipulator under visual guidance is validated intuitively. Finally, an experiment on a human phantom is conducted to preliminarily demonstrate the feasibility of the robot.
\medskip
\subsubsection{Ohnishi, Ayumi, Hayate Tohnan, Tsutomu Terada, Minoru Hattori, Hisaaki Yoshinaka, Yusuke Sumi, Hiroyuki Egi, and Masahiko Tsukamoto. "A Method for Estimating Doctor's Fatigue Level in Operating a Surgical Robot Using Wearable Sensors." In 2021 IEEE International Conference on Pervasive Computing and Communications Workshops and other Affiliated Events (PerCom Workshops), pp. 38-43. IEEE, 2021.}
see \cite{ohnishi2021method}
Robot-assisted laparoscopic surgery, such as with the da Vinci Surgical System, has a problem in that surgeons might continue operating for a long period of time without realizing their fatigue and without proper concentration, or they might ignore their fatigue even when they notice it. We propose a method for quantitatively estimating the level of fatigue by attaching wearable sensors to a doctor operating a surgical robot. In this study, several sensor configurations were tested to identify a configuration that is easy for the surgeon to wear. In an evaluation experiment, doctors used a robotic surgery simulator for a long period of time, and the fatigue level expressed by the surgeon was estimated from the score calculated by the simulator. We discussed with doctors how the results of the fatigue estimation should be applied to the system design.
\medskip
\subsubsection{Bondarenko, Viktor, Andrey Kholyavin, Yaroslav Belyaev, Dmitry Epifanov, Islam Bzhikhatlov, Mikhail Abramchuk, and Maxim Mokeyev. "Development of a 6-axis Robotic Manipulator for Stereotactic Surgery." In 2020 XI International Conference on Electrical Power Drive Systems (ICEPDS), pp. 1-4. IEEE, 2020.}
see \cite{bondarenko2020development}
The paper presents the results of the development of a 6-axis robotic manipulator for stereotactic surgery. The robotic manipulator can be used for the surgical treatment of deep-seated brain tumors, for the intracerebral implantation of electrodes for brain stimulation in patients with Parkinson's disease, for the treatment of epilepsy, etc. The experimental model of the low-cost robotic manipulator is described in detail. The basic requirements for a stereotactic robotic manipulator are as follows: a workspace of not less than 0.4 m$^3$, a position accuracy of $\pm$ 0.5 mm, the ability to change the angle of the stereotactic instrument while the coordinates of its end point remain the same, compatibility with surgical navigation systems, and compliance with the requirements for medical equipment. Phantom testing of the manipulator in the neurosurgery operating room using a surgical optical navigation system showed that it is possible to use this manipulator for all types of stereotactic surgical interventions on the brain.
\medskip
\subsubsection{Tian, Wei, Yi Wei, and Xiaoguang Han. "The history and development of robot-assisted orthopedic surgery." In Navigation Assisted Robotics in Spine and Trauma Surgery, pp. 1-3. Springer, Singapore, 2020.}
see \cite{tian2020history}
The orthopedic surgical robot is core intelligent equipment for promoting the development of precise, minimally invasive orthopedic surgery, and it has become a focus of international research. This chapter introduces the development of orthopedic surgical robots and typical orthopedic robot products.
\medskip
\subsubsection{Omisore, Olatunji Mumini, Shipeng Han, Jing Xiong, Hui Li, Zheng Li, and Lei Wang. "A review on flexible robotic systems for minimally invasive surgery." IEEE Transactions on Systems, Man, and Cybernetics: Systems (2020).}
see \cite{omisore2020review}
Recently, flexible robotic systems have been developed to enhance minimally invasive interventions on internal organs located in confined areas of the human body. These surgical devices are designed to navigate anatomical pathways via single-port access, such as natural orifices or minimal incisions, and intraluminal interventions. With improved precision, spatial flexibility and dexterity, robotic technology can enhance surgery such that minimally invasive flexible access becomes a faster, safer, and more convenient method for intra-body interventions without multiple or wide incisions. However, much work is still required for the global acceptance of existing flexible robotic surgical platforms. This review provides extended insights into the design details of two types of flexible robotic systems used for endoscopic and endovascular procedures. As of today, several prototypes of both platforms have been proposed; however, their global acceptability and applicability remain very low. To address this, we present an extensive review of the design constraints and control methods which are vital for safer, faster, and better operation of flexible robotic systems in minimally invasive surgery (MIS). Finally, research trends of flexible robotic systems and their clinical application status in MIS are discussed along with some of the technical and technological challenges hindering their prominence.
\medskip
\subsubsection{Zemmar, Ajmal, Andres M. Lozano, and Bradley J. Nelson. "The rise of robots in surgical environments during COVID-19." Nature Machine Intelligence 2, no. 10 (2020): 566-572.}
see \cite{zemmar2020rise}
The COVID-19 pandemic has changed our world and impacted multiple layers of our society. All frontline workers and in particular those in direct contact with patients have been exposed to major risk. To mitigate pathogen spread and protect healthcare workers and patients, medical services have been largely restricted, including cancellation of elective surgeries, which has posed a substantial burden for patients and immense economic loss for various hospitals. The integration of a robot as a shielding layer, physically separating the healthcare worker and patient, is a powerful tool to combat the omnipresent fear of pathogen contamination and maintain surgical volumes. In this Perspective, we outline detailed scenarios in the pre-, intra- and postoperative care, in which the use of robots and artificial intelligence can mitigate infectious contamination and aid patient management in the surgical environment during times of immense patient influx. We also discuss cost-effectiveness and benefits of surgical robotic systems beyond their use in pandemics. The current pandemic creates unprecedented demands for hospitals. Digitization and machine intelligence are gaining significance in healthcare to combat the virus. Their legacy may well outlast the pandemic and revolutionize surgical performance and management.
\medskip
\subsubsection{Cheng, Ching-Hwa. "A Real-Time Robot-Arm Surgical Guiding System Development by Image-Tracking." In 2020 2nd IEEE International Conference on Artificial Intelligence Circuits and Systems (AICAS), pp. 133-133. IEEE, 2020.}
see \cite{cheng2020real}
The endoscope is widely used for various diagnoses and treatments in Minimally Invasive Surgery (MIS), such as hysteroscopy, laparoscopy, and colonoscopy. However, the limited field of view of the endoscope is often the most problematic issue faced by surgeons and medical students, especially inexperienced physicians, and it leads to difficulty during surgical operations. To reduce the difficulties of MIS with respect to endoscope function, the proposed identification and localization techniques provide the angle and distance from the surgical instruments to the lesion. The real-time guidance provides global positioning information by tracking the lesion position during surgery. Combined with a robot-arm system, it can provide an inexperienced surgeon with stable assistance during long surgical operations. The whole system has been successfully validated by surgeons.
\medskip
\subsubsection{Roy, Rupanjan. "Medical Applications of Artificial Intelligence." (2021).}
see \cite{roy2021medical}
The medical \& dental fields are never-ending fields of innovations \& developments, and researchers keep coming up with something new. One such new dimension in the field of medicine is the incorporation of Artificial Intelligence assisted technologies, improving diagnosis, treatment planning and treatment strategies. This review focuses on the application of different AI technologies in different fields of medicine.
\medskip
\subsubsection{Su, Hang, Andrea Mariani, Salih Ertug Ovur, Arianna Menciassi, Giancarlo Ferrigno, and Elena De Momi. "Toward teaching by demonstration for robot-assisted minimally invasive surgery." IEEE Transactions on Automation Science and Engineering 18, no. 2 (2021): 484-494.}
see \cite{su2021toward}
Learning manipulation skills from open surgery provides more flexible access to the organ targets in the abdominal cavity, and this could make the surgical robot work in a highly intelligent and friendly manner. Teaching by demonstration (TbD) is capable of transferring manipulation skills from humans to humanoid robots by employing active learning of multiple demonstrated tasks. This work aims to transfer motion skills from multiple human demonstrations in open surgery to robot manipulators in robot-assisted minimally invasive surgery (RA-MIS) by using TbD. However, the kinematic constraint should be respected while the learned skills are performed by a robot for minimally invasive surgery. In this article, we propose a novel methodology integrating cognitive learning techniques and the developed control techniques, allowing the robot to learn senior surgeons’ skills and to perform the learned surgical operations in semiautonomous surgery in the future. Finally, experiments are performed to verify the efficiency of the proposed strategy, and the results demonstrate the ability of the system to transfer human manipulation skills to a robot in RA-MIS and also show that the remote center of motion (RCM) constraint can be guaranteed simultaneously. Note to Practitioners: This article is inspired by the limited access to manipulation in laparoscopic surgery under a kinematic constraint at the point of incision. Current commercial surgical robots are mostly operated by teleoperation, which offers little autonomy in surgery. Assisting and enhancing the surgeon’s performance by increasing the autonomy of surgical robots is of fundamental importance. The technique of teaching by demonstration (TbD) is capable of transferring manipulation skills from humans to humanoid robots by employing active learning of multiple demonstrated tasks. With the improved ability to interact with humans, such as flexibility and compliance, the new generation of serial robots has become more and more popular in nonclinical research. Thus, advanced control strategies are required that integrate cognitive functions and learning techniques into the processes of surgical operation between robots, the surgeon, and minimally invasive surgery (MIS). In this article, we propose a novel methodology to model manipulation skills from multiple demonstrations and execute the learned operations in robot-assisted minimally invasive surgery (RA-MIS) by using a decoupled controller that respects the remote center of motion (RCM) constraint by exploiting the redundancy of the robot. The developed control scheme has the following functionalities: 1) it enables 3-D manipulation skill modeling after multiple demonstrations of the surgical tasks in open surgery by integrating dynamic time warping (DTW) and a Gaussian mixture model (GMM)-based dynamic movement primitive (DMP), and 2) it maintains the RCM constraint within a smaller safe area while performing the learned operation in RA-MIS. The developed control strategy can also potentially be used in other industrial applications with a similar scenario.
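For context, one common textbook formulation of the dynamic movement primitives mentioned above (not necessarily the exact variant used in the article) writes each motion coordinate $x$ as a damped spring system shaped by a learned forcing term:
\[
\tau \dot{v} = K\,(g - x) - D\,v + (g - x_{0})\, f(s),
\qquad
\tau \dot{x} = v,
\qquad
\tau \dot{s} = -\alpha\, s,
\]
where $g$ is the goal, $x_{0}$ the start, $s$ a phase variable, and $f(s)$ the nonlinear forcing term learned from the demonstrations (in the article via a GMM after DTW alignment of the multiple demonstrations).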
\medskip
\subsubsection{Phan, Gia Hoang. "Humanoid robotics in healthcare: A review." Design Engineering (2021): 3641-3656.}
see \cite{phan2021humanoid}
This paper seeks to help scientists and the wider community better understand what makes a robot pleasant by giving an overview of healthcare robot ideas, laboratory testing, and applications. When healthcare robots are used appropriately for their structure and functions, they show their capabilities. Companions for the elderly and others with cognitive impairments, robots in educational settings, and cognitive and behavioral enhancement technology are just a few examples. While the robots shown in films and literature remain futuristic, science fiction has inspired everybody to envision a world in which robotics helps us in every aspect of our everyday lives. While we have a long way to go before robots are ubiquitous in our social spaces, significant advances in healthcare robotics technology, supported by the social sciences, are bringing us closer.
\medskip
\subsubsection{Gasteiger, Norina, and Elizabeth Broadbent. "AI, robotics, medicine and health sciences." In The Routledge Social Science Handbook of AI, pp. 313-338. Routledge, 2021.}
see \cite{gasteiger2021ai}
The emergence of artificial intelligence (AI) has provided many opportunities for improvements in healthcare. Early expectations were for AI to change the role of physicians, recruitment and education. This chapter explores the historical and intellectual development of AI and robotics, with a focus on health purposes. It covers major claims and developments, principal contributions to healthcare and major criticisms of using AI and robots in healthcare. Neural networks and deep learning are complex techniques of machine learning. Artificial neural networks vaguely simulate how neurons in the brain would process signals. The 1980s and 1990s were characterised by a surge of interest in AI, especially the application of neural networks, fuzzy set theory and Bayesian networks. Wearable computing refers to computer-powered wearable items, such as clothing, earphones, shoes, socks, watches, wristbands and glasses. Mimicking the success and acceptance of animal therapy, many companion robots look and behave like real animals.
\medskip
\subsubsection{Sadiku, Matthew NO, Nishu Gupta, Yogita P. Akhare, and Sarhan M. Musa. "Emerging IoT Technologies in Smart Healthcare." In IoT and ICT for Healthcare Applications, pp. 3-10. Springer, Cham, 2020.}
see \cite{sadiku2020emerging}
The digital revolution seeks to transform healthcare and empower citizens to take charge of their own health. Healthcare services amount to a considerable portion of the world economy; therefore, we need to keep their rising costs in check, which is a major concern for any nation. The advancement of healthcare has been attributed to the innovation of new medical devices and technologies. The new devices provide innovative solutions for diagnosis, prevention, and treatment. This chapter begins by discussing the concept of emerging technology. Then, it covers several emerging technologies in healthcare. It discusses some of the applications of the emerging technologies. It presents some of the benefits and challenges of the emerging technologies. The last section concludes with some comments.
\medskip
\subsubsection{Khalid, Sibar. "Internet of Robotic Things: A Review." Journal of Applied Science and Technology Trends 2, no. 03 (2021): 78-90.}
see \cite{khalid2021internet}
The Internet of Things (IoT) gives a strong structure for connecting things to the internet to facilitate Machine to Machine (M2M) communication and data transmission through basic network protocols such as TCP/IP. IoT is growing at a fast pace, and billions of devices are now connected, with the number expected to reach trillions in the coming years. Many fields, including the military, farming, manufacturing, healthcare, robotics, and biotechnology, are adopting IoT for advanced solutions as technology advances. This paper offers a detailed view of the current IoT paradigm specifically proposed for robots, namely the Internet of Robotic Things (IoRT). IoRT is a collection of various developments such as cloud computing, Artificial Intelligence (AI), machine learning, and the IoT. This paper also goes over the architecture, which would be essential in the design of multi-role robotic systems for IoRT. Furthermore, it covers the systems underlying IoRT, as well as IoRT implementations. The paper provides a foundation for researchers to imagine the idea of IoRT and to look beyond the frame while designing and implementing IoRT-based robotic systems in real-world implementations.
\medskip
\subsubsection{Boubaker, Olfa. "Medical robotics." Control Theory in Biomedical Engineering (2020): 153-204.}
see \cite{boubaker2020medical}
Today, robotic devices are used for delicate surgical procedures from open surgery to minimally invasive procedures, replacing missing limbs, delivering neuro-rehabilitation therapy to stroke patients, teaching people with learning disabilities, administering drugs, and performing a growing number of other health tasks. This chapter highlights the impact of these machines to improve efficiency and precision of human abilities. Through a comprehensive review of the literature, this chapter presents the different classification approaches of robotic devices. More than 150 references in the open literature, most of them survey papers, are compiled to provide a historical point of view in medical robotics, a review in emerging robotic systems, and an investigation in related challenging problems.
\medskip
\subsubsection{Cooper, Sara, Alessandro Di Fava, Carlos Vivas, Luca Marchionni, and Francesco Ferro. "ARI: The social assistive robot and companion." In 2020 29th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp. 745-751. IEEE, 2020.}
see \cite{cooper2020ari}
With the world population aging and the number of healthcare users with multiple chronic diseases increasing, healthcare is becoming more costly, and as such, the need to optimise both hospital and in-home care is of paramount importance. This paper reviews the challenges that the older people, people with mobility constraints, hospital patients and isolated healthcare users face, and how socially assistive robots can be used to help them. Related promising areas and limitations are highlighted. The main focus is placed on the newest PAL Robotics' robot: ARI, a high-performance social robot and companion designed for a wide range of multi-modal expressive gestures, gaze and personalised behaviour, with great potential to become part of the healthcare community by applying powerful AI algorithms. ARI can be used to help administer first-care attention, providing emotional support to people who live in isolation, including the elderly population or healthcare users who are confined because of infectious diseases such as Covid-19. The ARI robot technical features and potential applications are introduced in this paper.
\medskip
\subsubsection{Saniotis, Arthur, and Maciej Henneberg. "Neurosurgical robots and ethical challenges to medicine." Ethics in Science and Environmental Politics 21 (2021): 25-30.}
see \cite{saniotis2021neurosurgical}
Over the last 20 years, neurosurgical robots have increasingly assisted in neurosurgical procedures. Surgical robots are considered to have noticeable advantages over humans, such as reduction of procedure time, surgical dexterity, no experience of fatigue and improved healthcare outcomes. In recent years, neurosurgical robots have been developed to perform various procedures. Public demand is informing the direction of neurosurgery and placing greater pressure on neurosurgeons to use neurosurgical robots. The increasing diversity and sophistication of neurosurgical robots have received ethical scrutiny due to the surgical complications that may arise as well as the role of robots in the future. In this paper, we address 3 ethical areas regarding neurosurgical robots: (1) loss of neurosurgical skills due to increasing dependency on robots; (2) how far do we want to go with neurosurgical robots? and (3) neurosurgical robots, conflicts of interest and medical bias.
\medskip
\subsubsection{Fischer, Kerstin, Johanna Seibt, Raffaele Rodogno, Maike Kirkegård Rasmussen, Astrid Weiss, Leon Bodenhagen, William Kristian Juel, and Norbert Krüger. "Integrative social robotics hands-on." Interaction Studies 21, no. 1 (2020): 145-185.}
see \cite{fischer2020integrative}
In this paper, we discuss the development of robot use cases in an elderly care facility in the context of exploring the method of Integrative Social Robotics (ISR) when used on top of a user-centered design approach. Integrative Social Robotics is a new proposal for how to generate responsible, i.e. culturally and ethically sustainable, social robotics applications. The starting point for the discussion is the set of five principles that characterize an ISR approach, which are discussed in application to the three use cases for robot support in a Danish elderly care facility developed within the SMOOTH project. The discussion by an interdisciplinary design team explores what attention to the five principles of ISR can offer for use case development. We report on the consequences of this short-time exposure to the basic ideas of ISR for use case development and discuss the value of approaching robot development from an ISR perspective.
\medskip
\subsubsection{Seidita, Valeria, Francesco Lanza, Arianna Pipitone, and Antonio Chella. "Robots as intelligent assistants to face COVID-19 pandemic." Briefings in Bioinformatics 22, no. 2 (2021): 823-831.}
see \cite{seidita2021robots}
Motivation
The epidemic at the beginning of this year, due to a new virus in the coronavirus family, is causing many deaths and is bringing the world economy to its knees. Moreover, situations of this kind are historically cyclical. The symptoms and treatment of infected patients are, for better or worse, even for new viruses, always the same: more or less severe flu symptoms, isolation and full hygiene. By now humankind has learned how to manage epidemic situations, but deaths and negative effects continue to occur. What about technology? What effect has the technological progress we have actually achieved had? In this review, we consider the role of robotics in the fight against COVID-19. We analyse scientific articles, industrial initiatives and project calls for applications from March onwards, highlighting how ready robotics was to face this situation, what is expected from robots and what remains to be done.
Results
The analysis was made by focusing on what research groups offer as a means of support for therapies and prevention actions. We then reported some remarks on what we think is the state of maturity of robotics in dealing with situations like COVID-19.
\medskip
\subsubsection{Kubota, Alyssa, and Laurel D. Riek. "Methods for robot behavior adaptation for cognitive neurorehabilitation." Annual Review of Control, Robotics, and Autonomous Systems 5 (2021).}
see \cite{kubota2021methods}
An estimated 11\% of adults report experiencing some form of cognitive decline which may be associated with conditions such as stroke or dementia, and can impact their memory, cognition, behavior, and physical abilities. While there are no known pharmacological treatments for many of these conditions, behavioral treatments such as cognitive training can prolong the independence of people with cognitive impairments. These treatments teach metacognitive strategies to compensate for memory difficulties in their everyday lives. Personalizing these treatments to suit the preferences and goals of an individual is critical to improving their engagement and sustainment, as well as maximizing the treatment’s effectiveness. Robots have great potential to facilitate these training regimens and support people with cognitive impairments, their caregivers, and clinicians. This article examines how robots can adapt their behavior to be personalized to an individual in the context of cognitive neurorehabilitation. We provide an overview of existing robots being used to support neurorehabilitation, and identify key principles to working in this space. We then examine state-of-the-art technical approaches to enabling longitudinal behavioral adaptation. To conclude, we discuss our recent work on enabling social robots to automatically adapt their behavior and explore open challenges for longitudinal behavior adaptation. This work will help guide the robotics community as they continue to provide more engaging, effective, and personalized interactions between people and robots.
\medskip
\subsubsection{Roy, Ritam. "IMPLEMENTATION AND SENTIMENT ANALYSIS OF ARTIFICIAL INTELLIGENCE IN HEALTH CARE INDUSTRY." International Journal of Modern Agriculture 10, no. 2 (2021): 211-222.}
see \cite{roy2021implementation}
The implementation of Artificial Intelligence and Information Technology is taking a quantum leap in every industry. In recent years there has been an intensified focus on the utilization of Artificial Intelligence in different areas to take care of complex issues. The same goes for the healthcare services industry. With the increase in complexity and the rise of data in the medical industry, the adoption of Artificial Intelligence is growing at a rapid pace and is bringing a paradigm shift to healthcare. The use of AI can enhance patient engagement, and through prediction it can help hospitals with better resource allocation. Although there are many opportunities for Artificial Intelligence in the healthcare sector, there are currently limited examples of such techniques being successfully deployed. This study aims to understand consumer behaviour towards the adoption of Artificial Intelligence in healthcare.
\medskip
\subsubsection{Virgos, Lucia Alonso, Miguel A. Sanchez Vidales, Fernando López Hernández, and J. Javier Rainer Granados. "Internet of Medical Things: Current and Future Trends." In Internet of Medical Things, pp. 19-36. CRC Press, 2021.}
see \cite{virgos2021internet}
The Internet of Medical Things (IoMT) establishes the necessary scenario for medical devices and applications to evolve through information technology. The ability to connect medical devices and systems increases the possibilities for data storage, intelligent data analysis, remote interaction with and monitoring of the user, and updated security concepts.
This is a revolution in the field of medicine that allows there to be evident progress in its effectiveness. At present, there are certain implantation tendencies that seek, mainly, to offer innovations for improvement in the different areas. The future trend will be to offer standardized systems of implementation, improvement and evaluation of them, so that there may be a standardization that allows the medical sector to make a quantitative leap in quality.
In this chapter we analyze the most relevant current and future trends in the IoMT. The objective is to offer a theoretical framework that allows new lines of research to be initiated, focused on offering new methods and/or carefully analyzing the benefits, the methods of implementation and the evaluation of each of the existing systems.
\medskip
\subsubsection{Saini, Akanksha, A. J. Meitei, and Jitenkumar Singh. "Machine Learning in Healthcare: A Review." Available at SSRN 3834096 (2021).}
see \cite{saini2021machine}
This study attempts to introduce artificial intelligence and its significant subfields in machine learning algorithms and reviews the role of these subfields in various areas in healthcare such as bioinformatics, gene detection for cancer diagnosis, epileptic seizure, brain-computer interface. It also reviews the medical image processing through deep learning for diseases such as diabetic retinopathy, gastrointestinal disease, and tumour. And finally, this article discusses the real-world obstacles that need to be overcome to make AI techniques easier to use.
\medskip
\subsubsection{Miura, Satoshi, Ryutaro Ohta, Yang Cao, Kazuya Kawamura, Yo Kobayashi, and Masakatsu G. Fujie. "Using Operator Gaze Tracking to Design Wrist Mechanism for Surgical Robots." IEEE Transactions on Human-Machine Systems (2021).}
see \cite{miura2021using}
This article assessed how surgical robot parameters influenced operator viewpoint during a simulated surgical procedure. Surgical robots are useful tools in minimally invasive surgery. However, even with robots, suturing is difficult because the needle is sometimes obscured by tissue or manipulators and is thus not always visible during the procedure. This is especially true in pediatric surgery, where the surgical environment is smaller than in adult surgery. Hence, surgeons must carefully track the instruments and tissues to understand and predict their current and expected situations. In this article, we used gaze-tracking techniques to analyze the location and timing of the gaze of participants while they manipulated a virtual robotic surgical simulation system. To differentiate between the ideal and actual viewpoint trajectories, we conducted experiments with and without obstacles (i.e., simulated tissue and the manipulator arm). In the obstacle condition, we modulated the wrist length of the manipulator to bring it into view. In the no-obstacle condition, the participants mostly watched the suture needle tip. In the with-obstacle condition, the participants spent less time watching the instruments and more time watching the target point. The amount of time spent watching the target point increased as wrist length increased. Given this trade-off relationship, we examined the proportion of time the participants spent looking at the instruments or target points by wrist length. We calculated the Pareto solutions and clarified the relationship between wrist length and the regions being watched.
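As a concrete illustration of the Pareto analysis mentioned above, the following minimal Python sketch (not the authors' code) computes the Pareto-optimal subset of hypothetical (instrument-watching, target-watching) time proportions recorded for several wrist lengths; every number in it is invented for illustration.
\begin{verbatim}
# Minimal sketch: Pareto-optimal trade-off between time spent watching the
# instruments and time spent watching the target point, per wrist length.
# All numbers are hypothetical; this is NOT the authors' implementation.

# (wrist_length_mm, prop_watching_instruments, prop_watching_target)
observations = [
    (5,  0.30, 0.35),
    (10, 0.35, 0.45),
    (12, 0.45, 0.40),   # dominated by the 10 mm setting
    (15, 0.42, 0.55),
    (20, 0.50, 0.62),
    (25, 0.60, 0.65),
]

def is_dominated(a, b):
    """True if a is dominated by b when we want to *minimise* instrument
    watching and *maximise* target watching."""
    _, a_instr, a_target = a
    _, b_instr, b_target = b
    return (b_instr <= a_instr and b_target >= a_target) and (
        b_instr < a_instr or b_target > a_target
    )

pareto_set = [
    obs for obs in observations
    if not any(is_dominated(obs, other) for other in observations)
]

for wrist, instr, target in pareto_set:
    print(f"wrist {wrist} mm: instruments {instr:.2f}, target {target:.2f}")
\end{verbatim}
Applying the same filter to the full experimental measurements would yield a Pareto front of wrist lengths of the kind the authors describe.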
\medskip
\subsubsection{Miyachi, Shigeru, Yoshitaka Nagano, Reo Kawaguchi, Tomotaka Ohshima, and Hiroki Tadauchi. "Remote surgery using a neuroendovascular intervention support robot equipped with a sensing function: Experimental verification." Asian Journal of Neurosurgery 16, no. 2 (2021): 363.}
see \cite{miyachi2021remote}
Purpose: Expectations for remote surgery in endovascular treatments are increasing. We conducted the world's first remote catheter surgery experiment using an endovascular treatment-supported robot. We considered the results, examined the issues, and suggested countermeasures for practical use.
Methods: The slave robot in the angiography room is an original machine that enables sensing feedback by using an originally developed insertion force-measuring device, which detects the pressure stress on the vessel wall and alerts the operator using an audible scale. The master side was set up in a separate room. The two sides were connected via HTTP communication over a local area network. The surgeon operated by looking at a personal computer monitor that shared the angiography monitor. The slave robot catheterized and inserted a coil for an aneurysm in the silicon blood vessel model in the angiography room.
Results: Our robot responded to the surgeon's operations promptly and to the joystick's swift movements quite accurately. The surgeon could control the stress on the model vessels using various actions, because the operator could hear the sound derived from the insertion force. However, the robot required a ramp-up period to reach a stable advance speed at the time of the initial movement, and experienced a slight time lag.
Conclusion: Our remote operation appeared to be sufficiently feasible to perform the surgery safely. This system seems extremely promising for preventing viral infection and radiation exposure to medical staff. It will also enable medical professionals to operate in remote areas and create a ubiquitous medical environment.
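As a toy illustration of the audible force feedback described in the methods above, the following sketch maps hypothetical insertion-force readings onto a pitch the operator could hear; the force range and the mapping are assumptions made purely for illustration and are not taken from the authors' device.
\begin{verbatim}
# Toy sketch: map catheter insertion-force readings (in newtons) to an audible
# pitch, so that higher stress on the vessel wall produces a higher note.
# The force range and mapping are illustrative assumptions, not the real device.

import math

F_MIN, F_MAX = 0.0, 2.0             # assumed force range of interest, in N
BASE_FREQ, TOP_FREQ = 220.0, 880.0  # A3 .. A5, two octaves

def force_to_frequency(force_n: float) -> float:
    """Map force onto a log-frequency (musical) scale."""
    clamped = min(max(force_n, F_MIN), F_MAX)
    fraction = (clamped - F_MIN) / (F_MAX - F_MIN)
    # interpolate in log space so equal force steps give equal pitch intervals
    return BASE_FREQ * math.exp(fraction * math.log(TOP_FREQ / BASE_FREQ))

for reading in [0.1, 0.5, 1.0, 1.8, 2.5]:   # hypothetical force samples
    print(f"{reading:.1f} N -> {force_to_frequency(reading):6.1f} Hz")
\end{verbatim}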
\medskip
\subsubsection{Cheng, Irene, Richard Moreau, Nathaniel Rossol, Arnaud Leleve, Patrick Lermusiux, Antoine Millon, and Anup Basu. "A Gesture-Based Interface for Remote Surgery." In Connected Health in Smart Cities, pp. 11-22. Springer, Cham, 2020.}
see \cite{cheng2020gesture}
There has been a great deal of research activity in computer- and robot-assisted surgeries in recent years. Some of the advances have included robotic hip surgery, image-guided endoscopic surgery, and the use of intra-operative MRI to assist in neurosurgery. However, most of the work in the literature assumes that all of the expert surgeons are physically present close to the location of a surgery. A new direction that is now worth investigating is assisting in performing surgeries remotely. As a first step in this direction, this chapter presents a system that can detect movement of hands and fingers, and thereby detect gestures, which can be used to control a catheter remotely. Our development is aimed at performing remote endovascular surgery by controlling the movement of a catheter through blood vessels. Our hand movement detection is facilitated by sensors, like LEAP, which can track the position of fingertips and the palm. In order to make the system robust to occlusions, we have improved the implementation by optimally integrating the input from two different sensors. Following this step, we identify high-level gestures, like push and turn, to enable remote catheter movements. To simulate a realistic environment we have fabricated a flexible endovascular mold, and also a phantom of the abdominal region with the endovascular mold integrated inside. A mechanical device that can remotely control a catheter based on movement primitives extracted from gestures has been built. Experimental results are shown demonstrating the accuracy of the system.
\medskip
\subsubsection{Mei, Ziyang. "Remote Vascular Interventional Surgery Robotics: A Review." (2021).}
see \cite{mei2021remote}
Interventional doctors are exposed to radiation hazards during the operation and endure high work intensity. Remote vascular interventional surgery robotics is a hot research field that can not only protect the health of interventional doctors, but also improve the accuracy and efficiency of surgeries. However, current vascular interventional robots still have many shortcomings to be improved. This article introduces the mechanical structure characteristics of surgical robots in the various fields of vascular interventional therapy, discusses the current key features of vascular interventional surgical robotics in force sensing, haptic feedback, and control methods, and summarizes current frontiers in autonomous surgery, remote surgery over long geographical distances, and MRI-compatible structures. Finally, combined with the current research status of vascular interventional surgery robots, this article analyzes the development directions and puts forward a vision for future vascular interventional surgery robots.
\medskip
\subsubsection{Legeza, Peter, Gavin W. Britz, Thomas Loh, and Alan Lumsden. "Current utilization and future directions of robotic-assisted endovascular surgery." Expert Review of Medical Devices 17, no. 9 (2020): 919-927.}
see \cite{legeza2020current}
Introduction
Endovascular surgery has become the standard of care to treat most vascular diseases using a minimally invasive approach. The CorPath system further enhances the potential and enables surgeons to perform robotic-assisted endovascular procedures in interventional cardiology, peripheral vascular surgery, and neurovascular surgery. With the introduction of this technique, the operator can perform multiple steps of endovascular interventions outside of the radiation field with high precision movements even from long-geographical distances.
Areas covered
The first and second-generation CorPath systems are currently the only commercially available robotic devices for endovascular surgery. This review article discusses the clinical experiences and outcomes with the robot, the advanced navigational features, and the results with recent hardware and software modifications, which enables the use of the system for neurovascular interventions, and long-distance interventional procedures.
Expert opinion
A high procedural success rate was achieved with the CorPath robotic systems in coronary and peripheral interventions, and the device seems promising in neurovascular procedures. More experience is needed with robotic neurovascular interventions and with complex peripheral arterial cases. In the future, long-distance endovascular surgery can potentially transform the management and treatment of acute myocardial infarction and stroke, making endovascular care more accessible for patients in remote areas.
\medskip
\subsubsection{Arian, Y. "A Review of the Application of Robots in Maxillofacial Surgery." J Oral Health Dent Res 1, no. 1 (2021): 1-4.}
see \cite{arian2021review}
Aim: The purpose of this study was to review new articles on the use of robotic surgery in maxillofacial surgery.
Method and Materials: For the purpose of this review study, all Medline (PubMed) and Google Scholar electronic resources focused on the use of robotic surgery in maxillofacial surgery in the period 1999-2021 were reviewed.
Results: Using robots in maxillofacial surgery can reduce hospitalization time, reduce intraoperative bleeding, and improve recovery for patients, although the high cost and lack of touch can be a problem.
Conclusion: The results of this review study show that the surgery robot can replace open surgical methods of maxillofacial surgery. Although it may not be generalized for use, patients may be assisted in areas where the surgeon may not be present.
\medskip
\subsubsection{Desselle, Mathilde R., Ross A. Brown, Allan R. James, Mark J. Midwinter, Sean K. Powell, and Maria A. Woodruff. "Augmented and virtual reality in surgery." Computing in Science \& Engineering 22, no. 3 (2020): 18-26.}
see \cite{desselle2020augmented}
Augmented and virtual reality are transforming the practice of healthcare by providing powerful and intuitive methods of exploring and interacting with digital medical data, as well as integrating data into the physical world to create natural and interactive virtual experiences. These immersive technologies use lightweight stereoscopic head-mounted displays to place users into simulated and realistic three-dimensional digital environments, unlocking significant benefits from the seamless integration of digital information with the healthcare practitioner and patient's experience. This review article explores some of the current and emerging technologies and applications in surgery, their benefits and challenges around immersion, spatial awareness and cognition, and their reported and projected use in learning environments, procedure planning and perioperative contexts and in the surgical theatre. The enhanced access to information, knowledge, and experience enabled by virtual and augmented reality will improve healthcare approaches and lead to better outcomes for patients and the wider community.
\medskip
\subsubsection{Meshram, Dewanand A., and Dipti D. Patil. "5G Enabled Tactile Internet for Tele-Robotic Surgery." Procedia Computer Science 171 (2020): 2618-2625.}
see \cite{meshram20205g}
In today’s communication era, we are speedily moving from earlier generations of mobile communication technologies to next-generation mobile communication. Each move from one generation to the next, from 3G to 4G to 5G, brings built-in improvements in various characteristics. The world is collecting huge amounts of information and delivering it to every corner of the globe, which creates enormous pressure on core networking and backhaul resources. This paper focuses on combining 5G technologies with mobile edge computing for robotic telesurgery. A huge amount of data must be transferred continuously over a high-speed network for this process. This process demands high network bandwidth, minimal information loss, minimal delay and real-time response for precise surgery. As of today, useful and cost-effective solutions for telerobotic surgeries in the medical domain are not yet preferred, possibly due to the performance limitations of existing communication technologies like 4G. Hence, real-time medical video transmission using a 5G enabled tactile internet (T5ET) technology environment is evaluated experimentally in this research, with a focus on QoS parameters like jitter control, throughput, and delay. Promising results are observed in the evaluation and discussed here, which will contribute prominently to the medical domain.
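To make the QoS parameters named above concrete, here is a minimal sketch that computes one-way delay, jitter and throughput from per-packet send/receive timestamps; the packet trace is invented and is not tied to the paper's 5G testbed.
\begin{verbatim}
# Minimal sketch: compute delay, jitter and throughput from per-packet records.
# The packet trace below is invented; real measurements would come from the
# 5G/tactile-internet testbed described in the paper.

# (send_time_s, recv_time_s, size_bytes)
packets = [
    (0.000, 0.012, 1400),
    (0.020, 0.031, 1400),
    (0.040, 0.055, 1400),
    (0.060, 0.070, 1400),
]

delays = [recv - send for send, recv, _ in packets]
mean_delay = sum(delays) / len(delays)

# Jitter as the mean absolute difference between consecutive one-way delays
# (similar in spirit to the RFC 3550 interarrival jitter, simplified here).
jitter = sum(abs(b - a) for a, b in zip(delays, delays[1:])) / (len(delays) - 1)

duration = packets[-1][1] - packets[0][0]
throughput_bps = 8 * sum(size for _, _, size in packets) / duration

print(f"mean one-way delay: {mean_delay * 1000:.1f} ms")
print(f"jitter:             {jitter * 1000:.1f} ms")
print(f"throughput:         {throughput_bps / 1e6:.2f} Mbit/s")
\end{verbatim}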
\medskip
\subsubsection{Shah, Shinil K., Melissa M. Felinski, Todd D. Wilson, Kulvinder S. Bajwa, and Erik B. Wilson. "Next-Generation Surgical Robots." In Digital Surgery, pp. 401-405. Springer, Cham, 2021.}
see \cite{shah2021next}
The adoption of robotic surgery continues to increase, with over 1 million robotic-assisted surgical procedures performed worldwide (2018 estimate). Over 70 companies are developing/introducing platforms for robotic-assisted surgery in nearly every procedural speciality. In this chapter, we review concepts that are important to consider when discussing the future of robotic surgical platforms, including design of the surgeon, surgical team, and patient interfaces, integrated versus modular designs, reality augmentation, cost, and data analytics.
\medskip
\subsubsection{Ara, Jinat, Hanif Bhuiyan, Yeasin Arafat Bhuiyan, Salma Begum Bhyan, and Muhammad Ismail Bhuiyan. "AR-based Modern Healthcare: A Review." arXiv preprint arXiv:2101.06364 (2021).}
see \cite{ara2021ar}
The recent advances of Augmented Reality (AR) in healthcare have shown that technology is a significant part of the current healthcare system. In recent days, augmented reality has proposed numerous smart applications in the healthcare domain including wearable access, telemedicine, remote surgery, diagnosis of medical reports, emergency medicine, etc. The aim of the developed augmented healthcare applications is to improve patient care, increase efficiency, and decrease costs. This article reviews the advances in AR-based healthcare technologies and looks into the strategies being taken to further this branch of technology. This article explores the important services of augmented-reality-based healthcare solutions and throws light on recently invented ones as well as their respective platforms. It also addresses concurrent concerns and their relevant future challenges. In addition, this paper analyzes distinct AR security and privacy issues, including security requirements and attack terminologies. Furthermore, this paper proposes a security model to minimize security risks. The advantages of augmented reality in healthcare, especially for surgery, emergency diagnosis, and medical training, are demonstrated here through proper analysis. To say the least, the article illustrates a complete overview of augmented reality technology in the modern healthcare sector by demonstrating its impacts, advancements, and current vulnerabilities; it discusses future challenges, and concludes with recommendations pointing to a new direction for further research.
\medskip
\subsubsection{Troccaz, Jocelyne, Giulio Dagnino, and Guang-Zhong Yang. "Frontiers of medical robotics: from concept to systems to clinical translation." Annual review of biomedical engineering 21 (2019): 193-218.}
see \cite{troccaz2019frontiers}
Medical robotics is poised to transform all aspects of medicine—from surgical intervention to targeted therapy, rehabilitation, and hospital automation. A key area is the development of robots for minimally invasive interventions. This review provides a detailed analysis of the evolution of interventional robots and discusses how the integration of imaging, sensing, and robotics can influence the patient care pathway toward precision intervention and patient-specific treatment. It outlines how closer coupling of perception, decision, and action can lead to enhanced dexterity, greater precision, and reduced invasiveness. It provides a critical analysis of some of the key interventional robot platforms developed over the years and their relative merit and intrinsic limitations. The review also presents a future outlook for robotic interventions and emerging trends in making them easier to use, lightweight, ergonomic, and intelligent, and thus smarter, safer, and more accessible for clinical use.
\medskip
\subsubsection{Dupont, Pierre E., Bradley J. Nelson, Michael Goldfarb, Blake Hannaford, Arianna Menciassi, Marcia K. O’Malley, Nabil Simaan, Pietro Valdastri, and Guang-Zhong Yang. "A decade retrospective of medical robotics research from 2010 to 2020." Science Robotics 6, no. 60 (2021): eabi8017.}
see \cite{dupont2021decade}
Robotics is a forward-looking discipline. Attention is focused on identifying the next grand challenges. In an applied field such as medical robotics, however, it is important to plan the future based on a clear understanding of what the research community has recently accomplished and where this work stands with respect to clinical needs and commercialization. This Review article identifies and analyzes the eight key research themes in medical robotics over the past decade. These thematic areas were identified using search criteria that identified the most highly cited papers of the decade. Our goal for this Review article is to provide an accessible way for readers to quickly appreciate some of the most exciting accomplishments in medical robotics over the past decade; for this reason, we have focused only on a small number of seminal papers in each thematic area. We hope that this article serves to foster an entrepreneurial spirit in researchers to reduce the widening gap between research and translation.
\medskip
\subsubsection{Hu, Jiabing, Ying Sun, Gongfa Li, Guozhang Jiang, and Bo Tao. "Probability analysis for grasp planning facing the field of medical robotics." Measurement 141 (2019): 227-234.}
see \cite{hu2019probability}
A medical surgical robot is a fusion of medical image matching/fusion technology and robotic trajectory control technology. Medical image matching fusion obtains two images of a certain region of the patient’s body from two cameras on the robot and, after matching and fusion processing, produces a single image. At present, surgical robots have been successfully applied in minimally invasive surgery such as pelvic organ prolapse, defect repair and other pelvic floor reconstruction operations. Previously, most of the robots used in medical surgery have had only one arm, but with the development of robotics-related fields, multi-fingered robots with binocular stereo vision are becoming capable of completing complex minimally invasive surgery. This paper aims to promote the further integration of multi-fingered manipulators and medical image detection, focusing on the grasping probability of a multi-fingered manipulator. When the three-dimensional information of the object is incomplete, machine learning methods perform better than hard-coded methods in planning object grasp points. At present, most known methods can obtain classification results but cannot give the probability of the chosen category. Aiming at the problem of grasp point planning, this paper proposes a grasp planning method based on big-data Gaussian process classification. A planner based on Gaussian process classification is designed, and the hyperparameters used in the Gaussian process to judge the probability of a successful grasp are calculated. Based on the determined grasping scheme, the feasibility distribution map of the grasp points obtained by the trained Gaussian process classifier is drawn in MATLAB. The results show that the trained Gaussian process classifier is biased towards the center of the object, which is the point with high stability. This method can give classification results and corresponding probabilities, which represent the feasibility of the grasp points.
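The sketch below illustrates the general idea of a Gaussian process classifier returning a grasp-feasibility probability for candidate grasp points, in the spirit of the approach summarised above; it uses scikit-learn on synthetic data and is not the authors' MATLAB implementation.
\begin{verbatim}
# Sketch: Gaussian process classification of grasp-point feasibility.
# Synthetic data: candidates near the object's centre (the origin) are
# labelled graspable, points near the edges are not - echoing the observation
# that the trained classifier favours points near the object's centre.
# Illustrative only; not the authors' implementation.

import numpy as np
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

rng = np.random.default_rng(0)

# 2-D grasp candidate positions on the object surface, in object coordinates
X = rng.uniform(-1.0, 1.0, size=(200, 2))
# label 1 (graspable) if within 0.5 of the centre, with a little label noise
y = (np.linalg.norm(X, axis=1) < 0.5).astype(int)
flip = rng.random(len(y)) < 0.05
y[flip] = 1 - y[flip]

gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=0.3))
gpc.fit(X, y)

candidates = np.array([[0.0, 0.0], [0.3, 0.2], [0.9, 0.8]])
probs = gpc.predict_proba(candidates)[:, 1]
for point, p in zip(candidates, probs):
    print(f"grasp at {point}: feasibility probability {p:.2f}")
\end{verbatim}
Unlike a plain classifier, the predicted probability gives the planner a graded feasibility score for each candidate grasp point, which is the property the abstract emphasises.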
\medskip
\subsubsection{Hsiao, Jen-Hsuan, Jen-Yuan Chang, and Chao-Min Cheng. "Soft medical robotics: clinical and biomedical applications, challenges, and future directions." Advanced Robotics 33, no. 21 (2019): 1099-1111.}
see \cite{hsiao2019soft}
Bioinspired soft robotics allow for safer clinical interactions with human patients but conventional, hard robots, which are often built with rigid materials and complex control systems, compromise tissue integrity, freedom of movement, conformability, and overall human bio-compatibility. Soft, compliant materials intrinsically reduce mechanical complexity, accommodate their usage environment, and provide great practical potential for medical device developments. Previous review papers have generally covered the topics of materials, manufacturing processes, actuator modeling and control, and current trends. Here, we focus on recent developments in soft robotic applications for the medical field including advances in cardiac devices, surgical robots, and soft rehabilitation and assistance devices. In medical applications, soft robotic devices not only expedite the evolution of minimally invasive surgery but also improve the bio-compatibility of rehabilitation and assistance devices. Here, we evaluate design requirements, mechanisms, achievements and challenges in these key areas. Of particular note, this paper concludes with a discussion on advances in 3D printing and adapting neural networks for modeling and control frameworks that have facilitated the development of faster and less expensive soft medical devices.
\medskip
\subsubsection{Bai, Long, Jianxing Yang, Xiaohong Chen, Yuanxi Sun, and Xingyu Li. "Medical robotics in bone fracture reduction surgery: a review." Sensors 19, no. 16 (2019): 3593.}
see \cite{bai2019medical}
Given the advantages of precise operation and effective reduction of radiation exposure, robots have become one of the best choices for addressing the shortcomings of traditional fracture reduction surgery. This paper focuses on the application of robots in fracture reduction surgery, covering the design of the mechanism, navigation technology, robotic control, interaction technology, and bone–robot connection technology. Through a literature review, the problems of current fracture reduction robots and their future development are discussed.
\medskip
\subsubsection{Taylor, Russell H., Peter Kazanzides, Gregory S. Fischer, and Nabil Simaan. "Medical robotics and computer-integrated interventional medicine." In Biomedical Information Technology, pp. 617-672. Academic Press, 2020.}
see \cite{taylor2020medical}
This chapter is concerned with medical robotics and computer-integrated interventional medicine (CIIM). Broadly speaking, CIIM systems enable a three-way partnership between humans, technology, and information to improve treatment processes in surgery and other forms of interventional medicine. We first review the architecture, basic mathematical methods, and technology found in such systems and briefly discuss some of the common safety and regulatory compliance issues associated with them. Then we provide two common and interrelated paradigms found in CIIM systems and provide a few selected examples of each paradigm. The first paradigm (which we call “surgical CAD/CAM”) emphasizes CIIM as a closed-loop process consisting of (1) constructing a patient-specific model and interventional plan; (2) registering the model and plan to the patient; (3) using technology to assist in carrying out the plan; and (4) assessing the result. The second paradigm, which we refer to as “surgical assistance,” emphasizes the interactive nature of CIIM systems, in which surgical decisions are made in the operating room.
\medskip
\subsubsection{Liu, Jun, Gurpreet Singh, Subhi Al'Aref, Benjamin Lee, Olachi Oleru, James K. Min, Simon Dunham, Mert R. Sabuncu, and Bobak Mosadegh. "Image registration in medical robotics and intelligent systems: fundamentals and applications." Advanced Intelligent Systems 1, no. 6 (2019): 1900048.}
see \cite{liu2019image}
Medical image registration, by transforming two or more sets of imaging data into one coordinate system, plays a central role in medical robotics and intelligent systems, from diagnostics and surgical planning to real-time guidance and postprocedural assessment. Recent advances in medical image registration have made a significant impact in orthopedic, neurological, cardiovascular, and oncological applications. The recent literature in medical image registration is reviewed, with a discussion of its fundamentals and applications. Within each section, the registration techniques are introduced, each method is classified based on its working mechanism, and its benefits and limitations are discussed. Recently, machine learning has had an important impact on the field of image registration, yielding novel methods and unprecedented speed. The validation of registration methods, however, remains a challenge due to the lack of reliable ground truth. Medical image registration will continue to make significant impacts in the area of advanced medical imaging, as the fusion/combination of multimodal images and advanced visualization technology become more widespread.
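To ground the phrase ``transforming two or more sets of imaging data into one coordinate system'', the following sketch applies a rigid (rotation plus translation) homogeneous transform to map landmark points from a moving image's frame into a fixed image's frame; the transform parameters are arbitrary illustrative values, not results from the review.
\begin{verbatim}
# Minimal sketch: rigid registration as a coordinate transform.
# Landmark points expressed in the "moving" image frame are mapped into the
# "fixed" image frame via a 4x4 homogeneous transform T_fixed_moving.
# The rotation/translation values are arbitrary illustrative numbers.

import numpy as np

def rigid_transform(rotation_deg_z: float, translation_xyz) -> np.ndarray:
    """Build a 4x4 homogeneous transform: rotation about z, then translation."""
    theta = np.deg2rad(rotation_deg_z)
    c, s = np.cos(theta), np.sin(theta)
    T = np.eye(4)
    T[:3, :3] = np.array([[c, -s, 0.0],
                          [s,  c, 0.0],
                          [0.0, 0.0, 1.0]])
    T[:3, 3] = translation_xyz
    return T

# Hypothetical registration result: 10 degrees about z, (5, -3, 2) mm shift
T_fixed_moving = rigid_transform(10.0, [5.0, -3.0, 2.0])

# Landmark points (in mm) expressed in the moving image's coordinate frame
points_moving = np.array([[0.0, 0.0, 0.0],
                          [10.0, 0.0, 0.0],
                          [0.0, 10.0, 5.0]])

# Append the homogeneous coordinate, apply the transform, then drop it again
homogeneous = np.hstack([points_moving, np.ones((len(points_moving), 1))])
points_fixed = (T_fixed_moving @ homogeneous.T).T[:, :3]

print(points_fixed)
\end{verbatim}
In practice the transform (rigid, affine, or deformable) is estimated by optimising a similarity metric between the two images rather than set by hand, but the coordinate-mapping step shown here is the common core.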
\medskip
\subsubsection{Buettner, Ricardo, Alena Renner, and Anna Boos. "A systematic literature review of research in the surgical field of medical robotics." In 2020 IEEE 44th Annual Computers, Software, and Applications Conference (COMPSAC), pp. 517-522. IEEE, 2020.}
see \cite{buettner2020systematic}
Due to the growing demand for efficient and precise surgical options, there has been a push in the field of development of medical robot systems in recent years. This paper provides an overview of the current status and development of medical robots in surgery through a systematic literature review of research. The classification is made into minimally invasive and non-invasive robotic assistance. An introduction and definition of medical robots is provided. The advantages and disadvantages of the applications are highlighted. A summary and insight into future work is presented.
\medskip
\subsubsection{Mois, George, and Jenay M. Beer. "The Role of Healthcare Robotics in Providing Support to Older Adults: a Socio-ecological Perspective." Current Geriatrics Reports 9, no. 2 (2020): 82-89.}
see \cite{mois2020role}
Purpose of Review
In this review, we provide an overview of how healthcare robotics can facilitate healthy aging, with an emphasis on physical, cognitive, and social supports. We next provide a synthesis of future challenges and considerations in the development and application of healthcare robots. We organize these considerations using a socio-ecological perspective and discuss considerations at the individual, care partner, community healthcare, and healthcare policy levels.
Recent Findings
Older adults are the fastest growing segment of the US population. Age-related changes and challenges can present difficulties for older adults who want to age healthily and maintain independence. Technology, specifically healthcare robots, has potential to provide health supports to older adults. These supports span widely across the physical, cognitive, and social aspects of healthy aging.
Summary
Our review suggests that while healthcare robotics has potential to revolutionize the way in which older adults manage their health, there are many challenges such as clinical effectiveness, technology acceptance, health informatics, and healthcare policy and ethics. Addressing these challenges at all levels of the healthcare system will help ensure that healthcare robotics promote healthy aging and are applied safely, effectively, and reliably.
\medskip
\subsubsection{Pichetworakoon, Arachamon, Nitchanand Kooptarnond, and Sutthichai Ngamchuensuwan. "Economic and Legal on The Deploying of Medical and Healthcare Robotics: Case Study on a Comparison of the European Union (EU), South Africa, and Thailand." The Journal of Law, Public Administration and Social Science. School of Law Chiang Rai Rajabhat University 5, no. 2 (2021): 21-43.}
see \cite{pichetworakoon2021economic}
Europe is well placed to benefit from the potential of Artificial Intelligence (AI). It produces industrial and professional service robots for healthcare and plays an important role in developing and using software applications for companies. Surprisingly, the African Region and Thailand, neither of which is traditionally associated with robotics technology, are making good progress in the use and development of robots in the field of medical service and healthcare by promoting investment in robotics innovation. However, there are still several unclear aspects to be addressed by policymakers in these two regions. Although medical robots have shown great potential in these two areas by contributing to various healing processes, many limitations to the application of the technology as such have emerged in terms of economic policy, legal frameworks, the risk to privacy, and moral responsibility. Apparently, the main barrier holding back these two communities from being the next generation of automotive developers in medical robots is that legislative and investment policies governing robot activities are produced and enforced by different organizations separately. This effectively discourages not only management policy but also related action to support robotics innovation. Even though experts are creating increasingly advanced robot technology, regulation of its development is still lagging behind. This article will inform the social sciences, ethics, law, and market policy to find a solution where robots and humans can work side by side, with an emphasis on the application of legal and economic regulations relating to this growth in automation to encourage the status of the robots, bringing them to the forefront of the socio-scientific platform by applying documentary and action research methodology. In order to achieve this goal, economic policymakers and legal regulators have to engage in these agendas together with producers to establish how law and market policy should react to medical robots appropriately.
\medskip
\subsubsection{Wehde, Mark. "Healthcare 4.0." IEEE Engineering Management Review 47, no. 3 (2019): 24-28.}
see \cite{wehde2019healthcare}
Healthcare is shifting from traditional hospital-centric care to a more virtual, distributed care that heavily leverages the latest technologies around artificial intelligence, deep learning, data analytics, genomics, home-based healthcare, robotics, and three-dimensional printing of tissue and implants. In the future, fundamental shifts will reshape the healthcare industry. Healthcare will be delivered as a seamless continuum of care, away from the clinic-centered point-of-care model and with a greater focus on prevention and early intervention.
\medskip
\subsubsection{Brannan, Laura. "Inference over Knowledge Representations Automatically Generated from Medical Texts with Applications in Healthcare Robotics." (2021).}
see \cite{brannan2021inference}
Many nations across the world, including the United States, face an impending shortage of trained medical professionals and personnel. The development of a robotic healthcare assistant would help alleviate this ongoing shortage in healthcare workers. For a robotic healthcare assistant to be useful, it must facilitate human-like interactions and maintain contextual understanding of its environment. In this work, we take steps toward endowing healthcare assistant robots with the ability to anticipate the equipment needs of healthcare providers without being explicitly asked. We utilize an automatically formulated knowledge representation from web-based knowledge bases paired with a traversal algorithm to achieve these objectives. Equipped with a proper knowledge base and rule-based traversal algorithm, our robot will have the ability to retrieve relevant related information given a medical condition or symptom.
\medskip
\subsubsection{Alla, Sujatha, and Pilar Pazos. "Healthcare Robotics: Key Factors that Impact Robot Adoption in Healthcare." In IIE Annual Conference. Proceedings, pp. 1121-1126. Institute of Industrial and Systems Engineers (IISE), 2019.}
see \cite{alla2019healthcare}
In the current dynamic business environment, healthcare organizations are focused on improving patient satisfaction, performance, and efficiency. The healthcare industry is considered a complex system that is highly reliant on new technologies to support clinical as well as business processes. Robotics is one such technology that is considered to have the potential to increase efficiency in a wide range of clinical services. Although the use of robotics in healthcare is at the early stages of adoption, some studies have shown the capacity of this technology to improve precision, accessibility through less invasive procedures, and reduction of human error during complex surgeries. Additionally, experts have anticipated an increase in the use of robots for tasks that require physical strength, and also tasks that are repetitive, unsafe or that carry contagion risk. Several studies reported cost savings as a result of using clinical robots. Although robotics shows promise to reduce healthcare costs, current hospital systems are still not using robots at large scale. The application of robotics in healthcare constitutes a problem of integration of new technology in an existing and highly regulated complex system. This paper presents a classification of robots based on their role in a healthcare facility and identifies the key factors that affect the integration of robotics in healthcare by applying a prior technology integration framework.
\medskip
\subsubsection{Johanson, Deborah L., Ho Seok Ahn, and Elizabeth Broadbent. "Improving Interactions with Healthcare Robots: A Review of Communication Behaviours in Social and Healthcare Contexts." International Journal of Social Robotics (2020): 1-16.}
see \cite{johanson2020improving}
A growing shortfall exists between the number of older individuals who require healthcare support and the number of qualified healthcare professionals who can provide this. Robots offer the potential to provide healthcare support to patients both at home and in healthcare settings. However, in order for robots to be successfully implemented in these environments, they need to behave in ways that are appropriate and acceptable to human users. One way to identify appropriate social behaviours for healthcare robots is to model their behaviour on interactions between healthcare professionals and patients. This literature review aimed to inform healthcare robotics research by highlighting communication behaviours that are important within the context of healthcare. The review focussed on relevant research in human clinical interactions, followed by a review of similar factors in social robotics research. Three databases were searched for terms relating to healthcare professional communication behaviours associated with patient outcomes. The results identified key communication behaviours that can convey clinical empathy, including humour, self-disclosure, facial expressions, eye gaze, body posture, and gestures. A further search was conducted to identify research examining these key behaviours within the context of social and healthcare robotics. Research into these factors in human–robot interaction in healthcare is limited to date, and this review provides a useful guide for future research.
\medskip
\subsubsection{Radic, Marija, Agnes Vosen, and Birgit Graf. "Use of robotics in the German healthcare sector." In International Conference on Social Robotics, pp. 434-442. Springer, Cham, 2019.}
see \cite{radic2019use}
This paper seeks to help scientists and the wider community better understand what makes a robot pleasant by giving an overview of healthcare robot ideas, laboratory testing, and applications. When healthcare robots are utilized appropriately for their structure and functions, they show their capabilities. Companions for the elderly and others with cognitive impairments, robots in educational settings, and cognitive and behavioral enhancement technology are just a few examples. While the robots shown in films and literature remain futuristic, science fiction has inspired everybody to envision a world in which robots help us in every aspect of our everyday lives. While we have a long way to go before robots are ubiquitous in our social spaces, significant advances in healthcare robotics technology, supported by the social sciences, are bringing us closer.
\medskip
\subsubsection{Bartosiak, Marcin, Gianni Bonelli, Lorenzo Stefano Maffioli, Ugo Palaoro, Francesco Dentali, Giovanni Poggialini, Federica Pagliarin, Stefano Denicolai, and Pietro Previtali. "Advanced Robotics as a Support in Healthcare Organizational Response. A COVID-19 Pandemic case." In Healthcare Management Forum, p. 08404704211042467. Sage CA: Los Angeles, CA: SAGE Publications, 2021.}
see \cite{bartosiak2021advanced}
The use of robotics is becoming widespread in healthcare. However, little is known about how robotics can affect the relationship with patients in epidemic emergency response or how it impacts clinicians in their organization and work. As a hospital responding to the consequences of the COVID-19 pandemic “ASST dei Sette Laghi” (A7L) in Varese, Italy, had to react quickly to protect its staff from infection while coping with high budgetary pressure as prices of Personal Protection Equipment (PPE) increased rapidly. In response, it introduced six semi-autonomous robots to mediate interactions between staff and patients. Thanks to the cooperation of multiple departments, A7L implemented the solution in less than 10 weeks. It reduced risks to staff and outlay for PPE. However, the characteristics of the robots affected their perception by healthcare staff. This case study reviews critical issues faced by A7L in introducing these devices and recommendations for the path forward.
\medskip
\subsubsection{Khan, Arshia, and Yumna Anwar. "Robots in healthcare: A survey." In Science and Information Conference, pp. 280-292. Springer, Cham, 2019.}
see \cite{khan2019robots}
Advances in robotic technology are stimulating the growth of new treatment mechanisms by enhancing patient outcomes and helping reduce healthcare costs, while providing an alternative care apparatus. The provision of care by assistive therapeutic robots has grown increasingly over the past decade. Although the healthcare industry has been lagging in the use of assistive robots, the use of assistive robots in the manufacturing industry has been the norm for a long time. The vulnerable population of patients with illnesses, cognition challenges, and disabilities is one of the causes for the delay in the use of assistive therapeutic robots in healthcare. In this paper we explore the various types of assistive robots and their use in the healthcare industry.
\medskip
\subsubsection{Ikeda, Yoko, and Michiko Iizuka. Global Rulemaking Strategy for Implementing Emerging Innovation: Case of Medical/Healthcare Robot, HAL by Cyberdyne (Japanese). Research Institute of Economy, Trade and Industry (RIETI), 2019.}
see \cite{ikeda2019global}
Robots have been put to use in many fields, mostly for automation or areas where a great degree of precision is required. Robots can be of huge assistance in the medical field too, as they can relieve the patient or the medical personnel from routine and mundane tasks, which may sometimes be very crucial and may need to be performed with the utmost care, accuracy and precision. The use of robotics already exists in healthcare, but it is not mainstream yet and it will take some time for that to become a reality. The main goal of this research paper is to shed some light on the same. Ideas are proposed on how robotics can be used in certain niches in healthcare, and how its adoption can be made easy to spread and implement at the ground level. The focus is on the need for robotics in healthcare, along with its added advantages for the quality of healthcare and long-term cost savings. With this, the future of healthcare, i.e. telemedicine, would become a reality, and it would be a lot easier and cheaper for people to get access to quality healthcare anywhere in the world without physically attending the hospital.
\medskip
\subsubsection{Sarker, Sujan, Lafifa Jamal, Syeda Faiza Ahmed, and Niloy Irtisam. "Robotics and artificial intelligence in healthcare during COVID-19 pandemic: A systematic review." Robotics and autonomous systems (2021): 103902.}
see \cite{sarker2021robotics}
The outbreak of the COVID-19 pandemic is unarguably the biggest catastrophe of the 21st century, probably the most significant global crisis after the second world war. The rapid spreading capability of the virus has compelled the world population to maintain strict preventive measures. The onslaught of the virus has rampaged through the healthcare sector tremendously. This pandemic created a huge demand for necessary healthcare equipment and medicines, along with the requirement for advanced robotics and artificial intelligence-based applications. Intelligent robot systems have great potential to render service in diagnosis, risk assessment, monitoring, telehealthcare, disinfection, and several other operations during this pandemic, which has helped reduce the workload of frontline workers remarkably. The long-awaited vaccine discovery for this deadly virus has also been greatly accelerated with AI-empowered tools. In addition to that, many robotics and Robotic Process Automation platforms have substantially facilitated the distribution of the vaccine in many of the arrangements pertaining to it. These forefront technologies have also aided in giving comfort to people dealing with less addressed mental health complications. This paper investigates the use of robotics and artificial intelligence-based technologies and their applications in healthcare to fight against the COVID-19 pandemic. A systematic search following the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) method is conducted to accumulate such literature, and an extensive review of 147 selected records is performed.
\medskip
\subsubsection{Alotaibi, Meshal, and Mohammad Yamin. "Role of robots in healthcare management." In 2019 6th International Conference on Computing for Sustainable Global Development (INDIACom), pp. 1311-1314. IEEE, 2019.}
see \cite{alotaibi2019role}
Robots in medical science and healthcare are playing significant roles by performing procedures and other tasks which were earlier done by humans. Robots are assisting patients, administrators, healthcare systems and entities in many ways to improve the health and well-being of people. Robots have simplified many complex surgical procedures which were earlier very obscure. Although there is a lot of literature on various aspects of healthcare, there is still scope for a detailed review of how robots can help the various entities of healthcare management. This paper intends to fill that gap and provides an exposition of the role of robots in healthcare and health procedures, especially surgical procedures and patient assistance. Our review exhibits various aspects of hospital and healthcare management and patient administration.
\medskip
\subsubsection{Aggarwal, Shivangi, Deepa Gupta, and Sonia Saini. "A literature survey on robotics in healthcare." In 2019 4th International Conference on Information Systems and Computer Networks (ISCON), pp. 55-58. IEEE, 2019.}
see \cite{aggarwal2019literature}
In this paper we become familiar with the utilization of various kinds of robots in the medical and healthcare sector: how robots are being used in the medical field, their applications, advantages and limitations. Medical robots are used primarily in neurosurgery, laparoscopy and orthopaedics, and robots are used in telesurgery as well. Robots have also been enabling less invasive diagnosis. This paper discusses these briefly. Robots are further used for rehabilitation purposes. Robots in the healthcare industry have a large impact on the healthcare sector by expanding the workforce and much more. Medical robotics has grown quickly in recent years and keeps growing, expanding in fields such as nanotechnology, orthopaedics and other medical procedures. The presence of robotics is changing industries across the globe; robotics is expanding step by step and covering functionalities such as nanotechnology and artificial intelligence to transform healthcare and other sectors. The objective of the paper is to understand the role of robotics in the healthcare sector, its future challenges, and the benefits of robotics for the healthcare industry. The use of robots in the health sector provides an exciting as well as a pivotal opportunity to help an enormous number of people.
\medskip
\subsubsection{Puaschunder, Julia M. "The Potential for Artificial Intelligence in Healthcare." Available at SSRN 3525037 (2020).}
see \cite{puaschunder2020potential}
Artificial Intelligence (AI), Robotics and Big Data revolutionized the world and opened unprecedented opportunities and potentials in healthcare. No other scientific field grants as much hope in the determination of life and death and fastest-pace innovation potential with economically highest profit margin prospects as does medical care.
An expert survey conducted in November 2019 identified big data-driven knowledge generation and tailored personal medical care but also efficiency, precision and better quality work as most beneficial advancements of AI, robotics and big data in the healthcare sector. Decentralized preventive healthcare and telemedicine open access to personalized, affordable healthcare.
Technical advancements and big data insights – at the same time – increase the costs of a well-rounded healthy lifestyle. Particularly in Western Europe, the currently tipping demographic pyramid, coupled with obstacles to integrating migrants long-term to rejuvenate the population and boost economic output, imposes challenges for policy makers and insurance practitioners alike. Studies in the US found that 70\% of all health-related costs are accrued during the last few weeks of people’s lives. In Austria, healthcare costs are expected to double in this decade. This predicament of rising costs for an aging Western world population raises questions such as: should we decrease Austrians' access to the best quality medical care in order to maintain the pursuit of a mandate of medical care for all, or should we allow a differently-tiered, class-based medical system, in which money determines who can afford excellent healthcare? There must be a better solution for a country like Austria in the heart of the European continent, one that may stem from a forward-thinking community such as the one we all represent together today.
\medskip
\subsubsection{Mohanty, Kajol, S. Subiksha, S. Kirthika, B. H. Sujal, Sumathi Sokkanarayanan, Panjavarnam Bose, and Mithileysh Sathiyanarayanan. "Opportunities of Adopting AI-Powered Robotics to Tackle COVID-19." In 2021 International Conference on COMmunication Systems \& NETworkS (COMSNETS), pp. 703-708. IEEE, 2021.}
see \cite{mohanty2021opportunities}
The coronavirus disease (COVID-19) pandemic has made a dire requirement for traditional and disruptive technologies to react to the flare-up across health and wellbeing areas, and technologies such as AI and robotics have been recognized as promising ways to tackle the current challenges. The COVID-19 pandemic has exhibited the solid capability of different advanced technologies that have been tried during the emergency. However, acceptability and adoptability of the latest technologies may face serious challenges due to potential conflicts with users' cultural, moral, and religious backgrounds. This paper discusses the current opportunities and challenges with respect to artificial intelligence (AI) powered robots to battle COVID-19. To diminish the danger of contamination and infection, the opportunities must be utilized during this pandemic for a better future. More deliberate measures ought to be executed to guarantee that future robotic health initiatives will have a greater impact on the pandemic and meet the most key needs to facilitate the life of individuals who are at the forefront of the crisis.
\medskip
\subsubsection{Fosch Villaronga, Eduard, and Hadassah Drukarch. "On healthcare robots." On healthcare robots (2021).}
see \cite{fosch2021healthcare}
The rise of healthcare robotics
Robotics have increased productivity and resource efficiency in the industrial and retail sectors, and now there is an emerging interest in realizing a comparable transformation in healthcare. Robotics and artificial intelligence (AI) are some of the latest promising technologies expected to increase the quality and safety of care while simultaneously restraining expenditure and, recently, reducing human contact too. Healthcare robots are likely to be deployed at an unprecedented rate due to their reduced cost and increasing capabilities such as carrying out medical interventions, supporting biomedical research and clinical practice, conducting therapy with children, or keeping the elderly company.
The lack of healthcare robot policy
Although healthcare is a remarkably sensitive domain of application, and systems that exert direct control over the world can cause harm in a way that humans cannot necessarily correct or oversee, it is still unclear whether and how healthcare robots are currently regulated or should be regulated. Existing regulations are primarily unprepared to provide guidance for such a rapidly evolving field and to accommodate devices that rely on machine learning and AI. Moreover, the field of healthcare robotics is very rich and extensive, but it is still very much scattered and unclear in terms of definitions, medical and technical classifications, product characteristics, purpose, and intended use. As a result, these devices often navigate between the medical device regulation and other non-medical norms, such as the ISO personal care standard. Before regulating the field of healthcare robots, it is therefore essential to map the major state-of-the-art developments in healthcare robotics, their capabilities and applications, and the challenges we face as a result of their integration within the healthcare environment.
Our contribution to the policy making debate on healthcare robots and AI technologies
This contribution fills in this gap and lack of clarity currently experienced within healthcare robotics and its governance by providing a structured overview of and further elaboration on the main categories now established, their intended purpose, use, and main characteristics. We explicitly focus on surgical, assistive, and service robots to rightfully match the definition of healthcare as the organized provision of medical care to individuals, including efforts to maintain, treat, or restore physical, mental, or emotional well-being. We complement these findings with policy recommendations to help policymakers unravel an optimal regulatory framing for healthcare robot technologies.
\medskip
\subsubsection{Su, Yun-Hsuan, Adnan Munawar, Anton Deguet, Andrew Lewis, Kyle Lindgren, Yangming Li, Russell H. Taylor, Gregory S. Fischer, Blake Hannaford, and Peter Kazanzides. "Collaborative Robotics Toolkit (CRTK): Open Software Framework for Surgical Robotics Research." In 2020 Fourth IEEE International Conference on Robotic Computing (IRC), pp. 48-55. IEEE, 2020.}
see \cite{su2020collaborative}
Robot-assisted minimally invasive surgery has made a substantial impact in operating rooms over the past few decades thanks to its high dexterity, small tool size, and role in driving the adoption of minimally invasive techniques. In recent years, intelligence and different levels of surgical robot autonomy have emerged thanks to the medical robotics endeavors at numerous academic institutions and leading surgical robot companies. To accelerate interaction within the research community and prevent repeated development, we propose the Collaborative Robotics Toolkit (CRTK), a common API for the RAVEN-II and da Vinci Research Kit (dVRK) - two open surgical robot platforms installed at more than 40 institutions worldwide. CRTK has broadened to include other robots and devices, including simulated robotic systems and industrial robots. This common API is a community software infrastructure for research and education in cutting-edge human-robot collaborative areas such as semi-autonomous teleoperation and medical robotics. This paper presents the concepts, design details, and the integration of CRTK with physical robot systems and simulation platforms.
\medskip
\subsubsection{Javaid, Mohd, Abid Haleem, Abhishek Vaish, Raju Vaishya, and Karthikeyan P. Iyengar. "Robotics applications in COVID-19: A review." Journal of Industrial Integration and Management 5, no. 04 (2020): 441-451.}
see \cite{javaid2020robotics}
The COVID-19 outbreak has resulted in the manufacturing and service sectors being badly hit globally. Since there are no vaccines or any proven medical treatment available, there is an urgent need to take necessary steps to prevent the spread of this virus. As the virus spreads with human-to-human interaction, lockdown has been declared in many countries, and the public is advised to observe social distancing strictly. Robots can undertake human-like activities and can be gainfully programmed to replace some of the human interactions. Through this paper, we identify and propose the introduction of robots to take up this challenge in the fight against the COVID-19 pandemic. We did a comprehensive review of the literature to identify robots’ possible applications in the management of epidemics and pandemics of this nature. We have reviewed the available literature through the search engines of PubMed, SCOPUS, Google Scholar, and Research Gate. A comprehensive review of the literature identified different types of robots being used in the medical field. We could find several vital applications of robots in the management of the COVID-19 pandemic. No doubt technology comes with a cost. In this paper, we identified how different types of robots are used gainfully to deliver medicine, food, and other essential items to COVID-19 patients who are under quarantine. Therefore, there is extensive scope for customising robots to undertake hazardous and repetitive jobs with precision and reliability.
\medskip
\subsubsection{Guntur, Sitaramanjaneya Reddy, Rajani Reddy Gorrepati, and Vijaya R. Dirisala. "Robotics in healthcare: an internet of medical robotic things (IoMRT) perspective." In Machine learning in bio-signal analysis and diagnostic imaging, pp. 293-318. Academic Press, 2019.}
see \cite{guntur2019robotics}
Robotics is one of the most advanced and emerging technologies in the field of medicine. Electronic sensors combined with control elements incorporated into mechanical systems greatly enhance the performance and flexibility of those systems. Earlier robotic technology used for arm movement was not accurate and was unable to provide exact sensory feedback, movement, and positioning. With advances in hardware, software, and control programming systems, extensive automation is being utilized to operate with more degrees of freedom than humans under an extensive array of conditions. Currently, robotic innovations are being introduced in numerous areas that directly influence the understanding and delivery of patient care. Robotics technology in medicine is a prime focus of healthcare services in ICUs, general wards, and operating rooms, where it reduces risks for patients and doctors; it is also utilized in laboratories to collect samples and, if required, transport, analyze, and preserve them for long-term storage. The healthcare services provided by robotics become complex and critical with respect to information sharing, data communication, and distribution of sensor data. The Internet of Medical Robotic Things (IoMRT) approach incorporates robots as “things” and builds connections using new communication technologies, such as Li-Fi, together with web-based information technology. This chapter presents an overview of robotics in surgery and other healthcare services and emphasizes the long-term benefits for human beings of combining robotics with IoMT-based new communication (Li-Fi) technology. The limitations and future challenges associated with this technology are also described in detail.
\medskip
\subsubsection{Pierce, Robin, and Eduard Fosch Villaronga. "Medical robots and the right to health care: A progressive realisation." (2020).}
see \cite{pierce2020medical}
Robotic technologies have been shown to have clear potential for providing innovation in treatments and treatment modalities for various diseases and disorders, covering unmet needs in a cost-efficient way. However, the emergence of technology that promises to improve health outcomes raises questions about the extent to which it should be incorporated, how, to whom it should be made available, and on what basis. Since countries usually have limited resources with which to provide access to state-of-the-art technologies and must develop strategies to realize the right to health progressively, in this article we investigate whether the right to health, and particularly the core obligations specified under this right, helps in implementing medical robots.
\medskip
\subsubsection{Amir Hossein, Molkizadeh, Rahim Baghban, Somayeh Rahmanian, Saeed Bayyenat, and Mohammad Ali Kiani. "Telemedicine: An Essential Requirement for the Health Care Providers, with Emphasis on Legal Aspects." International Journal of Pediatrics 8, no. 9 (2020): 12131-12142.}
see \cite{amir2020telemedicine}
Telemedicine is the use of telecommunication and information technologies to provide clinical health care at a distance. These technologies allow convenient communication between patients and medical staff, as well as the transmission of medical, imaging, and health informatics data from one site to another. It is also used to save lives in critical care and emergency situations. Although telemedicine systems have many advantages, including the distribution of high-quality medical services to remote areas, failure to comply with infrastructure requirements will reduce the efficiency and quality of their services. Issues such as building the infrastructure of the medical information industry, including the legal infrastructure, and thus providing a suitable platform for the legal and ethical issues of telemedicine, as well as obtaining the necessary permits and meeting the necessary requirements, will play an important role in the successful implementation of a telemedicine system. The purpose of this study was to become more familiar with the field of telemedicine and its services, as well as to review some legal issues in the field of e-health. Telemedicine is not able to solve the problems of the health and social systems, but the problems of the health and social systems cannot be solved without telemedicine.
%\medskip
%\subsubsection{}
%see \cite{}
\bibliographystyle{IEEEtran}
\bibliography{lit}
\end{document}
|
/-
Copyright (c) 2018 Chris Hughes. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Chris Hughes
-/
import data.equiv.mul_add
import tactic.norm_num
import data.part
/-!
# Natural numbers with infinity
The natural numbers and an extra `top` element `⊤`.
## Main definitions
The following instances are defined:
* `ordered_add_comm_monoid enat`
* `canonically_ordered_add_monoid enat`
There is no additive analogue of `monoid_with_zero`; if there were then `enat` could
be an `add_monoid_with_top`.
* `to_with_top` : the map from `enat` to `with_top ℕ`, with theorems that it plays well
with `+` and `≤`.
* `with_top_add_equiv : enat ≃+ with_top ℕ`
* `with_top_order_iso : enat ≃o with_top ℕ`
## Implementation details
`enat` is defined to be `part ℕ`.
`+` and `≤` are defined on `enat`, but there is an issue with `*` because it's not
clear what `0 * ⊤` should be. `mul` is hence left undefined. Similarly `⊤ - ⊤` is ambiguous
so there is no `-` defined on `enat`.
Before the `open_locale classical` line, various proofs are made with decidability assumptions.
This can cause issues -- see for example the non-simp lemma `to_with_top_zero` proved by `rfl`,
followed by `@[simp] lemma to_with_top_zero'` whose proof uses `convert`.
## Tags
enat, with_top ℕ
-/
open part (hiding some)
/-- Type of natural numbers with infinity (`⊤`) -/
def enat : Type := part ℕ
namespace enat
/-- The computable embedding `ℕ → enat`.
This coincides with the coercion `coe : ℕ → enat`, see `enat.some_eq_coe`.
However, `coe` is noncomputable so `some` is preferable when computability is a concern. -/
def some : ℕ → enat := part.some
instance : has_zero enat := ⟨some 0⟩
instance : inhabited enat := ⟨0⟩
instance : has_one enat := ⟨some 1⟩
instance : has_add enat := ⟨λ x y, ⟨x.dom ∧ y.dom, λ h, get x h.1 + get y h.2⟩⟩
instance (n : ℕ) : decidable (some n).dom := is_true trivial
lemma some_eq_coe (n : ℕ) : some n = n :=
begin
induction n with n ih, { refl },
apply part.ext',
{ show true ↔ ((n : enat).dom ∧ true), rw [← ih, and_true], exact iff.rfl },
{ intros h H, show n.succ = (n : enat).get H.1 + 1,
rw [nat.cast_succ] at H, revert H, simp only [← ih], intro, refl },
end
@[simp] lemma coe_inj {x y : ℕ} : (x : enat) = y ↔ x = y :=
by simpa only [← some_eq_coe] using part.some_inj
@[simp] lemma dom_some (x : ℕ) : (some x).dom := trivial
@[simp] lemma dom_coe (x : ℕ) : (x : enat).dom := by rw [← some_eq_coe]; trivial
instance : add_comm_monoid enat :=
{ add := (+),
zero := (0),
add_comm := λ x y, part.ext' and.comm (λ _ _, add_comm _ _),
zero_add := λ x, part.ext' (true_and _) (λ _ _, zero_add _),
add_zero := λ x, part.ext' (and_true _) (λ _ _, add_zero _),
add_assoc := λ x y z, part.ext' and.assoc (λ _ _, add_assoc _ _ _) }
instance : has_le enat := ⟨λ x y, ∃ h : y.dom → x.dom, ∀ hy : y.dom, x.get (h hy) ≤ y.get hy⟩
instance : has_top enat := ⟨none⟩
instance : has_bot enat := ⟨0⟩
instance : has_sup enat := ⟨λ x y, ⟨x.dom ∧ y.dom, λ h, x.get h.1 ⊔ y.get h.2⟩⟩
lemma le_def (x y : enat) : x ≤ y ↔ ∃ h : y.dom → x.dom, ∀ hy : y.dom, x.get (h hy) ≤ y.get hy :=
iff.rfl
@[elab_as_eliminator] protected lemma cases_on' {P : enat → Prop} :
∀ a : enat, P ⊤ → (∀ n : ℕ, P (some n)) → P a :=
part.induction_on
@[elab_as_eliminator] protected lemma cases_on {P : enat → Prop} :
∀ a : enat, P ⊤ → (∀ n : ℕ, P n) → P a :=
by { simp only [← some_eq_coe], exact enat.cases_on' }
@[simp] lemma top_add (x : enat) : ⊤ + x = ⊤ :=
part.ext' (false_and _) (λ h, h.left.elim)
@[simp] lemma add_top (x : enat) : x + ⊤ = ⊤ :=
by rw [add_comm, top_add]
@[simp] lemma coe_get {x : enat} (h : x.dom) : (x.get h : enat) = x :=
by { rw [← some_eq_coe], exact part.ext' (iff_of_true trivial h) (λ _ _, rfl) }
@[simp, norm_cast] lemma get_coe' (x : ℕ) (h : (x : enat).dom) : get (x : enat) h = x :=
by rw [← coe_inj, coe_get]
lemma get_coe {x : ℕ} : get (x : enat) (dom_coe x) = x := get_coe' _ _
lemma coe_add_get {x : ℕ} {y : enat} (h : ((x : enat) + y).dom) :
get ((x : enat) + y) h = x + get y h.2 :=
by { simp only [← some_eq_coe] at h ⊢, refl }
@[simp] lemma get_add {x y : enat} (h : (x + y).dom) :
get (x + y) h = x.get h.1 + y.get h.2 := rfl
@[simp] lemma get_zero (h : (0 : enat).dom) : (0 : enat).get h = 0 := rfl
@[simp] lemma get_one (h : (1 : enat).dom) : (1 : enat).get h = 1 := rfl
lemma get_eq_iff_eq_some {a : enat} {ha : a.dom} {b : ℕ} :
a.get ha = b ↔ a = some b := get_eq_iff_eq_some
lemma get_eq_iff_eq_coe {a : enat} {ha : a.dom} {b : ℕ} :
a.get ha = b ↔ a = b := by rw [get_eq_iff_eq_some, some_eq_coe]
lemma dom_of_le_of_dom {x y : enat} : x ≤ y → y.dom → x.dom := λ ⟨h, _⟩, h
lemma dom_of_le_some {x : enat} {y : ℕ} (h : x ≤ some y) : x.dom := dom_of_le_of_dom h trivial
lemma dom_of_le_coe {x : enat} {y : ℕ} (h : x ≤ y) : x.dom :=
by { rw [← some_eq_coe] at h, exact dom_of_le_some h }
instance decidable_le (x y : enat) [decidable x.dom] [decidable y.dom] : decidable (x ≤ y) :=
if hx : x.dom
then decidable_of_decidable_of_iff
(show decidable (∀ (hy : (y : enat).dom), x.get hx ≤ (y : enat).get hy),
from forall_prop_decidable _) $
by { dsimp [(≤)], simp only [hx, exists_prop_of_true, forall_true_iff] }
else if hy : y.dom
then is_false $ λ h, hx $ dom_of_le_of_dom h hy
else is_true ⟨λ h, (hy h).elim, λ h, (hy h).elim⟩
/-- The coercion `ℕ → enat` preserves `0` and addition. -/
def coe_hom : ℕ →+ enat := ⟨coe, nat.cast_zero, nat.cast_add⟩
@[simp] lemma coe_coe_hom : ⇑coe_hom = coe := rfl
instance : partial_order enat :=
{ le := (≤),
le_refl := λ x, ⟨id, λ _, le_refl _⟩,
le_trans := λ x y z ⟨hxy₁, hxy₂⟩ ⟨hyz₁, hyz₂⟩,
⟨hxy₁ ∘ hyz₁, λ _, le_trans (hxy₂ _) (hyz₂ _)⟩,
le_antisymm := λ x y ⟨hxy₁, hxy₂⟩ ⟨hyx₁, hyx₂⟩, part.ext' ⟨hyx₁, hxy₁⟩
(λ _ _, le_antisymm (hxy₂ _) (hyx₂ _)) }
lemma lt_def (x y : enat) : x < y ↔ ∃ (hx : x.dom), ∀ (hy : y.dom), x.get hx < y.get hy :=
begin
rw [lt_iff_le_not_le, le_def, le_def, not_exists],
split,
{ rintro ⟨⟨hyx, H⟩, h⟩,
by_cases hx : x.dom,
{ use hx, intro hy,
specialize H hy, specialize h (λ _, hy),
rw not_forall at h, cases h with hx' h,
rw not_le at h, exact h },
{ specialize h (λ hx', (hx hx').elim),
rw not_forall at h, cases h with hx' h,
exact (hx hx').elim } },
{ rintro ⟨hx, H⟩, exact ⟨⟨λ _, hx, λ hy, (H hy).le⟩, λ hxy h, not_lt_of_le (h _) (H _)⟩ }
end
@[simp, norm_cast] lemma coe_le_coe {x y : ℕ} : (x : enat) ≤ y ↔ x ≤ y :=
by { rw [← some_eq_coe, ← some_eq_coe], exact ⟨λ ⟨_, h⟩, h trivial, λ h, ⟨λ _, trivial, λ _, h⟩⟩ }
@[simp, norm_cast] lemma coe_lt_coe {x y : ℕ} : (x : enat) < y ↔ x < y :=
by rw [lt_iff_le_not_le, lt_iff_le_not_le, coe_le_coe, coe_le_coe]
@[simp] lemma get_le_get {x y : enat} {hx : x.dom} {hy : y.dom} :
x.get hx ≤ y.get hy ↔ x ≤ y :=
by conv { to_lhs, rw [← coe_le_coe, coe_get, coe_get]}
lemma le_coe_iff (x : enat) (n : ℕ) : x ≤ n ↔ ∃ h : x.dom, x.get h ≤ n :=
begin
rw [← some_eq_coe],
show (∃ (h : true → x.dom), _) ↔ ∃ h : x.dom, x.get h ≤ n,
simp only [forall_prop_of_true, some_eq_coe, dom_coe, get_coe'],
split; rintro ⟨_, _⟩; refine ⟨_, _⟩; intros; try { assumption }
end
lemma lt_coe_iff (x : enat) (n : ℕ) : x < n ↔ ∃ h : x.dom, x.get h < n :=
by simp only [lt_def, forall_prop_of_true, get_coe', dom_coe]
lemma coe_le_iff (n : ℕ) (x : enat) : (n : enat) ≤ x ↔ ∀ h : x.dom, n ≤ x.get h :=
begin
rw [← some_eq_coe],
simp only [le_def, exists_prop_of_true, dom_some, forall_true_iff],
refl,
end
lemma coe_lt_iff (n : ℕ) (x : enat) : (n : enat) < x ↔ ∀ h : x.dom, n < x.get h :=
begin
rw [← some_eq_coe],
simp only [lt_def, exists_prop_of_true, dom_some, forall_true_iff],
refl,
end
protected lemma zero_lt_one : (0 : enat) < 1 :=
by { norm_cast, norm_num }
instance semilattice_sup : semilattice_sup enat :=
{ sup := (⊔),
le_sup_left := λ _ _, ⟨and.left, λ _, le_sup_left⟩,
le_sup_right := λ _ _, ⟨and.right, λ _, le_sup_right⟩,
sup_le := λ x y z ⟨hx₁, hx₂⟩ ⟨hy₁, hy₂⟩, ⟨λ hz, ⟨hx₁ hz, hy₁ hz⟩,
λ _, sup_le (hx₂ _) (hy₂ _)⟩,
..enat.partial_order }
instance order_bot : order_bot enat :=
{ bot := (⊥),
bot_le := λ _, ⟨λ _, trivial, λ _, nat.zero_le _⟩ }
instance order_top : order_top enat :=
{ top := (⊤),
le_top := λ x, ⟨λ h, false.elim h, λ hy, false.elim hy⟩ }
lemma dom_of_lt {x y : enat} : x < y → x.dom :=
enat.cases_on x not_top_lt $ λ _ _, dom_coe _
lemma top_eq_none : (⊤ : enat) = none := rfl
@[simp] lemma coe_lt_top (x : ℕ) : (x : enat) < ⊤ :=
ne.lt_top (λ h, absurd (congr_arg dom h) $ by simpa only [dom_coe] using true_ne_false)
@[simp] lemma coe_ne_top (x : ℕ) : (x : enat) ≠ ⊤ := ne_of_lt (coe_lt_top x)
lemma ne_top_iff {x : enat} : x ≠ ⊤ ↔ ∃ (n : ℕ), x = n :=
by simpa only [← some_eq_coe] using part.ne_none_iff
lemma ne_top_iff_dom {x : enat} : x ≠ ⊤ ↔ x.dom :=
by classical; exact not_iff_comm.1 part.eq_none_iff'.symm
lemma ne_top_of_lt {x y : enat} (h : x < y) : x ≠ ⊤ :=
ne_of_lt $ lt_of_lt_of_le h le_top
lemma eq_top_iff_forall_lt (x : enat) : x = ⊤ ↔ ∀ n : ℕ, (n : enat) < x :=
begin
split,
{ rintro rfl n, exact coe_lt_top _ },
{ contrapose!, rw ne_top_iff, rintro ⟨n, rfl⟩, exact ⟨n, irrefl _⟩ }
end
lemma eq_top_iff_forall_le (x : enat) : x = ⊤ ↔ ∀ n : ℕ, (n : enat) ≤ x :=
(eq_top_iff_forall_lt x).trans
⟨λ h n, (h n).le, λ h n, lt_of_lt_of_le (coe_lt_coe.mpr n.lt_succ_self) (h (n + 1))⟩
lemma pos_iff_one_le {x : enat} : 0 < x ↔ 1 ≤ x :=
enat.cases_on x (by simp only [iff_true, le_top, coe_lt_top, ← @nat.cast_zero enat]) $
λ n, by { rw [← nat.cast_zero, ← nat.cast_one, enat.coe_lt_coe, enat.coe_le_coe], refl }
noncomputable instance : linear_order enat :=
{ le_total := λ x y, enat.cases_on x
(or.inr le_top) (enat.cases_on y (λ _, or.inl le_top)
(λ x y, (le_total x y).elim (or.inr ∘ coe_le_coe.2)
(or.inl ∘ coe_le_coe.2))),
decidable_le := classical.dec_rel _,
..enat.partial_order }
instance : bounded_order enat :=
{ ..enat.order_top,
..enat.order_bot }
noncomputable instance : lattice enat :=
{ inf := min,
inf_le_left := min_le_left,
inf_le_right := min_le_right,
le_inf := λ _ _ _, le_min,
..enat.semilattice_sup }
lemma sup_eq_max {a b : enat} : a ⊔ b = max a b :=
le_antisymm (sup_le (le_max_left _ _) (le_max_right _ _))
(max_le le_sup_left le_sup_right)
lemma inf_eq_min {a b : enat} : a ⊓ b = min a b := rfl
instance : ordered_add_comm_monoid enat :=
{ add_le_add_left := λ a b ⟨h₁, h₂⟩ c,
enat.cases_on c (by simp)
(λ c, ⟨λ h, and.intro (dom_coe _) (h₁ h.2),
λ h, by simpa only [coe_add_get] using add_le_add_left (h₂ _) c⟩),
..enat.linear_order,
..enat.add_comm_monoid }
instance : canonically_ordered_add_monoid enat :=
{ le_iff_exists_add := λ a b, enat.cases_on b
(iff_of_true le_top ⟨⊤, (add_top _).symm⟩)
(λ b, enat.cases_on a
(iff_of_false (not_le_of_gt (coe_lt_top _))
(not_exists.2 (λ x, ne_of_lt (by rw [top_add]; exact coe_lt_top _))))
(λ a, ⟨λ h, ⟨(b - a : ℕ),
by rw [← nat.cast_add, coe_inj, add_comm, tsub_add_cancel_of_le (coe_le_coe.1 h)]⟩,
(λ ⟨c, hc⟩, enat.cases_on c
(λ hc, hc.symm ▸ show (a : enat) ≤ a + ⊤, by rw [add_top]; exact le_top)
(λ c (hc : (b : enat) = a + c),
coe_le_coe.2 (by rw [← nat.cast_add, coe_inj] at hc;
rw hc; exact nat.le_add_right _ _)) hc)⟩)),
..enat.semilattice_sup,
..enat.order_bot,
..enat.ordered_add_comm_monoid }
protected lemma add_lt_add_right {x y z : enat} (h : x < y) (hz : z ≠ ⊤) : x + z < y + z :=
begin
rcases ne_top_iff.mp (ne_top_of_lt h) with ⟨m, rfl⟩,
rcases ne_top_iff.mp hz with ⟨k, rfl⟩,
induction y using enat.cases_on with n,
{ rw [top_add], apply_mod_cast coe_lt_top },
norm_cast at h, apply_mod_cast add_lt_add_right h
end
protected lemma add_lt_add_iff_right {x y z : enat} (hz : z ≠ ⊤) : x + z < y + z ↔ x < y :=
⟨lt_of_add_lt_add_right, λ h, enat.add_lt_add_right h hz⟩
protected lemma add_lt_add_iff_left {x y z : enat} (hz : z ≠ ⊤) : z + x < z + y ↔ x < y :=
by rw [add_comm z, add_comm z, enat.add_lt_add_iff_right hz]
protected lemma lt_add_iff_pos_right {x y : enat} (hx : x ≠ ⊤) : x < x + y ↔ 0 < y :=
by { conv_rhs { rw [← enat.add_lt_add_iff_left hx] }, rw [add_zero] }
lemma lt_add_one {x : enat} (hx : x ≠ ⊤) : x < x + 1 :=
by { rw [enat.lt_add_iff_pos_right hx], norm_cast, norm_num }
lemma le_of_lt_add_one {x y : enat} (h : x < y + 1) : x ≤ y :=
begin
induction y using enat.cases_on with n, apply le_top,
rcases ne_top_iff.mp (ne_top_of_lt h) with ⟨m, rfl⟩,
apply_mod_cast nat.le_of_lt_succ, apply_mod_cast h
end
lemma add_one_le_of_lt {x y : enat} (h : x < y) : x + 1 ≤ y :=
begin
induction y using enat.cases_on with n, apply le_top,
rcases ne_top_iff.mp (ne_top_of_lt h) with ⟨m, rfl⟩,
apply_mod_cast nat.succ_le_of_lt, apply_mod_cast h
end
lemma add_one_le_iff_lt {x y : enat} (hx : x ≠ ⊤) : x + 1 ≤ y ↔ x < y :=
begin
split, swap, exact add_one_le_of_lt,
intro h, rcases ne_top_iff.mp hx with ⟨m, rfl⟩,
induction y using enat.cases_on with n, apply coe_lt_top,
apply_mod_cast nat.lt_of_succ_le, apply_mod_cast h
end
lemma lt_add_one_iff_lt {x y : enat} (hx : x ≠ ⊤) : x < y + 1 ↔ x ≤ y :=
begin
split, exact le_of_lt_add_one,
intro h, rcases ne_top_iff.mp hx with ⟨m, rfl⟩,
induction y using enat.cases_on with n, { rw [top_add], apply coe_lt_top },
apply_mod_cast nat.lt_succ_of_le, apply_mod_cast h
end
lemma add_eq_top_iff {a b : enat} : a + b = ⊤ ↔ a = ⊤ ∨ b = ⊤ :=
by apply enat.cases_on a; apply enat.cases_on b;
simp; simp only [(nat.cast_add _ _).symm, enat.coe_ne_top]; simp
protected lemma add_right_cancel_iff {a b c : enat} (hc : c ≠ ⊤) : a + c = b + c ↔ a = b :=
begin
rcases ne_top_iff.1 hc with ⟨c, rfl⟩,
apply enat.cases_on a; apply enat.cases_on b;
simp [add_eq_top_iff, coe_ne_top, @eq_comm _ (⊤ : enat)];
simp only [(nat.cast_add _ _).symm, add_left_cancel_iff, enat.coe_inj, add_comm];
tauto
end
protected lemma add_left_cancel_iff {a b c : enat} (ha : a ≠ ⊤) : a + b = a + c ↔ b = c :=
by rw [add_comm a, add_comm a, enat.add_right_cancel_iff ha]
section with_top
/-- Computably converts an `enat` to a `with_top ℕ`. -/
def to_with_top (x : enat) [decidable x.dom] : with_top ℕ := x.to_option
lemma to_with_top_top : to_with_top ⊤ = ⊤ := rfl
@[simp] lemma to_with_top_top' {h : decidable (⊤ : enat).dom} : to_with_top ⊤ = ⊤ :=
by convert to_with_top_top
lemma to_with_top_zero : to_with_top 0 = 0 := rfl
@[simp] lemma to_with_top_zero' {h : decidable (0 : enat).dom} : to_with_top 0 = 0 :=
by convert to_with_top_zero
lemma to_with_top_some (n : ℕ) : to_with_top (some n) = n := rfl
lemma to_with_top_coe (n : ℕ) {_ : decidable (n : enat).dom} : to_with_top n = n :=
by { simp only [← some_eq_coe, ← to_with_top_some], congr }
@[simp] lemma to_with_top_coe' (n : ℕ) {h : decidable (n : enat).dom} :
to_with_top (n : enat) = n :=
by convert to_with_top_coe n
@[simp] lemma to_with_top_le {x y : enat} : Π [decidable x.dom]
[decidable y.dom], by exactI to_with_top x ≤ to_with_top y ↔ x ≤ y :=
enat.cases_on y (by simp) (enat.cases_on x (by simp) (by intros; simp))
@[simp] lemma to_with_top_lt {x y : enat} [decidable x.dom] [decidable y.dom] :
to_with_top x < to_with_top y ↔ x < y :=
lt_iff_lt_of_le_iff_le to_with_top_le
end with_top
section with_top_equiv
open_locale classical
@[simp] lemma to_with_top_add {x y : enat} : to_with_top (x + y) = to_with_top x + to_with_top y :=
begin
apply enat.cases_on y; apply enat.cases_on x,
{ simp },
{ simp },
{ simp },
-- not sure why `simp` can't do this
{ intros, rw [to_with_top_coe', to_with_top_coe'], norm_cast, exact to_with_top_coe' _ }
end
/-- `equiv` between `enat` and `with_top ℕ` (for the order isomorphism see `with_top_order_iso`). -/
noncomputable def with_top_equiv : enat ≃ with_top ℕ :=
{ to_fun := λ x, to_with_top x,
inv_fun := λ x, match x with (option.some n) := coe n | none := ⊤ end,
left_inv := λ x, by apply enat.cases_on x; intros; simp; refl,
right_inv := λ x, by cases x; simp [with_top_equiv._match_1]; refl }
@[simp] lemma with_top_equiv_top : with_top_equiv ⊤ = ⊤ :=
to_with_top_top'
@[simp] lemma with_top_equiv_coe (n : nat) : with_top_equiv n = n :=
to_with_top_coe' _
@[simp] lemma with_top_equiv_zero : with_top_equiv 0 = 0 :=
by simpa only [nat.cast_zero] using with_top_equiv_coe 0
@[simp] lemma with_top_equiv_le {x y : enat} : with_top_equiv x ≤ with_top_equiv y ↔ x ≤ y :=
to_with_top_le
@[simp] lemma with_top_equiv_lt {x y : enat} : with_top_equiv x < with_top_equiv y ↔ x < y :=
to_with_top_lt
/-- `to_with_top` induces an order isomorphism between `enat` and `with_top ℕ`. -/
noncomputable def with_top_order_iso : enat ≃o with_top ℕ :=
{ map_rel_iff' := λ _ _, with_top_equiv_le,
.. with_top_equiv}
@[simp] lemma with_top_equiv_symm_top : with_top_equiv.symm ⊤ = ⊤ :=
rfl
@[simp] lemma with_top_equiv_symm_coe (n : nat) : with_top_equiv.symm n = n :=
rfl
@[simp] lemma with_top_equiv_symm_zero : with_top_equiv.symm 0 = 0 :=
rfl
@[simp] lemma with_top_equiv_symm_le {x y : with_top ℕ} :
with_top_equiv.symm x ≤ with_top_equiv.symm y ↔ x ≤ y :=
by rw ← with_top_equiv_le; simp
@[simp] lemma with_top_equiv_symm_lt {x y : with_top ℕ} :
with_top_equiv.symm x < with_top_equiv.symm y ↔ x < y :=
by rw ← with_top_equiv_lt; simp
/-- `to_with_top` induces an additive monoid isomorphism between `enat` and `with_top ℕ`. -/
noncomputable def with_top_add_equiv : enat ≃+ with_top ℕ :=
{ map_add' := λ x y, by simp only [with_top_equiv]; convert to_with_top_add,
..with_top_equiv}
end with_top_equiv
lemma lt_wf : well_founded ((<) : enat → enat → Prop) :=
show well_founded (λ a b : enat, a < b),
by haveI := classical.dec; simp only [to_with_top_lt.symm] {eta := ff};
exact inv_image.wf _ (with_top.well_founded_lt nat.lt_wf)
instance : has_well_founded enat := ⟨(<), lt_wf⟩
section find
variables (P : ℕ → Prop) [decidable_pred P]
/-- The smallest `enat` satisfying a (decidable) predicate `P : ℕ → Prop` -/
def find : enat := ⟨∃ n, P n, nat.find⟩
@[simp] lemma find_get (h : (find P).dom) : (find P).get h = nat.find h := rfl
lemma find_dom (h : ∃ n, P n) : (find P).dom := h
lemma lt_find (n : ℕ) (h : ∀ m ≤ n, ¬P m) : (n : enat) < find P :=
begin
rw coe_lt_iff, intro h', rw find_get,
have := @nat.find_spec P _ h',
contrapose! this,
exact h _ this
end
lemma lt_find_iff (n : ℕ) : (n : enat) < find P ↔ (∀ m ≤ n, ¬P m) :=
begin
refine ⟨_, lt_find P n⟩,
intros h m hm,
by_cases H : (find P).dom,
{ apply nat.find_min H, rw coe_lt_iff at h, specialize h H, exact lt_of_le_of_lt hm h },
{ exact not_exists.mp H m }
end
lemma find_le (n : ℕ) (h : P n) : find P ≤ n :=
by { rw le_coe_iff, refine ⟨⟨_, h⟩, @nat.find_min' P _ _ _ h⟩ }
lemma find_eq_top_iff : find P = ⊤ ↔ ∀ n, ¬P n :=
(eq_top_iff_forall_lt _).trans
⟨λ h n, (lt_find_iff P n).mp (h n) _ le_rfl, λ h n, lt_find P n $ λ _ _, h _⟩
end find
noncomputable instance : linear_ordered_add_comm_monoid_with_top enat :=
{ top_add' := top_add,
.. enat.linear_order,
.. enat.ordered_add_comm_monoid,
.. enat.order_top }
end enat
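-- A minimal usage sketch (not part of the original file; it relies only on the
-- lemmas proved above): `⊤` absorbs addition, and the coercion `ℕ → enat`
-- respects `≤`.
example : (2 : enat) + ⊤ = ⊤ := enat.add_top 2
example : ((2 : ℕ) : enat) ≤ ((3 : ℕ) : enat) := enat.coe_le_coe.mpr (by norm_num)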
|
From Coq Require Import ssreflect ssrbool ssrfun.
From MetaCoq.Template Require Import All.
Definition fold_right_i [A B] (f : nat -> B -> A -> A) (a0 : A) :=
(fix fold_right_i n (l : list B) : A :=
match l with
| []%list => a0
| (hd :: tl)%list => f n hd (fold_right_i (S n) tl)
end) 0.
(** Helper functions relying on MetaCoq *)
(** [build_const [t1;..;tn] body] builds the term λ(_:t1)..(_:tn).body *)
Definition build_const (argtys: list term) body :=
List.fold_right
(fun ty t => tLambda (mkBindAnn nAnon Relevant) ty t)
body argtys.
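(* Illustrative note (not in the original file): [build_const [A; B] body] is expected
   to unfold to [tLambda (mkBindAnn nAnon Relevant) A (tLambda (mkBindAnn nAnon Relevant) B body)],
   i.e. anonymous, relevant lambda binders wrapped around [body], with the first
   argument type outermost. *)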
(** [mkApps_ctx t shift ctx] builds the application of t to the context ctx shifted by shift *)
Definition mkApps_ctx (t:term) (shift:nat) (ctx:context) :=
let args :=
let fold_fun args i decl :=
match decl_body decl with
| None => ((tRel (shift+i)) :: args)%list
| Some _ => args
end
in
fold_left_i fold_fun ctx nil in
mkApps t args.
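(* Behavioural sketch (an assumption based on a reading of the code, not verified here):
   each context entry without a body (a plain assumption) at index [i] contributes the
   argument [tRel (shift + i)], while let-bound entries (those with a body) are skipped;
   [t] is then applied to the accumulated arguments with [mkApps]. *)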
Definition is_prim_record (decl:mutual_inductive_body) : option (option ident)
:=
match ind_bodies decl with
| (oib :: nil)%list =>
match ind_ctors oib with
| (ctor_body :: nil)%list => Some (Some (cstr_name ctor_body)) (* Some data is missing wrt to primitivity *)
| _ => None
end
| _ => None
end.
Definition one_ind_body_to_entry (decl : mutual_inductive_body) (oib : one_inductive_body) : one_inductive_entry :=
let ra := remove_arity decl.(ind_npars) in
let ctors := oib.(ind_ctors) in
{|
mind_entry_typename := oib.(ind_name);
mind_entry_arity := oib.(ind_type);
mind_entry_consnames := List.map cstr_name ctors ;
mind_entry_lc := List.map (fun x => ra x.(cstr_type)) ctors;
|}.
Arguments List.combine {_ _} _ _.
Definition mind_body_params (decl : mutual_inductive_body) : context :=
match List.hd_error decl.(ind_bodies) with
| Some oib =>
let args := fst (decompose_prod oib.(ind_type)) in
let nametypes := List.firstn decl.(ind_npars) (uncurry List.combine args) in
List.rev (List.map (uncurry vass) nametypes)
| None => nil
end.
Arguments List.combine [_ _] _ _.
Definition mind_body_to_entry (decl : mutual_inductive_body)
: mutual_inductive_entry :=
{|
mind_entry_record := is_prim_record decl;
mind_entry_finite := ind_finite decl;
mind_entry_params := mind_body_params decl ;
mind_entry_inds := List.map (one_ind_body_to_entry decl) decl.(ind_bodies);
mind_entry_universes := Universes.universes_entry_of_decl decl.(ind_universes);
mind_entry_private := None ;
mind_entry_template := false ;
mind_entry_variance :=
Option.map (List.map Some) decl.(ind_variance)
|}.
Section TemplateMonad.
Import MCMonadNotation.
Polymorphic Definition monad_iteri {T} `{Monad T} {A} (f: nat -> A -> T unit) l :=
monad_map_i f l ;; ret tt.
(** [extract_uniq l fail_msg] returns x if l = [x] or fails with fail_msg *)
Polymorphic Definition extract_uniq {A} (l :list A) (fail_msg:string) : TemplateMonad A :=
match l with | (x :: nil)%list => ret x | _ => tmFail fail_msg end.
Definition isIndRef := fun x => match x with IndRef _ => true | _ => false end.
Polymorphic Definition get_inductive@{u} (qid:qualid): TemplateMonad@{_ u} inductive :=
ts <- tmLocate qid ;;
match List.filter isIndRef ts with
| [ IndRef ind ]%list => ret ind
| (_ :: _ :: _)%list => tmFail ("Ambiguous definition of " ++ qid ++ " as an inductive")%bs
| _ => tmFail ("The ident " ++ qid ++ " does not refer to an existing inductive ")%bs
end.
Definition isConstRef := fun x => match x with ConstRef _ => true | _ => false end.
Definition get_const (qid:qualid): TemplateMonad kername :=
ts <- tmLocate qid ;;
match List.filter isConstRef ts with
| [ ConstRef kn ]%list => ret kn
| (_ :: _ :: _)%list => tmFail ("Ambiguous definition of " ++ qid ++ " as a constant")%bs
| _ => tmFail ("The ident " ++ qid ++ " does not refer to an existing const ant")%bs
end.
Definition assertTM (b:bool) : TemplateMonad unit :=
  if b then ret tt else tmFail "assertion failed"%bs.
(** [def_kername id id_kername] looks up the kernel name of id
and define a constant with name id_kername to it;
fails if the associated kernel name is not uniquely defined *)
Definition def_kername (id id_kername:ident) : TemplateMonad unit :=
globrefs <- tmLocate id ;;
kername <- match globrefs with
| [ConstRef kn]%list => ret kn
| [IndRef ind]%list => tmEval all (inductive_mind ind)
| [VarRef _]%list =>
tmFail ("Found a variable associated to "++ id ++", no associated kername")%bs
| [ConstructRef _ _]%list =>
tmFail ("Found a constructor associated to "++ id ++", no associated kername")%bs
| []%list => tmFail ("No global reference associated to "++id)%bs
| (_ :: _ :: _)%list => tmFail ("Multiple global references associated to "++id)%bs
end ;;
qkername <- tmQuote kername ;;
tmMkDefinition id_kername qkername.
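(* Hypothetical usage sketch (not part of the original file): running the template
   program below would bind the kernel name of [nat] to a new constant [nat_kername]:
   [MetaCoq Run (def_kername "nat"%bs "nat_kername"%bs).] *)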
End TemplateMonad.
|
/-
Copyright (c) 2020 Jean Lo. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jean Lo
! This file was ported from Lean 3 source module dynamics.flow
! leanprover-community/mathlib commit 717c073262cd9d59b1a1dcda7e8ab570c5b63370
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathlib.Topology.Algebra.Group.Basic
import Mathlib.Logic.Function.Iterate
/-!
# Flows and invariant sets
This file defines a flow on a topological space `α` by a topological
monoid `τ` as a continuous monoid-act of `τ` on `α`. Anticipating the
cases where `τ` is one of `ℕ`, `ℤ`, `ℝ⁺`, or `ℝ`, we use additive
notation for the monoids, though the definition does not require
commutativity.
A subset `s` of `α` is invariant under a family of maps `ϕₜ : α → α`
if `ϕₜ s ⊆ s` for all `t`. In many cases `ϕ` will be a flow on
`α`. For the cases where `ϕ` is a flow by an ordered (additive,
commutative) monoid, we additionally define forward invariance, where
`t` ranges over those elements which are nonnegative.
Additionally, we define such constructions as the restriction of a
flow onto an invariant subset, and the time-reversal of a flow by a
group.
-/
open Set Function Filter
/-!
### Invariant sets
-/
section Invariant
variable {τ : Type _} {α : Type _}
/-- A set `s ⊆ α` is invariant under `ϕ : τ → α → α` if
`ϕ t s ⊆ s` for all `t` in `τ`. -/
def IsInvariant (ϕ : τ → α → α) (s : Set α) : Prop :=
∀ t, MapsTo (ϕ t) s s
#align is_invariant IsInvariant
variable (ϕ : τ → α → α) (s : Set α)
theorem isInvariant_iff_image : IsInvariant ϕ s ↔ ∀ t, ϕ t '' s ⊆ s := by
simp_rw [IsInvariant, mapsTo']
#align is_invariant_iff_image isInvariant_iff_image
/-- A set `s ⊆ α` is forward-invariant under `ϕ : τ → α → α` if
`ϕ t s ⊆ s` for all `t ≥ 0`. -/
def IsFwInvariant [Preorder τ] [Zero τ] (ϕ : τ → α → α) (s : Set α) : Prop :=
∀ ⦃t⦄, 0 ≤ t → MapsTo (ϕ t) s s
#align is_fw_invariant IsFwInvariant
theorem IsInvariant.isFwInvariant [Preorder τ] [Zero τ] {ϕ : τ → α → α} {s : Set α}
(h : IsInvariant ϕ s) : IsFwInvariant ϕ s := fun t _ht => h t
#align is_invariant.is_fw_invariant IsInvariant.isFwInvariant
/-- If `τ` is a `CanonicallyOrderedAddMonoid` (e.g., `ℕ` or `ℝ≥0`), then the notions
`IsFwInvariant` and `IsInvariant` are equivalent. -/
theorem IsFwInvariant.isInvariant [CanonicallyOrderedAddMonoid τ] {ϕ : τ → α → α} {s : Set α}
(h : IsFwInvariant ϕ s) : IsInvariant ϕ s := fun t => h (zero_le t)
#align is_fw_invariant.is_invariant IsFwInvariant.isInvariant
/-- If `τ` is a `CanonicallyOrderedAddMonoid` (e.g., `ℕ` or `ℝ≥0`), then the notions
`IsFwInvariant` and `IsInvariant` are equivalent. -/
theorem isFwInvariant_iff_isInvariant [CanonicallyOrderedAddMonoid τ] {ϕ : τ → α → α} {s : Set α} :
IsFwInvariant ϕ s ↔ IsInvariant ϕ s :=
⟨IsFwInvariant.isInvariant, IsInvariant.isFwInvariant⟩
#align is_fw_invariant_iff_is_invariant isFwInvariant_iff_isInvariant
end Invariant
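-- A minimal sketch (not part of the original file): the whole space is invariant
-- under any family of maps, via `Set.mapsTo_univ`.
example (ϕ : ℕ → ℕ → ℕ) : IsInvariant ϕ Set.univ := fun _t => Set.mapsTo_univ _ _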
/-!
### Flows
-/
/-- A flow on a topological space `α` by an additive topological
monoid `τ` is a continuous monoid action of `τ` on `α`.-/
structure Flow (τ : Type _) [TopologicalSpace τ] [AddMonoid τ] [ContinuousAdd τ] (α : Type _)
[TopologicalSpace α] where
toFun : τ → α → α
cont' : Continuous (uncurry toFun)
map_add' : ∀ t₁ t₂ x, toFun (t₁ + t₂) x = toFun t₁ (toFun t₂ x)
map_zero' : ∀ x, toFun 0 x = x
#align flow Flow
namespace Flow
variable {τ : Type _} [AddMonoid τ] [TopologicalSpace τ] [ContinuousAdd τ] {α : Type _}
[TopologicalSpace α] (ϕ : Flow τ α)
instance : Inhabited (Flow τ α) :=
⟨{ toFun := fun _ x => x
cont' := continuous_snd
map_add' := fun _ _ _ => rfl
map_zero' := fun _ => rfl }⟩
instance : CoeFun (Flow τ α) fun _ => τ → α → α := ⟨Flow.toFun⟩
@[ext]
theorem ext : ∀ {ϕ₁ ϕ₂ : Flow τ α}, (∀ t x, ϕ₁ t x = ϕ₂ t x) → ϕ₁ = ϕ₂
  | ⟨f₁, _, _, _⟩, ⟨f₂, _, _, _⟩, h => by
    congr
    funext t x
    exact h t x
#align flow.ext Flow.ext
@[continuity]
protected theorem continuous {β : Type _} [TopologicalSpace β] {t : β → τ} (ht : Continuous t)
{f : β → α} (hf : Continuous f) : Continuous fun x => ϕ (t x) (f x) :=
ϕ.cont'.comp (ht.prod_mk hf)
#align flow.continuous Flow.continuous
alias Flow.continuous ← _root_.Continuous.flow
#align continuous.flow Continuous.flow
theorem map_add (t₁ t₂ : τ) (x : α) : ϕ (t₁ + t₂) x = ϕ t₁ (ϕ t₂ x) := ϕ.map_add' _ _ _
#align flow.map_add Flow.map_add
@[simp]
theorem map_zero : ϕ 0 = id := funext ϕ.map_zero'
#align flow.map_zero Flow.map_zero
theorem map_zero_apply (x : α) : ϕ 0 x = x := ϕ.map_zero' x
#align flow.map_zero_apply Flow.map_zero_apply
/-- Iterations of a continuous function from a topological space `α`
to itself defines a semiflow by `ℕ` on `α`. -/
def fromIter {g : α → α} (h : Continuous g) : Flow ℕ α where
toFun n x := (g^[n]) x
cont' := continuous_uncurry_of_discreteTopology_left (Continuous.iterate h)
map_add' := iterate_add_apply _
map_zero' _x := rfl
#align flow.from_iter Flow.fromIter
/-- Restriction of a flow onto an invariant set. -/
def restrict {s : Set α} (h : IsInvariant ϕ s) : Flow τ (↥s) where
toFun t := (h t).restrict _ _ _
cont' := (ϕ.continuous continuous_fst continuous_subtype_val.snd').subtype_mk _
map_add' _ _ _ := Subtype.ext (map_add _ _ _ _)
map_zero' _ := Subtype.ext (map_zero_apply _ _)
#align flow.restrict Flow.restrict
end Flow
namespace Flow
variable {τ : Type _} [AddCommGroup τ] [TopologicalSpace τ] [TopologicalAddGroup τ] {α : Type _}
[TopologicalSpace α] (ϕ : Flow τ α)
theorem isInvariant_iff_image_eq (s : Set α) : IsInvariant ϕ s ↔ ∀ t, ϕ t '' s = s :=
(isInvariant_iff_image _ _).trans
(Iff.intro
(fun h t => Subset.antisymm (h t) fun _ hx => ⟨_, h (-t) ⟨_, hx, rfl⟩, by simp [← map_add]⟩)
fun h t => by rw [h t])
#align flow.is_invariant_iff_image_eq Flow.isInvariant_iff_image_eq
/-- The time-reversal of a flow `ϕ` by a (commutative, additive) group
is defined `ϕ.reverse t x = ϕ (-t) x`. -/
def reverse : Flow τ α where
toFun t := ϕ (-t)
cont' := ϕ.continuous continuous_fst.neg continuous_snd
map_add' _ _ _ := by dsimp; rw [neg_add, map_add]
map_zero' _ := by dsimp; rw [neg_zero, map_zero_apply]
#align flow.reverse Flow.reverse
-- Porting note: add @continuity to Flow.toFun so that these works:
-- Porting note: Homeomorphism.continuous_toFun : Continuous toFun := by continuity
-- Porting note: Homeomorphism.continuous_invFun : Continuous invFun := by continuity
@[continuity]
theorem continuous_toFun (t : τ) : Continuous (ϕ.toFun t) := by
rw [←curry_uncurry ϕ.toFun]
apply continuous_curry
exact ϕ.cont'
/-- The map `ϕ t` as a homeomorphism. -/
def toHomeomorph (t : τ) : (α ≃ₜ α) where
toFun := ϕ t
invFun := ϕ (-t)
left_inv x := by rw [← map_add, neg_add_self, map_zero_apply]
right_inv x := by rw [← map_add, add_neg_self, map_zero_apply]
#align flow.to_homeomorph Flow.toHomeomorph
theorem image_eq_preimage (t : τ) (s : Set α) : ϕ t '' s = ϕ (-t) ⁻¹' s :=
(ϕ.toHomeomorph t).toEquiv.image_eq_preimage s
#align flow.image_eq_preimage Flow.image_eq_preimage
end Flow
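-- A minimal sketch (not part of the original file): iterating a continuous self-map
-- (here the identity on `ℕ`) yields a flow by `ℕ`, as described for `fromIter` above.
example : Flow ℕ ℕ := Flow.fromIter continuous_id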
|
(*
* Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
*
* SPDX-License-Identifier: GPL-2.0-only
*)
chapter \<open>RPC Receive\<close>
(*<*)
theory RPCTo imports
"CParser.CTranslation"
"AutoCorres.AutoCorres"
begin
(* THIS THEORY IS GENERATED. DO NOT EDIT. *)
(* ucast is type-polymorphic, but often appears in a visually indistinguishable form in proofs.
* These abbreviations help you identify which type signature you're looking at. These
* abbreviations need to exist outside the locale in order to be used in output.
*)
abbreviation "ucast_32_to_32 \<equiv> ucast :: word32 \<Rightarrow> word32"
abbreviation "ucast_s32_to_32 \<equiv> ucast :: sword32 \<Rightarrow> word32"
abbreviation "ucast_32_to_s32 \<equiv> ucast :: word32 \<Rightarrow> sword32"
abbreviation "ucast_s32_to_s32 \<equiv> ucast :: sword32 \<Rightarrow> sword32"
(* As above for scast. *)
abbreviation "scast_32_to_32 \<equiv> scast :: word32 \<Rightarrow> word32"
abbreviation "scast_s32_to_32 \<equiv> scast :: sword32 \<Rightarrow> word32"
abbreviation "scast_32_to_s32 \<equiv> scast :: word32 \<Rightarrow> sword32"
abbreviation "scast_s32_to_s32 \<equiv> scast :: sword32 \<Rightarrow> sword32"
declare [[allow_underscore_idents=true]]
external_file "RPCTo.c"
install_C_file "RPCTo.c"
(* Use non-determinism instead of the standard option monad type strengthening and do not heap
* abstract seL4_SetMR.
*)
autocorres [ts_rules = nondet, no_heap_abs = seL4_SetMR] "RPCTo.c"
context RPCTo begin
(* Repeated constants from C. *)
abbreviation "seL4_MsgMaxLength \<equiv> 120"
definition
seL4_SetMR_lifted' :: "int \<Rightarrow> word32 \<Rightarrow> lifted_globals \<Rightarrow> (unit \<times> lifted_globals) set \<times> bool"
where
"seL4_SetMR_lifted' i val \<equiv>
do
ret' \<leftarrow> seL4_GetIPCBuffer';
guard (\<lambda>s. i < seL4_MsgMaxLength);
guard (\<lambda>s. 0 \<le> i);
modify (\<lambda>s. s \<lparr>heap_seL4_IPCBuffer__C := (heap_seL4_IPCBuffer__C s)(ret' :=
msg_C_update (
\<lambda>a. Arrays.update a (nat i) val) (heap_seL4_IPCBuffer__C s ret')
) \<rparr>)
od"
end
locale RPCTo_glue = RPCTo +
assumes seL4_SetMR_axiom: "exec_concrete lift_global_heap (seL4_SetMR' i val) = seL4_SetMR_lifted' i val"
assumes RPCTo_echo_int_wp: "\<lbrace>\<lambda>s0'. \<forall>r1'. P2' r1'
s0'\<rbrace> RPCTo_echo_int' i3' \<lbrace>P2'\<rbrace>!"
assumes RPCTo_echo_parameter_wp: "\<lbrace>\<lambda>s4'. \<forall>r5'. P6' r5'
s4'\<rbrace> RPCTo_echo_parameter' pin7' pout8' \<lbrace>P6'\<rbrace>!"
assumes RPCTo_echo_char_wp: "\<lbrace>\<lambda>s9'. \<forall>r10'. P11' r10'
s9'\<rbrace> RPCTo_echo_char' i12' \<lbrace>P11'\<rbrace>!"
assumes RPCTo_increment_char_wp: "\<lbrace>\<lambda>s13'. \<forall>r14'. P15'
r14' s13'\<rbrace> RPCTo_increment_char' x16' \<lbrace>P15'\<rbrace>!"
assumes RPCTo_increment_parameter_wp: "\<lbrace>\<lambda>s17'. \<forall>r18'.
P19' r18' s17'\<rbrace> RPCTo_increment_parameter' x20'
\<lbrace>P19'\<rbrace>!"
assumes RPCTo_increment_64_wp: "\<lbrace>\<lambda>s21'. \<forall>r22'. P23'
r22' s21'\<rbrace> RPCTo_increment_64' x24' \<lbrace>P23'\<rbrace>!"
assumes swi_safe_to_ignore[simplified, simp]:
"asm_semantics_ok_to_ignore TYPE(nat) true (''swi '' @ x)"
begin
definition
globals_frame_intact :: "lifted_globals \<Rightarrow> bool"
where
"globals_frame_intact s \<equiv> is_valid_seL4_IPCBuffer__C'ptr s (Ptr (scast seL4_GlobalsFrame))"
definition
ipc_buffer_valid :: "lifted_globals \<Rightarrow> bool"
where
"ipc_buffer_valid s \<equiv> is_valid_seL4_IPCBuffer__C s (heap_seL4_IPCBuffer__C'ptr s (Ptr (scast seL4_GlobalsFrame)))"
lemma abort_wp[wp]:
"\<lbrace>\<lambda>_. False\<rbrace> abort' \<lbrace>P\<rbrace>!"
by (rule validNF_false_pre)
definition
setMR :: "lifted_globals \<Rightarrow> nat \<Rightarrow> word32 \<Rightarrow> lifted_globals"
where
"setMR s i v \<equiv>
s\<lparr>heap_seL4_IPCBuffer__C := (heap_seL4_IPCBuffer__C s)
(heap_seL4_IPCBuffer__C'ptr s (Ptr (scast seL4_GlobalsFrame)) :=
msg_C_update (\<lambda>a. Arrays.update a i v)
(heap_seL4_IPCBuffer__C s (heap_seL4_IPCBuffer__C'ptr s
(Ptr (scast seL4_GlobalsFrame)))))\<rparr>"
definition
setMRs :: "lifted_globals \<Rightarrow> word32 \<Rightarrow> word32 \<Rightarrow>
word32 \<Rightarrow> word32 \<Rightarrow> lifted_globals"
where
"setMRs s mr0 mr1 mr2 mr3 \<equiv>
setMR (setMR (setMR (setMR s 0 mr0) 1 mr1) 2 mr2) 3 mr3"
lemma seL4_GetIPCBuffer_wp':
"\<forall>s'. \<lbrace>\<lambda>s. globals_frame_intact s \<and>
s = s'\<rbrace>
seL4_GetIPCBuffer'
\<lbrace>\<lambda>r s. r = heap_seL4_IPCBuffer__C'ptr s (Ptr (scast seL4_GlobalsFrame)) \<and>
s = s'\<rbrace>!"
apply (rule allI)
apply (simp add:seL4_GetIPCBuffer'_def)
apply wp
apply (clarsimp simp:globals_frame_intact_def)
done
lemmas seL4_GetIPCBuffer_wp[wp_unsafe] =
seL4_GetIPCBuffer_wp'[THEN validNF_make_schematic_post, simplified]
lemma seL4_SetMR_wp[wp_unsafe]:
notes seL4_SetMR_axiom[simp]
shows
"\<lbrace>\<lambda>s. globals_frame_intact s \<and>
ipc_buffer_valid s \<and>
i \<ge> 0 \<and>
i < seL4_MsgMaxLength \<and>
(\<forall>x. P x (setMR s (nat i) v))\<rbrace>
exec_concrete lift_global_heap (seL4_SetMR' i v)
\<lbrace>P\<rbrace>!"
apply (simp add:seL4_SetMR_lifted'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (simp add:setMR_def globals_frame_intact_def ipc_buffer_valid_def)
done
lemma seL4_GetMR_wp[wp_unsafe]:
"\<lbrace>\<lambda>s. \<forall>x. i \<ge> 0 \<and>
i < seL4_MsgMaxLength \<and>
globals_frame_intact s \<and>
ipc_buffer_valid s \<and>
P x s\<rbrace>
seL4_GetMR' i
\<lbrace>P\<rbrace>!"
apply (simp add:seL4_GetMR'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (simp add:globals_frame_intact_def ipc_buffer_valid_def)
done
lemma seL4_Wait_wp[wp_unsafe]:
"\<lbrace>\<lambda>s. globals_frame_intact s \<and>
ipc_buffer_valid s \<and>
(\<forall>x v0 v1 v2 v3. P x (setMRs s v0 v1 v2 v3))\<rbrace>
seL4_Wait' cap NULL
\<lbrace>P\<rbrace>!"
apply (simp add:seL4_Wait'_def)
apply (wp seL4_SetMR_wp)
apply (simp add:globals_frame_intact_def ipc_buffer_valid_def setMRs_def
setMR_def)
done
lemma seL4_ReplyWait_wp[wp_unsafe]:
"\<lbrace>\<lambda>s. globals_frame_intact s \<and>
ipc_buffer_valid s \<and>
(\<forall>x v0 v1 v2 v3. P x (setMRs s v0 v1 v2 v3))\<rbrace>
seL4_ReplyWait' cap info NULL
\<lbrace>P\<rbrace>!"
apply (simp add:seL4_ReplyWait'_def seL4_GetMR'_def)
apply (wp seL4_SetMR_wp seL4_GetIPCBuffer_wp)
apply (simp add:globals_frame_intact_def ipc_buffer_valid_def setMRs_def
setMR_def)
done
definition
thread_count :: word32
where
"thread_count \<equiv> 2"
(* Various definitions used in assumptions on the TLS region. *)
definition
tls_ptr :: "lifted_globals \<Rightarrow> camkes_tls_t_C ptr"
where
"tls_ptr s \<equiv> Ptr (ptr_val (heap_seL4_IPCBuffer__C'ptr s
(Ptr (scast_s32_to_32 seL4_GlobalsFrame))) && 0xFFFFF000)"
definition
tls :: "lifted_globals \<Rightarrow> camkes_tls_t_C"
where
"tls s \<equiv> heap_camkes_tls_t_C s (tls_ptr s)"
definition
tls_valid :: "lifted_globals \<Rightarrow> bool"
where
"tls_valid s \<equiv> is_valid_camkes_tls_t_C s (tls_ptr s)"
lemma camkes_get_tls_wp':
"\<forall>s'. \<lbrace>\<lambda>s. tls_valid s \<and> globals_frame_intact s \<and> s = s'\<rbrace>
camkes_get_tls'
\<lbrace>\<lambda>r s. s = s' \<and> r = tls_ptr s\<rbrace>!"
apply (rule allI)
apply (simp add:camkes_get_tls'_def seL4_GetIPCBuffer'_def tls_valid_def)
apply wp
apply (clarsimp simp:globals_frame_intact_def tls_ptr_def)
done
lemmas camkes_get_tls'_wp[wp] =
camkes_get_tls_wp'[THEN validNF_make_schematic_post, simplified]
(*>*)
text \<open>
The generated definitions and lemmas in this chapter have been formed from the same procedure
specification provided in the previous chapter. Again, to give some context to the proofs below,
the generated receiving code for the \code{echo\_int} method is given here.
\clisting{to-echo-int.c}
The glue code receiving an RPC invocation from another component unmarshals arguments and then
invokes the user's interface implementation. To show the safety of this glue code we assume that
the user's implementation being invoked does not modify the state of the system. For example, for
the \code{echo\_int} method, we assume the following property.
@{term "\<lbrace>\<lambda>s. \<forall>r. P r s\<rbrace> RPCTo_echo_int' i \<lbrace>P\<rbrace>!"}
The unbound variables in the above statement, @{term P} and @{term i}, unify
with any suitably typed expression to allow use of the assumption in all contexts. This property
states that the
user's implementation, \code{RPCTo\_echo\_int}, only manipulates local variables and does not
write to any global memory. The property we ultimately need from the user's implementation is
weaker than this; that the function does not invalidate the TLS memory. In future the assumption
above will be reduced to this, but for now we assume this stronger property.
\<close>
text \<open>
For each thread-local variable, a function to retrieve a pointer to the relevant memory for the
current thread is emitted. For each of these we generate a proof that it does not modify the
state. This is uninteresting, in and of itself, but useful for reasoning about glue code that
calls these.
\<close>
text \<open>\newpage\<close>
lemma get_echo_int_i_nf:
"\<forall>s25'.
\<lbrace>\<lambda>s26'. globals_frame_intact s26' \<and>
ipc_buffer_valid s26' \<and>
tls_valid s26' \<and>
thread_index_C (tls s26') \<in> {1..thread_count} \<and>
is_valid_w32 s26' (Ptr (symbol_table ''echo_int_i_1'')) \<and>
is_valid_w32 s26' (Ptr (symbol_table ''echo_int_i_2'')) \<and>
s26' = s25'\<rbrace>
get_echo_int_i'
\<lbrace>\<lambda>r27' s26'. r27' \<in> {Ptr (symbol_table
''echo_int_i_1''), Ptr (symbol_table ''echo_int_i_2'')} \<and>
s26' = s25'\<rbrace>!"
apply (rule allI)
apply (simp add:get_echo_int_i'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
(*<*)
lemmas get_echo_int_i_wp[wp_unsafe] =
get_echo_int_i_nf[THEN validNF_make_schematic_post, simplified]
definition
update_global_w32 :: "char list \<Rightarrow> word32 \<Rightarrow> lifted_globals \<Rightarrow> lifted_globals"
where
"update_global_w32 symbol v s \<equiv>
heap_w32_update (\<lambda>c. c(Ptr (symbol_table symbol) := (ucast v))) s"
lemma get_echo_parameter_pin_nf:
"\<forall>s28'.
\<lbrace>\<lambda>s29'. globals_frame_intact s29' \<and>
ipc_buffer_valid s29' \<and>
tls_valid s29' \<and>
thread_index_C (tls s29') \<in> {1..thread_count} \<and>
is_valid_w32 s29' (Ptr (symbol_table ''echo_parameter_pin_1'')) \<and>
is_valid_w32 s29' (Ptr (symbol_table ''echo_parameter_pin_2'')) \<and>
s29' = s28'\<rbrace>
get_echo_parameter_pin'
\<lbrace>\<lambda>r30' s29'. r30' \<in> {Ptr (symbol_table
''echo_parameter_pin_1''), Ptr (symbol_table ''echo_parameter_pin_2'')}
\<and>
s29' = s28'\<rbrace>!"
apply (rule allI)
apply (simp add:get_echo_parameter_pin'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_echo_parameter_pin_wp[wp_unsafe] =
get_echo_parameter_pin_nf[THEN validNF_make_schematic_post, simplified]
lemma get_echo_parameter_pout_nf:
"\<forall>s31'.
\<lbrace>\<lambda>s32'. globals_frame_intact s32' \<and>
ipc_buffer_valid s32' \<and>
tls_valid s32' \<and>
thread_index_C (tls s32') \<in> {1..thread_count} \<and>
is_valid_w32 s32' (Ptr (symbol_table ''echo_parameter_pout_1'')) \<and>
is_valid_w32 s32' (Ptr (symbol_table ''echo_parameter_pout_2'')) \<and>
s32' = s31'\<rbrace>
get_echo_parameter_pout'
\<lbrace>\<lambda>r33' s32'. r33' \<in> {Ptr (symbol_table
''echo_parameter_pout_1''), Ptr (symbol_table ''echo_parameter_pout_2'')
} \<and>
s32' = s31'\<rbrace>!"
apply (rule allI)
apply (simp add:get_echo_parameter_pout'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_echo_parameter_pout_wp[wp_unsafe] =
get_echo_parameter_pout_nf[THEN validNF_make_schematic_post, simplified]
lemma get_echo_char_i_nf:
"\<forall>s34'.
\<lbrace>\<lambda>s35'. globals_frame_intact s35' \<and>
ipc_buffer_valid s35' \<and>
tls_valid s35' \<and>
thread_index_C (tls s35') \<in> {1..thread_count} \<and>
is_valid_w8 s35' (Ptr (symbol_table ''echo_char_i_1'')) \<and>
is_valid_w8 s35' (Ptr (symbol_table ''echo_char_i_2'')) \<and>
s35' = s34'\<rbrace>
get_echo_char_i'
\<lbrace>\<lambda>r36' s35'. r36' \<in> {Ptr (symbol_table
''echo_char_i_1''), Ptr (symbol_table ''echo_char_i_2'')} \<and>
s35' = s34'\<rbrace>!"
apply (rule allI)
apply (simp add:get_echo_char_i'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_echo_char_i_wp[wp_unsafe] =
get_echo_char_i_nf[THEN validNF_make_schematic_post, simplified]
definition
update_global_w8 :: "char list \<Rightarrow> word32 \<Rightarrow> lifted_globals \<Rightarrow> lifted_globals"
where
"update_global_w8 symbol v s \<equiv>
heap_w8_update (\<lambda>c. c(Ptr (symbol_table symbol) := (ucast v))) s"
lemma get_increment_char_x_nf:
"\<forall>s37'.
\<lbrace>\<lambda>s38'. globals_frame_intact s38' \<and>
ipc_buffer_valid s38' \<and>
tls_valid s38' \<and>
thread_index_C (tls s38') \<in> {1..thread_count} \<and>
is_valid_w8 s38' (Ptr (symbol_table ''increment_char_x_1'')) \<and>
is_valid_w8 s38' (Ptr (symbol_table ''increment_char_x_2'')) \<and>
s38' = s37'\<rbrace>
get_increment_char_x'
\<lbrace>\<lambda>r39' s38'. r39' \<in> {Ptr (symbol_table
''increment_char_x_1''), Ptr (symbol_table ''increment_char_x_2'')}
\<and>
s38' = s37'\<rbrace>!"
apply (rule allI)
apply (simp add:get_increment_char_x'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_increment_char_x_wp[wp_unsafe] =
get_increment_char_x_nf[THEN validNF_make_schematic_post, simplified]
lemma get_increment_parameter_x_nf:
"\<forall>s40'.
\<lbrace>\<lambda>s41'. globals_frame_intact s41' \<and>
ipc_buffer_valid s41' \<and>
tls_valid s41' \<and>
thread_index_C (tls s41') \<in> {1..thread_count} \<and>
is_valid_w32 s41' (Ptr (symbol_table ''increment_parameter_x_1'')) \<and>
is_valid_w32 s41' (Ptr (symbol_table ''increment_parameter_x_2'')) \<and>
s41' = s40'\<rbrace>
get_increment_parameter_x'
\<lbrace>\<lambda>r42' s41'. r42' \<in> {Ptr (symbol_table
''increment_parameter_x_1''), Ptr (symbol_table
''increment_parameter_x_2'')} \<and>
s41' = s40'\<rbrace>!"
apply (rule allI)
apply (simp add:get_increment_parameter_x'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_increment_parameter_x_wp[wp_unsafe] =
get_increment_parameter_x_nf[THEN validNF_make_schematic_post, simplified]
lemma get_increment_64_x_nf:
"\<forall>s43'.
\<lbrace>\<lambda>s44'. globals_frame_intact s44' \<and>
ipc_buffer_valid s44' \<and>
tls_valid s44' \<and>
thread_index_C (tls s44') \<in> {1..thread_count} \<and>
is_valid_w64 s44' (Ptr (symbol_table ''increment_64_x_1'')) \<and>
is_valid_w64 s44' (Ptr (symbol_table ''increment_64_x_2'')) \<and>
s44' = s43'\<rbrace>
get_increment_64_x'
\<lbrace>\<lambda>r45' s44'. r45' \<in> {Ptr (symbol_table
''increment_64_x_1''), Ptr (symbol_table ''increment_64_x_2'')} \<and>
s44' = s43'\<rbrace>!"
apply (rule allI)
apply (simp add:get_increment_64_x'_def)
apply (wp seL4_GetIPCBuffer_wp)
apply (clarsimp simp:thread_count_def tls_valid_def tls_def)
apply unat_arith
done
lemmas get_increment_64_x_wp[wp_unsafe] =
get_increment_64_x_nf[THEN validNF_make_schematic_post, simplified]
definition
update_global_w64 :: "char list \<Rightarrow> word32 \<Rightarrow> lifted_globals \<Rightarrow> lifted_globals"
where
"update_global_w64 symbol v s \<equiv>
heap_w64_update (\<lambda>c. c(Ptr (symbol_table symbol) := (ucast v))) s"
definition
update_global_w64_high :: "char list \<Rightarrow> word32 \<Rightarrow> lifted_globals \<Rightarrow> lifted_globals"
where
"update_global_w64_high symbol high s \<equiv>
heap_w64_update (\<lambda>c. c(Ptr (symbol_table symbol) :=
(heap_w64 s (Ptr (symbol_table symbol))) || (ucast high << 32))) s"
(*>*)
text \<open>
For each method in the procedure we generate a function specifically for handling the receipt of a
call to that method. A top-level dispatch function is generated that selects the appropriate
handler to invoke after receiving an RPC invocation. The handler function for the first method is
the code given at the start of this chapter. We generate proofs that the handler
functions do not fail, as given below.
\<close>
lemma echo_int_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s46'. globals_frame_intact s46' \<and>
ipc_buffer_valid s46' \<and>
tls_valid s46' \<and>
thread_index_C (tls s46') \<in> {1..thread_count} \<and>
is_valid_w32 s46' (Ptr (symbol_table ''echo_int_i_1'')) \<and>
is_valid_w32 s46' (Ptr (symbol_table ''echo_int_i_2'')) \<and>
(\<forall>x r47' i_in_value .
\<forall> i_symbol \<in> {''echo_int_i_1'', ''echo_int_i_2''}.
P48' x
(setMR (update_global_w32 i_symbol i_in_value s46') 0 r47')
)\<rbrace>
echo_int_internal'
\<lbrace>P48'\<rbrace>!"
apply (simp add:echo_int_internal'_def)
apply wp
apply (wp RPCTo_echo_int_wp)+
apply (wp get_echo_int_i_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w32_def)
apply force?
done
lemma echo_parameter_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s49'. globals_frame_intact s49' \<and>
ipc_buffer_valid s49' \<and>
tls_valid s49' \<and>
thread_index_C (tls s49') \<in> {1..thread_count} \<and>
is_valid_w32 s49' (Ptr (symbol_table ''echo_parameter_pin_1'')) \<and>
is_valid_w32 s49' (Ptr (symbol_table ''echo_parameter_pin_2'')) \<and>
is_valid_w32 s49' (Ptr (symbol_table ''echo_parameter_pout_1'')) \<and>
is_valid_w32 s49' (Ptr (symbol_table ''echo_parameter_pout_2'')) \<and>
(\<forall>x r50' pin_in_value pout_out_value .
\<forall> pin_symbol \<in> {''echo_parameter_pin_1'',
''echo_parameter_pin_2''}.
\<forall> pout_symbol \<in> {''echo_parameter_pout_1'',
''echo_parameter_pout_2''}.
P51' x
(setMR (setMR (update_global_w32 pin_symbol pin_in_value s49') 0
r50') 1 pout_out_value))\<rbrace>
echo_parameter_internal'
\<lbrace>P51'\<rbrace>!"
apply (simp add:echo_parameter_internal'_def)
apply wp
apply (wp RPCTo_echo_parameter_wp)+
apply (wp get_echo_parameter_pout_wp)
apply (wp get_echo_parameter_pin_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w32_def)
apply force?
done
lemma echo_char_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s52'. globals_frame_intact s52' \<and>
ipc_buffer_valid s52' \<and>
tls_valid s52' \<and>
thread_index_C (tls s52') \<in> {1..thread_count} \<and>
is_valid_w8 s52' (Ptr (symbol_table ''echo_char_i_1'')) \<and>
is_valid_w8 s52' (Ptr (symbol_table ''echo_char_i_2'')) \<and>
(\<forall>x r53' i_in_value .
\<forall> i_symbol \<in> {''echo_char_i_1'', ''echo_char_i_2''}.
P54' x
(setMR (update_global_w8 i_symbol i_in_value s52') 0 r53')
)\<rbrace>
echo_char_internal'
\<lbrace>P54'\<rbrace>!"
apply (simp add:echo_char_internal'_def)
apply wp
apply (wp RPCTo_echo_char_wp)+
apply (wp get_echo_char_i_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w8_def)
apply force?
done
lemma increment_char_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s55'. globals_frame_intact s55' \<and>
ipc_buffer_valid s55' \<and>
tls_valid s55' \<and>
thread_index_C (tls s55') \<in> {1..thread_count} \<and>
is_valid_w8 s55' (Ptr (symbol_table ''increment_char_x_1'')) \<and>
is_valid_w8 s55' (Ptr (symbol_table ''increment_char_x_2'')) \<and>
(\<forall>x x_in_value x_out_value .
\<forall> x_symbol \<in> {''increment_char_x_1'',
''increment_char_x_2''}.
P57' x
(setMR (update_global_w8 x_symbol x_in_value s55') 0 x_out_value)
)\<rbrace>
increment_char_internal'
\<lbrace>P57'\<rbrace>!"
apply (simp add:increment_char_internal'_def)
apply wp
apply (wp RPCTo_increment_char_wp)+
apply (wp get_increment_char_x_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w8_def)
apply force?
done
lemma increment_parameter_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s58'. globals_frame_intact s58' \<and>
ipc_buffer_valid s58' \<and>
tls_valid s58' \<and>
thread_index_C (tls s58') \<in> {1..thread_count} \<and>
is_valid_w32 s58' (Ptr (symbol_table ''increment_parameter_x_1'')) \<and>
is_valid_w32 s58' (Ptr (symbol_table ''increment_parameter_x_2'')) \<and>
(\<forall>x x_in_value x_out_value .
\<forall> x_symbol \<in> {''increment_parameter_x_1'',
''increment_parameter_x_2''}.
P60' x
(setMR (update_global_w32 x_symbol x_in_value s58') 0 x_out_value)
)\<rbrace>
increment_parameter_internal'
\<lbrace>P60'\<rbrace>!"
apply (simp add:increment_parameter_internal'_def)
apply wp
apply (wp RPCTo_increment_parameter_wp)+
apply (wp get_increment_parameter_x_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w32_def)
apply force?
done
lemma increment_64_internal_wp[wp_unsafe]:
notes seL4_GetMR_wp[wp] seL4_SetMR_wp[wp]
shows
"\<lbrace>\<lambda>s61'. globals_frame_intact s61' \<and>
ipc_buffer_valid s61' \<and>
tls_valid s61' \<and>
thread_index_C (tls s61') \<in> {1..thread_count} \<and>
is_valid_w64 s61' (Ptr (symbol_table ''increment_64_x_1'')) \<and>
is_valid_w64 s61' (Ptr (symbol_table ''increment_64_x_2'')) \<and>
(\<forall>x x_in_value x_in_value_high x_out_value x_out_value_high .
\<forall> x_symbol \<in> {''increment_64_x_1'',
''increment_64_x_2''}.
P63' x
(setMR (setMR (update_global_w64_high x_symbol x_in_value_high
(update_global_w64 x_symbol x_in_value s61') ) 0 x_out_value) 1
x_out_value_high))\<rbrace>
increment_64_internal'
\<lbrace>P63'\<rbrace>!"
apply (simp add:increment_64_internal'_def)
apply wp
apply (wp RPCTo_increment_64_wp)+
apply (wp get_increment_64_x_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def thread_count_def setMR_def ucast_id
update_global_w64_def update_global_w64_high_def)
apply force?
done
text \<open>
\newpage
With proofs that the handler functions do not fail in hand, a corresponding proof for the
top-level dispatch function can be generated by composing them. Note that the pre- and
post-conditions of the leaf functions are accumulated in the process.
\<close>
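text \<open>
As a rough sketch of the composition involved (an informal statement of the usual sequencing
rule for these total-correctness triples, not necessarily the exact rule the proof method
applies): from "\<lbrace>P\<rbrace> f \<lbrace>Q\<rbrace>!" and "\<forall>r. \<lbrace>Q r\<rbrace> g r \<lbrace>R\<rbrace>!" one obtains "\<lbrace>P\<rbrace> f >>= g \<lbrace>R\<rbrace>!". This is why the
precondition of the dispatch lemma below simply conjoins the validity facts required by the
individual handlers.
\<close>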
lemma RPCTo_run_internal_wp[wp_unsafe]:
notes seL4_SetMR_axiom[simp] seL4_SetMR_wp[wp] seL4_GetMR_wp[wp]
shows
"\<lbrace>\<lambda>s64'. globals_frame_intact s64' \<and>
tls_valid s64' \<and>
thread_index_C (tls s64') \<in> {1..thread_count} \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_int_i_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_int_i_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pin_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pin_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pout_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pout_2'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''echo_char_i_1'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''echo_char_i_2'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''increment_char_x_1'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''increment_char_x_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''increment_parameter_x_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''increment_parameter_x_2'')) \<and>
is_valid_w64 s64' (Ptr (symbol_table ''increment_64_x_1'')) \<and>
is_valid_w64 s64' (Ptr (symbol_table ''increment_64_x_2'')) \<and>
ipc_buffer_valid s64'\<rbrace>
RPCTo__run_internal' first65' info66'
\<lbrace>\<lambda>_ s64'. globals_frame_intact s64' \<and>
tls_valid s64' \<and>
thread_index_C (tls s64') \<in> {1..thread_count} \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_int_i_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_int_i_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pin_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pin_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pout_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''echo_parameter_pout_2'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''echo_char_i_1'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''echo_char_i_2'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''increment_char_x_1'')) \<and>
is_valid_w8 s64' (Ptr (symbol_table ''increment_char_x_2'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''increment_parameter_x_1'')) \<and>
is_valid_w32 s64' (Ptr (symbol_table ''increment_parameter_x_2'')) \<and>
is_valid_w64 s64' (Ptr (symbol_table ''increment_64_x_1'')) \<and>
is_valid_w64 s64' (Ptr (symbol_table ''increment_64_x_2'')) \<and>
ipc_buffer_valid s64'\<rbrace>!"
apply (simp add:RPCTo__run_internal'_def)
apply wp
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp echo_int_internal_wp)
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp echo_parameter_internal_wp)
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp echo_char_internal_wp)
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp increment_char_internal_wp)
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp increment_parameter_internal_wp)
apply (wp seL4_ReplyWait_wp)
apply (simp add:seL4_MessageInfo_new'_def)
apply wp
apply (wp increment_64_internal_wp)
apply (wp seL4_Wait_wp)+
apply (clarsimp simp:globals_frame_intact_def ipc_buffer_valid_def
tls_valid_def tls_def tls_ptr_def ucast_id seL4_GetIPCBuffer'_def
thread_count_def setMR_def setMRs_def update_global_w32_def
update_global_w8_def update_global_w64_def update_global_w64_high_def)
done
(*<*)
end
end
(*>*)
|
! { dg-do run }
! Type-bound procedures
! Check they can actually be called and run correctly.
! This also checks for correct module save/restore.
! FIXME: Check that calls to inherited bindings work once CLASS allows that.
MODULE m
IMPLICIT NONE
TYPE mynum
REAL :: num_real
INTEGER :: num_int
CONTAINS
PROCEDURE, PASS, PRIVATE :: add_mynum ! Check that this may be PRIVATE.
PROCEDURE, PASS :: add_int
PROCEDURE, PASS :: add_real
PROCEDURE, PASS :: assign_int
PROCEDURE, PASS :: assign_real
PROCEDURE, PASS(from) :: assign_to_int
PROCEDURE, PASS(from) :: assign_to_real
PROCEDURE, PASS :: get_all
GENERIC :: OPERATOR(+) => add_mynum, add_int, add_real
GENERIC :: OPERATOR(.GET.) => get_all
GENERIC :: ASSIGNMENT(=) => assign_int, assign_real, &
assign_to_int, assign_to_real
END TYPE mynum
CONTAINS
TYPE(mynum) FUNCTION add_mynum (a, b)
CLASS(mynum), INTENT(IN) :: a, b
add_mynum = mynum (a%num_real + b%num_real, a%num_int + b%num_int)
END FUNCTION add_mynum
TYPE(mynum) FUNCTION add_int (a, b)
CLASS(mynum), INTENT(IN) :: a
INTEGER, INTENT(IN) :: b
add_int = mynum (a%num_real, a%num_int + b)
END FUNCTION add_int
TYPE(mynum) FUNCTION add_real (a, b)
CLASS(mynum), INTENT(IN) :: a
REAL, INTENT(IN) :: b
add_real = mynum (a%num_real + b, a%num_int)
END FUNCTION add_real
REAL FUNCTION get_all (me)
CLASS(mynum), INTENT(IN) :: me
get_all = me%num_real + me%num_int
END FUNCTION get_all
SUBROUTINE assign_real (dest, from)
CLASS(mynum), INTENT(INOUT) :: dest
REAL, INTENT(IN) :: from
dest%num_real = from
END SUBROUTINE assign_real
SUBROUTINE assign_int (dest, from)
CLASS(mynum), INTENT(INOUT) :: dest
INTEGER, INTENT(IN) :: from
dest%num_int = from
END SUBROUTINE assign_int
SUBROUTINE assign_to_real (dest, from)
REAL, INTENT(OUT) :: dest
CLASS(mynum), INTENT(IN) :: from
dest = from%num_real
END SUBROUTINE assign_to_real
SUBROUTINE assign_to_int (dest, from)
INTEGER, INTENT(OUT) :: dest
CLASS(mynum), INTENT(IN) :: from
dest = from%num_int
END SUBROUTINE assign_to_int
! Basic test that it works within the module.
SUBROUTINE check_in_module ()
IMPLICIT NONE
TYPE(mynum) :: num
num = mynum (1.0, 2)
num = num + 7
IF (num%num_real /= 1.0 .OR. num%num_int /= 9) CALL abort ()
END SUBROUTINE check_in_module
END MODULE m
! Here we see it also works for use-associated operators loaded from a module.
PROGRAM main
USE m, ONLY: mynum, check_in_module
IMPLICIT NONE
TYPE(mynum) :: num1, num2, num3
REAL :: real_var
INTEGER :: int_var
CALL check_in_module ()
num1 = mynum (1.0, 2)
num2 = mynum (2.0, 3)
num3 = num1 + num2
IF (num3%num_real /= 3.0 .OR. num3%num_int /= 5) CALL abort ()
num3 = num1 + 5
IF (num3%num_real /= 1.0 .OR. num3%num_int /= 7) CALL abort ()
num3 = num1 + (-100.5)
IF (num3%num_real /= -99.5 .OR. num3%num_int /= 2) CALL abort ()
num3 = 42
num3 = -1.2
IF (num3%num_real /= -1.2 .OR. num3%num_int /= 42) CALL abort ()
real_var = num3
int_var = num3
IF (real_var /= -1.2 .OR. int_var /= 42) CALL abort ()
IF (.GET. num1 /= 3.0) CALL abort ()
END PROGRAM main
! { dg-final { cleanup-modules "m" } }
|
{-# OPTIONS --without-K #-}
module A where
open import Data.Nat
open import Data.Empty
open import Data.Unit
open import Data.Sum
open import Data.Product
infix 4 _≡_ -- propositional equality
infixr 10 _◎_
infixr 30 _⟷_
------------------------------------------------------------------------------
-- Our own version of refl that makes 'a' explicit
data _≡_ {ℓ} {A : Set ℓ} : (a b : A) → Set ℓ where
refl : (a : A) → (a ≡ a)
sym : ∀ {ℓ} {A : Set ℓ} {a b : A} → (a ≡ b) → (b ≡ a)
sym {a = a} {b = .a} (refl .a) = refl a
{--
Just confirming that the following does not typecheck!
proof-irrelevance : {A : Set} {x y : A} (p q : x ≡ y) → p ≡ q
proof-irrelevance (refl x) (refl .x) = refl (refl x)
--}
------------------------------------------------------------------------------
{--
Types are higher groupoids:
- 0 is empty
- 1 has one element and one path refl
- sum type is disjoint union; paths are component wise
- product type is cartesian product; paths are pairs of paths
--}
data U : Set where
ZERO : U
ONE : U
PLUS : U → U → U
TIMES : U → U → U
-- Points
⟦_⟧ : U → Set
⟦ ZERO ⟧ = ⊥
⟦ ONE ⟧ = ⊤
⟦ PLUS t t' ⟧ = ⟦ t ⟧ ⊎ ⟦ t' ⟧
⟦ TIMES t t' ⟧ = ⟦ t ⟧ × ⟦ t' ⟧
BOOL : U
BOOL = PLUS ONE ONE
BOOL² : U
BOOL² = TIMES BOOL BOOL
TRUE : ⟦ BOOL ⟧
TRUE = inj₁ tt
FALSE : ⟦ BOOL ⟧
FALSE = inj₂ tt
NOT : ⟦ BOOL ⟧ → ⟦ BOOL ⟧
NOT (inj₁ tt) = FALSE
NOT (inj₂ tt) = TRUE
CNOT : ⟦ BOOL ⟧ → ⟦ BOOL ⟧ → ⟦ BOOL ⟧ × ⟦ BOOL ⟧
CNOT (inj₁ tt) b = (TRUE , NOT b)
CNOT (inj₂ tt) b = (FALSE , b)
------------------------------------------------------------------------------
-- Paths connect points in t₁ and t₂ if there is an isomorphism between the
-- types t₁ and t₂. The family ⟷ plays the role of identity types in HoTT
data _⟷_ : U → U → Set where
unite₊ : {t : U} → PLUS ZERO t ⟷ t
uniti₊ : {t : U} → t ⟷ PLUS ZERO t
swap₊ : {t₁ t₂ : U} → PLUS t₁ t₂ ⟷ PLUS t₂ t₁
assocl₊ : {t₁ t₂ t₃ : U} → PLUS t₁ (PLUS t₂ t₃) ⟷ PLUS (PLUS t₁ t₂) t₃
assocr₊ : {t₁ t₂ t₃ : U} → PLUS (PLUS t₁ t₂) t₃ ⟷ PLUS t₁ (PLUS t₂ t₃)
unite⋆ : {t : U} → TIMES ONE t ⟷ t
uniti⋆ : {t : U} → t ⟷ TIMES ONE t
swap⋆ : {t₁ t₂ : U} → TIMES t₁ t₂ ⟷ TIMES t₂ t₁
assocl⋆ : {t₁ t₂ t₃ : U} → TIMES t₁ (TIMES t₂ t₃) ⟷ TIMES (TIMES t₁ t₂) t₃
assocr⋆ : {t₁ t₂ t₃ : U} → TIMES (TIMES t₁ t₂) t₃ ⟷ TIMES t₁ (TIMES t₂ t₃)
distz : {t : U} → TIMES ZERO t ⟷ ZERO
factorz : {t : U} → ZERO ⟷ TIMES ZERO t
dist : {t₁ t₂ t₃ : U} →
TIMES (PLUS t₁ t₂) t₃ ⟷ PLUS (TIMES t₁ t₃) (TIMES t₂ t₃)
factor : {t₁ t₂ t₃ : U} →
PLUS (TIMES t₁ t₃) (TIMES t₂ t₃) ⟷ TIMES (PLUS t₁ t₂) t₃
id⟷ : {t : U} → t ⟷ t
sym⟷ : {t₁ t₂ : U} → (t₁ ⟷ t₂) → (t₂ ⟷ t₁)
_◎_ : {t₁ t₂ t₃ : U} → (t₁ ⟷ t₂) → (t₂ ⟷ t₃) → (t₁ ⟷ t₃)
_⊕_ : {t₁ t₂ t₃ t₄ : U} →
(t₁ ⟷ t₃) → (t₂ ⟷ t₄) → (PLUS t₁ t₂ ⟷ PLUS t₃ t₄)
_⊗_ : {t₁ t₂ t₃ t₄ : U} →
(t₁ ⟷ t₃) → (t₂ ⟷ t₄) → (TIMES t₁ t₂ ⟷ TIMES t₃ t₄)
cond : {t₁ t₂ : U} → (t₁ ⟷ t₂) → (t₁ ⟷ t₂) →
((TIMES BOOL t₁) ⟷ (TIMES BOOL t₂))
cond f g = dist ◎ ((id⟷ ⊗ f) ⊕ (id⟷ ⊗ g)) ◎ factor
controlled : {t : U} → (t ⟷ t) → ((TIMES BOOL t) ⟷ (TIMES BOOL t))
controlled f = cond f id⟷
cnot : BOOL² ⟷ BOOL²
cnot = controlled swap₊
-- Paths: each combinator defines a space of paths between its end points
mutual
Paths : {t₁ t₂ : U} → (t₁ ⟷ t₂) → ⟦ t₁ ⟧ → ⟦ t₂ ⟧ → Set
Paths unite₊ (inj₁ ())
Paths unite₊ (inj₂ v) v' = (v ≡ v')
Paths uniti₊ v (inj₁ ())
Paths uniti₊ v (inj₂ v') = (v ≡ v')
Paths swap₊ (inj₁ v) (inj₁ v') = ⊥
Paths swap₊ (inj₁ v) (inj₂ v') = (v ≡ v')
Paths swap₊ (inj₂ v) (inj₁ v') = (v ≡ v')
Paths swap₊ (inj₂ v) (inj₂ v') = ⊥
Paths assocl₊ (inj₁ v) (inj₁ (inj₁ v')) = (v ≡ v')
Paths assocl₊ (inj₁ v) (inj₁ (inj₂ v')) = ⊥
Paths assocl₊ (inj₁ v) (inj₂ v') = ⊥
Paths assocl₊ (inj₂ (inj₁ v)) (inj₁ (inj₁ v')) = ⊥
Paths assocl₊ (inj₂ (inj₁ v)) (inj₁ (inj₂ v')) = (v ≡ v')
Paths assocl₊ (inj₂ (inj₁ v)) (inj₂ v') = ⊥
Paths assocl₊ (inj₂ (inj₂ v)) (inj₁ v') = ⊥
Paths assocl₊ (inj₂ (inj₂ v)) (inj₂ v') = (v ≡ v')
Paths assocr₊ (inj₁ (inj₁ v)) (inj₁ v') = (v ≡ v')
Paths assocr₊ (inj₁ (inj₁ v)) (inj₂ v') = ⊥
Paths assocr₊ (inj₁ (inj₂ v)) (inj₁ v') = ⊥
Paths assocr₊ (inj₁ (inj₂ v)) (inj₂ (inj₁ v')) = (v ≡ v')
Paths assocr₊ (inj₁ (inj₂ v)) (inj₂ (inj₂ v')) = ⊥
Paths assocr₊ (inj₂ v) (inj₁ v') = ⊥
Paths assocr₊ (inj₂ v) (inj₂ (inj₁ v')) = ⊥
Paths assocr₊ (inj₂ v) (inj₂ (inj₂ v')) = (v ≡ v')
Paths unite⋆ (tt , v) v' = (v ≡ v')
Paths uniti⋆ v (tt , v') = (v ≡ v')
Paths swap⋆ (v₁ , v₂) (v₂' , v₁') = (v₁ ≡ v₁') × (v₂ ≡ v₂')
Paths assocl⋆ (v₁ , (v₂ , v₃)) ((v₁' , v₂') , v₃') =
(v₁ ≡ v₁') × (v₂ ≡ v₂') × (v₃ ≡ v₃')
Paths assocr⋆ ((v₁ , v₂) , v₃) (v₁' , (v₂' , v₃')) =
(v₁ ≡ v₁') × (v₂ ≡ v₂') × (v₃ ≡ v₃')
Paths distz (() , v)
Paths factorz ()
Paths dist (inj₁ v₁ , v₃) (inj₁ (v₁' , v₃')) = (v₁ ≡ v₁') × (v₃ ≡ v₃')
Paths dist (inj₁ v₁ , v₃) (inj₂ (v₂' , v₃')) = ⊥
Paths dist (inj₂ v₂ , v₃) (inj₁ (v₁' , v₃')) = ⊥
Paths dist (inj₂ v₂ , v₃) (inj₂ (v₂' , v₃')) = (v₂ ≡ v₂') × (v₃ ≡ v₃')
Paths factor (inj₁ (v₁ , v₃)) (inj₁ v₁' , v₃') =
(v₁ ≡ v₁') × (v₃ ≡ v₃')
Paths factor (inj₁ (v₁ , v₃)) (inj₂ v₂' , v₃') = ⊥
Paths factor (inj₂ (v₂ , v₃)) (inj₁ v₁' , v₃') = ⊥
Paths factor (inj₂ (v₂ , v₃)) (inj₂ v₂' , v₃') =
(v₂ ≡ v₂') × (v₃ ≡ v₃')
Paths {t} id⟷ v v' = (v ≡ v')
Paths (sym⟷ c) v v' = PathsB c v v'
Paths (_◎_ {t₁} {t₂} {t₃} c₁ c₂) v v' =
Σ[ u ∈ ⟦ t₂ ⟧ ] (Paths c₁ v u × Paths c₂ u v')
Paths (c₁ ⊕ c₂) (inj₁ v) (inj₁ v') = Paths c₁ v v'
Paths (c₁ ⊕ c₂) (inj₁ v) (inj₂ v') = ⊥
Paths (c₁ ⊕ c₂) (inj₂ v) (inj₁ v') = ⊥
Paths (c₁ ⊕ c₂) (inj₂ v) (inj₂ v') = Paths c₂ v v'
Paths (c₁ ⊗ c₂) (v₁ , v₂) (v₁' , v₂') =
Paths c₁ v₁ v₁' × Paths c₂ v₂ v₂'
PathsB : {t₁ t₂ : U} → (t₁ ⟷ t₂) → ⟦ t₂ ⟧ → ⟦ t₁ ⟧ → Set
PathsB unite₊ v (inj₁ ())
PathsB unite₊ v (inj₂ v') = (v ≡ v')
PathsB uniti₊ (inj₁ ())
PathsB uniti₊ (inj₂ v) v' = (v ≡ v')
PathsB swap₊ (inj₁ v) (inj₁ v') = ⊥
PathsB swap₊ (inj₁ v) (inj₂ v') = (v ≡ v')
PathsB swap₊ (inj₂ v) (inj₁ v') = (v ≡ v')
PathsB swap₊ (inj₂ v) (inj₂ v') = ⊥
PathsB assocl₊ (inj₁ (inj₁ v)) (inj₁ v') = (v ≡ v')
PathsB assocl₊ (inj₁ (inj₁ v)) (inj₂ v') = ⊥
PathsB assocl₊ (inj₁ (inj₂ v)) (inj₁ v') = ⊥
PathsB assocl₊ (inj₁ (inj₂ v)) (inj₂ (inj₁ v')) = (v ≡ v')
PathsB assocl₊ (inj₁ (inj₂ v)) (inj₂ (inj₂ v')) = ⊥
PathsB assocl₊ (inj₂ v) (inj₁ v') = ⊥
PathsB assocl₊ (inj₂ v) (inj₂ (inj₁ v')) = ⊥
PathsB assocl₊ (inj₂ v) (inj₂ (inj₂ v')) = (v ≡ v')
PathsB assocr₊ (inj₁ v) (inj₁ (inj₁ v')) = (v ≡ v')
PathsB assocr₊ (inj₁ v) (inj₁ (inj₂ v')) = ⊥
PathsB assocr₊ (inj₁ v) (inj₂ v') = ⊥
PathsB assocr₊ (inj₂ (inj₁ v)) (inj₁ (inj₁ v')) = ⊥
PathsB assocr₊ (inj₂ (inj₁ v)) (inj₁ (inj₂ v')) = (v ≡ v')
PathsB assocr₊ (inj₂ (inj₁ v)) (inj₂ v') = ⊥
PathsB assocr₊ (inj₂ (inj₂ v)) (inj₁ v') = ⊥
PathsB assocr₊ (inj₂ (inj₂ v)) (inj₂ v') = (v ≡ v')
PathsB unite⋆ v (tt , v') = (v ≡ v')
PathsB uniti⋆ (tt , v) v' = (v ≡ v')
PathsB swap⋆ (v₁ , v₂) (v₂' , v₁') = (v₁ ≡ v₁') × (v₂ ≡ v₂')
PathsB assocl⋆ ((v₁ , v₂) , v₃) (v₁' , (v₂' , v₃')) =
(v₁ ≡ v₁') × (v₂ ≡ v₂') × (v₃ ≡ v₃')
PathsB assocr⋆ (v₁ , (v₂ , v₃)) ((v₁' , v₂') , v₃') =
(v₁ ≡ v₁') × (v₂ ≡ v₂') × (v₃ ≡ v₃')
PathsB distz ()
PathsB factorz (() , v)
PathsB dist (inj₁ (v₁ , v₃)) (inj₁ v₁' , v₃') =
(v₁ ≡ v₁') × (v₃ ≡ v₃')
PathsB dist (inj₁ (v₁ , v₃)) (inj₂ v₂' , v₃') = ⊥
PathsB dist (inj₂ (v₂ , v₃)) (inj₁ v₁' , v₃') = ⊥
PathsB dist (inj₂ (v₂ , v₃)) (inj₂ v₂' , v₃') =
(v₂ ≡ v₂') × (v₃ ≡ v₃')
PathsB factor (inj₁ v₁ , v₃) (inj₁ (v₁' , v₃')) =
(v₁ ≡ v₁') × (v₃ ≡ v₃')
PathsB factor (inj₁ v₁ , v₃) (inj₂ (v₂' , v₃')) = ⊥
PathsB factor (inj₂ v₂ , v₃) (inj₁ (v₁' , v₃')) = ⊥
PathsB factor (inj₂ v₂ , v₃) (inj₂ (v₂' , v₃')) =
(v₂ ≡ v₂') × (v₃ ≡ v₃')
PathsB {t} id⟷ v v' = (v ≡ v')
PathsB (sym⟷ c) v v' = Paths c v v'
PathsB (_◎_ {t₁} {t₂} {t₃} c₁ c₂) v v' =
Σ[ u ∈ ⟦ t₂ ⟧ ] (PathsB c₂ v u × PathsB c₁ u v')
PathsB (c₁ ⊕ c₂) (inj₁ v) (inj₁ v') = PathsB c₁ v v'
PathsB (c₁ ⊕ c₂) (inj₁ v) (inj₂ v') = ⊥
PathsB (c₁ ⊕ c₂) (inj₂ v) (inj₁ v') = ⊥
PathsB (c₁ ⊕ c₂) (inj₂ v) (inj₂ v') = PathsB c₂ v v'
PathsB (c₁ ⊗ c₂) (v₁ , v₂) (v₁' , v₂') =
PathsB c₁ v₁ v₁' × PathsB c₂ v₂ v₂'
-- Given a combinator c : t₁ ⟷ t₂ and values v₁ : ⟦ t₁ ⟧ and v₂ : ⟦ t₂ ⟧,
-- Paths c v₁ v₂ gives us the space of paths that could connect v₁ and v₂
-- Examples:
pathIdtt : Paths id⟷ tt tt
pathIdtt = refl tt
-- four different ways of relating F to F:
pathIdFF : Paths id⟷ FALSE FALSE
pathIdFF = refl FALSE
pathIdIdFF : Paths (id⟷ ◎ id⟷) FALSE FALSE
pathIdIdFF = (FALSE , refl FALSE , refl FALSE)
pathNotNotFF : Paths (swap₊ ◎ swap₊) FALSE FALSE
pathNotNotFF = TRUE , refl tt , refl tt
pathPlusFF : Paths (id⟷ ⊕ id⟷) FALSE FALSE
pathPlusFF = refl tt
-- are there 2-paths between the above 3 paths???
-- space of paths is empty; cannot produce any path; can
-- use pattern matching to confirm that the space is empty
pathIdFT : Paths id⟷ FALSE TRUE → ⊤
pathIdFT ()
-- three different ways of relating (F,F) to (F,F)
pathIdFFFF : Paths id⟷ (FALSE , FALSE) (FALSE , FALSE)
pathIdFFFF = refl (FALSE , FALSE)
pathTimesFFFF : Paths (id⟷ ⊗ id⟷) (FALSE , FALSE) (FALSE , FALSE)
pathTimesFFFF = (refl FALSE , refl FALSE)
pathTimesPlusFFFF : Paths
((id⟷ ⊕ id⟷) ⊗ (id⟷ ⊕ id⟷))
(FALSE , FALSE) (FALSE , FALSE)
pathTimesPlusFFFF = (refl tt , refl tt)
pathSwap₊FT : Paths swap₊ FALSE TRUE
pathSwap₊FT = refl tt
pathSwap₊TF : Paths swap₊ TRUE FALSE
pathSwap₊TF = refl tt
-- no path
pathSwap₊FF : Paths swap₊ FALSE FALSE → ⊤
pathSwap₊FF ()
-- intuitively the two paths below should not be related by a 2-path because
-- pathCnotTF is "essentially" cnot which would map (F,F) to (F,F) but
-- pathIdNotTF would map (F,F) to (F,T).
pathIdNotFF : Paths (id⟷ ⊗ swap₊) (FALSE , FALSE) (FALSE , TRUE)
pathIdNotFF = refl FALSE , refl tt
pathIdNotFT : Paths (id⟷ ⊗ swap₊) (FALSE , TRUE) (FALSE , FALSE)
pathIdNotFT = refl FALSE , refl tt
pathIdNotTF : Paths (id⟷ ⊗ swap₊) (TRUE , FALSE) (TRUE , TRUE)
pathIdNotTF = refl TRUE , refl tt
pathIdNotTT : Paths (id⟷ ⊗ swap₊) (TRUE , TRUE) (TRUE , FALSE)
pathIdNotTT = refl TRUE , refl tt
pathIdNotb : {b₁ b₂ : ⟦ BOOL ⟧} → Paths (id⟷ ⊗ swap₊) (b₁ , b₂) (b₁ , NOT b₂)
pathIdNotb {b₁} {inj₁ tt} = refl b₁ , refl tt
pathIdNotb {b₁} {inj₂ tt} = refl b₁ , refl tt
pathCnotbb : {b₁ b₂ : ⟦ BOOL ⟧} → Paths cnot (b₁ , b₂) (CNOT b₁ b₂)
pathCnotbb {inj₁ tt} {inj₁ tt} = inj₁ (tt , TRUE) ,
(refl tt , refl TRUE) ,
(inj₁ (tt , FALSE) ,
(refl tt , refl tt) ,
(refl tt , refl FALSE))
pathCnotbb {inj₁ tt} {inj₂ tt} = inj₁ (tt , FALSE) ,
(refl tt , refl FALSE) ,
(inj₁ (tt , TRUE) ,
(refl tt , refl tt) ,
(refl tt , refl TRUE))
pathCnotbb {inj₂ tt} {b₂} = inj₂ (tt , b₂) ,
(refl tt , refl b₂) ,
(inj₂ (tt , b₂) ,
(refl tt , refl b₂) ,
(refl tt , refl b₂))
pathCnotFF : Paths cnot (FALSE , FALSE) (FALSE , FALSE)
pathCnotFF = inj₂ (tt , FALSE) ,
(refl tt , refl FALSE) ,
(inj₂ (tt , FALSE) ,
(refl tt , refl FALSE) ,
(refl tt , refl FALSE))
pathCnotFT : Paths cnot (FALSE , TRUE) (FALSE , TRUE)
pathCnotFT = inj₂ (tt , TRUE) ,
(refl tt , refl TRUE) ,
(inj₂ (tt , TRUE) ,
(refl tt , refl TRUE) ,
(refl tt , refl TRUE))
pathCnotTF : Paths cnot (TRUE , FALSE) (TRUE , TRUE)
pathCnotTF = inj₁ (tt , FALSE) , -- first intermediate value
-- path using dist from (T,F) to (inj₁ (tt , F))
(refl tt , refl FALSE) ,
-- path from (inj₁ (tt , F)) to (T,T)
(inj₁ (tt , TRUE) , -- next intermediate value
(refl tt , refl tt) ,
(refl tt , refl TRUE))
pathCnotTT : Paths cnot (TRUE , TRUE) (TRUE , FALSE)
pathCnotTT = inj₁ (tt , TRUE) ,
(refl tt , refl TRUE) ,
(inj₁ (tt , FALSE) ,
(refl tt , refl tt) ,
(refl tt , refl FALSE))
pathUnite₊ : {t : U} {v v' : ⟦ t ⟧} → (v ≡ v') → Paths unite₊ (inj₂ v) v'
pathUnite₊ p = p
-- Higher groupoid structure
-- For every path between v₁ and v₂ there is a path between v₂ and v₁
mutual
pathInv : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {c : t₁ ⟷ t₂} →
Paths c v₁ v₂ → Paths (sym⟷ c) v₂ v₁
pathInv {v₁ = inj₁ ()} {v₂ = v} {unite₊}
pathInv {v₁ = inj₂ v} {v₂ = v'} {unite₊} p = sym p
pathInv {v₁ = v} {v₂ = inj₁ ()} {uniti₊}
pathInv {v₁ = v} {v₂ = inj₂ v'} {uniti₊} p = sym p
pathInv {v₁ = inj₁ v} {v₂ = inj₁ v'} {swap₊} ()
pathInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {swap₊} p = sym p
pathInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {swap₊} p = sym p
pathInv {v₁ = inj₂ v} {v₂ = inj₂ v'} {swap₊} ()
pathInv {v₁ = inj₁ v} {v₂ = inj₁ (inj₁ v')} {assocl₊} p = sym p
pathInv {v₁ = inj₁ v} {v₂ = inj₁ (inj₂ v')} {assocl₊} ()
pathInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {assocl₊} ()
pathInv {v₁ = inj₂ (inj₁ v)} {v₂ = inj₁ (inj₁ v')} {assocl₊} ()
pathInv {v₁ = inj₂ (inj₁ v)} {v₂ = inj₁ (inj₂ v')} {assocl₊} p = sym p
pathInv {v₁ = inj₂ (inj₁ v)} {v₂ = inj₂ v'} {assocl₊} ()
pathInv {v₁ = inj₂ (inj₂ v)} {v₂ = inj₁ v'} {assocl₊} ()
pathInv {v₁ = inj₂ (inj₂ v)} {v₂ = inj₂ v'} {assocl₊} p = sym p
pathInv {v₁ = inj₁ (inj₁ v)} {v₂ = inj₁ v'} {assocr₊} p = sym p
pathInv {v₁ = inj₁ (inj₁ v)} {v₂ = inj₂ v'} {assocr₊} ()
pathInv {v₁ = inj₁ (inj₂ v)} {v₂ = inj₁ v'} {assocr₊} ()
pathInv {v₁ = inj₁ (inj₂ v)} {v₂ = inj₂ (inj₁ v')} {assocr₊} p = sym p
pathInv {v₁ = inj₁ (inj₂ v)} {v₂ = inj₂ (inj₂ v')} {assocr₊} ()
pathInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {assocr₊} ()
pathInv {v₁ = inj₂ v} {v₂ = inj₂ (inj₁ v')} {assocr₊} ()
pathInv {v₁ = inj₂ v} {v₂ = inj₂ (inj₂ v')} {assocr₊} p = sym p
pathInv {v₁ = (tt , v)} {v₂ = v'} {unite⋆} p = sym p
pathInv {v₁ = v} {v₂ = (tt , v')} {uniti⋆} p = sym p
pathInv {v₁ = (u , v)} {v₂ = (v' , u')} {swap⋆} (p₁ , p₂) = (sym p₂ , sym p₁)
pathInv {v₁ = (u , (v , w))} {v₂ = ((u' , v') , w')} {assocl⋆} (p₁ , p₂ , p₃)
= (sym p₁ , sym p₂ , sym p₃)
pathInv {v₁ = ((u , v) , w)} {v₂ = (u' , (v' , w'))} {assocr⋆} (p₁ , p₂ , p₃)
= (sym p₁ , sym p₂ , sym p₃)
pathInv {v₁ = _} {v₂ = ()} {distz}
pathInv {v₁ = ()} {v₂ = _} {factorz}
pathInv {v₁ = (inj₁ v₁ , v₃)} {v₂ = inj₁ (v₁' , v₃')} {dist} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathInv {v₁ = (inj₁ v₁ , v₃)} {v₂ = inj₂ (v₂' , v₃')} {dist} ()
pathInv {v₁ = (inj₂ v₂ , v₃)} {v₂ = inj₁ (v₁' , v₃')} {dist} ()
pathInv {v₁ = (inj₂ v₂ , v₃)} {v₂ = inj₂ (v₂' , v₃')} {dist} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathInv {v₁ = inj₁ (v₁ , v₃)} {v₂ = (inj₁ v₁' , v₃')} {factor} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathInv {v₁ = inj₁ (v₁ , v₃)} {v₂ = (inj₂ v₂' , v₃')} {factor} ()
pathInv {v₁ = inj₂ (v₂ , v₃)} {v₂ = (inj₁ v₁' , v₃')} {factor} ()
pathInv {v₁ = inj₂ (v₂ , v₃)} {v₂ = (inj₂ v₂' , v₃')} {factor} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathInv {v₁ = v} {v₂ = v'} {id⟷} p = sym p
pathInv {v₁ = v} {v₂ = v'} {sym⟷ c} p = pathBInv {v₁ = v'} {v₂ = v} {c} p
pathInv {v₁ = v} {v₂ = v'} {c₁ ◎ c₂} (u , (p₁ , p₂)) =
(u , (pathInv {c = c₂} p₂ , pathInv {c = c₁} p₁))
pathInv {v₁ = inj₁ v} {v₂ = inj₁ v'} {c₁ ⊕ c₂} p = pathInv {c = c₁} p
pathInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {c₁ ⊕ c₂} ()
pathInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {c₁ ⊕ c₂} ()
pathInv {v₁ = inj₂ v} {v₂ = inj₂ v'} {c₁ ⊕ c₂} p = pathInv {c = c₂} p
pathInv {v₁ = (u , v)} {v₂ = (u' , v')} {c₁ ⊗ c₂} (p₁ , p₂) =
(pathInv {c = c₁} p₁ , pathInv {c = c₂} p₂)
pathBInv : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {c : t₁ ⟷ t₂} →
PathsB c v₂ v₁ → PathsB (sym⟷ c) v₁ v₂
pathBInv {v₁ = inj₁ ()} {v₂ = v} {unite₊}
pathBInv {v₁ = inj₂ v} {v₂ = v'} {unite₊} p = sym p
pathBInv {v₁ = v} {v₂ = inj₁ ()} {uniti₊}
pathBInv {v₁ = v} {v₂ = inj₂ v'} {uniti₊} p = sym p
pathBInv {v₁ = inj₁ v} {v₂ = inj₁ v'} {swap₊} ()
pathBInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {swap₊} p = sym p
pathBInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {swap₊} p = sym p
pathBInv {v₁ = inj₂ v} {v₂ = inj₂ v'} {swap₊} ()
pathBInv {v₁ = inj₁ v} {v₂ = inj₁ (inj₁ v')} {assocl₊} p = sym p
pathBInv {v₁ = inj₂ v} {v₂ = inj₁ (inj₁ v')} {assocl₊} ()
pathBInv {v₁ = inj₁ v} {v₂ = inj₁ (inj₂ v')} {assocl₊} ()
pathBInv {v₁ = inj₂ (inj₁ v)} {v₂ = inj₁ (inj₂ v')} {assocl₊} p = sym p
pathBInv {v₁ = inj₂ (inj₂ v)} {v₂ = inj₁ (inj₂ v')} {assocl₊} ()
pathBInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {assocl₊} ()
pathBInv {v₁ = inj₂ (inj₁ v)} {v₂ = inj₂ v'} {assocl₊} ()
pathBInv {v₁ = inj₂ (inj₂ v)} {v₂ = inj₂ v'} {assocl₊} p = sym p
pathBInv {v₁ = inj₁ (inj₁ v)} {v₂ = inj₁ v'} {assocr₊} p = sym p
pathBInv {v₁ = inj₁ (inj₂ v)} {v₂ = inj₁ v'} {assocr₊} ()
pathBInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {assocr₊} ()
pathBInv {v₁ = inj₁ (inj₁ v)} {v₂ = inj₂ (inj₁ v')} {assocr₊} ()
pathBInv {v₁ = inj₁ (inj₂ v)} {v₂ = inj₂ (inj₁ v')} {assocr₊} p = sym p
pathBInv {v₁ = inj₂ v} {v₂ = inj₂ (inj₁ v')} {assocr₊} ()
pathBInv {v₁ = inj₁ v} {v₂ = inj₂ (inj₂ v')} {assocr₊} ()
pathBInv {v₁ = inj₂ v} {v₂ = inj₂ (inj₂ v')} {assocr₊} p = sym p
pathBInv {v₁ = (tt , v)} {v₂ = v'} {unite⋆} p = sym p
pathBInv {v₁ = v} {v₂ = (tt , v')} {uniti⋆} p = sym p
pathBInv {v₁ = (u , v)} {v₂ = (v' , u')} {swap⋆} (p₁ , p₂) = (sym p₂ , sym p₁)
pathBInv {v₁ = (u , (v , w))} {v₂ = ((u' , v') , w')} {assocl⋆} (p₁ , p₂ , p₃)
= (sym p₁ , sym p₂ , sym p₃)
pathBInv {v₁ = ((u , v) , w)} {v₂ = (u' , (v' , w'))} {assocr⋆} (p₁ , p₂ , p₃)
= (sym p₁ , sym p₂ , sym p₃)
pathBInv {v₁ = _} {v₂ = ()} {distz}
pathBInv {v₁ = ()} {v₂ = _} {factorz}
pathBInv {v₁ = (inj₁ v₁ , v₃)} {v₂ = inj₁ (v₁' , v₃')} {dist} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathBInv {v₁ = (inj₁ v₁ , v₃)} {v₂ = inj₂ (v₂' , v₃')} {dist} ()
pathBInv {v₁ = (inj₂ v₂ , v₃)} {v₂ = inj₁ (v₁' , v₃')} {dist} ()
pathBInv {v₁ = (inj₂ v₂ , v₃)} {v₂ = inj₂ (v₂' , v₃')} {dist} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathBInv {v₁ = inj₁ (v₁ , v₃)} {v₂ = (inj₁ v₁' , v₃')} {factor} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathBInv {v₁ = inj₁ (v₁ , v₃)} {v₂ = (inj₂ v₂' , v₃')} {factor} ()
pathBInv {v₁ = inj₂ (v₂ , v₃)} {v₂ = (inj₁ v₁' , v₃')} {factor} ()
pathBInv {v₁ = inj₂ (v₂ , v₃)} {v₂ = (inj₂ v₂' , v₃')} {factor} (p₁ , p₂) =
(sym p₁ , sym p₂)
pathBInv {v₁ = v} {v₂ = v'} {id⟷} p = sym p
pathBInv {v₁ = v} {v₂ = v'} {sym⟷ c} p = pathInv {v₁ = v'} {v₂ = v} {c} p
pathBInv {t₁} {t₂} {v₁} {v₂} {c₁ ◎ c₂} (u , (p₂ , p₁)) =
(u , (pathBInv {v₁ = v₁} {v₂ = u} {c = c₁} p₁ ,
pathBInv {v₁ = u} {v₂ = v₂} {c = c₂} p₂))
pathBInv {v₁ = inj₁ v} {v₂ = inj₁ v'} {c₁ ⊕ c₂} p = pathBInv {c = c₁} p
pathBInv {v₁ = inj₁ v} {v₂ = inj₂ v'} {c₁ ⊕ c₂} ()
pathBInv {v₁ = inj₂ v} {v₂ = inj₁ v'} {c₁ ⊕ c₂} ()
pathBInv {v₁ = inj₂ v} {v₂ = inj₂ v'} {c₁ ⊕ c₂} p = pathBInv {c = c₂} p
pathBInv {v₁ = (u , v)} {v₂ = (u' , v')} {c₁ ⊗ c₂} (p₁ , p₂) =
(pathBInv {c = c₁} p₁ , pathBInv {c = c₂} p₂)
-- for every pair of paths, one from v1 to v2 and one from v2 to v3, there is a path from v1
-- to v3 that (obviously) goes through v2
pathTrans : {t₁ t₂ t₃ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {v₃ : ⟦ t₃ ⟧}
{c₁ : t₁ ⟷ t₂} {c₂ : t₂ ⟷ t₃} →
Paths c₁ v₁ v₂ → Paths c₂ v₂ v₃ → Paths (c₁ ◎ c₂) v₁ v₃
pathTrans {v₂ = v₂} p q = (v₂ , p , q)
pathBTrans : {t₁ t₂ t₃ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {v₃ : ⟦ t₃ ⟧}
{c₁ : t₁ ⟷ t₂} {c₂ : t₂ ⟷ t₃} →
PathsB c₁ v₂ v₁ → PathsB c₂ v₃ v₂ → PathsB (c₁ ◎ c₂) v₃ v₁
pathBTrans {v₂ = v₂} p q = (v₂ , q , p)
-- we always have a canonical path from v to v
pathId : {t : U} {v : ⟦ t ⟧} → Paths id⟷ v v
pathId {v = v} = refl v
------------------------------------------------------------------------------
-- Int construction
-- this will allow us to represent paths as values and then define 2-paths
-- between them
data DU : Set where
diff : U → U → DU
pos : DU → U
pos (diff t₁ t₂) = t₁
neg : DU → U
neg (diff t₁ t₂) = t₂
zeroD : DU
zeroD = diff ZERO ZERO
oneD : DU
oneD = diff ONE ZERO
plusD : DU → DU → DU
plusD (diff t₁ t₂) (diff t₁' t₂') = diff (PLUS t₁ t₁') (PLUS t₂ t₂')
timesD : DU → DU → DU
timesD (diff t₁ t₂) (diff t₁' t₂') =
diff (PLUS (TIMES t₁ t₁') (TIMES t₂ t₂'))
(PLUS (TIMES t₂ t₁') (TIMES t₁ t₂'))
dualD : DU → DU
dualD (diff t₁ t₂) = diff t₂ t₁
lolliD : DU → DU → DU
lolliD (diff t₁ t₂) (diff t₁' t₂') = diff (PLUS t₂ t₁') (PLUS t₁ t₂')
_≤=>_ : DU → DU → Set
d₁ ≤=> d₂ = PLUS (pos d₁) (neg d₂) ⟷ PLUS (neg d₁) (pos d₂)
idD : {d : DU} → d ≤=> d
idD = swap₊
--curryD : {d₁ d₂ d₃ : DU} → (plusD d₁ d₂ ≤=> d₃) → (d₁ ≤=> lolliD d₂ d₃)
--curryD f = assocl₊ ◎ f ◎ assocr₊
-- take a path and represent it as a value of type lolli and then use
-- ≤=> between these values as the definition of 2-paths???
------------------------------------------------------------------------------
-- Can we show:
-- p : Paths c v₁ v₂ == pathTrans p (refl v₂)
-- Groupoid structure (i.e. laws), for 2Paths. Some of the rest of
-- the structure is given above already
-- If the following is right, we can come up with syntax (like _[_]⟺[_]_ ) for p₁ [c₁]⟺[c₂] p₂ . We really do
-- need to index the ⟺ by the combinators 'explicitly' as Agda can never infer them.
data 2P {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧ } : {c₁ c₂ : t₁ ⟷ t₂} → Paths c₁ v₁ v₂ → Paths c₂ v₁ v₂ → Set where
id2 : {c : t₁ ⟷ t₂} {p : Paths c v₁ v₂} → 2P {c₁ = c} {c} p p
inv2 : {c₁ c₂ : t₁ ⟷ t₂} {p₁ : Paths c₁ v₁ v₂} {p₂ : Paths c₂ v₁ v₂} → 2P {c₁ = c₁} {c₂} p₁ p₂ → 2P {c₁ = c₂} {c₁} p₂ p₁
comp2 : {c₁ c₂ c₃ : t₁ ⟷ t₂} {p₁ : Paths c₁ v₁ v₂} {p₂ : Paths c₂ v₁ v₂} {p₃ : Paths c₃ v₁ v₂} →
2P {c₁ = c₁} {c₂} p₁ p₂ → 2P {c₁ = c₂} {c₃} p₂ p₃ → 2P {c₁ = c₁} {c₃} p₁ p₃
-- should define composition which effectively does as below??
lid : {c : t₁ ⟷ t₂} {p : Paths c v₁ v₂} → 2P {c₁ = id⟷ ◎ c} {c} (pathTrans {c₁ = id⟷} {c} pathId p) p
rid : {c : t₁ ⟷ t₂} {p : Paths c v₁ v₂} → 2P {c₁ = c ◎ id⟷} {c} (pathTrans {c₁ = c} {id⟷} p pathId) p
-- also need:
-- assoc
-- linv : {c : t₁ ⟷ t₂} {p : Paths c v₁ v₂} → 2P {c₁ = c}
-- linv
-- rinv
-- and perhaps cong, i.e. ◎-resp-2P
mutual
2Paths : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧ } {c₁ c₂ : t₁ ⟷ t₂} {p₁ : Paths c₁ v₁ v₂} {p₂ : Paths c₂ v₁ v₂} → 2P {c₁ = c₁} {c₂} p₁ p₂ → Paths c₁ v₁ v₂ → Paths c₂ v₁ v₂ → Set
2Paths id2 p₁ p₂ = p₁ ≡ p₂
2Paths (inv2 p) p₁ p₂ = 2PathsB p p₁ p₂
2Paths {t₁} {t₂} {v₁} {v₂} (comp2 {c₁ = c₁} {c₂} {c₃} {p₁} {p₂} {p₃} p q) α₁ α₂ =
Σ[ r ∈ Paths c₂ v₁ v₂ ] (2P {c₁ = c₁} {c₂} α₁ r × 2P {c₁ = c₂} {c₃} r α₂)
2Paths lid (a , refl .a , p₂) p₃ = p₂ ≡ p₃
2Paths rid (a , p₂ , refl .a) p₃ = p₂ ≡ p₃
2PathsB : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧ } {c₁ c₂ : t₁ ⟷ t₂} {p₁ : Paths c₁ v₁ v₂} {p₂ : Paths c₂ v₁ v₂} → 2P {c₁ = c₁} {c₂} p₁ p₂ → Paths c₂ v₁ v₂ → Paths c₁ v₁ v₂ → Set
2PathsB id2 p q = q ≡ p
2PathsB (inv2 p) p₁ p₂ = 2PathsB p p₂ p₁
2PathsB (comp2 p q) p₁ p₂ = {!!}
2PathsB lid p (a , refl .a , p₃) = p ≡ p₃
2PathsB rid p (a , p₂ , refl .a) = p ≡ p₂
example : 2Paths {t₁ = BOOL} {t₂ = BOOL} {v₁ = FALSE} {v₂ = FALSE}
{c₁ = id⟷ ◎ id⟷} {c₂ = id⟷} {p₁ = pathIdIdFF}
lid pathIdIdFF pathIdFF
example = refl (refl FALSE)
{--
2Paths : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {c₁ c₂ : t₁ ⟷ t₂} →
Paths c₁ v₁ v₂ → Paths c₂ v₁ v₂ → Set
2Paths p q = {!!}
reflR : {t₁ t₂ : U} {v₁ : ⟦ t₁ ⟧} {v₂ : ⟦ t₂ ⟧} {c : t₁ ⟷ t₂}
{p : Paths c v₁ v₂} → {q : Paths (c ◎ id⟷) v₁ v₂} →
2Paths {t₁} {t₂} {v₁} {v₂} {c} {c ◎ id⟷} p q
reflR {c = unite₊} = {!!}
-- p : Paths unite₊ .v₁ .v₂
-- q : Paths (unite₊ ◎ id⟷) .v₁ .v₂
-- 2Paths p q
reflR {c = uniti₊} = {!!}
reflR {c = swap₊} = {!!}
reflR {c = assocl₊} = {!!}
reflR {c = assocr₊} = {!!}
reflR {c = unite⋆} = {!!}
reflR {c = uniti⋆} = {!!}
reflR {c = swap⋆} = {!!}
reflR {c = assocl⋆} = {!!}
reflR {c = assocr⋆} = {!!}
reflR {c = distz} = {!!}
reflR {c = factorz} = {!!}
reflR {c = dist} = {!!}
reflR {c = factor} = {!!}
reflR {c = id⟷} = {!!}
reflR {c = sym⟷ c} = {!!}
reflR {c = c₁ ◎ c₂} = {!!}
reflR {c = c₁ ⊕ c₂} = {!!}
reflR {c = c₁ ⊗ c₂} = {!!}
--}
{--
-- If we have a path between v₁ and v₁' and a combinator that connects v₁ to
-- v₂, then the combinator also connects v₁' to some v₂' such that there is
-- a path between v₂ and v₂'
pathFunctor : {t₁ t₂ : U} {v₁ v₁' : ⟦ t₁ ⟧} {v₂ v₂' : ⟦ t₂ ⟧} {c : t₁ ⟷ t₂} →
(v₁ ≡ v₁') → Paths c v₁ v₂ → (v₂ ≡ v₂') → Paths c v₁' v₂'
pathFunctor = {!!}
All kinds of structure to investigate in the HoTT book. Let's push forward
with cubical types though...
--}
------------------------------------------------------------------------------
-- N dimensional version
{-
data C : ℕ → Set where
ZD : U → C 0
Node : {n : ℕ} → C n → C n → C (suc n)
⟦_⟧N : {n : ℕ} → C n → Set
⟦ ZD t ⟧N = ⟦ t ⟧
⟦ Node c₁ c₂ ⟧N = ⟦ c₁ ⟧N ⊎ ⟦ c₂ ⟧N
liftN : (n : ℕ) → (t : U) → C n
liftN 0 t = ZD t
liftN (suc n) t = Node (liftN n t) (liftN n ZERO)
zeroN : (n : ℕ) → C n
zeroN n = liftN n ZERO
oneN : (n : ℕ) → C n
oneN n = liftN n ONE
plus : {n : ℕ} → C n → C n → C n
plus (ZD t₁) (ZD t₂) = ZD (PLUS t₁ t₂)
plus (Node c₁ c₂) (Node c₁' c₂') = Node (plus c₁ c₁') (plus c₂ c₂')
times : {m n : ℕ} → C m → C n → C (m + n)
times (ZD t₁) (ZD t₂) = ZD (TIMES t₁ t₂)
times (ZD t) (Node c₁ c₂) = Node (times (ZD t) c₁) (times (ZD t) c₂)
times (Node c₁ c₂) c = Node (times c₁ c) (times c₂ c)
-- N-dimensional paths connect points in c₁ and c₂ if there is an isomorphism
-- between the types c₁ and c₂.
data _⟺_ : {n : ℕ} → C n → C n → Set where
baseC : {t₁ t₂ : U} → (t₁ ⟷ t₂) → ((ZD t₁) ⟺ (ZD t₂))
nodeC : {n : ℕ} {c₁ : C n} {c₂ : C n} {c₃ : C n} {c₄ : C n} →
(c₁ ⟺ c₂) → (c₃ ⟺ c₄) → ((Node c₁ c₃) ⟺ (Node c₂ c₄))
-- zerolC : {n : ℕ} {c : C n} → ((Node c c) ⟺ (zeroN (suc n)))
-- zerorC : {n : ℕ} {c : C n} → ((zeroN (suc n)) ⟺ (Node c c))
NPaths : {n : ℕ} {c₁ c₂ : C n} → (c₁ ⟺ c₂) → ⟦ c₁ ⟧N → ⟦ c₂ ⟧N → Set
NPaths (baseC c) v₁ v₂ = Paths c v₁ v₂
NPaths (nodeC α₁ α₂) (inj₁ v₁) (inj₁ v₂) = NPaths α₁ v₁ v₂
NPaths (nodeC α₁ α₂) (inj₁ v₁) (inj₂ v₂) = ⊥
NPaths (nodeC α₁ α₂) (inj₂ v₁) (inj₁ v₂) = ⊥
NPaths (nodeC α₁ α₂) (inj₂ v₁) (inj₂ v₂) = NPaths α₂ v₁ v₂
--NPaths zerolC v₁ v₂ = {!!}
--NPaths zerorC v₁ v₂ = {!!}
-}
------------------------------------------------------------------------------
|
//==============================================================================
// Copyright 2003 - 2011 LASMEA UMR 6602 CNRS/Univ. Clermont II
// Copyright 2009 - 2011 LRI UMR 8623 CNRS/Univ Paris Sud XI
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
#ifndef BOOST_SIMD_BOOLEAN_FUNCTIONS_LOGICAL_ANDNOT_HPP_INCLUDED
#define BOOST_SIMD_BOOLEAN_FUNCTIONS_LOGICAL_ANDNOT_HPP_INCLUDED
/*!
* \file
**/
#include <boost/simd/include/functor.hpp>
#include <boost/dispatch/include/functor.hpp>
#include <boost/proto/tags.hpp>
/*!
* \ingroup boost_simd_operator
* \defgroup boost_simd_operator_logical_andnot logical_andnot
*
* \par Description
* Returns the logical and of the first parameter and the negation of the second parameter.
* The result type is the logical type associated with the first parameter.
*
* \par Header file
*
* \code
* #include <nt2/include/functions/logical_andnot.hpp>
* \endcode
*
* \par Alias
* \arg l_andnot
*
* \synopsis
*
* \code
* namespace boost::simd
* {
* template <class A0, class A1>
* meta::call<tag::logical_andnot_(A0,A1)>::type
* logical_andnot(const A0 & a0,const A1 & a1);
* }
* \endcode
*
* \param a0 the first parameter of logical_andnot
* \param a1 the second parameter of logical_andnot
*
* \return a value of the logical type associated to the first parameter
*
* \par Notes
* In SIMD mode, this function acts elementwise on the elements of the input vectors
* \par
* This is a logical operation. Such operations return logical types.
* You are invited to consult the rationale.
*
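* \par Semantic equivalence
*
* As an illustrative sketch only (not verbatim library code), for suitable values \c a0 and
* \c a1 the call behaves, elementwise, like:
*
* \code
* logical_and(a0, logical_not(a1))
* \endcode
*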
**/
namespace boost { namespace simd
{
namespace tag
{
/*!
* \brief Define the tag logical_andnot_ of functor logical_andnot
* in namespace boost::simd::tag for toolbox boost.simd.operator
**/
struct logical_andnot_ : ext::elementwise_<logical_andnot_> { typedef ext::elementwise_<logical_andnot_> parent; };
}
BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::logical_andnot_ , logical_andnot , 2 )
BOOST_DISPATCH_FUNCTION_IMPLEMENTATION(tag::logical_andnot_ , l_andnot , 2 )
} }
#include <boost/simd/operator/specific/common.hpp>
#endif
|
C
C $Id: slfrme.f,v 1.4 2008-07-27 00:17:26 haley Exp $
C
C Copyright (C) 2000
C University Corporation for Atmospheric Research
C All Rights Reserved
C
C The use of this Software is governed by a License Agreement.
C
SUBROUTINE SLFRME
C
C The object of calling this routine is really just to advance the
C frame. The calls to PLOTIF are to work around a GKS/translator
C problem: the background color doesn't get set properly for a totally
C blank frame, which is easy to produce in STITLE.
C
CALL PLOTIF (0.,0.,0)
IF (ICFELL('SLFRME',1).NE.0) RETURN
CALL PLOTIF (0.,0.,1)
IF (ICFELL('SLFRME',2).NE.0) RETURN
C
CALL FRAME
IF (ICFELL('SLFRME',3).NE.0) RETURN
C
C Done.
C
RETURN
C
END
|
State Before: R : Type u_1
M : Type u_2
inst✝² : Semiring R
inst✝¹ : AddCommMonoid M
inst✝ : Module R M
N₁ N₂ : Submodule R M
hN₁ : FG N₁
hN₂ : FG N₂
t₁ : Set M
ht₁ : Set.Finite t₁ ∧ span R t₁ = N₁
t₂ : Set M
ht₂ : Set.Finite t₂ ∧ span R t₂ = N₂
⊢ span R (t₁ ∪ t₂) = N₁ ⊔ N₂
State After: no goals
Tactic: rw [span_union, ht₁.2, ht₂.2]
|
[STATEMENT]
lemma mult_mult_of [simp]: "mult (mult_of R) = mult R"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<otimes>\<^bsub>mult_of R\<^esub>) = (\<otimes>\<^bsub>R\<^esub>)
[PROOF STEP]
by (simp add: mult_of_def) |
data M : Set where
m : (I : _) → (I → M) → M
-- inferred
-- m : (I : Set) → (I → M) → M
|
(* Title: HOL/Auth/Recur.thy
Author: Lawrence C Paulson, Cambridge University Computer Laboratory
Copyright 1996 University of Cambridge
*)
section\<open>The Otway-Bull Recursive Authentication Protocol\<close>
theory Recur imports Public begin
text\<open>End marker for message bundles\<close>
abbreviation
END :: "msg" where
"END == Number 0"
(*Two session keys are distributed to each agent except for the initiator,
who receives one.
Perhaps the two session keys could be bundled into a single message.
*)
inductive_set (*Server's response to the nested message*)
respond :: "event list => (msg*msg*key)set"
for evs :: "event list"
where
One: "Key KAB \<notin> used evs
==> (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, END\<rbrace>,
\<lbrace>Crypt (shrK A) \<lbrace>Key KAB, Agent B, Nonce NA\<rbrace>, END\<rbrace>,
KAB) \<in> respond evs"
(*The most recent session key is passed up to the caller*)
| Cons: "[| (PA, RA, KAB) \<in> respond evs;
Key KBC \<notin> used evs; Key KBC \<notin> parts {RA};
PA = Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, P\<rbrace> |]
==> (Hash[Key(shrK B)] \<lbrace>Agent B, Agent C, Nonce NB, PA\<rbrace>,
\<lbrace>Crypt (shrK B) \<lbrace>Key KBC, Agent C, Nonce NB\<rbrace>,
Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace>,
KBC)
\<in> respond evs"
(*Induction over "respond" can be difficult due to the complexity of the
subgoals. Set "responses" captures the general form of certificates.
*)
inductive_set
responses :: "event list => msg set"
for evs :: "event list"
where
(*Server terminates lists*)
Nil: "END \<in> responses evs"
| Cons: "[| RA \<in> responses evs; Key KAB \<notin> used evs |]
==> \<lbrace>Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace> \<in> responses evs"
inductive_set recur :: "event list set"
where
(*Initial trace is empty*)
Nil: "[] \<in> recur"
(*The spy MAY say anything he CAN say. Common to
all similar protocols.*)
| Fake: "[| evsf \<in> recur; X \<in> synth (analz (knows Spy evsf)) |]
==> Says Spy B X # evsf \<in> recur"
(*Alice initiates a protocol run.
END is a placeholder to terminate the nesting.*)
| RA1: "[| evs1 \<in> recur; Nonce NA \<notin> used evs1 |]
==> Says A B (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, Nonce NA, END\<rbrace>)
# evs1 \<in> recur"
(*Bob's response to Alice's message. C might be the Server.
We omit PA = \<lbrace>XA, Agent A, Agent B, Nonce NA, P\<rbrace> because
it complicates proofs, so B may respond to any message at all!*)
| RA2: "[| evs2 \<in> recur; Nonce NB \<notin> used evs2;
Says A' B PA \<in> set evs2 |]
==> Says B C (Hash[Key(shrK B)] \<lbrace>Agent B, Agent C, Nonce NB, PA\<rbrace>)
# evs2 \<in> recur"
(*The Server receives Bob's message and prepares a response.*)
| RA3: "[| evs3 \<in> recur; Says B' Server PB \<in> set evs3;
(PB,RB,K) \<in> respond evs3 |]
==> Says Server B RB # evs3 \<in> recur"
(*Bob receives the returned message and compares the Nonces with
those in the message he previously sent the Server.*)
| RA4: "[| evs4 \<in> recur;
Says B C \<lbrace>XH, Agent B, Agent C, Nonce NB,
XA, Agent A, Agent B, Nonce NA, P\<rbrace> \<in> set evs4;
Says C' B \<lbrace>Crypt (shrK B) \<lbrace>Key KBC, Agent C, Nonce NB\<rbrace>,
Crypt (shrK B) \<lbrace>Key KAB, Agent A, Nonce NB\<rbrace>,
RA\<rbrace> \<in> set evs4 |]
==> Says B A RA # evs4 \<in> recur"
(*No "oops" message can easily be expressed. Each session key is
associated--in two separate messages--with two nonces. This is
one try, but it isn't that useful. Re domino attack, note that
Recur.thy proves that each session key is secure provided the two
peers are, even if there are compromised agents elsewhere in
the chain. Oops cases proved using parts_cut, Key_in_keysFor_parts,
etc.
Oops: "[| evso \<in> recur; Says Server B RB \<in> set evso;
RB \<in> responses evs'; Key K \<in> parts {RB} |]
==> Notes Spy \<lbrace>Key K, RB\<rbrace> # evso \<in> recur"
*)
declare Says_imp_knows_Spy [THEN analz.Inj, dest]
declare parts.Body [dest]
declare analz_into_parts [dest]
declare Fake_parts_insert_in_Un [dest]
(** Possibility properties: traces that reach the end
ONE theorem would be more elegant and faster!
By induction on a list of agents (no repetitions)
**)
text\<open>Simplest case: Alice goes directly to the server\<close>
lemma "Key K \<notin> used []
==> \<exists>NA. \<exists>evs \<in> recur.
Says Server A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent Server, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2] recur.Nil [THEN recur.RA1,
THEN recur.RA3 [OF _ _ respond.One]])
apply (possibility, simp add: used_Cons)
done
text\<open>Case two: Alice, Bob and the server\<close>
lemma "[| Key K \<notin> used []; Key K' \<notin> used []; K \<noteq> K';
Nonce NA \<notin> used []; Nonce NB \<notin> used []; NA < NB |]
==> \<exists>NA. \<exists>evs \<in> recur.
Says B A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent B, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2]
recur.Nil
[THEN recur.RA1 [of _ NA],
THEN recur.RA2 [of _ NB],
THEN recur.RA3 [OF _ _ respond.One
[THEN respond.Cons [of _ _ K _ K']]],
THEN recur.RA4], possibility)
apply (auto simp add: used_Cons)
done
(*Case three: Alice, Bob, Charlie and the server. Rather slow (5 seconds)*)
lemma "[| Key K \<notin> used []; Key K' \<notin> used [];
Key K'' \<notin> used []; K \<noteq> K'; K' \<noteq> K''; K \<noteq> K'';
Nonce NA \<notin> used []; Nonce NB \<notin> used []; Nonce NC \<notin> used [];
NA < NB; NB < NC |]
==> \<exists>K. \<exists>NA. \<exists>evs \<in> recur.
Says B A \<lbrace>Crypt (shrK A) \<lbrace>Key K, Agent B, Nonce NA\<rbrace>,
END\<rbrace> \<in> set evs"
apply (intro exI bexI)
apply (rule_tac [2]
recur.Nil [THEN recur.RA1,
THEN recur.RA2, THEN recur.RA2,
THEN recur.RA3
[OF _ _ respond.One
[THEN respond.Cons, THEN respond.Cons]],
THEN recur.RA4, THEN recur.RA4])
apply basic_possibility
apply (tactic "DEPTH_SOLVE (swap_res_tac @{context} [refl, conjI, disjCI] 1)")
done
lemma respond_imp_not_used: "(PA,RB,KAB) \<in> respond evs ==> Key KAB \<notin> used evs"
by (erule respond.induct, simp_all)
lemma Key_in_parts_respond [rule_format]:
"[| Key K \<in> parts {RB}; (PB,RB,K') \<in> respond evs |] ==> Key K \<notin> used evs"
apply (erule rev_mp, erule respond.induct)
apply (auto dest: Key_not_used respond_imp_not_used)
done
text\<open>Simple inductive reasoning about responses\<close>
lemma respond_imp_responses:
"(PA,RB,KAB) \<in> respond evs ==> RB \<in> responses evs"
apply (erule respond.induct)
apply (blast intro!: respond_imp_not_used responses.intros)+
done
(** For reasoning about the encrypted portion of messages **)
lemmas RA2_analz_spies = Says_imp_spies [THEN analz.Inj]
lemma RA4_analz_spies:
"Says C' B \<lbrace>Crypt K X, X', RA\<rbrace> \<in> set evs ==> RA \<in> analz (spies evs)"
by blast
(*RA2_analz... and RA4_analz... let us treat those cases using the same
argument as for the Fake case. This is possible for most, but not all,
proofs: Fake does not invent new nonces (as in RA2), and of course Fake
messages originate from the Spy. *)
lemmas RA2_parts_spies = RA2_analz_spies [THEN analz_into_parts]
lemmas RA4_parts_spies = RA4_analz_spies [THEN analz_into_parts]
(** Theorems of the form X \<notin> parts (spies evs) imply that NOBODY
sends messages containing X! **)
(** Spy never sees another agent's shared key! (unless it's bad at start) **)
lemma Spy_see_shrK [simp]:
"evs \<in> recur ==> (Key (shrK A) \<in> parts (spies evs)) = (A \<in> bad)"
apply (erule recur.induct, auto)
txt\<open>RA3. It's ugly to call auto twice, but it seems necessary.\<close>
apply (auto dest: Key_in_parts_respond simp add: parts_insert_spies)
done
lemma Spy_analz_shrK [simp]:
"evs \<in> recur ==> (Key (shrK A) \<in> analz (spies evs)) = (A \<in> bad)"
by auto
lemma Spy_see_shrK_D [dest!]:
"[|Key (shrK A) \<in> parts (knows Spy evs); evs \<in> recur|] ==> A \<in> bad"
by (blast dest: Spy_see_shrK)
(*** Proofs involving analz ***)
(** Session keys are not used to encrypt other session keys **)
(*Version for "responses" relation. Handles case RA3 in the theorem below.
Note that it holds for *any* set H (not just "spies evs")
satisfying the inductive hypothesis.*)
lemma resp_analz_image_freshK_lemma:
"[| RB \<in> responses evs;
\<forall>K KK. KK \<subseteq> - (range shrK) -->
(Key K \<in> analz (Key`KK Un H)) =
(K \<in> KK | Key K \<in> analz H) |]
==> \<forall>K KK. KK \<subseteq> - (range shrK) -->
(Key K \<in> analz (insert RB (Key`KK Un H))) =
(K \<in> KK | Key K \<in> analz (insert RB H))"
apply (erule responses.induct)
apply (simp_all del: image_insert
add: analz_image_freshK_simps, auto)
done
text\<open>Version for the protocol. Proof is easy, thanks to the lemma.\<close>
lemma raw_analz_image_freshK:
"evs \<in> recur ==>
\<forall>K KK. KK \<subseteq> - (range shrK) -->
(Key K \<in> analz (Key`KK Un (spies evs))) =
(K \<in> KK | Key K \<in> analz (spies evs))"
apply (erule recur.induct)
apply (drule_tac [4] RA2_analz_spies,
drule_tac [5] respond_imp_responses,
drule_tac [6] RA4_analz_spies, analz_freshK, spy_analz)
txt\<open>RA3\<close>
apply (simp_all add: resp_analz_image_freshK_lemma)
done
(*Instance of the lemma with H replaced by (spies evs):
[| RB \<in> responses evs; evs \<in> recur; |]
==> KK \<subseteq> - (range shrK) -->
Key K \<in> analz (insert RB (Key`KK Un spies evs)) =
(K \<in> KK | Key K \<in> analz (insert RB (spies evs)))
*)
lemmas resp_analz_image_freshK =
resp_analz_image_freshK_lemma [OF _ raw_analz_image_freshK]
lemma analz_insert_freshK:
"[| evs \<in> recur; KAB \<notin> range shrK |]
==> (Key K \<in> analz (insert (Key KAB) (spies evs))) =
(K = KAB | Key K \<in> analz (spies evs))"
by (simp del: image_insert
add: analz_image_freshK_simps raw_analz_image_freshK)
text\<open>Everything that's hashed is already in past traffic.\<close>
lemma Hash_imp_body:
"[| Hash \<lbrace>Key(shrK A), X\<rbrace> \<in> parts (spies evs);
evs \<in> recur; A \<notin> bad |] ==> X \<in> parts (spies evs)"
apply (erule rev_mp)
apply (erule recur.induct,
drule_tac [6] RA4_parts_spies,
drule_tac [5] respond_imp_responses,
drule_tac [4] RA2_parts_spies)
txt\<open>RA3 requires a further induction\<close>
apply (erule_tac [5] responses.induct, simp_all)
txt\<open>Fake\<close>
apply (blast intro: parts_insertI)
done
(** The Nonce NA uniquely identifies A's message.
This theorem applies to steps RA1 and RA2!
Unicity is not used in other proofs but is desirable in its own right.
**)
lemma unique_NA:
"[| Hash \<lbrace>Key(shrK A), Agent A, B, NA, P\<rbrace> \<in> parts (spies evs);
Hash \<lbrace>Key(shrK A), Agent A, B',NA, P'\<rbrace> \<in> parts (spies evs);
evs \<in> recur; A \<notin> bad |]
==> B=B' & P=P'"
apply (erule rev_mp, erule rev_mp)
apply (erule recur.induct,
drule_tac [5] respond_imp_responses)
apply (force, simp_all)
txt\<open>Fake\<close>
apply blast
apply (erule_tac [3] responses.induct)
txt\<open>RA1,2: creation of new Nonce\<close>
apply simp_all
apply (blast dest!: Hash_imp_body)+
done
(*** Lemmas concerning the Server's response
(relations "respond" and "responses")
***)
lemma shrK_in_analz_respond [simp]:
"[| RB \<in> responses evs; evs \<in> recur |]
==> (Key (shrK B) \<in> analz (insert RB (spies evs))) = (B:bad)"
apply (erule responses.induct)
apply (simp_all del: image_insert
add: analz_image_freshK_simps resp_analz_image_freshK, auto)
done
lemma resp_analz_insert_lemma:
"[| Key K \<in> analz (insert RB H);
\<forall>K KK. KK \<subseteq> - (range shrK) -->
(Key K \<in> analz (Key`KK Un H)) =
(K \<in> KK | Key K \<in> analz H);
RB \<in> responses evs |]
==> (Key K \<in> parts{RB} | Key K \<in> analz H)"
apply (erule rev_mp, erule responses.induct)
apply (simp_all del: image_insert parts_image
add: analz_image_freshK_simps resp_analz_image_freshK_lemma)
txt\<open>Simplification using two distinct treatments of "image"\<close>
apply (simp add: parts_insert2, blast)
done
lemmas resp_analz_insert =
resp_analz_insert_lemma [OF _ raw_analz_image_freshK]
text\<open>The last key returned by respond indeed appears in a certificate\<close>
lemma respond_certificate:
"(Hash[Key(shrK A)] \<lbrace>Agent A, B, NA, P\<rbrace>, RA, K) \<in> respond evs
==> Crypt (shrK A) \<lbrace>Key K, B, NA\<rbrace> \<in> parts {RA}"
apply (ind_cases "(Hash[Key (shrK A)] \<lbrace>Agent A, B, NA, P\<rbrace>, RA, K) \<in> respond evs")
apply simp_all
done
(*This unicity proof differs from all the others in the HOL/Auth directory.
The conclusion isn't quite unicity but duplicity, in that there are two
possibilities. Also, the presence of two different matching messages in
the inductive step complicates the case analysis. Unusually for such proofs,
the quantifiers appear to be necessary.*)
lemma unique_lemma [rule_format]:
"(PB,RB,KXY) \<in> respond evs ==>
\<forall>A B N. Crypt (shrK A) \<lbrace>Key K, Agent B, N\<rbrace> \<in> parts {RB} -->
(\<forall>A' B' N'. Crypt (shrK A') \<lbrace>Key K, Agent B', N'\<rbrace> \<in> parts {RB} -->
(A'=A & B'=B) | (A'=B & B'=A))"
apply (erule respond.induct)
apply (simp_all add: all_conj_distrib)
apply (blast dest: respond_certificate)
done
lemma unique_session_keys:
"[| Crypt (shrK A) \<lbrace>Key K, Agent B, N\<rbrace> \<in> parts {RB};
Crypt (shrK A') \<lbrace>Key K, Agent B', N'\<rbrace> \<in> parts {RB};
(PB,RB,KXY) \<in> respond evs |]
==> (A'=A & B'=B) | (A'=B & B'=A)"
by (rule unique_lemma, auto)
(** Crucial secrecy property: Spy does not see the keys sent in msg RA3.
Does not in itself guarantee security: an attack could violate
the premises, e.g. by having A=Spy **)
lemma respond_Spy_not_see_session_key [rule_format]:
"[| (PB,RB,KAB) \<in> respond evs; evs \<in> recur |]
==> \<forall>A A' N. A \<notin> bad & A' \<notin> bad -->
Crypt (shrK A) \<lbrace>Key K, Agent A', N\<rbrace> \<in> parts{RB} -->
Key K \<notin> analz (insert RB (spies evs))"
apply (erule respond.induct)
apply (frule_tac [2] respond_imp_responses)
apply (frule_tac [2] respond_imp_not_used)
apply (simp_all del: image_insert parts_image
add: analz_image_freshK_simps split_ifs shrK_in_analz_respond
resp_analz_image_freshK parts_insert2)
txt\<open>Base case of respond\<close>
apply blast
txt\<open>Inductive step of respond\<close>
apply (intro allI conjI impI, simp_all)
txt\<open>by unicity, either @{term "B=Aa"} or @{term "B=A'"}, a contradiction
if @{term "B \<in> bad"}\<close>
apply (blast dest: unique_session_keys respond_certificate)
apply (blast dest!: respond_certificate)
apply (blast dest!: resp_analz_insert)
done
lemma Spy_not_see_session_key:
"[| Crypt (shrK A) \<lbrace>Key K, Agent A', N\<rbrace> \<in> parts (spies evs);
A \<notin> bad; A' \<notin> bad; evs \<in> recur |]
==> Key K \<notin> analz (spies evs)"
apply (erule rev_mp)
apply (erule recur.induct)
apply (drule_tac [4] RA2_analz_spies,
frule_tac [5] respond_imp_responses,
drule_tac [6] RA4_analz_spies,
simp_all add: split_ifs analz_insert_eq analz_insert_freshK)
txt\<open>Fake\<close>
apply spy_analz
txt\<open>RA2\<close>
apply blast
txt\<open>RA3\<close>
apply (simp add: parts_insert_spies)
apply (metis Key_in_parts_respond parts.Body parts.Fst resp_analz_insert
respond_Spy_not_see_session_key usedI)
txt\<open>RA4\<close>
apply blast
done
(**** Authenticity properties for Agents ****)
text\<open>The response never contains Hashes\<close>
lemma Hash_in_parts_respond:
"[| Hash \<lbrace>Key (shrK B), M\<rbrace> \<in> parts (insert RB H);
(PB,RB,K) \<in> respond evs |]
==> Hash \<lbrace>Key (shrK B), M\<rbrace> \<in> parts H"
apply (erule rev_mp)
apply (erule respond_imp_responses [THEN responses.induct], auto)
done
text\<open>Only RA1 or RA2 can have caused such a part of a message to appear.
This result is of no use to B, who cannot verify the Hash. Moreover,
it can say nothing about how recent A's message is. It might later be
used to prove B's presence to A at the run's conclusion.\<close>
lemma Hash_auth_sender [rule_format]:
"[| Hash \<lbrace>Key(shrK A), Agent A, Agent B, NA, P\<rbrace> \<in> parts(spies evs);
A \<notin> bad; evs \<in> recur |]
==> Says A B (Hash[Key(shrK A)] \<lbrace>Agent A, Agent B, NA, P\<rbrace>) \<in> set evs"
apply (unfold HPair_def)
apply (erule rev_mp)
apply (erule recur.induct,
drule_tac [6] RA4_parts_spies,
drule_tac [4] RA2_parts_spies,
simp_all)
txt\<open>Fake, RA3\<close>
apply (blast dest: Hash_in_parts_respond)+
done
(** These two results subsume (for all agents) the guarantees proved
separately for A and B in the Otway-Rees protocol.
**)
text\<open>Certificates can only originate with the Server.\<close>
lemma Cert_imp_Server_msg:
"[| Crypt (shrK A) Y \<in> parts (spies evs);
A \<notin> bad; evs \<in> recur |]
==> \<exists>C RC. Says Server C RC \<in> set evs &
Crypt (shrK A) Y \<in> parts {RC}"
apply (erule rev_mp, erule recur.induct, simp_all)
txt\<open>Fake\<close>
apply blast
txt\<open>RA1\<close>
apply blast
txt\<open>RA2: it cannot be a new Nonce, contradiction.\<close>
apply blast
txt\<open>RA3. Pity that the proof is so brittle: this step requires the rewriting,
which however would break all other steps.\<close>
apply (simp add: parts_insert_spies, blast)
txt\<open>RA4\<close>
apply blast
done
end
|
#include <math.h>
#include <stdio.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_linalg.h>
#include <Python.h>
/* Sum every element of a 3D integer array passed as a flat buffer.
 * The first MAT_DIM*MAT_DIM entries are also copied into a GSL matrix
 * and printed, apparently as a sanity check of the GSL linkage. */
int sum(int* npyArray3D, int npyLength1D, int npyLength2D, int npyLength3D)
{
    int i, j, k;
    int sum = 0;
    double dummy = sqrt(9.0);   /* presumably just to exercise libm */
    int MAT_DIM = 6;
    gsl_matrix *m = gsl_matrix_alloc(MAT_DIM, MAT_DIM);

    /* Fill and print the demo matrix, reading the flat buffer in column-major order. */
    for (i = 0; i < MAT_DIM*MAT_DIM; i++) {
        gsl_matrix_set(m, i % MAT_DIM, i / MAT_DIM, (double) npyArray3D[i]);
    }
    for (i = 0; i < MAT_DIM*MAT_DIM; i++) {
        printf("%f, ", gsl_matrix_get(m, i % MAT_DIM, i / MAT_DIM));
    }
    printf("\n");
    gsl_matrix_free(m);   /* release the GSL matrix to avoid leaking it on every call */

    /* The (i, k, j) traversal still visits each element exactly once,
       so the accumulated total is the sum over the whole array. */
    for (i = 0; i < npyLength1D; i++)
        for (j = 0; j < npyLength2D; j++)
            for (k = 0; k < npyLength3D; k++)
                sum += npyArray3D[i*npyLength3D*npyLength2D + k*npyLength2D + j];
    printf("%f\n", dummy);
    return sum;
}
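/*
 * Minimal, hypothetical driver (not part of the original source) showing how
 * sum() might be exercised from plain C.  The 6x6x1 shape is an assumption,
 * chosen so that the first 36 entries also fill the GSL demo matrix above;
 * it is guarded out by default so it does not clash with an extension build.
 */
#ifdef SUM_DEMO
int main(void)
{
    int data[36];
    int i;
    for (i = 0; i < 36; i++)
        data[i] = i;                                /* 0 + 1 + ... + 35 = 630 */
    printf("total = %d\n", sum(data, 6, 6, 1));     /* expect 630 */
    return 0;
}
#endif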
/* Determinant of a 6x6 matrix passed from Python as a flat list of numbers
 * (read in column-major order), computed via a GSL LU decomposition. */
double get_det(PyObject *A)
{
    int MAT_DIM = 6;
    int i, signum;
    double det;
    int nInts = (int) PyList_Size(A);
    gsl_matrix *m = gsl_matrix_alloc(MAT_DIM, MAT_DIM);
    gsl_permutation *p = gsl_permutation_alloc(m->size1);

    /* Never read past the 36 entries the matrix can hold. */
    if (nInts > MAT_DIM * MAT_DIM)
        nInts = MAT_DIM * MAT_DIM;
    for (i = 0; i < nInts; i++)
    {
        PyObject *oo = PyList_GetItem(A, i);
        /* PyFloat_AsDouble handles both int and float list entries. */
        gsl_matrix_set(m, i % MAT_DIM, i / MAT_DIM, PyFloat_AsDouble(oo));
    }
    gsl_linalg_LU_decomp(m, p, &signum);
    det = gsl_linalg_LU_det(m, signum);

    /* Free the GSL objects before returning to avoid memory leaks. */
    gsl_permutation_free(p);
    gsl_matrix_free(m);
    return det;
}
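/*
 * Hypothetical usage sketch (not part of the original source): because
 * get_det takes a PyObject*, it is presumably meant to be exposed through a
 * CPython extension module.  The wrapper below, the module name "gsldet",
 * and the method table are illustrative assumptions only.
 */
static PyObject *py_get_det(PyObject *self, PyObject *args)
{
    PyObject *list;
    if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &list))
        return NULL;
    return PyFloat_FromDouble(get_det(list));
}

static PyMethodDef GslDetMethods[] = {
    {"get_det", py_get_det, METH_VARARGS,
     "Determinant of a 6x6 matrix given as a flat list of 36 numbers."},
    {NULL, NULL, 0, NULL}
};

static struct PyModuleDef gsldetmodule = {
    PyModuleDef_HEAD_INIT, "gsldet", NULL, -1, GslDetMethods
};

PyMODINIT_FUNC PyInit_gsldet(void)
{
    return PyModule_Create(&gsldetmodule);
}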
|
American Youth Circus Organization Festival!
Come try awesome circus stuff. No experience necessary!
* Showcase performance by circus kids 21 and under. Be a circus star or watch kids your age perform!
Participation is limited to 150 people, so sign up before we sell out!
Spread the word. See you October 21!
Sign up by September 8 for an earlybird discount!
We also need volunteers to lead workshops and help with the event. Please contact [email protected] if you are interested in finding out more. |
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
⊢ IsTensorProduct (mk R M N)
[PROOFSTEP]
delta IsTensorProduct
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
⊢ Function.Bijective ↑(lift (mk R M N))
[PROOFSTEP]
convert_to Function.Bijective (LinearMap.id : M ⊗[R] N →ₗ[R] M ⊗[R] N) using 2
[GOAL]
case h.e'_3.h.e'_5
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
⊢ lift (mk R M N) = LinearMap.id
[PROOFSTEP]
apply TensorProduct.ext'
[GOAL]
case h.e'_3.h.e'_5.H
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
⊢ ∀ (x : M) (y : N), ↑(lift (mk R M N)) (x ⊗ₜ[R] y) = ↑LinearMap.id (x ⊗ₜ[R] y)
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
⊢ Function.Bijective ↑LinearMap.id
[PROOFSTEP]
exact Function.bijective_id
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
x₁ : M₁
x₂ : M₂
⊢ ↑(LinearEquiv.symm (equiv h)) (↑(↑f x₁) x₂) = x₁ ⊗ₜ[R] x₂
[PROOFSTEP]
apply h.equiv.injective
[GOAL]
case a
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
x₁ : M₁
x₂ : M₂
⊢ ↑(equiv h) (↑(LinearEquiv.symm (equiv h)) (↑(↑f x₁) x₂)) = ↑(equiv h) (x₁ ⊗ₜ[R] x₂)
[PROOFSTEP]
refine' (h.equiv.apply_symm_apply _).trans _
[GOAL]
case a
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
x₁ : M₁
x₂ : M₂
⊢ ↑(↑f x₁) x₂ = ↑(equiv h) (x₁ ⊗ₜ[R] x₂)
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
f' : M₁ →ₗ[R] M₂ →ₗ[R] M'
x₁ : M₁
x₂ : M₂
⊢ ↑(lift h f') (↑(↑f x₁) x₂) = ↑(↑f' x₁) x₂
[PROOFSTEP]
delta IsTensorProduct.lift
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
f' : M₁ →ₗ[R] M₂ →ₗ[R] M'
x₁ : M₁
x₂ : M₂
⊢ ↑(LinearMap.comp (TensorProduct.lift f') ↑(LinearEquiv.symm (equiv h))) (↑(↑f x₁) x₂) = ↑(↑f' x₁) x₂
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
hf : IsTensorProduct f
hg : IsTensorProduct g
i₁ : M₁ →ₗ[R] N₁
i₂ : M₂ →ₗ[R] N₂
x₁ : M₁
x₂ : M₂
⊢ ↑(map hf hg i₁ i₂) (↑(↑f x₁) x₂) = ↑(↑g (↑i₁ x₁)) (↑i₂ x₂)
[PROOFSTEP]
delta IsTensorProduct.map
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
hf : IsTensorProduct f
hg : IsTensorProduct g
i₁ : M₁ →ₗ[R] N₁
i₂ : M₂ →ₗ[R] N₂
x₁ : M₁
x₂ : M₂
⊢ ↑(LinearMap.comp (↑(equiv hg)) (LinearMap.comp (TensorProduct.map i₁ i₂) ↑(LinearEquiv.symm (equiv hf))))
(↑(↑f x₁) x₂) =
↑(↑g (↑i₁ x₁)) (↑i₂ x₂)
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
⊢ C m
[PROOFSTEP]
rw [← h.equiv.right_inv m]
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
⊢ C (AddHom.toFun (↑(equiv h)).toAddHom (LinearEquiv.invFun (equiv h) m))
[PROOFSTEP]
generalize h.equiv.invFun m = y
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
y : M₁ ⊗[R] M₂
⊢ C (AddHom.toFun (↑(equiv h)).toAddHom y)
[PROOFSTEP]
change C (TensorProduct.lift f y)
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
y : M₁ ⊗[R] M₂
⊢ C (↑(TensorProduct.lift f) y)
[PROOFSTEP]
induction y using TensorProduct.induction_on with
| zero => rwa [map_zero]
| tmul _ _ =>
rw [TensorProduct.lift.tmul]
apply htmul
| add _ _ _ _ =>
rw [map_add]
apply hadd <;> assumption
[GOAL]
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
y : M₁ ⊗[R] M₂
⊢ C (↑(TensorProduct.lift f) y)
[PROOFSTEP]
induction y using TensorProduct.induction_on with
| zero => rwa [map_zero]
| tmul _ _ =>
rw [TensorProduct.lift.tmul]
apply htmul
| add _ _ _ _ =>
rw [map_add]
apply hadd <;> assumption
[GOAL]
case zero
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
⊢ C (↑(TensorProduct.lift f) 0)
[PROOFSTEP]
| zero => rwa [map_zero]
[GOAL]
case zero
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
⊢ C (↑(TensorProduct.lift f) 0)
[PROOFSTEP]
rwa [map_zero]
[GOAL]
case tmul
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ : M₁
y✝ : M₂
⊢ C (↑(TensorProduct.lift f) (x✝ ⊗ₜ[R] y✝))
[PROOFSTEP]
| tmul _ _ =>
rw [TensorProduct.lift.tmul]
apply htmul
[GOAL]
case tmul
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ : M₁
y✝ : M₂
⊢ C (↑(TensorProduct.lift f) (x✝ ⊗ₜ[R] y✝))
[PROOFSTEP]
rw [TensorProduct.lift.tmul]
[GOAL]
case tmul
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ : M₁
y✝ : M₂
⊢ C (↑(↑f x✝) y✝)
[PROOFSTEP]
apply htmul
[GOAL]
case add
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ y✝ : M₁ ⊗[R] M₂
a✝¹ : C (↑(TensorProduct.lift f) x✝)
a✝ : C (↑(TensorProduct.lift f) y✝)
⊢ C (↑(TensorProduct.lift f) (x✝ + y✝))
[PROOFSTEP]
| add _ _ _ _ =>
rw [map_add]
apply hadd <;> assumption
[GOAL]
case add
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ y✝ : M₁ ⊗[R] M₂
a✝¹ : C (↑(TensorProduct.lift f) x✝)
a✝ : C (↑(TensorProduct.lift f) y✝)
⊢ C (↑(TensorProduct.lift f) (x✝ + y✝))
[PROOFSTEP]
rw [map_add]
[GOAL]
case add
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ y✝ : M₁ ⊗[R] M₂
a✝¹ : C (↑(TensorProduct.lift f) x✝)
a✝ : C (↑(TensorProduct.lift f) y✝)
⊢ C (↑(TensorProduct.lift f) x✝ + ↑(TensorProduct.lift f) y✝)
[PROOFSTEP]
apply hadd
[GOAL]
case add.a
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ y✝ : M₁ ⊗[R] M₂
a✝¹ : C (↑(TensorProduct.lift f) x✝)
a✝ : C (↑(TensorProduct.lift f) y✝)
⊢ C (↑(TensorProduct.lift f) x✝)
[PROOFSTEP]
assumption
[GOAL]
case add.a
R : Type u_1
inst✝¹⁴ : CommRing R
M₁ : Type u_2
M₂ : Type u_3
M : Type u_4
M' : Type u_5
inst✝¹³ : AddCommMonoid M₁
inst✝¹² : AddCommMonoid M₂
inst✝¹¹ : AddCommMonoid M
inst✝¹⁰ : AddCommMonoid M'
inst✝⁹ : Module R M₁
inst✝⁸ : Module R M₂
inst✝⁷ : Module R M
inst✝⁶ : Module R M'
f : M₁ →ₗ[R] M₂ →ₗ[R] M
N₁ : Type u_6
N₂ : Type u_7
N : Type u_8
inst✝⁵ : AddCommMonoid N₁
inst✝⁴ : AddCommMonoid N₂
inst✝³ : AddCommMonoid N
inst✝² : Module R N₁
inst✝¹ : Module R N₂
inst✝ : Module R N
g : N₁ →ₗ[R] N₂ →ₗ[R] N
h : IsTensorProduct f
C : M → Prop
m : M
h0 : C 0
htmul : ∀ (x : M₁) (y : M₂), C (↑(↑f x) y)
hadd : ∀ (x y : M), C x → C y → C (x + y)
x✝ y✝ : M₁ ⊗[R] M₂
a✝¹ : C (↑(TensorProduct.lift f) x✝)
a✝ : C (↑(TensorProduct.lift f) y✝)
⊢ C (↑(TensorProduct.lift f) y✝)
[PROOFSTEP]
assumption
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
⊢ AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id S) r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
let F := ((Algebra.linearMap S <| Module.End S (M →ₗ[R] Q)).flip g).restrictScalars R
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
⊢ AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id S) r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
have hF : ∀ (s : S) (m : M), h.lift F (s • f m) = s • g m := h.lift_eq F
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
⊢ AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id S) r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
change h.lift F (r • x) = r • h.lift F x
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
⊢ ↑(IsTensorProduct.lift h F) (r • x) = r • ↑(IsTensorProduct.lift h F) x
[PROOFSTEP]
apply h.inductionOn x
[GOAL]
case h0
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
⊢ ↑(IsTensorProduct.lift h F) (r • 0) = r • ↑(IsTensorProduct.lift h F) 0
[PROOFSTEP]
rw [smul_zero, map_zero, smul_zero]
[GOAL]
case htmul
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
⊢ ∀ (x : S) (y : M),
↑(IsTensorProduct.lift h F)
(r • ↑(↑(↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] N)))) f)) x) y) =
r •
↑(IsTensorProduct.lift h F)
(↑(↑(↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] N)))) f)) x) y)
[PROOFSTEP]
intro s m
[GOAL]
case htmul
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
s : S
m : M
⊢ ↑(IsTensorProduct.lift h F)
(r • ↑(↑(↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] N)))) f)) s) m) =
r •
↑(IsTensorProduct.lift h F) (↑(↑(↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] N)))) f)) s) m)
[PROOFSTEP]
change h.lift F (r • s • f m) = r • h.lift F (s • f m)
[GOAL]
case htmul
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
s : S
m : M
⊢ ↑(IsTensorProduct.lift h F) (r • s • ↑f m) = r • ↑(IsTensorProduct.lift h F) (s • ↑f m)
[PROOFSTEP]
rw [← mul_smul, hF, hF]
[GOAL]
case htmul
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
s : S
m : M
⊢ (r * s) • ↑g m = r • s • ↑g m
[PROOFSTEP]
rw [mul_smul]
-- Porting note: this line does nothing
[GOAL]
case htmul
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
s : S
m : M
⊢ (r * s) • ↑g m = r • s • ↑g m
[PROOFSTEP]
apply mul_smul
[GOAL]
case hadd
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
⊢ ∀ (x y : N),
↑(IsTensorProduct.lift h F) (r • x) = r • ↑(IsTensorProduct.lift h F) x →
↑(IsTensorProduct.lift h F) (r • y) = r • ↑(IsTensorProduct.lift h F) y →
↑(IsTensorProduct.lift h F) (r • (x + y)) = r • ↑(IsTensorProduct.lift h F) (x + y)
[PROOFSTEP]
intro x₁ x₂ e₁ e₂
[GOAL]
case hadd
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
src✝ : N →ₗ[R] Q := IsTensorProduct.lift h (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g))
r : S
x : N
F : S →ₗ[R] M →ₗ[R] Q := ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] Q)))) g)
hF : ∀ (s : S) (m : M), ↑(IsTensorProduct.lift h F) (s • ↑f m) = s • ↑g m
x₁ x₂ : N
e₁ : ↑(IsTensorProduct.lift h F) (r • x₁) = r • ↑(IsTensorProduct.lift h F) x₁
e₂ : ↑(IsTensorProduct.lift h F) (r • x₂) = r • ↑(IsTensorProduct.lift h F) x₂
⊢ ↑(IsTensorProduct.lift h F) (r • (x₁ + x₂)) = r • ↑(IsTensorProduct.lift h F) (x₁ + x₂)
[PROOFSTEP]
rw [map_add, smul_add, map_add, smul_add, e₁, e₂]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
x : M
⊢ ↑(lift h g) (↑f x) = ↑g x
[PROOFSTEP]
have hF : ∀ (s : S) (m : M), h.lift g (s • f m) = s • g m := h.lift_eq _
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
x : M
hF : ∀ (s : S) (m : M), ↑(lift h g) (s • ↑f m) = s • ↑g m
⊢ ↑(lift h g) (↑f x) = ↑g x
[PROOFSTEP]
convert hF 1 x
[GOAL]
case h.e'_2.h.e'_6
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
x : M
hF : ∀ (s : S) (m : M), ↑(lift h g) (s • ↑f m) = s • ↑g m
⊢ ↑f x = 1 • ↑f x
[PROOFSTEP]
rw [one_smul]
[GOAL]
case h.e'_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁴ : AddCommMonoid M
inst✝¹³ : AddCommMonoid N
inst✝¹² : CommRing R
inst✝¹¹ : CommRing S
inst✝¹⁰ : Algebra R S
inst✝⁹ : Module R M
inst✝⁸ : Module R N
inst✝⁷ : Module S N
inst✝⁶ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝⁵ : AddCommMonoid P
inst✝⁴ : Module R P
inst✝³ : AddCommMonoid Q
inst✝² : Module S Q
inst✝¹ : Module R Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
x : M
hF : ∀ (s : S) (m : M), ↑(lift h g) (s • ↑f m) = s • ↑g m
⊢ ↑g x = 1 • ↑g x
[PROOFSTEP]
rw [one_smul]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
⊢ g₁ = g₂
[PROOFSTEP]
ext x
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
⊢ ↑g₁ x = ↑g₂ x
[PROOFSTEP]
refine h.inductionOn x ?_ ?_ ?_ ?_
[GOAL]
case h.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
⊢ ↑g₁ 0 = ↑g₂ 0
[PROOFSTEP]
rw [map_zero, map_zero]
[GOAL]
case h.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
⊢ ∀ (m : M), ↑g₁ (↑f m) = ↑g₂ (↑f m)
[PROOFSTEP]
assumption
[GOAL]
case h.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
⊢ ∀ (s : S) (n : N), ↑g₁ n = ↑g₂ n → ↑g₁ (s • n) = ↑g₂ (s • n)
[PROOFSTEP]
intro s n e'
[GOAL]
case h.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
s : S
n : N
e' : ↑g₁ n = ↑g₂ n
⊢ ↑g₁ (s • n) = ↑g₂ (s • n)
[PROOFSTEP]
rw [g₁.map_smul, g₂.map_smul, e']
[GOAL]
case h.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x : N
⊢ ∀ (n₁ n₂ : N), ↑g₁ n₁ = ↑g₂ n₁ → ↑g₁ n₂ = ↑g₂ n₂ → ↑g₁ (n₁ + n₂) = ↑g₂ (n₁ + n₂)
[PROOFSTEP]
intro x y e₁ e₂
[GOAL]
case h.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
g₁ g₂ : N →ₗ[S] Q
e : ∀ (x : M), ↑g₁ (↑f x) = ↑g₂ (↑f x)
x✝ x y : N
e₁ : ↑g₁ x = ↑g₂ x
e₂ : ↑g₁ y = ↑g₂ y
⊢ ↑g₁ (x + y) = ↑g₂ (x + y)
[PROOFSTEP]
rw [map_add, map_add, e₁, e₂]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
⊢ IsBaseChange S (↑(mk R S M) 1)
[PROOFSTEP]
delta IsBaseChange
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
⊢ IsTensorProduct (↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] S ⊗[R] M)))) (↑(mk R S M) 1)))
[PROOFSTEP]
convert TensorProduct.isTensorProduct R S M using 1
[GOAL]
case h.e'_12
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
⊢ ↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] S ⊗[R] M)))) (↑(mk R S M) 1)) = mk R S M
[PROOFSTEP]
ext s x
[GOAL]
case h.e'_12.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
s : S
x : M
⊢ ↑(↑(↑R (↑(LinearMap.flip (Algebra.linearMap S (Module.End S (M →ₗ[R] S ⊗[R] M)))) (↑(mk R S M) 1))) s) x =
↑(↑(mk R S M) s) x
[PROOFSTEP]
change s • (1 : S) ⊗ₜ[R] x = s ⊗ₜ[R] x
[GOAL]
case h.e'_12.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
s : S
x : M
⊢ s • 1 ⊗ₜ[R] x = s ⊗ₜ[R] x
[PROOFSTEP]
rw [TensorProduct.smul_tmul']
[GOAL]
case h.e'_12.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
s : S
x : M
⊢ (s • 1) ⊗ₜ[R] x = s ⊗ₜ[R] x
[PROOFSTEP]
congr 1
[GOAL]
case h.e'_12.h.h.e_m
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
s : S
x : M
⊢ s • 1 = s
[PROOFSTEP]
exact mul_one _
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x : S ⊗[R] M
⊢ AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id S) r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
change h.equiv (r • x) = r • h.equiv x
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x : S ⊗[R] M
⊢ ↑(IsTensorProduct.equiv h) (r • x) = r • ↑(IsTensorProduct.equiv h) x
[PROOFSTEP]
refine TensorProduct.induction_on x ?_ ?_ ?_
[GOAL]
case refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x : S ⊗[R] M
⊢ ↑(IsTensorProduct.equiv h) (r • 0) = r • ↑(IsTensorProduct.equiv h) 0
[PROOFSTEP]
rw [smul_zero, map_zero, smul_zero]
[GOAL]
case refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x : S ⊗[R] M
⊢ ∀ (x : S) (y : M), ↑(IsTensorProduct.equiv h) (r • x ⊗ₜ[R] y) = r • ↑(IsTensorProduct.equiv h) (x ⊗ₜ[R] y)
[PROOFSTEP]
intro x y
[GOAL]
case refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x✝ : S ⊗[R] M
x : S
y : M
⊢ ↑(IsTensorProduct.equiv h) (r • x ⊗ₜ[R] y) = r • ↑(IsTensorProduct.equiv h) (x ⊗ₜ[R] y)
[PROOFSTEP]
simp only [Algebra.linearMap_apply, lift.tmul, smul_eq_mul, LinearMap.mul_apply, LinearMap.smul_apply,
IsTensorProduct.equiv_apply, Module.algebraMap_end_apply, _root_.map_mul, smul_tmul', eq_self_iff_true,
LinearMap.coe_restrictScalars, LinearMap.flip_apply]
[GOAL]
case refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x : S ⊗[R] M
⊢ ∀ (x y : S ⊗[R] M),
↑(IsTensorProduct.equiv h) (r • x) = r • ↑(IsTensorProduct.equiv h) x →
↑(IsTensorProduct.equiv h) (r • y) = r • ↑(IsTensorProduct.equiv h) y →
↑(IsTensorProduct.equiv h) (r • (x + y)) = r • ↑(IsTensorProduct.equiv h) (x + y)
[PROOFSTEP]
intro x y hx hy
[GOAL]
case refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
src✝ : S ⊗[R] M ≃ₗ[R] N := IsTensorProduct.equiv h
r : S
x✝ x y : S ⊗[R] M
hx : ↑(IsTensorProduct.equiv h) (r • x) = r • ↑(IsTensorProduct.equiv h) x
hy : ↑(IsTensorProduct.equiv h) (r • y) = r • ↑(IsTensorProduct.equiv h) y
⊢ ↑(IsTensorProduct.equiv h) (r • (x + y)) = r • ↑(IsTensorProduct.equiv h) (x + y)
[PROOFSTEP]
rw [map_add, smul_add, map_add, smul_add, hx, hy]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
m : M
⊢ ↑(LinearEquiv.symm (equiv h)) (↑f m) = 1 ⊗ₜ[R] m
[PROOFSTEP]
rw [h.equiv.symm_apply_eq, h.equiv_tmul, one_smul]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
⊢ IsBaseChange S f
[PROOFSTEP]
obtain ⟨g, hg, -⟩ := h (ULift.{v₂} <| S ⊗[R] M) (ULift.moduleEquiv.symm.toLinearMap.comp <| TensorProduct.mk R S M 1)
[GOAL]
case intro.intro
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
⊢ IsBaseChange S f
[PROOFSTEP]
let f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift
(((LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f).restrictScalars R)
[GOAL]
case intro.intro
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
⊢ IsBaseChange S f
[PROOFSTEP]
change Function.Bijective f'
[GOAL]
case intro.intro
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
⊢ Function.Bijective ↑f'
[PROOFSTEP]
let f'' : S ⊗[R] M →ₗ[S] N :=
by
refine'
{ f' with
toFun := f'
map_smul' := fun s x => TensorProduct.induction_on x _ (fun s' y => smul_assoc s s' _) fun x y hx hy => _ }
· dsimp; rw [map_zero, smul_zero, map_zero, smul_zero]
· dsimp at *; rw [smul_add, map_add, map_add, smul_add, hx, hy]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
⊢ S ⊗[R] M →ₗ[S] N
[PROOFSTEP]
refine'
{ f' with
toFun := f'
map_smul' := fun s x => TensorProduct.induction_on x _ (fun s' y => smul_assoc s s' _) fun x y hx hy => _ }
[GOAL]
case refine'_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
s : S
x : S ⊗[R] M
⊢ AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • 0) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
0
[PROOFSTEP]
dsimp
[GOAL]
case refine'_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
s : S
x : S ⊗[R] M
⊢ ↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
(s • 0) =
s •
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
0
[PROOFSTEP]
rw [map_zero, smul_zero, map_zero, smul_zero]
[GOAL]
case refine'_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
s : S
x✝ x y : S ⊗[R] M
hx :
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x
hy :
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • y) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
y
⊢ AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • (x + y)) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(x + y)
[PROOFSTEP]
dsimp at *
[GOAL]
case refine'_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
s : S
x✝ x y : S ⊗[R] M
hx :
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
(s • x) =
s •
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
x
hy :
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
(s • y) =
s •
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
y
⊢ ↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
(s • (x + y)) =
s •
↑(TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f)))
(x + y)
[PROOFSTEP]
rw [smul_add, map_add, map_add, smul_add, hx, hy]
[GOAL]
case intro.intro
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
hg : LinearMap.comp (↑R g) f = LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (↑(mk R S M) 1)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
⊢ Function.Bijective ↑f'
[PROOFSTEP]
simp_rw [FunLike.ext_iff, LinearMap.comp_apply, LinearMap.restrictScalars_apply] at hg
[GOAL]
case intro.intro
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
⊢ Function.Bijective ↑f'
[PROOFSTEP]
let fe : S ⊗[R] M ≃ₗ[S] N := LinearEquiv.ofLinear f'' (ULift.moduleEquiv.toLinearMap.comp g) ?_ ?_
[GOAL]
case intro.intro.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
fe : S ⊗[R] M ≃ₗ[S] N :=
LinearEquiv.ofLinear f'' (LinearMap.comp (↑ULift.moduleEquiv) g) ?intro.intro.refine_1 ?intro.intro.refine_2
⊢ Function.Bijective ↑f'
[PROOFSTEP]
exact fe.bijective
[GOAL]
case intro.intro.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
⊢ LinearMap.comp f'' (LinearMap.comp (↑ULift.moduleEquiv) g) = LinearMap.id
[PROOFSTEP]
rw [← LinearMap.cancel_left (ULift.moduleEquiv : ULift.{max v₁ v₃} N ≃ₗ[S] N).symm.injective]
[GOAL]
case intro.intro.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
⊢ LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) (LinearMap.comp f'' (LinearMap.comp (↑ULift.moduleEquiv) g)) =
LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) LinearMap.id
[PROOFSTEP]
refine' (h (ULift.{max v₁ v₃} N) <| ULift.moduleEquiv.symm.toLinearMap.comp f).unique _ rfl
[GOAL]
case intro.intro.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
⊢ LinearMap.comp
(↑R
(LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv))
(LinearMap.comp f'' (LinearMap.comp (↑ULift.moduleEquiv) g))))
f =
LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) f
[PROOFSTEP]
ext x
[GOAL]
case intro.intro.refine_1.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
x : M
⊢ (↑(LinearMap.comp
(↑R
(LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv))
(LinearMap.comp f'' (LinearMap.comp (↑ULift.moduleEquiv) g))))
f)
x).down =
(↑(LinearMap.comp (↑(LinearEquiv.symm ULift.moduleEquiv)) f) x).down
[PROOFSTEP]
simp only [LinearMap.comp_apply, LinearMap.restrictScalars_apply, hg]
[GOAL]
case intro.intro.refine_1.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
x : M
⊢ (↑↑(LinearEquiv.symm ULift.moduleEquiv)
(↑{
toAddHom :=
{
toFun :=
↑(TensorProduct.lift
(↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))),
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) =
AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) =
AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
(↑↑ULift.moduleEquiv (↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x))))).down =
(↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑f x)).down
[PROOFSTEP]
apply one_smul
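-- Editorial sketch (assumes `import Mathlib`): `one_smul` is exactly the statement
-- that the unit of `S` acts trivially, which is what closes the goal above.
example (S N : Type*) [CommRing S] [AddCommMonoid N] [Module S N] (n : N) :
    (1 : S) • n = n :=
  one_smul S n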
[GOAL]
case intro.intro.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
⊢ LinearMap.comp (LinearMap.comp (↑ULift.moduleEquiv) g) f'' = LinearMap.id
[PROOFSTEP]
ext x
[GOAL]
case intro.intro.refine_2.a.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
x : M
⊢ ↑(↑(AlgebraTensorModule.curry (LinearMap.comp (LinearMap.comp (↑ULift.moduleEquiv) g) f'')) 1) x =
↑(↑(AlgebraTensorModule.curry LinearMap.id) 1) x
[PROOFSTEP]
change (g <| (1 : S) • f x).down = _
[GOAL]
case intro.intro.refine_2.a.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
x : M
⊢ (↑g (1 • ↑f x)).down = ↑(↑(AlgebraTensorModule.curry LinearMap.id) 1) x
[PROOFSTEP]
rw [one_smul, hg]
[GOAL]
case intro.intro.refine_2.a.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h :
∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
g : N →ₗ[S] ULift (S ⊗[R] M)
f' : S ⊗[R] M →ₗ[R] N :=
TensorProduct.lift (↑R (↑(LinearMap.flip (AlgHom.toLinearMap (Algebra.ofId S (Module.End S (M →ₗ[R] N))))) f))
f'' : S ⊗[R] M →ₗ[S] N :=
{
toAddHom :=
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) },
map_smul' :=
(_ :
∀ (s : S) (x : S ⊗[R] M),
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
(s • x) =
↑(RingHom.id S) s •
AddHom.toFun
{ toFun := ↑f',
map_add' :=
(_ :
∀ (x y : S ⊗[R] M),
AddHom.toFun f'.toAddHom (x + y) = AddHom.toFun f'.toAddHom x + AddHom.toFun f'.toAddHom y) }
x) }
hg : ∀ (x : M), ↑g (↑f x) = ↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)
x : M
⊢ (↑↑(LinearEquiv.symm ULift.moduleEquiv) (↑(↑(mk R S M) 1) x)).down = ↑(↑(AlgebraTensorModule.curry LinearMap.id) 1) x
[PROOFSTEP]
rfl
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
h : IsBaseChange S f
⊢ ∀ (Q : Type (max v₁ v₂ v₃)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module S Q]
[inst_3 : IsScalarTower R S Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') f = g
[PROOFSTEP]
intros Q _ _ _ _ g
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹⁶ : AddCommMonoid M
inst✝¹⁵ : AddCommMonoid N
inst✝¹⁴ : CommRing R
inst✝¹³ : CommRing S
inst✝¹² : Algebra R S
inst✝¹¹ : Module R M
inst✝¹⁰ : Module R N
inst✝⁹ : Module S N
inst✝⁸ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝⁷ : AddCommMonoid P
inst✝⁶ : Module R P
inst✝⁵ : AddCommMonoid Q✝
inst✝⁴ : Module S Q✝
h : IsBaseChange S f
Q : Type (max v₁ v₂ v₃)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module S Q
inst✝ : IsScalarTower R S Q
g : M →ₗ[R] Q
⊢ ∃! g', LinearMap.comp (↑R g') f = g
[PROOFSTEP]
exact ⟨h.lift g, h.lift_comp g, fun g' e => h.algHom_ext' _ _ (e.trans (h.lift_comp g).symm)⟩
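-- Editorial sketch (assumes `import Mathlib`): the witness above packages the
-- universal property of `IsBaseChange` via `lift` and `lift_comp`; pointwise this
-- is the simp lemma `IsBaseChange.lift_eq` (also used later in this trace),
-- illustrated here on a sketch of the same setup.
example (R S M N Q : Type*) [CommRing R] [CommRing S] [Algebra R S]
    [AddCommMonoid M] [Module R M] [AddCommMonoid N] [Module R N] [Module S N]
    [IsScalarTower R S N] [AddCommMonoid Q] [Module R Q] [Module S Q]
    [IsScalarTower R S Q] (f : M →ₗ[R] N) (h : IsBaseChange S f)
    (g : M →ₗ[R] Q) (x : M) :
    h.lift g (f x) = g x := by
  simp [IsBaseChange.lift_eq]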
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
e : M ≃ₗ[R] N
⊢ IsBaseChange R ↑e
[PROOFSTEP]
apply IsBaseChange.of_lift_unique
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q
inst✝ : Module S Q
e : M ≃ₗ[R] N
⊢ ∀ (Q : Type (max v₁ v₂ u_1)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module R Q]
[inst_3 : IsScalarTower R R Q] (g : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') ↑e = g
[PROOFSTEP]
intro Q I₁ I₂ I₃ I₄ g
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
⊢ ∃! g', LinearMap.comp (↑R g') ↑e = g
[PROOFSTEP]
have : I₂ = I₃ := by
ext r q
show (by let _ := I₂; exact r • q) = (by let _ := I₃; exact r • q)
dsimp
rw [← one_smul R q, smul_smul, ← @smul_assoc _ _ _ (id _) (id _) (id _) I₄, smul_eq_mul, mul_one]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
⊢ I₂ = I₃
[PROOFSTEP]
ext r q
[GOAL]
case smul.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
⊢ SMul.smul r q = SMul.smul r q
[PROOFSTEP]
show (by let _ := I₂; exact r • q) = (by let _ := I₃; exact r • q)
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
⊢ ?m.507040
[PROOFSTEP]
let _ := I₂
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
x✝ : Module R Q := I₂
⊢ ?m.507040
[PROOFSTEP]
exact r • q
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
⊢ Q
[PROOFSTEP]
let _ := I₃
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
x✝ : Module R Q := I₃
⊢ Q
[PROOFSTEP]
exact r • q
[GOAL]
case smul.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
⊢ (let x := I₂;
r • q) =
let x := I₃;
r • q
[PROOFSTEP]
dsimp
[GOAL]
case smul.h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
r : R
q : Q
⊢ r • q = r • q
[PROOFSTEP]
rw [← one_smul R q, smul_smul, ← @smul_assoc _ _ _ (id _) (id _) (id _) I₄, smul_eq_mul, mul_one]
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ I₃ : Module R Q
I₄ : IsScalarTower R R Q
g : M →ₗ[R] Q
this : I₂ = I₃
⊢ ∃! g', LinearMap.comp (↑R g') ↑e = g
[PROOFSTEP]
cases this
[GOAL]
case h.refl
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
g : M →ₗ[R] Q
I₄ : IsScalarTower R R Q
⊢ ∃! g', LinearMap.comp (↑R g') ↑e = g
[PROOFSTEP]
refine'
⟨g.comp e.symm.toLinearMap, by
ext
simp, _⟩
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
g : M →ₗ[R] Q
I₄ : IsScalarTower R R Q
⊢ (fun g' => LinearMap.comp (↑R g') ↑e = g) (LinearMap.comp g ↑(LinearEquiv.symm e))
[PROOFSTEP]
ext
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
g : M →ₗ[R] Q
I₄ : IsScalarTower R R Q
x✝ : M
⊢ ↑(LinearMap.comp (↑R (LinearMap.comp g ↑(LinearEquiv.symm e))) ↑e) x✝ = ↑g x✝
[PROOFSTEP]
simp
[GOAL]
case h.refl
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
g : M →ₗ[R] Q
I₄ : IsScalarTower R R Q
⊢ ∀ (y : N →ₗ[R] Q), (fun g' => LinearMap.comp (↑R g') ↑e = g) y → y = LinearMap.comp g ↑(LinearEquiv.symm e)
[PROOFSTEP]
rintro y (rfl : _ = _)
[GOAL]
case h.refl
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
I₄ : IsScalarTower R R Q
y : N →ₗ[R] Q
⊢ y = LinearMap.comp (LinearMap.comp (↑R y) ↑e) ↑(LinearEquiv.symm e)
[PROOFSTEP]
ext
[GOAL]
case h.refl.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝¹² : AddCommMonoid M
inst✝¹¹ : AddCommMonoid N
inst✝¹⁰ : CommRing R
inst✝⁹ : CommRing S
inst✝⁸ : Algebra R S
inst✝⁷ : Module R M
inst✝⁶ : Module R N
inst✝⁵ : Module S N
inst✝⁴ : IsScalarTower R S N
f : M →ₗ[R] N
h : IsBaseChange S f
P : Type u_2
Q✝ : Type u_3
inst✝³ : AddCommMonoid P
inst✝² : Module R P
inst✝¹ : AddCommMonoid Q✝
inst✝ : Module S Q✝
e : M ≃ₗ[R] N
Q : Type (max v₁ v₂ u_1)
I₁ : AddCommMonoid Q
I₂ : Module R Q
I₄ : IsScalarTower R R Q
y : N →ₗ[R] Q
x✝ : N
⊢ ↑y x✝ = ↑(LinearMap.comp (LinearMap.comp (↑R y) ↑e) ↑(LinearEquiv.symm e)) x✝
[PROOFSTEP]
simp
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²³ : AddCommMonoid M
inst✝²² : AddCommMonoid N
inst✝²¹ : CommRing R
inst✝²⁰ : CommRing S
inst✝¹⁹ : Algebra R S
inst✝¹⁸ : Module R M
inst✝¹⁷ : Module R N
inst✝¹⁶ : Module S N
inst✝¹⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝¹⁴ : AddCommMonoid P
inst✝¹³ : Module R P
inst✝¹² : AddCommMonoid Q
inst✝¹¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁰ : CommRing T
inst✝⁹ : Algebra R T
inst✝⁸ : Algebra S T
inst✝⁷ : IsScalarTower R S T
inst✝⁶ : AddCommMonoid O
inst✝⁵ : Module R O
inst✝⁴ : Module S O
inst✝³ : Module T O
inst✝² : IsScalarTower S T O
inst✝¹ : IsScalarTower R S O
inst✝ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
⊢ IsBaseChange T (LinearMap.comp (↑R g) f)
[PROOFSTEP]
apply IsBaseChange.of_lift_unique
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²³ : AddCommMonoid M
inst✝²² : AddCommMonoid N
inst✝²¹ : CommRing R
inst✝²⁰ : CommRing S
inst✝¹⁹ : Algebra R S
inst✝¹⁸ : Module R M
inst✝¹⁷ : Module R N
inst✝¹⁶ : Module S N
inst✝¹⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝¹⁴ : AddCommMonoid P
inst✝¹³ : Module R P
inst✝¹² : AddCommMonoid Q
inst✝¹¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁰ : CommRing T
inst✝⁹ : Algebra R T
inst✝⁸ : Algebra S T
inst✝⁷ : IsScalarTower R S T
inst✝⁶ : AddCommMonoid O
inst✝⁵ : Module R O
inst✝⁴ : Module S O
inst✝³ : Module T O
inst✝² : IsScalarTower S T O
inst✝¹ : IsScalarTower R S O
inst✝ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
⊢ ∀ (Q : Type (max v₁ u_5 u_4)) [inst : AddCommMonoid Q] [inst_1 : Module R Q] [inst_2 : Module T Q]
[inst_3 : IsScalarTower R T Q] (g_1 : M →ₗ[R] Q), ∃! g', LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = g_1
[PROOFSTEP]
intro Q _ _ _ _ i
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
⊢ ∃! g', LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
[PROOFSTEP]
letI := Module.compHom Q (algebraMap S T)
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this : Module S Q := Module.compHom Q (algebraMap S T)
⊢ ∃! g', LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
[PROOFSTEP]
haveI : IsScalarTower S T Q :=
⟨fun x y z => by
rw [Algebra.smul_def, mul_smul]
rfl⟩
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this : Module S Q := Module.compHom Q (algebraMap S T)
x : S
y : T
z : Q
⊢ (x • y) • z = x • y • z
[PROOFSTEP]
rw [Algebra.smul_def, mul_smul]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this : Module S Q := Module.compHom Q (algebraMap S T)
x : S
y : T
z : Q
⊢ ↑(algebraMap S T) x • y • z = x • y • z
[PROOFSTEP]
rfl
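-- Editorial sketch (assumes `import Mathlib`): the `Algebra.smul_def` rewrite used
-- above is the general identity turning the scalar action of `S` on an
-- `S`-algebra `T` into multiplication by the structure map.
example (S T : Type*) [CommRing S] [CommRing T] [Algebra S T] (s : S) (t : T) :
    s • t = algebraMap S T s * t :=
  Algebra.smul_def s t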
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝ : Module S Q := Module.compHom Q (algebraMap S T)
this : IsScalarTower S T Q
⊢ ∃! g', LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
[PROOFSTEP]
have : IsScalarTower R S Q := by
refine' ⟨fun x y z => _⟩
change (IsScalarTower.toAlgHom R S T) (x • y) • z = x • algebraMap S T y • z
rw [AlgHom.map_smul, smul_assoc]
rfl
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝ : Module S Q := Module.compHom Q (algebraMap S T)
this : IsScalarTower S T Q
⊢ IsScalarTower R S Q
[PROOFSTEP]
refine' ⟨fun x y z => _⟩
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝ : Module S Q := Module.compHom Q (algebraMap S T)
this : IsScalarTower S T Q
x : R
y : S
z : Q
⊢ (x • y) • z = x • y • z
[PROOFSTEP]
change (IsScalarTower.toAlgHom R S T) (x • y) • z = x • algebraMap S T y • z
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝ : Module S Q := Module.compHom Q (algebraMap S T)
this : IsScalarTower S T Q
x : R
y : S
z : Q
⊢ ↑(IsScalarTower.toAlgHom R S T) (x • y) • z = x • ↑(algebraMap S T) y • z
[PROOFSTEP]
rw [AlgHom.map_smul, smul_assoc]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝ : Module S Q := Module.compHom Q (algebraMap S T)
this : IsScalarTower S T Q
x : R
y : S
z : Q
⊢ x • ↑(IsScalarTower.toAlgHom R S T) y • z = x • ↑(algebraMap S T) y • z
[PROOFSTEP]
rfl
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
⊢ ∃! g', LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
[PROOFSTEP]
refine'
⟨hg.lift (hf.lift i), by
ext
simp [IsBaseChange.lift_eq], _⟩
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
⊢ (fun g' => LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i) (lift hg (lift hf i))
[PROOFSTEP]
ext
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
x✝ : M
⊢ ↑(LinearMap.comp (↑R (lift hg (lift hf i))) (LinearMap.comp (↑R g) f)) x✝ = ↑i x✝
[PROOFSTEP]
simp [IsBaseChange.lift_eq]
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
⊢ ∀ (y : O →ₗ[T] Q), (fun g' => LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i) y → y = lift hg (lift hf i)
[PROOFSTEP]
rintro g' (e : _ = _)
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
g' : O →ₗ[T] Q
e : LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
⊢ g' = lift hg (lift hf i)
[PROOFSTEP]
refine' hg.algHom_ext' _ _ (hf.algHom_ext' _ _ _)
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
g' : O →ₗ[T] Q
e : LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
⊢ LinearMap.comp (↑R (LinearMap.comp (↑S g') g)) f = LinearMap.comp (↑R (LinearMap.comp (↑S (lift hg (lift hf i))) g)) f
[PROOFSTEP]
rw [IsBaseChange.lift_comp, IsBaseChange.lift_comp, ← e]
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
g' : O →ₗ[T] Q
e : LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
⊢ LinearMap.comp (↑R (LinearMap.comp (↑S g') g)) f = LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f)
[PROOFSTEP]
ext
[GOAL]
case h.h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝²⁷ : AddCommMonoid M
inst✝²⁶ : AddCommMonoid N
inst✝²⁵ : CommRing R
inst✝²⁴ : CommRing S
inst✝²³ : Algebra R S
inst✝²² : Module R M
inst✝²¹ : Module R N
inst✝²⁰ : Module S N
inst✝¹⁹ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q✝ : Type u_3
inst✝¹⁸ : AddCommMonoid P
inst✝¹⁷ : Module R P
inst✝¹⁶ : AddCommMonoid Q✝
inst✝¹⁵ : Module S Q✝
T : Type u_4
O : Type u_5
inst✝¹⁴ : CommRing T
inst✝¹³ : Algebra R T
inst✝¹² : Algebra S T
inst✝¹¹ : IsScalarTower R S T
inst✝¹⁰ : AddCommMonoid O
inst✝⁹ : Module R O
inst✝⁸ : Module S O
inst✝⁷ : Module T O
inst✝⁶ : IsScalarTower S T O
inst✝⁵ : IsScalarTower R S O
inst✝⁴ : IsScalarTower R T O
f : M →ₗ[R] N
hf : IsBaseChange S f
g : N →ₗ[S] O
hg : IsBaseChange T g
Q : Type (max v₁ u_5 u_4)
inst✝³ : AddCommMonoid Q
inst✝² : Module R Q
inst✝¹ : Module T Q
inst✝ : IsScalarTower R T Q
i : M →ₗ[R] Q
this✝¹ : Module S Q := Module.compHom Q (algebraMap S T)
this✝ : IsScalarTower S T Q
this : IsScalarTower R S Q
g' : O →ₗ[T] Q
e : LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f) = i
x✝ : M
⊢ ↑(LinearMap.comp (↑R (LinearMap.comp (↑S g') g)) f) x✝ = ↑(LinearMap.comp (↑R g') (LinearMap.comp (↑R g) f)) x✝
[PROOFSTEP]
rfl
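-- The goals above discharge the uniqueness half of a composite base change:
-- any g' with (↑R g').comp ((↑R g).comp f) = i must equal lift hg (lift hf i).
-- Together with the existence witness, this appears to prove a statement roughly of the form
--   IsBaseChange T ((g.restrictScalars R).comp f)
-- from hf : IsBaseChange S f and hg : IsBaseChange T g (cf. `IsBaseChange.comp` in Mathlib;
-- the exact name and signature are assumed here, not taken from this trace).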
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
⊢ IsPushout R R' S S'
[PROOFSTEP]
let _ := (Algebra.TensorProduct.includeRight : R' →ₐ[R] S ⊗ R').toRingHom.toAlgebra
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
⊢ IsPushout R R' S S'
[PROOFSTEP]
let e : R' ⊗[R] S ≃ₗ[R'] S' :=
by
refine' { (_root_.TensorProduct.comm R R' S).trans <| h.1.equiv.restrictScalars R with map_smul' := _ }
intro r x
change h.1.equiv (TensorProduct.comm R R' S (r • x)) = r • h.1.equiv (TensorProduct.comm R R' S x)
refine TensorProduct.induction_on x ?_ ?_ ?_
· simp only [smul_zero, map_zero]
· intro x y
simp [smul_tmul', Algebra.smul_def, RingHom.algebraMap_toAlgebra, h.1.equiv_tmul]
ring
· intro x y hx hy
rw [map_add, map_add, smul_add, map_add, map_add, hx, hy, smul_add]
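-- The block above upgrades the R-linear equivalence
--   (TensorProduct.comm R R' S).trans (h.1.equiv.restrictScalars R) : R' ⊗[R] S ≃ₗ[R] S'
-- to an R'-linear equivalence by verifying `map_smul'` via `TensorProduct.induction_on`
-- (zero, pure tensors, sums). The [GOAL]/[PROOFSTEP] pairs that follow replay these same
-- tactic steps as individually extracted goals.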
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
⊢ R' ⊗[R] S ≃ₗ[R'] S'
[PROOFSTEP]
refine' { (_root_.TensorProduct.comm R R' S).trans <| h.1.equiv.restrictScalars R with map_smul' := _ }
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
⊢ ∀ (r : R') (x : R' ⊗[R] S), AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R') r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
intro r x
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x : R' ⊗[R] S
⊢ AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id R') r • AddHom.toFun src✝.toAddHom x
[PROOFSTEP]
change h.1.equiv (TensorProduct.comm R R' S (r • x)) = r • h.1.equiv (TensorProduct.comm R R' S x)
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x : R' ⊗[R] S
⊢ ↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)
[PROOFSTEP]
refine TensorProduct.induction_on x ?_ ?_ ?_
[GOAL]
case refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x : R' ⊗[R] S
⊢ ↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • 0)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) 0)
[PROOFSTEP]
simp only [smul_zero, map_zero]
[GOAL]
case refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x : R' ⊗[R] S
⊢ ∀ (x : R') (y : S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x ⊗ₜ[R] y)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (x ⊗ₜ[R] y))
[PROOFSTEP]
intro x y
[GOAL]
case refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝¹ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x✝ : R' ⊗[R] S
x : R'
y : S
⊢ ↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x ⊗ₜ[R] y)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (x ⊗ₜ[R] y))
[PROOFSTEP]
simp [smul_tmul', Algebra.smul_def, RingHom.algebraMap_toAlgebra, h.1.equiv_tmul]
[GOAL]
case refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝¹ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x✝ : R' ⊗[R] S
x : R'
y : S
⊢ ↑(algebraMap S S') y * (↑(algebraMap R' S') r * ↑(algebraMap R' S') x) =
↑(algebraMap R' S') r * (↑(algebraMap S S') y * ↑(algebraMap R' S') x)
[PROOFSTEP]
ring
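-- On pure tensors the smul goal unfolds (via `h.1.equiv_tmul`, `Algebra.smul_def` and
-- `RingHom.algebraMap_toAlgebra`) to a rearrangement of products in the commutative ring S',
-- which `ring` closes.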
[GOAL]
case refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x : R' ⊗[R] S
⊢ ∀ (x y : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x) →
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • y)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) y) →
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • (x + y))) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (x + y))
[PROOFSTEP]
intro x y hx hy
[GOAL]
case refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝¹ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
src✝ : R' ⊗[R] S ≃ₗ[R] S' :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))))
r : R'
x✝ x y : R' ⊗[R] S
hx :
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)
hy :
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • y)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) y)
⊢ ↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • (x + y))) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (x + y))
[PROOFSTEP]
rw [map_add, map_add, smul_add, map_add, map_add, hx, hy, smul_add]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
⊢ IsPushout R R' S S'
[PROOFSTEP]
have : (toAlgHom R S S').toLinearMap = (e.toLinearMap.restrictScalars R).comp (TensorProduct.mk R R' S 1) :=
by
ext
simp [h.1.equiv_tmul, Algebra.smul_def]
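-- This `have` identifies the structure map S →ₗ[R] S' with the composite of
-- s ↦ (1 : R') ⊗ₜ s and the equivalence e, which is what allows the final step to
-- reduce `IsBaseChange R' (toAlgHom R S S').toLinearMap` to facts about e and the
-- tensor product.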
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
⊢ AlgHom.toLinearMap (toAlgHom R S S') = LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1)
[PROOFSTEP]
ext
[GOAL]
case h
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝¹ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
x✝ : S
⊢ ↑(AlgHom.toLinearMap (toAlgHom R S S')) x✝ = ↑(LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1)) x✝
[PROOFSTEP]
simp [h.1.equiv_tmul, Algebra.smul_def]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
this : AlgHom.toLinearMap (toAlgHom R S S') = LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1)
⊢ IsPushout R R' S S'
[PROOFSTEP]
constructor
[GOAL]
case out
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
this : AlgHom.toLinearMap (toAlgHom R S S') = LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1)
⊢ IsBaseChange R' (AlgHom.toLinearMap (toAlgHom R S S'))
[PROOFSTEP]
rw [this]
[GOAL]
case out
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³¹ : AddCommMonoid M
inst✝³⁰ : AddCommMonoid N
inst✝²⁹ : CommRing R
inst✝²⁸ : CommRing S
inst✝²⁷ : Algebra R S
inst✝²⁶ : Module R M
inst✝²⁵ : Module R N
inst✝²⁴ : Module S N
inst✝²³ : IsScalarTower R S N
f : M →ₗ[R] N
h✝ : IsBaseChange S f
P : Type u_2
Q : Type u_3
inst✝²² : AddCommMonoid P
inst✝²¹ : Module R P
inst✝²⁰ : AddCommMonoid Q
inst✝¹⁹ : Module S Q
T : Type u_4
O : Type u_5
inst✝¹⁸ : CommRing T
inst✝¹⁷ : Algebra R T
inst✝¹⁶ : Algebra S T
inst✝¹⁵ : IsScalarTower R S T
inst✝¹⁴ : AddCommMonoid O
inst✝¹³ : Module R O
inst✝¹² : Module S O
inst✝¹¹ : Module T O
inst✝¹⁰ : IsScalarTower S T O
inst✝⁹ : IsScalarTower R S O
inst✝⁸ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁷ : CommRing R'
inst✝⁶ : CommRing S'
inst✝⁵ : Algebra R R'
inst✝⁴ : Algebra S S'
inst✝³ : Algebra R' S'
inst✝² : Algebra R S'
inst✝¹ : IsScalarTower R R' S'
inst✝ : IsScalarTower R S S'
h : IsPushout R S R' S'
x✝ : Algebra R' (S ⊗[R] R') := RingHom.toAlgebra ↑TensorProduct.includeRight
e : R' ⊗[R] S ≃ₗ[R'] S' :=
let src :=
LinearEquiv.trans (_root_.TensorProduct.comm R R' S)
(LinearEquiv.restrictScalars R (IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))));
{
toLinearMap :=
{ toAddHom := src.toAddHom,
map_smul' :=
(_ :
∀ (r : R') (x : R' ⊗[R] S),
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) (r • x)) =
r •
↑(IsBaseChange.equiv (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))))
(↑(TensorProduct.comm R R' S) x)) },
invFun := src.invFun, left_inv := (_ : Function.LeftInverse src.invFun src.toFun),
right_inv := (_ : Function.RightInverse src.invFun src.toFun) }
this : AlgHom.toLinearMap (toAlgHom R S S') = LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1)
⊢ IsBaseChange R' (LinearMap.comp (↑R ↑e) (↑(TensorProduct.mk R R' S) 1))
[PROOFSTEP]
exact (TensorProduct.isBaseChange R S R').comp (IsBaseChange.ofEquiv e)
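-- This closes the transposition of the pushout square: from h : IsPushout R S R' S'
-- one obtains IsPushout R R' S S', by exhibiting S' as the base change of S along R'
-- through the equivalence e constructed above (this appears to be the `symm` lemma for
-- `Algebra.IsPushout`; the name is an assumption, not shown in the trace).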
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
⊢ S' →ₐ[R] A
[PROOFSTEP]
letI := Module.compHom A f.toRingHom
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this : Module S A := Module.compHom A ↑f
⊢ S' →ₐ[R] A
[PROOFSTEP]
haveI : IsScalarTower R S A :=
{ smul_assoc := fun r s a => show f (r • s) * a = r • (f s * a) by rw [f.map_smul, smul_mul_assoc] }
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this : Module S A := Module.compHom A ↑f
r : R
s : S
a : A
⊢ ↑f (r • s) * a = r • (↑f s * a)
[PROOFSTEP]
rw [f.map_smul, smul_mul_assoc]
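-- With the S-module structure on A transported along f (`Module.compHom A ↑f`),
-- the tower condition (r • s) • a = r • (s • a) unfolds to f (r • s) * a = r • (f s * a),
-- which follows from `f.map_smul` and `smul_mul_assoc`.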
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝ : Module S A := Module.compHom A ↑f
this : IsScalarTower R S A
⊢ S' →ₐ[R] A
[PROOFSTEP]
haveI : IsScalarTower S A A := { smul_assoc := fun r a b => mul_assoc _ _ _ }
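-- `IsScalarTower S A A` holds because the S-action on A is s • a = f s * a,
-- so (s • a) • b = (f s * a) * b = f s * (a * b) = s • (a • b) is just associativity.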
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝¹ : Module S A := Module.compHom A ↑f
this✝ : IsScalarTower R S A
this : IsScalarTower S A A
⊢ S' →ₐ[R] A
[PROOFSTEP]
have : ∀ x, H.out.lift g.toLinearMap (algebraMap R' S' x) = g x := H.out.lift_eq _
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
⊢ S' →ₐ[R] A
[PROOFSTEP]
refine' AlgHom.ofLinearMap ((H.out.lift g.toLinearMap).restrictScalars R) _ _
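-- Construction step: the underlying R-linear map of the desired algebra hom S' →ₐ[R] A
-- is (H.out.lift g.toLinearMap).restrictScalars R, the S-linear lift of g along the base
-- change presenting S' as the extension of R' along S. The two remaining goals are that
-- this map sends 1 to 1 and is multiplicative (presumably a `pushoutDesc`-style universal
-- property; the Mathlib name is an assumption here).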
[GOAL]
case refine'_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) 1 = 1
[PROOFSTEP]
dsimp only [LinearMap.restrictScalars_apply]
[GOAL]
case refine'_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) 1 = 1
[PROOFSTEP]
rw [← (algebraMap R' S').map_one, this, g.map_one]
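-- 1 in S' is the image of 1 ∈ R' under algebraMap R' S', so the `lift_eq` fact `this`
-- reduces the goal to g 1 = 1, closed by `g.map_one`.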
[GOAL]
case refine'_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
⊢ ∀ (x y : S'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(x * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) x *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
intro x y
[GOAL]
case refine'_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(x * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) x *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
refine H.out.inductionOn x ?_ ?_ ?_ ?_
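-- Multiplicativity is proved by induction over S' as a base change (`inductionOn`):
-- the four cases are x = 0, x in the image of R', closure under S-scalar multiplication,
-- and closure under addition.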
[GOAL]
case refine'_2.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(0 * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) 0 *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
rw [zero_mul, map_zero, zero_mul]
[GOAL]
case refine'_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (m : R'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) m * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) m) *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
case refine'_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (s : S) (n : S'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • n * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • n) *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y
case refine'_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (n₁ n₂ : S'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₁ * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n₁ *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₂ * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n₂ *
↑(↑R
(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))
(AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
((n₁ + n₂) * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₁ + n₂) *
↑(↑R
(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))
(AlgHom.toLinearMap g)))
y
[PROOFSTEP]
rotate_left
[GOAL]
case refine'_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (s : S) (n : S'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • n * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • n) *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y
[PROOFSTEP]
intro s s' e
[GOAL]
case refine'_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
s : S
s' : S'
e :
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s' * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) s' *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • s' * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s • s') *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
dsimp only [LinearMap.restrictScalars_apply] at e ⊢
[GOAL]
case refine'_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
s : S
s' : S'
e :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s' * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s' *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(s • s' * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s • s') *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
[PROOFSTEP]
rw [LinearMap.map_smul, smul_mul_assoc, LinearMap.map_smul, e, smul_mul_assoc]
[GOAL]
case refine'_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (n₁ n₂ : S'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₁ * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n₁ *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₂ * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
n₂ *
↑(↑R
(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))
(AlgHom.toLinearMap g)))
y →
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
((n₁ + n₂) * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(n₁ + n₂) *
↑(↑R
(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S')))
(AlgHom.toLinearMap g)))
y
[PROOFSTEP]
intro s s' e₁ e₂
[GOAL]
case refine'_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y s s' : S'
e₁ :
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) s *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
e₂ :
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s' * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) s' *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
((s + s') * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(s + s') *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
dsimp only [LinearMap.restrictScalars_apply] at e₁ e₂ ⊢
[GOAL]
case refine'_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y s s' : S'
e₁ :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
e₂ :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s' * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s' *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
((s + s') * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s + s') *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
[PROOFSTEP]
rw [add_mul, map_add, map_add, add_mul, e₁, e₂]
[GOAL]
case refine'_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x y : S'
⊢ ∀ (m : R'),
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) m * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) m) *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
intro x
[GOAL]
case refine'_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) x * y) =
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) x) *
↑(↑R (IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))) y
[PROOFSTEP]
dsimp
[GOAL]
case refine'_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * y) =
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
[PROOFSTEP]
rw [this]
[GOAL]
case refine'_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * y) =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) y
[PROOFSTEP]
refine H.out.inductionOn y ?_ ?_ ?_ ?_
[GOAL]
case refine'_2.refine_2.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * 0) =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) 0
[PROOFSTEP]
rw [mul_zero, map_zero, mul_zero]
[GOAL]
case refine'_2.refine_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ∀ (m : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * ↑(AlgHom.toLinearMap (toAlgHom R R' S')) m) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) m)
[PROOFSTEP]
intro y
[GOAL]
case refine'_2.refine_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y✝ : S'
x y : R'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * ↑(AlgHom.toLinearMap (toAlgHom R R' S')) y) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(AlgHom.toLinearMap (toAlgHom R R' S')) y)
[PROOFSTEP]
dsimp
[GOAL]
case refine'_2.refine_2.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y✝ : S'
x y : R'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * ↑(algebraMap R' S') y) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') y)
[PROOFSTEP]
rw [← _root_.map_mul, this, this, _root_.map_mul]
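-- Hedged reading of this step: both factors lie in the image of `algebraMap R' S'`,
-- so `← map_mul` fuses them, the defining property `this` rewrites the lift to `g`
-- on both sides, and `map_mul` for `g` closes the case.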
[GOAL]
case refine'_2.refine_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ∀ (s : S) (n : S'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * n) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) n →
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s • n) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(s • n)
[PROOFSTEP]
intro s s' e
[GOAL]
case refine'_2.refine_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
s : S
s' : S'
e :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s') =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s • s') =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s • s')
[PROOFSTEP]
rw [mul_comm, smul_mul_assoc, LinearMap.map_smul, LinearMap.map_smul, mul_comm, e]
[GOAL]
case refine'_2.refine_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
s : S
s' : S'
e :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s') =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s'
⊢ s •
(↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s') =
↑g x *
s • ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s'
[PROOFSTEP]
change f s * (g x * _) = g x * (f s * _)
[GOAL]
case refine'_2.refine_2.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
s : S
s' : S'
e :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s') =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s'
⊢ ↑f s *
(↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s') =
↑g x *
(↑f s *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s')
[PROOFSTEP]
rw [← mul_assoc, ← mul_assoc, hf]
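-- Hedged reading of this step: reassociating both sides turns the goal into
-- `(↑f s * ↑g x) * _ = (↑g x * ↑f s) * _`, so the commutation hypothesis `hf`
-- closes the scalar-action case.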
[GOAL]
case refine'_2.refine_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
⊢ ∀ (n₁ n₂ : S'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * n₁) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) n₁ →
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * n₂) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
n₂ →
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * (n₁ + n₂)) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(n₁ + n₂)
[PROOFSTEP]
intro s s' e₁ e₂
[GOAL]
case refine'_2.refine_2.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
this✝² : Module S A := Module.compHom A ↑f
this✝¹ : IsScalarTower R S A
this✝ : IsScalarTower S A A
this :
∀ (x : R'),
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x) =
↑g x
x✝ y : S'
x : R'
s s' : S'
e₁ :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s) =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s
e₂ :
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * s') =
↑g x * ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) s'
⊢ ↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g))
(↑(algebraMap R' S') x * (s + s')) =
↑g x *
↑(IsBaseChange.lift (_ : IsBaseChange S (AlgHom.toLinearMap (toAlgHom R R' S'))) (AlgHom.toLinearMap g)) (s + s')
[PROOFSTEP]
rw [mul_add, map_add, map_add, mul_add, e₁, e₂]
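-- This discharges the last of the four cases of `H.out.inductionOn y` (zero, image of
-- `algebraMap R' S'`, `S`-scalar action, addition), completing the proof that the
-- lifted map respects multiplication by `algebraMap R' S' x`.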
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
s : S'
⊢ Sort ?u.1234081
[PROOFSTEP]
letI := Module.compHom A f.toRingHom
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
s : S'
this : Module S A := Module.compHom A ↑f
⊢ Sort ?u.1234081
[PROOFSTEP]
haveI : IsScalarTower R S A :=
{ smul_assoc := fun r s a => show f (r • s) * a = r • (f s * a) by rw [f.map_smul, smul_mul_assoc] }
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
s✝ : S'
this : Module S A := Module.compHom A ↑f
r : R
s : S
a : A
⊢ ↑f (r • s) * a = r • (↑f s * a)
[PROOFSTEP]
rw [f.map_smul, smul_mul_assoc]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
hf : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
s : S'
this✝ : Module S A := Module.compHom A ↑f
this : IsScalarTower R S A
⊢ Sort ?u.1234081
[PROOFSTEP]
exact Algebra.pushoutDesc S' f g hf s = H.out.lift g.toLinearMap s
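-- The `Sort ?u.…` goals above presumably come from elaborating a statement rather than a
-- proof: the `letI`/`haveI` steps reinstall the `Module S A` structure (via
-- `Module.compHom A ↑f`) and the scalar-tower instances so that the stated equality between
-- `Algebra.pushoutDesc` and the base-change lift typechecks.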
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
⊢ ↑(pushoutDesc S' f g H) (↑(algebraMap S S') x) = ↑f x
[PROOFSTEP]
letI := Module.compHom A f.toRingHom
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
this : Module S A := Module.compHom A ↑f
⊢ ↑(pushoutDesc S' f g H) (↑(algebraMap S S') x) = ↑f x
[PROOFSTEP]
haveI : IsScalarTower R S A :=
{ smul_assoc := fun r s a => show f (r • s) * a = r • (f s * a) by rw [f.map_smul, smul_mul_assoc] }
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
this : Module S A := Module.compHom A ↑f
r : R
s : S
a : A
⊢ ↑f (r • s) * a = r • (↑f s * a)
[PROOFSTEP]
rw [f.map_smul, smul_mul_assoc]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
this✝ : Module S A := Module.compHom A ↑f
this : IsScalarTower R S A
⊢ ↑(pushoutDesc S' f g H) (↑(algebraMap S S') x) = ↑f x
[PROOFSTEP]
haveI : IsScalarTower S A A := { smul_assoc := fun r a b => mul_assoc _ _ _ }
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
this✝¹ : Module S A := Module.compHom A ↑f
this✝ : IsScalarTower R S A
this : IsScalarTower S A A
⊢ ↑(pushoutDesc S' f g H) (↑(algebraMap S S') x) = ↑f x
[PROOFSTEP]
rw [Algebra.algebraMap_eq_smul_one, pushoutDesc_apply, map_smul, ← Algebra.pushoutDesc_apply S' f g H, _root_.map_one]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : S
this✝¹ : Module S A := Module.compHom A ↑f
this✝ : IsScalarTower R S A
this : IsScalarTower S A A
⊢ x • 1 = ↑f x
[PROOFSTEP]
exact mul_one (f x)
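-- Final step of the left-compatibility lemma (`pushoutDesc` agrees with `f` on the image of
-- `S`): under `Module S A := Module.compHom A ↑f`, the action `x • (1 : A)` unfolds
-- definitionally to `↑f x * 1`, so `mul_one (f x)` closes the goal `x • 1 = ↑f x`.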
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H✝ : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f : S →ₐ[R] A
g : R' →ₐ[R] A
H : ∀ (x : S) (y : R'), ↑f x * ↑g y = ↑g y * ↑f x
x : R'
this : Module S A := Module.compHom A ↑f
r : R
s : S
a : A
⊢ ↑f (r • s) * a = r • (↑f s * a)
[PROOFSTEP]
rw [f.map_smul, smul_mul_assoc]
[GOAL]
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
⊢ f = g
[PROOFSTEP]
ext x
[GOAL]
case H
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
⊢ ↑f x = ↑g x
[PROOFSTEP]
refine H.1.inductionOn x ?_ ?_ ?_ ?_
[GOAL]
case H.refine_1
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
⊢ ↑f 0 = ↑g 0
[PROOFSTEP]
simp only [map_zero]
[GOAL]
case H.refine_2
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
⊢ ∀ (m : R'), ↑f (↑(AlgHom.toLinearMap (toAlgHom R R' S')) m) = ↑g (↑(AlgHom.toLinearMap (toAlgHom R R' S')) m)
[PROOFSTEP]
exact AlgHom.congr_fun h₁
[GOAL]
case H.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
⊢ ∀ (s : S) (n : S'), ↑f n = ↑g n → ↑f (s • n) = ↑g (s • n)
[PROOFSTEP]
intro s s' e
[GOAL]
case H.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
s : S
s' : S'
e : ↑f s' = ↑g s'
⊢ ↑f (s • s') = ↑g (s • s')
[PROOFSTEP]
rw [Algebra.smul_def, f.map_mul, g.map_mul, e]
[GOAL]
case H.refine_3
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
s : S
s' : S'
e : ↑f s' = ↑g s'
⊢ ↑f (↑(algebraMap S S') s) * ↑g s' = ↑g (↑(algebraMap S S') s) * ↑g s'
[PROOFSTEP]
congr 1
[GOAL]
case H.refine_3.e_a
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
s : S
s' : S'
e : ↑f s' = ↑g s'
⊢ ↑f (↑(algebraMap S S') s) = ↑g (↑(algebraMap S S') s)
[PROOFSTEP]
exact (AlgHom.congr_fun h₂ s : _)
[GOAL]
case H.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x : S'
⊢ ∀ (n₁ n₂ : S'), ↑f n₁ = ↑g n₁ → ↑f n₂ = ↑g n₂ → ↑f (n₁ + n₂) = ↑g (n₁ + n₂)
[PROOFSTEP]
intro s₁ s₂ e₁ e₂
[GOAL]
case H.refine_4
R : Type u_1
M : Type v₁
N : Type v₂
S : Type v₃
inst✝³³ : AddCommMonoid M
inst✝³² : AddCommMonoid N
inst✝³¹ : CommRing R
inst✝³⁰ : CommRing S
inst✝²⁹ : Algebra R S
inst✝²⁸ : Module R M
inst✝²⁷ : Module R N
inst✝²⁶ : Module S N
inst✝²⁵ : IsScalarTower R S N
f✝ : M →ₗ[R] N
h : IsBaseChange S f✝
P : Type u_2
Q : Type u_3
inst✝²⁴ : AddCommMonoid P
inst✝²³ : Module R P
inst✝²² : AddCommMonoid Q
inst✝²¹ : Module S Q
T : Type u_4
O : Type u_5
inst✝²⁰ : CommRing T
inst✝¹⁹ : Algebra R T
inst✝¹⁸ : Algebra S T
inst✝¹⁷ : IsScalarTower R S T
inst✝¹⁶ : AddCommMonoid O
inst✝¹⁵ : Module R O
inst✝¹⁴ : Module S O
inst✝¹³ : Module T O
inst✝¹² : IsScalarTower S T O
inst✝¹¹ : IsScalarTower R S O
inst✝¹⁰ : IsScalarTower R T O
R' : Type u_6
S' : Type u_7
inst✝⁹ : CommRing R'
inst✝⁸ : CommRing S'
inst✝⁷ : Algebra R R'
inst✝⁶ : Algebra S S'
inst✝⁵ : Algebra R' S'
inst✝⁴ : Algebra R S'
inst✝³ : IsScalarTower R R' S'
inst✝² : IsScalarTower R S S'
H : IsPushout R S R' S'
A : Type u_8
inst✝¹ : Semiring A
inst✝ : Algebra R A
f g : S' →ₐ[R] A
h₁ : AlgHom.comp f (toAlgHom R R' S') = AlgHom.comp g (toAlgHom R R' S')
h₂ : AlgHom.comp f (toAlgHom R S S') = AlgHom.comp g (toAlgHom R S S')
x s₁ s₂ : S'
e₁ : ↑f s₁ = ↑g s₁
e₂ : ↑f s₂ = ↑g s₂
⊢ ↑f (s₁ + s₂) = ↑g (s₁ + s₂)
[PROOFSTEP]
rw [map_add, map_add, e₁, e₂]
|
/-
Copyright (c) 2022 Oliver Nash. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Oliver Nash
! This file was ported from Lean 3 source module data.set.pointwise.iterate
! leanprover-community/mathlib commit f2f413b9d4be3a02840d0663dace76e8fe3da053
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Set.Pointwise.Smul
import Mathbin.Algebra.Hom.Iterate
import Mathbin.Dynamics.FixedPoints.Basic
/-!
# Results about pointwise operations on sets with iteration.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
open Pointwise
open Set Function
/- warning: smul_eq_self_of_preimage_zpow_eq_self -> smul_eq_self_of_preimage_zpow_eq_self is a dubious translation:
lean 3 declaration is
forall {G : Type.{u1}} [_inst_1 : CommGroup.{u1} G] {n : Int} {s : Set.{u1} G}, (Eq.{succ u1} (Set.{u1} G) (Set.preimage.{u1, u1} G G (fun (x : G) => HPow.hPow.{u1, 0, u1} G Int G (instHPow.{u1, 0} G Int (DivInvMonoid.Pow.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1)))) x n) s) s) -> (forall {g : G} {j : Nat}, (Eq.{succ u1} G (HPow.hPow.{u1, 0, u1} G Int G (instHPow.{u1, 0} G Int (DivInvMonoid.Pow.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1)))) g (HPow.hPow.{0, 0, 0} Int Nat Int (instHPow.{0, 0} Int Nat (Monoid.Pow.{0} Int Int.monoid)) n j)) (OfNat.ofNat.{u1} G 1 (OfNat.mk.{u1} G 1 (One.one.{u1} G (MulOneClass.toHasOne.{u1} G (Monoid.toMulOneClass.{u1} G (DivInvMonoid.toMonoid.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1))))))))) -> (Eq.{succ u1} (Set.{u1} G) (SMul.smul.{u1, u1} G (Set.{u1} G) (Set.smulSet.{u1, u1} G G (Mul.toSMul.{u1} G (MulOneClass.toHasMul.{u1} G (Monoid.toMulOneClass.{u1} G (DivInvMonoid.toMonoid.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1))))))) g s) s))
but is expected to have type
forall {G : Type.{u1}} [_inst_1 : CommGroup.{u1} G] {n : Int} {s : Set.{u1} G}, (Eq.{succ u1} (Set.{u1} G) (Set.preimage.{u1, u1} G G (fun (x : G) => HPow.hPow.{u1, 0, u1} G Int G (instHPow.{u1, 0} G Int (DivInvMonoid.Pow.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1)))) x n) s) s) -> (forall {g : G} {j : Nat}, (Eq.{succ u1} G (HPow.hPow.{u1, 0, u1} G Int G (instHPow.{u1, 0} G Int (DivInvMonoid.Pow.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1)))) g (HPow.hPow.{0, 0, 0} Int Nat Int Int.instHPowIntNat n j)) (OfNat.ofNat.{u1} G 1 (One.toOfNat1.{u1} G (InvOneClass.toOne.{u1} G (DivInvOneMonoid.toInvOneClass.{u1} G (DivisionMonoid.toDivInvOneMonoid.{u1} G (DivisionCommMonoid.toDivisionMonoid.{u1} G (CommGroup.toDivisionCommMonoid.{u1} G _inst_1)))))))) -> (Eq.{succ u1} (Set.{u1} G) (HSMul.hSMul.{u1, u1, u1} G (Set.{u1} G) (Set.{u1} G) (instHSMul.{u1, u1} G (Set.{u1} G) (Set.smulSet.{u1, u1} G G (MulAction.toSMul.{u1, u1} G G (DivInvMonoid.toMonoid.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1))) (Monoid.toMulAction.{u1} G (DivInvMonoid.toMonoid.{u1} G (Group.toDivInvMonoid.{u1} G (CommGroup.toGroup.{u1} G _inst_1))))))) g s) s))
Case conversion may be inaccurate. Consider using '#align smul_eq_self_of_preimage_zpow_eq_self smul_eq_self_of_preimage_zpow_eq_selfₓ'. -/
/-- Let `n : ℤ` and `s` a subset of a commutative group `G` that is invariant under preimage for
the map `x ↦ x^n`. Then `s` is invariant under the pointwise action of the subgroup of elements
`g : G` such that `g^(n^j) = 1` for some `j : ℕ`. (This subgroup is called the Prüfer subgroup when
`G` is the `circle` and `n` is prime.) -/
@[to_additive
"Let `n : ℤ` and `s` a subset of an additive commutative group `G` that is invariant\nunder preimage for the map `x ↦ n • x`. Then `s` is invariant under the pointwise action of the\nadditive subgroup of elements `g : G` such that `(n^j) • g = 0` for some `j : ℕ`. (This additive\nsubgroup is called the Prüfer subgroup when `G` is the `add_circle` and `n` is prime.)"]
theorem smul_eq_self_of_preimage_zpow_eq_self {G : Type _} [CommGroup G] {n : ℤ} {s : Set G}
(hs : (fun x => x ^ n) ⁻¹' s = s) {g : G} {j : ℕ} (hg : g ^ n ^ j = 1) : g • s = s :=
by
suffices ∀ {g' : G} (hg' : g' ^ n ^ j = 1), g' • s ⊆ s
by
refine' le_antisymm (this hg) _
conv_lhs => rw [← smul_inv_smul g s]
replace hg : g⁻¹ ^ n ^ j = 1
· rw [inv_zpow, hg, inv_one]
simpa only [le_eq_subset, set_smul_subset_set_smul_iff] using this hg
rw [(is_fixed_pt.preimage_iterate hs j : zpowGroupHom n^[j] ⁻¹' s = s).symm]
rintro g' hg' - ⟨y, hy, rfl⟩
change (zpowGroupHom n^[j]) (g' * y) ∈ s
replace hg' : (zpowGroupHom n^[j]) g' = 1
· simpa [zpowGroupHom]
rwa [MonoidHom.iterate_map_mul, hg', one_mul]
#align smul_eq_self_of_preimage_zpow_eq_self smul_eq_self_of_preimage_zpow_eq_self
#align vadd_eq_self_of_preimage_zsmul_eq_self vadd_eq_self_of_preimage_zsmul_eq_self
|
She had a standard displacement of 1,213 tonnes (1,194 long tons; 1,337 short tons), an overall length of 314 feet 5 inches (95.83 m), a beam of 31 feet 8 inches (9.65 m) and a draught of 9 feet 4 inches (2.84 m). On trials, Harding reached a speed of 33.2 knots (61.5 km/h; 38.2 mph). She was armed with four 4"/50 caliber guns, two 3"/23 caliber guns, and twelve 21-inch torpedo tubes. She had a regular crew complement of 122 officers and enlisted men. She was driven by two Parsons or Westinghouse turbines, and powered by four Normand boilers.
|
{-# LANGUAGE CPP #-}
#if defined(__GLASGOW_HASKELL__) && __GLASGOW_HASKELL__ >= 702
{-# LANGUAGE Trustworthy
, DefaultSignatures #-}
#define USE_GHC_GENERICS
#endif
{-# LANGUAGE RankNTypes
, TypeFamilies
, KindSignatures
, DeriveGeneric
, FlexibleInstances
, FlexibleContexts
, BangPatterns
, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-------------------------------------------------------------------------------
-- |
-- Module : OGLS.Engine.Math.Vectors
-- description : 3D and 4D vectors arithmetic
-- Copyright : (c) Adam, 2017
-- License : MIT
-- Maintainer : [email protected]
-- Stability : unstable
-- Portability : POSIX
-------------------------------------------------------------------------------
module OGLS.Engine.Math.Vectors
( (^$+), (^$-), (^$*)
, dot, step
) where
#ifdef USE_GHC_GENERICS
import GHC.Generics ( Generic (..) )
#endif
import Control.Applicative ( liftA2 )
import Control.Monad ( liftM2 )
import Control.Parallel ( pseq )
import Data.Complex ( Complex (..) )
import Data.Orphans ( )
import Control.Lens hiding ( (<.>) )
import OGLS.Engine.Math.Instances
infixl 6 ^$+, ^$-
infixl 7 ^$*
default ()
-- | Class-based vector interface (not yet implemented)
class Vector a v
-- Element-wise arithmetic over any applicative container
(^$+) :: forall (t :: * -> *) a. (Applicative t, Num a) => t a -> t a -> t a
(^$-) :: forall (t :: * -> *) a. (Applicative t, Num a) => t a -> t a -> t a
(^$*) :: forall (t :: * -> *) a. (Applicative t, Num a) => t a -> t a -> t a
newtype E t = E { el :: forall x. Lens' (t x) x }
data Vec3 a = Vec3 { v3x :: {-# UNPACK #-} !a
, v3y :: {-# UNPACK #-} !a
, v3z :: {-# UNPACK #-} !a
} deriving ( Eq, Ord, Show, Generic )
instance Functor Vec3 where
fmap f (Vec3 x y z) = Vec3 (f x) (f y) (f z)
instance Applicative Vec3 where
pure x = Vec3 x x x
Vec3 f g h <*> Vec3 x y z = Vec3 (f x) (g y) (h z)
data Vec4 a = Vec4 { v4x :: {-# UNPACK #-} !a
, v4y :: {-# UNPACK #-} !a
, v4z :: {-# UNPACK #-} !a
, v4w :: {-# UNPACK #-} !a
} deriving ( Eq, Ord, Show )
(^$+) = liftA2 (+)
{-# INLINE (^$+) #-}
(^$-) = liftA2 (-)
{-# INLINE (^$-) #-}
(^$*) = liftA2 (*)
{-# INLINE (^$*) #-}
-- | Left scalar product vector
(*^^) :: forall (f :: * -> *) a. (Functor f, Num a) => a -> f a -> f a
(*^^) a = fmap (a*)
{-# INLINE (*^^) #-}
-- | Right scalar product vector
(^^*) :: forall (f :: * -> *) a. (Functor f, Num a) => f a -> a -> f a
f ^^* a = fmap (*a) f
{-# INLINE (^^*) #-}
{- Needs Additive class and instances
basis :: (Traversable t, Num a) => [t a]
basis = basisFor (zero :: Additive v => v Int)
basisFor :: (Traversable t, Num a) => t b -> [t a]
basisFor = \t ->
ifoldMapOf traversed ?? t $ \i _ ->
return $ iover traversed ?? t $ \j _ ->
if i == j then 1 else 0
{-# INLINABLE basisFor #-}
-}
-- | Linear interpolation
lerp :: forall (f :: * -> *) a. (Functor f, Applicative f, Num a) => a -> f a -> f a -> f a
lerp alpha u v = alpha *^^ u ^$+ (1 - alpha) *^^ v
{-# INLINE lerp #-}
-- | Dot product
dot :: forall (t :: * -> *) a. (Applicative t, Foldable t, Num a) => t a -> t a -> a
dot v1 v2 = sum $ v1 ^$* v2
step :: forall (t :: * -> *) a. (Applicative t, Num a, Ord a) => t a -> t a -> t a
step = liftA2 (\a b -> if b < a then 0 else 1)
#ifdef HLINT
{-# ANN module "HLint: ignore Redundant lambda" #-}
#endif
|
Tracks from the concert DVD were filmed from the television special Kelly Clarkson's Cautionary Christmas Music Tale.
|
First Nations peoples have lived, hunted, and traveled across the area for at least 10,000 years, as shown by archaeological evidence on Walpole Island. These peoples were drawn from an amalgamation of Ojibwa, Odawa, and <unk> clans, which formed the Three Fires Confederacy, also called the Council of Three Fires, in <unk>. These clans came together through common links in both language and culture, developing a self-sufficient society where tasks and responsibilities were equally shared among all members.
|
lemma (in first_countable_topology) first_countable_basisE: fixes x :: 'a obtains \<A> where "countable \<A>" "\<And>A. A \<in> \<A> \<Longrightarrow> x \<in> A" "\<And>A. A \<in> \<A> \<Longrightarrow> open A" "\<And>S. open S \<Longrightarrow> x \<in> S \<Longrightarrow> (\<exists>A\<in>\<A>. A \<subseteq> S)" |
# Music Machine Learning - Bayesian inference
### Author: Philippe Esling ([email protected])
In this course we will cover
1. An introduction to [Bayesian inference](#bayesian)
2. A formal introduction to [Variational Auto-Encoders](#vae) (VAEs)
3. An explanation of the [implementation](#implem) of VAEs
4. Some [modifications and tips to improve the reconstruction](#improve) of VAEs **(exercise)**
<a id="recap"> </a>
## Introduction on Bayesian inference
Here, we discuss *Bayesian inference* and how to use the **Bayes theorem** to perform classification. First, we will see how to derive *estimators* for the different properties of the distributions, and verify that these are *unbiased*. Then, we will implement the **Maximum Likelihood** Estimators (MLE) in order to perform classification of a dataset. First, we will assess the case where parameters are known to implement the discriminant function and decision rule. Then, we will perform the MLE to obtain the means and covariance matrix for each class.
To understand these concepts graphically, we will rely on both the `scikit-learn` and `PyTorch` libraries (using the `torch.distributions` package).
```python
import torch
import torch.distributions as distribution
import torch.distributions.transforms as transform
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from helper_plot import hdr_plot_style, plot_gaussian_ellipsoid
hdr_plot_style()
```
### Bayesian framework
Two alternative interpretations of probability can be considered:
* **Frequentist** (the *classical* approach) assumes that probability is the *long-term frequency of events*. This becomes harder to interpret when events have no long-term frequency (e.g. the probability of an election outcome, which happens only once). In that case, frequentists consider the *frequency of occurrences across alternative realities*, which defines the probability.
* **Bayesian** interprets probability as a measure of *believability in an event*. Therefore, a probability is a measure of *belief*, or confidence, in an event occurring. This definition leaves room for conflicting beliefs based on different *information* about the world. Hence, Bayesian inference is mostly based on *updating your beliefs* after considering new *evidence*.
To align with probability notation, we denote a belief about event $a$ as $p\left(a\right)$, called the *prior probability* of an event to occur. We denote the updated belief as $ p\left( a \mid e \right) $, interpreted as the probability of $a$ *given* the new evidence $e$, called the *posterior probability*. The prior belief is not completely removed, but we *re-weight this prior* to incorporate new evidence $e$ (i.e. we put more weight, or confidence, on some beliefs versus others). By introducing prior uncertainty about events, we admit that any guess we make can be wrong. As we gather an *infinite* amount of evidence $N \rightarrow \infty$, the Bayesian results (often) align with frequentist results. Hence for small $N$, inference is *unstable*, where frequentist estimates have more variance and larger confidence intervals. However, by introducing a prior, and returning probabilities, we *preserve the uncertainty* that reflects the instability on a small $N$ dataset.
Updating the *prior belief* to obtain our *posterior belief* is done via Bayes' theorem
$$
\begin{equation}
p\left( a \mid e \right) = \frac{ p\left(e \mid a\right) p\left(a\right) } {p\left(e\right) } \propto p\left(e \mid a\right) p\left(a\right)
\end{equation}
$$
We see that our posterior belief of event $a$ given the new evidence $e$ is proportional to ($\propto$) the *likelihood* of observing this particular evidence $e$ given the event $a$ ($p\left(e \mid a\right)$) multiplied by our prior belief in that particular event $a$ ($p\left(a\right)$).
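To make this update rule concrete, here is a minimal sketch (the priors and likelihoods below are made-up numbers for illustration, not values used later in this notebook) of how a prior belief over two competing events is re-weighted by a piece of evidence:
```python
# Hypothetical two-hypothesis Bayes update: p(a | e) = p(e | a) p(a) / p(e)
prior = {'a1': 0.5, 'a2': 0.5}               # p(a): initial beliefs
likelihood = {'a1': 0.8, 'a2': 0.3}          # p(e | a): how well each event explains e

unnorm = {a: likelihood[a] * prior[a] for a in prior}
evidence = sum(unnorm.values())              # p(e) = sum_a p(e | a) p(a)
posterior = {a: val / evidence for a, val in unnorm.items()}
print(posterior)                             # {'a1': ~0.73, 'a2': ~0.27}
```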
### Using Bayesian classification (in `scikit-learn`)
The Bayesian classification methods rely on Bayes's theorem, where we are interested in finding the probability of a label $y$ given some observed features, which we can write as $p(y~|~{\rm features})$. Bayes's theorem tells us how to express this in terms of quantities that we can compute more directly
$$
p(y~|~{\rm features}) = \frac{p({\rm features}~|~y)p(y)}{p({\rm features})}
$$
If we are trying to decide between two labels ($y_1$ and $y_2$), then one way to make this decision is to compute the ratio of the posterior probabilities for each label
$$
\frac{p(y_1~|~{\rm features})}{p(y_2~|~{\rm features})} = \frac{p({\rm features}~|~y_1)}{p({\rm features}~|~y_2)}\frac{p(y_1)}{p(y_2)}
$$
All we need now is some model by which we can compute $p({\rm features}~|~y_i)$ for each label.
Such a model is called a *generative model* because it specifies the hypothetical random process that generates the data.
Specifying this generative model for each label is the main piece of the training of such a Bayesian classifier.
The general version of such a training step is a very difficult task, but we can make it simpler through the use of some simplifying assumptions about the form of this model. If we make *very naive assumptions* (called *Naive Bayes*) about the generative model for each label, we can find a rough approximation of the generative model for each class, and then proceed with the Bayesian classification.
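As a small illustration of this posterior ratio (the means, variances and priors below are hypothetical, chosen only to make the computation explicit), we can compare two one-dimensional Gaussian generative models at a single observed feature value using `scipy.stats`:
```python
from scipy.stats import norm

x_obs = 1.2                                            # a single observed feature
p_x_given_y1 = norm(loc=0.0, scale=1.0).pdf(x_obs)     # p(x | y1)
p_x_given_y2 = norm(loc=3.0, scale=1.0).pdf(x_obs)     # p(x | y2)
p_y1, p_y2 = 0.5, 0.5                                  # priors p(y1), p(y2)

ratio = (p_x_given_y1 / p_x_given_y2) * (p_y1 / p_y2)
print("posterior ratio p(y1|x) / p(y2|x) =", ratio)
print("predicted label:", "y1" if ratio > 1 else "y2")
```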
#### Gaussian Naive Bayes
Perhaps the easiest naive Bayes classifier to understand is Gaussian naive Bayes. In this classifier, the assumption is that *data from each label is drawn from a simple Gaussian distribution*. Imagine that we have the following data
```python
hdr_plot_style()
from sklearn.datasets import make_blobs
X, y = make_blobs(200, 2, centers=2, random_state=2, cluster_std=2.2)
plt.figure(figsize=(10, 8)); plt.scatter(X[:, 0], X[:, 1], c=y, s=80, cmap='RdBu', edgecolor='w'); plt.grid(True)
```
One extremely fast way to create a simple model is to assume that the data is described by a Gaussian distribution with no covariance between dimensions. This model can be fit by simply finding the mean and standard deviation of the points within each label, which is all you need to define such a distribution.
With this generative model for each class, we have a simple recipe to compute the likelihood $P({\rm features}~|~L_1)$ for any data point, and thus we can quickly compute the posterior ratio and determine which label is the most probable for a given point. This procedure is implemented in Scikit-Learn's ``sklearn.naive_bayes.GaussianNB`` estimator
```python
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y);
```
Now let's generate some new data and predict the label:
```python
rng = np.random.RandomState(0)
Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)
ynew = model.predict(Xnew)
```
Now we can plot the data and see where the decision boundary is
```python
fig = plt.figure(figsize=(10,8)); ax = fig.add_subplot(111)
# predict the classification probabilities on a grid
xlim = X[:, 0].min() - .5, X[:, 0].max() + .5
ylim = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 71), np.linspace(ylim[0], ylim[1], 81))
Z = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=plt.cm.RdBu, alpha=.8)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu', edgecolor='w');
```
We see a slightly curved boundary in the classifications (generally, the boundary in Gaussian naive Bayes is quadratic). A nice piece of this Bayesian formalism is that it naturally allows for probabilistic classification, which we can compute using the ``predict_proba`` method:
```python
yprob = model.predict_proba(Xnew)
yprob[-8:].round(2)
```
The columns give the posterior probabilities of the first and second label, respectively. If you are looking for estimates of uncertainty in your classification, Bayesian approaches like this can be a useful approach. Of course, the final classification will only be as good as the model assumptions that lead to it, which is why Gaussian naive Bayes often does not produce very good results.
## Bayesian inference
Suppose we have a coin and want to estimate its probability of heads ($p$). The coin is Bernoulli distributed:
$$
\begin{equation}
\phi(x)= p^x (1-p)^{(1-x)}
\end{equation}
$$
where $x$ is the outcome, *1* for heads and *0* for tails. Based on $n$ *independent* flips, we have the likelihood:
$$
\begin{equation}
\mathcal{L}(p|\mathbf{x})= \prod_{i=1}^n p^{ x_i }(1-p)^{1-x_i}
\end{equation}
$$
(the independent-trials assumption allows us to just substitute everything into $ \phi(x)$).
The idea of *maximum likelihood* will be to maximize this as the function of $p$ after given all of the $x_i$ data. This means that our estimator, $\hat{p}$, is a function of the observed $x_i$ data, and as such, is a random variable with its own distribution.
**Defining the estimator**
A first check that our estimator is sensibly defined is to verify that it is unbiased, namely, that
$$ \mathbb{E}(\hat{p}) = p $$
### Understanding this behavior with `pytorch`
Without solving the previous exercise analytically, let's draw 100 samples from a Bernoulli distribution.
```python
bernoulli = distribution.Bernoulli(0.2)
sample = bernoulli.sample((100, ))
sns.distplot(sample)
plt.title("Samples from a Bernoulli (p = .2)")
torch.mean(sample)
```
Now we can use our estimator, and define our problem more formally, by defining our input samples as a dataset of observations $x$, and we are trying to model this set, as a `Bernoulli` distribution, for which the $p$ parameter is unknown (here defined as a `Variable`).
```python
from torch.autograd import Variable
x = Variable(sample).type(torch.FloatTensor)
p = Variable(torch.rand(1), requires_grad=True)
```
Now we can use our estimator to gradually compute the Maximum Likelihood, in order to uncover the estimated probability of the underlying distribution, solely based on observing the samples.
```python
learning_rate = 0.00002
for t in range(1000):
NLL = -torch.sum(x*torch.log(p) + (1-x)*torch.log(1-p))
NLL.backward()
if t % 200 == 0:
print("loglik = %.4f - p = %.4f - dL/dp = %.4f"%(NLL.data.numpy(), p.data.numpy(), p.grad.data.numpy()))
p.data -= learning_rate * p.grad.data
p.grad.data.zero_()
print('Final probability p =', p.data[0])
```
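Since the log-likelihood of an i.i.d. Bernoulli sample is maximized at the sample mean, we can sanity-check the gradient-based estimate against this closed-form value (a quick check, not part of the exercise below):
```python
# The gradient-based MLE should agree, up to optimization error, with the sample mean
print('Gradient-based estimate  :', p.data[0])
print('Sample mean (closed form):', torch.mean(x))
```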
***
**Exercise**
1. Compute the *log-likelihood* $J=\log(\mathcal{L}(p \mid \mathbf{x}))$ of our given problem
2. Based on this, compute its derivative $ \frac{dJ}{dp} $
3. Solve it to find the estimator $\hat{p}$
4. Verify that this estimator is unbiased $ \mathbb{E}(\hat{p}) = p $
5. Compute the variance of the estimator $ \mathbb{E}\left(\hat{p}^2\right) $
***
```python
sample = bernoulli.sample((100, ))
x = Variable(sample).type(torch.FloatTensor)
p = torch.rand(1)
######################
# YOUR CODE GOES HERE
######################
# Questions:
# - Why does the formula above still work anyway?
# - What do we do if we do not know which law the distribution follows?
# - _Model boosting_: a method where one chooses among several models according
#   to their a posteriori relevance to the problem.
ll = lambda p, x: np.sum(x * np.log(p) + (1 - x) * np.log(1 - p))
dll_dp = lambda p, x: np.sum(x / p - (1 - x) / (1 - p))
learning_rate = 0.00002
for t in range(1000):
    nll = -ll(p.data.numpy(), x.data.numpy())
    grad = -dll_dp(p.data.numpy(), x.data.numpy())
    if t % 200 == 0:
        print("loglik  =  %.4f -  p = %.4f  -  dL/dp = %.4f" % (nll, p, grad))
    p.data -= learning_rate * grad
print('Final probability p =', p.data[0])
# Bias: compare the estimate to the true parameter used to generate the samples (p = 0.2)
bias = torch.abs(p - 0.2)
# Variance of the estimator p_hat = (1/n) * sum(x_i), which is p * (1 - p) / n
variance = p * (1 - p) / x.shape[0]
print(f"Variance: {variance.data[0]}")
print(f"Bias: {bias.data[0]}")
```
### Full estimator density
In general, computing the mean and variance of the estimator is insufficient to characterize the underlying probability density of $\hat{p}$, except if we knew that $\hat{p}$ were normally distributed. This is where the [*central limit theorem*](http://mathworld.wolfram.com/CentralLimitTheorem.html) comes in: the form of the estimator implies that $\hat{p}$ is normally distributed, but only *asymptotically*, which doesn't quantify how many samples $n$ we need. Unfortunately, in the real world, each sample may be precious. Hence, to write out the full density for $\hat{p}$, we first have to ask what is the probability that the estimator will equal a specific value such as
$$
\begin{equation}
\hat{p} = \frac{1}{n}\sum_{i=1}^n x_i = 0
\end{equation}
$$
This can only happen when $x_i=0$, $\forall i$. The corresponding probability can be computed from the density
$$
\begin{equation}
f(\mathbf{x},p)= \prod_{i=1}^n \left(p^{x_i} (1-p)^{1-x_i} \right)
\end{equation}
$$
$$
\begin{equation}
f\left(\sum_{i=1}^n x_i = 0,p\right)= \left(1-p\right)^n
\end{equation}
$$
Likewise, if $\lbrace x_i \rbrace$ contains exactly one value equal to one, then
$$
\begin{equation}
f\left(\sum_{i=1}^n x_i = 1,p\right)= n p \prod_{i=1}^{n-1} \left(1-p\right)
\end{equation}
$$
where the $n$ comes from the $n$ ways to pick one value equal to one from the $n$ elements $x_i$. Continuing this way, we can construct the entire density as
$$
\begin{equation}
f\left(\sum_{i=1}^n x_i = k,p\right)= \binom{n}{k} p^k (1-p)^{n-k}
\end{equation}
$$
where the term on the left is the binomial coefficient of $n$ things taken $k$ at a time. This is the binomial distribution and it's not the density for $\hat{p}$, but rather for $n\hat{p}$. We'll leave this as-is because it's easier to work with below. We just have to remember to keep track of the $n$ factor.
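We can check this density numerically: simulating many independent Bernoulli datasets and histogramming $n\hat{p} = \sum_i x_i$ should recover the binomial probabilities above (a small simulation sketch relying on `numpy` and `scipy`, with arbitrarily chosen $n$ and $p$):
```python
from scipy.stats import binom

n, p_true, n_trials = 100, 0.2, 20000
# Each row is one dataset of n Bernoulli(p_true) draws; the row sum is n * p_hat
counts = np.random.binomial(1, p_true, size=(n_trials, n)).sum(axis=1)

ks = np.arange(0, n + 1)
empirical = np.bincount(counts, minlength=n + 1) / n_trials
plt.figure(figsize=(10, 6))
plt.bar(ks, empirical, alpha=0.5, label='simulated $n\\hat{p}$')
plt.plot(ks, binom.pmf(ks, n, p_true), 'r.-', label='binomial pmf')
plt.xlim(5, 40); plt.legend();
```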
## Maximum Likelihood implementation
Maximum Likelihood Estimate (MLE) allows to perform typical statistical pattern classification tasks. In the cases where **probabilistic models and parameters are known**, the design of a Bayes' classifier is rather easy. However, in real applications, we are rarely given this information and this is where the MLE comes into play.
MLE still **requires partial knowledge** about the problem. We have to assume that the **model of the class-conditional densities is known** (usually Gaussian distributions). Hence, using MLE, we want to estimate the values of the parameters of a given distribution for the class-conditional densities, for example the *mean* and *variance*, assuming that the class-conditional densities are *normally* distributed (Gaussian) with
$$
\begin{equation}
p(\pmb x \mid y_i) \sim N(\mu, \sigma^2)
\end{equation}
$$
### Parameters known
Imagine that we want to classify data consisting of two-dimensional patterns, $\pmb{x} = [x_1, x_2] \in \mathbb{R}^{2}$ that could belong to 1 out of 3 classes $y_1,y_2,y_3$.
Let's assume the following information about the model where we use continuous univariate normal (Gaussian) model for the class-conditional densities
$$
\begin{equation}
p(\pmb x \mid y_j) \sim N(\pmb \mu, \Sigma) = \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} \exp \bigg[ -\frac{1}{2}(\pmb x - \pmb \mu)^t \Sigma^{-1}(\pmb x - \pmb \mu) \bigg]
\end{equation}
$$
Furthermore, we consider for this first problem that we know the distributions of the classes, ie. their mean and covariances.
$$
\begin{equation}
p([x_1, x_2]^t \mid y_1) ∼ N([0,0],3I), \\
p([x_1, x_2]^t \mid y_2) ∼ N([9,0],3I), \\
p([x_1, x_2]^t \mid y_3) ∼ N([6,6],4I),
\end{equation}
$$
Therefore, the means of the sample distributions for 2-dimensional features are defined as
$$
\begin{equation}
\pmb{\mu}_{\,1} = \bigg[ 0, 0 \bigg], \pmb{\mu}_{\,2} = \bigg[ 9, 0 \bigg], \pmb{\mu}_{\,3} = \bigg[ 6, 6 \bigg]
\end{equation}
$$
The **covariance matrices** for the statistically independent and identically distributed ('i.i.d') features
$$
\begin{array}{ccc}
\Sigma_1 = \bigg[
\begin{array}{cc}
3 & 0\\
0 & 3 \\
\end{array} \bigg],
\Sigma_2 = \bigg[
\begin{array}{cc}
3 & 0\\
0 & 3 \\
\end{array} \bigg],
\Sigma_3 = \bigg[
\begin{array}{cc}
4 & 0\\
0 & 4 \\
\end{array} \bigg]
\end{array}$$
Finally, we consider that all classes have an **equal prior probability**
$$p(y_1) = p(y_2) = p(y_3) = \frac{1}{3}$$
***
**Exercise**
1. Generate some data (samples from the multivariate Gaussians) following classes distributions
2. Plot the class-dependent data
***
```python
# 7.0 - Generate data with known parameters
nb_patterns = 100
# Generate random patterns for class 1
mu1 = np.array([0,0])
cov1 = np.array([[1,0],[0,1]])
# Generate random patterns for class 2
mu2 = np.array([9,0])
cov2 = np.array([[3,0],[0,3]])
# Generate random patterns for class 3
mu3 = np.array([6,6])
cov3 = np.array([[7,0],[0,7]])
######################
# YOUR CODE GOES HERE
######################
# Prepare concatenated versions of class properties
mu_vals = {};
mu_vals[0] = np.array([mu1]).transpose()
mu_vals[1] = np.array([mu2]).transpose()
mu_vals[2] = np.array([mu3]).transpose()
cov_vals = {}
cov_vals[0] = cov1
cov_vals[1] = cov2
cov_vals[2] = cov3
# Plot the corresponding data
plt.figure(figsize=(12, 8))
plt.scatter(x1samples[:, 0], x1samples[:, 1], s=60, marker='o', c=[0, 0.8, 0], edgecolors='w')
plt.scatter(x2samples[:, 0], x2samples[:, 1], s=60, marker='o', c=[0, 0, 0.8], edgecolors='w')
plt.scatter(x3samples[:, 0], x3samples[:, 1], s=60, marker='o', c=[0.8, 0, 0], edgecolors='w')
h = plot_gaussian_ellipsoid(mu1, cov1, 2, color=[0, 0.8, 0]);
h = plot_gaussian_ellipsoid(mu2, cov2, 2, color=[0, 0, 0.8]);
h = plot_gaussian_ellipsoid(mu3, cov3, 2, color=[0.8, 0, 0]);
plt.title('Training Dataset')
plt.ylabel('x2')
plt.xlabel('x1')
```
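One possible way to fill in the sampling placeholder above (a sketch that simply draws `nb_patterns` points per class from the multivariate Gaussians defined in the cell) is:
```python
# Draw nb_patterns samples per class from the corresponding multivariate Gaussians
x1samples = np.random.multivariate_normal(mu1, cov1, nb_patterns)
x2samples = np.random.multivariate_normal(mu2, cov2, nb_patterns)
x3samples = np.random.multivariate_normal(mu3, cov3, nb_patterns)
```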
Here, our **objective** is to maximize the discriminant function $g_i(\pmb x)$, which we define as the posterior probability, in order to perform a **minimum-error classification** (Bayes classifier).
$$
\begin{equation}
g_1(\pmb x) = p(y_1 \mid \pmb{x}), \quad g_2(\pmb{x}) = p(y_2 \mid \pmb{x}), \quad g_3(\pmb{x}) = p(y_3 \mid \pmb{x})
\end{equation}
$$
Our decision rule is then to choose the class $y_i$ for which $g_i(\pmb x)$ is maximal, where
$$
\begin{equation}
\quad g_i(\pmb{x}) = \pmb{x}^{\,t} \bigg( - \frac{1}{2} \Sigma_i^{-1} \bigg) \pmb{x} + \bigg( \Sigma_i^{-1} \pmb{\mu}_{\,i}\bigg)^t \pmb x + \bigg( -\frac{1}{2} \pmb{\mu}_{\,i}^{\,t} \Sigma_{i}^{-1} \pmb{\mu}_{\,i} -\frac{1}{2} ln(\left|\Sigma_i\right|)\bigg)
\end{equation}
$$
***
**Exercise**
1. Implement the discriminant function
2. Implement the decision rule (classifier)
3. Classify the data generated in the previous exercise
4. Plot the confusion matrix
5. Calculate the empirical error
***
```python
import operator
def discriminant_function(x_vec, mu_vec, cov_mat):
# Calculates the value of the discriminant function for a dx1 dimensional
# sample given the covariance matrix and mean vector.
#
# x_vec: A dx1 dimensional numpy array representing the sample.
# cov_mat: numpy array of the covariance matrix.
# mu_vec: dx1 dimensional numpy array of the sample mean.
#
# Returns a float value g as result of the discriminant function.
######################
# YOUR CODE GOES HERE
######################
return float(g)
def classify_data(x_vec, g, mu_vecs, cov_mats):
# Classifies an input sample into 1 out of 3 classes determined by
# maximizing the discriminant function g_i().
# Keyword arguments:
# x_vec: A dx1 dimensional numpy array representing the sample.
# g: The discriminant function.
# mu_vecs: A list of mean vectors as input for g.
# cov_mats: A list of covariance matrices as input for g.
#
# Returns the max probability and class id.
######################
# YOUR CODE GOES HERE
######################
return maxVal, maxId
x1classes = np.zeros((nb_patterns, 1))
conf_matrix = np.zeros((3, 3))
for i in range(nb_patterns):
x, g = classify_data(x1samples[i, :], discriminant_function, mu_vals, cov_vals);
x1classes[i] = g;
conf_matrix[0, g] = conf_matrix[0, g] + 1;
x2classes = np.zeros((nb_patterns, 1))
for i in range(nb_patterns):
x, g = classify_data(x2samples[i, :], discriminant_function, mu_vals, cov_vals);
x2classes[i] = g;
conf_matrix[1, g] = conf_matrix[1, g] + 1;
x3classes = np.zeros((nb_patterns, 1))
for i in range(nb_patterns):
x, g = classify_data(x3samples[i, :], discriminant_function, mu_vals, cov_vals);
x3classes[i] = g;
conf_matrix[2, g] = conf_matrix[2, g] + 1;
print('%16s \t %s \t %s \t %s\n' % (' ', 'class 1', 'class 2', 'class 3'));
print('%16s \t %f \t %f \t %f\n' % ('class 1', conf_matrix[0, 0], conf_matrix[0, 1], conf_matrix[0, 2]));
print('%16s \t %f \t %f \t %f\n' % ('class 2', conf_matrix[1, 0], conf_matrix[1, 1], conf_matrix[1, 2]));
print('%16s \t %f \t %f \t %f\n' % ('class 3', conf_matrix[2, 0], conf_matrix[2, 1], conf_matrix[2, 2]));
```
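A possible completion of the two placeholders above (a sketch using the centered form $-\frac{1}{2}(\pmb x - \pmb\mu)^t\Sigma^{-1}(\pmb x - \pmb\mu) - \frac{1}{2}\ln|\Sigma|$, which yields the same decisions as the expanded $g_i(\pmb x)$ given earlier since the priors are equal):
```python
def discriminant_function(x_vec, mu_vec, cov_mat):
    # Quadratic discriminant for a Gaussian class-conditional density
    cov_inv = np.linalg.inv(cov_mat)
    diff = np.array([x_vec]).transpose() - mu_vec        # d x 1 column vector
    g = -0.5 * diff.T.dot(cov_inv).dot(diff) - 0.5 * np.log(np.linalg.det(cov_mat))
    return float(g)

def classify_data(x_vec, g, mu_vecs, cov_mats):
    # Evaluate g for every class and return the best score together with its class index
    scores = {i: g(x_vec, mu_vecs[i], cov_mats[i]) for i in mu_vecs}
    max_id, max_val = max(scores.items(), key=operator.itemgetter(1))
    return max_val, max_id
```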
### Unknown parameters
In contrast to the previous case, let us assume that we only know the number of parameters for the class conditional densities $p (\pmb x \mid y_i)$, and we want to use a Maximum Likelihood Estimation (MLE) to estimate the quantities of these parameters from the training data.
Given the information about the our model (the data is normal distributed) the 2 parameters to be estimated for each class are $\pmb \mu_i$ and $\pmb \Sigma_i$, which are summarized by the parameter vector
$$
\begin{equation}
\pmb \theta_i = \bigg[ \begin{array}{c}
\ \theta_{i1} \\
\ \theta_{i2} \\
\end{array} \bigg]=
\bigg[ \begin{array}{c}
\pmb \mu_i \\
\pmb \Sigma_i \\
\end{array} \bigg]
\end{equation}
$$
For the Maximum Likelihood Estimate (MLE), we assume that we have a set of samples $D = \left\{ \pmb x_1, \pmb x_2,..., \pmb x_n \right\} $ that are *i.i.d.* (independent and identically distributed, drawn with probability $p(\pmb x \mid y_i, \pmb \theta_i) )$. Thus, we can **work with each class separately** and omit the class labels, so that we write the probability density as $p(\pmb x \mid \pmb \theta)$
**Likelihood of $ \pmb \theta $**
Thus, the probability of observing $D = \left\{ \pmb x_1, \pmb x_2,..., \pmb x_n \right\} $ is
$$
\begin{equation}
p(D \mid \pmb \theta ) = p(\pmb x_1 \mid \pmb \theta ) \cdot p(\pmb x_2 \mid \pmb \theta ) \cdot ... p(\pmb x_n \mid \pmb \theta ) = \prod_{k=1}^{n} p(\pmb x_k \pmb \mid \pmb \theta )
\end{equation}
$$
where $p(D \mid \pmb \theta )$ is also called the ***likelihood of $\pmb \theta$***.
We know that $p([x_1,x_2]^t) ∼ N(\pmb \mu,\pmb \Sigma) $ (remember that we dropped the class labels, since we are working with each class separately). The multivariate normal density is given as
$$
\begin{equation}
\quad \quad p(\pmb x) = \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} exp \bigg[ -\frac{1}{2}(\pmb x - \pmb \mu)^t \Sigma^{-1}(\pmb x - \pmb \mu) \bigg]
\end{equation}
$$
Therefore, we obtain
$$
\begin{equation}
p(D \mid \pmb \theta ) = \prod_{k=1}^{n} p(\pmb x_k \mid \pmb \theta ) = \prod_{k=1}^{n} \frac{1}{(2\pi)^{d/2} |\Sigma|^{1/2}} \exp \bigg[ -\frac{1}{2}(\pmb x_k - \pmb \mu)^t \Sigma^{-1}(\pmb x_k - \pmb \mu) \bigg]
\end{equation}
$$
and the log of the multivariate density
$$
\begin{equation}
l(\pmb\theta) = \sum\limits_{k=1}^{n} \bigg[ - \frac{1}{2}(\pmb x_k - \pmb \mu)^t \pmb \Sigma^{-1} (\pmb x_k - \pmb \mu) - \frac{d}{2} \ln 2\pi - \frac{1}{2} \ln |\pmb\Sigma| \bigg]
\end{equation}
$$
In order to obtain the MLE $\boldsymbol{\hat{\theta}}$, we maximize $l (\pmb \theta)$, which can be done via differentiation
$$
\begin{equation}
\nabla_{\pmb \theta} \equiv \begin{bmatrix}
\frac{\partial }{\partial \theta_1} \\
\frac{\partial }{\partial \theta_2}
\end{bmatrix} = \begin{bmatrix}
\frac{\partial }{\partial \pmb \mu} \\
\frac{\partial }{\partial \pmb \sigma}
\end{bmatrix}
\end{equation}
$$
$$
\begin{equation}
\nabla_{\pmb \theta}\, l = \sum\limits_{k=1}^n \nabla_{\pmb \theta} \ln p(\pmb x_k \mid \pmb \theta) = 0
\end{equation}
$$
***
**Exercise**
1. Perform the differentiation for $\frac{\partial \mathcal{L}}{\partial \pmb \mu}$ to obtain the estimator of the mean $\hat{\mu}$
2. Perform the differentiation for $\frac{\partial \mathcal{L}}{\partial \pmb \Sigma}$ to obtain the estimator of the covariance matrix $\hat{\Sigma}$
3. Implement the two estimators as functions based on a set of data
4. Apply these estimators (MLE) in order to obtain estimated parameters
5. Re-compute the classification errors on the previous dataset
***
```python
########################################################
# YOUR CODE GOES HERE (Perform mu estimates)
########################################################
# mu_est_1 = ?
# mu_est_2 = ?
# mu_est_3 = ?
print('%16s \t %s \t %s \t %s \t %s \t %s \t %s' % ('', 'mu1_1 ', 'mu1_2 ', 'mu2_1 ', 'mu2_2 ', 'mu3_1 ', 'mu3_2 '));
print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('MLE', mu_est_1[0], mu_est_1[1], mu_est_2[0], mu_est_2[1], mu_est_3[0], mu_est_3[1]));
print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('Truth', mu1[0], mu1[1], mu2[0], mu2[1], mu3[0], mu3[1]));
def mle_covariance(x_samples, mu_est):
# Calculates the Maximum Likelihood Estimate for the covariance matrix.
#
# Keyword Arguments:
# x_samples: np.array of the samples for 1 class, n x d dimensional
# mu_est: np.array of the mean MLE, d x 1 dimensional
#
# Returns the MLE for the covariance matrix as d x d numpy array.
######################
# YOUR CODE GOES HERE
######################
return cov_est_
cov_est_1 = mle_covariance(x1samples, np.array([mu_est_1]).transpose());
cov_est_2 = mle_covariance(x2samples, np.array([mu_est_2]).transpose());
cov_est_3 = mle_covariance(x3samples, np.array([mu_est_3]).transpose());
print('%16s \t %s \t %s \t %s \t %s \t %s \t %s' % ('', 'cov1_1 ', 'cov1_2 ', 'cov2_1 ', 'cov2_2 ', 'cov3_1 ', 'cov3_2 '));
print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('MLE', cov_est_1[0, 0], cov_est_1[1, 0], cov_est_2[0, 0], cov_est_2[1, 0], cov_est_3[0, 0], cov_est_3[1, 0]));
print('%16s \t %f \t %f \t %f \t %f \t %f \t %f' % ('Truth', cov1[0, 0], cov1[1, 0], cov2[0, 0], cov2[1, 0], cov3[0, 0], cov3[1, 0]));
mu_estimates = {};
mu_estimates[0] = np.array([mu_est_1]).transpose()
mu_estimates[1] = np.array([mu_est_2]).transpose()
mu_estimates[2] = np.array([mu_est_3]).transpose()
cov_estimates = {};
cov_estimates[0] = cov_est_1;
cov_estimates[1] = cov_est_2;
cov_estimates[2] = cov_est_3;
# Plot the corresponding data
plt.figure(figsize=(12, 8))
plt.scatter(x1samples[:,0], x1samples[:,1], s=40, marker='o', c=[0, 0.8, 0], edgecolors='w');
plt.scatter(x2samples[:,0], x2samples[:,1], s=40, marker='s', c=[0, 0, 0.8], edgecolors='w');
plt.scatter(x3samples[:,0], x3samples[:,1], s=40, marker='^', c=[0.8, 0, 0], edgecolors='w');
h = plot_gaussian_ellipsoid(mu1, cov1, 2, color=[0.2, 0.6, 0.2]);
h = plot_gaussian_ellipsoid(mu2, cov2, 2, color=[0.2, 0.2, 0.6]);
h = plot_gaussian_ellipsoid(mu3, cov3, 2, color=[0.6, 0.2, 0.2]);
h = plot_gaussian_ellipsoid(mu_est_1, cov_est_1, 2, color=[0.1, 0.99, 0.1]);
h = plot_gaussian_ellipsoid(mu_est_2, cov_est_2, 2, color=[0.1, 0.1, 0.99]);
h = plot_gaussian_ellipsoid(mu_est_3, cov_est_3, 2, color=[0.99, 0.1, 0.1]);
plt.title('Comparing estimated MLE Gaussians');
```
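One possible completion of the placeholders in the cell above (a sketch of the standard MLE formulas $\hat{\pmb\mu} = \frac{1}{n}\sum_k \pmb x_k$ and $\hat{\pmb\Sigma} = \frac{1}{n}\sum_k (\pmb x_k - \hat{\pmb\mu})(\pmb x_k - \hat{\pmb\mu})^t$, with names matching the skeleton):
```python
# MLE of the mean: the per-class sample average
mu_est_1 = np.mean(x1samples, axis=0)
mu_est_2 = np.mean(x2samples, axis=0)
mu_est_3 = np.mean(x3samples, axis=0)

def mle_covariance(x_samples, mu_est):
    # MLE of the covariance: average outer product of the centered samples (1/n, not 1/(n-1))
    n = x_samples.shape[0]
    centered = x_samples - mu_est.transpose()            # mu_est is d x 1, samples are n x d
    return centered.T.dot(centered) / n
```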
## Audio source separation
The maximum likelihood estimator (MLE) is widely used in practical signal modeling and we can show that the MLE is equivalent to the least squares estimator for a wide class of problems, including well resolved sinusoids in white noise. We are going to consider a model consisting of a complex sinusoid in additive white (complex) noise:
$$
\displaystyle x(n) = {\cal A}e^{j\omega_{0} n} + v(n)
$$
Here, $ {\cal A}= A e^{j\phi} $ is the complex amplitude of the sinusoid, and $ v(n) $ is white noise that we assume to be Gaussian distributed with zero mean. Hence, we assume that its probability density function is given by
$$
\displaystyle p_{v}(\nu) = \frac{1}{\pi \sigma_{v}^2} e^{-\frac{\vert\nu\vert^2}{\sigma_{v}^2}}.
$$
We express the zero-mean Gaussian assumption by writing
$$
\displaystyle v(n) \sim {\cal N}(0,\sigma_{v}^2)
$$
The parameter $ \sigma_{v}^2 $ is the *variance* of the random process $ v(n) $ , and $ \sigma_{v} $ is its standard deviation. It turns out that when Gaussian random variables $ v(n) $ are uncorrelated (i.e., when $ v(n) $ is white noise), they are also independent. This means that the probability of observing particular values of $ v(n) $ and $ v(m) $ is given by the product of their respective probabilities. We will now use this fact to compute an explicit probability for observing any data sequence $ x(n) $
Since the sinusoidal part of our signal model, $ {\cal A}e^{j\omega_{0}n}$, is deterministic (i.e., it does not include any random components), it may be treated as the time-varying mean of a Gaussian random process $ x(n) $. That is, our signal model can be rewritten as
$$
\displaystyle x(n) \sim {\cal N}({\cal A}e^{j\omega_{0} n},\sigma_{v}^2)
$$
and the probability density function for the whole set of observations $ x(n) $ , $ n=0,1,2,\ldots,N-1 $ is given by
$$
\displaystyle p(x) = p[x(0)] p[x(1)]\cdots p[x(N-1)] = \left(\frac{1}{\pi \sigma_v^2}\right)^N e^{-\frac{1}{\sigma_v^2}\sum_{n=0}^{N-1} \left\vert x(n) - {\cal A}e^{j\omega_0 n}\right\vert^2}
$$
Thus, given the noise variance $ \sigma_v^2 $ and the three sinusoidal parameters $ A,\phi,\omega_0 $ (remember that $ {\cal A}= A e^{j\phi} $ ), we can compute the relative probability of any observed data samples $ x(n) $ .
We can generalize this approach in order to perform a complete blind audio source separation algorithm, as detailed in the following paper:
Févotte, C., & Cardoso, J. F. "Maximum likelihood approach for blind audio source separation using time-frequency Gaussian source models". *IEEE Workshop on Applications of Signal Processing to Audio and Acoustics*, 2005. (pp. 78-81). IEEE.
You can try to implement this by relying on the [full paper](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.640.6981&rep=rep1&type=pdf) that details this method.
***
**Exercise**
1. Implement the single sinusoid extraction
2. Apply this approach to multiple sinusoids
3. Follow the paper to implement blind source separation
***
```python
```
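As a starting point for the first item, here is a sketch that assumes the frequency $\omega_0$ is known and estimates only the complex amplitude $\mathcal{A}$; under the white Gaussian noise model above, this projection onto the complex exponential is both the least-squares and the maximum likelihood solution:
```python
# Synthetic data: one complex sinusoid in complex white Gaussian noise
N, omega0 = 256, 0.7
A_true = 1.5 * np.exp(1j * 0.8)                  # true complex amplitude A e^{j phi}
n = np.arange(N)
noise = 0.3 * (np.random.randn(N) + 1j * np.random.randn(N))
x = A_true * np.exp(1j * omega0 * n) + noise

# ML / least-squares estimate of the complex amplitude for a known frequency:
# A_hat = (1/N) sum_n x(n) e^{-j omega0 n}
e = np.exp(1j * omega0 * n)
A_hat = np.vdot(e, x) / N
print("estimated amplitude:", np.abs(A_hat), "- estimated phase:", np.angle(A_hat))
```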
|
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
ν : Measure α
hν : ν ≪ μ
s : Set α
f : α → Set (Set α)
h : ∀ (x : α), x ∈ s → f x ⊆ setsAt v x
h' : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑ν (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
rcases v.covering s f h h' with ⟨t, ts, disj, mem_f, hμ⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
ν : Measure α
hν : ν ≪ μ
s : Set α
f : α → Set (Set α)
h : ∀ (x : α), x ∈ s → f x ⊆ setsAt v x
h' : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
t : Set (α × Set α)
ts : ∀ (p : α × Set α), p ∈ t → p.fst ∈ s
disj : PairwiseDisjoint t fun p => p.snd
mem_f : ∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst
hμ : ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑ν (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
exact ⟨t, ts, disj, mem_f, hν hμ⟩
[GOAL]
α : Type u_1
inst✝¹ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
f : α → Set (Set α)
s : Set α
h : FineSubfamilyOn v f s
inst✝ : SecondCountableTopology α
ρ : Measure α
hρ : ρ ≪ μ
⊢ s ⊆
(s \ ⋃ (p : α × Set α) (_ : p ∈ FineSubfamilyOn.index h), FineSubfamilyOn.covering h p) ∪
⋃ (p : α × Set α) (_ : p ∈ FineSubfamilyOn.index h), FineSubfamilyOn.covering h p
[PROOFSTEP]
simp only [subset_union_left, diff_union_self]
[GOAL]
α : Type u_1
inst✝¹ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
f : α → Set (Set α)
s : Set α
h : FineSubfamilyOn v f s
inst✝ : SecondCountableTopology α
ρ : Measure α
hρ : ρ ≪ μ
⊢ ↑↑ρ (s \ ⋃ (p : α × Set α) (_ : p ∈ FineSubfamilyOn.index h), FineSubfamilyOn.covering h p) +
↑↑ρ (⋃ (p : α × Set α) (_ : p ∈ FineSubfamilyOn.index h), FineSubfamilyOn.covering h p) =
∑' (p : ↑(FineSubfamilyOn.index h)), ↑↑ρ (FineSubfamilyOn.covering h ↑p)
[PROOFSTEP]
rw [hρ h.measure_diff_biUnion, zero_add,
measure_biUnion h.index_countable h.covering_disjoint fun x hx => h.measurableSet_u hx]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
⊢ MeasurableSet a
[PROOFSTEP]
cases' ha with ha ha
[GOAL]
case inl
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ setsAt v x
⊢ MeasurableSet a
case inr
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}
⊢ MeasurableSet a
[PROOFSTEP]
exacts [v.MeasurableSet' _ _ ha, ha.1]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
⊢ Set.Nonempty (interior a)
[PROOFSTEP]
cases' ha with ha ha
[GOAL]
case inl
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ setsAt v x
⊢ Set.Nonempty (interior a)
case inr
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
a : Set α
ha : a ∈ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}
⊢ Set.Nonempty (interior a)
[PROOFSTEP]
exacts [v.nonempty_interior _ _ ha, ha.2.1]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
⊢ ∀ (x : α) (ε : ℝ),
ε > 0 →
∃ y,
y ∈ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x ∧
y ⊆ closedBall x ε
[PROOFSTEP]
intro x ε εpos
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
ε : ℝ
εpos : ε > 0
⊢ ∃ y,
y ∈ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x ∧
y ⊆ closedBall x ε
[PROOFSTEP]
rcases v.Nontrivial x ε εpos with ⟨a, ha, h'a⟩
[GOAL]
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
x : α
ε : ℝ
εpos : ε > 0
a : Set α
ha : a ∈ setsAt v x
h'a : a ⊆ closedBall x ε
⊢ ∃ y,
y ∈ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x ∧
y ⊆ closedBall x ε
[PROOFSTEP]
exact ⟨a, mem_union_left _ ha, h'a⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
⊢ ∀ (s : Set α) (f : α → Set (Set α)),
(∀ (x : α),
x ∈ s →
f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x) →
(∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε) →
∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
intro s f fset ffine
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
let g : α → Set (Set α) := fun x => f x ∩ v.setsAt x
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
have : ∀ x ∈ s, ∀ ε : ℝ, ε > 0 → ∃ (a : Set α), a ∈ g x ∧ a ⊆ closedBall x ε :=
by
intro x hx ε εpos
obtain ⟨a, af, ha⟩ : ∃ a ∈ f x, a ⊆ closedBall x (min ε δ)
exact ffine x hx (min ε δ) (lt_min εpos δpos)
rcases fset x hx af with (h'a | h'a)
· exact ⟨a, ⟨af, h'a⟩, ha.trans (closedBall_subset_closedBall (min_le_left _ _))⟩
· refine' False.elim (h'a.2.2 _)
exact ha.trans (closedBall_subset_closedBall (min_le_right _ _))
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
⊢ ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
intro x hx ε εpos
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
⊢ ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
obtain ⟨a, af, ha⟩ : ∃ a ∈ f x, a ⊆ closedBall x (min ε δ)
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
⊢ ∃ a, a ∈ f x ∧ a ⊆ closedBall x (min ε δ)
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
af : a ∈ f x
ha : a ⊆ closedBall x (min ε δ)
⊢ ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
exact ffine x hx (min ε δ) (lt_min εpos δpos)
[GOAL]
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
af : a ∈ f x
ha : a ⊆ closedBall x (min ε δ)
⊢ ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
rcases fset x hx af with (h'a | h'a)
[GOAL]
case intro.intro.inl
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
af : a ∈ f x
ha : a ⊆ closedBall x (min ε δ)
h'a : a ∈ setsAt v x
⊢ ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
exact ⟨a, ⟨af, h'a⟩, ha.trans (closedBall_subset_closedBall (min_le_left _ _))⟩
[GOAL]
case intro.intro.inr
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
af : a ∈ f x
ha : a ⊆ closedBall x (min ε δ)
h'a : a ∈ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}
⊢ ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
refine' False.elim (h'a.2.2 _)
[GOAL]
case intro.intro.inr
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
af : a ∈ f x
ha : a ⊆ closedBall x (min ε δ)
h'a : a ∈ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}
⊢ a ⊆ closedBall x δ
[PROOFSTEP]
exact ha.trans (closedBall_subset_closedBall (min_le_right _ _))
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
this : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
rcases v.covering s g (fun x _ => inter_subset_right _ _) this with ⟨t, ts, tdisj, tg, μt⟩
[GOAL]
case intro.intro.intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
δ : ℝ
δpos : 0 < δ
s : Set α
f : α → Set (Set α)
fset :
∀ (x : α),
x ∈ s → f x ⊆ (fun x => setsAt v x ∪ {a | MeasurableSet a ∧ Set.Nonempty (interior a) ∧ ¬a ⊆ closedBall x δ}) x
ffine : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ f x ∧ a ⊆ closedBall x ε
g : α → Set (Set α) := fun x => f x ∩ setsAt v x
this : ∀ (x : α), x ∈ s → ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ g x ∧ a ⊆ closedBall x ε
t : Set (α × Set α)
ts : ∀ (p : α × Set α), p ∈ t → p.fst ∈ s
tdisj : PairwiseDisjoint t fun p => p.snd
tg : ∀ (p : α × Set α), p ∈ t → p.snd ∈ g p.fst
μt : ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
⊢ ∃ t,
(∀ (p : α × Set α), p ∈ t → p.fst ∈ s) ∧
(PairwiseDisjoint t fun p => p.snd) ∧
(∀ (p : α × Set α), p ∈ t → p.snd ∈ f p.fst) ∧ ↑↑μ (s \ ⋃ (p : α × Set α) (_ : p ∈ t), p.snd) = 0
[PROOFSTEP]
exact ⟨t, ts, tdisj, fun p hp => (tg p hp).1, μt⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ s ∈ filterAt v x ↔ ∃ ε, ε > 0 ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ∈ s
[PROOFSTEP]
simp only [filterAt, exists_prop, gt_iff_lt]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ s ∈ ⨅ (ε : ℝ) (_ : ε ∈ Ioi 0), 𝓟 {a | a ∈ setsAt v x ∧ a ⊆ closedBall x ε} ↔
∃ ε, 0 < ε ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ∈ s
[PROOFSTEP]
rw [mem_biInf_of_directed]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ (∃ i, i ∈ Ioi 0 ∧ s ∈ 𝓟 {a | a ∈ setsAt v x ∧ a ⊆ closedBall x i}) ↔
∃ ε, 0 < ε ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ∈ s
[PROOFSTEP]
simp only [subset_def, and_imp, exists_prop, mem_sep_iff, mem_Ioi, mem_principal]
[GOAL]
case h
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ DirectedOn ((fun ε => 𝓟 {a | a ∈ setsAt v x ∧ a ⊆ closedBall x ε}) ⁻¹'o fun x x_1 => x ≥ x_1) (Ioi 0)
[PROOFSTEP]
simp only [DirectedOn, exists_prop, ge_iff_le, le_principal_iff, mem_Ioi, Order.Preimage, mem_principal]
[GOAL]
case h
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ ∀ (x_1 : ℝ),
0 < x_1 →
∀ (y : ℝ),
0 < y →
∃ z,
0 < z ∧
{a | a ∈ setsAt v x ∧ a ⊆ closedBall x z} ⊆ {a | a ∈ setsAt v x ∧ a ⊆ closedBall x x_1} ∧
{a | a ∈ setsAt v x ∧ a ⊆ closedBall x z} ⊆ {a | a ∈ setsAt v x ∧ a ⊆ closedBall x y}
[PROOFSTEP]
intro x hx y hy
[GOAL]
case h
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x✝ : α
s : Set (Set α)
x : ℝ
hx : 0 < x
y : ℝ
hy : 0 < y
⊢ ∃ z,
0 < z ∧
{a | a ∈ setsAt v x✝ ∧ a ⊆ closedBall x✝ z} ⊆ {a | a ∈ setsAt v x✝ ∧ a ⊆ closedBall x✝ x} ∧
{a | a ∈ setsAt v x✝ ∧ a ⊆ closedBall x✝ z} ⊆ {a | a ∈ setsAt v x✝ ∧ a ⊆ closedBall x✝ y}
[PROOFSTEP]
refine'
⟨min x y, lt_min hx hy, fun a ha => ⟨ha.1, ha.2.trans (closedBall_subset_closedBall (min_le_left _ _))⟩, fun a ha =>
⟨ha.1, ha.2.trans (closedBall_subset_closedBall (min_le_right _ _))⟩⟩
[GOAL]
case ne
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
s : Set (Set α)
⊢ Set.Nonempty (Ioi 0)
[PROOFSTEP]
exact ⟨(1 : ℝ), mem_Ioi.2 zero_lt_one⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
⊢ NeBot (filterAt v x)
[PROOFSTEP]
simp only [neBot_iff, ← empty_mem_iff_bot, mem_filterAt_iff, not_exists, exists_prop, mem_empty_iff_false, and_true_iff,
gt_iff_lt, not_and, Ne.def, not_false_iff, not_forall]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
⊢ ∀ (x_1 : ℝ), 0 < x_1 → ∃ x_2, x_2 ∈ setsAt v x ∧ x_2 ⊆ closedBall x x_1
[PROOFSTEP]
intro ε εpos
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
ε : ℝ
εpos : 0 < ε
⊢ ∃ x_1, x_1 ∈ setsAt v x ∧ x_1 ⊆ closedBall x ε
[PROOFSTEP]
obtain ⟨w, w_sets, hw⟩ : ∃ w ∈ v.setsAt x, w ⊆ closedBall x ε := v.Nontrivial x ε εpos
[GOAL]
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
ε : ℝ
εpos : 0 < ε
w : Set α
w_sets : w ∈ setsAt v x
hw : w ⊆ closedBall x ε
⊢ ∃ x_1, x_1 ∈ setsAt v x ∧ x_1 ⊆ closedBall x ε
[PROOFSTEP]
exact ⟨w, w_sets, hw⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
⊢ ∀ᶠ (a : Set α) in filterAt v x, a ∈ setsAt v x
[PROOFSTEP]
simp (config := { contextual := true }) only [eventually_filterAt_iff, exists_prop, and_true_iff, gt_iff_lt,
imp_true_iff]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
⊢ ∃ ε, 0 < ε
[PROOFSTEP]
exact ⟨1, zero_lt_one⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
ε : ℝ
hε : 0 < ε
⊢ ∀ᶠ (a : Set α) in filterAt v x, a ⊆ closedBall x ε
[PROOFSTEP]
simp only [v.eventually_filterAt_iff]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
ε : ℝ
hε : 0 < ε
⊢ ∃ ε_1, ε_1 > 0 ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε_1 → a ⊆ closedBall x ε
[PROOFSTEP]
exact ⟨ε, hε, fun a _ ha' => ha'⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
ι : Type u_2
l : Filter ι
f : ι → Set α
x : α
⊢ Tendsto f l (filterAt v x) ↔
(∀ᶠ (i : ι) in l, f i ∈ setsAt v x) ∧ ∀ (ε : ℝ), ε > 0 → ∀ᶠ (i : ι) in l, f i ⊆ closedBall x ε
[PROOFSTEP]
refine'
⟨fun H =>
⟨H.eventually <| v.eventually_filterAt_mem_sets x, fun ε hε =>
H.eventually <| v.eventually_filterAt_subset_closedBall x hε⟩,
fun H s hs => (_ : ∀ᶠ i in l, f i ∈ s)⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
ι : Type u_2
l : Filter ι
f : ι → Set α
x : α
H : (∀ᶠ (i : ι) in l, f i ∈ setsAt v x) ∧ ∀ (ε : ℝ), ε > 0 → ∀ᶠ (i : ι) in l, f i ⊆ closedBall x ε
s : Set (Set α)
hs : s ∈ filterAt v x
⊢ ∀ᶠ (i : ι) in l, f i ∈ s
[PROOFSTEP]
obtain ⟨ε, εpos, hε⟩ := v.mem_filterAt_iff.mp hs
[GOAL]
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
ι : Type u_2
l : Filter ι
f : ι → Set α
x : α
H : (∀ᶠ (i : ι) in l, f i ∈ setsAt v x) ∧ ∀ (ε : ℝ), ε > 0 → ∀ᶠ (i : ι) in l, f i ⊆ closedBall x ε
s : Set (Set α)
hs : s ∈ filterAt v x
ε : ℝ
εpos : ε > 0
hε : ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ∈ s
⊢ ∀ᶠ (i : ι) in l, f i ∈ s
[PROOFSTEP]
filter_upwards [H.1, H.2 ε εpos] with i hi hiε using hε _ hi hiε
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
⊢ ∀ᶠ (a : Set α) in filterAt v x, MeasurableSet a
[PROOFSTEP]
filter_upwards [v.eventually_filterAt_mem_sets x] with _ ha using v.MeasurableSet' _ _ ha
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
P : Set α → Prop
⊢ (∃ᶠ (a : Set α) in filterAt v x, P a) ↔ ∀ (ε : ℝ), ε > 0 → ∃ a, a ∈ setsAt v x ∧ a ⊆ closedBall x ε ∧ P a
[PROOFSTEP]
simp only [Filter.Frequently, eventually_filterAt_iff, not_exists, exists_prop, not_and, Classical.not_not, not_forall]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
o : Set α
hx : o ∈ 𝓝 x
⊢ ∀ᶠ (a : Set α) in filterAt v x, a ⊆ o
[PROOFSTEP]
rw [eventually_filterAt_iff]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
o : Set α
hx : o ∈ 𝓝 x
⊢ ∃ ε, ε > 0 ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ⊆ o
[PROOFSTEP]
rcases Metric.mem_nhds_iff.1 hx with ⟨ε, εpos, hε⟩
[GOAL]
case intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v : VitaliFamily μ
x : α
o : Set α
hx : o ∈ 𝓝 x
ε : ℝ
εpos : ε > 0
hε : ball x ε ⊆ o
⊢ ∃ ε, ε > 0 ∧ ∀ (a : Set α), a ∈ setsAt v x → a ⊆ closedBall x ε → a ⊆ o
[PROOFSTEP]
exact ⟨ε / 2, half_pos εpos, fun a _ ha => ha.trans ((closedBall_subset_ball (half_lt_self εpos)).trans hε)⟩
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v✝ v : VitaliFamily μ
f : α → Set (Set α)
s : Set α
h : ∀ (x : α), x ∈ s → ∃ᶠ (a : Set α) in filterAt v x, a ∈ f x
⊢ FineSubfamilyOn v f s
[PROOFSTEP]
intro x hx ε εpos
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v✝ v : VitaliFamily μ
f : α → Set (Set α)
s : Set α
h : ∀ (x : α), x ∈ s → ∃ᶠ (a : Set α) in filterAt v x, a ∈ f x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
⊢ ∃ a, a ∈ setsAt v x ∩ f x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
obtain ⟨a, av, ha, af⟩ : ∃ (a : Set α), a ∈ v.setsAt x ∧ a ⊆ closedBall x ε ∧ a ∈ f x :=
v.frequently_filterAt_iff.1 (h x hx) ε εpos
[GOAL]
case intro.intro.intro
α : Type u_1
inst✝ : MetricSpace α
m0 : MeasurableSpace α
μ : Measure α
v✝ v : VitaliFamily μ
f : α → Set (Set α)
s : Set α
h : ∀ (x : α), x ∈ s → ∃ᶠ (a : Set α) in filterAt v x, a ∈ f x
x : α
hx : x ∈ s
ε : ℝ
εpos : ε > 0
a : Set α
av : a ∈ setsAt v x
ha : a ⊆ closedBall x ε
af : a ∈ f x
⊢ ∃ a, a ∈ setsAt v x ∩ f x ∧ a ⊆ closedBall x ε
[PROOFSTEP]
exact ⟨a, ⟨av, af⟩, ha⟩
|
library(reticulate)
library(cowplot)
library(ggbeeswarm)
source("config.r")
np <- import("numpy")
data <- np$load(
paste0("./outputs/camacan_preproc_impact.npy"),
allow_pickle = T)
# unpack
data <- data[[1]]
# make the baseline appear in both the SSP-ER and SSS series
# by concatenating the first entry [1]
data <- c(data[1], data)
# sort by SSP/SSS series
sorter <- c(c(1, 3, 4, 5, 6, 7), c(2, 8, 9, 10, 11, 12))
data <- data[sorter]
preproc_experiments <- names(data)
exp_seq <- seq_len(length(preproc_experiments))
exp_structure <- do.call(rbind, lapply(exp_seq, function(idx){
x <- preproc_experiments[[idx]]
x <- gsub("ssp_", "ssp-", x)
x <- gsub("do_ar", "do-ar", x)
xlist <- strsplit(x, "_")
keys <- sapply(xlist, function(y) {
substring(y, 1, nchar(y) - 2)
})
flags <- sapply(xlist, function(y) {
as.logical(as.numeric(substring(y, nchar(y), nchar(y))))
})
out <- data.frame(t(flags))
names(out) <- keys
out[['id']] <- factor(idx)
return(out)
}))
exp_structure$sub_id <- factor(rep(1:6, times = 2))
exp_structure$series <- factor(rep(c("SSP", "SSS"), each = 6))
n_folds <- length(data[[11]][[1]])
model_names <- c('riemann_53', 'riemann', 'spoc', 'spoc_67', 'log-diag')
data_results_long <- do.call(rbind, lapply(exp_seq, function(idx) {
x <- data[[idx]]
out <- data.frame(
score = do.call("c", sapply(model_names, function(name) x[name])),
estimator = factor(rep(model_names, each = n_folds)))
out[['id']] <- factor(idx)
out[['sub_id']] <- exp_structure$sub_id[idx]
out[['series']] <- exp_structure$series[idx]
return(out)}
))
data_plot <- merge(exp_structure, data_results_long, by = "id")
my_cl <- function(x){
out <- data.frame(
y = mean(x),
ymin = quantile(x, probs = c(0.05)),
ymax = quantile(x, probs = c(0.95)))
return(out)
}
my_colors <- setNames(
with(color_cats, c(blue, vermillon)),
c('SSS', 'SSP')
)
proc_label <- c('env', 'eog', 'ecg', 'eo/cg', 'rej')
raw_means <- aggregate(score ~ estimator,
data = subset(data_results_long, sub_id == 1),
FUN = mean)
raw_means$label <- 'raw'
# raw_means$series <- c("SSP", "SSS")
# data_results_long$raw_score <- factor(data_results_long$raw_score)
data_plot <- subset(
data_results_long, sub_id != 1 & !estimator %in% c("riemann", "spoc"))
data_plot$estimator <- factor(
data_plot$estimator, levels = unique(data_plot$estimator))
data_plot$raw_score <- raw_means$score[3]
data_plot[data_plot$estimator == 'log-diag',]$raw_score <- raw_means$score[1]
data_plot[data_plot$estimator == 'spoc_67',]$raw_score <- raw_means$score[5]
estimator_labs <- setNames(
c("Riemann[53]", "SPoC[67]", "diag"),
c("riemann_53", "spoc_67", "log-diag")
)
fig_preproc <- ggplot(
data = data_plot,
mapping = aes(x = sub_id, y = score, group = series, color = series)) +
geom_hline(aes(yintercept = raw_score), linetype = 'dashed') +
geom_text(
data = subset(raw_means, !estimator %in% c("riemann", "spoc")),
aes(y = score, x = 1.2, label = label),
color = color_cats[['black']], vjust = -0.5, size = 4,
inherit.aes = F) +
geom_beeswarm(alpha = 0.2, size = 1.5,
dodge.width = 0.5) +
stat_summary(geom = 'line', fun.y = mean,
position = position_dodge(width = 0.5), size = 1) +
stat_summary(geom= 'point', fun.y = mean,
position = position_dodge(width = 0.5),
shape = 21, fill = 'white', size = 4) +
scale_y_continuous(breaks = 6:14) +
coord_cartesian(ylim = c(6, 14)) +
facet_wrap(~estimator, nrow = 1,
labeller = as_labeller(estimator_labs, label_parsed)) +
scale_x_discrete(labels = proc_label) +
scale_color_manual(
breaks = names(my_colors),
values = my_colors,
name = NULL) +
ylab("MAE") +
xlab("Preprocessing steps") +
my_theme +
theme(legend.position = c(0.8, 0.07)) +
guides(color = guide_legend(nrow=1))
fig_preproc
fname <- './outputs/preproc_impact'
ggsave(paste0(fname, '.pdf'),
plot = fig_preproc, width = 10, height = 4,
useDingbats = FALSE)
embedFonts(file = paste0(fname, ".pdf"), outfile = paste0(fname, ".pdf"))
ggsave(paste0(fname, '.png'),
plot = fig_preproc, width = 10, height = 4)
|
noncomputable section
theorem ex : ∃ x : Nat, x > 0 :=
⟨1, by decide⟩
def a : Nat := Classical.choose ex
def b : Nat := 0
abbrev c : Nat := Classical.choose ex
abbrev d : Nat := 1
instance e : Inhabited Nat :=
⟨a⟩
instance f : Inhabited Nat :=
⟨b⟩
#eval b + d + f.default
section Foo
def g : Nat := Classical.choose ex
def h (x : Nat) : Nat :=
match x with
| 0 => a
| x+1 => h x + 1
end Foo
end
def i : Nat := Classical.choose ex -- Error
|
#' common_group_quantification.r
#' Supply a sv/otu table, a taxonomy file, and a phylogenetic level.
#' Return sample abundances, relative abundances and sequence depth for all taxa at that level
#' found in greater than X% of samples (default 50%).
#' Also returns the number of unique SVs/OTUs that went into each group.
#'
#' @param sv #sv or otu table where columns are unique taxonomic assignments, rows are samples.
#' @param tax #taxonomy for sv/otu table. 7 columns for k/p/c/o/f/g/s.
#' @param groups #unique groups you are looking for. ex: c('Ascomycota','Basidiomycota')
#' @param tax_level #level of phylogeny in quotes. i.e. 'phylum'.
#' @param samp_freq #minimum fraction of samples a group must occur in to be retained (default 0.5).
#'
#' @return
#' @export
#'
#' @examples
common_group_quantification <- function(sv, tax, groups, tax_level, samp_freq = 0.5){
#some tests.
if(ncol(sv) != nrow(tax)){
stop('Number of columns of sv table does not match number of rows in taxonomy table.')
}
tax_level <- tolower(tax_level)
#make sure taxonomy column names are lower case.
tax_names <- c('kingdom','phylum','class','order','family','genus','species')
colnames(tax) <- tax_names
#merge taxonomy and sv file.
k <- cbind(tax, t(sv))
k <- data.table(k)
to_change <- colnames(k)[colnames(k) %in% tax_level]
setnames(k,to_change,'tax_level')
#count sequence abundance in each group by sample, and number of samples in which it occurs.
abundance <- list()
frequency <- list()
unique.sv <- list()
diversity <- list()
evenness <- list()
for(i in 1:length(groups)){
z <- k[tax_level == groups[i],]
start <- ncol(tax) + 1
seq.count <- colSums(z[,start:ncol(z)])
seq.freq <- length(seq.count[seq.count > 0]) / nrow(sv)
#get diversity, richness and evenness.
seq.div <- rowSums(z[,start:ncol(z)])
seq.div <- seq.div[seq.div > 0]
n.SVs <- length(seq.div)
div <- vegan::diversity(seq.div / sum(seq.div)) #same whether proportional or not.
even <- div/log(n.SVs)
abundance[[i]] <- seq.count
frequency[[i]] <- seq.freq
unique.sv[[i]] <- n.SVs
diversity[[i]] <- div
evenness[[i]] <- even
}
abundance <- do.call(rbind,abundance)
frequency <- do.call(rbind,frequency)
unique.sv <- data.frame(unlist(unique.sv))
diversity <- data.frame(unlist(diversity))
evenness <- data.frame(unlist( evenness))
#name some stuff.
unique.sv$groups <- groups
diversity$groups <- groups
evenness$groups <- groups
rownames(abundance) <- groups
colnames(abundance) <- rownames(sv)
colnames(unique.sv) <- c('N.SVs','groups')
colnames(diversity) <- c('diversity','groups')
colnames( evenness) <- c( 'evenness','groups')
#put together frequency table and order.
frequency <- data.frame(cbind(groups,frequency))
colnames(frequency)[2] <- 'sample_frequency'
frequency$sample_frequency <- as.character(frequency$sample_frequency)
frequency$sample_frequency <- as.numeric(frequency$sample_frequency)
#subset to those that are found in > samp_freq of samples (default 50%).
ref.frequency <- frequency[frequency$sample_frequency > samp_freq,]
#remove groups with unknown taxonomic assignment.
ref.frequency <- ref.frequency[!(ref.frequency$groups %in% c('unknown','Unknown')),]
#merge in number of OTUs, diversity and evenness in each group to frequency table.
frequency <- merge(frequency,unique.sv, all.x = T)
frequency <- merge(frequency,diversity, all.x = T)
frequency <- merge(frequency, evenness, all.x = T)
frequency <- frequency[order(-frequency$sample_frequency),]
#get sequence depth per sample.
seq_total <- rowSums(sv)
#only keep groups that are found in >samp_freq% of samples.
abundance <- abundance[rownames(abundance) %in% ref.frequency$groups,]
abundance <- t(abundance)
#get abundances and relative abundances.
other <- seq_total - rowSums(abundance)
abundance <- cbind(other, abundance)
rel.abundance <- abundance / seq_total #divide each sample (row) by its sequence depth.
#return output: abundances, relative abundances, sequence depth, group sample frequencies.
to_return <- list(abundance,rel.abundance,seq_total,frequency)
names(to_return) <- c('abundances','rel.abundances','seq_total','group_frequencies')
return(to_return)
} |
The cone hull of a set is a cone. |
/* This file is part of VoltDB.
* Copyright (C) 2008-2020 VoltDB Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
*/
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics.hpp>
#include "common/MiscUtil.h"
namespace voltdb {
using namespace std;
vector<string> MiscUtil::splitString(string const& str, char delimiter) {
vector<string> vec;
size_t begin = 0;
while (true) {
size_t end = str.find(delimiter, begin);
if (end == string::npos) {
if (begin != str.size()) {
vec.push_back(str.substr(begin));
}
break;
}
vec.push_back(str.substr(begin, end - begin));
begin = end + 1;
}
return vec;
}
vector<string> MiscUtil::splitToTwoString(string const& str, char delimiter) {
vector<string> vec;
size_t end = str.find(delimiter);
if (end == string::npos) {
vec.push_back(str);
} else {
vec.push_back(str.substr(0, end));
vec.push_back(str.substr(end + 1));
}
return vec;
}
AbstractTimer::AbstractTimer(FILE* sink, char const* op):
m_fp(sink), m_operation(op), m_started(now()) {}
double AbstractTimer::format_duration(usec const& elapsed, char const*& unit) {
static char const* usec = "usec", *msec = "msec";
auto const val = elapsed.count();
if (val < 1e3) {
unit = usec;
return val;
} else {
unit = msec;
return val / 1e3;
}
}
SimpleTimer::~SimpleTimer() {
report(diff(m_started, now()));
}
void SimpleTimer::report(usec const& elapsed) const {
char const* unit;
auto const val = format_duration(elapsed, unit);
fprintf(file(), "%s took %.1g %s\n", m_operation.c_str(), val, unit);
}
using namespace boost::accumulators;
struct statistics_calculator {
accumulator_set<double,
features<tag::count, tag::min, tag::max, tag::mean, tag::median>> m_stats;
public:
template<typename iterator>
statistics_calculator(iterator from, iterator to) {
for_each(from, to, bind<void>(ref(m_stats), std::placeholders::_1));
}
#define getter(T, name) \
T name() const noexcept { \
return extract_result<tag::name>(m_stats); \
}
getter(size_t, count);
getter(double, min);
getter(double, max);
getter(double, mean);
getter(double, median);
#undef getter
};
/**
 * Use unit 1 with the given stat values if the mean value is at most 1000;
 * otherwise use unit 2 with the values divided by 1000.
 */
string to_string(char const* prefix, char const* unit1, char const* unit2,
statistics_calculator const& stat) {
ostringstream oss;
bool const converted = stat.mean() > 1e3;
char const* unit = converted ? unit2 : unit1;
if (converted) {
// convert from usec to msec
oss << prefix << "{ count = " << stat.count()
<< ", min = " << stat.min() / 1e3 << " " << unit
<< ", max = " << stat.max() / 1e3 << " " << unit
<< ", mean = " << stat.mean() / 1e3 << " " << unit
<< ", median = " << stat.median() / 1e3 << " " << unit
<< "}\n";
} else {
oss << prefix << "{ count = " << stat.count()
<< ", min = " << stat.min() << " " << unit
<< ", max = " << stat.max() << " " << unit
<< ", mean = " << stat.mean() << " " << unit
<< ", median = " << stat.median() << " " << unit
<< "}\n";
}
return oss.str();
}
RestartableTimer::RestartableTimer(FILE* sink, char const* op,
size_t periodic_report, size_t binWidth):
AbstractTimer(sink, op),
m_flushPeriod(periodic_report), m_binWidth(binWidth) {
m_elapsed.reserve(m_flushPeriod * 64);
}
void RestartableTimer::report(bool final_report) const {
auto const* prefix = final_report ? "Final" : "Periodic";
if (count()) {
fprintf(file(), "[%s] %s summary:\n# # # # # # # # # # %s\n# # # # # # # # # #\n",
m_operation.c_str(), prefix,
statistics(const_cast<std::vector<size_t>&>(m_elapsed),
m_binWidth).c_str());
} else {
fprintf(file(), "[%s] %s summary: No timed events occurred\n",
m_operation.c_str(), prefix);
}
}
void RestartableTimer::stop() {
assert(m_active);
m_elapsed.emplace_back(diff(m_started, now()).count());
m_active = false;
if (m_flushPeriod && count() % m_flushPeriod == 0) {
report(false);
}
}
void RestartableTimer::restart() {
assert(! m_active);
m_active = true;
const_cast<time_point&>(m_started) = now();
}
size_t RestartableTimer::count() const noexcept {
return m_elapsed.size();
}
RestartableTimer::~RestartableTimer() {
if (active()) {
stop();
}
report(true);
}
inline RestartableTimer::ScopedTimer::ScopedTimer(RestartableTimer& tm) noexcept:
m_tm(tm) {
m_tm.restart();
}
RestartableTimer::ScopedTimer::~ScopedTimer() noexcept {
m_tm.stop();
}
string RestartableTimer::statistics(
vector<size_t> const& elapsed, size_t bin_size) {
using iterator = typename vector<size_t>::const_iterator;
size_t from = 0;
size_t const full = elapsed.size(),
est_bin_size = full / bin_size + 1;
vector<statistics_calculator> bin_stats;
bin_stats.reserve(est_bin_size);
vector<pair<size_t, size_t>> bin_positions;
bin_positions.reserve(est_bin_size);
while (from < full) {
auto const to = std::min(from + bin_size, full);
bin_stats.emplace_back(next(elapsed.begin(), from), next(elapsed.begin(), to));
bin_positions.emplace_back(from, to);
from = to;
}
ostringstream oss;
auto const full_stat = statistics_calculator{elapsed.begin(), elapsed.end()};
oss << to_string("Total statistics\n# # # # # # # # # #\n", "usec", "msec", full_stat)
<< "\nBreak-down statistics\n# # # # # # # # # #\n";
for (size_t i = 0; i < bin_positions.size(); ++i) {
string prefix("[#");
oss << to_string(
prefix.append(std::to_string(bin_positions[i].first))
.append(" - #").append(std::to_string(bin_positions[i].second - 1))
.append("]: ").c_str(),
"usec", "msec", bin_stats[i])
<< endl;
}
return oss.str();
}
RestartableTimers::RestartableTimers(FILE* sink, char const* op,
size_t binWidth, vector<string> const&& names):
super(sink, op, 0/* disables periodic report */, binWidth) {
if (names.empty()) {
printf("Warning: named timers not provided");
} else {
for_each(names.cbegin(), names.cend(),
[sink, op, binWidth, this] (string const& nm) {
if (! m_subTimers.emplace(make_pair(nm,
RestartableTimer(sink,
string(op).append(".").append(nm).c_str(),
0, binWidth))).second) {
char buf[128];
snprintf(buf, sizeof buf,
"Duplicated name \"%s\" found for RestartableTimers",
nm.c_str());
buf[sizeof buf - 1] = '\0';
throw logic_error(buf);
}
});
}
}
typename RestartableTimer::ScopedTimer RestartableTimers::get(char const* k) {
auto iter = m_subTimers.find(k);
if (iter == m_subTimers.cend()) {
char buf[128];
snprintf(buf, sizeof buf, "Cannot find sub-timer named \"%s\". Typo?", k);
buf[sizeof buf - 1] = '\0';
throw logic_error(buf);
} else {
return iter->second.get();
}
}
inline typename RestartableTimer::ScopedTimer RestartableTimer::ScopedTimer::create(
RestartableTimer& tm) noexcept {
return {tm};
}
typename RestartableTimer::ScopedTimer RestartableTimer::get() {
return ScopedTimer::create(*this);
}
} // namespace voltdb
|
[STATEMENT]
lemma sec_id_identified_2:
"(Spy, PubKey {Tok_PriK n, Rev_PriK}) \<notin> s\<^sub>0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Spy, PubKey {Tok_PriK n, Rev_PriK}) \<notin> s\<^sub>0
[PROOF STEP]
by (insert tok_prik_rev sec_id_identified_1, simp add: image_def,
drule spec [of _ n], auto simp: set_eq_iff) |
lemma poly_map_poly_cnj [simp]: "poly (map_poly cnj p) x = cnj (poly p (cnj x))" |
Formal statement is: lemma is_nth_power_nth_power': assumes "n dvd n'" shows "is_nth_power n (m ^ n')" Informal statement is: If $n$ divides $n'$, then $m^{n'}$ is an $n$th power. |
program merge_events
c Merge many event files into a unique file
c gfortran -I../SubProcesses/P0_<anydir> -o merge_events
c merge_events.f handling_lhe_events.f fill_MC_mshell.f
implicit none
integer maxevt,ifile,ofile,i,j,npart,mgfile
integer IDBMUP(2),PDFGUP(2),PDFSUP(2),IDWTUP,NPRUP
double precision EBMUP(2),XSECUP,XERRUP,XMAXUP,LPRUP
INTEGER MAXNUP
PARAMETER (MAXNUP=500)
INTEGER NUP,IDPRUP,IDUP(MAXNUP),ISTUP(MAXNUP),
# MOTHUP(2,MAXNUP),ICOLUP(2,MAXNUP)
DOUBLE PRECISION XWGTUP,SCALUP,AQEDUP,AQCDUP,
# PUP(5,MAXNUP),VTIMUP(MAXNUP),SPINUP(MAXNUP)
character*80 event_file,fname1,executable,inputfile,pref
character*1000 buff,tmpstr
character*10 MonteCarlo,mc
character*3 str
integer evts,leftover,loc,loc1,loc2,num_file,sumevt
integer numscales,numPDFpairs,isc,ipdf
common/cwgxsec1/numscales,numPDFpairs
c
write(*,*)'pref of the files (in the form pref.001, ...) '
read(*,*)pref
write(*,*)'number of files to merge'
read(*,*)num_file
ifile=34
sumevt=0
loc=index(pref,' ')
do i=1,num_file
str='000'
if (i.le.9) write (str(3:3),'(i1)') i
if (i.gt.9.and.i.le.99) write (str(2:3),'(i2)') i
if (i.gt.99.and.i.le.999) write (str(1:3),'(i3)') i
fname1=pref(1:loc-1)//'.'//str
open(unit=ifile,file=fname1(1:loc+3),status='unknown')
call read_lhef_header_full(ifile,evts,isc,ipdf,MonteCarlo)
numscales=int(sqrt(dble(isc)))
numPDFpairs=ipdf/2
if(i.eq.1)then
mc=MonteCarlo
call read_lhef_init(ifile,
& IDBMUP,EBMUP,PDFGUP,PDFSUP,IDWTUP,NPRUP,
& XSECUP,XERRUP,XMAXUP,LPRUP)
endif
if(MonteCarlo.ne.mc)then
write(*,*)'incompatible files',mc,MonteCarlo,' file ',i
stop
endif
sumevt=sumevt+evts
enddo
ofile=34
ifile=35
open(unit=ofile,file=pref(1:loc-1)//'.complete',status='unknown')
open(unit=ifile,file=pref(1:loc-1)//'.001',status='old')
call copy_header(ifile,ofile,sumevt)
close(ifile)
call write_lhef_init(ofile,
& IDBMUP,EBMUP,PDFGUP,PDFSUP,IDWTUP,NPRUP,
& XSECUP,XERRUP,XMAXUP,LPRUP)
do i=1,num_file
      str='000'
if (i.le.9) write (str(3:3),'(i1)') i
if (i.gt.9.and.i.le.99) write (str(2:3),'(i2)') i
if (i.gt.99.and.i.le.999) write (str(1:3),'(i3)') i
fname1=pref(1:loc-1)//'.'//str
write(*,*)'merging file ',fname1(1:loc+3)
open(unit=ifile,file=fname1(1:loc+3),status='unknown')
call read_lhef_header(ifile,evts,MonteCarlo)
call read_lhef_init(ifile,
& IDBMUP,EBMUP,PDFGUP,PDFSUP,IDWTUP,NPRUP,
& XSECUP,XERRUP,XMAXUP,LPRUP)
do j=1,evts
if(j.eq.evts)then
call read_lhef_event(ifile,
& NUP,IDPRUP,XWGTUP,SCALUP,AQEDUP,AQCDUP,
& IDUP,ISTUP,MOTHUP,ICOLUP,PUP,VTIMUP,SPINUP,buff)
read(ifile,*)tmpstr
goto 999
endif
call read_lhef_event(ifile,
& NUP,IDPRUP,XWGTUP,SCALUP,AQEDUP,AQCDUP,
& IDUP,ISTUP,MOTHUP,ICOLUP,PUP,VTIMUP,SPINUP,buff)
999 continue
call write_lhef_event(ofile,
& NUP,IDPRUP,XWGTUP,SCALUP,AQEDUP,AQCDUP,
& IDUP,ISTUP,MOTHUP,ICOLUP,PUP,VTIMUP,SPINUP,buff)
enddo
close(ifile)
enddo
write(ofile,*)'</LesHouchesEvents>'
close(ofile)
stop
end
|
lemmas prime_imp_coprime_nat = prime_imp_coprime[where ?'a = nat] |
function resolve_import_block(x::EXPR, state::State, root, usinged, markfinal=true)
if x.head == :as
resolve_import_block(x.args[1], state, root, usinged, markfinal)
if x.args[2].meta === nothing
x.args[2].meta = Meta()
end
if hasbinding(last(x.args[1].args)) && CSTParser.isidentifier(x.args[2])
lhsbinding = bindingof(last(x.args[1].args))
x.args[2].meta.binding = Binding(x.args[2], lhsbinding.val, lhsbinding.type, lhsbinding.refs)
setref!(x.args[2], bindingof(x.args[2]))
last(x.args[1].args).meta.binding = nothing
end
return
end
n = length(x.args)
for i = 1:length(x.args)
arg = x.args[i]
if isoperator(arg) && valof(arg) == "."
# Leading dots. Can only be leading elements.
if root == getsymbols(state)
root = state.scope
elseif root isa Scope && parentof(root) !== nothing
root = parentof(root)
else
return
end
elseif isidentifier(arg) || (i == n && (CSTParser.ismacroname(arg) || isoperator(arg)))
root = maybe_lookup(hasref(arg) ? refof(arg) : _get_field(root, arg, state), state)
setref!(arg, root)
if i == n
markfinal && _mark_import_arg(arg, root, state, usinged)
return refof(arg)
end
else
return
end
end
end
function resolve_import(x::EXPR, state::State, root=getsymbols(state))
if headof(x) === :using || headof(x) === :import
usinged = headof(x) === :using
if length(x.args) > 0 && isoperator(headof(x.args[1])) && valof(headof(x.args[1])) == ":"
root = resolve_import_block(x.args[1].args[1], state, root, false, false)
for i = 2:length(x.args[1].args)
resolve_import_block(x.args[1].args[i], state, root, usinged)
end
else
for i = 1:length(x.args)
resolve_import_block(x.args[i], state, root, usinged)
end
end
end
end
function _mark_import_arg(arg, par, state, usinged)
if par !== nothing && CSTParser.is_id_or_macroname(arg)
if par isa Binding # mark reference to binding
push!(par.refs, arg)
end
if par isa SymbolServer.VarRef
par = SymbolServer._lookup(par, getsymbols(state), true)
!(par isa SymbolServer.SymStore) && return
end
if bindingof(arg) === nothing
if !hasmeta(arg)
arg.meta = Meta()
end
arg.meta.binding = Binding(arg, par, _typeof(par, state), [])
setref!(arg, bindingof(arg))
end
if usinged
if par isa SymbolServer.ModuleStore
add_to_imported_modules(state.scope, Symbol(valofid(arg)), par)
elseif par isa Binding && par.val isa SymbolServer.ModuleStore
add_to_imported_modules(state.scope, Symbol(valofid(arg)), par.val)
elseif par isa Binding && par.val isa EXPR && CSTParser.defines_module(par.val)
add_to_imported_modules(state.scope, Symbol(valofid(arg)), scopeof(par.val))
elseif par isa Binding && par.val isa Binding && par.val.val isa EXPR && CSTParser.defines_module(par.val.val)
add_to_imported_modules(state.scope, Symbol(valofid(arg)), scopeof(par.val.val))
end
end
end
end
function has_workspace_package(server, name)
haskey(server.workspacepackages, name) &&
hasscope(getcst(server.workspacepackages[name])) &&
haskey(scopeof(getcst(server.workspacepackages[name])).names, name) &&
scopeof(getcst(server.workspacepackages[name])).names[name] isa Binding &&
scopeof(getcst(server.workspacepackages[name])).names[name].val isa EXPR &&
CSTParser.defines_module(scopeof(getcst(server.workspacepackages[name])).names[name].val)
end
function add_to_imported_modules(scope::Scope, name::Symbol, val)
if scope.modules isa Dict
scope.modules[name] = val
else
        scope.modules = Dict(name => val)
end
end
no_modules_above(s::Scope) = !CSTParser.defines_module(s.expr) || s.parent === nothing || no_modules_above(s.parent)
function get_named_toplevel_module(s, name)
return nothing
end
function get_named_toplevel_module(s::Scope, name::String)
if CSTParser.defines_module(s.expr)
m_name = CSTParser.get_name(s.expr)
if ((headof(m_name) === :IDENTIFIER && valof(m_name) == name) || headof(m_name) === :NONSTDIDENTIFIER && length(m_name.args) == 2 && valof(m_name.args[2]) == name) && no_modules_above(s)
return s.expr
end
end
if s.parent isa Scope
return get_named_toplevel_module(s.parent, name)
end
return nothing
end
function _get_field(par, arg, state)
arg_str_rep = CSTParser.str_value(arg)
if par isa SymbolServer.EnvStore
if (arg_scope = retrieve_scope(arg)) !== nothing && (tlm = get_named_toplevel_module(arg_scope, arg_str_rep)) !== nothing && hasbinding(tlm)
return bindingof(tlm)
# elseif has_workspace_package(state.server, arg_str_rep)
# return scopeof(getcst(state.server.workspacepackages[arg_str_rep])).names[arg_str_rep]
elseif haskey(par, Symbol(arg_str_rep))
if isempty(state.env.project_deps) || Symbol(arg_str_rep) in state.env.project_deps
return par[Symbol(arg_str_rep)]
end
end
elseif par isa SymbolServer.ModuleStore # imported module
if Symbol(arg_str_rep) === par.name.name
return par
elseif haskey(par, Symbol(arg_str_rep))
par = par[Symbol(arg_str_rep)]
if par isa SymbolServer.VarRef # reference to dependency
return SymbolServer._lookup(par, getsymbols(state), true)
end
return par
end
for used_module_name in par.used_modules
used_module = maybe_lookup(par[used_module_name], state)
if used_module !== nothing && isexportedby(Symbol(arg_str_rep), used_module)
return used_module[Symbol(arg_str_rep)]
end
end
elseif par isa Scope
if scopehasbinding(par, arg_str_rep)
return par.names[arg_str_rep]
elseif par.modules !== nothing
for used_module in values(par.modules)
if used_module isa SymbolServer.ModuleStore && isexportedby(Symbol(arg_str_rep), used_module)
return maybe_lookup(used_module[Symbol(arg_str_rep)], state)
elseif used_module isa Scope && scope_exports(used_module, arg_str_rep, state)
return used_module.names[arg_str_rep]
end
end
end
elseif par isa Binding
if par.val isa Binding
return _get_field(par.val, arg, state)
elseif par.val isa EXPR && CSTParser.defines_module(par.val) && scopeof(par.val) isa Scope
return _get_field(scopeof(par.val), arg, state)
elseif par.val isa EXPR && isassignment(par.val)
if hasref(par.val.args[2])
return _get_field(refof(par.val.args[2]), arg, state)
elseif is_getfield_w_quotenode(par.val.args[2])
return _get_field(refof_maybe_getfield(par.val.args[2]), arg, state)
end
elseif par.val isa SymbolServer.ModuleStore
return _get_field(par.val, arg, state)
end
end
return
end
|
Tactic Notation "mrewrite" uconstr(EQ) "in" "*" :=
rewrite EQ in *.
Tactic Notation "mrewrite" uconstr(EQ) "in" "*" :=
rewrite EQ in *.
Tactic Notation "mrewrite" uconstr(EQ) "in" ident(H) :=
rewrite EQ in H.
Tactic Notation "mrewrite" uconstr(EQ) :=
rewrite EQ.
Tactic Notation "mrewrite" "<-" uconstr(EQ) "in" "*" :=
rewrite <- EQ in *.
Tactic Notation "mrewrite" "<-" uconstr(EQ) "in" ident(H) :=
rewrite <- EQ in H.
Tactic Notation "mrewrite" "<-" uconstr(EQ) :=
rewrite <- EQ.
Tactic Notation "mrewrite" "!" uconstr(EQ) "in" "*" :=
rewrite ! EQ in *.
Tactic Notation "mrewrite" "!" uconstr(EQ) "in" ident(H) :=
rewrite ! EQ in H.
Tactic Notation "mrewrite" "!" uconstr(EQ) :=
rewrite ! EQ.
Tactic Notation "mrewrite" "<-" "!" uconstr(EQ) "in" "*" :=
rewrite <- ! EQ in *.
Tactic Notation "mrewrite" "<-" "!" uconstr(EQ) "in" ident(H) :=
rewrite <- ! EQ in H.
Tactic Notation "mrewrite" "<-" "!" uconstr(EQ) :=
rewrite <- ! EQ.
Tactic Notation "rewrite" uconstr(EQ) "in" "*" :=
mrewrite EQ in *.
Tactic Notation "rewrite" uconstr(EQ) "in" ident(H) :=
mrewrite EQ in H.
Tactic Notation "rewrite" uconstr(EQ) :=
mrewrite EQ.
Tactic Notation "rewrite" "<-" uconstr(EQ) "in" "*" :=
mrewrite <- EQ in *.
Tactic Notation "rewrite" "<-" uconstr(EQ) "in" ident(H) :=
mrewrite <- EQ in H.
Tactic Notation "rewrite" "<-" uconstr(EQ) :=
mrewrite <- EQ.
Tactic Notation "rewrite" "!" uconstr(EQ) "in" "*" :=
mrewrite ! EQ in *.
Tactic Notation "rewrite" "!" uconstr(EQ) "in" ident(H) :=
mrewrite ! EQ in H.
Tactic Notation "rewrite" "!" uconstr(EQ) :=
mrewrite ! EQ.
Tactic Notation "rewrite" "<-" "!" uconstr(EQ) "in" "*" :=
mrewrite <- ! EQ in *.
Tactic Notation "rewrite" "<-" "!" uconstr(EQ) "in" ident(H) :=
mrewrite <- ! EQ in H.
Tactic Notation "rewrite" "<-" "!" uconstr(EQ) :=
mrewrite <- ! EQ.
Ltac mf_equal := f_equal.
Ltac f_equal := mf_equal.
Module REWRITETEST.
Section TEST.
Variable x y: nat.
Hypothesis XY: x = y.
Goal forall (a b c: nat) (EQ0: a = b) (EQ1: a = c) (EQ2: c = b) (EQ3: x = y),
a + b + y = c + c + x.
Proof.
(* intros. *)
(* rewrite EQ0 in *. Undo. *)
(* rewrite EQ1 in EQ0. Undo. *)
(* rewrite EQ0. Undo. *)
(* rewrite <- EQ0 in *. Undo. *)
(* rewrite <- EQ2 in EQ0. Undo. *)
(* rewrite <- EQ2. Undo. *)
(* rewrite ! EQ0 in *. Undo. *)
(* rewrite ! EQ1 in EQ0. Undo. *)
(* rewrite ! EQ0. Undo. *)
(* rewrite <- ! EQ0 in *. Undo. *)
(* rewrite <- ! EQ2 in EQ0. Undo. *)
(* rewrite <- ! EQ2. Undo. *)
(* rewrite XY in *. Undo. *)
(* rewrite XY in EQ3. Undo. *)
(* rewrite XY. Undo. *)
(* rewrite <- XY in *. Undo. *)
(* rewrite <- XY in EQ3. Undo. *)
(* rewrite <- XY. Undo. *)
(* rewrite ! XY in *. Undo. *)
(* rewrite ! XY in EQ3. Undo. *)
(* rewrite ! XY. Undo. *)
(* rewrite <- ! XY in *. Undo. *)
(* rewrite <- ! XY in EQ3. Undo. *)
(* rewrite <- ! XY. Undo. *)
Abort.
End TEST.
End REWRITETEST.
Require Import Basics.
Notation "f ∘ g" := (fun x => (f (g x))).
Typeclasses Opaque flip.
Require Export List.
|
# Singular value decomposition (SVD)
The singular value decompostion of a real-valued $m \times n$ matrix $\boldsymbol{A}$ is:
$$
\boldsymbol{A} = \boldsymbol{U} \boldsymbol{\Sigma} \boldsymbol{V}^{T}
$$
where
- $\boldsymbol{U}$ is an $m \times m$ orthogonal matrix;
- $\boldsymbol{\Sigma}$ is an $m \times n$ diagonal matrix with diagonal entries $\sigma_{1} \ge \sigma_{2} \ge \ldots \ge \sigma_{p} \ge 0$, where $p = \min(m, n)$; and
- $\boldsymbol{V}$ is an $n \times n$ orthogonal matrix.
We will use NumPy to compute the SVD and Matplotlib to visualise results, so we first import some modules:
```python
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
```
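As a quick sanity check of the factorisation (a minimal sketch on a small random matrix, purely for illustration), we can compute the SVD with NumPy and confirm that $\boldsymbol{U} \boldsymbol{\Sigma} \boldsymbol{V}^{T}$ reproduces the original matrix:
```python
# Sketch: verify A = U Sigma V^T on a small random matrix
B = np.random.rand(4, 3)
U, s, Vt = np.linalg.svd(B, full_matrices=True)

# Assemble the m x n matrix Sigma from the singular values
Sigma = np.zeros(B.shape)
Sigma[:len(s), :len(s)] = np.diag(s)

print(np.allclose(B, U.dot(Sigma).dot(Vt)))  # True, up to round-off
```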
**Note:** If you run this notebook yourself it can take some time, because it computes a number of moderate-size SVD problems.
## Low rank approximations
Recall that we can represent a matrix as a sum of rank-1 matrices:
$$
\boldsymbol{A} = \sum_{i} \sigma_{i} \boldsymbol{u}_{i} \boldsymbol{v}^{T}_{i}
$$
where $\sigma_{i}$ is the $i$th singular value and $\boldsymbol{u}_{i}$ and $\boldsymbol{v}_{i}$ are the $i$th column vectors of $\boldsymbol{U}$ and $\boldsymbol{V}$, respectively, from the SVD. Clearly, for any $\sigma_{i} = 0$ we can avoid storing data that makes no contribution. If $\sigma_{i}$ is small, then the contribution of $\sigma_{i} \boldsymbol{u}_{i} \boldsymbol{v}^{T}_{i}$ is small, so we can discard it and introduce only a small 'error' into the matrix. We will use low rank approximations in a number of examples in this notebook.
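The short sketch below illustrates this on a small random matrix (purely for illustration): summing all the rank-1 terms recovers the matrix exactly, and truncating the sum gives a low rank approximation whose 2-norm error equals the first discarded singular value.
```python
# Sketch: build a matrix from its rank-1 terms sigma_i * u_i v_i^T
M = np.random.rand(5, 4)
U, s, Vt = np.linalg.svd(M, full_matrices=False)

M_sum = sum(s[i]*np.outer(U[:, i], Vt[i, :]) for i in range(len(s)))
print("Full sum reproduces M:", np.allclose(M, M_sum))

# Rank-2 approximation: keep only the two largest singular values
M_rank2 = sum(s[i]*np.outer(U[:, i], Vt[i, :]) for i in range(2))
print("2-norm error:", np.linalg.norm(M - M_rank2, 2), "(equals sigma_3 =", s[2], ")")
```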
## Data compression
We start with a $100 \times 200$ matrix that has entries equal to one or zero. We create a matrix with all entries set to one, and we then set some entries equal to zero in the pattern of a rectangle.
```python
A = np.ones((100, 200))
A[33:33 + 4, 33:133] = 0.0
A[78:78 + 4, 33:133] = 0.0
A[33:78+4, 33:33+4] = 0.0
A[33:78+4, 129:129+4] = 0.0
plt.imshow(A, cmap='gray', interpolation='none')
plt.show()
```
Performing the SVD and counting the number of singular values that are greater than $10^{-9}$:
```python
U, s, V = np.linalg.svd(A, full_matrices=False)
print("Number of singular values greater than 1.0e-9: {}".format((s > 1.0e-9).sum()))
```
With only three nonzero singular values, we could reconstruct the matrix with very little data - just three singular values and six vectors.
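To make this concrete, the sketch below reconstructs the matrix from the three largest singular values and compares the number of stored entries (keeping only three singular values and the corresponding columns of $\boldsymbol{U}$ and $\boldsymbol{V}$):
```python
# Reconstruct the matrix from the three largest singular values only
r = 3
A_r = U[:, :r].dot(np.diag(s[:r])).dot(V[:r, :])
print("Reconstruction error (Frobenius norm):", np.linalg.norm(A - A_r))

# Storage: r singular values plus r columns of U and r rows of V^T
full_entries = A.size
compressed_entries = r*(A.shape[0] + A.shape[1] + 1)
print("Entries stored: {} instead of {}".format(compressed_entries, full_entries))
```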
### Removing noise
We consider the same matrix problem again, this time with some background noise in the white regions.
```python
A = np.ones((100, 200))
A = A - 1.0e-1*np.random.rand(100, 200)
A[33:33 + 4, 33:133] = 0.0
A[78:78 + 4, 33:133] = 0.0
A[33:78+4, 33:33+4] = 0.0
A[33:78+4, 129:129+4] = 0.0
plt.imshow(A, cmap='gray', interpolation='none');
```
The effect of the noise is clear in the image.
We can try to eliminate much of the background noise via a low-rank approximation of the noisy image that discards information associated with small singular values of the matrix.
```python
# Compute SVD of the noisy matrix
U, s, V = np.linalg.svd(A, full_matrices=False)
# Set any singular values less than 1.0 equal to zero
s[s < 1.0] = 0.0
# Reconstruct low rank approximation and display
A_denoised = np.dot(U, np.dot(np.diag(s), V))
plt.imshow(A_denoised, cmap='gray', interpolation='none')
plt.show();
```
We can see that much of the noise in the image has been eliminated.
## Image compression
### Gray scale image
We load a colour PNG file. It uses three colour channels (red/green/blue), with at each pixel an 8-bit unsigned integer (in the range $[0, 255]$, but sometimes represented as a float) for each colour intensity. This is known as 24-bit colour - three channels times 8 bits.
We load the image as three matrices (red, green, blue), each with dimension equal to the number of pixels in each direction:
```python
from urllib.request import urlopen
url = "https://github.com/garth-wells/notebooks-3M1/raw/master/photo/2020-1.png"
img_colour = Image.open(urlopen(url))
img_colour = img_colour.convert('RGB')
print("Image size (pixels):", img_colour.size)
print("Image array shape: ", np.array(img_colour).shape)
plt.figure(figsize=(15, 15/1.77))
plt.imshow(img_colour);
```
We could work with the colour image, but it is simpler to work with a gray scale image because then we have only one value for the colour intensity at each pixel rather than three (red/green/blue).
```python
img_bw = img_colour.convert('L')
plt.figure(figsize=(15, 15/1.77))
plt.imshow(img_bw, cmap='gray');
print("Image array shape: {}".format(img_bw.size))
plt.savefig("bw.pdf")
```
We can convert the image to a regular matrix with values between 0 and 255, with each entry corresponding to a pixel in the image. Creating the matrix and inspecting first four rows and three columns (top left corner of the image):
```python
img_array = np.array(img_bw)
print("Image shape:", img_array.shape)
print(img_array[:4, :3])
```
Now, maybe we can discard information associated with small singular values without perceiving any visual change in the image. To explore this, we compute the SVD of the gray scale image:
```python
U, s, V = np.linalg.svd(img_array, full_matrices=False)
```
The argument `full_matrices=False` tells NumPy not to store all the redundant zero terms in the $\boldsymbol{\Sigma}$ array. This is the normal approach in practice, but not in most textbooks. Note that NumPy returns the singular values as a one-dimensional array, not as a matrix.
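A small sketch of the difference in shapes (using a small random matrix, purely for illustration): with `full_matrices=False` the factors have the reduced shapes $m \times p$ and $p \times n$, where $p = \min(m, n)$.
```python
# Sketch: compare the shapes returned by the full and reduced ('economy') SVD
B = np.random.rand(6, 4)
Uf, sf, Vf = np.linalg.svd(B, full_matrices=True)
Ur, sr, Vr = np.linalg.svd(B, full_matrices=False)
print("Full SVD shapes:   ", Uf.shape, sf.shape, Vf.shape)   # (6, 6) (4,) (4, 4)
print("Reduced SVD shapes:", Ur.shape, sr.shape, Vr.shape)   # (6, 4) (4,) (4, 4)
```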
We now print the largest and smallest singular values, and plot all the singular values $\sigma_{i}$ on a log-scale:
```python
print("Number of singular values: {}".format(len(s)))
print("Max, min singular values: {}, {}".format(s[0], s[-1]))
plt.xlabel('$i$')
plt.ylabel('$\sigma_i$')
plt.title('Singular values')
plt.yscale('log')
plt.plot(s, 'bo');
plt.savefig("bw-svd.pdf")
```
We can now try compressing the image. We first try retaining only the largest 25% of the singular values:
```python
# Compute r = num_sigma/4, i.e. retain the largest 25% of the singular values
r = int(0.25*len(s))
# Re-construct low rank approximation (this may look a little cryptic, but we use the below
# expression to avoid unnecessary computation)
compressed = U[:,:r].dot(s[:r, np.newaxis]*V[:r,:])
compressed = compressed.astype(int)
# Plot compressed and original image
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 18/1.77));
axes[0].set_title('Compressed image with largest 25% of singular values retained')
axes[0].imshow(compressed, cmap='gray');
axes[1].set_title('Original image')
axes[1].imshow(img_array, cmap='gray');
```
We have discarded 3/4 of the singular values, but can barely perceive a difference in the image.
To explore other levels of compression, we write a function that takes the fraction of singular values we wish to retain:
```python
def compress_image(U, s, V, f):
"Compress image where 0 < f <= 1 is the fraction on singular values to retain"
r = int(f*len(s))
return (U[:,:r].dot(s[:r, np.newaxis]*V[:r,:])).astype(int)
```
Let's try retaining just 10% of the singular values:
```python
# Compress image/matrix
compressed = compress_image(U, s, V, 0.1)
# Plot compressed and original image
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Compressed image with largest 10% of singular values retained')
axes[0].imshow(compressed, cmap='gray');
axes[1].set_title('Original image')
axes[1].imshow(img_array, cmap='gray');
plt.savefig("bw-0-10.pdf")
```
Even with only 10% of the singular values retained, it is hard to perceive a difference between the images. Next we try keeping only 2%:
```python
# Compress image/matrix
compressed = compress_image(U, s, V, 0.02)
# Plot compressed and original image
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Compressed image with largest 2% of singular values retained')
axes[0].imshow(compressed, cmap='gray');
axes[1].set_title('Original image')
axes[1].imshow(img_array, cmap='gray');
plt.savefig("bw-0-02.pdf")
```
We now see some clear image degradation, but the image is still recognisable. We'll try one more case where we retain only 0.5% of the singular values.
```python
# Compress image/matrix
compressed = compress_image(U, s, V, 0.005)
# Plot compressed and original image
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Compressed image with largest 0.5% of singular values retained')
axes[0].imshow(compressed, cmap='gray');
axes[1].set_title('Original image')
axes[1].imshow(img_array, cmap='gray');
plt.savefig("bw-0-005.pdf")
```
The image quality is now quite poor.
### Colour image: RGB
We'll now try compressing a colour image.
```python
print("Image array shape: {}".format(img_colour.size))
plt.figure(figsize=(20,20/1.77))
plt.title('This is a photo of 2020 3M1 class members')
plt.imshow(img_colour);
```
We can extract the red, green and blue components to have a look:
```python
# Display red, green and blue channels by zeroing other channels
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 20/1.77))
img_array = np.array(img_colour)
# Zero the g/b channels
red = img_array.copy()
red[:,:,(1,2)] = 0.0
axes[0].imshow(red);
# Zero the r/b channels
green = img_array.copy()
green[:,:,(0,2)] = 0.0
axes[1].imshow(green);
# Zero the r/g channels
blue = img_array.copy()
blue[:,:,(0,1)] = 0.0
axes[2].imshow(blue);
```
We now compute an SVD for the matrix of each colour:
```python
# Compute SVD for each colour
U, s, V = [0]*3, [0]*3, [0]*3
for i in range(3):
U[i], s[i], V[i] = np.linalg.svd(img_array[:, :, i], full_matrices=False)
```
Compressing the matrix for each colouring separately and then reconstructing the three-dimensional array:
```python
# Compress each colour separately
compressed = [compress_image(U[i], s[i], V[i], 0.1) for i in range(3)]
# Reconstruct 3D RGB array
compressed = np.dstack(compressed)
```
Comparing the compressed and original images side-by-side:
```python
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Image with largest 10% of singular values retained')
axes[0].imshow(compressed, interpolation="nearest");
axes[1].set_title('Original image')
axes[1].imshow(img_colour);
```
Retaining 10% of the singular values for each colour, we can see some artifacts in the compressed image, which indicates that using the SVD for each colour independently is probably not a good idea.
### Colour image: YCbCr
A better approach is to split the image into [YCbCr](https://en.wikipedia.org/wiki/YCbCr), rather than RGB.
YCbCr splits the image into luminance (Y) and chrominance (Cb and Cr) colour values.
```python
img_colour_ycbcr = np.array(img_colour.convert("YCbCr"))
```
```python
# Display Luminance(Y), Blue Chroma(Cb) and Red Chroma(Cr) channels
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 20/1.77))
Y = img_colour_ycbcr[:,:,0]
axes[0].imshow(Y, cmap='gray');
Cb = img_colour_ycbcr[:,:,1]
axes[1].imshow(Cb, cmap='gray');
Cr = img_colour_ycbcr[:,:,2]
axes[2].imshow(Cr, cmap='gray');
```
Compute the SVD of each channel:
```python
# Compute SVD for each channel
U, s, V = [0]*3, [0]*3, [0]*3
for i in range(3):
U[i], s[i], V[i] = np.linalg.svd(img_colour_ycbcr[:, :, i], full_matrices=False)
```
Compress each channel, and display compressed channels in gray scale:
```python
# Compress each component separately
compressed = [compress_image(U[0], s[0], V[0], 0.05),
compress_image(U[1], s[1], V[1], 0.005),
compress_image(U[2], s[2], V[2], 0.005)]
# Reconstruct 3D YCbCr array
compressed = np.dstack(compressed)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 20/1.77))
Y = compressed[:,:,0]
axes[0].imshow(Y, cmap='gray');
Cb = compressed[:,:,1]
axes[1].imshow(Cb, cmap='gray');
Cr = compressed[:,:,2]
axes[2].imshow(Cr, cmap='gray');
```
Combine compressed channels:
```python
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Image with largest 5% of brightness singular values retained and 0.5% colours')
im = Image.fromarray(np.uint8(compressed), mode="YCbCr")
axes[0].imshow(im)
axes[1].set_title('Original image')
axes[1].imshow(img_colour);
```
### Interactive compression
We'll now create an interactive image with sliders to interactively control the compression level.
```python
from ipywidgets import widgets
from ipywidgets import interact
url = "https://github.com/garth-wells/notebooks-3M1/raw/master/photo/IMG_20190117_141222563.png"
img = Image.open(urlopen(url))
img_colour_ycbcr = np.array(img.convert("YCbCr"))
# Compute SVD for each channel
U0, s0, V0 = [0]*3, [0]*3, [0]*3
for i in range(3):
U0[i], s0[i], V0[i] = np.linalg.svd(img_colour_ycbcr[:, :, i], full_matrices=False)
@interact(ratio_Y=(0.005, 0.4, 0.02),
ratio_Cb=(0.001, 0.1, 0.01),
ratio_Cr=(0.001, 0.1, 0.01))
def plot_image(ratio_Y=0.1, ratio_Cb=0.01, ratio_Cr=0.01):
compressed = [compress_image(U0[0], s0[0], V0[0], ratio_Y),
compress_image(U0[1], s0[1], V0[1], ratio_Cb),
compress_image(U0[2], s0[2], V0[2], ratio_Cr)]
# Reconstruct 3D YCbCr array
compressed = np.dstack(compressed)
img_compressed = Image.fromarray(np.uint8(compressed), mode="YCbCr")
# Show
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 20/1.77))
axes[0].set_title('Compressed image')
axes[0].imshow(img_compressed)
axes[1].set_title('Original image')
axes[1].imshow(img)
```
## Effective rank
Determining the rank of a matrix is not a binary question in the context of floating point arithmetic or measurement errors. The SVD can be used to determine the 'effective rank' of a matrix.
Consider the matrix:
```python
A = np.array([[1, 1, 1], [2, 2, 2], [1, 0 ,1]])
print(A)
```
Clearly the first two rows are linearly dependent and the rank of this matrix is 2. We can verify this using NumPy:
```python
print("Rank of A is: {}".format(np.linalg.matrix_rank(A)))
```
We now add some noise in the range $(0, 10^{-6})$ to the matrix entries:
```python
np.random.seed(10)
A = A + 1.0e-6*np.random.rand(A.shape[0], A.shape[1])
```
We now test the rank:
```python
print("Rank of A (with noise) is: {}".format(np.linalg.matrix_rank(A)))
```
The problem is that we have a 'data set' that is linearly dependent, but this is being masked by very small measurement noise.
Computing the SVD of the matrix with noise and printing the singular values:
```python
U, s, V = np.linalg.svd(A)
print("The singular values of A (with noise) are: {}".format(s))
```
If we define the effective rank as the number of singular values that are greater than the noise level, the effective rank of $\boldsymbol{A}$ is 2.
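A minimal sketch of this idea as a function (the tolerance of $10^{-5}$ is an assumption, chosen to sit above the noise level of $10^{-6}$); note that `np.linalg.matrix_rank` also accepts a `tol` argument that implements the same idea:
```python
def effective_rank(M, tol=1.0e-5):
    "Return the number of singular values of M greater than tol"
    sigma = np.linalg.svd(M, compute_uv=False)
    return int((sigma > tol).sum())

print("Effective rank of A (with noise): {}".format(effective_rank(A)))
```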
## Rank deficient least-squares problems
For least squares problem, we have seen before that we solve
$$
\boldsymbol{A}^{T} \boldsymbol{A} \hat{\boldsymbol{x}} = \boldsymbol{A}^{T} \boldsymbol{b}
$$
or
$$
\begin{align}
\hat{\boldsymbol{x}} &= (\boldsymbol{A}^{T} \boldsymbol{A})^{-1} \boldsymbol{A}^{T} \boldsymbol{b}
\\
&= \boldsymbol{A}^{+}\boldsymbol{b}
\end{align}
$$
Everything is fine as long as $\boldsymbol{A}$ is full rank. The problem is that we might have data that leads to $\boldsymbol{A}$ not being full rank. For example, if we try to fit a polynomial in $x$ and $y$, but the data lies on a line.
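As a quick check of these expressions on a full-rank problem (a minimal sketch with random data, purely for illustration), the normal-equations solution agrees with the pseudoinverse solution computed by `np.linalg.pinv`, which uses the SVD internally:
```python
# Sketch: normal equations vs pseudoinverse on a full-rank least-squares problem
A_ls = np.random.rand(10, 3)
b_ls = np.random.rand(10)

x_normal = np.linalg.solve(A_ls.T.dot(A_ls), A_ls.T.dot(b_ls))
x_pinv = np.linalg.pinv(A_ls).dot(b_ls)
print(np.allclose(x_normal, x_pinv))
```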
We have covered in the lectures how to handle least-squares problems that are rank deficient. Here we present an example.
### Example: fitting points in a two-dimensional space
Say we are given four data points that depend on $x$ and $y$, and we are asked to fit a polynomial of the form
$$
f(x, y) = c_{00} + c_{10}x + c_{01}y + c_{11}xy
$$
to the data points. Normally, we would expect to be able to fit the above polynomial to four data points by interpolation, i.e. solving $\boldsymbol{A} \boldsymbol{c} = \boldsymbol{f}$ where
$\boldsymbol{A}$ a square Vandermonde matrix. However, if the points happened to lie on a line, then $\boldsymbol{A}$ will be singular. If the points happen to almost lie on a line, then $\boldsymbol{A}$ will be close to singular.
A possibility is to exclude zero or small singular values from the process, thereby finding a least-squares fit with minimal $\|\boldsymbol{c}\|_{2}$. We test this for the data set
\begin{equation}
f_{1}(1, 0) = 3, \\
f_{2}(2, 0) = 5, \\
f_{3}(3, 0) = 7, \\
f_{4}(4, 0) = 9.
\end{equation}
The data lies on the line $y = 0$, and is in fact linear in $x$.
We create arrays to hold this data, and visualise the points:
```python
x, y, f = np.zeros(4), np.zeros(4), np.zeros(4)
x[0], y[0], f[0] = 1.0, 0.0, 3.0
x[1], y[1], f[1] = 2.0, 0.0, 5.0
x[2], y[2], f[2] = 3.0, 0.0, 7.0
x[3], y[3], f[3] = 4.0, 0.0, 9.0
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('f')
ax.scatter(x, y, f)
plt.show()
```
To find the polynomial coefficients we want to solve
\begin{equation}
\begin{bmatrix}
1 & x_{1} & y_{1} & x_{1}y_{1} \\
1 & x_{2} & y_{2} & x_{2}y_{2} \\
1 & x_{3} & y_{3} & x_{3}y_{3} \\
1 & x_{4} & y_{4} & x_{4}y_{4} \\
\end{bmatrix}
\begin{bmatrix}
c_{00} \\ c_{10} \\ c_{01} \\ c_{11}
\end{bmatrix}
=
\begin{bmatrix}
f_{1} \\ f_{2} \\ f_{3} \\ f_{4}
\end{bmatrix}
\end{equation}
where the matrix is the Vandermonde matrix. We can use a NumPy function to create the Vandermonde matrix:
```python
A = np.polynomial.polynomial.polyvander2d(y, x, [1, 1])
print(A)
```
It is clear by inspection that $\boldsymbol{A}$ is not full rank, and is rank 2.
Computing the SVD of $\boldsymbol{A}$ and printing the singular values:
```python
U, s, V = np.linalg.svd(A)
print(s)
```
We can see that two of the singular values are zero. To find a least-squares fit to the data with minimal $\| \boldsymbol{c}\|_{2}$ we compute
$$
\hat{\boldsymbol{c}} = \boldsymbol{V}_{1} \boldsymbol{\Sigma}^{+}
\boldsymbol{U}_{1}^{T}\boldsymbol{f}
$$
Creating $\boldsymbol{V}_{1}$, $\boldsymbol{\Sigma}^{+}$ and $\boldsymbol{U}_{1}$ (recall that the NumPy SVD returns $\boldsymbol{V}^{T}$ rather than $\boldsymbol{V}$):
```python
# Create view of U with last two columns removed
U1 = U[:, :2]
# Create view of V with last two columns removed
V1 = V[:2,:]
# Create Sigma^{+} by inverting the nonzero singular values and
# discarding the zero singular values
S1 = np.diag(1.0/s[:-2])
print(S1)
```
Computing the least-squares solution from $\hat{\boldsymbol{c}} = \boldsymbol{V}_{1} \boldsymbol{\Sigma}^{+} \boldsymbol{U}_{1}^{T}\boldsymbol{f}$:
```python
c = np.transpose(V1).dot(S1.dot(U1.T).dot(f))
print(c)
```
The solution is $f(x, y) = 1 + 2x$, which in this case interpolates the data points exactly. Plotting the function, we get a plane that passes through the points.
```python
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot points
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$f$')
ax.scatter(x, y, f)
# Plot surface
X = np.arange(0, 5, 0.2)
Y = np.arange(-5, 5, 0.2)
X, Y = np.meshgrid(X, Y)
Z = 1.0 + 2.0*X + 0.0*Y
surf = ax.plot_surface(X, Y, Z, rstride=5, cstride=5, alpha=0.1)
ax.view_init(elev=30, azim=80)
plt.show()
```
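As a quick numerical check (a small sketch, not part of the original derivation), we can evaluate the fitted polynomial at the sample points and confirm that it reproduces $\boldsymbol{f}$:
```python
# A c should reproduce the data vector f since the fit interpolates the points
print(A.dot(c))
print(np.allclose(A.dot(c), f))
```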
We now try adding some noise to the sample positions and the measured values. The Vandermonde matrix is no longer singular so we can solve $\boldsymbol{A} \boldsymbol{c} = \boldsymbol{f}$ to get the polynomial coefficients:
```python
np.random.seed(20)
xn = x + 1.0e-3*(1.0 - np.random.rand(len(x)))
yn = y + 1.0e-3*(1.0 - np.random.rand(len(y)))
fn = f + 1.0e-3*(1.0 - np.random.rand(len(f)))
A = np.polynomial.polynomial.polyvander2d(yn, xn, [1, 1])
c = np.linalg.solve(A, fn)
print(c)
```
We now see significant coefficients for the $y$ and $xy$ terms in the interpolating polynomial simply as a consequence of adding a small amount of noise. Plotting the surface and the points, we see the dramatic impact of the noise.
```python
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot points
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$f$')
ax.scatter(xn, yn, fn)
# Plot surface
X = np.arange(0, 5, 0.2)
Y = np.arange(-5, 5, 0.2)
X, Y = np.meshgrid(X, Y)
Z = c[0] + c[1]*X + c[2]*Y + c[3]*X*Y
surf = ax.plot_surface(X, Y, Z, rstride=5, cstride=5, alpha=0.1)
ax.view_init(elev=30, azim=80)
plt.show()
```
Performing an SVD on the matrix with noise and printing the singular values:
```python
U, s, V = np.linalg.svd(A)
print(s)
```
We see that two of the singular values are considerably smaller than the others. If we set these to zero and follow the least-squares procedure for rank-deficient problems:
```python
# Create view of U with last two columns removed
U1 = U[:, :2]
# Create view of V with last two columns removed
V1 = V[:2,:]
# Create \Sigma^{+}
S1 = np.diag(1.0/s[:-2])
c = np.transpose(V1).dot(S1.dot(U1.T).dot(fn))
print(c)
```
We see that the fitting polynomial is very close to the noise-free case.
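For comparison (this is an alternative route rather than the procedure used above, and the cutoff value is an assumed choice for illustration), `np.linalg.lstsq` performs the same kind of truncation internally: singular values below `rcond` times the largest singular value are discarded.
```python
# lstsq discards singular values smaller than rcond * s_max
c_lstsq, residuals, rank, sv = np.linalg.lstsq(A, fn, rcond=1.0e-2)
print(c_lstsq)
print("Rank used by lstsq: {}".format(rank))
```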
## Principal component analysis
Principal component analysis finds a transformation of a data set such that the covariance between the transformed directions is zero, and the variance along these directions is greatest. This tells us which are the 'important' parameters (directions) in a system.
Consider taking $N = 200$ measurements of two quantities $x_{1}$ and $x_{2}$. We model the system by:
```python
np.random.seed(1)
x0 = np.random.randn(200) + 5.0
x1 = 1.5*x0 + np.random.rand(len(x0))
ax = plt.axes()
ax.scatter(x0, x1, alpha=0.5);
ax.set_xlabel('$x_{1}$');
ax.set_ylabel('$x_{2}$');
```
We collect the data in a $200 \times 2$ matrix $\boldsymbol{X}$ (200 measurements, 2 variables):
```python
X = np.column_stack((x0, x1))
```
We can compute the covariance matrix $\boldsymbol{C}$ by making the columns of $\boldsymbol{X}$ zero mean and computing $\boldsymbol{X}^{T}\boldsymbol{X}/(N-1)$:
```python
for c in range(X.shape[1]):
    X[:,c] = X[:,c] - np.mean(X[:,c])
C = (X.T).dot(X)/(len(x0)-1.0)
```
The covariance matrix is square and symmetric, so we can diagonalise it by computing its eigenvalues and eigenvectors.
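A minimal sketch of this, using `np.linalg.eigh` (which is intended for symmetric matrices):
```python
# Eigen-decomposition of the symmetric covariance matrix
# (eigh returns eigenvalues in ascending order)
evals, evecs = np.linalg.eigh(C)
print(evals)
```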
We could also compute the SVD of $\boldsymbol{X}$, since $\boldsymbol{V}$ is made up of the eigenvectors of $\boldsymbol{X}^{T}\boldsymbol{X}$. Here we compute the SVD of the covariance matrix $\boldsymbol{C}$:
```python
U, s, V = np.linalg.svd(C)
print(s)
```
Plotting the data set and the principal directions:
```python
ax = plt.axes()
ax.set_aspect(1.0);
ax.set_ylim(-4.0, 4.0);
ax.set_xlabel('$x_{1}$')
ax.set_ylabel('$x_{2}$')
ax.quiver(V[0, 0], V[0, 1], angles='xy',scale_units='xy',scale=0.3);
ax.quiver(V[1, 0], V[1, 1], angles='xy',scale_units='xy',scale=1);
ax.scatter(X[:,0], X[:,1], alpha=0.2);
```
PCA effectively detects correlation in a data set. In the above example it suggests that the system could be modelled with one variable in the direction of the first column of $\boldsymbol{V}$.
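As an illustration of this (a sketch appended here, not part of the original analysis), we can project the zero-mean data onto the first principal direction and check how much of the total variance it captures:
```python
# Project the zero-mean data onto the first principal direction
p = X.dot(V[0, :])
print("Fraction of total variance captured: {:.4f}".format(np.var(p, ddof=1)/np.sum(s)))
```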
|
In the UK, the album charted at number 20 on the Albums Chart. In the US, it peaked at number 24 on the Billboard Top Electronic Albums chart. In both cases, it was the first Lemon Jelly album to do so. The album's two singles, "Space Walk" and "Nice Weather for Ducks", also managed to chart on the UK Singles Chart, at number 36 and 16 respectively. Again, this was a first for the band. On 20 December 2002 the album was certified silver. More than ten years later, on 22 July 2013, it was certified gold, denoting shipments of over 100,000. In 2003, the album was nominated for a Mercury Music Prize, although the album lost to Dizzee Rascal's Boy in da Corner.
|
State Before: l : Type ?u.1289823
m : Type ?u.1289826
n : Type u_3
o : Type ?u.1289832
m' : o → Type ?u.1289837
n' : o → Type ?u.1289842
R : Type u_1
S : Type u_2
α : Type v
β : Type w
γ : Type ?u.1289855
inst✝⁴ : Fintype n
inst✝³ : NonAssocSemiring α
inst✝² : NonAssocSemiring β
inst✝¹ : NonAssocSemiring R
inst✝ : NonAssocSemiring S
f : R →+* S
v w : n → R
⊢ ↑f (v ⬝ᵥ w) = ↑f ∘ v ⬝ᵥ ↑f ∘ w State After: no goals Tactic: simp only [Matrix.dotProduct, f.map_sum, f.map_mul, Function.comp] |
Load "include/ops_header.v".
(* These preconditions are temporarily set to True, and will be refined
by the very process of formalization. *)
Module precond.
Definition Sn2 (n : int) := (n != - 2%:~R) /\ (n != - 1).
End precond.
Load "include/ann_z.v".
Record Ann z : Type := ann {
Sn2_ : Sn2 z
}.
|
[GOAL]
C : Cat
⊢ { obj := fun C => Quotient (isIsomorphicSetoid ↑C),
map := fun {C D} F => Quot.map F.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (F.obj X) (F.obj Y)) }.map
(𝟙 C) =
𝟙
({ obj := fun C => Quotient (isIsomorphicSetoid ↑C),
map := fun {C D} F => Quot.map F.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (F.obj X) (F.obj Y)) }.obj
C)
[PROOFSTEP]
dsimp
[GOAL]
C : Cat
⊢ Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) =
𝟙 (Quotient (isIsomorphicSetoid ↑C))
[PROOFSTEP]
apply funext
[GOAL]
case h
C : Cat
⊢ ∀ (x : Quot Setoid.r),
Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) x =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) x
[PROOFSTEP]
intro x
[GOAL]
case h
C : Cat
x : Quot Setoid.r
⊢ Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) x =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) x
[PROOFSTEP]
apply x.recOn
[GOAL]
case h.h
C : Cat
x : Quot Setoid.r
⊢ ∀ (a b : ↑C) (p : Setoid.r a b),
(_ :
Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y))
(Quot.mk Setoid.r b) =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) (Quot.mk Setoid.r b)) =
(_ : ?m.1036 (Quot.mk Setoid.r b))
[PROOFSTEP]
intro _ _ p
[GOAL]
case h.h
C : Cat
x : Quot Setoid.r
a✝ b✝ : ↑C
p : Setoid.r a✝ b✝
⊢ (_ :
Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) (Quot.mk Setoid.r b✝) =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) (Quot.mk Setoid.r b✝)) =
(_ : ?m.1036 (Quot.mk Setoid.r b✝))
[PROOFSTEP]
simp only [types_id_apply]
[GOAL]
case h.f
C : Cat
x : Quot Setoid.r
⊢ ∀ (a : ↑C),
Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) (Quot.mk Setoid.r a) =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) (Quot.mk Setoid.r a)
[PROOFSTEP]
intro _
[GOAL]
case h.f
C : Cat
x : Quot Setoid.r
a✝ : ↑C
⊢ Quot.map (𝟙 C).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r ((𝟙 C).obj X) ((𝟙 C).obj Y)) (Quot.mk Setoid.r a✝) =
𝟙 (Quotient (isIsomorphicSetoid ↑C)) (Quot.mk Setoid.r a✝)
[PROOFSTEP]
rfl
[GOAL]
C D E : Cat
f : C ⟶ D
g : D ⟶ E
⊢ { obj := fun C => Quotient (isIsomorphicSetoid ↑C),
map := fun {C D} F => Quot.map F.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (F.obj X) (F.obj Y)) }.map
(f ≫ g) =
{ obj := fun C => Quotient (isIsomorphicSetoid ↑C),
map := fun {C D} F => Quot.map F.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (F.obj X) (F.obj Y)) }.map
f ≫
{ obj := fun C => Quotient (isIsomorphicSetoid ↑C),
map := fun {C D} F => Quot.map F.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (F.obj X) (F.obj Y)) }.map
g
[PROOFSTEP]
dsimp
[GOAL]
C D E : Cat
f : C ⟶ D
g : D ⟶ E
⊢ Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y))) =
Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y))
[PROOFSTEP]
apply funext
[GOAL]
case h
C D E : Cat
f : C ⟶ D
g : D ⟶ E
⊢ ∀ (x : Quot Setoid.r),
Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y))) x =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
x
[PROOFSTEP]
intro x
[GOAL]
case h
C D E : Cat
f : C ⟶ D
g : D ⟶ E
x : Quot Setoid.r
⊢ Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y))) x =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
x
[PROOFSTEP]
apply x.recOn
[GOAL]
case h.h
C D E : Cat
f : C ⟶ D
g : D ⟶ E
x : Quot Setoid.r
⊢ ∀ (a b : ↑C) (p : Setoid.r a b),
(_ :
Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y)))
(Quot.mk Setoid.r b) =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
(Quot.mk Setoid.r b)) =
(_ : ?m.1407 (Quot.mk Setoid.r b))
[PROOFSTEP]
intro _ _ _
[GOAL]
case h.h
C D E : Cat
f : C ⟶ D
g : D ⟶ E
x : Quot Setoid.r
a✝ b✝ : ↑C
p✝ : Setoid.r a✝ b✝
⊢ (_ :
Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y)))
(Quot.mk Setoid.r b✝) =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
(Quot.mk Setoid.r b✝)) =
(_ : ?m.1407 (Quot.mk Setoid.r b✝))
[PROOFSTEP]
simp only [types_id_apply]
[GOAL]
case h.f
C D E : Cat
f : C ⟶ D
g : D ⟶ E
x : Quot Setoid.r
⊢ ∀ (a : ↑C),
Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y)))
(Quot.mk Setoid.r a) =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
(Quot.mk Setoid.r a)
[PROOFSTEP]
intro _
[GOAL]
case h.f
C D E : Cat
f : C ⟶ D
g : D ⟶ E
x : Quot Setoid.r
a✝ : ↑C
⊢ Quot.map (f ≫ g).obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (g.obj (f.obj X)) (g.obj (f.obj Y)))
(Quot.mk Setoid.r a✝) =
(Quot.map f.obj (_ : ∀ (X Y : ↑C), Setoid.r X Y → Setoid.r (f.obj X) (f.obj Y)) ≫
Quot.map g.obj (_ : ∀ (X Y : ↑D), Setoid.r X Y → Setoid.r (g.obj X) (g.obj Y)))
(Quot.mk Setoid.r a✝)
[PROOFSTEP]
rfl
|
Require Import Coq.Sets.Ensembles Bedrock.Platform.AutoSep Bedrock.Platform.Malloc.
Require Import Bedrock.Platform.Facade.examples.FiatADTs.
Infix "===" := (@Same_set W).
Definition empty := Empty_set W.
Notation "%0" := empty.
(* Who knows why this wrapper is necessary to keep the tactics happy.... *)
Module Type HAS.
Parameter has : Ensemble W -> W -> Prop.
Axiom has_eq : has = Ensembles.In _.
End HAS.
Module Has : HAS.
Definition has := Ensembles.In W.
Theorem has_eq : has = Ensembles.In _.
auto.
Qed.
End Has.
Import Has.
Export Has.
Infix "%has" := has (at level 70).
Definition add := Ensembles.Add W.
Infix "%+" := add (at level 50).
Definition sub := Subtract W.
Infix "%-" := sub (at level 50).
Section adt.
Variable P : Ensemble W -> W -> HProp.
Variable res : nat.
Definition newS := SPEC("extra_stack") reserving res
PRE[_] mallocHeap 0
POST[R] P %0 R * mallocHeap 0.
Definition deleteS := SPEC("extra_stack", "self") reserving res
Al s,
PRE[V] P s (V "self") * mallocHeap 0
POST[R] [| R = $0 |] * mallocHeap 0.
Definition memS := SPEC("extra_stack", "self", "n") reserving res
Al s,
PRE[V] P s (V "self") * mallocHeap 0
POST[R] [| s %has V "n" \is R |] * P s (V "self") * mallocHeap 0.
Definition addS := SPEC("extra_stack", "self", "n") reserving res
Al s,
PRE[V] P s (V "self") * mallocHeap 0
POST[R] [| R = $0 |] * P (s %+ V "n") (V "self") * mallocHeap 0.
Definition removeS := SPEC("extra_stack", "self", "n") reserving res
Al s,
PRE[V] P s (V "self") * mallocHeap 0
POST[R] [| R = $0 |] * P (s %- V "n") (V "self") * mallocHeap 0.
Definition cardinal_is (s : Ensemble W) (R : W) :=
exists n, cardinal _ s n /\ R = natToWord _ n.
Definition sizeS := SPEC("extra_stack", "self") reserving res
Al s,
PRE[V] P s (V "self") * mallocHeap 0
POST[R] [| cardinal_is s R |] * P s (V "self") * mallocHeap 0.
End adt.
|
lemma Re_Reals_divide: "r \<in> \<real> \<Longrightarrow> Re (r / z) = Re r * Re z / (norm z)\<^sup>2" |
#ifndef __GSLSTREAM_H__
#define __GSLSTREAM_H__
#include "RngStream.h"
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
namespace rng {
class GSLStream : public RngStream {
public:
GSLStream() : type(gsl_rng_taus2) {}
virtual ~GSLStream() { free(); }
inline void alloc(unsigned long seed = time(NULL)) {
if (! is_alloc) {
rng = gsl_rng_alloc(type);
is_alloc = 1;
gsl_rng_set(rng,seed);
}
}
inline void free() {
if (is_alloc) {
gsl_rng_free(rng);
is_alloc = 0;
}
}
inline void uniform(size_t n, double* r, double a = 0.0, double b = 1.0) {
for (size_t i = 0; i < n; ++i) {
r[i] = a + (b-a)*gsl_rng_uniform(rng);
}
}
inline void uniform_int(size_t n, int* r, int a = 0, int b = 10) {
for (size_t i = 0; i < n; ++i) {
r[i] = a + gsl_rng_uniform_int(rng,b-a);
}
}
inline void shuffle(int* x, size_t n) { gsl_ran_shuffle(rng,x,n,sizeof(int)); }
inline void shuffle(double* x, size_t n) { gsl_ran_shuffle(rng,x,n,sizeof(double)); }
inline void multinomial(size_t k, size_t n, const double* p, unsigned* a) {
gsl_ran_multinomial(rng,k,n,p,a);
}
    inline void gaussian(size_t n, double* r, double mu = 0.0, double sigma = 1.0) {
      // Fill all n entries with Gaussian draws of mean mu and standard deviation sigma
      for (size_t i = 0; i < n; ++i) {
        r[i] = mu + gsl_ran_gaussian(rng,sigma);
      }
    }
    inline void poisson(size_t n, int* k, double lambda) {
      // Fill all n entries with Poisson draws of rate lambda
      for (size_t i = 0; i < n; ++i) {
        k[i] = static_cast<int>(gsl_ran_poisson(rng,lambda));
      }
    }
protected:
const gsl_rng_type* type;
gsl_rng* rng;
};
}
#endif
|
= = = Northern Ireland = = =
|
function h=ref_pfilt_1(f,g,a)
%REF_PFILT_1 Reference PFILT implementation by FFT
%
% This is the old reference pfilt from before the struct filters were
% introduced.
[L W]=size(f);
g=fir2long(g,L);
% Force FFT along dimension 1, since we have permuted the dimensions
% manually
if isreal(f) && isreal(g)
h=ifftreal(fftreal(f,L,1).*repmat(fftreal(g,L,1),1,W),L,1);
else
h=ifft(fft(f,L,1).*repmat(fft(g,L,1),1,W),L,1);
end;
h=h(1:a:end,:);
|
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng, Stanislas Polu, David Renshaw, OpenAI GPT-f
-/
import mathzoo.imports.miniF2F
open_locale nat rat real big_operators topological_space
theorem induction_nfactltnexpnm1ngt3
(n : ℕ)
(h₀ : 3 ≤ n) :
nat.factorial n < n^(n - 1) :=
begin
induction h₀ with k h₀ IH,
{ norm_num },
{
have k_ge_one : 1 ≤ k := le_trans dec_trivial h₀,
calc k.succ.factorial = k.succ * k.factorial : rfl
... < k.succ * k ^ (k-1) : (mul_lt_mul_left (nat.succ_pos k)).mpr IH
... ≤ k.succ * (k.succ) ^ (k-1): nat.mul_le_mul_left _ $ nat.pow_le_pow_of_le_left (nat.le_succ k) (k-1)
... = k.succ ^ (k-1 + 1): by rw ← (pow_succ k.succ (k-1))
... = k.succ ^ k: by rw nat.sub_add_cancel k_ge_one,
}
end |
% !TEX TS-program = xelatex
% !TEX encoding = UTF-8 Unicode
\documentclass[12pt, letterpaper]{article}
% Set document data
\newcommand{\settitle}{Analysis of Muddy Points Survey \#\VAR{contents.get("quiz_number")}}
\newcommand{\setauthor}{Andrew Hoetker}
\title{\settitle}
\author{\setauthor}
% Math environments
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
% Chemical formulas
\usepackage[version=4]{mhchem}
\usepackage{chemfig}
% SI units and friends
\usepackage{siunitx}
\DeclareSIUnit\Fahrenheit{\degree F}
\DeclareSIUnit\pound{lb}
\DeclareSIUnit\poundmass{lb\textsubscript{m}}
\DeclareSIUnit\foot{ft}
\DeclareSIUnit\atm{atm}
\DeclareSIUnit\poise{P}
% Page layout
\usepackage[margin=1in,paper=letterpaper]{geometry}
\usepackage{setspace}
% Header
\usepackage{fancyhdr}
\pagestyle{fancy}
\lhead{\setauthor}
\rhead{\settitle}
\rfoot{\tiny{Generated \VAR{
contents.get("timestamp")
} using Canvas API}}
% XeLaTeX Fonts
%\usepackage{unicode-math}
%\setmainfont[%
% BoldFont={STIX2Text-Bold.otf},
% ItalicFont={STIX2Text-Italic.otf}]{STIX2Text-Regular.otf}
%\setmathfont{STIX2Math.otf}
\usepackage{stix2}
% Bibliography
%\usepackage[backend=biber,style=chem-acs,citestyle=chem-acs]{biblatex}
%\addbibresource{styreneproject.bib}
% Fancy linked references
\usepackage{varioref}
\labelformat{equation}{equation~(#1)}
\labelformat{figure}{figure~#1}
\labelformat{table}{table~#1}
\labelformat{appendix}{appendix~#1}
% Hyperlinks
% These will turn internal cross-references, in-text citations,
% and headings into clickable links in the PDF. The link text
% will still be black.
\usepackage{color}
\usepackage[%
pdfborder=0,
colorlinks=true,
urlcolor=black,
linkcolor=black,
citecolor=black]{hyperref}
% Figures and captions
\usepackage{graphicx}
\usepackage{booktabs}
\usepackage{caption}
\captionsetup[figure]{justification=centering,labelfont=it}
\captionsetup[table]{justification=centering,labelfont=it}
\usepackage[section]{placeins}
% Filler text
\usepackage{lipsum}
\setcounter{secnumdepth}{0} % sections are level 1
\begin{document}
\maketitle
\tableofcontents
\newpage
\section{Analysis for Both Sections}
\subsection{Attendance by Section}
This week, \VAR{
contents
.get("combined")
.get("attendance")
.get("count")
} students earned attendance points by responding to the Muddy Points survey.
\begin{figure}[hbt!]
\centering
\includegraphics{\VAR{
contents
.get("combined")
.get("attendance")
.get("filename")
}}
\caption{\VAR{
contents
.get("combined")
.get("attendance")
.get("title")
}}
\end{figure}
%- if contents.get("combined").get("attendance").get("notes")
\medskip
\textbf{Note:} \VAR{contents
.get("combined")
.get("attendance")
.get("notes")
}
%- endif
\subsection{Self-Assessed Confusion by Section}
\begin{figure}[hbt!]
\centering
\includegraphics{\VAR{
contents
.get("combined")
.get("stacked")
.get("filename")
}}
\caption{\VAR{
contents
.get("combined")
.get("stacked")
.get("title")
}}
\end{figure}
%- for instructor in contents["instructors"]
\newpage
\section{Analysis for Dr. \VAR{instructor}}
\subsection{Self-Assessed Confusion}
The students were asked, "\VAR{
contents
.get("instructors")
.get(instructor)
.get("ranked_confusion")
.get("title")
.strip()
}".
\begin{figure}[hbt!]
\centering
\includegraphics{\VAR{
contents
.get("instructors")
.get(instructor)
.get("ranked_confusion")
.get("filename")
}}
\caption{Histogram of self-reported confusion.}
\end{figure}
\FloatBarrier
This week, \VAR{
contents
.get("instructors")
.get(instructor)
.get("ranked_confusion")
.get("count")
} students responded to the question in this section.
The average self-reported confusion was \VAR{
"{:.2f}".format(contents
.get("instructors")
.get(instructor)
.get("ranked_confusion")
.get("mean_confusion")
)
}, and the median self-reported confusion was \VAR{
"{:.2f}".format(contents
.get("instructors")
.get(instructor)
.get("ranked_confusion")
.get("median_confusion")
)
}.
\subsection{Confusing and Interesting Topics}
The students were asked to respond with a short answer to, "\VAR{
contents
.get("instructors")
.get(instructor)
.get("short_response")
.get("title")
.strip()
}".
These are some responses from students who reported the highest levels of confusion:
%- for question in contents.get("instructors").get(instructor).get("most_confused")
%- for k, v in question.items()
\bigskip
\noindent\textbf{Confusion level: \VAR{v}} \\
\begin{quote}
\textit{\VAR{k}}
\end{quote}
%- endfor
%- endfor
\begin{figure}[hbt!]
\centering
\includegraphics[scale=0.85]{\VAR{
contents
.get("instructors")
.get(instructor)
.get("short_response")
.get("filename")
}}
\caption{Wordcloud of short responses.}
\end{figure}
\FloatBarrier
%- endfor
\end{document}
|
(* Title: HOL/Quickcheck_Random.thy
Author: Florian Haftmann & Lukas Bulwahn, TU Muenchen
*)
section \<open>A simple counterexample generator performing random testing\<close>
theory Quickcheck_Random
imports Random Code_Evaluation Enum
begin
notation fcomp (infixl "\<circ>>" 60)
notation scomp (infixl "\<circ>\<rightarrow>" 60)
setup \<open>Code_Target.add_derived_target ("Quickcheck", [(Code_Runtime.target, I)])\<close>
subsection \<open>Catching Match exceptions\<close>
axiomatization catch_match :: "'a => 'a => 'a"
code_printing
constant catch_match \<rightharpoonup> (Quickcheck) "((_) handle Match => _)"
subsection \<open>The \<open>random\<close> class\<close>
class random = typerep +
fixes random :: "natural \<Rightarrow> Random.seed \<Rightarrow> ('a \<times> (unit \<Rightarrow> term)) \<times> Random.seed"
subsection \<open>Fundamental and numeric types\<close>
instantiation bool :: random
begin
definition
"random i = Random.range 2 \<circ>\<rightarrow>
(\<lambda>k. Pair (if k = 0 then Code_Evaluation.valtermify False else Code_Evaluation.valtermify True))"
instance ..
end
instantiation itself :: (typerep) random
begin
definition
random_itself :: "natural \<Rightarrow> Random.seed \<Rightarrow> ('a itself \<times> (unit \<Rightarrow> term)) \<times> Random.seed"
where "random_itself _ = Pair (Code_Evaluation.valtermify TYPE('a))"
instance ..
end
instantiation char :: random
begin
definition
"random _ = Random.select (Enum.enum :: char list) \<circ>\<rightarrow> (\<lambda>c. Pair (c, \<lambda>u. Code_Evaluation.term_of c))"
instance ..
end
instantiation String.literal :: random
begin
definition
"random _ = Pair (STR '''', \<lambda>u. Code_Evaluation.term_of (STR ''''))"
instance ..
end
instantiation nat :: random
begin
definition random_nat :: "natural \<Rightarrow> Random.seed
\<Rightarrow> (nat \<times> (unit \<Rightarrow> Code_Evaluation.term)) \<times> Random.seed"
where
"random_nat i = Random.range (i + 1) \<circ>\<rightarrow> (\<lambda>k. Pair (
let n = nat_of_natural k
in (n, \<lambda>_. Code_Evaluation.term_of n)))"
instance ..
end
instantiation int :: random
begin
definition
"random i = Random.range (2 * i + 1) \<circ>\<rightarrow> (\<lambda>k. Pair (
let j = (if k \<ge> i then int (nat_of_natural (k - i)) else - (int (nat_of_natural (i - k))))
in (j, \<lambda>_. Code_Evaluation.term_of j)))"
instance ..
end
instantiation natural :: random
begin
definition random_natural :: "natural \<Rightarrow> Random.seed
\<Rightarrow> (natural \<times> (unit \<Rightarrow> Code_Evaluation.term)) \<times> Random.seed"
where
"random_natural i = Random.range (i + 1) \<circ>\<rightarrow> (\<lambda>n. Pair (n, \<lambda>_. Code_Evaluation.term_of n))"
instance ..
end
instantiation integer :: random
begin
definition random_integer :: "natural \<Rightarrow> Random.seed
\<Rightarrow> (integer \<times> (unit \<Rightarrow> Code_Evaluation.term)) \<times> Random.seed"
where
"random_integer i = Random.range (2 * i + 1) \<circ>\<rightarrow> (\<lambda>k. Pair (
let j = (if k \<ge> i then integer_of_natural (k - i) else - (integer_of_natural (i - k)))
in (j, \<lambda>_. Code_Evaluation.term_of j)))"
instance ..
end
subsection \<open>Complex generators\<close>
text \<open>Towards @{typ "'a \<Rightarrow> 'b"}\<close>
axiomatization random_fun_aux :: "typerep \<Rightarrow> typerep \<Rightarrow> ('a \<Rightarrow> 'a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> term)
\<Rightarrow> (Random.seed \<Rightarrow> ('b \<times> (unit \<Rightarrow> term)) \<times> Random.seed)
\<Rightarrow> (Random.seed \<Rightarrow> Random.seed \<times> Random.seed)
\<Rightarrow> Random.seed \<Rightarrow> (('a \<Rightarrow> 'b) \<times> (unit \<Rightarrow> term)) \<times> Random.seed"
definition random_fun_lift :: "(Random.seed \<Rightarrow> ('b \<times> (unit \<Rightarrow> term)) \<times> Random.seed)
\<Rightarrow> Random.seed \<Rightarrow> (('a::term_of \<Rightarrow> 'b::typerep) \<times> (unit \<Rightarrow> term)) \<times> Random.seed"
where
"random_fun_lift f =
random_fun_aux TYPEREP('a) TYPEREP('b) (op =) Code_Evaluation.term_of f Random.split_seed"
instantiation "fun" :: ("{equal, term_of}", random) random
begin
definition
random_fun :: "natural \<Rightarrow> Random.seed \<Rightarrow> (('a \<Rightarrow> 'b) \<times> (unit \<Rightarrow> term)) \<times> Random.seed"
where "random i = random_fun_lift (random i)"
instance ..
end
text \<open>Towards type copies and datatypes\<close>
definition collapse :: "('a \<Rightarrow> ('a \<Rightarrow> 'b \<times> 'a) \<times> 'a) \<Rightarrow> 'a \<Rightarrow> 'b \<times> 'a"
where "collapse f = (f \<circ>\<rightarrow> id)"
definition beyond :: "natural \<Rightarrow> natural \<Rightarrow> natural"
where "beyond k l = (if l > k then l else 0)"
lemma beyond_zero: "beyond k 0 = 0"
by (simp add: beyond_def)
definition (in term_syntax) [code_unfold]:
"valterm_emptyset = Code_Evaluation.valtermify ({} :: ('a :: typerep) set)"
definition (in term_syntax) [code_unfold]:
"valtermify_insert x s = Code_Evaluation.valtermify insert {\<cdot>} (x :: ('a :: typerep * _)) {\<cdot>} s"
instantiation set :: (random) random
begin
fun random_aux_set
where
"random_aux_set 0 j = collapse (Random.select_weight [(1, Pair valterm_emptyset)])"
| "random_aux_set (Code_Numeral.Suc i) j =
collapse (Random.select_weight
[(1, Pair valterm_emptyset),
(Code_Numeral.Suc i,
random j \<circ>\<rightarrow> (%x. random_aux_set i j \<circ>\<rightarrow> (%s. Pair (valtermify_insert x s))))])"
definition "random_set i = random_aux_set i i"
instance ..
end
lemma random_aux_rec:
fixes random_aux :: "natural \<Rightarrow> 'a"
assumes "random_aux 0 = rhs 0"
and "\<And>k. random_aux (Code_Numeral.Suc k) = rhs (Code_Numeral.Suc k)"
shows "random_aux k = rhs k"
using assms by (rule natural.induct)
subsection \<open>Deriving random generators for datatypes\<close>
ML_file "Tools/Quickcheck/quickcheck_common.ML"
ML_file "Tools/Quickcheck/random_generators.ML"
subsection \<open>Code setup\<close>
code_printing
constant random_fun_aux \<rightharpoonup> (Quickcheck) "Random'_Generators.random'_fun"
\<comment> \<open>With enough criminal energy this can be abused to derive @{prop False};
for this reason we use a distinguished target \<open>Quickcheck\<close>
not spoiling the regular trusted code generation\<close>
code_reserved Quickcheck Random_Generators
no_notation fcomp (infixl "\<circ>>" 60)
no_notation scomp (infixl "\<circ>\<rightarrow>" 60)
hide_const (open) catch_match random collapse beyond random_fun_aux random_fun_lift
hide_fact (open) collapse_def beyond_def random_fun_lift_def
end
|
import Aesop
axiom Ring : Type
axiom Scheme : Type
axiom affine (X : Scheme) : Prop
axiom quasi_compact (X : Scheme) : Prop
@[aesop 99%] axiom Spec : Ring → Scheme
@[aesop 99%] axiom qc_of_af {X : Scheme} (h : affine X) : quasi_compact X
@[aesop 99%] axiom ZZ : Ring
@[aesop 99%] axiom spec_affine (R : Ring) : affine (Spec R)
theorem thm : ∃ (X : Scheme) (h₁ : affine X) (h₂ : quasi_compact X), True := by {
aesop;
}
#print thm
-- example : ∃ (X : Scheme) (h₁ : P X) (h₂ : Q X), True := by {
-- apply Exists.intro (Spec ZZ);
-- apply Exists.intro (spec_affine ZZ);
-- apply Exists.intro (qc_of_af (spec_affine ZZ));
-- apply True.intro;
-- }
|
{-# OPTIONS --without-K --rewriting #-}
{-
Imports everything that is not imported by something else.
This is not supposed to be used anywhere, this is just a simple way to
do `make all'
This file is intentionally named index.agda so that
Agda will generate index.html.
-}
module index where
{- some group theory results -}
import groups.ReducedWord
import groups.ProductRepr
import groups.CoefficientExtensionality
{- homotopy groups of circles -}
import homotopy.LoopSpaceCircle
import homotopy.PinSn
import homotopy.HopfJunior
import homotopy.Hopf
{- cohomology -}
import cohomology.EMModel
import cohomology.Sigma
import cohomology.Coproduct
import cohomology.Torus
-- import cohomology.MayerVietorisExact -- FIXME
{- prop * prop is still a prop -}
import homotopy.PropJoinProp
{- a space with preassigned homotopy groups -}
import homotopy.SpaceFromGroups
{- pushout 3x3 lemma -}
{- These takes lots of time and memory to check. -}
-- import homotopy.3x3.Commutes -- commented out because this does not run on travis.
-- import homotopy.JoinAssoc3x3 -- commented out because this does not run on travis.
{- covering spaces -}
import homotopy.GroupSetsRepresentCovers
import homotopy.AnyUniversalCoverIsPathSet
import homotopy.PathSetIsInitalCover
{- van kampen -}
import homotopy.VanKampen
{- blakers massey -}
import homotopy.BlakersMassey
{- cw complexes -}
import cw.CW
import cw.examples.Examples
-- cellular cohomology groups
import cw.cohomology.CellularChainComplex
-- Eilenberg-Steenred cohomology groups rephrased
import cw.cohomology.ReconstructedCohomologyGroups
-- isomorphisms between the cochains the heads
import cw.cohomology.ReconstructedCochainsIsoCellularCochains
-- There are some unported theorems
-- import Spaces.IntervalProps
-- import Algebra.F2NotCommutative
-- import Spaces.LoopSpaceDecidableWedgeCircles
-- import Homotopy.PullbackIsPullback
-- import Homotopy.PushoutIsPushout
-- import Homotopy.Truncation
-- import Sets.QuotientUP
|
Require Import VST.concurrency.dry_machine.
Require Import VST.concurrency.erased_machine.
Require Import VST.concurrency.threads_lemmas.
Require Import VST.concurrency.permissions.
Require Import VST.concurrency.semantics.
Require Import VST.concurrency.concurrent_machine.
Require Import compcert.common.Globalenvs.
Require Import compcert.lib.Axioms.
From mathcomp.ssreflect Require Import ssreflect ssrbool ssrnat ssrfun eqtype seq fintype finfun.
Set Implicit Arguments.
Import Concur.
Module Type MachinesSig.
Declare Module SEM: Semantics.
Module DryMachine := DryMachineShell SEM.
Module ErasedMachine := ErasedMachineShell SEM.
Module DryConc := CoarseMachine mySchedule DryMachine.
Module FineConc := FineMachine mySchedule DryMachine.
(** SC machine*)
Module SC := FineMachine mySchedule ErasedMachine.
Import DryMachine ThreadPool.
Global Ltac pf_cleanup :=
repeat match goal with
| [H1: invariant ?X, H2: invariant ?X |- _] =>
assert (H1 = H2) by (by eapply proof_irr);
subst H2
| [H1: mem_compatible ?TP ?M, H2: mem_compatible ?TP ?M |- _] =>
assert (H1 = H2) by (by eapply proof_irr);
subst H2
| [H1: is_true (leq ?X ?Y), H2: is_true (leq ?X ?Y) |- _] =>
assert (H1 = H2) by (by eapply proof_irr); subst H2
| [H1: containsThread ?TP ?M, H2: containsThread ?TP ?M |- _] =>
assert (H1 = H2) by (by eapply proof_irr); subst H2
| [H1: containsThread ?TP ?M,
H2: containsThread (@updThreadC _ ?TP _ _) ?M |- _] =>
apply cntUpdateC' in H2;
assert (H1 = H2) by (by eapply cnt_irr); subst H2
| [H1: containsThread ?TP ?M,
H2: containsThread (@updThread _ ?TP _ _ _) ?M |- _] =>
apply cntUpdate' in H2;
assert (H1 = H2) by (by eapply cnt_irr); subst H2
end.
End MachinesSig.
Module Type AsmContext (SEM : Semantics)
(Machines : MachinesSig with Module SEM := SEM).
Import Machines.
Parameter initU: mySchedule.schedule.
Parameter init_mem : option Memory.Mem.mem.
Definition init_perm :=
match init_mem with
| Some m => Some (getCurPerm m, empty_map)
| None => None
end.
Parameter the_ge : SEM.G.
Definition coarse_semantics:=
DryConc.MachineSemantics initU init_perm.
Definition fine_semantics:=
FineConc.MachineSemantics initU init_perm.
Definition sc_semantics :=
SC.MachineSemantics initU None.
Definition tpc_init f arg := initial_core coarse_semantics 0 the_ge f arg.
Definition tpf_init f arg := initial_core fine_semantics 0 the_ge f arg.
Definition sc_init f arg := initial_core sc_semantics 0 the_ge f arg.
End AsmContext.
|
#ifndef __nark_util_throw_hpp__
#define __nark_util_throw_hpp__
#include "autofree.hpp"
#include <boost/current_function.hpp>
#include <stdio.h>
#include <errno.h>
#ifdef _MSC_VER
#define NARK_THROW(Except, fmt, ...) \
do { \
char __buf[4096]; \
int __len = _snprintf(__buf, sizeof(__buf), \
"%s:%d: %s: errno=%d : " fmt, \
__FILE__, __LINE__, BOOST_CURRENT_FUNCTION, errno, \
##__VA_ARGS__); \
fprintf(stderr, "%s\n", __buf); \
std::string strMsg(__buf, __len); \
throw Except(strMsg); \
} while (0)
#else
#define NARK_THROW(Except, fmt, ...) \
do { \
nark::AutoFree<char> __msg; \
int __len = asprintf(&__msg.p, "%s:%d: %s: " fmt, \
__FILE__, __LINE__, BOOST_CURRENT_FUNCTION, \
##__VA_ARGS__); \
fprintf(stderr, "%s\n", __msg.p); \
std::string strMsg(__msg.p, __len); \
throw Except(strMsg); \
} while (0)
#endif
#define THROW_STD(Except, fmt, ...) \
NARK_THROW(std::Except, fmt, ##__VA_ARGS__)
#endif // __nark_util_throw_hpp__
|
lemmas prime_dvd_power_nat = prime_dvd_power[where ?'a = nat] |
/-
Copyright (c) 2021 Patrick Massot. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Patrick Massot
-/
import topology.algebra.nonarchimedean.bases
import topology.algebra.uniform_filter_basis
import ring_theory.valuation.basic
/-!
# The topology on a valued ring
In this file, we define the non archimedean topology induced by a valuation on a ring.
The main definition is a `valued` type class which equips a ring with a valuation taking
values in a group with zero. Other instances are then deduced from this.
-/
open_locale classical topology uniformity
open set valuation
noncomputable theory
universes v u
variables {R : Type u} [ring R] {Γ₀ : Type v} [linear_ordered_comm_group_with_zero Γ₀]
namespace valuation
variables (v : valuation R Γ₀)
/-- The basis of open subgroups for the topology on a ring determined by a valuation. -/
lemma subgroups_basis :
ring_subgroups_basis (λ γ : Γ₀ˣ, (v.lt_add_subgroup γ : add_subgroup R)) :=
{ inter := begin
rintros γ₀ γ₁,
use min γ₀ γ₁,
simp [valuation.lt_add_subgroup] ; tauto
end,
mul := begin
rintros γ,
cases exists_square_le γ with γ₀ h,
use γ₀,
rintro - ⟨r, s, r_in, s_in, rfl⟩,
calc (v (r*s) : Γ₀) = v r * v s : valuation.map_mul _ _ _
... < γ₀*γ₀ : mul_lt_mul₀ r_in s_in
... ≤ γ : by exact_mod_cast h
end,
left_mul := begin
rintros x γ,
rcases group_with_zero.eq_zero_or_unit (v x) with Hx | ⟨γx, Hx⟩,
{ use (1 : Γ₀ˣ),
rintros y (y_in : (v y : Γ₀) < 1),
change v (x * y) < _,
rw [valuation.map_mul, Hx, zero_mul],
exact units.zero_lt γ },
{ simp only [image_subset_iff, set_of_subset_set_of, preimage_set_of_eq, valuation.map_mul],
use γx⁻¹*γ,
rintros y (vy_lt : v y < ↑(γx⁻¹ * γ)),
change (v (x * y) : Γ₀) < γ,
rw [valuation.map_mul, Hx, mul_comm],
rw [units.coe_mul, mul_comm] at vy_lt,
simpa using mul_inv_lt_of_lt_mul₀ vy_lt }
end,
right_mul := begin
rintros x γ,
rcases group_with_zero.eq_zero_or_unit (v x) with Hx | ⟨γx, Hx⟩,
{ use 1,
rintros y (y_in : (v y : Γ₀) < 1),
change v (y * x) < _,
rw [valuation.map_mul, Hx, mul_zero],
exact units.zero_lt γ },
{ use γx⁻¹*γ,
rintros y (vy_lt : v y < ↑(γx⁻¹ * γ)),
change (v (y * x) : Γ₀) < γ,
rw [valuation.map_mul, Hx],
rw [units.coe_mul, mul_comm] at vy_lt,
simpa using mul_inv_lt_of_lt_mul₀ vy_lt }
end }
end valuation
/-- A valued ring is a ring that comes equipped with a distinguished valuation. The class `valued`
is designed for the situation that there is a canonical valuation on the ring.
TODO: show that there always exists an equivalent valuation taking values in a type belonging to
the same universe as the ring.
See Note [forgetful inheritance] for why we extend `uniform_space`, `uniform_add_group`. -/
class valued (R : Type u) [ring R] (Γ₀ : out_param (Type v))
[linear_ordered_comm_group_with_zero Γ₀] extends uniform_space R, uniform_add_group R :=
(v : valuation R Γ₀)
(is_topological_valuation : ∀ s, s ∈ 𝓝 (0 : R) ↔ ∃ (γ : Γ₀ˣ), { x : R | v x < γ } ⊆ s)
/-- The `dangerous_instance` linter does not check whether the metavariables only occur in
arguments marked with `out_param`, so in this instance it gives a false positive. -/
attribute [nolint dangerous_instance] valued.to_uniform_space
namespace valued
/-- Alternative `valued` constructor for use when there is no preferred `uniform_space`
structure. -/
def mk' (v : valuation R Γ₀) : valued R Γ₀ :=
{ v := v,
to_uniform_space := @topological_add_group.to_uniform_space R _ v.subgroups_basis.topology _,
to_uniform_add_group := @topological_add_comm_group_is_uniform _ _ v.subgroups_basis.topology _,
is_topological_valuation :=
begin
letI := @topological_add_group.to_uniform_space R _ v.subgroups_basis.topology _,
intros s,
rw filter.has_basis_iff.mp v.subgroups_basis.has_basis_nhds_zero s,
exact exists_congr (λ γ, by simpa),
end }
variables (R Γ₀) [_i : valued R Γ₀]
include _i
lemma has_basis_nhds_zero :
(𝓝 (0 : R)).has_basis (λ _, true) (λ (γ : Γ₀ˣ), { x | v x < (γ : Γ₀) }) :=
by simp [filter.has_basis_iff, is_topological_valuation]
lemma has_basis_uniformity :
(𝓤 R).has_basis (λ _, true) (λ (γ : Γ₀ˣ), { p : R × R | v (p.2 - p.1) < (γ : Γ₀) }) :=
begin
rw uniformity_eq_comap_nhds_zero,
exact (has_basis_nhds_zero R Γ₀).comap _,
end
lemma to_uniform_space_eq :
to_uniform_space = @topological_add_group.to_uniform_space R _ v.subgroups_basis.topology _ :=
uniform_space_eq
((has_basis_uniformity R Γ₀).eq_of_same_basis $ v.subgroups_basis.has_basis_nhds_zero.comap _)
variables {R Γ₀}
lemma mem_nhds {s : set R} {x : R} :
(s ∈ 𝓝 x) ↔ ∃ (γ : Γ₀ˣ), {y | (v (y - x) : Γ₀) < γ } ⊆ s :=
by simp only [← nhds_translation_add_neg x, ← sub_eq_add_neg, preimage_set_of_eq, exists_true_left,
((has_basis_nhds_zero R Γ₀).comap (λ y, y - x)).mem_iff]
lemma mem_nhds_zero {s : set R} :
(s ∈ 𝓝 (0 : R)) ↔ ∃ γ : Γ₀ˣ, {x | v x < (γ : Γ₀) } ⊆ s :=
by simp only [mem_nhds, sub_zero]
lemma loc_const {x : R} (h : (v x : Γ₀) ≠ 0) : {y : R | v y = v x} ∈ 𝓝 x :=
begin
rw mem_nhds,
rcases units.exists_iff_ne_zero.mpr h with ⟨γ, hx⟩,
use γ,
rw hx,
intros y y_in,
exact valuation.map_eq_of_sub_lt _ y_in
end
@[priority 100]
instance : topological_ring R :=
(to_uniform_space_eq R Γ₀).symm ▸ v.subgroups_basis.to_ring_filter_basis.is_topological_ring
lemma cauchy_iff {F : filter R} :
cauchy F ↔ F.ne_bot ∧ ∀ γ : Γ₀ˣ, ∃ M ∈ F, ∀ x y ∈ M, (v (y - x) : Γ₀) < γ :=
begin
rw [to_uniform_space_eq, add_group_filter_basis.cauchy_iff],
apply and_congr iff.rfl,
simp_rw valued.v.subgroups_basis.mem_add_group_filter_basis_iff,
split,
{ intros h γ,
exact h _ (valued.v.subgroups_basis.mem_add_group_filter_basis _) },
{ rintros h - ⟨γ, rfl⟩,
exact h γ }
end
end valued
|
import combinatorics.simple_graph.coloring
--OUTPUT 1
theorem bipartite_graph_two_colorable (G : Type*) [graph G] : two_colorable G ↔ bipartite G :=
begin
-- Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color.
assume h1 : two_colorable G,
-- Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue.
have h2 : ∃ A B : set G, ∀ v : G, v ∈ A ∨ v ∈ B, from by auto [h1],
-- Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$.
have h3 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge a b, from by auto [h1],
have h4 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge a b, from by auto [h1],
-- This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
have h5 : ∀ a b : G, edge a b → (a ∈ A ∧ b ∈ B) ∨ (a ∈ B ∧ b ∈ A), from by auto [h1],
show bipartite G, from by auto [h2, h3, h4, h5],
-- Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ every edge has one endpoint in $V_{1}$ and the other in $V_{2}$.
assume h1 : bipartite G,
-- Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable.
have h2 : ∃ A B : set G, ∀ v : G, v ∈ A ∨ v ∈ B, from by auto [h1],
have h3 : ∀ a b : G, a ∈ A ∧ b ∈ A → ¬ edge a b, from by auto [h1],
have h4 : ∀ a b : G, a ∈ B ∧ b ∈ B → ¬ edge a b, from by auto [h1],
have h5 : ∀ a b : G, edge a b → (a ∈ A ∧ b ∈ B) ∨ (a ∈ B ∧ b ∈ A), from by auto [h1],
show two_colorable G, from by auto [h2, h3, h4, h5],
end
/- FEW SHOT PROMPTS TO CODEX(START)
/--`theorem`
Power Set is Closed under Intersection
Let $S$ be a set.
Let $\powerset S$ be the power set of $S$.
Then:
:$\forall A, B \in \powerset S: A \cap B \in \powerset S$
`proof`
Let $A, B \in \powerset S$.
Then by the definition of power set, $A \subseteq S$ and $B \subseteq S$.
From Intersection is Subset we have that $A \cap B \subseteq A$.
It follows from Subset Relation is Transitive that $A \cap B \subseteq S$.
Thus $A \cap B \in \powerset S$ and closure is proved.
{{qed}}
-/
theorem power_set_intersection_closed {α : Type*} (S : set α) : ∀ A B ∈ 𝒫 S, (A ∩ B) ∈ 𝒫 S :=
begin
-- $A$ and $B$ are sets. $A$ and $B$ belong to power set of $S$
assume (A : set α) (hA : A ∈ 𝒫 S) (B : set α) (hB : B ∈ 𝒫 S),
-- Then $A ⊆ S$ and $B ⊆ S$, by power set definition
have h1 : (A ⊆ S) ∧ (B ⊆ S), from by auto [set.subset_of_mem_powerset, set.subset_of_mem_powerset],
-- Then $(A ∩ B) ⊆ A$, by intersection of set is a subset
have h2 : (A ∩ B) ⊆ A, from by auto [set.inter_subset_left],
-- Then $(A ∩ B) ⊆ S$, by subset relation is transitive
have h3 : (A ∩ B) ⊆ S, from by auto [set.subset.trans],
-- Hence $(A ∩ B) ∈ 𝒫 S$, by power set definition
show (A ∩ B) ∈ 𝒫 S, from by auto [set.mem_powerset],
end
/--`theorem`
Square of Sum
:$\forall x, y \in \R: \paren {x + y}^2 = x^2 + 2 x y + y^2$
`proof`
Follows from the distribution of multiplication over addition:
{{begin-eqn}}
{{eqn | l = \left({x + y}\right)^2
| r = \left({x + y}\right) \cdot \left({x + y}\right)
}}
{{eqn | r = x \cdot \left({x + y}\right) + y \cdot \left({x + y}\right)
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x \cdot x + x \cdot y + y \cdot x + y \cdot y
| c = Real Multiplication Distributes over Addition
}}
{{eqn | r = x^2 + 2xy + y^2
| c =
}}
{{end-eqn}}
{{qed}}
-/
theorem square_of_sum (x y : ℝ) : (x + y)^2 = (x^2 + 2*x*y + y^2) :=
begin
-- expand the power
calc (x + y)^2 = (x+y)*(x+y) : by auto [sq]
-- distributive property of multiplication over addition gives:
... = x*(x+y) + y*(x+y) : by auto [add_mul]
-- applying the above property further gives:
... = x*x + x*y + y*x + y*y : by auto [mul_comm, add_mul] using [ring]
-- rearranging the terms using commutativity and adding gives:
... = x^2 + 2*x*y + y^2 : by auto [sq, mul_comm] using [ring]
end
/--`theorem`
Identity of Group is Unique
Let $\struct {G, \circ}$ be a group. Then there is a unique identity element $e \in G$.
`proof`
From Group has Latin Square Property, there exists a unique $x \in G$ such that:
:$a x = b$
and there exists a unique $y \in G$ such that:
:$y a = b$
Setting $b = a$, this becomes:
There exists a unique $x \in G$ such that:
:$a x = a$
and there exists a unique $y \in G$ such that:
:$y a = a$
These $x$ and $y$ are both $e$, by definition of identity element.
{{qed}}
-/
theorem group_identity_unique {G : Type*} [group G] : ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a :=
begin
-- Group has Latin Square Property
have h1 : ∀ a b : G, ∃! x : G, a * x = b, from by auto using [use (a⁻¹ * b)],
have h2 : ∀ a b : G, ∃! y : G, y * a = b, from by auto using [use b * a⁻¹],
-- Setting $b = a$, this becomes:
have h3 : ∀ a : G, ∃! x : G, a * x = a, from by auto [h1],
have h4 : ∀ a : G, ∃! y : G, y * a = a, from by auto [h2],
-- These $x$ and $y$ are both $(1 : G)$, by definition of identity element
have h5 : ∀ a : G, classical.some (h3 a).exists = (1 : G), from by auto [exists_unique.unique, h3, classical.some_spec, exists_unique.exists, mul_one],
have h6 : ∀ a : G, classical.some (h4 a).exists = (1 : G), from by auto [exists_unique.unique, h4, classical.some_spec, exists_unique.exists, one_mul],
show ∃! e : G, ∀ a : G, e * a = a ∧ a * e = a, from by auto [h3, h4, exists_unique.unique, classical.some_spec, exists_unique.exists] using [use (1 : G)],
end
/--`theorem`
Bipartite Graph is two colorable
Let $G$ be a graph. Then $G$ is 2-colorable if and only if $G$ is bipartite.
`proof`
Let $G$ be a 2-colorable graph, which means we can color every vertex either red or blue, and no edge will have both endpoints colored the same color. Let $A$ denote the subset of vertices colored red, and let $B$ denote the subset of vertices colored blue. Since all vertices of $A$ are red, there are no edges within $A$, and similarly for $B$. This implies that every edge has one endpoint in $A$ and the other in $B$, which means $G$ is bipartite.
Conversely, suppose $G$ is bipartite, that is, we can partition the vertices into two subsets $V_{1}, V_{2}$ every edge has one endpoint in $V_{1}$ and the other in $V_{2}$. Then coloring every vertex of $V_{1}$ red and every vertex of $V_{2}$ blue yields a valid coloring, so $G$ is 2-colorable.
QED
-/
theorem
FEW SHOT PROMPTS TO CODEX(END)-/
|
//! Imu (re)publisher node for BNO055 measurements.
/*!
* This node republishes the compact (excluding covariances) Bno055Measurements message from the Teensy 3.2 Adafruit
* BNO055 rosserial node as standard sensor_msgs/Imu messages, taking into account the calibration status of the
* Adafruit BNO055 sensor.
*
* \file
*
* \author Maarten De Munck <[email protected]>
*/
#include "imu_publisher.hpp"
#include <boost/program_options.hpp>
#include <ros/ros.h>
int main (int argc, char ** argv) {
// Define namespace aliases.
namespace program_options = boost::program_options;
// Get command line parameters.
program_options::options_description description("Recognised options");
description.add_options()
("help", "display this help and exit")
("frame-id", program_options::value<std::string>()->default_value("imu_link"),
"frame_id to use on the published sensor_msgs/Imu messages (default: 'imu_link')");
program_options::variables_map variables;
program_options::store(program_options::parse_command_line(argc, argv, description), variables);
program_options::notify(variables);
// Display help message.
if (variables.count("help")) {
std::cout << description << std::endl;
return EXIT_FAILURE;
}
// Initialise Imu publisher node.
ros::init(argc, argv, "adafruit_bno055_imu_publisher_node");
ros::NodeHandle node_handle;
// And GO!
{
earth_rover_firmware::ImuPublisher imu_publisher{variables["frame-id"].as<std::string>()};
ros::spin();
}
// Take everything down.
return EXIT_SUCCESS;
}
|
With the war going against the Austrians by the end of 1918, Zrínyi was prepared to be transferred to the new State of Slovenes, Croats and Serbs. On 10 November 1918, just one day before the end of the war, navy officers sailed the battleship out of Pola (Pula) and eventually surrendered to a squadron of American submarine chasers. Following the handover to the United States Navy, she was briefly designated USS Zrínyi. In the Treaty of Saint-Germain-en-Laye, the transfer was not recognized; instead, Zrínyi was given to Italy and broken up for scrap.
|
#' Defunct functions
#'
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("defunct")}
#' Executing these functions will tell you which function replaces them.
#'
#' @keywords internal
#' @name defunct
NULL
#' @export
#' @rdname defunct
id <- function(.variables, drop = FALSE) {
lifecycle::deprecate_stop("0.5.0", "id()", "vctrs::vec_group_id()")
}
#' @export
#' @rdname defunct
failwith <- function(default = NULL, f, quiet = FALSE) {
lifecycle::deprecate_stop("0.7.0", "failwith()", "purrr::possibly()")
}
|
State Before: V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v w : V
h : Adj G v w
⊢ (neighborFinset G vᶜ ∩ neighborFinset G wᶜ) \ ({w} ∪ {v}) = neighborFinset G vᶜ ∩ neighborFinset G wᶜ State After: case a
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v w : V
h : Adj G v w
a✝ : V
⊢ a✝ ∈ (neighborFinset G vᶜ ∩ neighborFinset G wᶜ) \ ({w} ∪ {v}) ↔ a✝ ∈ neighborFinset G vᶜ ∩ neighborFinset G wᶜ Tactic: ext State Before: case a
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v w : V
h : Adj G v w
a✝ : V
⊢ a✝ ∈ (neighborFinset G vᶜ ∩ neighborFinset G wᶜ) \ ({w} ∪ {v}) ↔ a✝ ∈ neighborFinset G vᶜ ∩ neighborFinset G wᶜ State After: case a
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v w : V
h : Adj G v w
a✝ : V
⊢ ¬Adj G v a✝ → ¬Adj G w a✝ → ¬(a✝ = w ∨ a✝ = v) Tactic: simp only [and_imp, mem_union, mem_sdiff, mem_compl, and_iff_left_iff_imp, mem_neighborFinset,
mem_inter, mem_singleton] State Before: case a
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v w : V
h : Adj G v w
a✝ : V
⊢ ¬Adj G v a✝ → ¬Adj G w a✝ → ¬(a✝ = w ∨ a✝ = v) State After: case a.inl
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v a✝ : V
hnv : ¬Adj G v a✝
h : Adj G v a✝
hnw : ¬Adj G a✝ a✝
⊢ False
case a.inr
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
w a✝ : V
hnw : ¬Adj G w a✝
h : Adj G a✝ w
hnv : ¬Adj G a✝ a✝
⊢ False Tactic: rintro hnv hnw (rfl | rfl) State Before: case a.inl
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
v a✝ : V
hnv : ¬Adj G v a✝
h : Adj G v a✝
hnw : ¬Adj G a✝ a✝
⊢ False State After: no goals Tactic: exact hnv h State Before: case a.inr
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
w a✝ : V
hnw : ¬Adj G w a✝
h : Adj G a✝ w
hnv : ¬Adj G a✝ a✝
⊢ False State After: case a.inr
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
w a✝ : V
hnw : ¬Adj G w a✝
h : Adj G a✝ w
hnv : ¬Adj G a✝ a✝
⊢ Adj G w a✝ Tactic: apply hnw State Before: case a.inr
V : Type u
inst✝² : Fintype V
inst✝¹ : DecidableEq V
G : SimpleGraph V
inst✝ : DecidableRel G.Adj
n k ℓ μ : ℕ
w a✝ : V
hnw : ¬Adj G w a✝
h : Adj G a✝ w
hnv : ¬Adj G a✝ a✝
⊢ Adj G w a✝ State After: no goals Tactic: rwa [adj_comm] |
// This file is part of snark, a generic and flexible library for robotics research
// Copyright (c) 2011 The University of Sydney
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the University of Sydney nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE
// GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT
// HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
// IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cmath>
#include <boost/array.hpp>
#include "calculator.h"
#include "packet.h"
namespace snark { namespace velodyne { namespace puck {
static boost::array< double, 16 > elevation_ = {{ -15.0 * M_PI / 180
, 1 * M_PI / 180
, -13 * M_PI / 180
, 3 * M_PI / 180
, -11 * M_PI / 180
, 5 * M_PI / 180
, -9 * M_PI / 180
, 7 * M_PI / 180
, -7 * M_PI / 180
, 9 * M_PI / 180
, -5 * M_PI / 180
, 11 * M_PI / 180
, -3 * M_PI / 180
, 13 * M_PI / 180
, -1 * M_PI / 180
, 15 * M_PI / 180 }};
struct laser
{
double sin;
double cos;
laser() {}
laser( unsigned int index ) : sin( std::sin( elevation_[ index ] ) ), cos( std::cos( elevation_[ index ] ) ) {}
};
typedef boost::array< laser, puck::packet::number_of_lasers > lasers_t;
static lasers_t init_lasers()
{
lasers_t lasers;
for( unsigned int j = 0; j < puck::packet::number_of_lasers; ++j ) { lasers[j] = laser( j ); }
return lasers;
}
static lasers_t lasers = init_lasers();
std::pair< ::Eigen::Vector3d, ::Eigen::Vector3d > calculator::ray( unsigned int laser, double range, double angle ) const { return std::make_pair( ::Eigen::Vector3d::Zero(), point( laser, range, angle ) ); }
::Eigen::Vector3d calculator::point( unsigned int laser, double range, double angle ) const
{
// todo: once puck/packet.cpp is fixed, use the commented line below
// return ::Eigen::Vector3d( range * lasers[laser].cos * std::cos( angle ), range * lasers[laser].cos * std::sin( angle ), range * lasers[laser].sin );
return ::Eigen::Vector3d( range * lasers[laser].cos * std::sin( angle ), range * lasers[laser].cos * std::cos( angle ), range * lasers[laser].sin );
}
double calculator::range( unsigned int, double range ) const { return range; }
// todo! super quick and dirty; by right the places to fix:
// - puck/packet.cpp: fiddly part: fix azimuth and make sure step works correctly
// - puck/calculator.cpp: easy part: simply swap sin and cos in calculator::point()
// - scan_tick.cpp: packet_angle_( puck::packet ): hardcoded offset
// - if time permits, see why there is such a 90-degree discrepancy in puck spec
// double calculator::azimuth( unsigned int, double azimuth ) const { return azimuth; }
double calculator::azimuth( unsigned int, double azimuth ) const { return M_PI / 2 - azimuth; }
double calculator::intensity( unsigned int, unsigned char intensity, double ) const { return intensity; }
} } } // namespace snark { namespace velodyne { namespace puck {
|
lemma pow_one (a : mynat) : a ^ (1 : mynat) = a :=
begin
rwa [one_eq_succ_zero, pow_succ, pow_zero],
simp,
end
|
module params
!DEFAULT VALUES---------------------------------------------------------------
!Iterations to run
integer :: NSTEPS=10000
integer :: ISTEPS=2000
integer :: VSTEPS=50
!Resolution
integer :: NX = 64
integer :: NY = 64
integer :: NZ = 64
double precision :: DSPACE = 0.2d0, DTSIZE = 0.01d0
!Dump frequency - Wavefunction - Misc Utils
integer :: dumpwf = 100, dumputil = 100
!GPE Type - 0 Natural Units - 1 Hamonic Oscillator Units
integer :: RHSType = 1
double precision :: harm_osc_C = 300.0d0
double precision :: harm_osc_mu = 10.136d0
complex*16 :: GAMMAC = 0.0d0
logical :: rtNorm = .false.
!Boundary Conditions - 0 reflective - 1 periodic
integer :: BCX = 0
integer :: BCY = 0
integer :: BCZ = 0
!Noise Amplitude - 0.001d0 works well
double precision :: noiseamp = 0.000d0
!Flow Speed in X Dir - Start
integer :: VOBS = 0
double precision :: VOBSCALE = 100.0
double precision :: DVDT = 0.0d0
double precision :: VTVTIME = 200.0d0
!Rotation term in GPE
double precision :: OMEGA = 0.0d0
!Potential types - -1 none - 0 object - 3 afm-img
logical :: enablePot = .true.
logical :: enableTrap = .true.
integer :: potType = -1
integer :: trapType = 0 !traptype 0: harmonic, 1: ring
!Enable if you need to constantly recalculate the potential
integer :: potRep = 0
!Object properties
double precision :: RRX=2.0d0
double precision :: RRY=2.0d0
double precision :: RRZ=2.0d0
double precision :: OBJXDASH=0.0d0
double precision :: OBJYDASH=0.0d0
double precision :: OBJZDASH=0.0d0
double precision :: OBJHEIGHT=0.0d0
!Trap
double precision :: TXDASH=0.0d0
double precision :: TYDASH=0.0d0
double precision :: TZDASH=0.0d0
double precision :: TXSCALE = 1.0d0
double precision :: TYSCALE = 1.0d0
double precision :: TZSCALE = 1.0d0
double precision :: TR0 = 0.0d0
!Floor
double precision :: FLR = 0.0d0
!AFM-IMAGE
character(2048) :: afm_filename
integer :: afmRES = 256
double precision :: afmXScale=1000.0d0
double precision :: afmYscale=1000.0d0
double precision :: afmZscale=1.0d0
double precision :: TRUNCPARAM = 1.0d0
!pot-image
character(2048) :: pot_filename
!Initial condition type: 0 - default, 1 - Non-equilibrium, 2 - restart
integer :: initialCondType = 0
!Non-equilibrium initial condition
double precision :: ENERV = 0.75
double precision :: NV = 0.75
!initial condition resume
character(2048) :: icr_filename
double precision :: RESUMETIME = 0.0d0 ! lastdump*dumpwf*DTSIZE, e.g. 1000.0*250.0*DTSIZE
integer :: RESUMESTEP = 1 ! lastdump*dumpwf + 1, e.g. 1000*250 + 1
!GLOBALS----------------------------------------------------------------------
double precision :: VOB
double precision,parameter :: PI = 4.0d0*ATAN(1.0d0)
complex*16 :: DT,EYE = (0.0d0,1.0d0)
double precision :: NORM,OLDNORM = 1.0d0
complex*16, dimension(:,:,:), ALLOCATABLE :: GRID,OBJPOT
double precision :: TIME, rpKC, rpAMP
double precision :: OBJXVEL = 0.0d0,OBJYVEL = 0.0d0,OBJZVEL = 0.0d0
integer :: INITSSTEP = 1
contains
SUBROUTINE init_params
IMPLICIT NONE
afm_filename = repeat(" ", 2048) !Clear memory so entire string is blank
pot_filename = repeat(" ", 2048) !Clear memory so entire string is blank
icr_filename = repeat(" ", 2048) !Clear memory so entire string is blank
include 'params.in'
ALLOCATE(GRID(-NX/2:NX/2,-NY/2:NY/2,-NZ/2:NZ/2))
ALLOCATE(OBJPOT(-NX/2:NX/2,-NY/2:NY/2,-NZ/2:NZ/2))
END SUBROUTINE
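!Illustrative note (not from the original source): init_params pulls user overrides in
!via the Fortran include of params.in above, so that file is expected to hold plain
!assignment statements for the variables declared in this module, for example:
! NSTEPS = 20000
! NX = 128
! NY = 128
! NZ = 128
! potType = 0
! afm_filename = "surface.dat"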
end module
|
(* Title: statecharts/HA/HAOps.thy
Author: Steffen Helke, Software Engineering Group
Copyright 2010 Technische Universitaet Berlin
*)
section \<open>Constructing Hierarchical Automata\<close>
theory HAOps
imports HA
begin
subsection "Constructing a Composition Function for a PseudoHA"
definition
EmptyMap :: "'s set => ('s \<rightharpoonup> (('s,'e,'d)seqauto) set)" where
"EmptyMap S = (\<lambda> a . if a \<in> S then Some {} else None)"
lemma EmptyMap_dom [simp]:
"dom (EmptyMap S) = S"
by (unfold dom_def EmptyMap_def,auto)
lemma EmptyMap_ran [simp]:
"S \<noteq> {} \<Longrightarrow> ran (EmptyMap S) = {{}}"
by (unfold ran_def EmptyMap_def, auto)
lemma EmptyMap_the [simp]:
"x \<in> S \<Longrightarrow> the ((EmptyMap S) x) = {}"
by (unfold ran_def EmptyMap_def, auto)
lemma EmptyMap_ran_override:
"\<lbrakk> S \<noteq> {}; (S \<inter> (dom G)) = {} \<rbrakk> \<Longrightarrow>
ran (G ++ EmptyMap S) = insert {} (ran G)"
apply (subst ran_override)
apply (simp add: Int_commute)
apply simp
done
lemma EmptyMap_Union_ran_override:
"\<lbrakk> S \<noteq> {};
S \<inter> dom G = {} \<rbrakk> \<Longrightarrow>
(Union (ran (G ++ (EmptyMap S)))) = (Union (ran G))"
apply (subst EmptyMap_ran_override)
apply auto
done
lemma EmptyMap_Union_ran_override2:
"\<lbrakk> S \<noteq> {}; S \<inter> dom G1 = {};
dom G1 \<inter> dom G2 = {} \<rbrakk> \<Longrightarrow>
\<Union> (ran (G1 ++ EmptyMap S ++ G2)) = (\<Union> (ran G1 \<union> ran G2))"
apply (unfold Union_eq UNION_eq EmptyMap_def Int_def ran_def)
apply (simp add: map_add_Some_iff)
apply (unfold dom_def)
apply simp
apply (rule equalityI)
apply (rule subsetI)
apply simp
apply fast
apply (rule subsetI)
apply (rename_tac t)
apply simp
apply (erule bexE)
apply (rename_tac U)
apply simp
apply (erule disjE)
apply (erule exE)
apply (rename_tac v)
apply (rule_tac x=U in exI)
apply simp
apply (rule_tac x=v in exI)
apply auto
done
lemma EmptyMap_Root [simp]:
"Root {SA} (EmptyMap (States SA)) = SA"
by (unfold Root_def, auto)
lemma EmptyMap_RootEx [simp]:
"RootEx {SA} (EmptyMap (States SA))"
by (unfold RootEx_def, auto)
lemma EmptyMap_OneAncestor [simp]:
"OneAncestor {SA} (EmptyMap (States SA))"
by (unfold OneAncestor_def, auto)
lemma EmptyMap_NoCycles [simp]:
"NoCycles {SA} (EmptyMap (States SA))"
by (unfold NoCycles_def EmptyMap_def , auto)
lemma EmptyMap_IsCompFun [simp]:
"IsCompFun {SA} (EmptyMap (States SA))"
by (unfold IsCompFun_def, auto)
lemma EmptyMap_hierauto [simp]:
"(D,{SA}, SAEvents SA, EmptyMap (States SA)) \<in> hierauto"
by (unfold hierauto_def HierAuto_def, auto)
subsection "Extending a Composition Function by a SA"
definition
FAddSA :: "[('s \<rightharpoonup> (('s,'e,'d)seqauto) set), 's * ('s,'e,'d)seqauto]
=> ('s \<rightharpoonup> (('s,'e,'d)seqauto) set)"
("(_ [f+]/ _)" [10,11]10) where
"FAddSA G SSA = (let (S,SA) = SSA
in
(if ((S \<in> dom G) \<and> (S \<notin> States SA)) then
(G ++ (Map.empty(S \<mapsto> (insert SA (the (G S)))))
++ EmptyMap (States SA))
else G))"
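text \<open>Added commentary: FAddSA G (S,SA) extends the composition function G by attaching
 the sequential automaton SA below the state S: the entry for S gains SA, and every state
 of SA is mapped to the empty set of automata. If S does not lie in the domain of G, or
 already belongs to the states of SA, then G is returned unchanged.\<close>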
lemma FAddSA_dom [simp]:
"(S \<notin> (dom (A::('a => ('a,'c,'d)seqauto set option)))) \<Longrightarrow>
((A [f+] (S,(SA::('a,'c,'d)seqauto))) = A)"
by (unfold FAddSA_def Let_def, auto)
lemma FAddSA_States [simp]:
"(S \<in> (States (SA::('a,'c,'d)seqauto))) \<Longrightarrow>
(((A::('a => ('a,'c,'d)seqauto set option)) [f+] (S,SA)) = A)"
by (unfold FAddSA_def Let_def, auto)
lemma FAddSA_dom_insert [simp]:
"\<lbrakk> S \<in> (dom A); S \<notin> States SA \<rbrakk> \<Longrightarrow>
(((A [f+] (S,SA)) S) = Some (insert SA (the (A S))))"
by (unfold FAddSA_def Let_def restrict_def, auto)
lemma FAddSA_States_neq [simp]:
"\<lbrakk> S' \<notin> States (SA::('a,'c,'d)seqauto); S \<noteq> S' \<rbrakk> \<Longrightarrow>
((((A::('a => ('a,'c,'d)seqauto set option)) [f+] (S,SA)) S') = (A S'))"
apply (case_tac "S \<in> dom A")
apply (case_tac "S \<in> States SA")
apply auto
apply (case_tac "S' \<in> dom A")
apply (unfold FAddSA_def Let_def)
apply auto
apply (simp add: dom_None)
done
lemma FAddSA_dom_emptyset [simp]:
"\<lbrakk> S \<in> (dom A); S \<notin> States SA; S' \<in> States (SA::('a,'c,'d)seqauto) \<rbrakk> \<Longrightarrow>
((((A::('a => ('a,'c,'d)seqauto set option))) [f+] (S,SA)) S') = (Some {})"
apply (unfold FAddSA_def Let_def)
apply auto
apply (unfold EmptyMap_def)
apply auto
done
lemma FAddSA_dom_dom_States [simp]:
"\<lbrakk> S \<in> (dom F); S \<notin> States SA \<rbrakk> \<Longrightarrow>
(dom ((F::('a \<rightharpoonup> (('a,'b,'d)seqauto) set)) [f+] (S, SA))) =
((dom F) \<union> (States (SA::('a,'b,'d)seqauto)))"
by (unfold FAddSA_def Let_def, auto)
lemma FAddSA_dom_dom [simp]:
"S \<notin> (dom F) \<Longrightarrow>
(dom ((F::('a \<rightharpoonup> (('a,'b,'d)seqauto) set)) [f+]
(S,(SA::('a,'b,'d)seqauto)))) = (dom F)"
by (unfold FAddSA_def Let_def, auto)
lemma FAddSA_States_dom [simp]:
"S \<in> (States SA) \<Longrightarrow>
(dom ((F::('a \<rightharpoonup> (('a,'b,'d)seqauto) set)) [f+]
(S,(SA::('a,'b,'d)seqauto)))) = (dom F)"
by (unfold FAddSA_def Let_def, auto)
lemma FAddSA_dom_insert_dom_disjunct [simp]:
"\<lbrakk> S \<in> dom G; States SA \<inter> dom G = {} \<rbrakk> \<Longrightarrow> ((G [f+] (S,SA)) S) = Some (insert SA (the (G S)))"
apply (rule FAddSA_dom_insert)
apply auto
done
lemma FAddSA_Union_ran:
"\<lbrakk> S \<in> dom G; (States SA) \<inter> (dom G) = {} \<rbrakk> \<Longrightarrow>
(\<Union> (ran (G [f+] (S,SA)))) = (insert SA (\<Union> (ran G)))"
apply (unfold FAddSA_def Let_def)
apply simp
apply (rule conjI)
prefer 2
apply (rule impI)
apply (unfold Int_def)
apply simp
apply (fold Int_def)
apply (rule impI)
apply (subst EmptyMap_Union_ran_override)
apply auto
done
lemma FAddSA_Union_ran2:
"\<lbrakk> S \<in> dom G1; (States SA) \<inter> (dom G1) = {}; (dom G1 \<inter> dom G2) = {} \<rbrakk> \<Longrightarrow>
(\<Union> (ran ((G1 [f+] (S,SA)) ++ G2))) = (insert SA (\<Union> ((ran G1) \<union> (ran G2))))"
apply (unfold FAddSA_def Let_def)
apply (simp (no_asm_simp))
apply (rule conjI)
apply (rule impI)
apply (subst EmptyMap_Union_ran_override2)
apply simp
apply simp
apply simp
apply fast
apply (subst Union_Un_distrib)
apply (subst Union_ran_override2)
apply auto
done
lemma FAddSA_ran:
"\<lbrakk> \<forall> T \<in> dom G . T \<noteq> S \<longrightarrow> (the (G T) \<inter> the (G S)) = {};
S \<in> dom G; (States SA) \<inter> (dom G) = {} \<rbrakk> \<Longrightarrow>
ran (G [f+] (S,SA)) = insert {} (insert (insert SA (the (G S))) (ran G - {the (G S)}))"
apply (unfold FAddSA_def Let_def)
apply simp
apply (rule conjI)
apply (rule impI)+
prefer 2
apply fast
apply (simp add: EmptyMap_ran_override)
apply (unfold ran_def)
apply auto
apply (rename_tac Y X a xa xb)
apply (erule_tac x=a in allE)
apply simp
apply (erule_tac x=a in allE)
apply simp
done
lemma FAddSA_RootEx_def:
"\<lbrakk> S \<in> dom G; (States SA) \<inter> (dom G) = {} \<rbrakk> \<Longrightarrow>
RootEx F (G [f+] (S,SA)) = (\<exists>! A . A \<in> F \<and> A \<notin> insert SA (\<Union> (ran G)))"
apply (unfold RootEx_def)
apply (simp only: FAddSA_Union_ran Int_commute)
done
lemma FAddSA_RootEx:
"\<lbrakk> \<Union> (ran G) = F - {Root F G};
dom G = \<Union>(States ` F);
(dom G \<inter> States SA) = {}; S \<in> dom G;
RootEx F G \<rbrakk> \<Longrightarrow> RootEx (insert SA F) (G [f+] (S,SA))"
apply (simp add: FAddSA_RootEx_def Int_commute cong: rev_conj_cong)
apply (auto cong: conj_cong)
done
lemma FAddSA_Root_def:
"\<lbrakk> S \<in> dom G; (States SA) \<inter> (dom G) = {} \<rbrakk> \<Longrightarrow>
(Root F (G [f+] (S,SA)) = (@ A . A \<in> F \<and> A \<notin> insert SA (\<Union> (ran G))))"
apply (unfold Root_def)
apply (simp only: FAddSA_Union_ran Int_commute)
done
lemma FAddSA_RootEx_Root:
"\<lbrakk> Union (ran G) = F - {Root F G};
\<Union>(States ` F) = dom G;
(dom G \<inter> States SA) = {}; S \<in> dom G;
RootEx F G \<rbrakk> \<Longrightarrow> (Root (insert SA F) (G [f+] (S,SA))) = (Root F G)"
apply (simp add: FAddSA_Root_def Int_commute cong: rev_conj_cong)
apply (simp cong:conj_cong)
done
lemma FAddSA_OneAncestor:
"\<lbrakk> \<Union> (ran G) = F - {Root F G};
(dom G \<inter> States SA) = {}; S \<in> dom G;
\<Union>(States ` F) = dom G; RootEx F G;
OneAncestor F G \<rbrakk> \<Longrightarrow> OneAncestor (insert SA F) (G [f+] (S,SA))"
apply (subst OneAncestor_def)
apply simp
apply (rule ballI)
apply (rename_tac SAA)
apply (case_tac "SA = SAA")
apply (rule_tac a=S in ex1I)
apply (rule conjI)
apply simp
apply fast
apply (subst FAddSA_dom_insert)
apply simp
apply (simp add:Int_def)
apply simp
apply (rename_tac T)
apply (erule conjE bexE exE disjE)+
apply (rename_tac SAAA)
apply simp
apply (erule conjE)
apply (subst not_not [THEN sym])
apply (rule notI)
apply (case_tac "T \<in> States SAA")
apply blast
apply (drule_tac A=G and S=S and SA=SAA in FAddSA_States_neq)
apply fast
apply simp
apply (case_tac "SAA \<notin> Union (ran G)")
apply (frule ran_dom_the)
prefer 2
apply fast
apply blast
apply simp
apply (erule conjE)
apply (simp add: States_Int_not_mem)
apply (unfold OneAncestor_def)
apply (drule_tac G=G and S=S and SA=SA in FAddSA_RootEx_Root)
apply simp
apply simp
apply simp
apply simp
apply (erule_tac x=SAA in ballE)
prefer 2
apply simp
apply simp
apply (erule conjE bexE ex1E exE disjE)+
apply (rename_tac T SAAA)
apply (rule_tac a=T in ex1I)
apply (rule conjI)
apply fast
apply (case_tac "T = S")
apply simp
apply (case_tac "S \<notin> States SA")
apply simp
apply simp
apply (subst FAddSA_States_neq)
apply blast
apply (rule not_sym)
apply simp
apply simp
apply (rename_tac U)
apply simp
apply (erule conjE bexE)+
apply (rename_tac SAAAA)
apply simp
apply (erule conjE disjE)+
apply (frule FAddSA_dom_emptyset)
prefer 2
apply fast
back
back
apply simp
apply blast
apply simp
apply (erule_tac x=U in allE)
apply (erule impE)
prefer 2
apply simp
apply (rule conjI)
apply fast
apply (case_tac "S \<noteq> U")
apply (subgoal_tac "U \<notin> States SA")
apply (drule_tac A=G in FAddSA_States_neq)
apply fast
apply simp
apply blast
apply (drule_tac A=G and SA=SA in FAddSA_dom_insert)
apply simp
apply blast
apply auto
done
lemma FAddSA_IsCompFun:
"\<lbrakk> (States SA \<inter> (\<Union>(States ` F))) = {};
S \<in> (\<Union>(States ` F));
IsCompFun F G \<rbrakk> \<Longrightarrow> IsCompFun (insert SA F) (G [f+] (S,SA))"
apply (unfold IsCompFun_def)
apply (erule conjE)+
apply (simp add: Int_commute FAddSA_RootEx_Root FAddSA_RootEx FAddSA_OneAncestor FAddSA_NoCycles)
apply (rule conjI)
apply (subst FAddSA_dom_dom_States)
apply simp
apply blast
apply (simp add: Un_commute)
apply (simp add: FAddSA_Union_ran)
apply (case_tac "SA = Root F G")
prefer 2
apply blast
apply (subgoal_tac "States (Root F G) \<subseteq> \<Union>(States ` F)")
apply simp
apply (frule subset_lemma)
apply auto
done
lemma FAddSA_HierAuto:
"\<lbrakk> (States SA \<inter> (\<Union>(States ` F))) = {};
S \<in> (\<Union>(States ` F));
HierAuto D F E G \<rbrakk> \<Longrightarrow> HierAuto D (insert SA F) (E \<union> SAEvents SA) (G [f+] (S,SA))"
apply (unfold HierAuto_def)
apply auto
apply (simp add: MutuallyDistinct_Insert)
apply (rule FAddSA_IsCompFun)
apply auto
done
lemma FAddSA_HierAuto_insert [simp]:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
HierAuto (HAInitValue HA)
(insert SA (SAs HA))
(HAEvents HA \<union> SAEvents SA)
(CompFun HA [f+] (S,SA))"
apply (unfold HAStates_def)
apply (rule FAddSA_HierAuto)
apply auto
done
subsection "Constructing a PseudoHA"
definition
PseudoHA :: "[('s,'e,'d)seqauto,'d data] => ('s,'e,'d)hierauto" where
"PseudoHA SA D = Abs_hierauto(D,{SA}, SAEvents SA ,EmptyMap (States SA))"
lemma PseudoHA_SAs [simp]:
"SAs (PseudoHA SA D) = {SA}"
by (unfold PseudoHA_def SAs_def, simp add: Abs_hierauto_inverse)
lemma PseudoHA_Events [simp]:
"HAEvents (PseudoHA SA D) = SAEvents SA"
by (unfold PseudoHA_def HAEvents_def, simp add: Abs_hierauto_inverse)
lemma PseudoHA_CompFun [simp]:
"CompFun (PseudoHA SA D) = EmptyMap (States SA)"
by (unfold PseudoHA_def CompFun_def, simp add: Abs_hierauto_inverse)
lemma PseudoHA_HAInitValue [simp]:
"(HAInitValue (PseudoHA SA D)) = D"
by (unfold PseudoHA_def Let_def HAInitValue_def, simp add: Abs_hierauto_inverse)
lemma PseudoHA_CompFun_the [simp]:
"S \<in> States A \<Longrightarrow> (the (CompFun (PseudoHA A D) S)) = {}"
by simp
lemma PseudoHA_CompFun_ran [simp]:
"(ran (CompFun (PseudoHA SA D))) = {{}}"
by auto
lemma PseudoHA_HARoot [simp]:
"(HARoot (PseudoHA SA D)) = SA"
by (unfold HARoot_def, auto)
lemma PseudoHA_HAInitState [simp]:
"HAInitState (PseudoHA A D) = InitState A"
apply (unfold HAInitState_def)
apply simp
done
lemma PseudoHA_HAInitStates [simp]:
"HAInitStates (PseudoHA A D) = {InitState A}"
apply (unfold HAInitStates_def)
apply simp
done
lemma PseudoHA_Chi [simp]:
"S \<in> States A \<Longrightarrow> Chi (PseudoHA A D) S = {}"
apply (unfold Chi_def restrict_def)
apply auto
done
lemma PseudoHA_ChiRel [simp]:
"ChiRel (PseudoHA A D) = {}"
apply (unfold ChiRel_def)
apply simp
done
lemma PseudoHA_InitConf [simp]:
"InitConf (PseudoHA A D) = {InitState A}"
apply (unfold InitConf_def)
apply simp
done
subsection \<open>Extending a HA by a SA (\<open>AddSA\<close>)\<close>
definition
AddSA :: "[('s,'e,'d)hierauto, 's * ('s,'e,'d)seqauto]
=> ('s,'e,'d)hierauto"
("(_ [++]/ _)" [10,11]10) where
"AddSA HA SSA = (let (S,SA) = SSA;
DNew = HAInitValue HA;
FNew = insert SA (SAs HA);
ENew = HAEvents HA \<union> SAEvents SA;
GNew = CompFun HA [f+] (S,SA)
in
Abs_hierauto(DNew,FNew,ENew,GNew))"
definition
AddHA :: "[('s,'e,'d)hierauto, 's * ('s,'e,'d)hierauto]
=> ('s,'e,'d)hierauto"
("(_ [**]/ _)" [10,11]10) where
"AddHA HA1 SHA =
(let (S,HA2) = SHA;
(D1,F1,E1,G1) = Rep_hierauto (HA1 [++] (S,HARoot HA2));
(D2,F2,E2,G2) = Rep_hierauto HA2;
FNew = F1 \<union> F2;
ENew = E1 \<union> E2;
GNew = G1 ++ G2
in
Abs_hierauto(D1,FNew,ENew,GNew))"
lemma AddSA_SAs:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow> (SAs (HA [++] (S,SA))) = insert SA (SAs HA)"
apply (unfold Let_def AddSA_def)
apply (subst SAs_def)
apply (simp add: hierauto_def Abs_hierauto_inverse)
done
lemma AddSA_Events:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
HAEvents (HA [++] (S,SA)) = (HAEvents HA) \<union> (SAEvents SA)"
apply (unfold Let_def AddSA_def)
apply (subst HAEvents_def)
apply (simp add: hierauto_def Abs_hierauto_inverse)
done
lemma AddSA_CompFun:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
CompFun (HA [++] (S,SA)) = (CompFun HA [f+] (S,SA))"
apply (unfold Let_def AddSA_def)
apply (subst CompFun_def)
apply (simp add: hierauto_def Abs_hierauto_inverse)
done
lemma AddSA_HAStates:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
HAStates (HA [++] (S,SA)) = (HAStates HA) \<union> (States SA)"
apply (unfold HAStates_def)
apply (subst AddSA_SAs)
apply (unfold HAStates_def)
apply auto
done
lemma AddSA_HAInitValue:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
(HAInitValue (HA [++] (S,SA))) = (HAInitValue HA)"
apply (unfold Let_def AddSA_def)
apply (subst HAInitValue_def)
apply (simp add: hierauto_def Abs_hierauto_inverse)
done
lemma AddSA_HARoot:
"\<lbrakk> (States SA \<inter> HAStates HA) = {};
S \<in> HAStates HA \<rbrakk> \<Longrightarrow>
(HARoot (HA [++] (S,SA))) = (HARoot HA)"
apply (unfold HARoot_def)
apply (simp add: AddSA_CompFun AddSA_SAs)
apply (subst FAddSA_RootEx_Root)
apply auto
apply (simp only: HAStates_SA_mem)
apply (unfold HAStates_def)
apply fast
done
lemma AddSA_CompFun_the:
"\<lbrakk> (States SA \<inter> HAStates A) = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
(the ((CompFun (A [++] (S,SA))) S)) = insert SA (the ((CompFun A) S))"
by (simp add: AddSA_CompFun)
lemma AddSA_CompFun_the2:
"\<lbrakk> S' \<in> States (SA::('a,'c,'d)seqauto);
(States SA \<inter> HAStates A) = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
the ((CompFun (A [++] (S,SA))) S') = {}"
apply (simp add: AddSA_CompFun)
apply (subst FAddSA_dom_emptyset)
apply auto
done
lemma AddSA_CompFun_the3:
"\<lbrakk> S' \<notin> States (SA::('a,'c,'d)seqauto);
S \<noteq> S';
(States SA \<inter> HAStates A) = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
(the ((CompFun (A [++] (S,SA))) S')) = (the ((CompFun A) S'))"
by (simp add: AddSA_CompFun)
lemma AddSA_CompFun_ran:
"\<lbrakk> (States SA \<inter> HAStates A) = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
ran (CompFun (A [++] (S,SA))) =
insert {} (insert (insert SA (the ((CompFun A) S))) (ran (CompFun A) - {the ((CompFun A) S)}))"
apply (simp add: AddSA_CompFun)
apply (subst FAddSA_ran)
apply auto
apply (fast dest: CompFun_Int_disjoint)
done
lemma AddSA_CompFun_ran2:
"\<lbrakk> (States SA1 \<inter> HAStates A) = {};
(States SA2 \<inter> (HAStates A \<union> States SA1)) = {};
S \<in> HAStates A;
T \<in> States SA1 \<rbrakk> \<Longrightarrow>
ran (CompFun ((A [++] (S,SA1)) [++] (T,SA2))) =
insert {} (insert {SA2} (ran (CompFun (A [++] (S,SA1)))))"
apply (simp add: AddSA_HAStates AddSA_CompFun)
apply (subst FAddSA_ran)
apply (rule ballI)
apply (rule impI)
apply (subst AddSA_CompFun [THEN sym])
apply simp
apply simp
apply (subst AddSA_CompFun [THEN sym])
apply simp
apply simp
apply (rule CompFun_Int_disjoint)
apply simp
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
apply (case_tac "S \<in> States SA1")
apply simp
apply (simp only: dom_CompFun [THEN sym])
apply (frule FAddSA_dom_dom_States)
apply fast
apply simp
apply (case_tac "S \<in> States SA1")
apply simp
apply fast
apply (subst FAddSA_dom_dom_States)
apply simp
apply simp
apply simp
apply (case_tac "S \<in> States SA1")
apply simp
apply fast
apply (subst FAddSA_dom_dom_States)
apply simp
apply simp
apply simp
apply (case_tac "S \<in> States SA1")
apply simp
apply fast
apply simp
apply fast
done
lemma AddSA_CompFun_ran_not_mem:
"\<lbrakk> States SA2 \<inter> (HAStates A \<union> States SA1) = {};
States SA1 \<inter> HAStates A = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
{SA2} \<notin> ran (CompFun A [f+] (S, SA1))"
apply (cut_tac HA="A [++] (S,SA1)" and Sas="{SA2}" in ran_CompFun_is_not_SA)
apply (simp add: AddSA_HAStates AddSA_CompFun)
apply (simp add: AddSA_HAStates AddSA_SAs)
apply auto
apply (simp add: Int_def)
apply (cut_tac SA=SA2 in EX_State_SA)
apply (erule exE)
apply (frule HAStates_SA_mem)
apply fast
apply (simp only: HAStates_def)
apply fast
apply (simp add: AddSA_HAStates AddSA_CompFun)
done
lemma AddSA_CompFun_ran3:
"\<lbrakk> (States SA1 \<inter> HAStates A) = {};
(States SA2 \<inter> (HAStates A \<union> States SA1)) = {};
(States SA3 \<inter> (HAStates A \<union> States SA1 \<union> States SA2)) = {};
S \<in> HAStates A;
T \<in> States SA1 \<rbrakk> \<Longrightarrow>
ran (CompFun ((A [++] (S,SA1)) [++] (T,SA2) [++] (T,SA3))) =
insert {} (insert {SA3,SA2} (ran (CompFun (A [++] (S,SA1)))))"
apply (simp add: AddSA_HAStates AddSA_CompFun)
apply (subst FAddSA_ran)
apply (rule ballI)
apply (rule impI)
apply (subst AddSA_CompFun [THEN sym])
apply simp
apply simp
apply (subst AddSA_CompFun [THEN sym])
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
apply (subst AddSA_CompFun [THEN sym])
apply simp
apply simp
apply (subst AddSA_CompFun [THEN sym])
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
apply (rule CompFun_Int_disjoint)
apply simp
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
apply (simp only: dom_CompFun [THEN sym])
apply (cut_tac F="CompFun A [f+] (S, SA1)" and S=T and SA="SA2" in FAddSA_dom_dom_States)
apply (cut_tac F="CompFun A" and S=S and SA="SA1" in FAddSA_dom_dom_States)
apply fast
apply fast
apply simp
apply fast
apply simp
apply (cut_tac F="CompFun A" and S=S and SA="SA1" in FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply (subst FAddSA_dom_dom_States)
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply fast
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply (subst FAddSA_dom_dom_States)
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply fast
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply (subst AddSA_CompFun [THEN sym])
back
apply simp
apply simp
apply (subst AddSA_CompFun [THEN sym])
back
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
apply (subst AddSA_CompFun_ran2)
apply fast
apply fast
apply fast
apply fast
apply (simp add: AddSA_CompFun)
apply (subst FAddSA_dom_insert)
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply fast
apply (subst FAddSA_dom_emptyset)
apply simp
apply fast
apply simp
apply simp
apply (subst FAddSA_dom_insert)
apply (subst FAddSA_dom_dom_States)
apply simp
apply fast
apply simp
apply fast
apply (subst FAddSA_dom_emptyset)
apply simp
apply fast
apply simp
apply simp
apply (case_tac "{SA2} \<notin> ran (CompFun A [f+] (S,SA1))")
apply fast
apply (simp add:AddSA_CompFun_ran_not_mem)
done
lemma AddSA_CompFun_PseudoHA_ran:
"\<lbrakk> S \<in> States RootSA;
States RootSA \<inter> States SA = {} \<rbrakk> \<Longrightarrow>
(ran (CompFun ((PseudoHA RootSA D) [++] (S,SA)))) = (insert {} {{SA}})"
apply (subst AddSA_CompFun_ran)
apply auto
done
lemma AddSA_CompFun_PseudoHA_ran2:
"\<lbrakk> States SA1 \<inter> States RootSA = {};
States SA2 \<inter> (States RootSA \<union> States SA1) = {};
S \<in> States RootSA \<rbrakk> \<Longrightarrow>
(ran (CompFun ((PseudoHA RootSA D) [++] (S,SA1) [++] (S,SA2)))) = (insert {} {{SA2,SA1}})"
apply (subst AddSA_CompFun_ran)
prefer 3
apply (subst AddSA_CompFun_the)
apply simp
apply simp
apply (subst AddSA_CompFun_PseudoHA_ran)
apply fast
apply fast
apply (subst AddSA_CompFun_the)
apply simp
apply simp
apply simp
apply fast
apply (simp add: AddSA_HAStates)
apply (simp add: AddSA_HAStates)
done
lemma AddSA_HAInitStates [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
HAInitStates (A [++] (S,SA)) = insert (InitState SA) (HAInitStates A)"
apply (unfold HAInitStates_def)
apply (simp add: AddSA_SAs)
done
lemma AddSA_HAInitState [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
HAInitState (A [++] (S,SA)) = (HAInitState A)"
apply (unfold HAInitState_def)
apply (simp add: AddSA_HARoot)
done
lemma AddSA_Chi [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
Chi (A [++] (S,SA)) S = (States SA) \<union> (Chi A S)"
apply (unfold Chi_def restrict_def)
apply (simp add: AddSA_SAs AddSA_HAStates AddSA_CompFun_the)
apply auto
done
lemma AddSA_Chi2 [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A;
T \<in> States SA \<rbrakk> \<Longrightarrow>
Chi (A [++] (S,SA)) T = {}"
apply (unfold Chi_def restrict_def)
apply (simp add: AddSA_SAs AddSA_HAStates AddSA_CompFun_the2)
done
lemma AddSA_Chi3 [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A;
T \<notin> States SA; T \<noteq> S \<rbrakk> \<Longrightarrow>
Chi (A [++] (S,SA)) T = Chi A T"
apply (unfold Chi_def restrict_def)
apply (simp add: AddSA_SAs AddSA_HAStates AddSA_CompFun_the3)
apply auto
done
lemma AddSA_ChiRel [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
ChiRel (A [++] (S,SA)) = { (T,T') . T = S \<and> T' \<in> States SA } \<union> (ChiRel A)"
apply (unfold ChiRel_def)
apply (simp add: AddSA_HAStates)
apply safe
apply (rename_tac T U)
apply (case_tac "T \<in> States SA")
apply simp
apply simp
apply (rename_tac T U)
apply (case_tac "T \<noteq> S")
apply (case_tac "T \<in> States SA")
apply simp
apply simp
apply simp
apply (rename_tac T U)
apply (case_tac "T \<in> States SA")
apply simp
apply simp
apply (cut_tac A=A and T=T in Chi_HAStates)
apply fast
apply (case_tac "T \<in> States SA")
apply simp
apply simp
apply (cut_tac A=A and T=T in Chi_HAStates)
apply fast
apply fast
apply (rename_tac T U)
apply (case_tac "T \<noteq> S")
apply (case_tac "T \<in> States SA")
apply simp
apply simp
apply simp
apply (rename_tac T U)
apply (case_tac "T \<in> States SA")
apply auto
apply (metis AddSA_Chi AddSA_Chi3 Int_iff Un_iff empty_iff)
done
lemma help_InitConf:
"\<lbrakk>States SA \<inter> HAStates A = {} \<rbrakk> \<Longrightarrow> {p. fst p \<noteq> InitState SA \<and> snd p \<noteq> InitState SA \<and>
p \<in> insert (InitState SA) (HAInitStates A) \<times> insert (InitState SA) (HAInitStates A) \<and>
(p \<in> {S} \<times> States SA \<or> p \<in> ChiRel A)} =
(HAInitStates A \<times> HAInitStates A \<inter> ChiRel A)"
apply auto
apply (cut_tac A=SA in InitState_States)
apply (cut_tac A=A in HAInitStates_HAStates, fast)
apply (cut_tac A=SA in InitState_States)
apply (cut_tac A=A in HAInitStates_HAStates, fast)
done
lemma AddSA_InitConf [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<in> InitConf A \<rbrakk> \<Longrightarrow>
InitConf (A [++] (S,SA)) = insert (InitState SA) (InitConf A)"
apply (frule InitConf_HAStates2)
apply (unfold InitConf_def)
apply (simp del: insert_Times_insert)
apply auto
apply (rename_tac T)
apply (case_tac "T=S")
apply auto
prefer 3
apply (rule_tac R="(HAInitStates A) \<times> (HAInitStates A) \<inter> ChiRel A" in trancl_subseteq)
apply auto
apply (rotate_tac 3)
apply (frule trancl_collect)
prefer 2
apply fast
apply auto
apply (cut_tac A=SA in InitState_States)
apply (frule ChiRel_HAStates)
apply fast
apply (frule ChiRel_HAStates)
apply (cut_tac A=SA in InitState_States)
apply fast
apply (frule ChiRel_HAStates)
apply (cut_tac A=SA in InitState_States)
apply fast
apply (subst help_InitConf [THEN sym])
apply fast
apply auto
apply (rule_tac b=S in rtrancl_into_rtrancl)
apply auto
prefer 2
apply (erule rtranclE)
apply auto
prefer 2
apply (erule rtranclE)
apply auto
apply (rule_tac R="(HAInitStates A) \<times> (HAInitStates A) \<inter> ChiRel A" in trancl_subseteq)
apply auto
done
lemma AddSA_InitConf2 [simp]:
"\<lbrakk> States SA \<inter> HAStates A = {};
S \<notin> InitConf A;
S \<in> HAStates A \<rbrakk> \<Longrightarrow>
InitConf (A [++] (S,SA)) = InitConf A"
apply (unfold InitConf_def)
apply simp
apply auto
apply (rename_tac T)
prefer 2
apply (rule_tac R="(HAInitStates A) \<times> (HAInitStates A) \<inter> ChiRel A" in trancl_subseteq)
apply auto
apply (case_tac "T=InitState SA")
apply auto
prefer 2
apply (rotate_tac 3)
apply (frule trancl_collect)
prefer 2
apply fast
apply auto
apply (cut_tac A=SA in InitState_States)
apply (frule ChiRel_HAStates)
apply fast
apply (cut_tac A=SA in InitState_States)
apply (frule ChiRel_HAStates)
apply fast
apply (cut_tac A=SA in InitState_States)
apply (cut_tac A=A in HAInitStates_HAStates)
apply fast
apply (subst help_InitConf [THEN sym])
apply fast
apply auto
apply (rule_tac b="InitState SA" in rtrancl_induct)
apply auto
apply (frule ChiRel_HAStates2)
apply (cut_tac A=SA in InitState_States)
apply fast
prefer 2
apply (frule ChiRel_HAStates)
apply (cut_tac A=SA in InitState_States)
apply fast
apply (rule rtrancl_into_rtrancl)
apply auto
apply (rule rtrancl_into_rtrancl)
apply auto
done
subsection "Theorems for Calculating Wellformedness of HA"
lemma PseudoHA_HAStates_IFF:
"(States SA) = X \<Longrightarrow> (HAStates (PseudoHA SA D)) = X"
apply simp
done
lemma AddSA_SAs_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(SAs HA) = X \<rbrakk> \<Longrightarrow> (SAs (HA [++] (S, SA))) = (insert SA X)"
apply (subst AddSA_SAs)
apply auto
done
lemma AddSA_Events_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(HAEvents HA) = HAE;
(SAEvents SA) = SAE;
(HAE \<union> SAE) = X \<rbrakk> \<Longrightarrow> (HAEvents (HA [++] (S, SA))) = X"
apply (subst AddSA_Events)
apply auto
done
lemma AddSA_CompFun_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(CompFun HA) = HAG;
(HAG [f+] (S, SA)) = X \<rbrakk> \<Longrightarrow> (CompFun (HA [++] (S, SA))) = X"
apply (subst AddSA_CompFun)
apply auto
done
lemma AddSA_HAStates_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(HAStates HA) = HAS;
(States SA) = SAS;
(HAS \<union> SAS) = X \<rbrakk> \<Longrightarrow> (HAStates (HA [++] (S, SA))) = X"
apply (subst AddSA_HAStates)
apply auto
done
lemma AddSA_HAInitValue_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(HAInitValue HA) = X \<rbrakk> \<Longrightarrow> (HAInitValue (HA [++] (S, SA))) = X"
apply (subst AddSA_HAInitValue)
apply auto
done
lemma AddSA_HARoot_IFF:
"\<lbrakk> States SA \<inter> HAStates HA = {};
S \<in> HAStates HA;
(HARoot HA) = X \<rbrakk> \<Longrightarrow> (HARoot (HA [++] (S, SA))) = X"
apply (subst AddSA_HARoot)
apply auto
done
lemma AddSA_InitConf_IFF:
"\<lbrakk> InitConf A = Y;
States SA \<inter> HAStates A = {};
S \<in> HAStates A;
(if S \<in> Y then insert (InitState SA) Y else Y) = X \<rbrakk> \<Longrightarrow>
InitConf (A [++] (S,SA)) = X"
apply (case_tac "S \<in> Y")
apply auto
done
lemma AddSA_CompFun_ran_IFF:
"\<lbrakk> (States SA \<inter> HAStates A) = {};
S \<in> HAStates A;
(insert {} (insert (insert SA (the ((CompFun A) S))) (ran (CompFun A) - {the ((CompFun A) S)}))) = X \<rbrakk> \<Longrightarrow>
ran (CompFun (A [++] (S,SA))) = X"
apply (subst AddSA_CompFun_ran)
apply auto
done
lemma AddSA_CompFun_ran2_IFF:
"\<lbrakk> (States SA1 \<inter> HAStates A) = {};
(States SA2 \<inter> (HAStates A \<union> States SA1)) = {};
S \<in> HAStates A;
T \<in> States SA1;
insert {} (insert {SA2} (ran (CompFun (A [++] (S,SA1))))) = X \<rbrakk> \<Longrightarrow>
ran (CompFun ((A [++] (S,SA1)) [++] (T,SA2))) = X"
apply (subst AddSA_CompFun_ran2)
apply auto
done
lemma AddSA_CompFun_ran3_IFF:
"\<lbrakk> (States SA1 \<inter> HAStates A) = {};
(States SA2 \<inter> (HAStates A \<union> States SA1)) = {};
(States SA3 \<inter> (HAStates A \<union> States SA1 \<union> States SA2)) = {};
S \<in> HAStates A;
T \<in> States SA1;
insert {} (insert {SA3,SA2} (ran (CompFun (A [++] (S,SA1))))) = X \<rbrakk> \<Longrightarrow>
ran (CompFun ((A [++] (S,SA1)) [++] (T,SA2) [++] (T,SA3))) = X"
apply (subst AddSA_CompFun_ran3)
apply auto
done
lemma AddSA_CompFun_PseudoHA_ran_IFF:
"\<lbrakk> S \<in> States RootSA;
States RootSA \<inter> States SA = {};
(insert {} {{SA}}) = X \<rbrakk> \<Longrightarrow>
(ran (CompFun ((PseudoHA RootSA D) [++] (S,SA)))) = X"
apply (subst AddSA_CompFun_PseudoHA_ran)
apply auto
done
lemma AddSA_CompFun_PseudoHA_ran2_IFF:
"\<lbrakk> States SA1 \<inter> States RootSA = {};
States SA2 \<inter> (States RootSA \<union> States SA1) = {};
S \<in> States RootSA;
(insert {} {{SA2,SA1}}) = X \<rbrakk> \<Longrightarrow>
(ran (CompFun ((PseudoHA RootSA D) [++] (S,SA1) [++] (S,SA2)))) = X"
apply (subst AddSA_CompFun_PseudoHA_ran2)
apply auto
done
ML \<open>
val AddSA_SAs_IFF = @{thm AddSA_SAs_IFF};
val AddSA_Events_IFF = @{thm AddSA_Events_IFF};
val AddSA_CompFun_IFF = @{thm AddSA_CompFun_IFF};
val AddSA_HAStates_IFF = @{thm AddSA_HAStates_IFF};
val PseudoHA_HAStates_IFF = @{thm PseudoHA_HAStates_IFF};
val AddSA_HAInitValue_IFF = @{thm AddSA_HAInitValue_IFF};
val AddSA_CompFun_ran_IFF = @{thm AddSA_CompFun_ran_IFF};
val AddSA_HARoot_IFF = @{thm AddSA_HARoot_IFF};
val insert_inter = @{thm insert_inter};
val insert_notmem = @{thm insert_notmem};
val PseudoHA_CompFun = @{thm PseudoHA_CompFun};
val PseudoHA_Events = @{thm PseudoHA_Events};
val PseudoHA_SAs = @{thm PseudoHA_SAs};
val PseudoHA_HARoot = @{thm PseudoHA_HARoot};
val PseudoHA_HAInitValue = @{thm PseudoHA_HAInitValue};
val PseudoHA_CompFun_ran = @{thm PseudoHA_CompFun_ran};
val Un_empty_right = @{thm Un_empty_right};
val insert_union = @{thm insert_union};
fun wellformed_tac ctxt L i =
FIRST[resolve_tac ctxt [AddSA_SAs_IFF] i,
resolve_tac ctxt [AddSA_Events_IFF] i,
resolve_tac ctxt [AddSA_CompFun_IFF] i,
resolve_tac ctxt [AddSA_HAStates_IFF] i,
resolve_tac ctxt [PseudoHA_HAStates_IFF] i,
resolve_tac ctxt [AddSA_HAInitValue_IFF] i,
resolve_tac ctxt [AddSA_HARoot_IFF] i,
resolve_tac ctxt [AddSA_CompFun_ran_IFF] i,
resolve_tac ctxt [insert_inter] i,
resolve_tac ctxt [insert_notmem] i,
CHANGED (simp_tac (put_simpset HOL_basic_ss ctxt addsimps
[PseudoHA_HARoot, PseudoHA_CompFun, PseudoHA_CompFun_ran,PseudoHA_Events,PseudoHA_SAs,insert_union,
PseudoHA_HAInitValue,Un_empty_right]@ L) i),
fast_tac ctxt i,
CHANGED (simp_tac ctxt i)];
\<close>
method_setup wellformed = \<open>Attrib.thms >> (fn thms => fn ctxt => (METHOD (fn facts =>
(HEADGOAL (wellformed_tac ctxt (facts @ thms))))))\<close>
end
|
(****************************************************************************)
(* *)
(* *)
(* Solange Coupet-Grimal & Catherine Nouvet *)
(* *)
(* *)
(* Laboratoire d'Informatique Fondamentale de Marseille *)
(* CMI-Technopole de Chateau-Gombert *)
(* 39, Rue F. Joliot Curie *)
(* 13453 MARSEILLE Cedex 13 *)
(* Contact :[email protected] *)
(* *)
(* *)
(* Coq V7.0 *)
(* Septembre 2002 *)
(* *)
(****************************************************************************)
(* parameters_card.v *)
(****************************************************************************)
Section parameters_card.
Require Export gc.
Require Export card.
Notation Card := (card _ _) (only parsing).
Inductive grey_white_sons (m1 m2 : marking) (h1 h2 : heap)
(g : node) : Prop :=
grey_sons :
(forall n m : node, h1 n m = h2 n m) ->
(forall m : node, h1 g m = true /\ m1 m = white -> m2 m = grey) ->
(forall m : node,
m <> g /\ (h1 g m = true /\ m1 m <> white \/ h1 g m = false) ->
m1 m = m2 m) -> grey_white_sons m1 m2 h1 h2 g.
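(* Added reading aid (not in the original source): [grey_white_sons m1 m2 h1 h2 g] states
   that the two heaps coincide, that every white son of [g] in [m1] is grey in [m2], and
   that any node other than [g] which is not a white son of [g] keeps its marking. *)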
Definition card_color (c0 : color) :=
card node color (fun c : color => c = c0).
Inductive card_sons (n0 : node) (c0 : color) (m : marking)
(h : heap) (nb : nat) : Prop :=
CS :
forall M : node -> bool * color,
(forall n : node, M n = (h n0 n, m n)) ->
card _ _
(fun p : bool * color =>
match p with
| (b, c) => b = true /\ c = c0
end) M nb -> card_sons n0 c0 m h nb.
Definition update_heap (b : bool) (h : heap) (n0 f n m : node) :=
match eq_dec_node n n0 with
| right _ => h n m
| left _ =>
match eq_dec_node m f with
| right _ => h n0 m
| left _ => b
end
end.
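(* Added reading aid (not in the original source): [update_heap b h n0 f] agrees with [h]
   on every pair of nodes except (n0, f), where it returns [b]; the lemma below states
   exactly this specification. *)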
Lemma exist_updated_heap :
forall (b : bool) (h : heap) (n0 f : node),
exists h' : heap,
(forall n m : node, n <> n0 -> h n m = h' n m) /\
(forall n : node, n <> f -> h n0 n = h' n0 n) /\ h' n0 f = b.
intros b h n0 f.
split with (update_heap b h n0 f).
unfold update_heap in |- *; split.
intros n m ndifn0.
case (eq_dec_node n n0); auto.
intro neqn0; absurd (n = n0); auto.
split.
intros n ndiff; case (eq_dec_node n0 n0); auto.
intro n0eqn0; case (eq_dec_node n f); auto.
intro neqf; absurd (n = f); auto.
case (eq_dec_node n0 n0); auto.
intro n0eqn0; case (eq_dec_node f f); auto.
intro fdiff; absurd (f = f); auto.
intro n0difn0; absurd (n0 = n0); auto.
Qed.
Lemma not_true_and_white :
forall (M : node -> bool * color) (m : marking) (h : heap) (n n0 : node),
(forall n : node, M n = (h n0 n, m n)) ->
(forall n : node, ~ (let (b0, c0) := M n in b0 = true /\ c0 = white)) ->
~ (h n0 n = true /\ m n = white).
intros M m h n n0 Mn H_M.
intro H; elim H; clear H.
intros hn0n mn.
absurd (h n0 n = true /\ m n = white); auto.
elim (H_M n); generalize (refl_equal (M n)); pattern (M n) at -1 in |- *;
case (M n); intros b c M_n; split.
replace b with (fst (M n)).
rewrite (Mn n); assumption.
rewrite M_n; auto.
replace c with (snd (M n)); auto.
rewrite (Mn n); assumption.
rewrite M_n; auto.
Qed.
Lemma is_white :
forall (M : node -> bool * color) (h : heap) (m : marking) (n n0 : node),
(forall n : node, M n = (h n0 n, m n)) ->
(let (b0, c0) := M n in b0 = true /\ c0 = white) -> m n = white.
intros M h m n n0 Mn H_M.
cut (let (b0, c0) := M n in b0 = true /\ c0 = white); auto.
generalize (refl_equal (M n)); pattern (M n) at -1 in |- *; case (M n);
intros b c M_n H_b_c; elim H_b_c; clear H_b_c.
intros H_b H_c; replace (m n) with c.
assumption.
replace c with (snd (M n)).
rewrite (Mn n); auto.
rewrite M_n; auto.
Qed.
Lemma is_true :
forall (M : node -> bool * color) (h : heap) (m : marking) (n n0 : node),
(forall n : node, M n = (h n0 n, m n)) ->
(let (b0, c0) := M n in b0 = true /\ c0 = white) -> h n0 n = true.
intros M h m n n0 Mn H_M.
cut (let (b0, c0) := M n in b0 = true /\ c0 = white); auto.
generalize (refl_equal (M n)); pattern (M n) at -1 in |- *; case (M n);
intros b c M_n H_b_c; elim H_b_c; clear H_b_c.
intros H_b H_c; replace (h n0 n) with b.
assumption.
replace b with (fst (M n)).
rewrite (Mn n); auto.
rewrite M_n; auto.
Qed.
End parameters_card.
|
/-
Copyright (c) 2021 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
-/
import data.set.finite
import data.fintype.basic
import order.well_founded
import order.order_iso_nat
import algebra.pointwise
/-!
# Well-founded sets
A well-founded subset of an ordered type is one on which the relation `<` is well-founded.
## Main Definitions
* `set.well_founded_on s r` indicates that the relation `r` is
well-founded when restricted to the set `s`.
* `set.is_wf s` indicates that `<` is well-founded when restricted to `s`.
* `set.is_pwo s` indicates that any infinite sequence of elements in `s`
contains an infinite monotone subsequence.
### Definitions for Hahn Series
* `set.add_antidiagonal s t a` and `set.mul_antidiagonal s t a` are the sets of pairs of elements
from `s` and `t` that add/multiply to `a`.
* `finset.add_antidiagonal` and `finset.mul_antidiagonal` are finite versions of
`set.add_antidiagonal` and `set.mul_antidiagonal` defined when `s` and `t` are well-founded.
## Main Results
* `set.well_founded_on_iff` relates `well_founded_on` to the well-foundedness of a relation on the
original type, to avoid dealing with subtypes.
* `set.is_wf.mono` shows that a subset of a well-founded subset is well-founded.
* `set.is_wf.union` shows that the union of two well-founded subsets is well-founded.
* `finset.is_wf` shows that all `finset`s are well-founded.
-/
variables {α : Type*}
namespace set
/-- `s.well_founded_on r` indicates that the relation `r` is well-founded when restricted to `s`. -/
def well_founded_on (s : set α) (r : α → α → Prop) : Prop :=
well_founded (λ (a : s) (b : s), r a b)
lemma well_founded_on_iff {s : set α} {r : α → α → Prop} :
s.well_founded_on r ↔ well_founded (λ (a b : α), r a b ∧ a ∈ s ∧ b ∈ s) :=
begin
have f : rel_embedding (λ (a : s) (b : s), r a b) (λ (a b : α), r a b ∧ a ∈ s ∧ b ∈ s) :=
⟨⟨coe, subtype.coe_injective⟩, λ a b, by simp⟩,
refine ⟨λ h, _, f.well_founded⟩,
rw well_founded.well_founded_iff_has_min,
intros t ht,
by_cases hst : (s ∩ t).nonempty,
{ rw ← subtype.preimage_coe_nonempty at hst,
rcases well_founded.well_founded_iff_has_min.1 h (coe ⁻¹' t) hst with ⟨⟨m, ms⟩, mt, hm⟩,
exact ⟨m, mt, λ x xt ⟨xm, xs, ms⟩, hm ⟨x, xs⟩ xt xm⟩ },
{ rcases ht with ⟨m, mt⟩,
exact ⟨m, mt, λ x xt ⟨xm, xs, ms⟩, hst ⟨m, ⟨ms, mt⟩⟩⟩ }
end
instance is_strict_order.subset {s : set α} {r : α → α → Prop} [is_strict_order α r] :
is_strict_order α (λ (a b : α), r a b ∧ a ∈ s ∧ b ∈ s) :=
{ to_is_irrefl := ⟨λ a con, irrefl_of r a con.1 ⟩,
to_is_trans := ⟨λ a b c ab bc, ⟨trans_of r ab.1 bc.1, ab.2.1, bc.2.2⟩ ⟩ }
theorem well_founded_on_iff_no_descending_seq {s : set α} {r : α → α → Prop} [is_strict_order α r] :
s.well_founded_on r ↔ ∀ (f : ((>) : ℕ → ℕ → Prop) ↪r r), ¬ (range f) ⊆ s :=
begin
rw [well_founded_on_iff, rel_embedding.well_founded_iff_no_descending_seq],
refine ⟨λ h f con, h begin
use f,
{ exact f.injective },
{ intros a b,
simp only [con (mem_range_self a), con (mem_range_self b), and_true, gt_iff_lt,
function.embedding.coe_fn_mk, f.map_rel_iff] }
end, λ h con, _⟩,
rcases con with ⟨f, hf⟩,
have hfs' : ∀ n : ℕ, f n ∈ s := λ n, (hf.2 n.lt_succ_self).2.2,
refine h ⟨f, λ a b, _⟩ (λ n hn, _),
{ rw ← hf,
exact ⟨λ h, ⟨h, hfs' _, hfs' _⟩, λ h, h.1⟩ },
{ rcases set.mem_range.1 hn with ⟨m, hm⟩,
rw ← hm,
apply hfs' }
end
section has_lt
variables [has_lt α]
/-- `s.is_wf` indicates that `<` is well-founded when restricted to `s`. -/
def is_wf (s : set α) : Prop := well_founded_on s (<)
lemma is_wf_univ_iff : is_wf (univ : set α) ↔ well_founded ((<) : α → α → Prop) :=
by simp [is_wf, well_founded_on_iff]
variables {s t : set α}
theorem is_wf.mono (h : is_wf t) (st : s ⊆ t) : is_wf s :=
begin
rw [is_wf, well_founded_on_iff] at *,
refine subrelation.wf (λ x y xy, _) h,
exact ⟨xy.1, st xy.2.1, st xy.2.2⟩,
end
end has_lt
section partial_order
variables [partial_order α] {s t : set α} {a : α}
theorem is_wf_iff_no_descending_seq :
is_wf s ↔ ∀ (f : (order_dual ℕ) ↪o α), ¬ (range f) ⊆ s :=
begin
haveI : is_strict_order α (λ (a b : α), a < b ∧ a ∈ s ∧ b ∈ s) := {
to_is_irrefl := ⟨λ x con, lt_irrefl x con.1⟩,
to_is_trans := ⟨λ a b c ab bc, ⟨lt_trans ab.1 bc.1, ab.2.1, bc.2.2⟩⟩,
},
rw [is_wf, well_founded_on_iff_no_descending_seq],
exact ⟨λ h f, h f.lt_embedding, λ h f, h (order_embedding.of_strict_mono
f (λ _ _, f.map_rel_iff.2))⟩,
end
theorem is_wf.union (hs : is_wf s) (ht : is_wf t) : is_wf (s ∪ t) :=
begin
classical,
rw [is_wf_iff_no_descending_seq] at *,
rintros f fst,
have h : infinite (f ⁻¹' s) ∨ infinite (f ⁻¹' t),
{ have h : infinite (univ : set ℕ) := infinite_univ,
have hpre : f ⁻¹' (s ∪ t) = set.univ,
{ rw [← image_univ, image_subset_iff, univ_subset_iff] at fst,
exact fst },
rw preimage_union at hpre,
rw ← hpre at h,
rw [infinite, infinite],
rw infinite at h,
contrapose! h,
exact finite.union h.1 h.2, },
rw [← infinite_coe_iff, ← infinite_coe_iff] at h,
cases h with inf inf; haveI := inf,
{ apply hs ((nat.order_embedding_of_set (f ⁻¹' s)).dual.trans f),
change range (function.comp f (nat.order_embedding_of_set (f ⁻¹' s))) ⊆ s,
rw [range_comp, image_subset_iff],
simp },
{ apply ht ((nat.order_embedding_of_set (f ⁻¹' t)).dual.trans f),
change range (function.comp f (nat.order_embedding_of_set (f ⁻¹' t))) ⊆ t,
rw [range_comp, image_subset_iff],
simp }
end
end partial_order
end set
namespace set
/-- A subset is partially well-ordered by a relation `r` when any infinite sequence contains
two elements where the first is related to the second by `r`. -/
def partially_well_ordered_on (s) (r : α → α → Prop) : Prop :=
∀ (f : ℕ → α), range f ⊆ s → ∃ (m n : ℕ), m < n ∧ r (f m) (f n)
/-- A subset of a preorder is partially well-ordered when any infinite sequence contains
a monotone subsequence of length 2 (or equivalently, an infinite monotone subsequence). -/
def is_pwo [preorder α] (s) : Prop :=
partially_well_ordered_on s ((≤) : α → α → Prop)
theorem partially_well_ordered_on.mono {s t : set α} {r : α → α → Prop}
(ht : t.partially_well_ordered_on r) (hsub : s ⊆ t) :
s.partially_well_ordered_on r :=
λ f hf, ht f (set.subset.trans hf hsub)
theorem partially_well_ordered_on.image_of_monotone {s : set α}
{r : α → α → Prop} {β : Type*} {r' : β → β → Prop}
(hs : s.partially_well_ordered_on r) {f : α → β} (hf : ∀ a1 a2 : α, r a1 a2 → r' (f a1) (f a2)) :
(f '' s).partially_well_ordered_on r' :=
λ g hg, begin
have h := λ (n : ℕ), ((mem_image _ _ _).1 (hg (mem_range_self n))),
obtain ⟨m, n, hlt, hmn⟩ := hs (λ n, classical.some (h n)) _,
{ refine ⟨m, n, hlt, _⟩,
rw [← (classical.some_spec (h m)).2,
← (classical.some_spec (h n)).2],
apply hf _ _ hmn },
{ rintros _ ⟨n, rfl⟩,
exact (classical.some_spec (h n)).1 }
end
section partial_order
variables {s : set α} {t : set α} {r : α → α → Prop}
theorem partially_well_ordered_on.exists_monotone_subseq [is_refl α r] [is_trans α r]
(h : s.partially_well_ordered_on r) (f : ℕ → α) (hf : range f ⊆ s) :
∃ (g : ℕ ↪o ℕ), ∀ m n : ℕ, m ≤ n → r (f (g m)) (f (g n)) :=
begin
obtain ⟨g, h1 | h2⟩ := exists_increasing_or_nonincreasing_subseq r f,
{ refine ⟨g, λ m n hle, _⟩,
obtain hlt | heq := lt_or_eq_of_le hle,
{ exact h1 m n hlt, },
{ rw [heq],
apply refl_of r } },
{ exfalso,
obtain ⟨m, n, hlt, hle⟩ := h (f ∘ g) (subset.trans (range_comp_subset_range _ _) hf),
exact h2 m n hlt hle }
end
theorem partially_well_ordered_on_iff_exists_monotone_subseq [is_refl α r] [is_trans α r] :
s.partially_well_ordered_on r ↔
∀ f : ℕ → α, range f ⊆ s → ∃ (g : ℕ ↪o ℕ), ∀ m n : ℕ, m ≤ n → r (f (g m)) (f (g n)) :=
begin
classical,
split; intros h f hf,
{ exact h.exists_monotone_subseq f hf },
{ obtain ⟨g, gmon⟩ := h f hf,
refine ⟨g 0, g 1, g.lt_iff_lt.2 zero_lt_one, gmon _ _ zero_le_one⟩, }
end
lemma partially_well_ordered_on.well_founded_on [is_partial_order α r]
(h : s.partially_well_ordered_on r) :
s.well_founded_on (λ a b, r a b ∧ a ≠ b) :=
begin
haveI : is_strict_order α (λ a b, r a b ∧ a ≠ b) :=
{ to_is_irrefl := ⟨λ a con, con.2 rfl⟩,
to_is_trans := ⟨λ a b c ab bc, ⟨trans ab.1 bc.1,
λ ac, ab.2 (antisymm ab.1 (ac.symm ▸ bc.1))⟩⟩ },
rw well_founded_on_iff_no_descending_seq,
intros f con,
obtain ⟨m, n, hlt, hle⟩ := h f con,
exact (f.map_rel_iff.2 hlt).2 (antisymm hle (f.map_rel_iff.2 hlt).1).symm,
end
variables [partial_order α]
lemma is_pwo.is_wf (h : s.is_pwo) :
s.is_wf :=
begin
rw [is_wf],
convert h.well_founded_on,
ext x y,
rw lt_iff_le_and_ne,
end
theorem is_pwo.exists_monotone_subseq
(h : s.is_pwo) (f : ℕ → α) (hf : range f ⊆ s) :
∃ (g : ℕ ↪o ℕ), monotone (f ∘ g) :=
h.exists_monotone_subseq f hf
theorem is_pwo_iff_exists_monotone_subseq :
s.is_pwo ↔
∀ f : ℕ → α, range f ⊆ s → ∃ (g : ℕ ↪o ℕ), monotone (f ∘ g) :=
partially_well_ordered_on_iff_exists_monotone_subseq
lemma is_pwo.prod (hs : s.is_pwo)
(ht : t.is_pwo) :
(s.prod t).is_pwo :=
begin
classical,
rw is_pwo_iff_exists_monotone_subseq at *,
intros f hf,
obtain ⟨g1, h1⟩ := hs (prod.fst ∘ f) _,
swap,
{ rw [range_comp, image_subset_iff],
refine subset.trans hf _,
rintros ⟨x1, x2⟩ hx,
simp only [mem_preimage, hx.1] },
obtain ⟨g2, h2⟩ := ht (prod.snd ∘ f ∘ g1) _,
refine ⟨g2.trans g1, λ m n mn, _⟩,
swap,
{ rw [range_comp, image_subset_iff],
refine subset.trans (range_comp_subset_range _ _) (subset.trans hf _),
rintros ⟨x1, x2⟩ hx,
simp only [mem_preimage, hx.2] },
simp only [rel_embedding.coe_trans, function.comp_app],
exact ⟨h1 (g2.le_iff_le.2 mn), h2 mn⟩,
end
theorem is_pwo.image_of_monotone {β : Type*} [partial_order β]
(hs : s.is_pwo) {f : α → β} (hf : monotone f) :
is_pwo (f '' s) :=
hs.image_of_monotone hf
theorem is_pwo.union (hs : is_pwo s) (ht : is_pwo t) : is_pwo (s ∪ t) :=
begin
classical,
rw [is_pwo_iff_exists_monotone_subseq] at *,
rintros f fst,
have h : infinite (f ⁻¹' s) ∨ infinite (f ⁻¹' t),
{ have h : infinite (univ : set ℕ) := infinite_univ,
have hpre : f ⁻¹' (s ∪ t) = set.univ,
{ rw [← image_univ, image_subset_iff, univ_subset_iff] at fst,
exact fst },
rw preimage_union at hpre,
rw ← hpre at h,
rw [infinite, infinite],
rw infinite at h,
contrapose! h,
exact finite.union h.1 h.2, },
rw [← infinite_coe_iff, ← infinite_coe_iff] at h,
cases h with inf inf; haveI := inf,
{ obtain ⟨g, hg⟩ := hs (f ∘ (nat.order_embedding_of_set (f ⁻¹' s))) _,
{ rw [function.comp.assoc, ← rel_embedding.coe_trans] at hg,
exact ⟨_, hg⟩ },
rw [range_comp, image_subset_iff],
simp },
{ obtain ⟨g, hg⟩ := ht (f ∘ (nat.order_embedding_of_set (f ⁻¹' t))) _,
{ rw [function.comp.assoc, ← rel_embedding.coe_trans] at hg,
exact ⟨_, hg⟩ },
rw [range_comp, image_subset_iff],
simp }
end
end partial_order
theorem is_wf.is_pwo [linear_order α] {s : set α}
(hs : s.is_wf) : s.is_pwo :=
λ f hf, begin
rw [is_wf, well_founded_on_iff] at hs,
have hrange : (range f).nonempty := ⟨f 0, mem_range_self 0⟩,
let a := hs.min (range f) hrange,
obtain ⟨m, hm⟩ := hs.min_mem (range f) hrange,
refine ⟨m, m.succ, m.lt_succ_self, le_of_not_lt (λ con, _)⟩,
rw hm at con,
apply hs.not_lt_min (range f) hrange (mem_range_self m.succ)
⟨con, hf (mem_range_self m.succ), hf _⟩,
rw ← hm,
apply mem_range_self,
end
theorem is_wf_iff_is_pwo [linear_order α] {s : set α} :
s.is_wf ↔ s.is_pwo :=
⟨is_wf.is_pwo, is_pwo.is_wf⟩
end set
namespace finset
@[simp]
theorem partially_well_ordered_on {r : α → α → Prop} [is_refl α r] {f : finset α} :
set.partially_well_ordered_on (↑f : set α) r :=
begin
intros g hg,
by_cases hinj : function.injective g,
{ exact (set.infinite_of_injective_forall_mem hinj (set.range_subset_iff.1 hg)
f.finite_to_set).elim },
{ rw [function.injective] at hinj,
push_neg at hinj,
obtain ⟨m, n, gmgn, hne⟩ := hinj,
cases lt_or_gt_of_ne hne with hlt hlt;
{ refine ⟨_, _, hlt, _⟩,
rw gmgn,
exact refl_of r _, } }
end
@[simp]
theorem is_pwo [partial_order α] {f : finset α} :
set.is_pwo (↑f : set α) :=
finset.partially_well_ordered_on
@[simp]
theorem well_founded_on {r : α → α → Prop} [is_strict_order α r] {f : finset α} :
set.well_founded_on (↑f : set α) r :=
begin
rw [set.well_founded_on_iff_no_descending_seq],
intros g con,
apply set.infinite_of_injective_forall_mem g.injective (set.range_subset_iff.1 con),
exact f.finite_to_set,
end
@[simp]
theorem is_wf [partial_order α] {f : finset α} : set.is_wf (↑f : set α) :=
finset.is_pwo.is_wf
end finset
namespace set
variables [partial_order α] {s : set α} {a : α}
theorem finite.is_pwo (h : s.finite) : s.is_pwo :=
begin
rw ← h.coe_to_finset,
exact finset.is_pwo,
end
@[simp]
theorem fintype.is_pwo [fintype α] : s.is_pwo := (finite.of_fintype s).is_pwo
@[simp]
theorem is_pwo_empty : is_pwo (∅ : set α) :=
finite_empty.is_pwo
@[simp]
theorem is_pwo_singleton (a) : is_pwo ({a} : set α) :=
(finite_singleton a).is_pwo
theorem is_pwo.insert (a) (hs : is_pwo s) : is_pwo (insert a s) :=
by { rw ← union_singleton, exact hs.union (is_pwo_singleton a) }
/-- `is_wf.min` returns a minimal element of a nonempty well-founded set. -/
noncomputable def is_wf.min (hs : is_wf s) (hn : s.nonempty) : α :=
hs.min univ (nonempty_iff_univ_nonempty.1 hn.to_subtype)
lemma is_wf.min_mem (hs : is_wf s) (hn : s.nonempty) : hs.min hn ∈ s :=
(well_founded.min hs univ (nonempty_iff_univ_nonempty.1 hn.to_subtype)).2
lemma is_wf.not_lt_min (hs : is_wf s) (hn : s.nonempty) (ha : a ∈ s) : ¬ a < hs.min hn :=
hs.not_lt_min univ (nonempty_iff_univ_nonempty.1 hn.to_subtype) (mem_univ (⟨a, ha⟩ : s))
@[simp]
lemma is_wf_min_singleton (a) {hs : is_wf ({a} : set α)} {hn : ({a} : set α).nonempty} :
hs.min hn = a :=
eq_of_mem_singleton (is_wf.min_mem hs hn)
end set
@[simp]
theorem finset.is_wf_sup {ι : Type*} [partial_order α] (f : finset ι) (g : ι → set α)
(hf : ∀ i : ι, i ∈ f → (g i).is_wf) : (f.sup g).is_wf :=
begin
classical,
revert hf,
apply f.induction_on,
{ intro h,
simp [set.is_pwo_empty.is_wf], },
{ intros s f sf hf hsf,
rw finset.sup_insert,
exact (hsf s (finset.mem_insert_self _ _)).union (hf (λ s' s'f, hsf _
(finset.mem_insert_of_mem s'f))) }
end
@[simp]
theorem finset.is_pwo_sup {ι : Type*} [partial_order α] (f : finset ι) (g : ι → set α)
(hf : ∀ i : ι, i ∈ f → (g i).is_pwo) : (f.sup g).is_pwo :=
begin
classical,
revert hf,
apply f.induction_on,
{ intro h,
simp [set.is_pwo_empty.is_wf], },
{ intros s f sf hf hsf,
rw finset.sup_insert,
exact (hsf s (finset.mem_insert_self _ _)).union (hf (λ s' s'f, hsf _
(finset.mem_insert_of_mem s'f))) }
end
namespace set
variables [linear_order α] {s t : set α} {a : α}
lemma is_wf.min_le
(hs : s.is_wf) (hn : s.nonempty) (ha : a ∈ s) : hs.min hn ≤ a :=
le_of_not_lt (hs.not_lt_min hn ha)
lemma is_wf.le_min_iff
(hs : s.is_wf) (hn : s.nonempty) :
a ≤ hs.min hn ↔ ∀ b, b ∈ s → a ≤ b :=
⟨λ ha b hb, le_trans ha (hs.min_le hn hb), λ h, h _ (hs.min_mem _)⟩
lemma is_wf.min_le_min_of_subset
{hs : s.is_wf} {hsn : s.nonempty} {ht : t.is_wf} {htn : t.nonempty} (hst : s ⊆ t) :
ht.min htn ≤ hs.min hsn :=
(is_wf.le_min_iff _ _).2 (λ b hb, ht.min_le htn (hst hb))
lemma is_wf.min_union (hs : s.is_wf) (hsn : s.nonempty) (ht : t.is_wf) (htn : t.nonempty) :
(hs.union ht).min (union_nonempty.2 (or.intro_left _ hsn)) = min (hs.min hsn) (ht.min htn) :=
begin
refine le_antisymm (le_min (is_wf.min_le_min_of_subset (subset_union_left _ _))
(is_wf.min_le_min_of_subset (subset_union_right _ _))) _,
rw min_le_iff,
exact ((mem_union _ _ _).1 ((hs.union ht).min_mem
(union_nonempty.2 (or.intro_left _ hsn)))).imp (hs.min_le _) (ht.min_le _),
end
end set
namespace set
variables {s : set α} {t : set α}
@[to_additive]
theorem is_pwo.mul [ordered_cancel_comm_monoid α] (hs : s.is_pwo) (ht : t.is_pwo) :
is_pwo (s * t) :=
begin
rw ← image_mul_prod,
exact (is_pwo.prod hs ht).image_of_monotone (λ _ _ h, mul_le_mul' h.1 h.2),
end
variable [linear_ordered_cancel_comm_monoid α]
@[to_additive]
theorem is_wf.mul (hs : s.is_wf) (ht : t.is_wf) : is_wf (s * t) :=
(hs.is_pwo.mul ht.is_pwo).is_wf
@[to_additive]
theorem is_wf.min_mul (hs : s.is_wf) (ht : t.is_wf) (hsn : s.nonempty) (htn : t.nonempty) :
(hs.mul ht).min (hsn.mul htn) = hs.min hsn * ht.min htn :=
begin
refine le_antisymm (is_wf.min_le _ _ (mem_mul.2 ⟨_, _, hs.min_mem _, ht.min_mem _, rfl⟩)) _,
rw is_wf.le_min_iff,
rintros _ ⟨x, y, hx, hy, rfl⟩,
exact mul_le_mul' (hs.min_le _ hx) (ht.min_le _ hy),
end
end set
namespace set
/-- `set.mul_antidiagonal s t a` is the set of all pairs of an element in `s` and an element in `t`
that multiply to `a`. -/
@[to_additive "`set.add_antidiagonal s t a` is the set of all pairs of an element in `s`
and an element in `t` that add to `a`."]
def mul_antidiagonal [monoid α] (s t : set α) (a : α) : set (α × α) :=
{ x | x.1 * x.2 = a ∧ x.1 ∈ s ∧ x.2 ∈ t }
namespace mul_antidiagonal
@[simp, to_additive]
lemma mem_mul_antidiagonal [monoid α] {s t : set α} {a : α} {x : α × α} :
x ∈ mul_antidiagonal s t a ↔ x.1 * x.2 = a ∧ x.1 ∈ s ∧ x.2 ∈ t := iff.refl _
section cancel_comm_monoid
variables [cancel_comm_monoid α] {s t : set α} {a : α}
@[to_additive]
lemma fst_eq_fst_iff_snd_eq_snd {x y : (mul_antidiagonal s t a)} :
(x : α × α).fst = (y : α × α).fst ↔ (x : α × α).snd = (y : α × α).snd :=
⟨λ h, begin
have hx := x.2.1,
rw [subtype.val_eq_coe, h] at hx,
apply mul_left_cancel (hx.trans y.2.1.symm),
end, λ h, begin
have hx := x.2.1,
rw [subtype.val_eq_coe, h] at hx,
apply mul_right_cancel (hx.trans y.2.1.symm),
end⟩
@[to_additive]
lemma eq_of_fst_eq_fst {x y : (mul_antidiagonal s t a)}
(h : (x : α × α).fst = (y : α × α).fst) : x = y :=
subtype.ext (prod.ext h (mul_antidiagonal.fst_eq_fst_iff_snd_eq_snd.1 h))
@[to_additive]
lemma eq_of_snd_eq_snd {x y : (mul_antidiagonal s t a)}
(h : (x : α × α).snd = (y : α × α).snd) : x = y :=
subtype.ext (prod.ext (mul_antidiagonal.fst_eq_fst_iff_snd_eq_snd.2 h) h)
end cancel_comm_monoid
section ordered_cancel_comm_monoid
variables [ordered_cancel_comm_monoid α] (s t : set α) (a : α)
@[to_additive]
lemma eq_of_fst_le_fst_of_snd_le_snd {x y : (mul_antidiagonal s t a)}
(h1 : (x : α × α).fst ≤ (y : α × α).fst) (h2 : (x : α × α).snd ≤ (y : α × α).snd ) :
x = y :=
begin
apply eq_of_fst_eq_fst,
cases eq_or_lt_of_le h1 with heq hlt,
{ exact heq },
exfalso,
exact ne_of_lt (mul_lt_mul_of_lt_of_le hlt h2)
((mem_mul_antidiagonal.1 x.2).1.trans (mem_mul_antidiagonal.1 y.2).1.symm)
end
variables {s} {t}
@[to_additive]
theorem finite_of_is_pwo (hs : s.is_pwo) (ht : t.is_pwo) (a) :
(mul_antidiagonal s t a).finite :=
begin
by_contra h,
rw [← set.infinite] at h,
have h1 : (mul_antidiagonal s t a).partially_well_ordered_on (prod.fst ⁻¹'o (≤)),
{ intros f hf,
refine hs (prod.fst ∘ f) _,
rw range_comp,
rintros _ ⟨⟨x, y⟩, hxy, rfl⟩,
exact (mem_mul_antidiagonal.1 (hf hxy)).2.1 },
have h2 : (mul_antidiagonal s t a).partially_well_ordered_on (prod.snd ⁻¹'o (≤)),
{ intros f hf,
refine ht (prod.snd ∘ f) _,
rw range_comp,
rintros _ ⟨⟨x, y⟩, hxy, rfl⟩,
exact (mem_mul_antidiagonal.1 (hf hxy)).2.2 },
obtain ⟨g, hg⟩ := h1.exists_monotone_subseq (λ x, h.nat_embedding _ x) _,
swap, { rintro _ ⟨k, rfl⟩,
exact ((infinite.nat_embedding (s.mul_antidiagonal t a) h) _).2 },
obtain ⟨m, n, mn, h2'⟩ := h2 (λ x, (h.nat_embedding _) (g x)) _,
swap, { rintro _ ⟨k, rfl⟩,
exact ((infinite.nat_embedding (s.mul_antidiagonal t a) h) _).2, },
apply ne_of_lt mn (g.injective ((h.nat_embedding _).injective _)),
exact eq_of_fst_le_fst_of_snd_le_snd _ _ _ (hg _ _ (le_of_lt mn)) h2',
end
end ordered_cancel_comm_monoid
@[to_additive]
theorem finite_of_is_wf [linear_ordered_cancel_comm_monoid α] {s t : set α}
(hs : s.is_wf) (ht : t.is_wf) (a) :
(mul_antidiagonal s t a).finite :=
finite_of_is_pwo hs.is_pwo ht.is_pwo a
end mul_antidiagonal
end set
namespace finset
variables [ordered_cancel_comm_monoid α]
variables {s t : set α} (hs : s.is_pwo) (ht : t.is_pwo) (a : α)
/-- `finset.mul_antidiagonal_of_is_wf hs ht a` is the set of all pairs of an element in
`s` and an element in `t` that multiply to `a`, but its construction requires proofs
`hs` and `ht` that `s` and `t` are well-ordered. -/
@[to_additive "`finset.add_antidiagonal_of_is_wf hs ht a` is the set of all pairs of an element in
`s` and an element in `t` that add to `a`, but its construction requires proofs
`hs` and `ht` that `s` and `t` are well-ordered."]
noncomputable def mul_antidiagonal : finset (α × α) :=
(set.mul_antidiagonal.finite_of_is_pwo hs ht a).to_finset
variables {hs} {ht} {u : set α} {hu : u.is_pwo} {a} {x : α × α}
@[simp, to_additive]
lemma mem_mul_antidiagonal :
x ∈ mul_antidiagonal hs ht a ↔ x.1 * x.2 = a ∧ x.1 ∈ s ∧ x.2 ∈ t :=
by simp [mul_antidiagonal]
@[to_additive]
lemma mul_antidiagonal_mono_left (hus : u ⊆ s) :
(finset.mul_antidiagonal hu ht a) ⊆ (finset.mul_antidiagonal hs ht a) :=
λ x hx, begin
rw mem_mul_antidiagonal at *,
exact ⟨hx.1, hus hx.2.1, hx.2.2⟩,
end
@[to_additive]
lemma mul_antidiagonal_mono_right (hut : u ⊆ t) :
(finset.mul_antidiagonal hs hu a) ⊆ (finset.mul_antidiagonal hs ht a) :=
λ x hx, begin
rw mem_mul_antidiagonal at *,
exact ⟨hx.1, hx.2.1, hut hx.2.2⟩,
end
@[to_additive]
lemma support_mul_antidiagonal_subset_mul :
{ a : α | (mul_antidiagonal hs ht a).nonempty } ⊆ s * t :=
(λ x ⟨⟨a1, a2⟩, ha⟩, begin
obtain ⟨hmul, h1, h2⟩ := mem_mul_antidiagonal.1 ha,
exact ⟨a1, a2, h1, h2, hmul⟩,
end)
@[to_additive]
theorem is_pwo_support_mul_antidiagonal :
{ a : α | (mul_antidiagonal hs ht a).nonempty }.is_pwo :=
(hs.mul ht).mono support_mul_antidiagonal_subset_mul
@[to_additive]
theorem mul_antidiagonal_min_mul_min {α} [linear_ordered_cancel_comm_monoid α] {s t : set α}
(hs : s.is_wf) (ht : t.is_wf)
(hns : s.nonempty) (hnt : t.nonempty) :
mul_antidiagonal hs.is_pwo ht.is_pwo ((hs.min hns) * (ht.min hnt)) =
{(hs.min hns, ht.min hnt)} :=
begin
ext ⟨a1, a2⟩,
rw [mem_mul_antidiagonal, finset.mem_singleton, prod.ext_iff],
split,
{ rintro ⟨hast, has, hat⟩,
cases eq_or_lt_of_le (hs.min_le hns has) with heq hlt,
{ refine ⟨heq.symm, _⟩,
rw heq at hast,
exact mul_left_cancel hast },
{ contrapose hast,
exact ne_of_gt (mul_lt_mul_of_lt_of_le hlt (ht.min_le hnt hat)) } },
{ rintro ⟨ha1, ha2⟩,
rw [ha1, ha2],
exact ⟨rfl, hs.min_mem _, ht.min_mem _⟩ }
end
end finset
lemma well_founded.is_wf [has_lt α] (h : well_founded ((<) : α → α → Prop)) (s : set α) :
s.is_wf :=
(set.is_wf_univ_iff.2 h).mono (set.subset_univ s)
|
abstract type AbstractStep <: AbstractPolicy end
# function stepsize(step::AbstractStep,k::Integer,fval::Real,x::AbstractVector,g::AbstractVector)
# error("No defined step function for step policy ", typeof(step))
# end
# Placeholder generic function; concrete step policies add their own `stepsize` methods.
function stepsize() end
# C-callable wrapper: POLO hands over raw pointers, which are wrapped as Julia
# arrays (without copying) before dispatching to the user's step policy.
function step_wrapper(klocal::Cint, kglobal::Cint, fval::Cdouble, xbegin::Ptr{Cdouble},
                      xend::Ptr{Cdouble}, gbegin::Ptr{Cdouble},
                      step_data::Ptr{Cvoid})
    step_policy = unsafe_pointer_to_objref(step_data)::AbstractStep
    ptrdiff = Int(xend - xbegin)              # byte distance between the two pointers
    N = divrem(ptrdiff, sizeof(Cdouble))[1]   # number of Cdouble elements
    x = unsafe_wrap(Array, xbegin, N)         # decision variable, no copy
    g = unsafe_wrap(Array, gbegin, N)         # gradient, no copy
    return stepsize(step_policy, klocal, kglobal, fval, x, g)
end
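# For illustration only (hypothetical, not part of POLO): a custom policy would
# subtype `AbstractStep` and add a matching `stepsize` method, e.g.
#
#     struct FixedStep <: AbstractStep
#         γ::Float64
#     end
#     stepsize(s::FixedStep, klocal, kglobal, fval, x, g) = s.γ
#
# `step_wrapper` above then forwards the C callback arguments to that method.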
module Step
using LinearAlgebra
using Parameters
using POLO: AbstractStep, AbstractPolicyParameters
import POLO: initialize!, stepsize
function initialize!(policy::AbstractStep,x₀::Vector{Float64})
nothing
end
include("constant.jl")
include("decreasing.jl")
include("bb.jl")
end
|
{-# OPTIONS --safe #-}
module Definition.Typed.Consequences.RelevanceUnicity where
open import Definition.Untyped hiding (U≢ℕ; U≢Π; U≢ne; ℕ≢Π; ℕ≢ne; Π≢ne; U≢Empty; ℕ≢Empty; Empty≢Π; Empty≢ne)
open import Definition.Untyped.Properties using (subst-Univ-either)
open import Definition.Typed
open import Definition.Typed.Properties
open import Definition.Typed.Weakening
open import Definition.Typed.Consequences.Equality
import Definition.Typed.Consequences.Inequality as Ineq
open import Definition.Typed.Consequences.Inversion
open import Definition.Typed.Consequences.Injectivity
open import Definition.Typed.Consequences.NeTypeEq
open import Definition.Typed.Consequences.Syntactic
open import Definition.Typed.Consequences.PiNorm
open import Definition.Typed.Consequences.Substitution
open import Tools.Product
open import Tools.Empty
open import Tools.Sum using (_⊎_; inj₁; inj₂)
import Tools.PropositionalEquality as PE
ℕ-relevant-term : ∀ {Γ A r} → Γ ⊢ ℕ ∷ A ^ r → Whnf A → A PE.≡ Univ ! ⁰
ℕ-relevant-term [ℕ] whnfA = let [[N]] , e = inversion-ℕ [ℕ]
in U≡A-whnf (sym (PE.subst (λ r → _ ⊢ _ ≡ _ ^ r) e [[N]])) whnfA
ℕ-relevant : ∀ {Γ r} → Γ ⊢ ℕ ^ r → r PE.≡ [ ! , ι ⁰ ]
ℕ-relevant (univ [ℕ]) = let er , el = Univ-PE-injectivity (ℕ-relevant-term [ℕ] Uₙ)
in PE.cong₂ (λ x y → [ x , ι y ]) er el
Empty-irrelevant-term : ∀ {Γ A lEmpty r} → Γ ⊢ Empty lEmpty ∷ A ^ r → Whnf A → A PE.≡ SProp lEmpty
Empty-irrelevant-term [Empty] whnfA = let [[Empty]] , e = inversion-Empty [Empty]
in U≡A-whnf (sym (PE.subst (λ r → _ ⊢ _ ≡ _ ^ r) e [[Empty]])) whnfA
Empty-irrelevant : ∀ {Γ lEmpty r} → Γ ⊢ Empty lEmpty ^ r → r PE.≡ [ % , ι lEmpty ]
Empty-irrelevant (univ [Empty]) = let er , el = Univ-PE-injectivity (Empty-irrelevant-term [Empty] Uₙ)
in PE.cong₂ (λ x y → [ x , ι y ]) er el
Univ-relevant-term : ∀ {Γ A rU lU r} → Γ ⊢ Univ rU lU ∷ A ^ r → Whnf A → A PE.≡ U ¹ × lU PE.≡ ⁰
Univ-relevant-term [U] whnfA = U≡A-whnf (sym (proj₁ (inversion-U [U]))) whnfA , proj₂ (proj₂ (inversion-U [U]))
Univ-relevant : ∀ {Γ rU lU r} → Γ ⊢ Univ rU lU ^ r → r PE.≡ [ ! , next lU ]
Univ-relevant (Uⱼ _) = PE.refl
Univ-relevant (univ [U]) = let er , el = Univ-PE-injectivity (proj₁ (Univ-relevant-term [U] Uₙ))
in PE.cong₂ (λ x y → [ x , y ]) er
(PE.trans (PE.cong ι el) (PE.cong next (PE.sym (proj₂ (Univ-relevant-term [U] Uₙ)))))
mutual
Univ-uniq′ : ∀ {Γ A T₁ T₂ r₁ r₂ l₁ l₁' l₂ l₂'} → Γ ⊢ T₁ ≡ Univ r₁ l₁ ^ [ ! , l₁' ] → Γ ⊢ T₂ ≡ Univ r₂ l₂ ^ [ ! , l₂' ]
→ next l₁ PE.≡ l₁' → next l₂ PE.≡ l₂'
→ ΠNorm A
→ Γ ⊢ A ∷ T₁ ^ [ ! , l₁' ] → Γ ⊢ A ∷ T₂ ^ [ ! , l₂' ] → r₁ PE.≡ r₂ × l₁' PE.≡ l₂'
Univ-uniq′ e₁ e₂ el₁ el₂ w (univ 0<1 x₁) (univ 0<1 x₃) =
let er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.refl
Univ-uniq′ e₁ e₂ el₁ PE.refl w (ℕⱼ x) y =
let e₁′ , el₁′ = Uinjectivity e₁
e₂′ , el₂′ = Uinjectivity (trans (sym e₂) (proj₁ (inversion-ℕ y)) )
in PE.sym (PE.trans e₂′ e₁′) , PE.cong next (PE.sym el₂′)
Univ-uniq′ e₁ e₂ el₁ el₂ w (Emptyⱼ x) y =
let e₁′ , el₁′ = Uinjectivity e₁
e₂′ , el₂′ = Uinjectivity (trans (sym e₂) (proj₁ (inversion-Empty y)) )
in PE.sym (PE.trans e₂′ e₁′) , PE.trans (PE.cong next (PE.sym el₂′)) el₂
Univ-uniq′ e₁ e₂ el₁ el₂ w (Πⱼ a ▹ b ▹ x ▹ x₁) (Πⱼ a' ▹ b' ▹ y ▹ y₁) =
let er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
res = Univ-uniq′ (refl (Ugenⱼ (wfTerm x₁))) (refl (Ugenⱼ (wfTerm x₁)))
PE.refl PE.refl (ΠNorm-Π w) x₁ y₁
in PE.trans (PE.sym er₁) (PE.trans (proj₁ res) er₂) , PE.refl
Univ-uniq′ e₁ e₂ el₁ el₂ (∃ₙ w) (∃ⱼ x ▹ x₁) (∃ⱼ y ▹ y₁) =
let er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
_ , el = Univ-uniq w x y
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ w (var _ x) (var _ y) =
let T≡T , e = varTypeEq′ x y
_ , el = typelevel-injectivity e
⊢T≡T = PE.subst (λ T → _ ⊢ _ ≡ T ^ _) T≡T (refl (proj₁ (syntacticEq e₁)))
in proj₁ (Uinjectivity (trans (trans (sym e₁) ⊢T≡T) (PE.subst (λ lx → _ ⊢ _ ≡ _ ^ [ _ , lx ]) (PE.sym el) e₂))) , el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne ()) (lamⱼ x x₁ x₂ X) y
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (∘ₙ n)) (_∘ⱼ_ {G = G} x x₁) (_∘ⱼ_ {G = G₁} y y₁) =
let F≡F , rF≡rF , lF≡lF , lG≡lG , G≡G = injectivity (proj₂ (neTypeEq n x y))
r≡r , _ = Uinjectivity (trans (sym e₁) (trans (substitutionEq G≡G (substRefl (singleSubst x₁)) (wfEq F≡F))
(PE.subst (λ lx → _ ⊢ _ ≡ _ ^ [ _ , ι lx ]) (PE.sym lG≡lG) e₂)))
in r≡r , PE.cong ι lG≡lG
Univ-uniq′ e₁ e₂ el₁ el₂ (ne ()) (zeroⱼ x) y
Univ-uniq′ e₁ e₂ el₁ el₂ (ne ()) (sucⱼ X) y
Univ-uniq′ e₁ e₂ el₁ el₂ w (natrecⱼ x x₁ x₂ x₃) (natrecⱼ x₄ y y₁ y₂) = proj₁ (Uinjectivity (trans (sym e₁) e₂)) , PE.refl
Univ-uniq′ e₁ e₂ el₁ el₂ w (Emptyrecⱼ x x₁) (Emptyrecⱼ y y₁) = proj₁ (Uinjectivity (trans (sym e₁) e₂)) , PE.refl
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (Idₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq (ne x) X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (Idℕₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq ℕₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (Idℕ0ₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq ℕₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (IdℕSₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq ℕₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (IdUₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq Uₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (IdUℕₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq Uₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ (ne (IdUΠₙ x)) (Idⱼ X X₁ X₂) (Idⱼ {l = ll} Y Y₁ Y₂) =
let _ , el = Univ-uniq Uₙ X Y
er₁ , _ = Uinjectivity e₁
er₂ , _ = Uinjectivity e₂
in PE.trans (PE.sym er₁) er₂ , PE.cong next el
Univ-uniq′ e₁ e₂ el₁ el₂ w (castⱼ X X₁ X₂ X₃) (castⱼ y y₁ y₂ y₃) = proj₁ (Uinjectivity (trans (sym e₁) e₂)) , PE.refl
Univ-uniq′ e₁ e₂ el₁ el₂ w (conv x x₁) y = Univ-uniq′ (trans x₁ e₁) e₂ el₁ el₂ w x y
Univ-uniq′ e₁ e₂ el₁ el₂ w x (conv y y₁) = Univ-uniq′ e₁ (trans y₁ e₂) el₁ el₂ w x y
Univ-uniq : ∀ {Γ A r₁ r₂ l₁ l₂} → ΠNorm A
→ Γ ⊢ A ∷ Univ r₁ l₁ ^ [ ! , next l₁ ] → Γ ⊢ A ∷ Univ r₂ l₂ ^ [ ! , next l₂ ] → r₁ PE.≡ r₂ × l₁ PE.≡ l₂
Univ-uniq n ⊢A₁ ⊢A₂ =
let ⊢Γ = wfTerm ⊢A₁
er , el = Univ-uniq′ (refl (Ugenⱼ ⊢Γ)) (refl (Ugenⱼ ⊢Γ)) PE.refl PE.refl n ⊢A₁ ⊢A₂
in er , next-inj el
relevance-unicity′ : ∀ {Γ A r₁ r₂ l₁ l₂} → ΠNorm A → Γ ⊢ A ^ [ r₁ , l₁ ] → Γ ⊢ A ^ [ r₂ , l₂ ] → r₁ PE.≡ r₂ × l₁ PE.≡ l₂
relevance-unicity′ n (Uⱼ x) (Uⱼ x₁) = PE.refl , PE.refl
relevance-unicity′ n (Uⱼ x) (univ x₁) = let _ , _ , ¹≡⁰ = inversion-U x₁ in ⊥-elim (⁰≢¹ (PE.sym ¹≡⁰))
relevance-unicity′ n (univ x) (Uⱼ x₁) = let _ , _ , ¹≡⁰ = inversion-U x in ⊥-elim (⁰≢¹ (PE.sym ¹≡⁰))
relevance-unicity′ n (univ x) (univ x₁) = let er , el = Univ-uniq n x x₁ in er , PE.cong ι el
relevance-unicity : ∀ {Γ A r₁ r₂ l₁ l₂} → Γ ⊢ A ^ [ r₁ , l₁ ] → Γ ⊢ A ^ [ r₂ , l₂ ] → r₁ PE.≡ r₂ × l₁ PE.≡ l₂
relevance-unicity ⊢A₁ ⊢A₂ with doΠNorm ⊢A₁
... | _ with doΠNorm ⊢A₂
relevance-unicity ⊢A₁ ⊢A₂ | B , nB , ⊢B , rB | C , nC , ⊢C , rC =
let e = detΠNorm* nB nC rB rC
in relevance-unicity′ nC (PE.subst _ e ⊢B) ⊢C
-- inequalities at any relevance
U≢ℕ : ∀ {r r′ l l′ Γ} → Γ ⊢ Univ r l ≡ ℕ ^ [ r′ , l′ ] → ⊥
U≢ℕ U≡ℕ = Ineq.U≢ℕ! (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ])
(proj₁ (relevance-unicity (proj₂ (syntacticEq U≡ℕ))
(univ (ℕⱼ (wfEq U≡ℕ)))))
U≡ℕ)
U≢Π : ∀ {rU lU F rF G lF lG lΠ r Γ} → Γ ⊢ Univ rU lU ≡ Π F ^ rF ° lF ▹ G ° lG ° lΠ ^ [ r , ι lΠ ] → ⊥
U≢Π U≡Π =
let r≡! , _ = relevance-unicity (proj₁ (syntacticEq U≡Π)) (Ugenⱼ (wfEq U≡Π))
in Ineq.U≢Π! (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡! U≡Π)
U≢ne : ∀ {rU lU r l K Γ} → Neutral K → Γ ⊢ Univ rU lU ≡ K ^ [ r , ι l ] → ⊥
U≢ne neK U≡K =
let r≡! , _ = relevance-unicity (proj₁ (syntacticEq U≡K)) (Ugenⱼ (wfEq U≡K))
in Ineq.U≢ne! neK (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡! U≡K)
ℕ≢Π : ∀ {F rF G lF lG r Γ} → Γ ⊢ ℕ ≡ Π F ^ rF ° lF ▹ G ° lG ° ⁰ ^ [ r , ι ⁰ ] → ⊥
ℕ≢Π ℕ≡Π =
let r≡! , _ = relevance-unicity (proj₁ (syntacticEq ℕ≡Π)) (univ (ℕⱼ (wfEq ℕ≡Π)))
in Ineq.ℕ≢Π! (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡! ℕ≡Π)
Empty≢Π : ∀ {F rF G lF lG lΠ r Γ} → Γ ⊢ Empty lΠ ≡ Π F ^ rF ° lF ▹ G ° lG ° lΠ ^ [ r , ι lΠ ] → ⊥
Empty≢Π Empty≡Π =
let r≡% , _ = relevance-unicity (proj₁ (syntacticEq Empty≡Π)) (univ (Emptyⱼ (wfEq Empty≡Π)))
in Ineq.Empty≢Π% (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡% Empty≡Π)
ℕ≢ne : ∀ {K r Γ} → Neutral K → Γ ⊢ ℕ ≡ K ^ [ r , ι ⁰ ] → ⊥
ℕ≢ne neK ℕ≡K =
let r≡! , _ = relevance-unicity (proj₁ (syntacticEq ℕ≡K)) (univ (ℕⱼ (wfEq ℕ≡K)))
in Ineq.ℕ≢ne! neK (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡! ℕ≡K)
Empty≢ne : ∀ {K r l Γ} → Neutral K → Γ ⊢ Empty l ≡ K ^ [ r , ι l ] → ⊥
Empty≢ne neK Empty≡K =
let r≡% , _ = relevance-unicity (proj₁ (syntacticEq Empty≡K)) (univ (Emptyⱼ (wfEq Empty≡K)))
in Ineq.Empty≢ne% neK (PE.subst (λ rx → _ ⊢ _ ≡ _ ^ [ rx , _ ]) r≡% Empty≡K)
-- U != Empty is given easily by relevances
U≢Empty : ∀ {Γ rU lU lEmpty r′} → Γ ⊢ Univ rU lU ≡ Empty lEmpty ^ r′ → ⊥
U≢Empty U≡Empty =
let ⊢U , ⊢Empty = syntacticEq U≡Empty
e₁ , _ = relevance-unicity ⊢U (Ugenⱼ (wfEq U≡Empty))
e₂ , _ = relevance-unicity ⊢Empty (univ (Emptyⱼ (wfEq U≡Empty)))
in !≢% (PE.trans (PE.sym e₁) e₂)
-- ℕ and Empty also by relevance
ℕ≢Empty : ∀ {Γ r l} → Γ ⊢ ℕ ≡ Empty l ^ r → ⊥
ℕ≢Empty ℕ≡Empty =
let ⊢ℕ , ⊢Empty = syntacticEq ℕ≡Empty
e₁ , _ = relevance-unicity ⊢ℕ (univ (ℕⱼ (wfEq ℕ≡Empty)))
e₂ , _ = relevance-unicity ⊢Empty (univ (Emptyⱼ (wfEq ℕ≡Empty)))
in !≢% (PE.trans (PE.sym e₁) e₂)
|
Require Import MetaCoq.Template.All.
Print tmQuote.
Print typing.
About typing_spine.
From MetaCoq.PCUIC Require Import
PCUICAst PCUICAstUtils PCUICInduction
PCUICSize
PCUICLiftSubst PCUICEquality
PCUICUnivSubst PCUICTyping PCUICGeneration.
(* Require Import MetaCoq.Template.All. *)
Require Import List String.
Import ListNotations MonadNotation Nat.
Require Import MetaCoq.Template.Pretty.
Require Import MetaCoq.PCUIC.PCUICPretty.
Check size.
Require Import Lia.
Goal forall (P:nat->Prop),
(forall n, (forall m, m<n -> P m) -> P n) ->
forall n, P n.
Proof.
intros P H n.
apply H.
induction n.
- intros.
lia.
- intros.
apply H.
intros.
apply IHn.
lia.
Qed.
Lemma size_induction X (f:X->nat) (P:X->Type):
(forall x, (forall y, f y<f x -> P y) -> P x) ->
forall x, P x.
Proof.
intros. apply X0.
assert(forall n y, f y < n -> P y).
{
induction n.
- lia.
- intros.
apply X0.
intros.
apply IHn.
lia.
}
apply X1.
Defined.
Goal forall (P:term->Type),
(forall t, (forall t2, size t2 < size t -> P t2) -> P t) ->
forall t, P t.
Proof.
apply size_induction.
Defined.
Print well_founded_induction.
(* Require Import Coq.Arith.Wf_nat. *)
Require Import Coq.Wellfounded.Wellfounded.
(* Print ltof. *)
(* Check (induction_ltof1 _ (@List.length term)). *)
(* Check (well_founded_induction *)
(* (wf_inverse_image _ nat _ (@List.length _) *)
(* PeanoNat.Nat.lt_wf_0)). *)
Definition term_size_ind := well_founded_induction
(wf_inverse_image _ nat _ (size)
PeanoNat.Nat.lt_wf_0).
Check term_size_ind.
Goal forall (P:term->Type),
(forall t, (forall t2, size t2 < size t -> P t2) -> P t) ->
forall t, P t.
Proof.
apply size_induction.
intros.
apply X.
induction t using term_forall_list_ind;simpl;intros.
- assert(size t2=0) by lia.
destruct t2;cbn in H0;congruence.
- assert(size t2=0) by lia.
destruct t2;cbn in H0;congruence.
- admit.
-
|
"""
mgradient(img; mode=:beucher, dims=coords_spatial(img), r=1)
mgradient(img, se; mode=:beucher)
Calculate morphological gradient of the image using given mode.
There are three widely used modes[1]:
- `:beucher`: the default mode. It calculates the arithmetic difference between the dilation
and the erosion -- `dilate(img, se) - erode(img, se)`.
- `:internal`: also known as _half-gradient by erosion_. It calculates the arithmetic
difference between the original image and its erosion -- `img - erode(img, se)`.
- `:external`: also known as _half-gradient by dilation_. It calculates the arithmetic
  difference between dilation and the original image -- `dilate(img, se) - img`.
$(_docstring_se)
# Examples
```jldoctest; setup = :(using ImageMorphology)
julia> img = falses(7, 7); img[3:5, 3:5] .= true; img
7×7 BitMatrix:
0 0 0 0 0 0 0
0 0 0 0 0 0 0
0 0 1 1 1 0 0
0 0 1 1 1 0 0
0 0 1 1 1 0 0
0 0 0 0 0 0 0
0 0 0 0 0 0 0
julia> Int.(mgradient(img)) # default mode :beucher always creates a two-pixel wide boundary
7×7 Matrix{$Int}:
0 0 0 0 0 0 0
0 1 1 1 1 1 0
0 1 1 1 1 1 0
0 1 1 0 1 1 0
0 1 1 1 1 1 0
0 1 1 1 1 1 0
0 0 0 0 0 0 0
julia> Int.(mgradient(img; mode=:internal)) # half-gradient -- the boundary is internal to original image
7×7 Matrix{$Int}:
0 0 0 0 0 0 0
0 0 0 0 0 0 0
0 0 1 1 1 0 0
0 0 1 0 1 0 0
0 0 1 1 1 0 0
0 0 0 0 0 0 0
0 0 0 0 0 0 0
julia> Int.(mgradient(img; mode=:external)) # half-gradient -- the boundary is external to original image
7×7 Matrix{$Int}:
0 0 0 0 0 0 0
0 1 1 1 1 1 0
0 1 0 0 0 1 0
0 1 0 0 0 1 0
0 1 0 0 0 1 0
0 1 1 1 1 1 0
0 0 0 0 0 0 0
julia> Int.(mgradient(img, strel_diamond(img))) # use diamond shape SE
7×7 Matrix{$Int}:
0 0 0 0 0 0 0
0 0 1 1 1 0 0
0 1 1 1 1 1 0
0 1 1 0 1 1 0
0 1 1 1 1 1 0
0 0 1 1 1 0 0
0 0 0 0 0 0 0
```
The beucher operator is a self-complementary operator in the sense that `mgradient(img, se;
mode=:beucher) == mgradient(complement.(img), se; mode=:beucher)`. When `r>1`, it is usually
called _thick gradient_. If a line segment is used as `se`, then the gradient becomes the
_directional gradient_.
## See also
- [`mgradient!`](@ref) is the in-place version of this function.
- [`mlaplacian`](@ref) for the laplacian operator.
- `ImageBase.FiniteDiff` also provides a few finite difference operators, including `fdiff`,
`fgradient`, etc.
## References
- [1] Rivest, Jean-Francois, Pierre Soille, and Serge Beucher. "Morphological gradients."
Journal of Electronic Imaging 2.4 (1993): 326-336.
"""
function mgradient(img; dims=coords_spatial(img), r=nothing, mode=:beucher)
return mgradient(img, strel_box(img, dims; r); mode)
end
function mgradient(img::AbstractArray{T}, se; mode=:beucher) where {T}
out = similar(img, maybe_floattype(T))
buffer = _make_gradient_buffer(img, mode)
return mgradient!(out, img, se, buffer; mode)
end
"""
mgradient!(out, img, buffer; [dims], [r], [mode])
mgradient!(out, img, se, buffer; [mode])
The in-place version of [`mgradient`](@ref) with input image `img` and output image `out`.
The `buffer` array is required for `:beucher` mode. For `:internal` and `:external` modes,
`buffer` is not needed and can be `nothing`.
"""
function mgradient!(out, img, buffer; dims=coords_spatial(img), r=nothing, mode=:beucher)
return mgradient!(out, img, strel_box(img, dims; r), buffer; mode)
end
function mgradient!(out, img, se, @nospecialize(buffer); mode=:beucher)
require_symmetric_strel(se)
@debug "calculate mgradient using $mode mode"
if mode == :beucher
isnothing(buffer) && throw(ArgumentError("buffer array is required for mode :beucher"))
_beucher_gradient!(out, img, se, buffer)
elseif mode == :internal
_internal_gradient!(out, img, se)
    elseif mode == :external
        _external_gradient!(out, img, se)
    else
        throw(ArgumentError("mode $mode is not supported; expected :beucher, :internal or :external"))
    end
return out
end
mgradient!(out::AbstractArray{<:Color3}, img, se, buffer; kwargs...) = throw(ArgumentError("color image is not supported"))
_make_gradient_buffer(img, mode) = mode == :beucher ? similar(img, maybe_floattype(eltype(img))) : nothing
function _internal_gradient!(out, img, se)
erode!(out, img, se)
@. out = img - out
return out
end
function _external_gradient!(out, img, se)
dilate!(out, img, se)
@. out = out - img
return out
end
function _beucher_gradient!(out, img, se, buffer)
dilate!(out, img, se)
erode!(buffer, img, se)
@. out = out - buffer
return out
end
|
[STATEMENT]
lemma red_indE:
assumes "red F p q"
shows "(\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and>
(q = p - monom_mult (lc p / lc f) (lp p - lp f) f)) \<or>
red F (tail p) (q - monomial (lc p) (lt p))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from red_nonzero[OF assms]
[PROOF STATE]
proof (chain)
picking this:
p \<noteq> 0
[PROOF STEP]
have "p \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
p \<noteq> 0
goal (1 subgoal):
1. p \<noteq> 0
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
p \<noteq> 0
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from red_setE[OF assms]
[PROOF STATE]
proof (chain)
picking this:
(\<And>f t. \<lbrakk>f \<in> F; red_single p q f t\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
[PROOF STEP]
obtain f t where "f \<in> F" and rs: "red_single p q f t"
[PROOF STATE]
proof (prove)
using this:
(\<And>f t. \<lbrakk>f \<in> F; red_single p q f t\<rbrakk> \<Longrightarrow> ?thesis) \<Longrightarrow> ?thesis
goal (1 subgoal):
1. (\<And>f t. \<lbrakk>f \<in> F; red_single p q f t\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
f \<in> F
red_single p q f t
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from rs
[PROOF STATE]
proof (chain)
picking this:
red_single p q f t
[PROOF STEP]
have "f \<noteq> 0"
and cn0: "lookup p (t \<oplus> lt f) \<noteq> 0"
and q: "q = p - monom_mult ((lookup p (t \<oplus> lt f)) / lc f) t f"
[PROOF STATE]
proof (prove)
using this:
red_single p q f t
goal (1 subgoal):
1. f \<noteq> 0 &&& lookup p (t \<oplus> lt f) \<noteq> (0::'b) &&& q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
unfolding red_single_def
[PROOF STATE]
proof (prove)
using this:
f \<noteq> 0 \<and> lookup p (t \<oplus> lt f) \<noteq> (0::'b) \<and> q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. f \<noteq> 0 &&& lookup p (t \<oplus> lt f) \<noteq> (0::'b) &&& q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
f \<noteq> 0
lookup p (t \<oplus> lt f) \<noteq> (0::'b)
q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
proof (cases "lt p = t \<oplus> lt f")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. lt p = t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
2. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
case True
[PROOF STATE]
proof (state)
this:
lt p = t \<oplus> lt f
goal (2 subgoals):
1. lt p = t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
2. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
hence "lt f adds\<^sub>t lt p"
[PROOF STATE]
proof (prove)
using this:
lt p = t \<oplus> lt f
goal (1 subgoal):
1. lt f adds\<^sub>t lt p
[PROOF STEP]
by (simp add: term_simps)
[PROOF STATE]
proof (state)
this:
lt f adds\<^sub>t lt p
goal (2 subgoals):
1. lt p = t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
2. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from True
[PROOF STATE]
proof (chain)
picking this:
lt p = t \<oplus> lt f
[PROOF STEP]
have eq1: "lp p - lp f = t"
[PROOF STATE]
proof (prove)
using this:
lt p = t \<oplus> lt f
goal (1 subgoal):
1. lp p - lp f = t
[PROOF STEP]
by (simp add: term_simps)
[PROOF STATE]
proof (state)
this:
lp p - lp f = t
goal (2 subgoals):
1. lt p = t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
2. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from True
[PROOF STATE]
proof (chain)
picking this:
lt p = t \<oplus> lt f
[PROOF STEP]
have eq2: "lc p = lookup p (t \<oplus> lt f)"
[PROOF STATE]
proof (prove)
using this:
lt p = t \<oplus> lt f
goal (1 subgoal):
1. lc p = lookup p (t \<oplus> lt f)
[PROOF STEP]
unfolding lc_def
[PROOF STATE]
proof (prove)
using this:
lt p = t \<oplus> lt f
goal (1 subgoal):
1. lookup p (lt p) = lookup p (t \<oplus> lt f)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lc p = lookup p (t \<oplus> lt f)
goal (2 subgoals):
1. lt p = t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
2. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
proof (intro disjI1, rule bexI[of _ f], intro conjI, fact+)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. q = p - monom_mult (lc p / lc f) (lp p - lp f) f
2. f \<in> F
[PROOF STEP]
from q eq1 eq2
[PROOF STATE]
proof (chain)
picking this:
q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
lp p - lp f = t
lc p = lookup p (t \<oplus> lt f)
[PROOF STEP]
show "q = p - monom_mult (lc p / lc f) (lp p - lp f) f"
[PROOF STATE]
proof (prove)
using this:
q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
lp p - lp f = t
lc p = lookup p (t \<oplus> lt f)
goal (1 subgoal):
1. q = p - monom_mult (lc p / lc f) (lp p - lp f) f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
q = p - monom_mult (lc p / lc f) (lp p - lp f) f
goal (1 subgoal):
1. f \<in> F
[PROOF STEP]
qed (fact)
[PROOF STATE]
proof (state)
this:
(\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
goal (1 subgoal):
1. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
lt p \<noteq> t \<oplus> lt f
goal (1 subgoal):
1. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
from this lookup_tail_2[of p "t \<oplus> lt f"]
[PROOF STATE]
proof (chain)
picking this:
lt p \<noteq> t \<oplus> lt f
lookup (tail p) (t \<oplus> lt f) = (if t \<oplus> lt f = lt p then 0::'b else lookup p (t \<oplus> lt f))
[PROOF STEP]
have ct: "lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)"
[PROOF STATE]
proof (prove)
using this:
lt p \<noteq> t \<oplus> lt f
lookup (tail p) (t \<oplus> lt f) = (if t \<oplus> lt f = lt p then 0::'b else lookup p (t \<oplus> lt f))
goal (1 subgoal):
1. lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)
goal (1 subgoal):
1. lt p \<noteq> t \<oplus> lt f \<Longrightarrow> (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
[PROOF STEP]
proof (intro disjI2, intro red_setI[of f], fact)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. red_single (tail p) (q - monomial (lc p) (lt p)) f ?t1
[PROOF STEP]
show "red_single (tail p) (q - monomial (lc p) (lt p)) f t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. red_single (tail p) (q - monomial (lc p) (lt p)) f t
[PROOF STEP]
unfolding red_single_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<noteq> 0 \<and> lookup (tail p) (t \<oplus> lt f) \<noteq> (0::'b) \<and> q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
proof (intro conjI, fact)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. lookup (tail p) (t \<oplus> lt f) \<noteq> (0::'b)
2. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
from cn0 ct
[PROOF STATE]
proof (chain)
picking this:
lookup p (t \<oplus> lt f) \<noteq> (0::'b)
lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)
[PROOF STEP]
show "lookup (tail p) (t \<oplus> lt f) \<noteq> 0"
[PROOF STATE]
proof (prove)
using this:
lookup p (t \<oplus> lt f) \<noteq> (0::'b)
lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)
goal (1 subgoal):
1. lookup (tail p) (t \<oplus> lt f) \<noteq> (0::'b)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
lookup (tail p) (t \<oplus> lt f) \<noteq> (0::'b)
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
from leading_monomial_tail[of p]
[PROOF STATE]
proof (chain)
picking this:
p = monomial (lc p) (lt p) + tail p
[PROOF STEP]
have "p - monomial (lc p) (lt p) = (monomial (lc p) (lt p) + tail p) - monomial (lc p) (lt p)"
[PROOF STATE]
proof (prove)
using this:
p = monomial (lc p) (lt p) + tail p
goal (1 subgoal):
1. p - monomial (lc p) (lt p) = monomial (lc p) (lt p) + tail p - monomial (lc p) (lt p)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
p - monomial (lc p) (lt p) = monomial (lc p) (lt p) + tail p - monomial (lc p) (lt p)
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
p - monomial (lc p) (lt p) = monomial (lc p) (lt p) + tail p - monomial (lc p) (lt p)
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
have "\<dots> = tail p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monomial (lc p) (lt p) + tail p - monomial (lc p) (lt p) = tail p
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
monomial (lc p) (lt p) + tail p - monomial (lc p) (lt p) = tail p
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
p - monomial (lc p) (lt p) = tail p
[PROOF STEP]
have eq: "p - monomial (lc p) (lt p) = tail p"
[PROOF STATE]
proof (prove)
using this:
p - monomial (lc p) (lt p) = tail p
goal (1 subgoal):
1. p - monomial (lc p) (lt p) = tail p
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
p - monomial (lc p) (lt p) = tail p
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
from q
[PROOF STATE]
proof (chain)
picking this:
q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
have "q - monomial (lc p) (lt p) =
(p - monomial (lc p) (lt p)) - monom_mult ((lookup p (t \<oplus> lt f)) / lc f) t f"
[PROOF STATE]
proof (prove)
using this:
q = p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = p - monomial (lc p) (lt p) - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
q - monomial (lc p) (lt p) = p - monomial (lc p) (lt p) - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
q - monomial (lc p) (lt p) = p - monomial (lc p) (lt p) - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
from eq
[PROOF STATE]
proof (chain)
picking this:
p - monomial (lc p) (lt p) = tail p
[PROOF STEP]
have "\<dots> = tail p - monom_mult ((lookup p (t \<oplus> lt f)) / lc f) t f"
[PROOF STATE]
proof (prove)
using this:
p - monomial (lc p) (lt p) = tail p
goal (1 subgoal):
1. p - monomial (lc p) (lt p) - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f = tail p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
p - monomial (lc p) (lt p) - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f = tail p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
q - monomial (lc p) (lt p) = tail p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
show "q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f"
[PROOF STATE]
proof (prove)
using this:
q - monomial (lc p) (lt p) = tail p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
using ct
[PROOF STATE]
proof (prove)
using this:
q - monomial (lc p) (lt p) = tail p - monom_mult (lookup p (t \<oplus> lt f) / lc f) t f
lookup (tail p) (t \<oplus> lt f) = lookup p (t \<oplus> lt f)
goal (1 subgoal):
1. q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
q - monomial (lc p) (lt p) = tail p - monom_mult (lookup (tail p) (t \<oplus> lt f) / lc f) t f
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
red_single (tail p) (q - monomial (lc p) (lt p)) f t
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
(\<exists>f\<in>F. f \<noteq> 0 \<and> lt f adds\<^sub>t lt p \<and> q = p - monom_mult (lc p / lc f) (lp p - lp f) f) \<or> red F (tail p) (q - monomial (lc p) (lt p))
goal:
No subgoals!
[PROOF STEP]
qed
|
! RUN: bbc %s -o - | FileCheck %s
! Constant array ctor.
! CHECK-LABEL: func @_QPtest1(
subroutine test1(a, b)
real :: a(3)
integer :: b(4)
integer, parameter :: constant_array(4) = [6, 7, 42, 9]
! Array ctors for constant arrays should be outlined as constant globals.
! Look at inline constructor case
! CHECK: %{{.*}} = fir.address_of(@_QQro.3xr4.6e55f044605a4991f15fd4505d83faf4) : !fir.ref<!fir.array<3xf32>>
a = (/ 1.0, 2.0, 3.0 /)
! Look at PARAMETER case
! CHECK: %{{.*}} = fir.address_of(@_QQro.4xi4.6a6af0eea868c84da59807d34f7e1a86) : !fir.ref<!fir.array<4xi32>>
b = constant_array
end subroutine test1
! Dynamic array ctor with constant extent.
! CHECK-LABEL: func @_QPtest2(
! CHECK-SAME: %[[a:[^:]*]]: !fir.ref<!fir.array<5xf32>>{{.*}}, %[[b:[^:]*]]: !fir.ref<f32>{{.*}})
subroutine test2(a, b)
real :: a(5), b
real, external :: f
! Look for the 5 store patterns
! CHECK: %[[tmp:.*]] = fir.allocmem !fir.array<5xf32>
! CHECK: %[[val:.*]] = fir.call @_QPf(%[[b]]) : (!fir.ref<f32>) -> f32
! CHECK: %[[loc:.*]] = fir.coordinate_of %{{.*}}, %{{.*}} : (!fir.heap<!fir.array<5xf32>>, index) -> !fir.ref<f32>
! CHECK: fir.store %[[val]] to %[[loc]] : !fir.ref<f32>
! CHECK: fir.call @_QPf(%{{.*}}) : (!fir.ref<f32>) -> f32
! CHECK: fir.coordinate_of %{{.*}}, %{{.*}} : (!fir.heap<!fir.array<5xf32>>, index) -> !fir.ref<f32>
! CHECK: fir.store
! CHECK: fir.call @_QPf(
! CHECK: fir.coordinate_of %
! CHECK: fir.store
! CHECK: fir.call @_QPf(
! CHECK: fir.coordinate_of %
! CHECK: fir.store
! CHECK: fir.call @_QPf(
! CHECK: fir.coordinate_of %
! CHECK: fir.store
! After the ctor done, loop to copy result to `a`
! CHECK-DAG: fir.array_coor %[[tmp:.*]](%
! CHECK-DAG: %[[ai:.*]] = fir.array_coor %[[a]](%
! CHECK: fir.store %{{.*}} to %[[ai]] : !fir.ref<f32>
! CHECK: fir.freemem %[[tmp]]
a = [f(b), f(b+1), f(b+2), f(b+5), f(b+11)]
end subroutine test2
! Dynamic array ctor with dynamic extent.
! CHECK-LABEL: func @_QPtest3(
! CHECK-SAME: %[[a:.*]]: !fir.box<!fir.array<?xf32>>{{.*}})
subroutine test3(a)
real :: a(:)
real, allocatable :: b(:), c(:)
interface
subroutine test3b(x)
real, allocatable :: x(:)
end subroutine test3b
end interface
interface
function test3c
real, allocatable :: test3c(:)
end function test3c
end interface
! CHECK: fir.call @_QPtest3b
! CHECK: %{{.*}}:3 = fir.box_dims %{{.*}}, %{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
! CHECK: %{{.*}} = fir.box_addr %{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
! CHECK: %[[tmp:.*]] = fir.allocmem f32, %c32
call test3b(b)
! CHECK: %[[hp1:.*]] = fir.allocmem !fir.array<?xf32>, %{{.*}} {uniq_name = ".array.expr"}
! CHECK-DAG: %[[rep:.*]] = fir.convert %{{.*}} : (!fir.heap<f32>) -> !fir.ref<i8>
! CHECK-DAG: %[[res:.*]] = fir.convert %{{.*}} : (index) -> i64
! CHECK: %{{.*}} = fir.call @realloc(%[[rep]], %[[res]]) : (!fir.ref<i8>, i64) -> !fir.ref<i8>
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%{{.*}}, %{{.*}}, %{{.*}}, %false{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
! CHECK: fir.call @_QPtest3c
! CHECK: fir.save_result
! CHECK: %[[tmp2:.*]] = fir.allocmem !fir.array<?xf32>, %{{.*}}#1 {uniq_name = ".array.expr"}
! CHECK: fir.call @realloc
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%
! CHECK: fir.array_coor %[[tmp:.*]](%{{.*}}) %{{.*}} : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>, index) -> !fir.ref<f32>
! CHECK-NEXT: fir.load
! CHECK-NEXT: fir.array_coor %arg0 %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> !fir.ref<f32>
! CHECK-NEXT: fir.store
! CHECK: fir.freemem %[[tmp]]
! CHECK: fir.freemem %[[tmp2]]
! CHECK: %[[alli:.*]] = fir.box_addr %{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
! CHECK: fir.freemem %[[alli]]
! CHECK: fir.freemem %[[hp1]]
a = (/ b, test3c() /)
end subroutine test3
! CHECK-LABEL: func @_QPtest4(
subroutine test4(a, b, n1, m1)
real :: a(:)
real :: b(:,:)
integer, external :: f1, f2, f3
! Dynamic array ctor with dynamic extent using implied do loops.
! CHECK-DAG: fir.alloca index {bindc_name = ".buff.pos"}
! CHECK-DAG: fir.alloca index {bindc_name = ".buff.size"}
! CHECK-DAG: %[[c32:.*]] = arith.constant 32 : index
! CHECK: fir.allocmem f32, %[[c32]]
! CHECK: fir.call @_QPf1(%{{.*}}) : (!fir.ref<i32>) -> i32
! CHECK: fir.call @_QPf2(%arg2) : (!fir.ref<i32>) -> i32
! CHECK: fir.call @_QPf3(%{{.*}}) : (!fir.ref<i32>) -> i32
! CHECK: %[[q:.*]] = fir.coordinate_of %arg1, %{{.*}}, %{{.*}} : (!fir.box<!fir.array<?x?xf32>>, i64, i64) -> !fir.ref<f32>
! CHECK: %[[q2:.*]] = fir.load %[[q]] : !fir.ref<f32>
! CHECK: fir.store %[[q2]] to %{{.*}} : !fir.ref<f32>
! CHECK: fir.freemem %{{.*}}
! CHECK-NEXT: return
a = [ ((b(i,j), j=f1(i),f2(n1),f3(m1+i)), i=1,n1,m1) ]
end subroutine test4
! CHECK-LABEL: func @_QPtest5(
! CHECK-SAME: %[[a:[^:]*]]: !fir.box<!fir.array<?xf32>>{{.*}}, %[[array2:[^:]*]]: !fir.ref<!fir.array<2xf32>>{{.*}})
subroutine test5(a, array2)
real :: a(:)
real, parameter :: const_array1(2) = [ 1.0, 2.0 ]
real :: array2(2)
! Array ctor with runtime element values and constant extents.
! Concatenation of array values of constant extent.
! CHECK: %[[res:.*]] = fir.allocmem !fir.array<4xf32>
! CHECK: fir.address_of(@_QQro.2xr4.057a7f5ab69cb695657046b18832c330) : !fir.ref<!fir.array<2xf32>>
! CHECK: %[[tmp1:.*]] = fir.allocmem !fir.array<2xf32>
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%{{.*}}, %{{.*}}, %{{.*}}, %false{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
! CHECK: %[[tmp2:.*]] = fir.allocmem !fir.array<2xf32>
! CHECK: = fir.array_coor %[[array2]](%{{.*}}) %{{.*}} : (!fir.ref<!fir.array<2xf32>>, !fir.shape<1>, index) -> !fir.ref<f32>
! CHECK: = fir.array_coor %[[tmp2]](%{{.*}}) %{{.*}} : (!fir.heap<!fir.array<2xf32>>, !fir.shape<1>, index) -> !fir.ref<f32>
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%{{.*}}, %{{.*}}, %{{.*}}, %false{{.*}}) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
! CHECK: = fir.array_coor %{{.*}}(%{{.*}}) %{{.*}} : (!fir.heap<!fir.array<4xf32>>, !fir.shape<1>, index) -> !fir.ref<f32>
! CHECK: = fir.array_coor %[[a]] %{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> !fir.ref<f32>
! CHECK-DAG: fir.freemem %{{.*}}
! CHECK-DAG: fir.freemem %[[tmp2]]
! CHECK-DAG: fir.freemem %[[tmp1]]
! CHECK: return
a = [ const_array1, array2 ]
end subroutine test5
! CHECK-LABEL: func @_QPtest6(
subroutine test6(c, d, e)
character(5) :: c(3)
character(5) :: d, e
! CHECK: = fir.allocmem !fir.array<2x!fir.char<1,5>>
! CHECK: fir.call @realloc
! CHECK: %[[t:.*]] = fir.coordinate_of %{{.*}}, %{{.*}} : (!fir.heap<!fir.array<2x!fir.char<1,5>>>, index) -> !fir.ref<!fir.char<1,5>>
! CHECK: %[[to:.*]] = fir.convert %[[t]] : (!fir.ref<!fir.char<1,5>>) -> !fir.ref<i8>
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%[[to]], %{{.*}}, %{{.*}}, %false) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
! CHECK: fir.call @realloc
! CHECK: %[[t:.*]] = fir.coordinate_of %{{.*}}, %{{.*}} : (!fir.heap<!fir.array<2x!fir.char<1,5>>>, index) -> !fir.ref<!fir.char<1,5>>
! CHECK: %[[to:.*]] = fir.convert %[[t]] : (!fir.ref<!fir.char<1,5>>) -> !fir.ref<i8>
! CHECK: fir.call @llvm.memcpy.p0i8.p0i8.i64(%[[to]], %{{.*}}, %{{.*}}, %false) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
! CHECK: fir.freemem %{{.*}}
c = (/ d, e /)
end subroutine test6
! CHECK-LABEL: func @_QPtest7(
! CHECK: %[[i:.*]] = fir.convert %{{.*}} : (index) -> i8
! CHECK: %[[und:.*]] = fir.undefined !fir.char<1>
! CHECK: %[[scalar:.*]] = fir.insert_value %[[und]], %[[i]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1>
! CHECK: ^bb{{[0-9]+}}(%{{.*}}: !fir.heap<!fir.char<1>>): // 2 preds
! CHECK: fir.store %[[scalar]] to %{{.*}} : !fir.ref<!fir.char<1>>
subroutine test7(a, n)
character(1) :: a(n)
a = (/ (CHAR(i), i=1,n) /)
end subroutine test7
! CHECK: fir.global internal @_QQro.3xr4.{{.*}}(dense<[1.000000e+00, 2.000000e+00, 3.000000e+00]> : tensor<3xf32>) constant : !fir.array<3xf32>
! CHECK: fir.global internal @_QQro.4xi4.{{.*}}(dense<[6, 7, 42, 9]> : tensor<4xi32>) constant : !fir.array<4xi32>
|
//
// Copyright (c) 2019 Vinnie Falco ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/vinniefalco/json
//
// Test that header file is self-contained.
#include <boost/json.hpp>
#include "test_suite.hpp"
namespace boost {
struct json_test
{
::test_suite::log_type log;
void
run()
{
using namespace json;
log <<
"sizeof(alignof)\n"
" object == " << sizeof(object) << " (" << alignof(object) << ")\n"
" value_type == " << sizeof(object::value_type) << " (" << alignof(object::value_type) << ")\n"
" array == " << sizeof(array) << " (" << alignof(array) << ")\n"
" string == " << sizeof(string) << " (" << alignof(string) << ")\n"
" value == " << sizeof(value) << " (" << alignof(value) << ")\n"
" serializer == " << sizeof(serializer) << "\n"
" basic_parser == " << sizeof(basic_parser) << "\n"
" parser == " << sizeof(parser)
;
BOOST_TEST_PASS();
}
};
TEST_SUITE(json_test, "boost.json.zsizes");
} // boost
|
English director Edgar Wright named Not Quite Hollywood his fourth favourite film of 2008, and called it "the best documentary ever."
|
namespace my_bool
inductive mybool : Type
| mytt : mybool
| myff : mybool
#check mybool.mytt
/-
We declare mybool to be an
inductively defined type:
a value of type, Type.
The set of values of this
type that can be constructed
is defined by the available
constructors.
Here there are just two,
mytt and myff, neither having
any arguments, so these are
the only two values of this
type: mytt and myff.
-/
-- We open the mybool namespace
open mybool
#check mytt
/-
With this data type in hand,
we can now define an "algebra"
involving such values. This will
be out little implementation of
Boolean algebra.
First we'll define the unary
functions involving values of
this type only. Then we'll do
a few binary functions, leaving
you a few more as exercises.
-/
/- Unary Operations -/
-- id_mybool returns what it's given
def id_mybool (b: mybool) : mybool := b
-- true_mybool always returns true
def true_mybool (b: mybool) := mytt
-- false_mybool always returns false
def false_mybool (b: mybool) := myff
-- not_mybool returns the other value
def not_mybool (b: mybool) :=
match b with
| mytt := myff
| myff := mytt
end
#reduce not_mybool mytt
#reduce not_mybool myff
/-
The match command does a kind of case
analysis on b. b, being of type, mybool,
can only have been "built" by the mytt
or myff constructors. In the first case,
the function returns myff. In the latter
case, it returns mytt.
-/
-- look, it works
#reduce id_mybool myff
#reduce id_mybool mytt
#reduce not_mybool myff
#reduce not_mybool mytt
/-
EXERCISE: use #reduce to test the other
two functions (false_ and true_mybool).
-/
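-- One possible check (expected results noted in the comments):
#reduce true_mybool myff    -- expect mytt
#reduce false_mybool mytt   -- expect myff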
/-
So we've just defined a data type and
some operations on values of this type.
Do our operations capture what we want,
which is to implement Boolean algebra?
Let's "verify" our software by proving
a proposition that we think should be
true: that if you apply not_mybool to
a value, b, of type mybool and apply it
again to the result, we should get back
to b.
-/
theorem not_inverse :
∀ b : mybool,
not_mybool (not_mybool b) = b :=
begin
intro b,
cases b,
apply rfl,
apply rfl,
end
/-
That's amazing. We didn't "test" our
software by running it with various
inputs. Rather we proved a fact about
its behavior on *all* possible values
of its inputs using logic.
-/
/-
Here's a binary function, and_mybool,
taking two mybools and returning one
as a result. We intend this function
to implement the Boolean and operator.
-/
def and_mybool' (b1 b2 : mybool) : mybool :=
match b1, b2 with
| mytt, mytt := mytt
| mytt, myff := myff
| myff, mytt := myff
| myff, myff := myff
end
#reduce and_mybool' mytt myff
/-
The new concept here is that we can match
on several arguments.
-/
/-
We notice that all of the combinations of
input values after mytt and mytt return
myff. We can use "wildcards" in matches to
match any value. Matches are attempted in
the order in which rules appear in code.
(It's important to try to match mytt, mytt
before applying the wildcarded rule! Why?)
-/
def and_mybool (b1 b2 : mybool) : mybool :=
match b1, b2 with
| mytt, mytt := mytt
| _, _ := myff
end
/-
EXERCISE: Now you should implement each of
the following Boolean operators:
- or, as or_mybool
- implies, as implies_mybool
-/
def or_mybool (b1 b2: mybool) : mybool :=
match b1, b2 with
| myff, myff := myff
| _, _ := mytt
end
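/-
A possible implementation of the implies operator asked
for in the exercise above: material implication is false
exactly when the first argument is mytt and the second
is myff.
-/
def implies_mybool (b1 b2 : mybool) : mybool :=
match b1, b2 with
| mytt, myff := myff
| _, _ := mytt
end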
/-
EXERCISE: To test that you have given
valid implementations, state and prove
the propositon that for any values, b1
and b2, not_mybool (and_mybool b1 b2) =
or_mybool (not_mybool b1) (not_mybool b2).
-/
theorem demorgan1 : ∀ b1 b2 : mybool,
not_mybool
(and_mybool b1 b2)
=
or_mybool
(not_mybool b1)
(not_mybool b2) :=
begin
intros b1 b2,
cases b1,
cases b2,
apply rfl,
apply rfl,
cases b2,
apply rfl,
apply rfl,
end
theorem demorgan2 :
∀ b1 b2 : mybool, not_mybool (or_mybool b1 b2) = and_mybool (not_mybool b1) (not_mybool b2) :=
begin
intros b1 b2,
cases b1,
cases b2,
apply rfl,
apply rfl,
cases b2,
apply rfl,
apply rfl,
end
/-
EXERCISE: State and prove the other
DeMorgan Law for Boolean algebra.
-/
end my_bool
|
### A Pluto.jl notebook ###
# v0.17.4
using Markdown
using InteractiveUtils
# ╔═╡ 57ed2d88-970d-4b8d-9f36-7115f50622de
begin
using LinearAlgebra # for the norm and dot function
using ForwardDiff # for automatic differentiation
end
# ╔═╡ 3b36fb00-833f-11ec-0d4d-451fa6f0aaee
md"""
# Programming tapas (I)
These are a few, hopefully small, tasks to practice Julia.
- **Gradient descent**
- Array interface
"""
# ╔═╡ 0efbf532-66cb-41fd-98a5-6df89e364b84
md"""## Gradient descent with backtracking line search
Write a function `gradient_decent(f, x0, α, β)` which implements gradient descent with backtracking line search.
You can follow the notation from these slides (or your own).
The pseudo-algorithms are:
"""
# ╔═╡ 67ab9ac3-93e0-4d9a-84aa-131aaf543198
md"""
```julia
Gradient descent:
given a starting point x_0
x = x_0
repeat until stopping criterion
Compute descent direction Δx = -∇f(x).
Line search: Choose a step size t > 0.
Update: x = x + t * Δx
Stopping criterion: ‖ ∇f(x) ‖² < ε
end
```
"""
# ╔═╡ a5e2ee29-8de1-41be-8b05-8427b1a65bce
md"""
```julia
Backtracking line search:
given x, f, Δx, α, β
t = 1
repeat until f(x + t Δx) < f(x) + α t dot( ∇f(x), Δx )
Update: t = β t
end
```
Source: [https://web.stanford.edu/class/ee364a/lectures/unconstrained.pdf](https://web.stanford.edu/class/ee364a/lectures/unconstrained.pdf)
"""
# ╔═╡ 304398ad-f680-4f8c-8a76-5768c0fbfa5e
# ╔═╡ 2c48c94e-7d31-450c-b3c7-0194894c38b7
# Backtracking line search. With the descent direction Δx = -∇f(x) we have
# dot(∇f(x), Δx) = -‖Δx‖², so the acceptance test from the pseudocode becomes
# f(x + t Δx) ≤ f(x) - α t ‖Δx‖².
function linesearch(f, Δx, x, α, β; t_min = 1e-5)
    t = 1.0
    fx = f(x)
    m = norm(Δx)
    while t > t_min
        if f(x .+ t*Δx) <= fx - α * t * m^2
            return t
        end
        t = t*β       # shrink the step until the condition holds
    end
    return NaN        # no admissible step above t_min was found
end
# ╔═╡ ae9e20b2-4f88-4c28-8856-ce61368445a3
function gradientdecent(f, x0, α, β, ε; max_steps = 1e4)
x = copy(x0)
i = 0
∇f = similar(x0)
@assert β < 1
@assert α < 0.5
while i < max_steps
ForwardDiff.gradient!(∇f, f, x)
Δx = -∇f
if norm(∇f)^2 < ε
return x
end
t = linesearch(f, Δx, x, α, β)
x .+= t * Δx
i += 1
end
return NaN
end
# ╔═╡ e331926f-6e11-434c-a7d3-02d0e9f0f269
begin
f(x) = exp(x[1]+3x[2]-0.1) + exp(x[1]-3x[2]-0.1) + exp(-x[1]-0.1)
α = 0.25
β = 0.5
ε = 1e-8
x0 = [0.0, 0.0]
gradientdecent(f, x0, α, β, ε)
end
# ╔═╡ 7a5cfa1b-9b47-4741-8656-210a3515b6f3
# ╔═╡ 81361eb6-092c-4cd1-96a0-eb9e167dfd3f
# ╔═╡ b04a090b-e9d7-4032-b956-1238fac26f94
md"## Array interpolation"
# ╔═╡ 3413f532-50ec-47f5-9547-4e9474be174e
md"""
We want to take regular vectors, but have an automated way to compute in-between values with the syntax
```julia
x = [5, 7, 5]
x[1.5] == 6.0 # linear interpolation between 5 and 7
```
Implement
`Base.getindex(x::Vector{Float64}, p::Float64) = `
accordingly.
"""
# ╔═╡ 641f85f8-368a-45eb-9912-284eca29d83b
x = [5.0, 7.0, 5.0]
# ╔═╡ 9f62d306-25f2-4b8f-a691-7a0fa3f4aa03
# x[1.5]
# ╔═╡ 61eaf1c1-f033-4e36-8171-19ff6621cc82
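# One possible solution sketch for the interpolation exercise above
# (assumes 1 ≤ p ≤ length(x)): linearly interpolate between the two
# neighbouring integer indices.
function Base.getindex(x::Vector{Float64}, p::Float64)
    lo = floor(Int, p)        # index below p
    hi = ceil(Int, p)         # index above p
    lo == hi && return x[lo]  # p already is an integer index
    w = p - lo                # interpolation weight
    return (1 - w) * x[lo] + w * x[hi]
end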
# ╔═╡ 84c50e3b-b836-477f-81bb-121a2e5d055f
md"
## Merge sort
Implement (any variant) of merge sort.
You can for example try this strategy
```
MergeSort(arr[], l, r)
If r > l
1. Find the middle point to divide the array into two halves:
middle m = l+ (r-l)/2
2. Call mergeSort for first half:
Call mergeSort(arr, l, m)
3. Call mergeSort for second half:
Call mergeSort(arr, m+1, r)
4. Merge the two halves sorted in step 2 and 3:
Call merge(arr, l, m, r)
```
Source: https://www.geeksforgeeks.org/merge-sort/
"
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[compat]
ForwardDiff = "~0.10.25"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
julia_version = "1.7.1"
manifest_format = "2.0"
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[deps.Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[deps.ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "f9982ef575e19b0e5c7a98c6e75ee496c0f73a93"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.12.0"
[[deps.ChangesOfVariables]]
deps = ["ChainRulesCore", "LinearAlgebra", "Test"]
git-tree-sha1 = "bf98fa45a0a4cee295de98d4c1462be26345b9a1"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.2"
[[deps.CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[deps.Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "44c37b4636bc54afac5c574d2d02b625349d6582"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.41.0"
[[deps.CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[deps.Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[deps.DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[deps.DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[deps.DiffRules]]
deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "84083a5136b6abf426174a58325ffd159dd6d94f"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.9.1"
[[deps.Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[deps.DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[deps.Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[deps.ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "1bd6fc0c344fc0cbee1f42f8d2e7ec8253dda2d2"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.25"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[deps.InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[deps.IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[deps.JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.4.1"
[[deps.LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[deps.LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[deps.LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[deps.LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[deps.Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[deps.LinearAlgebra]]
deps = ["Libdl", "libblastrampoline_jll"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[deps.LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "e5718a00af0ab9756305a0392832c8952c7426c1"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.6"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[deps.MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[deps.MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[deps.Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[deps.MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[deps.NaNMath]]
git-tree-sha1 = "b086b7ea07f8e38cf122f5016af580881ac914fe"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.7"
[[deps.NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[deps.OpenBLAS_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"]
uuid = "4536629a-c528-5b80-bd46-f80d51c5b363"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[deps.OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[deps.Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[deps.Preferences]]
deps = ["TOML"]
git-tree-sha1 = "2cf929d64681236a2e074ffafb8d568733d2e6af"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.3"
[[deps.Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[deps.REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[deps.Random]]
deps = ["SHA", "Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[deps.SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[deps.Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[deps.SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[deps.SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "e6bf188613555c78062842777b116905a9f9dd49"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.1.0"
[[deps.StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "2884859916598f974858ff01df7dfc6c708dd895"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.3.3"
[[deps.Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[deps.TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[deps.Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[deps.Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[deps.Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[deps.libblastrampoline_jll]]
deps = ["Artifacts", "Libdl", "OpenBLAS_jll"]
uuid = "8e850b90-86db-534c-a0d3-1478176c7d93"
[[deps.nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[deps.p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─3b36fb00-833f-11ec-0d4d-451fa6f0aaee
# ╟─0efbf532-66cb-41fd-98a5-6df89e364b84
# ╟─67ab9ac3-93e0-4d9a-84aa-131aaf543198
# ╟─a5e2ee29-8de1-41be-8b05-8427b1a65bce
# ╟─304398ad-f680-4f8c-8a76-5768c0fbfa5e
# ╠═57ed2d88-970d-4b8d-9f36-7115f50622de
# ╠═2c48c94e-7d31-450c-b3c7-0194894c38b7
# ╠═ae9e20b2-4f88-4c28-8856-ce61368445a3
# ╠═e331926f-6e11-434c-a7d3-02d0e9f0f269
# ╟─7a5cfa1b-9b47-4741-8656-210a3515b6f3
# ╟─81361eb6-092c-4cd1-96a0-eb9e167dfd3f
# ╟─b04a090b-e9d7-4032-b956-1238fac26f94
# ╟─3413f532-50ec-47f5-9547-4e9474be174e
# ╠═641f85f8-368a-45eb-9912-284eca29d83b
# ╠═9f62d306-25f2-4b8f-a691-7a0fa3f4aa03
# ╟─61eaf1c1-f033-4e36-8171-19ff6621cc82
# ╟─84c50e3b-b836-477f-81bb-121a2e5d055f
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002
|
Require Export prosa.model.task.concept.
(** * The Sporadic Task Model *)
(** In the following, we define the arrival process commonly known as the
sporadic task model, where jobs may arrive at any time provided any two
jobs of a task are separated by at least the minimum inter-arrival time (or
period) of the task. *)
(** ** Task Parameter for the Sporadic Task Model *)
(** Under the sporadic task model, each task is characterized by its minimum
inter-arrival time, which we denote as [task_min_inter_arrival_time]. *)
Class SporadicModel (Task : TaskType) :=
task_min_inter_arrival_time : Task -> duration.
(** ** Model Validity *)
(** Next, we define the semantics of the sporadic task model. *)
Section ValidSporadicTaskModel.
(** Consider any type of sporadic tasks. *)
Context {Task : TaskType} `{SporadicModel Task}.
(** A valid sporadic task should have a non-zero minimum inter-arrival
time. *)
Definition valid_task_min_inter_arrival_time tsk :=
task_min_inter_arrival_time tsk > 0.
(** Further, in the context of a set of such tasks, ... *)
Variable ts : TaskSet Task.
(** ... every task in the set should have a valid inter-arrival time. *)
Definition valid_taskset_inter_arrival_times :=
forall tsk : Task,
tsk \in ts -> valid_task_min_inter_arrival_time tsk.
(** Next, consider any type of jobs stemming from these tasks ... *)
Context {Job : JobType} `{JobTask Job Task} `{JobArrival Job}.
(** ... and an arbitrary arrival sequence of such jobs. *)
Variable arr_seq : arrival_sequence Job.
(** We say that a task respects the sporadic task model if the arrivals of
its jobs in the arrival sequence are appropriately spaced in time. *)
Definition respects_sporadic_task_model (tsk : Task) :=
forall (j j': Job),
(** Given two different jobs j and j' ... *)
j <> j' ->
(** ...that belong to the arrival sequence... *)
arrives_in arr_seq j ->
arrives_in arr_seq j' ->
(** ... and that stem from the given task, ... *)
job_task j = tsk ->
job_task j' = tsk ->
(** ... if the arrival of j precedes the arrival of j' ..., *)
job_arrival j <= job_arrival j' ->
(** then the arrival of j and the arrival of j' are separated by at least
one period. *)
job_arrival j' >= job_arrival j + task_min_inter_arrival_time tsk.
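(** For example, if [task_min_inter_arrival_time tsk = 10], then jobs of [tsk] arriving at
times 0, 10, and 25 respect the sporadic task model, whereas arrivals at times 0, 10, and 15
do not, since the last two arrivals are separated by only 5 time units. *)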
(** Based on the above definition, we define the sporadic task model as
follows. *)
Definition taskset_respects_sporadic_task_model :=
forall tsk, tsk \in ts -> respects_sporadic_task_model tsk.
End ValidSporadicTaskModel.
|
% The COBRAToolbox: testLP9.m
%
% Purpose:
% - test whether the bug fix in LP9 affects the results for a test model
% (without coupling constraints)
%
% Authors:
% Agnieszka Wegrzyn 2019/04/09, fix compatibility for models with coupling constraints
%
global CBTDIR
% define the features required to run the test
requireOneSolverOf = {'gurobi','ibm_cplex'};
% require at least one of the specified solvers and exclude solvers that cannot run this test
solversPkgs = prepareTest('requireOneSolverOf', requireOneSolverOf, 'excludeSolvers', {'matlab', 'lp_solve','pdco'});
% save the current path and initialize the test
currentDir = cd(fileparts(which(mfilename)));
% determine the test path for references
testPath = pwd;
% load the model
model = getDistributedModel('ecoli_core_model.mat'); %For all models in the test/models folder and subfolders
% load reference data and input variables
load('testData_LP9.mat');
% run the test with each available LP solver
for k = 1:length(solversPkgs.LP)
fprintf(' -- Running testLP9.m using the solver interface: %s ... ', solversPkgs.LP{k});
solverLPOK = changeCobraSolver(solversPkgs.LP{k}, 'LP', 0);
if solverLPOK
% compute the objective value with the patched version of LP9
V = LP9(options.K, options.P, model, options.LPproblem, options.epsilon);
solTest = model.c'*V;
% the result must match the reference solution computed before the fix
assert(isequal(solOri,solTest), '\nResults are not consistent between old and new version of LP9\n')
end
% output a success message
fprintf('\nDone.\n');
end
% change the directory
cd(currentDir)
|
------------------------------------------------------------------------------
-- Group theory congruence proofs using pattern matching
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module LogicalFramework.GroupCongruence where
open import GroupTheory.Base
·-cong : ∀ {a b c d} → a ≡ b → c ≡ d → a · c ≡ b · d
·-cong refl refl = refl
·-leftCong : ∀ {a b c} → a ≡ b → a · c ≡ b · c
·-leftCong refl = refl
·-rightCong : ∀ {a b c} → b ≡ c → a · b ≡ a · c
·-rightCong refl = refl
⁻¹-cong : ∀ {a b} → a ≡ b → a ⁻¹ ≡ b ⁻¹
⁻¹-cong refl = refl
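-- A small derived congruence, included only as a usage illustration (the name is ours and is
-- not part of the original development): inversion applied to a left-multiplied equation.
·⁻¹-leftCong : ∀ {a b c} → a ≡ b → (a · c) ⁻¹ ≡ (b · c) ⁻¹
·⁻¹-leftCong h = ⁻¹-cong (·-leftCong h)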
|
const SingleView{A,T} = SubArray{T,0,A,Tuple{Int64},true}
# Access to underlying data
_structure(x) = getfield(x, :structure)
_data(x) = getfield(x, :data)
# Need these function barriers for type stability
Base.@propagate_inbounds _getprop(elem::SingleView) = elem[]
Base.@propagate_inbounds _getprop(elem) = elem
function _setvalue!(arr, val)
arr[] = val
return nothing
end
Base.@propagate_inbounds Base.getproperty(ms::ModelingStruct, key::Symbol) = _getprop(getproperty(_structure(ms), key))
Base.@propagate_inbounds function Base.setproperty!(ms::ModelingStruct, key::Symbol, val)
d = _structure(ms)
prop = getproperty(d, key)
_setvalue!(prop, val)
return nothing
end
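# Usage sketch (illustrative; `ModelingStruct` itself is defined elsewhere in this package):
# for a struct `ms` with a scalar entry `:a`, `ms.a` reads the value through its
# zero-dimensional `SingleView`, `ms.a = 2.0` writes it back into the underlying data via
# `_setvalue!`, and `ms[i]` / `ms[:] = v` index straight into `_data(ms)`.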
Base.getindex(ms::ModelingStruct, i) = getindex(_data(ms), i)
Base.getindex(ms::ModelingStruct, key::Union{Symbol, String}) = getproperty(ms, Symbol(key))
Base.setindex!(ms::ModelingStruct, val, i...) = setindex!(_data(ms), val, i...)
function Base.setindex!(ms::ModelingStruct, val, ::Colon)
for i in eachindex(ms)
setindex!(ms, val, i)
end
return nothing
end
function Base.setindex!(ms::ModelingStruct, val::AbstractArray, ::Colon)
ms .= val
return nothing
end
Base.setindex!(ms::ModelingStruct, val, key::Union{Symbol, String}) = setproperty!(ms, Symbol(key), val)
|
Though Nathan was slated to make $6 million in 2008, on March 24, 2008, the Minnesota Twins re-signed Nathan to a four-year, $47 million contract through 2011. The deal also includes a $12.5 million club option for 2012 with a $2 million buyout.
|
module Ratio
%access public export
-- TODO: This could probably all be better...
gcd : (Integral a, Abs a, Eq a) => a -> a -> a
gcd n m = gcd' (abs n) (abs m)
where
gcd' : a -> a -> a
gcd' n m =
if m == 0
then n
else gcd' m (n `mod` m)
nonZero : (Integral a, Eq a) => a -> Bool
nonZero n = n /= 0
infixl 7 .%
data Ratio : Type -> Type where
(.%) : (Integral a, Eq a, Abs a, Ord a, Neg a) => a -> a -> Ratio a
infixl 7 :%
(:%) : (Integral a, Eq a, Abs a, Ord a, Neg a) =>
a -> a -> Maybe (Ratio a)
(:%) n m =
case m == 0 of
False =>
let cd = gcd n m
num = (n `div` cd) -- TODO: Should be `quot`?
den = (m `div` cd) -- TODO: Should be `quot`?
in Just $ num .% den
True => Nothing
Rational : Type
Rational = Ratio Integer
numerator : Ratio a -> a
numerator (n .% _) = n
denominator : Ratio a -> a
denominator (_ .% d) = d
Eq a => Eq (Ratio a) where
(a .% b) == (c .% d) = (a * d) == (c * b)
Ord a => Ord (Ratio a) where
compare (a .% b) (c .% d) = compare (a * d) (c * b)
rationalDiv : Ratio a -> Ratio a -> Maybe (Ratio a)
rationalDiv (a .% b) (c .% d) = (a * d) :% (b * c)
-- rationalMod : Ratio a -> Ratio a -> Maybe (Ratio a)
-- rationalMod a b = (a - b) * a `div` b
rationalAdd : Ratio a -> Ratio a -> Maybe (Ratio a)
rationalAdd (a .% b) (c .% d) = ((a * d) + (b * c)) :% (b * d)
rationalMul : Ratio a -> Ratio a -> Maybe (Ratio a)
rationalMul (a .% b) (c .% d) = (a * c) :% (b * d)
rationalFromInteger : Integer -> Maybe (Ratio Integer)
rationalFromInteger i = (fromInteger i) :% 1
rationalNegate : Ratio a -> Maybe (Ratio a)
rationalNegate (a .% b) = (negate a) :% b
rationalSub : Ratio a -> Ratio a -> Maybe (Ratio a)
rationalSub (a .% b) (c .% d) = ((a * d) - (c * b)) :% (b * d)
Show a => Show (Ratio a) where
show (n .% d) = show n ++ " % " ++ show d
rationalCast : Rational -> Maybe Double
rationalCast (a .% b) =
case b == 0 of
False => Just $ (cast a) / (cast b)
True => Nothing
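-- Usage sketch: `(:%)` is the normalising smart constructor, so `2 :% 4` reduces via `gcd`
-- to `Just (1 .% 2)`, while a zero denominator such as `1 :% 0` yields `Nothing`;
-- `rationalCast` likewise returns `Nothing` rather than dividing by zero.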
|
PROGRAM PROG2
C External Functions
INTEGER SLASH, MAXELE, MINELE
PARAMETER(WIDTH = 30,
& HEIGHT = 20,
& GRIDWD = 61,
& LARGENUM = 100000000,
& TICKS =
$ "|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|")
C
C
CHARACTER GRID(61)
CHARACTER STR*61
INTEGER XVAL(100), YVAL(100)
INTEGER I, J, NUMOBS, MAXY, MAXX, MINX, HORISP, VERTSP, VLINE
I = 0
DO WHILE ( .NOT. EOI )
READ(*,*) XVAL(I),YVAL(I)
I = I + 1
ENDDO
NUMOBS = I
CALL SORT(YVAL,XVAL,NUMOBS)
MAXY = YVAL(0)
VERTSP = SLASH(MAXY,HEIGHT)
MAXX = XVAL(MAXELE(XVAL,NUMOBS))
MINX = XVAL(MINELE(XVAL,NUMOBS))
IF (ABS(MINX) .GT. ABS(MAXX)) THEN
HORISP = SLASH(ABS(MINX),WIDTH)
ELSE
HORISP = SLASH(ABS(MAXX),WIDTH)
END IF
STR = ' X AXIS'
WRITE (*,*) STR,SKIP
I = 0
VLINE = HEIGHT
DO WHILE (VLINE .GT. 0)
J = 0
IF (MOD(VLINE,5) .EQ. 0) THEN
CALL UNPACK(TICKS,GRID)
ELSE
DO WHILE (J .LT. GRIDWD)
GRID(J) = " "
J = J + 1
ENDDO
END IF
VLINE = VLINE - 1
DO WHILE (VLINE*VERTSP .LT. YVAL(I))
IF (XVAL(I) .GE. 0) THEN
GRID(WIDTH + SLASH(XVAL(I),HORISP)) = "*"
ELSE
GRID(WIDTH - SLASH(-XVAL(I),HORISP)) = "*"
END IF
I = I + 1
ENDDO
GRID(WIDTH) = "|"
CALL PACK(GRID,STR)
WRITE(*,*) STR,SKIP
ENDDO
STR =
$ "|----|----|----|----|----|----|----|----|----|----|----|----|"
CALL UNPACK (STR,GRID)
DO WHILE ((0 .LT. YVAL(I)) .AND. (I .LT. NUMOBS))
IF (XVAL(I) .GE. 0) THEN
GRID(WIDTH + SLASH(XVAL(I),HORISP)) = "*"
ELSE
GRID(WIDTH - SLASH(-XVAL(I),HORISP)) = "*"
END IF
I = I + 1
ENDDO
CALL PACK(GRID,STR)
WRITE(*,*) STR,SKIP
STR = " Y AXIS"
WRITE(*,*) STR,SKIP
END
* Program
SUBROUTINE SORT (KEYBUF,FREEBUF,N)
INTEGER KEYBUF(*)
INTEGER FREEBUF(*)
INTEGER N
INTEGER I, MAXP
INTEGER SRTKEYB(100), SRTFREEB(100)
I = 0
DO WHILE (I .LT. N)
SRTKEYB(I) = KEYBUF(I)
SRTFREEB(I) = FREEBUF(I)
I = I + 1
ENDDO
I = N
DO WHILE (I .GT. 0)
MAXP = MAXELE(SRTKEYB,I)
KEYBUF(N-I) = SRTKEYB(MAXP)
FREEBUF(N-I) = SRTFREEB(MAXP)
CALL REMOVE(SRTKEYB,MAXP,I)
CALL REMOVE(SRTFREEB,MAXP,I)
I = I - 1
ENDDO
END
* SORT
INTEGER FUNCTION MAXELE (BUF,N)
INTEGER BUF(*)
INTEGER N
PARAMETER(WIDTH = 30,
& HEIGHT = 20,
& GRIDWD = 61,
& LARGENUM = 100000000,
& TICKS =
$ "|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|")
INTEGER I, MAXPTR, MAX
MAXPTR = -1
MAX = -LARGENUM
I = 0
DO WHILE (I .LT. N)
IF (BUF(I) .GT. MAX) THEN
MAX = BUF(I)
MAXPTR = I
END IF
I= I + 1
ENDDO
MAXELE = MAXPTR
END
* MAXELE
INTEGER FUNCTION MINELE (BUF,N)
INTEGER BUF(*)
INTEGER N
PARAMETER(WIDTH = 30,
& HEIGHT = 20,
& GRIDWD = 61,
& LARGENUM = 100000000,
& TICKS =
$ "|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|- -|")
INTEGER I, MINPTR, MIN
MINPTR = -1
MIN = LARGENUM
I = 0
DO WHILE (I .LT. N)
IF (BUF(I) .LT. MIN) THEN
MIN = BUF(I)
MINPTR = I
END IF
I = I + 1
ENDDO
MINELE = MINPTR
END
* MINELE
SUBROUTINE REMOVE (BUF,PTR,N)
INTEGER BUF(*)
INTEGER PTR
INTEGER N
INTEGER I
I = PTR
DO WHILE (I .LT. N-1)
BUF(I) = BUF(I+1)
I = I + 1
ENDDO
END
* REMOVE
INTEGER FUNCTION ABS (VAL)
INTEGER VAL
IF (VAL .LT. 0) THEN
ABS = -VAL
ELSE
ABS = VAL
END IF
END
* ABS
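C SLASH performs the scaled integer division used for grid placement.
C For the positive arguments used in this program it rounds the quotient
C up (ceiling division); a negative numerator shifts the truncated
C quotient up by one.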
INTEGER FUNCTION SLASH (TOP,BOT)
INTEGER TOP
INTEGER BOT
INTEGER RES
RES = TOP/BOT
IF ((TOP .NE. RES*BOT) .AND. (TOP .GT. 0) .AND. (BOT .GT. 0)
$ .OR. ((TOP .LT. 0) .AND. (BOT .GT. 0))) THEN
RES = RES + 1
END IF
SLASH = RES
END
* SLASH
INTEGER FUNCTION MOD (N,M)
INTEGER N,M
INTEGER VAL
VAL = N-N/M*M
IF (VAL .LT. 0) THEN
VAL = VAL + M
END IF
MOD = VAL
END
* MOD
|
\documentclass{article}
\begin{document}
\section{COBOL}
\begin{itemize}
\item OpenCobolIDE is a specialized IDE designed for working with COBOL.
\item It provides features such as code completion, which helps when working with COBOL code.
\item COBOL applications carry out functions such as processing payroll, booking airline tickets, operating banking systems, and managing government pension funds.
\item It is mainly used in the finance sector and in administrative systems for companies and governments.
\item Python, C, and Java are some programming languages related to COBOL
(Mitchell, Robert L. 14 March 2012).
\end{itemize}
\end{document} |
lemma compact_insert [simp]: assumes "compact s" shows "compact (insert x s)" |
Level 2 NVQ Diploma in Wood Occupations (Timber Frame Erection) - Q2W Ltd.
The Level 2 NVQ Diploma in Wood Occupations – Timber Frame Erection is designed to assess occupational competence in the workplace. Learners are required to demonstrate their skills, knowledge and experience in the construction of timber frame buildings. This qualification is suitable for practicing timber frame carpenters.
This NVQ provides formal recognition of practicing as a timber frame carpenter.
There are no formal entry requirements for this NVQ qualification; however, the potential learner must be able to demonstrate that they can meet the skills, knowledge and experience requirements of the NVQ in timber frame carpentry before enrolment.
The Level 2 NVQ Diploma in Wood Occupations – Timber Frame Erection qualifies the holder to apply for the Construction Skills Certification Scheme (CSCS) blue skill card (https://www.cscs.uk.com/card-type/manager/). |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE UndecidableInstances #-}
-----------------------------------------------------------------------------
-- |
-- Module : Graphics.Rendering.Plot.Figure
-- Copyright : (c) A. V. H. McPhail 2010
-- License : BSD3
--
-- Maintainer : haskell.vivian.mcphail <at> gmail <dot> com
-- Stability : provisional
-- Portability : portable
--
-- Creation and manipulation of 'Figure's
--
-- The same problem of leaked instances as at <http://hackage.haskell.org/packages/archive/graphviz/2999.10.0.1/doc/html/Data-GraphViz-Commands.html#t%3AGraphvizCanvas> occurs here.
--
--
-- /with/, /set/, /clear/, /new/, and /add/ are the operations that can
-- be performed on various elements of a figure.
--
-- /glib/\//data-accessor/ abstractions (verbs/modifiers) are planned for future implementations
-----------------------------------------------------------------------------
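-- A minimal usage sketch (added for illustration; rendering entry points live in other modules
-- of this package, and the argument types of 'setText' and 'setFontSize' are assumed to be a
-- plain string and a numeric size, as the re-exported names below suggest):
--
-- > myFigure :: Figure ()
-- > myFigure = do
-- >   withTextDefaults $ setFontSize 12
-- >   withTitle $ setText "An example figure"
-- >   setPlots 1 1
-- >   withPlot (1,1) $ withHeading $ setText "first sub-plot"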
module Graphics.Rendering.Plot.Figure (
module Data.Colour.Names
-- * Top level operation
, Figure(), FigureState()
-- * Default options
, withTextDefaults
, withLineDefaults
, withPointDefaults
, withBarDefaults
-- * Figures
, newFigure
-- ** Formatting
, setBackgroundColour
, setFigurePadding
, withTitle
, withSubTitle
, setPlots
, withPlot, withPlots
-- * Sub-plots
, Plot()
-- ** Colour
, setPlotBackgroundColour
-- ** Plot elements
, Border
, setBorder
, setPlotPadding
, withHeading
-- ** Series data
, Function(), VectorFunction(), Series(), MinMaxSeries(), ErrorSeries()
, Surface()
, SeriesLabel()
, Abscissa(), Ordinate(), Dataset()
, FormattedSeries(), SeriesType(..)
, line, point, linepoint
, impulse, step
, area
, bar
, hist
, candle, whisker
, setDataset
-- * Annotations
, Location, Head, Fill
, Annote()
, arrow
, oval
, rect
, glyph
, text
, cairo
, withAnnotations
-- ** Plot type
, setSeriesType
, setAllSeriesTypes
-- ** Formatting
, PlotFormats()
, withSeriesFormat
, withAllSeriesFormats
-- * Range
, Scale(..)
, setRange
, setRangeFromData
-- * Axes
, Axis
, AxisType(..),AxisSide(..),AxisPosn(..)
, clearAxes
, clearAxis
, addAxis
, withAxis
-- * BarSetting
, BarSetting(..)
, barSetting
-- * Data Sampling
, SampleData
, sampleData
-- * Legend
, Legend
, LegendBorder
, LegendLocation(..), LegendOrientation(..)
, clearLegend
, setLegend
, withLegendFormat
-- ** Formatting
, Tick(..), TickValues(..), GridLines
, TickFormat(..)
, setTicks
, setGridlines
, setTickLabelFormat
, setTickLabels
, withTickLabelsFormat
, withAxisLabel
, withAxisLine
, withGridLine
-- * Lines
, Line(), LineFormat()
, DashStyle,Dash(..),LineWidth
, clearLineFormat
, setDashStyle
, setLineWidth
, setLineColour
-- * Points
, Point(), PointFormat()
, Glyph(..)
, PointSize
, setGlyph
, setPointSize
, setPointColour
-- * Bars
, Bar(), BarFormat()
, clearBarFormat
, setBarWidth
, setBarColour
, setBarBorderWidth
, setBarBorderColour
-- * Text labels
, Text()
, FontFamily,FontSize,Color
-- | A text element must exist for formatting to work
, clearText
, clearTextFormat
, setText
, setFontFamily
, setFontStyle
, setFontVariant
, setFontWeight
, setFontStretch
, setFontSize
, setFontColour
) where
-----------------------------------------------------------------------------
--import Data.Packed.Vector
--import Numeric.LinearAlgebra.Linear
--import Data.Word
--import Data.Colour.SRGB
import Data.Colour.Names
import qualified Data.Array.IArray as A
--import qualified Graphics.Rendering.Cairo as C
--import qualified Graphics.Rendering.Pango as P
--import Control.Monad.State
--import Control.Monad.Reader
import Prelude hiding(min,max)
import Graphics.Rendering.Plot.Figure.Text
import Graphics.Rendering.Plot.Figure.Line
import Graphics.Rendering.Plot.Figure.Point
import Graphics.Rendering.Plot.Figure.Bar
import Graphics.Rendering.Plot.Figure.Plot
import Graphics.Rendering.Plot.Types
import Graphics.Rendering.Plot.Defaults
-----------------------------------------------------------------------------
-- | perform some actions on the text defaults, must be run before other text element modifications
withTextDefaults :: Text () -> Figure ()
withTextDefaults m = do
o <- getDefaults
let to' = _textoptions o
let (FontText to _) = execText m to' (FontText to' "")
modifyDefaults $ \s -> s { _textoptions = to }
-- | perform some actions on the line defaults, must be run before other line element modifications
withLineDefaults :: Line () -> Figure ()
withLineDefaults m = do
o <- getDefaults
let lo' = _lineoptions o
let (TypeLine lo _) = execLine m lo' (TypeLine lo' black)
modifyDefaults $ \s -> s { _lineoptions = lo }
-- | perform some actions on the point defaults, must be run before other point modifications
withPointDefaults :: Point () -> Figure ()
withPointDefaults m = do
o <- getDefaults
let po' = _pointoptions o
let (FullPoint po _) = execPoint m po' (FullPoint po' defaultGlyph)
modifyDefaults $ \s -> s { _pointoptions = po }
-- | perform some actions on the bar defaults, must be run before other point modifications
withBarDefaults :: Bar () -> Figure ()
withBarDefaults m = do
o <- getDefaults
let bo' = _baroptions o
let (TypeBar bo _) = execBar m bo' (TypeBar bo' black)
modifyDefaults $ \s -> s { _baroptions = bo }
-----------------------------------------------------------------------------
-- | create a new blank 'Figure'
newFigure :: Figure ()
newFigure = putFigure $ Figure defaultFigureBackgroundColour
defaultFigurePadding NoText NoText
(A.listArray ((1,1),(1,1)) [Nothing])
{-
newLineFigure :: DataSeries -- ^ the y series
-> FigureData
newLineFigure d@(DS_1toN _ _) = let ((xmin,xmax),(ymin,ymax)) = calculateRanges d
plot = Plot False defaultPlotPadding NoText
(defaultRanges xmin xmax ymin ymax)
[defaultXAxis,defaultYAxis]
Nothing Line d []
in Figure defaultFigurePadding NoText NoText
(A.listArray ((1,1),(1,1)) [Just plot])
-}
{-
-- | create a new 'Figure'
newFigure :: PlotType -> DataSeries -> Figure ()
newFigure Line d@(DS_1toN _ _) = putFigure $ newLineFigure d
--newFigure _ _ = error "Figure type not implemented"
-}
-----------------------------------------------------------------------------
-- | set the background colour of the figure
setBackgroundColour :: Color -> Figure ()
setBackgroundColour c = modifyFigure $ \s -> s { _back_clr = c }
-- | set the padding of the figure
setFigurePadding :: Double -> Double -> Double -> Double -> Figure ()
setFigurePadding l r b t = modifyFigure $ \s ->
s { _fig_pads = Padding l r b t }
-- | operate on the title
withTitle :: Text () -> Figure ()
withTitle m = do
o <- getDefaults
modifyFigure $ \s ->
s { _title = execText m (_textoptions o) (_title s) }
-- | operate on the sub-title
withSubTitle :: Text () -> Figure ()
withSubTitle m = do
o <- getDefaults
modifyFigure $ \s ->
s { _subtitle = execText m (_textoptions o) (_subtitle s) }
-- | set the shape of the plots, losing all current plots
setPlots :: Int -- ^ rows
-> Int -- ^ columns
-> Figure ()
setPlots r c = modifyFigure $ \s ->
s { _plots = A.listArray ((1,1),(r,c))
(replicate (r*c) Nothing) }
-- | perform some actions on the specified subplot
withPlot :: (Int,Int) -> Plot () -> Figure ()
withPlot i m = do
o <- getDefaults
s <- getSupplies
modifyFigure $ \p ->
p { _plots = let plots = _plots p
plot' = plots A.! i
plot = case plot' of
Nothing -> emptyPlot
Just p' -> p'
-- we revert supplies to the original here
-- since we might want the same colour
-- order for all plots
-- HOWEVER: need a better execPlot group
in plots A.// [(i,Just $ execPlot m s o plot)] }
-- | perform some actions all subplots
withPlots :: Plot () -> Figure ()
withPlots m = do
o <- getDefaults
s <- getSupplies
modifyFigure $ \p ->
p { _plots = let plots = _plots p
plot p' = case p' of
Nothing -> emptyPlot
Just p'' -> p''
in plots A.// map (\(i,e) ->
(i,Just $ execPlot m s o (plot e))) (A.assocs plots) }
-----------------------------------------------------------------------------
|
import tableau order.lexicographic
open matrix fintype finset function pequiv partition
variables {m n : ℕ}
local notation `rvec`:2000 n := matrix (fin 1) (fin n) ℚ
local notation `cvec`:2000 m := matrix (fin m) (fin 1) ℚ
local infix ` ⬝ `:70 := matrix.mul
local postfix `ᵀ` : 1500 := transpose
namespace tableau
def pivot_linear_order (T : tableau m n) : decidable_linear_order (fin n) :=
decidable_linear_order.lift T.to_partition.colg (injective_colg _) (by apply_instance)
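/-- Entering-column selection for a simplex step on objective row `obj`, following Bland's rule.
First look for the column of smallest index whose objective coefficient is nonzero, whose column
variable is unrestricted, and which is not dead; failing that, among non-dead columns with a
positive objective coefficient, pick the one whose column variable is smallest. Returns `none`
when no column qualifies, in which case the current solution is optimal (`pivot_col_eq_none`). -/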
def pivot_col (T : tableau m n) (obj : fin m) : option (fin n) :=
option.cases_on
(fin.find (λ c : fin n, T.to_matrix obj c ≠ 0 ∧ T.to_partition.colg c ∉ T.restricted
∧ c ∉ T.dead))
(((list.fin_range n).filter (λ c : fin n, 0 < T.to_matrix obj c ∧ c ∉ T.dead)).argmin
T.to_partition.colg)
some
def pivot_row_linear_order (T : tableau m n) (c : fin n) : decidable_linear_order (fin m) :=
decidable_linear_order.lift
(show fin m → lex ℚ (fin (m + n)),
from λ r', (abs (T.const r' 0 / T.to_matrix r' c), T.to_partition.rowg r'))
(λ x y, by simp [T.to_partition.injective_rowg.eq_iff])
(by apply_instance)
section
local attribute [instance, priority 0] fin.has_le fin.decidable_linear_order
lemma pivot_row_linear_order_le_def (T : tableau m n) (c : fin n) :
@has_le.le (fin m)
(by haveI := pivot_row_linear_order T c; apply_instance) =
(λ i i', abs (T.const i 0 / T.to_matrix i c) < abs (T.const i' 0 / T.to_matrix i' c) ∨
(abs (T.const i 0 / T.to_matrix i c) = abs (T.const i' 0 / T.to_matrix i' c) ∧
T.to_partition.rowg i ≤ T.to_partition.rowg i')) :=
funext $ λ i, funext $ λ i', propext $ prod.lex_def _ _
end
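/-- Leaving-row selection for a pivot in column `c` on objective row `obj`. Among rows `r ≠ obj`
whose row variable is restricted and for which `T.to_matrix obj c / T.to_matrix r c < 0` (the
sign condition of the ratio test), pick the row minimising `|T.const r 0 / T.to_matrix r c|`,
breaking ties by the smallest row variable (Bland's rule). Returns `none` when no row qualifies,
in which case the objective is unbounded above (`pivot_row_eq_none`). -/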
def pivot_row (T : tableau m n) (obj : fin m) (c : fin n) : option (fin m) :=
let l := (list.fin_range m).filter (λ r : fin m, obj ≠ r ∧ T.to_partition.rowg r ∈ T.restricted
∧ T.to_matrix obj c / T.to_matrix r c < 0) in
@list.minimum _ (pivot_row_linear_order T c) l
lemma pivot_col_spec {T : tableau m n} {obj : fin m} {c : fin n} :
c ∈ pivot_col T obj → ((T.to_matrix obj c ≠ 0 ∧ T.to_partition.colg c ∉ T.restricted)
∨ (0 < T.to_matrix obj c ∧ T.to_partition.colg c ∈ T.restricted)) ∧ c ∉ T.dead :=
begin
simp [pivot_col],
cases h : fin.find (λ c : fin n, T.to_matrix obj c ≠ 0 ∧ T.to_partition.colg c ∉ T.restricted
∧ c ∉ T.dead),
{ finish [h, fin.find_eq_some_iff, fin.find_eq_none_iff, lt_irrefl, list.argmin_eq_some_iff] },
{ finish [fin.find_eq_some_iff] }
end
lemma nonpos_of_lt_pivot_col {T : tableau m n} {obj : fin m} {c j : fin n}
(hc : c ∈ pivot_col T obj) (hcres : T.to_partition.colg c ∈ T.restricted)
(hdead : j ∉ T.dead) (hjc : T.to_partition.colg j < T.to_partition.colg c) :
T.to_matrix obj j ≤ 0 :=
begin
rw [pivot_col] at hc,
cases h : fin.find (λ c, T.to_matrix obj c ≠ 0 ∧ colg (T.to_partition) c ∉ T.restricted
∧ c ∉ T.dead),
{ rw h at hc,
refine le_of_not_lt (λ hj0, _),
exact not_le_of_gt hjc ((list.mem_argmin_iff.1 hc).2.1 j
(list.mem_filter.2 (by simp [hj0, hdead]))) },
{ rw h at hc,
simp [*, fin.find_eq_some_iff] at * }
end
lemma pivot_col_eq_none {T : tableau m n} {obj : fin m} (hT : T.feasible)
(h : pivot_col T obj = none) : T.is_optimal (T.of_col 0) (T.to_partition.rowg obj) :=
is_optimal_of_col_zero hT
begin
revert h,
simp [pivot_col],
cases h : fin.find (λ c : fin n, T.to_matrix obj c ≠ 0 ∧ T.to_partition.colg c ∉ T.restricted
∧ c ∉ T.dead),
{ simp only [list.filter_eq_nil, forall_prop_of_true, list.argmin_eq_none,
list.mem_fin_range, not_and, not_not, fin.find_eq_none_iff] at *,
assume hj j hdead,
exact ⟨le_of_not_gt (λ h0, hdead (hj j h0)), by finish⟩ },
{ simp [h] }
end
lemma pivot_row_spec {T : tableau m n} {obj r : fin m} {c : fin n} :
r ∈ pivot_row T obj c →
obj ≠ r ∧ T.to_partition.rowg r ∈ T.restricted ∧
T.to_matrix obj c / T.to_matrix r c < 0 ∧
(∀ r' : fin m, obj ≠ r' → T.to_partition.rowg r' ∈ T.restricted →
T.to_matrix obj c / T.to_matrix r' c < 0 →
abs (T.const r 0 / T.to_matrix r c) ≤ abs (T.const r' 0 / T.to_matrix r' c)) :=
begin
simp only [list.mem_filter, pivot_row, option.mem_def, with_bot.some_eq_coe,
list.minimum_eq_coe_iff, list.mem_fin_range, true_and, and_imp],
rw [pivot_row_linear_order_le_def],
intros hor hres hr0 h,
simp only [*, true_and, ne.def, not_false_iff],
intros r' hor' hres' hr0',
cases h r' hor' hres' hr0',
{ exact le_of_lt (by assumption) },
{ exact le_of_eq (by tauto) }
end
lemma nonneg_of_lt_pivot_row {T : tableau m n} {obj : fin m} {r i : fin m} {c : fin n}
(hc0 : 0 < T.to_matrix obj c) (hres : T.to_partition.rowg i ∈ T.restricted)
(hc : c ∈ pivot_col T obj) (hr : r ∈ pivot_row T obj c)
(hconst : T.const i 0 = 0)
(hjc : T.to_partition.rowg i < T.to_partition.rowg r) :
0 ≤ T.to_matrix i c :=
if hobj : obj = i then le_of_lt $ hobj ▸ hc0
else
le_of_not_gt $ λ hic, not_le_of_lt hjc
begin
have := ((@list.minimum_eq_coe_iff _ (id _) _ _).1 hr).2 i
(list.mem_filter.2 ⟨list.mem_fin_range _, hobj, hres, div_neg_of_pos_of_neg hc0 hic⟩),
rw [pivot_row_linear_order_le_def] at this,
simp [hconst, not_lt_of_ge (abs_nonneg _), *] at *
end
lemma ne_zero_of_mem_pivot_row {T : tableau m n} {obj r : fin m} {c : fin n}
(hr : r ∈ pivot_row T obj c) : T.to_matrix r c ≠ 0 :=
assume hrc, by simpa [lt_irrefl, hrc] using pivot_row_spec hr
lemma ne_zero_of_mem_pivot_col {T : tableau m n} {obj : fin m} {c : fin n}
(hc : c ∈ pivot_col T obj) : T.to_matrix obj c ≠ 0 :=
λ h, by simpa [h, lt_irrefl] using pivot_col_spec hc
lemma pivot_row_eq_none_aux {T : tableau m n} {obj : fin m} {c : fin n}
(hrow : pivot_row T obj c = none) (hs : c ∈ pivot_col T obj) :
∀ r, obj ≠ r → T.to_partition.rowg r ∈ T.restricted → 0 ≤ T.to_matrix obj c / T.to_matrix r c :=
by simpa [pivot_row, list.filter_eq_nil] using hrow
lemma pivot_row_eq_none {T : tableau m n} {obj : fin m} {c : fin n} (hT : T.feasible)
(hrow : pivot_row T obj c = none) (hs : c ∈ pivot_col T obj) :
T.is_unbounded_above (T.to_partition.rowg obj) :=
have hrow : ∀ r, obj ≠ r → T.to_partition.rowg r ∈ T.restricted →
0 ≤ T.to_matrix obj c / T.to_matrix r c,
from pivot_row_eq_none_aux hrow hs,
have hc : ((T.to_matrix obj c ≠ 0 ∧ T.to_partition.colg c ∉ T.restricted)
∨ (0 < T.to_matrix obj c ∧ T.to_partition.colg c ∈ T.restricted)) ∧ c ∉ T.dead,
from pivot_col_spec hs,
have hToc : T.to_matrix obj c ≠ 0, from λ h, by simpa [h, lt_irrefl] using hc,
(lt_or_gt_of_ne hToc).elim
(λ hToc : T.to_matrix obj c < 0, is_unbounded_above_rowg_of_nonpos hT c
(hc.1.elim and.right (λ h, (not_lt_of_gt hToc h.1).elim)) hc.2
(λ i hi, classical.by_cases
(λ hoi : obj = i, le_of_lt (hoi ▸ hToc))
(λ hoi : obj ≠ i, inv_nonpos.1 $ nonpos_of_mul_nonneg_right (hrow _ hoi hi) hToc))
hToc)
(λ hToc : 0 < T.to_matrix obj c, is_unbounded_above_rowg_of_nonneg hT c
(λ i hi, classical.by_cases
(λ hoi : obj = i, le_of_lt (hoi ▸ hToc))
(λ hoi : obj ≠ i, inv_nonneg.1 $ nonneg_of_mul_nonneg_left (hrow _ hoi hi) hToc))
hc.2 hToc)
def feasible_of_mem_pivot_row_and_col {T : tableau m n} {obj : fin m} (hT : T.feasible) {c}
(hc : c ∈ pivot_col T obj) {r} (hr : r ∈ pivot_row T obj c) :
feasible (T.pivot r c) :=
begin
have := pivot_col_spec hc,
have := pivot_row_spec hr,
have := @feasible_simplex_pivot _ _ _ obj hT r c,
tauto
end
section blands_rule
local attribute [instance, priority 0] classical.dec
variable (obj : fin m)
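/-- The key combinatorial step of the Bland's-rule termination argument (used in `rel.irrefl`):
the listed hypotheses summarise a hypothetical cycling scenario relating two tableaux `T` and
`T'` with the same flat, and are shown to be contradictory. -/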
lemma not_unique_row_and_unique_col {T T' : tableau m n} {r c c'}
(hcobj0 : 0 < T.to_matrix obj c)
(hc'obj0 : 0 < T'.to_matrix obj c')
(hrc0 : T.to_matrix r c < 0)
(hflat : T.flat = T'.flat)
(hs : T.to_partition.rowg r = T'.to_partition.colg c')
(hrobj : T.to_partition.rowg obj = T'.to_partition.rowg obj)
(hfickle : ∀ i, T.to_partition.rowg i ≠ T'.to_partition.rowg i → T.const i 0 = 0)
(hobj : T.const obj 0 = T'.const obj 0)
(nonpos_of_colg_ne : ∀ j,
T'.to_partition.colg j ≠ T.to_partition.colg j → j ≠ c' → T'.to_matrix obj j ≤ 0)
(nonpos_of_colg_eq : ∀ j, j ≠ c' →
T'.to_partition.colg j = T.to_partition.colg c → T'.to_matrix obj j ≤ 0)
(unique_row : ∀ i ≠ r, T.const i 0 = 0 → T.to_partition.rowg i ≠ T'.to_partition.rowg i →
0 ≤ T.to_matrix i c) :
false :=
let objr := T.to_partition.rowg obj in
let x := λ y : ℚ, T.of_col (y • (single c 0).to_matrix) in
have hxflatT' : ∀ {y}, x y ∈ flat T', from hflat ▸ λ _, of_col_mem_flat _ _,
have hxrow : ∀ y i, x y (T.to_partition.rowg i) 0 = T.const i 0 + y * T.to_matrix i c,
by simp [x, of_col_single_rowg],
have hxcol : ∀ {y j}, j ≠ c → x y (T.to_partition.colg j) 0 = 0,
from λ y j hjc, by simp [x, of_col_colg, pequiv.to_matrix, single_apply_of_ne hjc.symm],
have hxcolc : ∀ {y}, x y (T.to_partition.colg c) 0 = y, by simp [x, of_col_colg, pequiv.to_matrix],
let c_star : fin (m + n) → ℚ := λ v, option.cases_on (T'.to_partition.colp.symm v) 0
(T'.to_matrix obj) in
have hxobj : ∀ y, x y objr 0 = T.const obj 0 + y * T.to_matrix obj c, from λ y, hxrow _ _,
have hgetr : ∀ {y v}, c_star v * x y v 0 ≠ 0 → (T'.to_partition.colp.symm v).is_some,
from λ y v, by cases h : T'.to_partition.colp.symm v; dsimp [c_star]; rw h; simp,
have c_star_eq_get : ∀ {v} (hv : (T'.to_partition.colp.symm v).is_some),
c_star v = T'.to_matrix obj (option.get hv),
from λ v hv, by dsimp only [c_star]; conv_lhs{rw [← option.some_get hv]}; refl,
have hsummmn : ∀ {y}, sum univ (λ j, T'.to_matrix obj j * x y (T'.to_partition.colg j) 0) =
sum univ (λ v, c_star v * x y v 0),
from λ y, sum_bij_ne_zero (λ j _ _, T'.to_partition.colg j) (λ _ _ _, mem_univ _)
(λ _ _ _ _ _ _ h, T'.to_partition.injective_colg h)
(λ v _ h0, ⟨option.get (hgetr h0), mem_univ _,
by rw [← c_star_eq_get (hgetr h0)]; simpa using h0, by simp⟩)
(λ _ _ h0, by dsimp [c_star]; rw [colp_colg]),
have hgetc : ∀ {y v}, c_star v * x y v 0 ≠ 0 → v ≠ T.to_partition.colg c →
(T.to_partition.rowp.symm v).is_some,
from λ y v, (eq_rowg_or_colg T.to_partition v).elim
(λ ⟨i, hi⟩, by rw [hi, rowp_rowg]; simp)
(λ ⟨j, hj⟩ h0 hvc,
by rw [hj, hxcol (mt (congr_arg T.to_partition.colg) (hvc ∘ hj.trans)), mul_zero] at h0;
exact (h0 rfl).elim),
have hsummmnn : ∀ {y}, (univ.erase (T.to_partition.colg c)).sum (λ v, c_star v * x y v 0) =
univ.sum (λ i, c_star (T.to_partition.rowg i) * x y (T.to_partition.rowg i) 0),
from λ y, eq.symm $ sum_bij_ne_zero (λ i _ _, T.to_partition.rowg i) (by simp)
(λ _ _ _ _ _ _ h, T.to_partition.injective_rowg h)
(λ v hvc h0, ⟨option.get (hgetc h0 (mem_erase.1 hvc).1), mem_univ _, by simpa using h0⟩)
(by intros; refl),
have hsumm : ∀ {y}, univ.sum (λ i, c_star (T.to_partition.rowg i) * x y (T.to_partition.rowg i) 0) =
univ.sum (λ i, c_star (T.to_partition.rowg i) * T.const i 0) +
y * univ.sum (λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c),
from λ y, by simp only [hxrow, mul_add, add_mul, sum_add_distrib, mul_assoc,
mul_left_comm _ y, mul_sum.symm],
have hxobj' : ∀ y, x y objr 0 = univ.sum (λ v, c_star v * x y v 0) + T'.const obj 0,
from λ y, by dsimp [objr]; rw [hrobj, mem_flat_iff.1 hxflatT', hsummmn],
have hy : ∀ {y}, y * T.to_matrix obj c = c_star (T.to_partition.colg c) * y +
univ.sum (λ i, c_star (T.to_partition.rowg i) * T.const i 0) +
y * univ.sum (λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c),
from λ y, by rw [← add_left_inj (T.const obj 0), ← hxobj, hxobj',
← insert_erase (mem_univ (T.to_partition.colg c)), sum_insert (not_mem_erase _ _),
hsummmnn, hobj, hsumm, hxcolc]; simp,
have hy' : ∀ (y), y * (T.to_matrix obj c - c_star (T.to_partition.colg c) -
univ.sum (λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c)) =
univ.sum (λ i, c_star (T.to_partition.rowg i) * T.const i 0),
from λ y, by rw [mul_sub, mul_sub, hy]; simp [mul_comm, mul_assoc, mul_left_comm],
have h0 : T.to_matrix obj c - c_star (T.to_partition.colg c) -
univ.sum (λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c) = 0,
by rw [← (domain.mul_left_inj (@one_ne_zero ℚ _)), hy', ← hy' 0, zero_mul, mul_zero],
have hcolnec' : T'.to_partition.colp.symm (T.to_partition.colg c) ≠ some c',
from λ h,
by simpa [hs.symm] using congr_arg T'.to_partition.colg (option.eq_some_iff_get_eq.1 h).snd,
have eq_of_roweqc' : ∀ {i}, T'.to_partition.colp.symm (T.to_partition.rowg i) = some c' → i = r,
from λ i h, by simpa [hs.symm, T.to_partition.injective_rowg.eq_iff] using
congr_arg T'.to_partition.colg (option.eq_some_iff_get_eq.1 h).snd,
have sumpos : 0 < univ.sum (λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c),
by rw [← sub_eq_zero.1 h0]; exact add_pos_of_pos_of_nonneg hcobj0
(begin
simp only [c_star, neg_nonneg],
cases h : T'.to_partition.colp.symm (T.to_partition.colg c) with j,
{ refl },
{ exact nonpos_of_colg_eq j (mt (congr_arg some) (h ▸ hcolnec'))
(by rw [← (option.eq_some_iff_get_eq.1 h).snd]; simp) }
end),
have hexi : ∃ i, 0 < c_star (T.to_partition.rowg i) * T.to_matrix i c,
from imp_of_not_imp_not _ _ (by simpa using @sum_nonpos _ _ (@univ (fin m) _)
(λ i, c_star (T.to_partition.rowg i) * T.to_matrix i c) _ _) sumpos,
let ⟨i, hi⟩ := hexi in
have hi0 : T.const i 0 = 0, from hfickle i
(λ h, by dsimp [c_star] at hi; rw [h, colp_rowg_eq_none] at hi; simpa [lt_irrefl] using hi),
have hi_some : (T'.to_partition.colp.symm (T.to_partition.rowg i)).is_some,
from option.ne_none_iff_is_some.1 (λ h, by dsimp only [c_star] at hi; rw h at hi;
simpa [lt_irrefl] using hi),
have hi' : 0 < T'.to_matrix obj (option.get hi_some) * T.to_matrix i c,
by dsimp only [c_star] at hi; rwa [← option.some_get hi_some] at hi,
have hir : i ≠ r, from λ hir, begin
have : option.get hi_some = c', from T'.to_partition.injective_colg
(by rw [colg_get_colp_symm, ← hs, hir]),
rw [this, hir] at hi',
exact not_lt_of_gt hi' (mul_neg_of_pos_of_neg hc'obj0 hrc0)
end,
have hnec' : option.get hi_some ≠ c',
from λ eq_c', hir $ @eq_of_roweqc' i (eq_c' ▸ by simp),
have hic0 : T.to_matrix i c < 0,
from neg_of_mul_pos_right hi' (nonpos_of_colg_ne _ (by simp) hnec'),
not_le_of_gt hic0 (unique_row _ hir hi0
(by rw [← colg_get_colp_symm _ _ hi_some]; exact colg_ne_rowg _ _ _))
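/-- `rel obj T' T` means that `T'` is obtained from `T` by one or more pivots chosen by
`pivot_col` and `pivot_row` on objective row `obj`. Irreflexivity of this relation (no cycling
under Bland's rule), together with the finiteness of the set of reachable tableaux, yields
well-foundedness and hence termination of `simplex`. -/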
inductive rel : tableau m n → tableau m n → Prop
| pivot : ∀ {T}, feasible T → ∀ {r c}, c ∈ pivot_col T obj →
r ∈ pivot_row T obj c → rel (T.pivot r c) T
| trans_pivot : ∀ {T₁ T₂ r c}, rel T₁ T₂ → c ∈ pivot_col T₁ obj →
r ∈ pivot_row T₁ obj c → rel (T₁.pivot r c) T₂
lemma feasible_of_rel_right {T T' : tableau m n} (h : rel obj T' T) : T.feasible :=
rel.rec_on h (by tauto) (by tauto)
lemma feasible_of_rel_left {T T' : tableau m n} (h : rel obj T' T) : T'.feasible :=
rel.rec_on h (λ _ hT _ _ hc hr, feasible_of_mem_pivot_row_and_col hT hc hr)
(λ _ _ _ _ _ hc hr hT, feasible_of_mem_pivot_row_and_col hT hc hr)
/-- Slightly stronger recursor than the default recursor -/
@[elab_as_eliminator]
lemma rel.rec_on' {obj : fin m} {C : tableau m n → tableau m n → Prop} {T T' : tableau m n}
(hrel : rel obj T T')
(hpivot : ∀ {T : tableau m n} {r : fin m} {c : fin n},
feasible T → c ∈ pivot_col T obj → r ∈ pivot_row T obj c → C (pivot T r c) T)
(hpivot_trans : ∀ {T₁ T₂ : tableau m n} {r : fin m} {c : fin n},
rel obj (T₁.pivot r c) T₁ → rel obj T₁ T₂ →
c ∈ pivot_col T₁ obj →
r ∈ pivot_row T₁ obj c → C (T₁.pivot r c) T₁ → C T₁ T₂ → C (pivot T₁ r c) T₂) :
C T T' :=
rel.rec_on hrel (λ T hT r c hc hr, hpivot hT hc hr) (λ T₁ T₂ r c hrelT₁₂ hc hr ih, hpivot_trans
(rel.pivot (feasible_of_rel_left obj hrelT₁₂) hc hr) hrelT₁₂ hc hr
(hpivot (feasible_of_rel_left obj hrelT₁₂) hc hr) ih)
lemma rel.trans {obj : fin m} {T₁ T₂ T₃ : tableau m n} (h₁₂ : rel obj T₁ T₂) :
rel obj T₂ T₃ → rel obj T₁ T₃ :=
rel.rec_on h₁₂
(λ T r c hT hc hr hrelT, rel.trans_pivot hrelT hc hr)
(λ T₁ T₂ r c hrelT₁₂ hc hr ih hrelT₂₃, rel.trans_pivot (ih hrelT₂₃) hc hr)
instance : is_trans (tableau m n) (rel obj) := ⟨@rel.trans _ _ obj⟩
lemma flat_eq_of_rel {T T' : tableau m n} (h : rel obj T' T) : flat T' = flat T :=
rel.rec_on' h (λ _ _ _ _ _ hr, flat_pivot (ne_zero_of_mem_pivot_row hr))
(λ _ _ _ _ _ _ _ _, eq.trans)
lemma rowg_obj_eq_of_rel {T T' : tableau m n} (h : rel obj T T') : T.to_partition.rowg obj =
T'.to_partition.rowg obj :=
rel.rec_on' h (λ T r c hfT hc hr, by simp [rowg_swap_of_ne _ (pivot_row_spec hr).1])
(λ _ _ _ _ _ _ _ _, eq.trans)
lemma restricted_eq_of_rel {T T' : tableau m n} (h : rel obj T T') : T.restricted = T'.restricted :=
rel.rec_on' h (λ _ _ _ _ _ _, rfl) (λ _ _ _ _ _ _ _ _, eq.trans)
lemma dead_eq_of_rel {T T' : tableau m n} (h : rel obj T T') : T.dead = T'.dead :=
rel.rec_on' h (λ _ _ _ _ _ _, rfl) (λ _ _ _ _ _ _ _ _, eq.trans)
lemma dead_eq_of_rel_or_eq {T T' : tableau m n} (h : T = T' ∨ rel obj T T') : T.dead = T'.dead :=
h.elim (congr_arg _) $ dead_eq_of_rel _
lemma exists_mem_pivot_row_col_of_rel {T T' : tableau m n} (h : rel obj T' T) :
∃ r c, c ∈ pivot_col T obj ∧ r ∈ pivot_row T obj c :=
rel.rec_on' h (λ _ r c _ hc hr, ⟨r, c, hc, hr⟩) (λ _ _ _ _ _ _ _ _ _, id)
lemma exists_mem_pivot_row_of_rel {T T' : tableau m n} (h : rel obj T' T) {c : fin n}
(hc : c ∈ pivot_col T obj) : ∃ r, r ∈ pivot_row T obj c :=
let ⟨r, c', hc', hr⟩ := exists_mem_pivot_row_col_of_rel obj h in ⟨r, by simp * at *⟩
lemma colg_eq_or_exists_mem_pivot_col {T₁ T₂ : tableau m n} (h : rel obj T₂ T₁) {c : fin n} :
T₁.to_partition.colg c = T₂.to_partition.colg c ∨
∃ T₃, (T₃ = T₁ ∨ rel obj T₃ T₁) ∧ (rel obj T₂ T₃) ∧
T₃.to_partition.colg c = T₁.to_partition.colg c ∧
c ∈ pivot_col T₃ obj :=
rel.rec_on' h begin
assume T r c' hT hc' hr,
by_cases hcc : c = c',
{ subst hcc,
exact or.inr ⟨T, or.inl rfl, rel.pivot hT hc' hr, rfl, hc'⟩ },
{ simp [colg_swap_of_ne _ hcc] }
end
(λ T₁ T₂ r c hrelp₁ hrel₁₂ hc hr ihp₁ ih₁₂,
ih₁₂.elim
(λ ih₁₂, ihp₁.elim
(λ ihp₁, or.inl (ih₁₂.trans ihp₁))
(λ ⟨T₃, hT₃⟩, or.inr ⟨T₃,
hT₃.1.elim (λ h, h.symm ▸ or.inr hrel₁₂) (λ h, or.inr $ h.trans hrel₁₂),
hT₃.2.1, hT₃.2.2.1.trans ih₁₂.symm, hT₃.2.2.2⟩))
(λ ⟨T₃, hT₃⟩, or.inr ⟨T₃, hT₃.1, hrelp₁.trans hT₃.2.1, hT₃.2.2⟩))
lemma rowg_eq_or_exists_mem_pivot_row {T₁ T₂ : tableau m n} (h : rel obj T₂ T₁) (r : fin m) :
T₁.to_partition.rowg r = T₂.to_partition.rowg r ∨
∃ (T₃ : tableau m n) c, (T₃ = T₁ ∨ rel obj T₃ T₁) ∧ (rel obj T₂ T₃) ∧
T₃.to_partition.rowg r = T₁.to_partition.rowg r ∧
c ∈ pivot_col T₃ obj ∧ r ∈ pivot_row T₃ obj c :=
rel.rec_on' h
begin
assume T r' c hT hc hr',
by_cases hrr : r = r',
{ subst hrr,
exact or.inr ⟨T, c, or.inl rfl, rel.pivot hT hc hr', rfl, hc, hr'⟩ },
{ simp [rowg_swap_of_ne _ hrr] }
end
(λ T₁ T₂ r c hrelp₁ hrel₁₂ hc hr ihp₁ ih₁₂,
ih₁₂.elim
(λ ih₁₂, ihp₁.elim
(λ ihp₁, or.inl $ ih₁₂.trans ihp₁)
(λ ⟨T₃, c', hT₃⟩, or.inr ⟨T₃, c', hT₃.1.elim (λ h, h.symm ▸ or.inr hrel₁₂)
(λ h, or.inr $ h.trans hrel₁₂), hT₃.2.1, ih₁₂.symm ▸ hT₃.2.2.1, hT₃.2.2.2⟩))
(λ ⟨T₃, c', hT₃⟩, or.inr ⟨T₃, c', hT₃.1,
(rel.pivot (feasible_of_rel_left _ hrel₁₂) hc hr).trans hT₃.2.1, hT₃.2.2⟩))
lemma eq_or_rel_pivot_of_rel {T₁ T₂ : tableau m n} (h : rel obj T₁ T₂) : ∀ {r c}
(hc : c ∈ pivot_col T₂ obj) (hr : r ∈ pivot_row T₂ obj c),
T₁ = T₂.pivot r c ∨ rel obj T₁ (T₂.pivot r c) :=
rel.rec_on' h (λ T r c hT hc hr r' c' hc' hr', by simp * at *)
(λ T₁ T₂ r c hrelp₁ hrel₁₂ hc hr ihp₁ ih₁₂ r' c' hc' hr',
(ih₁₂ hc' hr').elim
(λ ih₁₂, or.inr $ ih₁₂ ▸ rel.pivot (feasible_of_rel_left _ hrel₁₂) hc hr)
(λ ih₁₂, or.inr $ (rel.pivot (feasible_of_rel_left _ hrel₁₂) hc hr).trans ih₁₂))
lemma exists_mem_pivot_col_of_mem_pivot_row {T : tableau m n} (hrelTT : rel obj T T)
{r c} (hc : c ∈ pivot_col T obj) (hr : r ∈ pivot_row T obj c) :
∃ (T' : tableau m n), c ∈ pivot_col T' obj ∧ T'.to_partition.colg c =
T.to_partition.rowg r ∧ rel obj T' T ∧ rel obj T T' :=
have hrelTTp : rel obj T (T.pivot r c),
from (eq_or_rel_pivot_of_rel _ hrelTT hc hr).elim (λ h, h ▸ hrelTT ) id,
let ⟨T', hT'⟩ := (colg_eq_or_exists_mem_pivot_col obj hrelTTp).resolve_left
(show (T.pivot r c).to_partition.colg c ≠ T.to_partition.colg c, by simp) in
⟨T', hT'.2.2.2, by simp [hT'.2.2.1], hT'.1.elim
(λ h, h.symm ▸ rel.pivot (feasible_of_rel_left _ hrelTT) hc hr)
(λ h, h.trans $ rel.pivot (feasible_of_rel_left _ hrelTT) hc hr), hT'.2.1⟩
lemma exists_mem_pivot_col_of_rowg_ne {T T' : tableau m n} (hrelTT' : rel obj T T') {r : fin m}
(hrelT'T : rel obj T' T) (hrow : T.to_partition.rowg r ≠ T'.to_partition.rowg r) :
∃ (T₃ : tableau m n) c, c ∈ pivot_col T₃ obj ∧ T₃.to_partition.colg c =
T.to_partition.rowg r ∧ rel obj T₃ T ∧ rel obj T T₃ :=
let ⟨T₃, c, hT₃, hrelT₃T, hrow₃, hc, hr⟩ :=
(rowg_eq_or_exists_mem_pivot_row obj hrelT'T _).resolve_left hrow in
let ⟨T₄, hT₄⟩ := exists_mem_pivot_col_of_mem_pivot_row obj
(show rel obj T₃ T₃, from hT₃.elim (λ h, h.symm ▸ hrelTT'.trans hrelT'T)
(λ h, h.trans $ hrelTT'.trans hrelT₃T)) hc hr in
⟨T₄, c, hT₄.1, hT₄.2.1.trans hrow₃, hT₄.2.2.1.trans $ hT₃.elim (λ h, h.symm ▸ hrelTT'.trans hrelT'T)
(λ h, h.trans $ hrelTT'.trans hrelT'T), hrelTT'.trans (hrelT₃T.trans hT₄.2.2.2)⟩
lemma const_obj_le_of_rel {T₁ T₂ : tableau m n} (h : rel obj T₁ T₂) :
T₂.const obj 0 ≤ T₁.const obj 0 :=
rel.rec_on' h (λ T r c hT hc hr,
have hr' : _ := pivot_row_spec hr,
simplex_const_obj_le hT (by tauto) (by tauto))
(λ _ _ _ _ _ _ _ _ h₁ h₂, le_trans h₂ h₁)
lemma const_obj_eq_of_rel_of_rel {T₁ T₂ : tableau m n} (h₁₂ : rel obj T₁ T₂)
(h₂₁ : rel obj T₂ T₁) : T₁.const obj 0 = T₂.const obj 0 :=
le_antisymm (const_obj_le_of_rel _ h₂₁) (const_obj_le_of_rel _ h₁₂)
lemma const_eq_const_of_const_obj_eq {T₁ T₂ : tableau m n} (h₁₂ : rel obj T₁ T₂) :
∀ (hobj : T₁.const obj 0 = T₂.const obj 0) (i : fin m), T₁.const i 0 = T₂.const i 0 :=
rel.rec_on' h₁₂
(λ T r c hfT hc hr hobj i,
have hr0 : T.const r 0 = 0, from const_eq_zero_of_const_obj_eq hfT
(ne_zero_of_mem_pivot_col hc) (ne_zero_of_mem_pivot_row hr)
(pivot_row_spec hr).1 hobj,
if hir : i = r
then by simp [hir, hr0]
else by simp [const_pivot_of_ne _ hir, hr0])
(λ T₁ T₂ r c hrelp₁ hrel₁₂ hc hr ihp₁ ih₁₂ hobj i,
have hobjp : (pivot T₁ r c).const obj 0 = T₁.const obj 0,
from le_antisymm (hobj.symm ▸ const_obj_le_of_rel _ hrel₁₂)
(const_obj_le_of_rel _ hrelp₁),
by rw [ihp₁ hobjp, ih₁₂ (hobjp.symm.trans hobj)])
lemma const_eq_zero_of_rowg_ne_of_rel_self {T T' : tableau m n} (hrelTT' : rel obj T T')
(hrelT'T : rel obj T' T) (i : fin m) (hrow : T.to_partition.rowg i ≠ T'.to_partition.rowg i) :
T.const i 0 = 0 :=
let ⟨T₃, c, hT₃₁, hT'₃, hrow₃, hc, hi⟩ := (rowg_eq_or_exists_mem_pivot_row obj hrelT'T _).resolve_left hrow in
have T₃.const i 0 = 0, from const_eq_zero_of_const_obj_eq
(feasible_of_rel_right _ hT'₃) (ne_zero_of_mem_pivot_col hc)
(ne_zero_of_mem_pivot_row hi) (pivot_row_spec hi).1
(const_obj_eq_of_rel_of_rel _ (rel.pivot (feasible_of_rel_right _ hT'₃) hc hi)
((eq_or_rel_pivot_of_rel _ hT'₃ hc hi).elim
(λ h, h ▸ hT₃₁.elim (λ h, h.symm ▸ hrelTT') (λ h, h.trans hrelTT'))
(λ hrelT'p, hT₃₁.elim (λ h, h.symm ▸ hrelTT'.trans (h ▸ hrelT'p))
(λ h, h.trans $ hrelTT'.trans hrelT'p)))),
have hobj : T₃.const obj 0 = T.const obj 0,
from hT₃₁.elim (λ h, h ▸ rfl) (λ h, const_obj_eq_of_rel_of_rel _ h (hrelTT'.trans hT'₃)),
hT₃₁.elim (λ h, h ▸ this) (λ h, const_eq_const_of_const_obj_eq obj h hobj i ▸ this)
lemma colg_mem_restricted_of_rel_self {T : tableau m n} (hrelTT : rel obj T T)
{c} (hc : c ∈ pivot_col T obj) : T.to_partition.colg c ∈ T.restricted :=
let ⟨r, hr⟩ := exists_mem_pivot_row_of_rel obj hrelTT hc in
let ⟨T', c', hT', hrelTT', hrowcol, _, hr'⟩ := (rowg_eq_or_exists_mem_pivot_row obj
((eq_or_rel_pivot_of_rel _ hrelTT hc hr).elim
(λ h, show rel obj T (T.pivot r c), from h ▸ hrelTT) id) _).resolve_left
(show (T.pivot r c).to_partition.rowg r ≠ T.to_partition.rowg r, by simp) in
(restricted_eq_of_rel _ hrelTT').symm ▸ by convert (pivot_row_spec hr').2.1; simp [hrowcol]
lemma eq_zero_of_not_mem_restricted_of_rel_self {T : tableau m n} (hrelTT : rel obj T T)
{j} (hjres : T.to_partition.colg j ∉ T.restricted) (hdead : j ∉ T.dead) : T.to_matrix obj j = 0 :=
let ⟨r, c, hc, hr⟩ := exists_mem_pivot_row_col_of_rel obj hrelTT in
have hcres : T.to_partition.colg c ∈ T.restricted,
from colg_mem_restricted_of_rel_self obj hrelTT hc,
by_contradiction $ λ h0,
begin
simp [pivot_col] at hc,
cases h : fin.find (λ c, T.to_matrix obj c ≠ 0 ∧ colg (T.to_partition) c ∉ T.restricted
∧ c ∉ T.dead),
{ simp [*, fin.find_eq_none_iff] at * },
{ rw h at hc, clear_aux_decl,
have := (fin.find_eq_some_iff.1 h).1,
simp * at * }
end
lemma rel.irrefl {obj : fin m} : ∀ (T : tableau m n), ¬ rel obj T T :=
λ T1 hrelT1,
let ⟨rT1 , cT1, hrT1, hcT1⟩ := exists_mem_pivot_row_col_of_rel obj hrelT1 in
let ⟨t, ht⟩ := finset.max_of_mem
(show T1.to_partition.colg cT1 ∈ univ.filter (λ v, ∃ (T' : tableau m n) (c : fin n),
rel obj T' T' ∧ c ∈ pivot_col T' obj ∧ T'.to_partition.colg c = v),
by simp only [true_and, mem_filter, mem_univ, exists_and_distrib_left];
exact ⟨T1, hrelT1, cT1, hrT1, rfl⟩) in
let ⟨_, T', c', hrelTT'', hcT', hct⟩ := finset.mem_filter.1 (finset.mem_of_max ht) in
have htmax : ∀ (s : fin (m + n)) (T : tableau m n),
rel obj T T → ∀ (j : fin n), pivot_col T obj = some j →
T.to_partition.colg j = s → s ≤ t,
by simpa using λ s (h : s ∈ _), finset.le_max_of_mem h ht,
let ⟨r, hrT'⟩ := exists_mem_pivot_row_of_rel obj hrelTT'' hcT' in
have hrelTT''p : rel obj T' (T'.pivot r c'),
from (eq_or_rel_pivot_of_rel obj hrelTT'' hcT' hrT').elim (λ h, h ▸ hrelTT'') id,
let ⟨T, c, hTT', hrelT'T, hT'Tr, hc, hr⟩ := (rowg_eq_or_exists_mem_pivot_row obj
hrelTT''p r).resolve_left (by simp) in
have hfT' : feasible T', from feasible_of_rel_left _ hrelTT'',
have hfT : feasible T, from feasible_of_rel_right _ hrelT'T,
have hrelT'pT' : rel obj (T'.pivot r c') T', from rel.pivot hfT' hcT' hrT',
have hrelTT' : rel obj T T', from hTT'.elim (λ h, h.symm ▸ hrelT'pT') (λ h, h.trans hrelT'pT'),
have hrelTT : rel obj T T, from hrelTT'.trans hrelT'T,
have hc't : T.to_partition.colg c ≤ t, from htmax _ T hrelTT _ hc rfl,
have hoT'T : T'.const obj 0 = T.const obj 0, from const_obj_eq_of_rel_of_rel _ hrelT'T hrelTT',
have hfickle : ∀ i, T.to_partition.rowg i ≠ T'.to_partition.rowg i → T.const i 0 = 0,
from const_eq_zero_of_rowg_ne_of_rel_self obj hrelTT' hrelT'T,
have hobj : T.const obj 0 = T'.const obj 0, from const_obj_eq_of_rel_of_rel _ hrelTT' hrelT'T,
have hflat : T.flat = T'.flat, from flat_eq_of_rel obj hrelTT',
have hrobj : T.to_partition.rowg obj = T'.to_partition.rowg obj, from rowg_obj_eq_of_rel _ hrelTT',
have hs : T.to_partition.rowg r = T'.to_partition.colg c', by simpa using hT'Tr,
have hc'res : T'.to_partition.colg c' ∈ T'.restricted,
from hs ▸ restricted_eq_of_rel _ hrelTT' ▸ (pivot_row_spec hr).2.1,
have hc'obj0 : 0 < T'.to_matrix obj c' ∧ c' ∉ T'.dead,
by simpa [hc'res] using pivot_col_spec hcT',
have hcres : T.to_partition.colg c ∈ T.restricted,
from colg_mem_restricted_of_rel_self obj hrelTT hc,
have hcobj0 : 0 < to_matrix T obj c ∧ c ∉ T.dead,
by simpa [hcres] using pivot_col_spec hc,
have hrc0 : T.to_matrix r c < 0,
from inv_neg'.1 $ neg_of_mul_neg_left (pivot_row_spec hr).2.2.1 (le_of_lt hcobj0.1),
have nonpos_of_colg_ne : ∀ j, T'.to_partition.colg j ≠ T.to_partition.colg j → j ≠ c' →
T'.to_matrix obj j ≤ 0,
from λ j hj hjc',
let ⟨T₃, hT₃⟩ := (colg_eq_or_exists_mem_pivot_col obj hrelTT').resolve_left hj in
nonpos_of_lt_pivot_col hcT' hc'res
(dead_eq_of_rel_or_eq obj hT₃.1 ▸ (pivot_col_spec hT₃.2.2.2).2)
(lt_of_le_of_ne
(hct.symm ▸ hT₃.2.2.1 ▸ htmax _ T₃ (hT₃.1.elim (λ h, h.symm ▸ hrelTT'')
(λ h, h.trans (hrelT'T.trans hT₃.2.1))) _ hT₃.2.2.2 rfl)
(by rwa [ne.def, T'.to_partition.injective_colg.eq_iff])),
have nonpos_of_colg_eq : ∀ j, j ≠ c' →
T'.to_partition.colg j = T.to_partition.colg c → T'.to_matrix obj j ≤ 0,
from λ j hjc' hj,
if hjc : j = c
then by clear_aux_decl; subst j; exact nonpos_of_lt_pivot_col hcT' hc'res
(dead_eq_of_rel obj hrelTT' ▸ hcobj0.2)
(lt_of_le_of_ne
(hj.symm ▸ hct.symm ▸ htmax _ _ hrelTT _ hc rfl)
(hs ▸ hj.symm ▸ colg_ne_rowg _ _ _))
else let ⟨T₃, hT₃⟩ := (colg_eq_or_exists_mem_pivot_col obj hrelTT').resolve_left
(show T'.to_partition.colg j ≠ T.to_partition.colg j,
by simpa [hj, T.to_partition.injective_colg.eq_iff, eq_comm] using hjc) in
nonpos_of_lt_pivot_col hcT' hc'res
(dead_eq_of_rel_or_eq obj hT₃.1 ▸ (pivot_col_spec hT₃.2.2.2).2)
(lt_of_le_of_ne
(hct.symm ▸ hT₃.2.2.1 ▸ htmax _ T₃ (hT₃.1.elim (λ h, h.symm ▸ hrelTT'')
(λ h, h.trans (hrelT'T.trans hT₃.2.1))) _ hT₃.2.2.2 rfl)
(by rwa [ne.def, T'.to_partition.injective_colg.eq_iff])),
have unique_row : ∀ i ≠ r, T.const i 0 = 0 → T.to_partition.rowg i ≠ T'.to_partition.rowg i →
0 ≤ T.to_matrix i c,
from λ i hir hi0 hrow,
let ⟨T₃, c₃, hc₃, hrow₃, hrelT₃T, hrelTT₃⟩ :=
exists_mem_pivot_col_of_rowg_ne _ hrelTT' hrelT'T hrow in
have hrelT₃T₃ : rel obj T₃ T₃, from hrelT₃T.trans hrelTT₃,
nonneg_of_lt_pivot_row (by exact hcobj0.1)
(by rw [← hrow₃, ← restricted_eq_of_rel _ hrelT₃T];
exact colg_mem_restricted_of_rel_self _ hrelT₃T₃ hc₃) hc hr hi0
(lt_of_le_of_ne (by rw [hs, hct, ← hrow₃]; exact htmax _ _ hrelT₃T₃ _ hc₃ rfl)
(by simpa [T.to_partition.injective_rowg.eq_iff])),
not_unique_row_and_unique_col obj hcobj0.1 hc'obj0.1 hrc0 hflat hs hrobj hfickle hobj
nonpos_of_colg_ne nonpos_of_colg_eq unique_row
noncomputable instance fintype_rel (T : tableau m n) : fintype {T' | rel obj T' T} :=
fintype.of_injective (λ T', T'.val.to_partition)
(λ T₁ T₂ h, subtype.eq $ tableau.ext
(by rw [flat_eq_of_rel _ T₁.2, flat_eq_of_rel _ T₂.2]) h
(by rw [dead_eq_of_rel _ T₁.2, dead_eq_of_rel _ T₂.2])
(by rw [restricted_eq_of_rel _ T₁.2, restricted_eq_of_rel _ T₂.2]))
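/-- `rel obj` is well-founded: it is transitive and irreflexive, and by `fintype_rel` each
tableau has only finitely many predecessors, so the number of predecessors strictly decreases
along the relation. -/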
lemma rel_wf (m n : ℕ) (obj : fin m): well_founded (@rel m n obj) :=
subrelation.wf
(show subrelation (@rel m n obj) (measure (λ T, fintype.card {T' | rel obj T' T})),
from assume T₁ T₂ h,
set.card_lt_card (set.ssubset_iff_subset_not_subset.2 ⟨λ T' hT', hT'.trans h,
classical.not_forall.2 ⟨T₁, λ h', rel.irrefl _ (h' h)⟩⟩))
(measure_wf (λ T, fintype.card {T' | rel obj T' T}))
end blands_rule
@[derive _root_.decidable_eq] inductive termination : Type
| while : termination
| unbounded : termination
| optimal : termination
open termination
instance : has_repr termination := ⟨λ t, termination.cases_on t "while" "unbounded" "optimal"⟩
instance : fintype termination := ⟨⟨quotient.mk [while, unbounded, optimal], dec_trivial⟩,
λ x, by cases x; exact dec_trivial⟩
open termination
/-- The simplex algorithm: starting from a feasible tableau, repeatedly pivot on the column and
row chosen by `pivot_col` and `pivot_row`, maximising the objective row `obj`, for as long as
the predicate `w` holds of the current tableau. The result pairs the final tableau with the
reason for stopping: `while` (the predicate failed), `optimal` (no entering column), or
`unbounded` (no leaving row). -/
def simplex (w : tableau m n → bool) (obj : fin m) : Π (T : tableau m n) (hT : feasible T),
tableau m n × termination
| T := λ hT, cond (w T)
(match pivot_col T obj, @feasible_of_mem_pivot_row_and_col _ _ _ obj hT,
@rel.pivot m n obj _ hT with
| none, hc, hrel := (T, optimal)
| some c, hc, hrel :=
match pivot_row T obj c, @hc _ rfl, (λ r, @hrel r c rfl) with
| none, hr, hrel := (T, unbounded)
| some r, hr, hrel := have wf : rel obj (pivot T r c) T, from hrel _ rfl,
simplex (T.pivot r c) (hr rfl)
end
end)
(T, while)
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, rel_wf m n obj⟩],
dec_tac := tactic.assumption}
lemma simplex_pivot {w : tableau m n → bool} {T : tableau m n} (hT : feasible T)
(hw : w T = tt) {obj : fin m} {r : fin m} {c : fin n}
(hc : c ∈ pivot_col T obj) (hr : r ∈ pivot_row T obj c) :
(T.pivot r c).simplex w obj (feasible_of_mem_pivot_row_and_col hT hc hr) =
T.simplex w obj hT :=
by conv_rhs { rw simplex };
simp [hw, show _ = _, from hr, show _ = _, from hc, simplex._match_1, simplex._match_2]
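-- Specification of the result: a run of `simplex` ends in exactly one of the three
-- states, and in each case the final tableau satisfies the corresponding condition on
-- `w`, `pivot_col` and `pivot_row`.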
lemma simplex_spec_aux (w : tableau m n → bool) (obj : fin m) :
Π (T : tableau m n) (hT : feasible T),
((T.simplex w obj hT).2 = while ∧ w (T.simplex w obj hT).1 = ff) ∨
((T.simplex w obj hT).2 = optimal ∧ w (T.simplex w obj hT).1 = tt ∧
pivot_col (T.simplex w obj hT).1 obj = none) ∨
((T.simplex w obj hT).2 = unbounded ∧ w (T.simplex w obj hT).1 = tt ∧
∃ c, c ∈ pivot_col (T.simplex w obj hT).1 obj ∧
pivot_row (T.simplex w obj hT).1 obj c = none)
| T := λ hT,
begin
cases hw : w T,
{ rw simplex, simp [hw] },
{ cases hc : pivot_col T obj with c,
{ rw simplex, simp [hc, hw, simplex._match_1] },
{ cases hr : pivot_row T obj c with r,
{ rw simplex, simp [hr, hc, hw, simplex._match_1, simplex._match_2] },
{ rw [← simplex_pivot hT hw hc hr],
exact have wf : rel obj (T.pivot r c) T, from rel.pivot hT hc hr,
simplex_spec_aux _ _ } } }
end
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, rel_wf m n obj⟩],
dec_tac := tactic.assumption}
lemma simplex_while_eq_ff {w : tableau m n → bool} {T : tableau m n} {hT : feasible T}
{obj : fin m} (hw : w T = ff) : T.simplex w obj hT = (T, while) :=
by rw [simplex, hw]; refl
lemma simplex_pivot_col_eq_none {w : tableau m n → bool} {T : tableau m n} {hT : feasible T}
(hw : w T = tt) {obj : fin m} (hc : pivot_col T obj = none) :
T.simplex w obj hT = (T, optimal) :=
by rw simplex; simp [hc, hw, simplex._match_1]
lemma simplex_pivot_row_eq_none {w : tableau m n → bool} {T : tableau m n} {hT : feasible T}
{obj : fin m} (hw : w T = tt) {c} (hc : c ∈ pivot_col T obj)
(hr : pivot_row T obj c = none) : T.simplex w obj hT = (T, unbounded) :=
by rw simplex; simp [hw, show _ = _, from hc, hr, simplex._match_1, simplex._match_2]
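-- Induction principle for runs of `simplex`: any predicate that holds of the starting
-- tableau and is preserved by each pivot step also holds of the resulting tableau.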
lemma simplex_induction (P : tableau m n → Prop) (w : tableau m n → bool) (obj : fin m) :
Π {T : tableau m n} (hT : feasible T) (h0 : P T)
(hpivot : ∀ {T' r c}, w T' = tt → c ∈ pivot_col T' obj → r ∈ pivot_row T' obj c
→ feasible T' → P T' → P (T'.pivot r c)),
P (T.simplex w obj hT).1
| T := λ hT h0 hpivot,
begin
cases hw : w T,
{ rwa [simplex_while_eq_ff hw] },
{ cases hc : pivot_col T obj with c,
{ rwa [simplex_pivot_col_eq_none hw hc] },
{ cases hr : pivot_row T obj c with r,
{ rwa simplex_pivot_row_eq_none hw hc hr },
{ rw [← simplex_pivot _ hw hc hr],
exact have wf : rel obj (pivot T r c) T, from rel.pivot hT hc hr,
simplex_induction (feasible_of_mem_pivot_row_and_col hT hc hr)
(hpivot hw hc hr hT h0) @hpivot } } }
end
using_well_founded {rel_tac := λ _ _, `[exact ⟨_, rel_wf m n obj⟩],
dec_tac := `[tauto]}
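-- Invariants preserved by `simplex`, mostly obtained from `simplex_induction`.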
@[simp] lemma feasible_simplex {w : tableau m n → bool} {T : tableau m n}
{hT : feasible T} {obj : fin m} : feasible (T.simplex w obj hT).1 :=
simplex_induction feasible _ _ hT hT
(λ _ _ _ _ hc hr _ hT', feasible_of_mem_pivot_row_and_col hT' hc hr)
@[simp] lemma simplex_simplex {w : tableau m n → bool} {T : tableau m n} {hT : feasible T}
{obj : fin m} : (T.simplex w obj hT).1.simplex w obj feasible_simplex = T.simplex w obj hT :=
simplex_induction (λ T', ∀ (hT' : feasible T'), T'.simplex w obj hT' = T.simplex w obj hT) w _ _
(λ _, rfl) (λ T' r c hw hc hr hT' ih hpivot, by rw [simplex_pivot hT' hw hc hr, ih]) _
/-- `simplex` does not move the row variable it is trying to maximise. -/
@[simp] lemma rowg_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.to_partition.rowg obj = T.to_partition.rowg obj :=
simplex_induction (λ T', T'.to_partition.rowg obj = T.to_partition.rowg obj) _ _ _ rfl
(λ T' r c hw hc hr, by simp [rowg_swap_of_ne _ (pivot_row_spec hr).1])
@[simp] lemma colg_simplex_of_dead_aux {T : tableau m n} {hT : feasible T} {w : tableau m n → bool}
{obj : fin m} {c' : fin n} : c' ∈ (T.simplex w obj hT).1.dead →
(T.simplex w obj hT).1.to_partition.colg c' = T.to_partition.colg c' :=
simplex_induction (λ T', c' ∈ T'.dead → T'.to_partition.colg c' = T.to_partition.colg c') _ obj _
(λ _, rfl)
(λ T' r c hw hc hr hfT' ih hdead,
have c' ≠ c, from λ hcc, (pivot_col_spec hc).2 (by simp * at *),
by simp [colg_swap_of_ne _ this, ih hdead])
@[simp] lemma flat_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.flat = T.flat :=
simplex_induction (λ T', T'.flat = T.flat) w obj _ rfl
(λ T' r c hw hc hr hT' ih,
have T'.to_matrix r c ≠ 0,
from λ h, by simpa [h, lt_irrefl] using pivot_row_spec hr,
by rw [flat_pivot this, ih])
@[simp] lemma restricted_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.restricted = T.restricted :=
simplex_induction (λ T', T'.restricted = T.restricted) _ _ _ rfl (by simp { contextual := tt })
@[simp] lemma dead_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.dead = T.dead :=
simplex_induction (λ T', T'.dead = T.dead) _ _ _ rfl (by simp { contextual := tt })
@[simp] lemma res_set_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.res_set = T.res_set :=
simplex_induction (λ T', T'.res_set = T.res_set) w obj _ rfl
(λ T' r c hw hc hr, by simp [res_set_pivot (ne_zero_of_mem_pivot_row hr)] {contextual := tt})
@[simp] lemma dead_set_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.dead_set = T.dead_set :=
simplex_induction (λ T', T'.dead_set = T.dead_set) w obj _ rfl
(λ T' r c hw hc hr,
by simp [dead_set_pivot (ne_zero_of_mem_pivot_row hr) (pivot_col_spec hc).2] {contextual := tt})
@[simp] lemma sol_set_simplex (T : tableau m n) (hT : feasible T) (w : tableau m n → bool)
(obj : fin m) : (T.simplex w obj hT).1.sol_set = T.sol_set :=
by simp [sol_set_eq_res_set_inter_dead_set]
@[simp] lemma of_col_simplex_zero_mem_sol_set {w : tableau m n → bool} {T : tableau m n}
{hT : feasible T} {obj : fin m} : (T.simplex w obj hT).1.of_col 0 ∈ sol_set T :=
by rw [← sol_set_simplex, of_col_zero_mem_sol_set_iff]; exact feasible_simplex
@[simp] lemma of_col_simplex_rowg {w : tableau m n → bool} {T : tableau m n}
{hT : feasible T} {obj : fin m} (x : cvec n) :
(T.simplex w obj hT).1.of_col x (T.to_partition.rowg obj) =
((T.simplex w obj hT).1.to_matrix ⬝ x + (T.simplex w obj hT).1.const) obj :=
by rw [← of_col_rowg (T.simplex w obj hT).1 x obj, rowg_simplex]
@[simp] lemma is_unbounded_above_simplex {T : tableau m n} {hT : feasible T} {w : tableau m n → bool}
{obj : fin m} {v : fin (m + n)} : is_unbounded_above (T.simplex w obj hT).1 v ↔
is_unbounded_above T v := by simp [is_unbounded_above]
@[simp] lemma is_optimal_simplex {T : tableau m n} {hT : feasible T} {w : tableau m n → bool}
{obj : fin m} {x : cvec (m + n)} {v : fin (m + n)} : is_optimal (T.simplex w obj hT).1 x v ↔
is_optimal T x v := by simp [is_optimal]
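-- Characterisations of the returned `termination` flag in terms of the final tableau.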
lemma termination_eq_while_iff {T : tableau m n} {hT : feasible T} {w : tableau m n → bool}
{obj : fin m} : (T.simplex w obj hT).2 = while ↔ w (T.simplex w obj hT).1 = ff :=
by have := simplex_spec_aux w obj T hT; finish
lemma termination_eq_optimal_iff_pivot_col_eq_none {T : tableau m n}
{hT : feasible T} {w : tableau m n → bool} {obj : fin m} : (T.simplex w obj hT).2 = optimal ↔
w (T.simplex w obj hT).1 = tt ∧ pivot_col (T.simplex w obj hT).1 obj = none :=
by have := simplex_spec_aux w obj T hT; finish
lemma termination_eq_unbounded_iff_pivot_row_eq_none {T : tableau m n} {hT : feasible T}
{w : tableau m n → bool} {obj : fin m} : (T.simplex w obj hT).2 = unbounded ↔
w (T.simplex w obj hT).1 = tt ∧ ∃ c, c ∈ pivot_col (T.simplex w obj hT).1 obj ∧
pivot_row (T.simplex w obj hT).1 obj c = none :=
by have := simplex_spec_aux w obj T hT; finish
lemma termination_eq_unbounded_iff_aux {T : tableau m n} {hT : feasible T}
{w : tableau m n → bool} {obj : fin m} : (T.simplex w obj hT).2 = unbounded →
w (T.simplex w obj hT).1 = tt ∧
is_unbounded_above T (T.to_partition.rowg obj) :=
begin
rw termination_eq_unbounded_iff_pivot_row_eq_none,
rintros ⟨_, c, hc⟩,
simpa * using pivot_row_eq_none feasible_simplex hc.2 hc.1
end
lemma termination_eq_optimal_iff {T : tableau m n} {hT : feasible T}
{w : tableau m n → bool} {obj : fin m} : (T.simplex w obj hT).2 = optimal ↔
w (T.simplex w obj hT).1 = tt ∧
is_optimal T ((T.simplex w obj hT).1.of_col 0) (T.to_partition.rowg obj) :=
begin
rw [termination_eq_optimal_iff_pivot_col_eq_none],
split,
{ rintros ⟨_, hc⟩,
simpa * using pivot_col_eq_none feasible_simplex hc },
{ cases ht : (T.simplex w obj hT).2,
{ simp [*, termination_eq_while_iff] at * },
{ cases termination_eq_unbounded_iff_aux ht,
simp [*, not_optimal_of_unbounded_above right] },
{ simp [*, termination_eq_optimal_iff_pivot_col_eq_none] at * } }
end
lemma termination_eq_unbounded_iff {T : tableau m n} {hT : feasible T}
{w : tableau m n → bool} {obj : fin m} : (T.simplex w obj hT).2 = unbounded ↔
w (T.simplex w obj hT).1 = tt ∧ is_unbounded_above T (T.to_partition.rowg obj) :=
⟨termination_eq_unbounded_iff_aux,
begin
have := @not_optimal_of_unbounded_above m n (T.simplex w obj hT).1 (T.to_partition.rowg obj)
((T.simplex w obj hT).1.of_col 0),
cases ht : (T.simplex w obj hT).2;
simp [termination_eq_optimal_iff, termination_eq_while_iff, *] at *
end⟩
end tableau
|
(*
Copyright 2018
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*)
theory uhyve_send_mem
imports syscall
begin
text \<open>Up to two locales per function in the binary.\<close>
locale uhyve_send_function = syscall_context +
fixes rsp\<^sub>0 rbp\<^sub>0 port a uhyve_send_ret :: \<open>64 word\<close>
and v\<^sub>0 :: \<open>8 word\<close>
and blocks :: \<open>(nat \<times> 64 word \<times> nat) set\<close>
assumes seps: \<open>seps blocks\<close>
and masters:
\<open>master blocks (a, 1) 0\<close>
\<open>master blocks (rsp\<^sub>0, 8) 1\<close>
\<open>master blocks (rsp\<^sub>0-8, 8) 2\<close>
\<open>master blocks (rsp\<^sub>0-12, 2) 3\<close>
\<open>master blocks (rsp\<^sub>0-16, 4) 4\<close>
\<open>master blocks (\<langle>15,0\<rangle>port, 4) 5\<close>
and ret_address: \<open>outside uhyve_send_ret 355 383\<close> \<comment> \<open>Only works for non-recursive functions.\<close>
begin
text \<open>
The Floyd invariant expresses, for certain program locations, properties that are invariably true.
Here it simply expresses that a byte in memory remains untouched.
\<close>
definition pp_\<Theta> :: floyd_invar where
\<open>pp_\<Theta> \<equiv> [
\<comment> \<open>precondition\<close>
boffset+355 \<mapsto> \<lambda>\<sigma>. regs \<sigma> rsp = rsp\<^sub>0
\<and> regs \<sigma> rbp = rbp\<^sub>0
\<and> \<langle>15,0\<rangle>regs \<sigma> rdi = port
\<and> \<sigma> \<turnstile> *[rsp\<^sub>0,8] = boffset+uhyve_send_ret
\<and> \<sigma> \<turnstile> *[a,1] = v\<^sub>0,
\<comment> \<open>postcondition\<close>
boffset+uhyve_send_ret \<mapsto> \<lambda>\<sigma>. \<sigma> \<turnstile> *[a,1] = v\<^sub>0
\<and> regs \<sigma> rsp = rsp\<^sub>0+8
\<and> regs \<sigma> rbp = rbp\<^sub>0
]\<close>
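text \<open>
The invariant attaches the precondition to the function entry point (boffset+355) and the
postcondition to the return address (boffset+uhyve_send_ret).
\<close>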
text \<open>Add some rules to the simplifier to streamline the proofs below.\<close>
schematic_goal pp_\<Theta>_zero[simp]:
shows \<open>pp_\<Theta> boffset = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_l[simp]:
shows \<open>pp_\<Theta> (n + boffset) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
schematic_goal pp_\<Theta>_numeral_r[simp]:
shows \<open>pp_\<Theta> (boffset + n) = ?x\<close>
unfolding pp_\<Theta>_def
by simp
lemma rewrite_uhyve_send_mem:
\<open>is_std_invar uhyve_send_ret (floyd.invar uhyve_send_ret pp_\<Theta>)\<close>
text \<open>Boilerplate code to start the VCG\<close>
apply (rule floyd_invarI)
apply (rewrite at \<open>floyd_vcs uhyve_send_ret \<hole> _\<close> pp_\<Theta>_def)
apply (intro floyd_vcsI)
text \<open>Subgoal for rip = boffset+355\<close>
subgoal premises prems for \<sigma>
text \<open>Insert relevant knowledge\<close>
apply (insert prems seps ret_address)
text \<open>Apply VCG/symb.\ execution\<close>
apply (restart_symbolic_execution?, (symbolic_execution masters: masters)+, (finish_symbolic_execution masters: masters)?)+
done
text \<open>Trivial ending subgoal.\<close>
subgoal
by simp
done
end
end
|
#include <Eigen/Geometry>
#include <boost/format.hpp>
#include <trajopt_sco/expr_ops.hpp>
#include <trajopt_sco/modeling_utils.hpp>
#include "trajopt_interface/kinematic_terms.h"
using namespace std;
using namespace sco;
using namespace Eigen;
namespace trajopt_interface
{
VectorXd CartPoseErrCalculator::operator()(const VectorXd& dof_vals) const
{
// TODO: create the actual error function from information in planning scene
VectorXd err;
return err;
}
VectorXd JointVelErrCalculator::operator()(const VectorXd& var_vals) const
{
assert(var_vals.rows() % 2 == 0);
// var_vals = (theta_t1, theta_t2, theta_t3 ... 1/dt_1, 1/dt_2, 1/dt_3 ...)
int half = static_cast<int>(var_vals.rows() / 2);
int num_vels = half - 1;
// (x1-x0)*(1/dt)
VectorXd vel = (var_vals.segment(1, num_vels) - var_vals.segment(0, num_vels)).array() *
var_vals.segment(half + 1, num_vels).array();
// Note that for equality terms tols are 0, so error is effectively doubled
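// Top half penalises velocities above target_ + upper_tol_, bottom half penalises
// velocities below target_ + lower_tol_; an entry is positive exactly when the
// corresponding bound is violated.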
VectorXd result(vel.rows() * 2);
result.topRows(vel.rows()) = -(upper_tol_ - (vel.array() - target_));
result.bottomRows(vel.rows()) = lower_tol_ - (vel.array() - target_);
return result;
}
MatrixXd JointVelJacobianCalculator::operator()(const VectorXd& var_vals) const
{
// var_vals = (theta_t1, theta_t2, theta_t3 ... 1/dt_1, 1/dt_2, 1/dt_3 ...)
int num_vals = static_cast<int>(var_vals.rows());
int half = num_vals / 2;
int num_vels = half - 1;
MatrixXd jac = MatrixXd::Zero(num_vels * 2, num_vals);
for (int i = 0; i < num_vels; i++)
{
// v = (j_i+1 - j_i)*(1/dt)
// We calculate v with the dt from the second pt
int time_index = i + half + 1;
jac(i, i) = -1.0 * var_vals(time_index);
jac(i, i + 1) = 1.0 * var_vals(time_index);
jac(i, time_index) = var_vals(i + 1) - var_vals(i);
// All others are 0
}
// bottom half is negative velocities
jac.bottomRows(num_vels) = -jac.topRows(num_vels);
return jac;
}
} // namespace trajopt_interface
|
module Ch05.Exercise_5_2_7
import Ch05.LambdaCalculus
import Ch05.Exercise_5_2_5
%default total
||| `le m n` tests whether `m` is less than or equal to `n`
le : Term
le = let m = Var 0
n = Var 1 in
Abs 0 (Abs 1 (iszro . (sub . m . n)))
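-- Assuming `sub` from Exercise 5.2.5 is truncated subtraction on Church numerals,
-- `sub m n` reduces to the numeral for zero exactly when m ≤ n, and `iszro` turns
-- that into a Church boolean.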
||| Test whether two Church numerals are equal
equal : Term
equal = let m = Var 0
n = Var 1
m_le_n = le . m . n
n_le_m = le . n . m in
Abs 0 (Abs 1 (and . m_le_n . n_le_m))
|
// stdafx.h : include file for standard system include files,
// or project specific include files that are used frequently, but
// are changed infrequently
//
#pragma once
#pragma warning (disable : 4514 4710 4711) // inlining
#pragma warning (disable: 5045) // TODO Spectre
// #define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
// Windows Header Files:
#pragma warning (push, 1)
#pragma warning (disable : 5039 4668 4996 26814 4548 4355 4917 4702 26400 4987 4820 4365 4623 4625 4626 5026 5027 4571 4774 26412 26461 26426 26432 26447 26472 26446 26473 26440 26429 26496 26472 26482 26486 26487 26434)
#include <gsl.h>
#pragma warning (pop) // unbalanced push in span.h
#include <windows.h>
#include <Shlobj.h>
#include <Strsafe.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <memory>
#include <string>
#include <vector>
#include <array>
#include <ranges>
#include <span>
#include "Resource.h"
#pragma warning (pop)
|