function test_bug1262
% MEM 2gb
% WALLTIME 00:10:00
% DEPENDENCY ft_read_header ft_read_data ft_read_event
% Quick and dirty sanity check for reading of fcdc_buffer_offline data.
% Example data generated with sine example, saved with record.exe:
dirname = dccnpath('/home/common/matlab/fieldtrip/data/test/bug1262/0001');
hdr = ft_read_header(dirname);
assert(hdr.nChans == 16);
assert(hdr.nSamples == 1216);
dat = ft_read_data(dirname);
assert(all(size(dat) == [hdr.nChans, hdr.nSamples]));
evt = ft_read_event(dirname);
assert(numel(evt) == 0); % there are no events
|
import valuations
import analysis.topology.topological_space
import data.finsupp
import group_theory.quotient_group
universes u₁ u₂ u₃
local attribute [instance] classical.prop_decidable
variables {R : Type u₁} [comm_ring R] [decidable_eq R]
structure Valuation (R : Type u₁) [comm_ring R] :=
(Γ : Type u₁)
(grp : linear_ordered_comm_group Γ)
(val : @valuation R _ Γ grp)
namespace Valuation
open valuation
instance : has_coe_to_fun (Valuation R) :=
{ F := λ v, R → with_zero v.Γ, coe := λ v, v.val.val }
instance linear_ordered_value_group {v : Valuation R} : linear_ordered_comm_group v.Γ := v.grp
def of_valuation {Γ : Type u₂} [linear_ordered_comm_group Γ] (v : valuation R Γ) : Valuation R :=
{ Γ := (minimal_value_group v).Γ,
grp := minimal_value_group.linear_ordered_comm_group v,
val := v.minimal_valuation }
section
variables (R)
instance : setoid (Valuation R) :=
{ r := λ v₁ v₂, ∀ r s, v₁ r ≤ v₁ s ↔ v₂ r ≤ v₂ s,
iseqv := begin
split,
{ intros v r s, refl },
split,
{ intros v₁ v₂ h r s, symmetry, exact h r s },
{ intros v₁ v₂ v₃ h₁ h₂ r s,
exact iff.trans (h₁ r s) (h₂ r s) }
end }
end
lemma ne_zero_of_equiv_ne_zero {Γ₁ : Type u₂} [linear_ordered_comm_group Γ₁] {Γ₂ : Type u₃} [linear_ordered_comm_group Γ₂]
{v₁ : valuation R Γ₁} {v₂ : valuation R Γ₂} {r : R} (heq : valuation.is_equiv v₁ v₂) (H : v₁ r ≠ 0) : v₂ r ≠ 0 :=
begin
intro h,
rw [eq_zero_iff_le_zero, ← heq r 0, ← eq_zero_iff_le_zero] at h,
exact H h
end
end Valuation
section
variables (R)
definition Spv := {ineq : R → R → Prop // ∃ (v : Valuation R), ∀ r s : R, v r ≤ v s ↔ ineq r s}
end
namespace Spv
open valuation
definition mk (v : Valuation R) : Spv R := ⟨λ r s, v r ≤ v s, ⟨v, λ _ _, iff.rfl⟩⟩
definition mk' {Γ : Type u₂} [linear_ordered_comm_group Γ] (v : valuation R Γ) : Spv R := mk (Valuation.of_valuation v)
noncomputable definition out (v : Spv R) : Valuation R :=
subtype.cases_on v (λ ineq hv, classical.some hv)
noncomputable definition lift {β : Type u₃}
(f : Valuation R → β) (H : ∀ v₁ v₂ : Valuation R, v₁ ≈ v₂ → f v₁ = f v₂) : Spv R → β :=
f ∘ out
lemma out_mk {v : Valuation R} : out (mk v) ≈ v := classical.some_spec (mk v).property
@[simp] lemma mk_out {v : Spv R} : mk (out v) = v :=
begin
cases v with ineq hv,
rw subtype.ext,
ext,
exact classical.some_spec hv _ _
end
lemma lift_mk {β : Type u₃} {f : Valuation R → β} {H : ∀ v₁ v₂ : Valuation R, v₁ ≈ v₂ → f v₁ = f v₂} (v : Valuation R) :
lift f H (mk v) = f v := H _ _ out_mk
lemma exists_rep (v : Spv R) : ∃ v' : Valuation R, mk v' = v := ⟨out v, mk_out⟩
lemma ind {f : Spv R → Prop} (H : ∀ v, f (mk v)) : ∀ v, f v :=
λ v, by simpa using H (out v)
lemma sound {v₁ v₂ : Valuation R} (heq : v₁ ≈ v₂) : mk v₁ = mk v₂ :=
begin
rw subtype.ext,
funext,
ext,
exact heq _ _
end
noncomputable instance : has_coe (Spv R) (Valuation R) := ⟨out⟩
end Spv
-- TODO:
-- Also might need a variant of Wedhorn 1.27 (ii)
/-
theorem equiv_value_group_map (R : Type) [comm_ring R] (v w : valuations R) (H : v ≈ w) :
∃ φ : value_group v.f → value_group w.f, is_group_hom φ ∧ function.bijective φ :=
begin
existsi _,tactic.swap,
{ intro g,
cases g with g Hg,
unfold value_group at Hg,
unfold group.closure at Hg,
dsimp at Hg,
induction Hg,
},
{sorry
}
end
-/
namespace Spv
variables {A : Type u₁} [comm_ring A] [decidable_eq A]
definition basic_open (r s : A) : set (Spv A) :=
{v | v r ≤ v s ∧ v s ≠ 0}
lemma mk_mem_basic_open {r s : A} (v : Valuation A) : mk v ∈ basic_open r s ↔ v r ≤ v s ∧ v s ≠ 0 :=
begin
split; intro h; split,
{ exact (out_mk r s).mp h.left },
{ exact Valuation.ne_zero_of_equiv_ne_zero out_mk h.right },
{ exact (out_mk r s).mpr h.left },
{ exact Valuation.ne_zero_of_equiv_ne_zero (setoid.symm out_mk) h.right }
end
instance : topological_space (Spv A) :=
topological_space.generate_from {U : set (Spv A) | ∃ r s : A, U = basic_open r s}
end Spv
|
function Base.insert!(ll::LinkedList{T}, index::Integer, item::T) where T
    index < 1 && throw(BoundsError(ll, index))
    if index == 1
        if isempty(ll)
            return push!(ll, item)
        else
            # Splice a new node in ahead of the current head.
            ll.head = Node{T}(item, ll.head)
        end
    else
        # Inserting past position 1 into an empty list is out of bounds.
        isempty(ll) && throw(BoundsError(ll, index))
        # Walk to the node just before the insertion point.
        nd = ll.head
        while index > 2
            if nd.next isa EmptyNode
                throw(BoundsError())
            else
                nd = nd.next
                index -= 1
            end
        end
        nd.next = Node{T}(item, nd.next)
    end
    return ll
end
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory ArchInterrupt_AI
imports Interrupt_AI
begin
context Arch begin global_naming ARM_HYP
primrec arch_irq_control_inv_valid_real ::
"arch_irq_control_invocation \<Rightarrow> 'a::state_ext state \<Rightarrow> bool"
where
"arch_irq_control_inv_valid_real (ArchIRQControlIssue irq dest_slot src_slot trigger)
= (cte_wp_at ((=) cap.NullCap) dest_slot and
cte_wp_at ((=) cap.IRQControlCap) src_slot and
ex_cte_cap_wp_to is_cnode_cap dest_slot and
real_cte_at dest_slot and
K (irq \<le> maxIRQ))"
defs arch_irq_control_inv_valid_def:
"arch_irq_control_inv_valid \<equiv> arch_irq_control_inv_valid_real"
named_theorems Interrupt_AI_asms
lemma (* decode_irq_control_invocation_inv *)[Interrupt_AI_asms]:
"\<lbrace>P\<rbrace> decode_irq_control_invocation label args slot caps \<lbrace>\<lambda>rv. P\<rbrace>"
apply (simp add: decode_irq_control_invocation_def Let_def arch_check_irq_def
arch_decode_irq_control_invocation_def whenE_def, safe)
apply (wp | simp)+
done
lemma decode_irq_control_valid [Interrupt_AI_asms]:
"\<lbrace>\<lambda>s. invs s \<and> (\<forall>cap \<in> set caps. s \<turnstile> cap)
\<and> (\<forall>cap \<in> set caps. is_cnode_cap cap \<longrightarrow>
(\<forall>r \<in> cte_refs cap (interrupt_irq_node s). ex_cte_cap_wp_to is_cnode_cap r s))
\<and> cte_wp_at ((=) cap.IRQControlCap) slot s\<rbrace>
decode_irq_control_invocation label args slot caps
\<lbrace>irq_control_inv_valid\<rbrace>,-"
apply (simp add: decode_irq_control_invocation_def Let_def split_def
whenE_def arch_check_irq_def
arch_decode_irq_control_invocation_def
split del: if_split cong: if_cong)
apply (wpsimp wp: ensure_empty_stronger simp: cte_wp_at_eq_simp arch_irq_control_inv_valid_def
| wp (once) hoare_drop_imps)+
apply (clarsimp simp: linorder_not_less word_le_nat_alt unat_ucast maxIRQ_def)
apply (cases caps ; fastforce simp: cte_wp_at_eq_simp)
done
lemma get_irq_slot_different_ARCH[Interrupt_AI_asms]:
"\<lbrace>\<lambda>s. valid_global_refs s \<and> ex_cte_cap_wp_to is_cnode_cap ptr s\<rbrace>
get_irq_slot irq
\<lbrace>\<lambda>rv s. rv \<noteq> ptr\<rbrace>"
apply (simp add: get_irq_slot_def)
apply wp
apply (clarsimp simp: valid_global_refs_def valid_refs_def
ex_cte_cap_wp_to_def)
apply (elim allE, erule notE, erule cte_wp_at_weakenE)
apply (clarsimp simp: global_refs_def is_cap_simps cap_range_def)
done
lemma is_derived_use_interrupt_ARCH[Interrupt_AI_asms]:
"(is_ntfn_cap cap \<and> interrupt_derived cap cap') \<longrightarrow> (is_derived m p cap cap')"
apply (clarsimp simp: is_cap_simps)
apply (clarsimp simp: interrupt_derived_def is_derived_def)
apply (clarsimp simp: cap_master_cap_def split: cap.split_asm)
apply (simp add: is_cap_simps is_pt_cap_def vs_cap_ref_def)
done
lemmas maskInterrupt_invs_ARCH[Interrupt_AI_asms] = maskInterrupt_invs
lemma no_cap_to_obj_with_diff_IRQHandler_ARCH[Interrupt_AI_asms]:
"no_cap_to_obj_with_diff_ref (IRQHandlerCap irq) S = \<top>"
by (rule ext, simp add: no_cap_to_obj_with_diff_ref_def
cte_wp_at_caps_of_state
obj_ref_none_no_asid)
lemma (* set_irq_state_valid_cap *)[Interrupt_AI_asms]:
"\<lbrace>valid_cap cap\<rbrace> set_irq_state IRQSignal irq \<lbrace>\<lambda>rv. valid_cap cap\<rbrace>"
apply (clarsimp simp: set_irq_state_def)
apply (wp do_machine_op_valid_cap)
apply (auto simp: valid_cap_def valid_untyped_def
split: cap.splits option.splits arch_cap.splits
split del: if_split)
done
crunch valid_global_refs[Interrupt_AI_asms]: set_irq_state "valid_global_refs"
crunches arch_invoke_irq_handler
for typ_at[wp]: "\<lambda>s. P (typ_at T p s)"
and valid_list[wp]: valid_list
lemma invoke_irq_handler_invs'[Interrupt_AI_asms]:
assumes dmo_ex_inv[wp]: "\<And>f. \<lbrace>invs and ex_inv\<rbrace> do_machine_op f \<lbrace>\<lambda>rv::unit. ex_inv\<rbrace>"
assumes cap_insert_ex_inv[wp]: "\<And>cap src dest.
\<lbrace>ex_inv and invs and K (src \<noteq> dest)\<rbrace>
cap_insert cap src dest
\<lbrace>\<lambda>_.ex_inv\<rbrace>"
assumes cap_delete_one_ex_inv[wp]: "\<And>cap.
\<lbrace>ex_inv and invs\<rbrace> cap_delete_one cap \<lbrace>\<lambda>_.ex_inv\<rbrace>"
shows
"\<lbrace>invs and ex_inv and irq_handler_inv_valid i\<rbrace> invoke_irq_handler i \<lbrace>\<lambda>rv s. invs s \<and> ex_inv s\<rbrace>"
proof -
have
cap_insert_invs_ex_invs[wp]: "\<And>cap src dest. \<lbrace>ex_inv and (invs and cte_wp_at (\<lambda>c. c = NullCap) dest and valid_cap cap and
tcb_cap_valid cap dest and
ex_cte_cap_wp_to (appropriate_cte_cap cap) dest and
(\<lambda>s. \<forall>r\<in>obj_refs cap.
\<forall>p'. dest \<noteq> p' \<and> cte_wp_at (\<lambda>cap'. r \<in> obj_refs cap') p' s \<longrightarrow>
cte_wp_at (Not \<circ> is_zombie) p' s \<and> \<not> is_zombie cap) and
(\<lambda>s. cte_wp_at (is_derived (cdt s) src cap) src s) and
(\<lambda>s. cte_wp_at (\<lambda>cap'. \<forall>irq\<in>cap_irqs cap - cap_irqs cap'. irq_issued irq s)
src s) and
(\<lambda>s. \<forall>t R. cap = ReplyCap t False R \<longrightarrow>
st_tcb_at awaiting_reply t s \<and> \<not> has_reply_cap t s) and
K (\<not> is_master_reply_cap cap))\<rbrace>
cap_insert cap src dest \<lbrace>\<lambda>rv s. invs s \<and> ex_inv s\<rbrace>"
apply wp
apply (auto simp: cte_wp_at_caps_of_state)
done
show ?thesis
apply (cases i, simp_all)
apply (wp maskInterrupt_invs_ARCH)
apply simp+
apply (rename_tac irq cap prod)
apply (rule hoare_pre)
apply (wp valid_cap_typ [OF cap_delete_one_typ_at])
apply (strengthen real_cte_tcb_valid)
apply (wp real_cte_at_typ_valid [OF cap_delete_one_typ_at])
apply (rule_tac Q="\<lambda>rv s. is_ntfn_cap cap \<and> invs s
\<and> cte_wp_at (is_derived (cdt s) prod cap) prod s"
in hoare_post_imp)
apply (clarsimp simp: is_cap_simps is_derived_def cte_wp_at_caps_of_state)
apply (simp split: if_split_asm)
apply (simp add: cap_master_cap_def split: cap.split_asm)
apply (drule cte_wp_valid_cap [OF caps_of_state_cteD] | clarsimp)+
apply (clarsimp simp: cap_master_cap_simps valid_cap_def obj_at_def is_ntfn is_tcb is_cap_table
split: option.split_asm dest!:cap_master_cap_eqDs)
apply (wp cap_delete_one_still_derived)
apply simp
apply (wp get_irq_slot_ex_cte get_irq_slot_different_ARCH hoare_drop_imps)
apply (clarsimp simp: valid_state_def invs_def appropriate_cte_cap_def
is_cap_simps)
apply (erule cte_wp_at_weakenE, simp add: is_derived_use_interrupt_ARCH)
apply (wp| simp add: )+
done
qed
lemma (* invoke_irq_control_invs *) [Interrupt_AI_asms]:
"\<lbrace>invs and irq_control_inv_valid i\<rbrace> invoke_irq_control i \<lbrace>\<lambda>rv. invs\<rbrace>"
apply (cases i, simp_all)
apply (wp cap_insert_simple_invs
| simp add: IRQHandler_valid is_cap_simps no_cap_to_obj_with_diff_IRQHandler_ARCH
| strengthen real_cte_tcb_valid)+
apply (clarsimp simp: cte_wp_at_caps_of_state
is_simple_cap_def is_cap_simps is_pt_cap_def
safe_parent_for_def
ex_cte_cap_to_cnode_always_appropriate_strg)
apply (case_tac x2)
apply (simp add: arch_irq_control_inv_valid_def)
apply (wp cap_insert_simple_invs
| simp add: IRQHandler_valid is_cap_simps no_cap_to_obj_with_diff_IRQHandler_ARCH
| strengthen real_cte_tcb_valid)+
apply (clarsimp simp: cte_wp_at_caps_of_state
is_simple_cap_def is_cap_simps is_pt_cap_def
safe_parent_for_def
ex_cte_cap_to_cnode_always_appropriate_strg)
done
crunch device_state_inv[wp]: resetTimer "\<lambda>ms. P (device_state ms)"
lemma resetTimer_invs_ARCH[Interrupt_AI_asms]:
"\<lbrace>invs\<rbrace> do_machine_op resetTimer \<lbrace>\<lambda>_. invs\<rbrace>"
apply (wp dmo_invs)
apply safe
apply (drule_tac Q="%_ b. underlying_memory b p = underlying_memory m p"
in use_valid)
apply (simp add: resetTimer_def
machine_op_lift_def machine_rest_lift_def split_def)
apply wp
apply (clarsimp+)[2]
apply(erule use_valid, wp no_irq_resetTimer no_irq, assumption)
done
lemma empty_fail_ackInterrupt_ARCH[Interrupt_AI_asms]:
"empty_fail (ackInterrupt irq)"
by (wp | simp add: ackInterrupt_def)+
lemma empty_fail_maskInterrupt_ARCH[Interrupt_AI_asms]:
"empty_fail (maskInterrupt f irq)"
by (wp | simp add: maskInterrupt_def)+
lemma dmo_st_tcb_cur[wp]:
"\<lbrace>\<lambda>s. st_tcb_at P (cur_thread s) s\<rbrace> do_machine_op f \<lbrace>\<lambda>rv s. st_tcb_at P (cur_thread s) s\<rbrace>"
by (rule hoare_lift_Pf[where f=cur_thread]; wp)
lemma dmo_ex_nonz_cap_to[wp]:
"\<lbrace>\<lambda>s. ex_nonz_cap_to (cur_thread s) s\<rbrace> do_machine_op f \<lbrace>\<lambda>rv s. ex_nonz_cap_to (cur_thread s) s\<rbrace>"
by (rule hoare_lift_Pf[where f=cur_thread]; wp)
lemma conj_imp_strg:
"P \<Longrightarrow> (A \<longrightarrow> P) \<and> (B \<longrightarrow> P)" by simp
lemma runnable_eq:
"runnable st = (st = Running \<or> st = Restart)"
by (cases st; simp)
lemma halted_eq:
"halted st = (st = Inactive \<or> st = IdleThreadState)"
by (cases st; simp)
crunches vgic_update, vgic_update_lr, vcpu_update for ex_nonz_cap_to[wp]: "ex_nonz_cap_to p"
(wp: ex_nonz_cap_to_pres)
lemma vgic_maintenance_invs[wp]:
"\<lbrace>invs\<rbrace> vgic_maintenance \<lbrace>\<lambda>_. invs\<rbrace>"
unfolding vgic_maintenance_def
supply if_split[split del] valid_fault_def[simp]
apply (wpsimp simp: get_gic_vcpu_ctrl_misr_def get_gic_vcpu_ctrl_eisr1_def
get_gic_vcpu_ctrl_eisr0_def if_apply_def2
wp: thread_get_wp' hoare_vcg_imp_lift' gts_wp hoare_vcg_all_lift
| wps
| wp (once) hoare_drop_imp[where f="do_machine_op m" for m]
hoare_drop_imp[where f="return $ m" for m]
| strengthen not_pred_tcb_at_strengthen
| wp (once) hoare_vcg_imp_lift' gts_wp)+
apply (frule tcb_at_invs)
apply (clarsimp simp: runnable_eq halted_eq not_pred_tcb)
apply (fastforce intro!: st_tcb_ex_cap[where P=active]
simp: not_pred_tcb st_tcb_at_def obj_at_def halted_eq)
done
lemma vppi_event_invs[wp]:
"\<lbrace>invs\<rbrace> vppi_event irq \<lbrace>\<lambda>_. invs\<rbrace>"
unfolding vppi_event_def
supply if_split[split del] valid_fault_def[simp]
apply (wpsimp simp: if_apply_def2
wp: hoare_vcg_imp_lift' gts_wp hoare_vcg_all_lift maskInterrupt_invs
cong: vcpu.fold_congs
| wps
| strengthen not_pred_tcb_at_strengthen)+
apply (frule tcb_at_invs)
apply (clarsimp simp: runnable_eq halted_eq not_pred_tcb)
apply (fastforce intro!: st_tcb_ex_cap[where P=active]
simp: not_pred_tcb st_tcb_at_def obj_at_def halted_eq)
done
lemma handle_reserved_irq_invs[wp]:
"\<lbrace>invs\<rbrace> handle_reserved_irq irq \<lbrace>\<lambda>_. invs\<rbrace>"
unfolding handle_reserved_irq_def by (wpsimp simp: non_kernel_IRQs_def)
lemma (* handle_interrupt_invs *) [Interrupt_AI_asms]:
"\<lbrace>invs\<rbrace> handle_interrupt irq \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: handle_interrupt_def)
apply (rule conjI; rule impI)
apply (simp add: do_machine_op_bind empty_fail_ackInterrupt_ARCH empty_fail_maskInterrupt_ARCH)
apply (wp dmo_maskInterrupt_invs maskInterrupt_invs_ARCH dmo_ackInterrupt send_signal_interrupt_states
| wpc | simp add: arch_mask_irq_signal_def)+
apply (wp get_cap_wp send_signal_interrupt_states )
apply (rule_tac Q="\<lambda>rv. invs and (\<lambda>s. st = interrupt_states s irq)" in hoare_post_imp)
apply (clarsimp simp: ex_nonz_cap_to_def invs_valid_objs)
apply (intro allI exI, erule cte_wp_at_weakenE)
apply (clarsimp simp: is_cap_simps)
apply (wpsimp wp: hoare_drop_imps resetTimer_invs_ARCH
simp: get_irq_state_def
| rule conjI)+
done
lemma sts_arch_irq_control_inv_valid[wp, Interrupt_AI_asms]:
"\<lbrace>arch_irq_control_inv_valid i\<rbrace>
set_thread_state t st
\<lbrace>\<lambda>rv. arch_irq_control_inv_valid i\<rbrace>"
apply (simp add: arch_irq_control_inv_valid_def)
apply (cases i)
apply (clarsimp)
apply (wp ex_cte_cap_to_pres | simp add: cap_table_at_typ)+
done
end
interpretation Interrupt_AI?: Interrupt_AI
proof goal_cases
interpret Arch .
case 1 show ?case by (intro_locales; (unfold_locales, simp_all add: Interrupt_AI_asms)?)
qed
end
|
{------------------------------------------------------------------------------
IdrUtils.idr
------------------------------------------------------------------------------}
||| Miscellaneous Idris utilities.
module IdrUtils
import Debug.Error
%language ElabReflection -- for Debug.Error.error
%default total
-------------------------------------------------------------------------------
-- unwrap --
-------------------------------------------------------------------------------
||| Unsafe unwrap of `Maybe` value.
export
unwrap : Maybe a -> a
unwrap = fromMaybe $
error "unwrap failed: expected Just something, found Nothing"
-------------------------------------------------------------------------------
-- test --
-------------------------------------------------------------------------------
export
test : IO ()
test = do
let foo = [1,2,3]
let one = unwrap $ index' 0 foo
putStrLn $ "one: " ++ show one
|
# 1. Recall
### 1.1 Recall: Transformer
### 1.2 Recall: BERT
### 1.3 Recall: Language Model
- For the sequence $\color{MediumOrchid}{w_{1}, w_{2}, \ldots, w_{n}}$, using the chain rule, we have:
$$
P\left(w_{1}, \ldots, w_{n}\right)=P\left(w_{n} \mid w_{1}, \ldots, w_{n-1}\right) P\left(w_{n-1} \mid w_{1}, \ldots, w_{n-2}\right) \ldots P\left(w_{2} \mid w_{1}\right) P\left(w_{1}\right)
$$
- N-Gram Approximation: $\color{MediumOrchid}{P\left(w_{1}, \ldots, w_{n}\right)=\prod_{i=1}^{n} P\left(w_{i} \mid w_{i-N+1}, \ldots, w_{i-1}\right)}$
- Applications:
- Machine Translation: $\color{MediumOrchid}{P(\text{the cat is small}) > P(\text{small is the cat})}$ (illustrated by the count-based sketch below)
- Grammar Checking: $\color{MediumOrchid}{P(\text{He graduated from SJTU.}) > P(\text{He graduated on SJTU.})}$
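A minimal sketch of the N-gram approximation above, assuming a toy corpus and add-alpha smoothed bigram (N = 2) estimates; the corpus and smoothing constant are illustrative only.

```python
from collections import Counter

# Toy corpus; these counts stand in for a real training set.
corpus = "the cat is small . the dog is small . the cat is cute .".split()

unigrams = Counter(corpus)
bigrams = Counter(zip(corpus, corpus[1:]))

def bigram_prob(w_prev, w, alpha=0.1, vocab_size=len(unigrams)):
    # Add-alpha smoothed MLE estimate of P(w | w_prev).
    return (bigrams[(w_prev, w)] + alpha) / (unigrams[w_prev] + alpha * vocab_size)

def sentence_prob(words):
    # Chain rule with a bigram approximation:
    # P(w_1..w_n) ~= P(w_1) * prod_i P(w_i | w_{i-1}); sentence padding omitted.
    p = unigrams[words[0]] / sum(unigrams.values())
    for prev, cur in zip(words, words[1:]):
        p *= bigram_prob(prev, cur)
    return p

print(sentence_prob("the cat is small".split()))   # relatively large
print(sentence_prob("small is the cat".split()))   # much smaller
```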
# 2. GPT
GPT: Generative Pre-Training
Related papers:
1. Radford, A., & Narasimhan, K. (2018). Improving Language Understanding by Generative Pre-Training.
2. Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. (2019). Language Models are Unsupervised Multitask Learners.
3. Brown, T., et al. (2020). Language Models are Few-Shot Learners. arXiv:2005.14165.
### 2.1 Introduction
1. In this paper, we explore a semi-supervised approach for language understanding tasks using a combination of <font color=red>unsupervised pre-training and supervised fine-tuning.</font>
2. Our goal is to learn a <font color=red>universal representation</font> that transfers with little adaptation to a wide range of tasks.
3. We employ a two-stage training procedure.
- First, we use a language modeling objective on the unlabeled data to learn the initial parameters of a neural network model.
- Subsequently, we adapt these parameters to a target task using the corresponding supervised objective.
4. For our model architecture, <font color=red>we use the Transformer</font>, This model choice provides us with a more structured memory for <font color=red>handling long-term dependencies in text</font>, compared to alternatives like recurrent networks, <font color=red>resulting in robust transfer performance across diverse tasks. </font>
5. We evaluate our approach on four types of language understanding tasks:
- natural language inference;
- question answering;
- semantic similarity;
- text classification.
### 2.2 Related Work
#### Semi-supervised learning for NLP
Over the last few years, researchers have demonstrated the benefits of using word embeddings, which are trained on unlabeled corpora, to improve performance on a variety of tasks. <font color=blue>These approaches, however, mainly transfer word-level information</font>, whereas <font color=red>we aim to capture higher-level semantics.</font>
#### Unsupervised pre-training
1. Unsupervised pre-training is a special case of semi-supervised learning where <font color=red>the goal is to find a good initialization point instead of modifying the supervised learning objective.</font>
2. Subsequent research demonstrated that <font color=red>pre-training acts as a regularization scheme, enabling better generalization in deep neural networks.</font>
3. The closest line of work to ours involves pre-training a neural network using a language modeling objective and then fine-tuning it on a target task with supervision.
4. Our choice of **transformer** networks allows us to **capture longer-range linguistic structure**, as demonstrated in our experiments.
5. Other approaches <font color=blue>use **hidden representations** from a pre-trained language or machine translation model</font> as auxiliary features while training a supervised model on the target task.
#### Auxiliary training objectives
1. Adding auxiliary unsupervised training objectives is an alternative form of semi-supervised learning.
2. Our experiments also use an auxiliary objective, but as we show, <font color=blue>unsupervised pre-training already learns several linguistic aspects relevant to target tasks.</font>
### 2.3 Framework
#### 2.3.1 Unsupervised pre-training
Given a corpus of tokens $\color{MediumOrchid}{\mathcal{U}=\left\{u_{1}, \ldots, u_{n}\right\}}$, a standard language modeling objective is used to maximize the likelihood:
$$
\color{MediumOrchid}{L_{1}(\mathcal{U})=\sum_{i} \log P\left(u_{i} \mid u_{i-k}, \ldots, u_{i-1} ; \Theta\right)} \tag{1}
$$
- $k:$ size of the context window
GPT trains the language model with a multi-layer Transformer decoder (a small sketch of this forward pass follows the notation list below):
$$
\begin{eqnarray}
h_{0} &=& UW_{e} + W_{p} \\
h_{l} &=& \text{transformer\_block}(h_{l-1}) \quad \forall l \in [1, n] \tag{2}\\
P(u) &=& \text{softmax}(h_{n}W_{e}^{\mathrm{T}})
\end{eqnarray}
$$
- $U=(u_{-k}, \cdots, u_{-1}):$ context vector of tokens;
- $n:$ number of layers;
- $W_{e}:$ token embedding matrix;
- $W_{p}:$ position embedding matrix.
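A minimal numpy sketch of equation (2), assuming a toy vocabulary and randomly initialized weights; `transformer_block` here is only a stand-in for a masked self-attention block, so this illustrates the data flow, not the real model.

```python
import numpy as np

rng = np.random.default_rng(0)
vocab, d_model, k = 100, 16, 8                # vocab size, hidden size, context window

W_e = rng.normal(0, 0.02, (vocab, d_model))   # token embedding matrix W_e
W_p = rng.normal(0, 0.02, (k, d_model))       # position embedding matrix W_p

def transformer_block(h):
    # Stand-in for a masked self-attention + MLP block; a random affine map
    # keeps the sketch short and dependency-free.
    W = rng.normal(0, 0.02, (d_model, d_model))
    return np.tanh(h @ W)

def next_token_distribution(context_ids, n_layers=2):
    U = np.eye(vocab)[context_ids]             # one-hot context, shape (k, vocab)
    h = U @ W_e + W_p[: len(context_ids)]      # h_0 = U W_e + W_p
    for _ in range(n_layers):
        h = transformer_block(h)               # h_l = transformer_block(h_{l-1})
    logits = h @ W_e.T                         # output weights tied to W_e
    exp = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return exp / exp.sum(axis=-1, keepdims=True)   # P(u) = softmax(h_n W_e^T)

probs = next_token_distribution([5, 17, 42, 7])
print(probs.shape)   # (4, 100): a next-token distribution at every position
```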
#### 2.3.2 Supervised fine-tuning
Assume a labeled dataset $\mathcal{C}$ in which each example consists of an input sequence $x^{1}, \cdots, x^{m}$ and a label $y$.
The input sequence is fed through the pre-trained model, the activation $h_{l}^{m}$ of the last transformer block is taken, and it is passed to an added linear output layer to predict $y$:
$$
P\left(y \mid x^{1}, \ldots, x^{m}\right)=\operatorname{softmax}\left(h_{l}^{m} W_{y}\right) \tag{3}
$$
The goal is to maximize the following objective:
$$
L_{2}(\mathcal{C})=\sum_{(x, y)} \log P\left(y \mid x^{1}, \ldots, x^{m}\right) \tag{4}
$$
Finally, this is optimized jointly with $L_{1}$, giving the overall objective:
$$
L_{3}(\mathcal{C})=L_{2}(\mathcal{C})+\lambda * L_{1}(\mathcal{C}) \tag{5}
$$
Optimizing the language modeling objective together with the fine-tuning objective (see the sketch after this list) helps by:
- improving generalization of the supervised model;
- accelerating convergence.
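A small sketch of the combined objective (5), assuming per-position language-modeling logits and classification logits are already available from the model; the toy shapes and the helper names are illustrative only, and $\lambda$ is a weighting hyperparameter.

```python
import numpy as np

def cross_entropy(logits, target):
    # Negative log-likelihood of `target` under softmax(logits).
    logits = logits - logits.max()
    return -(logits[target] - np.log(np.exp(logits).sum()))

def joint_loss(lm_logits, next_tokens, cls_logits, label, lam=0.5):
    # L1: auxiliary language-modeling loss over the labeled inputs.
    L1 = sum(cross_entropy(l, t) for l, t in zip(lm_logits, next_tokens))
    # L2: supervised classification loss from the final representation.
    L2 = cross_entropy(cls_logits, label)
    # Minimizing L2 + lam * L1 corresponds to maximizing L3 = L2 + lambda * L1
    # written in log-likelihood form in equation (5).
    return L2 + lam * L1

# Toy example: 3 positions, vocabulary of 10, 4 classes.
rng = np.random.default_rng(0)
lm_logits = rng.normal(size=(3, 10))
print(joint_loss(lm_logits, [1, 4, 7], rng.normal(size=4), label=2))
```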
# 3. Transformer-XL
- Segment-level Recurrence
- Relative Positional Embedding
In NLP, two state-of-the-art architectures are used for language modeling: RNNs and Transformers. An RNN processes the input word by word (or character by character) in sequence order and learns the relations between tokens step by step, whereas a Transformer receives an entire segment at once and uses self-attention to learn the dependencies among its tokens.
Both, however, share a common limitation: they are unable to model dependencies longer than a fixed length.
Transformer-XL <font color='#c63c26'>combines the strengths of RNN-style sequence modeling and Transformer self-attention: it applies the Transformer's self-attention module within each segment of the input and uses a recurrence mechanism to learn the dependencies between consecutive segments.</font>
### 3.1 vanilla Transformer
Al-Rfou et al. proposed a vanilla model based on the Transformer that predicts the next character of a segment from the preceding characters. For example, <font color=MediumOrchid>it predicts character $x_{n}$ from $x_{1}, x_{2}, \cdots, x_{n-1}$, and the sequence after $x_{n}$ is masked out.</font> The paper uses a 64-layer model limited to relatively short inputs of 512 characters, so the input is split into segments and each segment is learned from separately, as shown in the figure above. During evaluation, the model shifts the input one character to the right at each step, yielding one prediction per character.
The vanilla model still has several drawbacks:
1. Because segments are trained independently, the longest dependency that can be captured between tokens is bounded by the segment length;
2. For efficiency, segments are cut at a fixed length without regard to natural sentence boundaries, so the resulting segments are semantically incomplete (the context fragmentation problem);
3. Slow inference: at evaluation time the hidden vector of the last position is typically taken as the output, and to make full use of the context the whole window is shifted right by one position after every prediction and recomputed, as shown in figure (b) above, which makes evaluation very inefficient.
### 3.2 Segment-Level Recurrence with State Reuse
While processing the current segment, Transformer-XL <font color=MediumOrchid>caches and reuses the hidden-state sequences of all layers from the previous segment; the cached sequences participate only in the forward pass and receive no gradients. This is what is meant by segment-level recurrence.</font>
#### Notation:
Two consecutive segments are written $s_{\tau}=\left[x_{\tau, 1}, x_{\tau, 2}, \ldots, x_{\tau, L}\right],\ s_{\tau+1}=\left[x_{\tau+1, 1}, x_{\tau+1, 2}, \ldots, x_{\tau+1, L}\right]$, where $L$ is the segment length;
assuming the model contains $~N~$ Transformer blocks, each segment produces $~N~$ hidden-state sequences of length $L$;
$\mathbf{h}_{\tau}^{n} \in \mathbb{R}^{L \times d}$ denotes the layer-$n$ hidden-state sequence of the $\tau$-th segment;
$\text{SG}$ is the stop-gradient operator: no gradients are back-propagated into the hidden states of $s_{\tau}$;
$\widetilde{\mathbf{h}}_{\tau+1}^{n-1}$ is the concatenation of the two hidden-state sequences along the length dimension; each of the two sequences inside the brackets has shape $L \times d$, and the concatenation has shape $2L \times d$;
the computation of $\mathbf{q}$ is unchanged and uses only the hidden states of the current segment, so the resulting $\mathbf{q}$ sequence still has length $L$;
$\mathbf{k}, \mathbf{v}$ are computed from the concatenated $\widetilde{\mathbf{h}}$, so their sequences have length $2L$;
the length of the Transformer's output hidden-state sequence is determined by the query length, not by the key and value lengths.
$$
\begin{array}{l}
\widetilde{\mathbf{h}}_{\tau+1}^{n-1}=\left[\mathrm{SG}\left(\mathbf{h}_{\tau}^{n-1}\right) \circ \mathbf{h}_{\tau+1}^{n-1}\right] \\
\mathbf{q}_{\tau+1}^{n}, \mathbf{k}_{\tau+1}^{n}, \mathbf{v}_{\tau+1}^{n}=\mathbf{h}_{\tau+1}^{n-1} \mathbf{W}_{q}^{\top}, \widetilde{\mathbf{h}}_{\tau+1}^{n-1} \mathbf{W}_{k}^{\top}, \widetilde{\mathbf{h}}_{\tau+1}^{n-1} \mathbf{W}_{v}^{\top} \\
\mathbf{h}_{\tau+1}^{n}= \text{Transformer-Layer} \left(\mathbf{q}_{\tau+1}^{n}, \mathbf{k}_{\tau+1}^{n}, \mathbf{v}_{\tau+1}^{n}\right)
\end{array}
$$
The training and evaluation procedures are illustrated in Fig. 2. One point worth noting: <font color=MediumOrchid>within the current segment, each hidden vector at layer $~n~$ depends not only on the layer-$(n-1)$ hidden vector at the same position but also on the layer-$(n-1)$ hidden vectors of the preceding $L-1$ positions, and every additional layer extends this dependency span by $(L-1)$, so the longest dependency is $N(L-1)$.</font> When processing long text, the hidden states of the previous segment can be cached and reused instead of recomputed, which greatly improves efficiency.
Above only the previous segment was kept; in practice as many segments as CPU or GPU memory allows can be cached. In the paper's experiments only one segment is cached during training, while multiple segments are cached during evaluation. A small sketch of this caching scheme follows.
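A minimal numpy sketch of the segment-level recurrence above, assuming random projection weights and a single attention head; the cached memory plays the role of $\mathrm{SG}(\mathbf{h}_{\tau}^{n-1})$ (in an autodiff framework it would be detached so no gradient flows into it), and `xl_attention_layer` is only an illustrative stand-in for one Transformer-XL layer.

```python
import numpy as np

rng = np.random.default_rng(0)
L, d = 4, 6                                   # segment length, hidden size
W_q, W_k, W_v = (rng.normal(0, 0.1, (d, d)) for _ in range(3))

def xl_attention_layer(h_curr, mem):
    # h_tilde = [ SG(h_tau^{n-1}) , h_{tau+1}^{n-1} ]: concat along the length axis.
    h_tilde = np.concatenate([mem, h_curr], axis=0)        # (2L, d)
    q = h_curr @ W_q             # queries come from the current segment only: (L, d)
    k = h_tilde @ W_k            # keys/values span memory + current segment: (2L, d)
    v = h_tilde @ W_v
    scores = q @ k.T / np.sqrt(d)                          # (L, 2L)
    attn = np.exp(scores - scores.max(axis=-1, keepdims=True))
    attn /= attn.sum(axis=-1, keepdims=True)
    return attn @ v              # output length follows the query length: (L, d)

mem = np.zeros((L, d))           # cached layer-(n-1) states of the previous segment
for segment in range(3):         # a stream of segments tau, tau+1, ...
    h = rng.normal(size=(L, d))  # stand-in for this segment's layer-(n-1) states
    out = xl_attention_layer(h, mem)
    mem = h.copy()               # cache for the next segment; in an autodiff
                                 # framework this would be detached (stop-gradient)
print(out.shape)                 # (4, 6)
```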
### 3.3 Relative Positional Encodings
Transformer-XL abandons the vanilla Transformer's absolute positional encodings in favor of relative positional encodings. <font color=MediumOrchid>Concretely, when computing attention scores, only the relative position between the query vector and the key vector is considered,</font> and this relative-position information is injected into the attention computation of every Transformer layer.
In the vanilla Transformer, the attention score between positions $i$ and $j$ with absolute position embeddings expands as follows:
$$
\begin{aligned}
\mathbf { A } _ { i , j } ^ { \mathrm { abs } }
& = \left\{ \mathbf { W }_{ q } \left( \mathbf { E }_{ x_{ i } } + \mathbf { U }_{ i } \right) \right\}^{\top}
\left\{ \mathbf { W }_{ k } \left( \mathbf { E }_{ x_{ j } } + \mathbf { U }_{ j } \right) \right\}\\
\quad \\
& = \underbrace { \mathbf { E } _ { x _ { i } } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k } \mathbf { E } _ { x _ { j } } } _ { ( a ) } + \underbrace { \mathbf { E } _ { x _ { i } } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k } \mathbf { U } _ { j } } _ { ( b ) } + \underbrace { \mathbf { U } _ { i } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k } \mathbf { E } _ { x _ { j } } } _ { ( c ) } + \underbrace { \mathbf { U } _ { i } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k } \mathbf { U } _ { j } } _ { ( d ) }
\end{aligned}
$$
In Transformer-XL, the attention score is instead computed with relative positions:
$$
\begin{aligned}
\mathbf { A } _ { i , j } ^ { \mathrm { rel } }
& = \underbrace { \mathbf { E } _ { x _ { i } } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k,E } \mathbf { E } _ { x _ { j } } } _ { ( a ) } + \underbrace { \mathbf { E } _ { x _ { i } } ^ { \top } \mathbf { W } _ { q } ^ { \top } \mathbf { W } _ { k,R } \color{DeepSkyBlue}{ \mathbf { R } _ { i-j }} } _ { ( b ) } + \underbrace { \color{red}{u ^ { \top }} \mathbf { W } _ { k,E } \mathbf { E } _ { x _ { j } } } _ { ( c ) } + \underbrace { \color{red}{ v ^ { \top }} \mathbf { W } _ { k,R } \color{DeepSkyBlue}{ \mathbf { R } _ { i-j }} } _ { ( d ) }
\end{aligned}
$$
where (a small numeric sketch of this decomposition follows the list):
- $\color{red}{u,v}$ are trainable parameters;
- $\mathbf{W}_{k,E}$ produces the content-based key vectors;
- $\mathbf{W}_{k,R}$ produces the location-based key vectors;
- $\mathbf{R} \in \mathbb{R}^{L_{max} \times d}$, whose $i$-th row is the embedding for a relative offset of $i$. The paper emphasizes that $\mathbf{R}$ is generated by sinusoidal functions rather than learned.
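A small numpy sketch of the $\mathbf{A}^{\mathrm{rel}}_{i,j}$ decomposition, assuming random content embeddings and a sinusoidal $\mathbf{R}$; it evaluates the four terms (a)-(d) for a single pair $(i, j)$ and is illustrative rather than an implementation of the full model.

```python
import numpy as np

rng = np.random.default_rng(0)
d, L_max = 8, 16

def sinusoid(rel):
    # Sinusoidal relative-position embedding R_{i-j} (fixed, not learned).
    freqs = 1.0 / (10000 ** (np.arange(0, d, 2) / d))
    return np.concatenate([np.sin(rel * freqs), np.cos(rel * freqs)])

E = rng.normal(0, 0.1, (L_max, d))            # content embeddings E_{x_i}
W_q  = rng.normal(0, 0.1, (d, d))
W_kE = rng.normal(0, 0.1, (d, d))             # content-based key projection
W_kR = rng.normal(0, 0.1, (d, d))             # location-based key projection
u, v = rng.normal(0, 0.1, d), rng.normal(0, 0.1, d)   # trainable biases

def rel_attention_score(i, j):
    R = sinusoid(i - j)
    a  = E[i] @ W_q.T @ W_kE @ E[j]           # (a) content-content
    b  = E[i] @ W_q.T @ W_kR @ R              # (b) content-position
    c  = u @ W_kE @ E[j]                      # (c) global content bias
    d_ = v @ W_kR @ R                         # (d) global position bias
    return a + b + c + d_

print(rel_attention_score(5, 2))
```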
Finally, for an $N$-layer Transformer-XL with a single attention head, the per-layer computation is as follows:
$\text{For} \quad n=1,\cdots, N:$
# 4. XLNet
*XLNet: Generalized Autoregressive Pretraining for Language Understanding*
Part of Advances in Neural Information Processing Systems 32 (<font color=red>NeurIPS 2019</font>)
**Authors:**
Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R. Salakhutdinov, Quoc V. Le
## 4.1 AR vs. AE
### 4.1.1 Auto-Regressive LM
Before ELMo (2018.03) and BERT (2018.10.11), what people usually meant by a language model was one that predicts the next word from the preceding context, i.e. a left-to-right language model (or the reverse direction). Such models are called auto-regressive language models (AR LMs), and GPT is a typical example. ELMo appears to use both the left and the right context, but it is still essentially auto-regressive: <font color=MediumOrchid>it trains two separate AR LMs, one per direction, and concatenates the hidden states of the two LSTMs to emulate a bidirectional language model, so at its core it remains an AR LM.</font>
Given a text sequence $\pmb{x}=\left[x_{1}, \ldots, x_{T}\right]$, the language model adjusts its parameters to maximize the likelihood of the training data:
$$
\max _{\theta} \log p_{\theta}(\pmb{x}) \color{red}{=} \sum_{t=1}^{T} \log p_{\theta}\left(x_{t} \mid \pmb{x}_{<t}\right)=\sum_{t=1}^{T} \log \frac{\exp \left(h_{\theta}\left(\pmb{x}_{1: t-1}\right)^{T} e\left(x_{t}\right)\right)}{\sum_{x^{\prime}} \exp \left(h_{\theta}\left(\pmb{x}_{1: t-1}\right)^{T} e\left(x^{\prime}\right)\right)}
$$
- $\pmb{x}_{<t}$: all tokens before time step $t$, i.e. $\pmb{x}_{1:t-1}$;
- $h_{\theta}\left(\pmb{x}_{1: t-1}\right)$: the hidden state produced by an RNN or Transformer that encodes the context before time $t$;
- $e(x)$: the embedding of token $x$;
- the '=' holds exactly because of the chain rule.
Advantages of AR LMs:
- No discrepancy between pre-training and fine-tuning;
- Dependencies among the predicted tokens are taken into account.
Disadvantage of AR LMs:
- <font color='#c63c26'>They cannot use the left and right context at the same time.</font>
### 4.1.2 Auto-Encoding LM
BERT is a typical auto-encoding language model (AE LM). It randomly selects 15% of the tokens in the sequence $~\pmb{x}~$ and replaces them with \[MASK\], producing a corrupted version $\hat{\pmb{x}}$. Writing the masked-out original tokens as $\bar{\pmb{x}}$, BERT tries to recover (guess) the original values from the surrounding context, i.e.:
$$
\max _{\theta} \log p_{\theta}(\overline{\mathbf{x}} \mid \hat{\mathbf{x}}) \color{red}{ \approx } \sum_{t=1}^{T} m_{t} \log p_{\theta}\left(x_{t} \mid \hat{\mathbf{x}}\right)=\sum_{t=1}^{T} m_{t} \log \frac{\exp \left(H_{\theta}(\mathbf{x})_{t}^{T} e\left(x_{t}\right)\right)}{\sum_{x^{\prime}} \exp \left(H_{\theta}(\mathbf{x})_{t}^{T} e\left(x^{\prime}\right)\right)}
$$
- $m_{t}=1$ indicates that position $t$ is masked and must be recovered;
- $H_{\theta}$ is a Transformer that maps the length-$T$ sequence $\pmb{x}$ to a sequence of hidden states $H_{\theta}(\mathbf{x})=\left[H_{\theta}(\mathbf{x})_{1}, H_{\theta}(\mathbf{x})_{2}, \ldots, H_{\theta}(\mathbf{x})_{T}\right]$;
- the '$\color{red}{\approx}$' holds because of a conditional independence assumption: $P(\text{New York} \mid \text{is, a, city}) \color{red}{\approx} P(\text{New} \mid \text{is, a, city})\cdot P(\text{York} \mid \text{is, a, city})$.
An AE LM naturally accommodates a bidirectional language model, seeing both the left and right context of the predicted token, which is its main advantage. However, <font color='#c63c26'>introducing the \[MASK\] token on the input side creates a mismatch between pre-training and fine-tuning, because \[MASK\] never appears during fine-tuning.</font>
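A small numpy sketch of the masked-LM objective above, assuming a toy vocabulary; `encode` is a hypothetical stand-in for the bidirectional Transformer $H_{\theta}$, and each masked position is predicted independently given the corrupted input $\hat{\mathbf{x}}$, mirroring the independence assumption.

```python
import numpy as np

rng = np.random.default_rng(0)
vocab, d, MASK = 50, 8, 0

W_e = rng.normal(0, 0.1, (vocab, d))          # shared token embeddings e(x)

def encode(token_ids):
    # Hypothetical stand-in for H_theta: every output position may look at
    # the whole (corrupted) sequence, unlike an AR model.
    h = W_e[token_ids]
    return np.tanh(h @ rng.normal(0, 0.1, (d, d)) + h.mean(axis=0))

def masked_lm_loss(x, mask_prob=0.15):
    m = rng.random(len(x)) < mask_prob        # m_t = 1: position t is masked
    x_hat = np.where(m, MASK, x)              # corrupted input \hat{x}
    H = encode(x_hat)
    loss = 0.0
    for t in np.flatnonzero(m):               # sum only over masked positions
        logits = H[t] @ W_e.T                 # scores H_theta(x)_t^T e(x')
        logits -= logits.max()
        loss -= logits[x[t]] - np.log(np.exp(logits).sum())
    return loss                               # each masked token treated independently

x = rng.integers(1, vocab, size=20)
print(masked_lm_loss(x))
```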
<font color='#c63c26'>XLNet's starting point is precisely this: can the advantages of AR LMs and denoising AE LMs be combined? Concretely, staying within the AR framework, how can an effect equivalent to a bidirectional language model be obtained?</font>
## 4.2 Permutation Language Model
#### Key points:
- Sample a factorization order;
- Determine the attention masks based on the order;
- Optimize a standard language modeling objective:
$$
\max _{\theta} \mathbb{E}_{\mathbf{z} \sim \mathcal{Z}_{T}}\left[\sum_{t=1}^{T} \log p_{\theta}\left(x_{z_{t}} \mid \mathbf{x}_{\mathbf{z}_{<t}}\right)\right]
$$
- Benefits:
- Autoregressive, avoiding the disadvantages of AE;
- Able to model bidirectional context
Concrete implementation: <font color=blue>sample one permutation of the sentence, "mask" out a certain number of tokens at the end of that order (slightly different from BERT's [MASK]), and then predict the "masked" tokens one by one, auto-regressively, following the sampled order.</font>
<font color=red>How many tokens at the end should be "masked"?</font>
The authors introduce a hyperparameter $K$, equal to the total length divided by the number of tokens to predict. In the figure above, the sequence length is 7 and 2 tokens are predicted, so $K=7/2$. The experiments in the paper find the best $K$ to lie between 6 and 7; taking the reciprocal ($\frac{1}{6}$ to $\frac{1}{7}$) gives a fraction between 14.3% and 16.7%. BERT replaces exactly 15% of tokens with \[MASK\], so the two choices are probably fundamentally related.
<font color=red>On sampling sentence permutations:</font> a sentence of length $T$ has $T!$ permutations, and enumerating them all is infeasible. Let $\mathcal{Z}_{T}$ denote the set of all permutations and $\mathbf{z}$ an order sampled from it, written $\mathbf{z} \sim \mathcal{Z}_{T}$.
XLNet does not actually shuffle the order of the input sentence; the permutation is implemented through the Transformer's attention masks, as sketched below.
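A small numpy sketch of how a sampled factorization order $\mathbf{z}$ is turned into attention masks (the input itself is never reordered); entry `[i, j]` of a mask says whether position $i$ may attend to position $j$. This is a simplified illustration, not the full XLNet masking logic.

```python
import numpy as np

rng = np.random.default_rng(0)
T = 5                                    # sequence length ("New York is a city")

z = rng.permutation(T)                   # sampled factorization order z ~ Z_T
step_of = np.empty(T, dtype=int)         # step_of[i]: where position i occurs in z
step_of[z] = np.arange(T)

# Content-stream mask: position i may attend to position j iff j occurs no
# later than i in the factorization order (z_{<=t}); the query stream uses a
# strict '<', hiding the token's own content.
content_mask = step_of[None, :] <= step_of[:, None]      # (T, T) boolean
query_mask   = step_of[None, :] <  step_of[:, None]

print("order z:", z)
print(content_mask.astype(int))
# The input sequence stays in its original order; only these masks, applied
# inside self-attention, encode the permutation.
```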
## 4.3 Reparameterization
The idea of the permutation language model is simple, but keeping the standard Transformer parameterization causes a problem. The standard parameterization is:
$$
\max _{\theta} \ \log p_{\theta}(\pmb{x}) \color{red}{=} \sum_{t=1}^{T} \log p_{\theta}\left(x_{t} \mid \pmb{x}_{<t}\right)=\sum_{t=1}^{T} \log \frac{\exp \left(h_{\theta}\left(\pmb{x}_{1: t-1}\right)^{T} e\left(x_{t}\right)\right)}{\sum_{x^{\prime}} \exp \left(h_{\theta}\left(\pmb{x}_{1: t-1}\right)^{T} e\left(x^{\prime}\right)\right)}
$$
For example:
Suppose the input sentence is "New York is a city" and consider two of its permutations:
1. is a city New York ——> \[3,4,5,1,2\]
2. is a city York New ——> \[3,4,5,2,1\]
For the first permutation, suppose at the fourth step we ask for the probability that the predicted token is New; then:
$$
p_{\theta}\left( \text{New} \mid \text{is, a, city} \right) = \frac{\exp
\left \{ h_{\theta} \left( \text{is, a, city} \right)^{\mathrm{T}} \cdot e(\text{New}) \right\} }{ \sum_{x^{\prime}} \exp
\left \{ h_{\theta} \left( \text{is, a, city} \right)^{\mathrm{T}} \cdot e(x^{\prime}) \right\} }
$$
Likewise, for the second permutation, asking at the fourth step for the probability that the predicted token is New gives exactly the same expression:
$$
p_{\theta}\left( \text{New} \mid \text{is, a, city} \right) = \frac{\exp
\left \{ h_{\theta} \left( \text{is, a, city} \right)^{\mathrm{T}} \cdot e(\text{New}) \right\} }{ \sum_{x^{\prime}} \exp
\left \{ h_{\theta} \left( \text{is, a, city} \right)^{\mathrm{T}} \cdot e(x^{\prime}) \right\} }
$$
The two expressions above are identical, yet under the two permutations the fourth step targets different positions, so the probabilities should differ. The root of the problem is that <font color=red>$h_{\theta}\left(\pmb{x}_{1: t-1}\right)$ does not encode the position of the token being predicted.</font>
To fix this, XLNet proposes a new parameterization:
$$
p_{\theta}\left(X_{z_{t}}=x \mid \mathbf{x}_{z_{<t}}\right)=\frac{\exp \left(e(x)^{\top} g_{\theta}\left(\mathbf{x}_{\mathbf{z}_{<t}} \ , z_{t}\right)\right)}{\sum_{x^{\prime}} \exp \left(e\left(x^{\prime}\right)^{\top} g_{\theta}\left(\mathbf{x}_{\mathbf{z}_{<t}} \ , z_{t}\right)\right)}
$$
- $\mathbf{x}_{\mathbf{z}_{<t}}$ denotes the tokens preceding step $t$ in the factorization order (it plays the role of $\mathbf{x}_{1:t-1}$ and carries both content and position information);
- $z_{t}$ is the position of the token to be predicted.
## 4.4 Two-Stream Self-Attention
For the parameterization introduced in Section 4.3, what model should represent $g_{\theta} \left(\mathbf{x}_{\mathbf{z}_{<t}}, z_{t} \right)$?
$g_{\theta}$ must satisfy two requirements:
1. when predicting $~x_{z_{t}}~$, it may only use the position $z_{t}$, not the content $x_{z_{t}}$;
2. when predicting later tokens $x_{z_{j}}\ (j > t)$, it must incorporate the content $x_{z_{t}}$.
Ordinary Transformer self-attention cannot satisfy both requirements at once, as the example illustrates.
To resolve this, XLNet introduces the <font color=red>Two-Stream Self-Attention</font> design:
As the figure above shows, two streams, i.e. two sets of hidden states, are used when computing attention:
- the content hidden state $h_{\theta}(\mathbf{x}_{z \leq t})$, abbreviated $h_{z_{t}}$, which, like a standard Transformer state, encodes both the context and the content of $x_{z_{t}}$;
- the query hidden state $g_{\theta}(\mathbf{x}_{z<t},z_{t})$, abbreviated $g_{z_{t}}$, which encodes only the context and the target position $z_{t}$, not the content $x_{z_{t}}$.
#### Computation: initialization
1. The query hidden state $g_{i}^{(0)}$ is initialized to a trainable vector $w$;
2. The content hidden state $h_{i}^{(0)}$ is initialized to the token embedding $e(x_{i})$.
The superscript (0) denotes a "layer 0" that does not actually exist; it only serves as the input to layer 1.
#### Computation: layer by layer
$$
\begin{align}
\text{Query Stream}&: \quad g_{z_{t}}^{(m)} \leftarrow \text{Attention} \left(Q=\color{red}{g_{z_{t}}^{(m-1)}}, K V=h_{\color{red}{z_{<t}}}^{(m-1)} ; \theta\right) \\
\text{Content Stream}&: \quad h_{z_{t}}^{(m)} \leftarrow \text{Attention} \left(Q=\color{red}{h_{z_{t}}^{(m-1)}}, K V=h_{\color{red}{z_{\leq t}}}^{(m-1)} ; \theta\right)
\end{align}
$$
- Query Stream: use $z_{t}$ but cannot see $\mathbf{x}_{z_{t}}$
- Content Stream: use both $z_{t}$ and $\mathbf{x}_{z_{t}}$
The parameter updates are the same as for standard Transformer self-attention. During fine-tuning the query stream can be dropped and only the content stream used. For the final prediction we use the top-layer query vector $g_{z_{t}}^{(M)}$. A small sketch of one two-stream update follows.
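A minimal numpy sketch of one layer of the two-stream update, assuming a single head, random weights, and the $z_{<t}$ / $z_{\leq t}$ masks built as in the earlier sketch; `h` is the content stream (initialized from token embeddings) and `g` the query stream (initialized from a shared vector $w$).

```python
import numpy as np

rng = np.random.default_rng(0)
T, d = 5, 8
W_q, W_k, W_v = (rng.normal(0, 0.1, (d, d)) for _ in range(3))

def attend(q_states, kv_states, mask):
    # Masked single-head attention: row t may only look at positions allowed
    # by mask[t, :] (an all-masked row degenerates to uniform attention here;
    # a real implementation would attend to cached memory instead).
    scores = (q_states @ W_q) @ (kv_states @ W_k).T / np.sqrt(d)
    scores = np.where(mask, scores, -1e9)
    w = np.exp(scores - scores.max(axis=-1, keepdims=True))
    w /= w.sum(axis=-1, keepdims=True)
    return w @ (kv_states @ W_v)

z = rng.permutation(T)
step_of = np.empty(T, dtype=int); step_of[z] = np.arange(T)
le_mask = step_of[None, :] <= step_of[:, None]     # z_{<=t}: content stream
lt_mask = step_of[None, :] <  step_of[:, None]     # z_{<t}:  query stream

h = rng.normal(size=(T, d))                # h^(0): token embeddings e(x_i)
g = np.tile(rng.normal(size=d), (T, 1))    # g^(0): shared trainable vector w

g_next = attend(g, h, lt_mask)   # query stream: sees positions, not the content of x_{z_t}
h_next = attend(h, h, le_mask)   # content stream: also sees x_{z_t} itself
print(g_next.shape, h_next.shape)          # (5, 8) (5, 8)
```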
## 4.5 XLNet vs. BERT
Suppose the input sentence is 'New York is a city' and \[New, York\] are the prediction targets, so the objective is to $\text{maximize} ~ \log p(\text{New York} \mid \text{is, a, city})$;
suppose further that XLNet samples the factorization order \[is, a, city, New, York\]; then the BERT and XLNet objectives are, respectively:
$$
\begin{align}
\mathcal{J}_{BERT} &= \log p\left(\text{New} \mid \text{is a city} \right) + \log p\left(\text{York} \mid \text{is a city} \right) \\
\mathcal{J}_{XLNet} &= \log p\left(\text{New} \mid \text{is a city} \right) + \log p\left(\text{York} \mid \color{red}{\text{New}}, \text{is a city} \right)
\end{align}
$$
As can be seen, XLNet captures the dependency between the pair (New, York), while BERT cannot.
## 4.6 Summary
|
State Before: 𝕜 : Type u_3
R : Type u_1
M : Type u_2
inst✝¹⁷ : IsROrC 𝕜
inst✝¹⁶ : NormedRing R
inst✝¹⁵ : AddCommGroup M
inst✝¹⁴ : NormedAlgebra 𝕜 R
inst✝¹³ : Module R M
inst✝¹² : Module Rᵐᵒᵖ M
inst✝¹¹ : SMulCommClass R Rᵐᵒᵖ M
inst✝¹⁰ : Module 𝕜 M
inst✝⁹ : IsScalarTower 𝕜 R M
inst✝⁸ : IsScalarTower 𝕜 Rᵐᵒᵖ M
inst✝⁷ : TopologicalSpace M
inst✝⁶ : TopologicalRing R
inst✝⁵ : TopologicalAddGroup M
inst✝⁴ : ContinuousSMul R M
inst✝³ : ContinuousSMul Rᵐᵒᵖ M
inst✝² : CompleteSpace R
inst✝¹ : T2Space R
inst✝ : T2Space M
m : M
⊢ exp 𝕜 (inr m) = 1 + inr m State After: case hx
𝕜 : Type u_3
R : Type u_1
M : Type u_2
inst✝¹⁷ : IsROrC 𝕜
inst✝¹⁶ : NormedRing R
inst✝¹⁵ : AddCommGroup M
inst✝¹⁴ : NormedAlgebra 𝕜 R
inst✝¹³ : Module R M
inst✝¹² : Module Rᵐᵒᵖ M
inst✝¹¹ : SMulCommClass R Rᵐᵒᵖ M
inst✝¹⁰ : Module 𝕜 M
inst✝⁹ : IsScalarTower 𝕜 R M
inst✝⁸ : IsScalarTower 𝕜 Rᵐᵒᵖ M
inst✝⁷ : TopologicalSpace M
inst✝⁶ : TopologicalRing R
inst✝⁵ : TopologicalAddGroup M
inst✝⁴ : ContinuousSMul R M
inst✝³ : ContinuousSMul Rᵐᵒᵖ M
inst✝² : CompleteSpace R
inst✝¹ : T2Space R
inst✝ : T2Space M
m : M
⊢ MulOpposite.op (fst (inr m)) • snd (inr m) = fst (inr m) • snd (inr m) Tactic: rw [exp_def_of_smul_comm, snd_inr, fst_inr, exp_zero, one_smul, inl_one] State Before: case hx
𝕜 : Type u_3
R : Type u_1
M : Type u_2
inst✝¹⁷ : IsROrC 𝕜
inst✝¹⁶ : NormedRing R
inst✝¹⁵ : AddCommGroup M
inst✝¹⁴ : NormedAlgebra 𝕜 R
inst✝¹³ : Module R M
inst✝¹² : Module Rᵐᵒᵖ M
inst✝¹¹ : SMulCommClass R Rᵐᵒᵖ M
inst✝¹⁰ : Module 𝕜 M
inst✝⁹ : IsScalarTower 𝕜 R M
inst✝⁸ : IsScalarTower 𝕜 Rᵐᵒᵖ M
inst✝⁷ : TopologicalSpace M
inst✝⁶ : TopologicalRing R
inst✝⁵ : TopologicalAddGroup M
inst✝⁴ : ContinuousSMul R M
inst✝³ : ContinuousSMul Rᵐᵒᵖ M
inst✝² : CompleteSpace R
inst✝¹ : T2Space R
inst✝ : T2Space M
m : M
⊢ MulOpposite.op (fst (inr m)) • snd (inr m) = fst (inr m) • snd (inr m) State After: no goals Tactic: rw [snd_inr, fst_inr, MulOpposite.op_zero, zero_smul, zero_smul]
|
// Copyright 2020 Your Name <your_email>
#include <async++.h>
#include <iostream>
#include <boost/thread.hpp>
#include <boost/thread/thread.hpp>
#include <string>
#include <boost/process.hpp>
#ifndef INCLUDE_BUILDER_HPP_
#define INCLUDE_BUILDER_HPP_
class Builder {
public:
Builder(std::string config, bool install, bool pack, int32_t timeout);
void Stop(async::cancellation_token &c);
bool NewTask(std::string task);
private:
std::string _config;
bool _install;
bool _pack;
int32_t _timeout;
boost::process::child _child;
};
#endif // INCLUDE_BUILDER_HPP_
|
#-h- gettok 2076 local 12/01/80 15:54:09
# gettok - get token. handles file inclusion and line numbers
include defs
character function gettok (token, toksiz)
character token (MAXTOK)
integer toksiz
include COMMON_BLOCKS
integer equal
character t, deftok
#character name(MAXNAME), t
#integer i, len, open, length
string ssubr "x$subr"
string sfunc "x$func"
#string incl "include"
# for (; level > 0; level = level - 1) {
gettok = deftok (token, toksiz)
if (gettok != EOF) {
if (gettok == XPP_DIRECTIVE) {
if (equal (token, sfunc) == YES) {
call skpblk
t = deftok (fcname, MAXNAME)
call pbstr (fcname)
if (t != ALPHA)
call synerr ("Missing function name.")
call putbak (BLANK)
swvnum = 0
swvlev = 0
return
} else if (equal (token, ssubr) == YES) {
swvnum = 0
swvlev = 0
return
} else
return
}
return
}
token (1) = EOF
token (2) = EOS
gettok = EOF
return
end
# -- Includes are now processed elsewhere
# else if (equal (token, incl) == NO)
# return
#
# # process 'include' statements:
# call skpblk
# t = deftok (name, MAXNAME)
# if (t == SQUOTE | t == DQUOTE) {
# len = length (name) - 1
# for (i = 1; i < len; i = i + 1)
# name (i) = name (i + 1)
# name (i) = EOS
# }
# i = length (name) + 1
# if (level >= NFILES)
# call synerr ("includes nested too deeply.")
# else {
# infile (level + 1) = open (name, READ)
# linect (level + 1) = 0
# if (infile (level + 1) == ERR)
# call synerr ("can't open include.")
# else {
# level = level + 1
# if (fnamp + i <= MAXFNAMES) {
# call scopy (name, 1, fnames, fnamp)
# fnamp = fnamp + i # push file name stack
# }
# }
# }
# }
# if (level > 1) { # close include file pop file name stack
# call close (infile (level))
# for (fnamp = fnamp - 1; fnamp > 1; fnamp = fnamp - 1)
# if (fnames (fnamp - 1) == EOS)
# break
# }
# }
|
If $f_n$ is a sequence of functions that are all integrable over a path $\gamma$, and if $f_n$ converges uniformly to $f$ over the image of $\gamma$, then $f$ is integrable over $\gamma$, and the sequence of integrals of $f_n$ converges to the integral of $f$.
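In symbols (writing $\gamma^{*}$ for the image of the path $\gamma$), this can be stated as:
$$
\Bigl(\forall n.\ f_n \text{ integrable over } \gamma\Bigr)\ \wedge\ f_n \to f \text{ uniformly on } \gamma^{*}
\;\Longrightarrow\;
f \text{ integrable over } \gamma\ \wedge\ \lim_{n\to\infty}\int_{\gamma} f_n(z)\,dz \;=\; \int_{\gamma} f(z)\,dz .
$$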
[STATEMENT]
lemma if\<^sub>S\<^sub>E_cond_cong : "f \<sigma> = g \<sigma> \<Longrightarrow>
(if\<^sub>S\<^sub>E f then c else d fi) \<sigma> =
(if\<^sub>S\<^sub>E g then c else d fi) \<sigma>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<sigma> = g \<sigma> \<Longrightarrow> (_if_SE f c d) \<sigma> = (_if_SE g c d) \<sigma>
[PROOF STEP]
unfolding if_SE_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f \<sigma> = g \<sigma> \<Longrightarrow> (if f \<sigma> then c \<sigma> else d \<sigma>) = (if g \<sigma> then c \<sigma> else d \<sigma>)
[PROOF STEP]
by simp
|
Require Import Coq.Relations.Relation_Definitions.
Require Import Axioms.
Require Import Tactics.
Require Import Equality.
Require Import Sigma.
Local Open Scope type_scope.
(* Ordered families of equivalences *)
Record ofe : Type :=
mk_ofe
{ car : Type;
dist : nat -> (relation car);
dist_eqrel : forall n, equiv car (dist n);
dist_limeq : forall x y, (forall n, dist n x y) -> x = y;
dist_downward : forall n x y, dist (S n) x y -> dist n x y;
dist_zero : forall x y, dist 0 x y }.
Arguments dist {o} n x y.
Definition dist_refl (A : ofe) n := dist_eqrel A n andel.
Definition dist_trans (A : ofe) n := dist_eqrel A n anderl.
Definition dist_symm (A : ofe) n := dist_eqrel A n anderr.
Lemma dist_refl' :
forall A n (x y : car A),
x = y
-> dist n x y.
Proof.
intros A n x y H.
subst y.
apply dist_refl.
Qed.
Lemma downward_leq :
forall (A : Type) (R : nat -> relation A) m n (x y : A),
(forall n x y, R (S n) x y -> R n x y)
-> m <= n
-> R n x y
-> R m x y.
Proof.
intros A R m n x y Hdownward Hleq Hdist.
revert Hdist.
induct Hleq; auto.
Qed.
Lemma dist_downward_leq :
forall (A : ofe) m n (x y : car A),
m <= n
-> dist n x y
-> dist m x y.
Proof.
intros A m n x y Hleq Hdist.
eapply downward_leq; eauto; [].
intros; apply dist_downward; auto.
Qed.
Lemma dist_downward_pred :
forall (A : ofe) n (x y : car A),
dist n x y
-> dist (pred n) x y.
Proof.
intros A n x y H.
destruct n as [| n'].
- simpl.
apply dist_zero.
- simpl.
apply dist_downward; auto.
Qed.
Lemma dist_if_pos :
forall (A : ofe) n (x y : car A),
(n > 0 -> dist n x y)
-> dist n x y.
Proof.
intros A n x y Hdist.
destruct n as [| n].
{
apply dist_zero.
}
apply Hdist.
omega.
Qed.
(* Nonexpansiveness/Contractiveness *)
Definition nonexpansive {A B : ofe} (f : car A -> car B) :=
forall n x y, dist n x y -> dist n (f x) (f y).
Definition contractive {A B : ofe} (f : car A -> car B) :=
forall n x y, dist n x y -> dist (S n) (f x) (f y).
Lemma ident_nonexpansive :
forall (A : ofe),
@nonexpansive A A (fun x => x).
Proof.
intros A n x y Hxy.
assumption.
Qed.
Lemma const_nonexpansive :
forall (A B : ofe) (x : car B),
@nonexpansive A B (fun _ => x).
Proof.
intros A B x.
intros n _ _ _.
apply dist_refl.
Qed.
Lemma transport_nonexpansive :
forall A (B : A -> ofe) i a a' (x y : car (B a)) (h : a = a'),
@dist (B a) i x y
-> @dist (B a') i (transport h (fun z => car (B z)) x) (transport h (fun z => car (B z)) y).
Proof.
intros A B i a a' x y h Hdist.
subst a'.
cbn.
exact Hdist.
Qed.
(* not used anywhere *)
Lemma transport_noncontractive :
forall A (B : A -> ofe) i a a' (x y : car (B a)) (h : a = a'),
dist i (transport h (fun z => car (B z)) x) (transport h (fun z => car (B z)) y)
-> dist i x y.
Proof.
intros A B i a a' x y h H.
subst a'.
auto.
Qed.
Lemma compose_ne_ne :
forall (A B C : ofe) f g,
@nonexpansive B C f
-> @nonexpansive A B g
-> nonexpansive (fun x => f (g x)).
Proof.
intros A B C f g Hnef Hneg.
intros n x y Hxy.
so (Hneg _#3 Hxy) as Hgxy.
so (Hnef _#3 Hgxy) as Hfgxy.
exact Hfgxy.
Qed.
(* Convergence *)
Definition convergent {A : Type} (d : nat -> relation A) (f : nat -> A) :=
forall n, d n (f n) (f (S n)).
Lemma convergent_leq_gen :
forall A (R : nat -> relation A) f,
(forall n, equiv _ (R n))
-> (forall n x y, R (S n) x y -> R n x y)
-> convergent R f
-> forall m n, m <= n -> R m (f m) (f n).
Proof.
intros A R f Hequiv Hdownward Hconv m n Hleq.
induct Hleq.
(* eq *)
{
apply (Hequiv _ andel).
}
(* S *)
{
intros n Hleq IH.
apply (Hequiv m anderl _ (f n)); auto; [].
eapply downward_leq; eauto.
}
Qed.
Lemma convergent_leq :
forall A f,
convergent (@dist A) f
-> forall m n, m <= n -> dist m (f m) (f n).
Proof.
intros A f Hconv m n Hleq.
apply convergent_leq_gen; auto.
- auto using dist_eqrel.
- auto using dist_downward.
Qed.
Lemma const_convergent :
forall A x,
convergent (@dist A) (fun _ => x).
Proof.
intros A x.
intros n.
apply dist_refl; done.
Qed.
Lemma map_convergent_prim :
forall (A B : Type) (d : nat -> relation A) (d' : nat -> relation B) (ch : nat -> A) (f : A -> B),
(forall n x y, d n x y -> d' n (f x) (f y))
-> convergent d ch
-> convergent d' (fun i => f (ch i)).
Proof.
intros A B d d' ch f Hne Hconv.
intro i.
apply Hne; [].
apply Hconv.
Qed.
Lemma map_convergent :
forall (A B : ofe) (ch : nat -> car A) (f : car A -> car B),
nonexpansive f
-> convergent (@dist A) ch
-> convergent (@dist B) (fun i => f (ch i)).
Proof.
intros A B ch f Hne Hconv.
eapply map_convergent_prim; eauto.
Qed.
(* Limits *)
(* It's convenient to require that cofes be nonempty, since they are easier
to work with, and all the cofes we consider are nonempty anyway.
Now that I've embraced the description axiom, I would probably state
completeness more conveniently, as an existence statement in Prop.
Maybe change it if I ever make serious use of completeness.
*)
Record complete (A : ofe) : Type :=
mk_complete
{ limit : forall ch, convergent dist ch -> car A;
inhabitant : car A;
complete_dist : forall ch n (p : convergent dist ch), dist n (limit ch p) (ch n) }.
Arguments limit {A} c.
Arguments inhabitant {A} c.
(* Give limit a less-awful interface. *)
Definition limits {A : ofe} (ch : nat -> car A) (x : car A) :=
forall n,
dist n (ch n) x.
Lemma limits_to_limit :
forall (A : ofe) (C : complete A) (ch : nat -> car A) (x : car A),
limits ch x
-> forall p, limit C ch p = x.
Proof.
intros A C ch x Hlimits Hconv.
apply dist_limeq; [].
intro n.
eapply dist_trans.
- apply complete_dist.
- apply Hlimits.
Qed.
Lemma limit_to_limits :
forall (A : ofe) (C : complete A) (ch : nat -> car A) (p : convergent (@dist A) ch),
limits ch (limit C ch p).
Proof.
intros A C ch p.
intro n.
apply dist_symm; [].
apply complete_dist.
Qed.
Lemma limits_unique :
forall (A : ofe) (ch : nat -> car A) (x y : car A),
limits ch x
-> limits ch y
-> x = y.
Proof.
intros A ch x y Hlimx Hlimy.
apply dist_limeq; [].
intro n.
eapply dist_trans.
- apply dist_symm; [].
apply Hlimx.
- apply Hlimy.
Qed.
Lemma limits_const :
forall (A : ofe) (x : car A),
limits (fun i => x) x.
Proof.
intros A x.
intro n.
apply dist_refl.
Qed.
Lemma limits_truncate :
forall (A : ofe) (ch : nat -> car A) (x : car A) n,
limits ch x
-> limits (fun i => ch (Nat.add n i)) x.
Proof.
intros A ch x n Hlim.
intro i.
apply (dist_downward_leq _ i (n+i)).
- omega.
- apply Hlim.
Qed.
(* We could strengthen this lemma (eliminate the convergent requirement)
if we weakened the definition of limits to permit slower convergence.
But the stronger convergence is awfully convenient, and this isn't unworkable.
*)
Lemma limits_prepend :
forall (A : ofe) (C : complete A) (ch : nat -> car A) (x : car A) n,
convergent (@dist A) ch
-> limits (fun i => ch (n + i)%nat) x
-> limits ch x.
Proof.
intros A C ch x n Hconv Hlim.
so (limit_to_limits _ C _ Hconv) as Hlim'.
so (limits_truncate _#3 n Hlim') as Hlim''.
so (limits_unique _#4 Hlim Hlim'').
subst x.
exact Hlim'.
Qed.
Lemma limits_convergent :
forall (A : ofe) (ch : nat -> car A) (x : car A),
limits ch x
-> convergent (@dist A) ch.
Proof.
intros A ch x Hlim.
intro i.
apply (dist_trans _ _ _ x).
- apply Hlim.
- apply dist_symm; [].
apply dist_downward; [].
apply Hlim.
Qed.
Lemma ofe_fixpoint :
forall (A : ofe) (C : complete A) (f : car A -> car A),
contractive f
-> existsT! (x : car A), f x = x.
Proof.
intros A C f Hcontract.
set (ch := @nat_rect (fun _ => car A) (inhabitant C) (fun _ x => f x)).
assert (convergent (@dist A) ch) as p.
{
intros i.
induct i.
- apply dist_zero.
- intros i IH.
simpl.
apply Hcontract; [].
exact IH.
}
exists (limit C ch p).
assert (f (limit C ch p) = limit C ch p) as Hfix.
{
so (limit_to_limits A C ch p) as Hlimits.
apply (limits_unique _ ch); auto; [].
apply (limits_prepend _ C _ _ 1); auto; [].
simpl.
intro i.
apply dist_downward; [].
apply Hcontract; [].
apply Hlimits.
}
split; auto; [].
intros y Hfix'.
apply dist_limeq; [].
intro i.
induct i.
- apply dist_zero.
- intros i IH.
rewrite <- Hfix; [].
rewrite <- Hfix'; [].
apply Hcontract.
assumption.
Qed.
(* Nonexpansive function space *)
Definition nearrow (A B : ofe) : Type := exT (car A -> car B) (@nonexpansive A B).
Notation "A -n> B" := (nearrow A B)
(at level 99, right associativity) : ofe_scope.
Open Scope ofe_scope.
Definition nearrow_const (A : ofe) (B : ofe) (x : car B) : A -n> B :=
expair (fun _ => x) (const_nonexpansive A B x).
Definition idne {A : ofe} : A -n> A := expair _ (ident_nonexpansive A).
Definition dist_ne {A B : ofe} n (f g : A -n> B) := forall x, dist n (pi1 f x) (pi1 g x).
Definition limit_ne {A B : ofe} (C : complete B) :
forall ch, convergent (@dist_ne A B) ch -> A -n> B.
Proof.
intros f Hconv.
exists (fun x : car A => limit C (fun i => pi1 (f i) x) (fun n => Hconv n x)).
intros n x y Hdist.
apply (dist_trans B n _ (pi1 (f n) x)).
- so (complete_dist B C _ n (fun n => Hconv n x)) as H.
simpl in H.
exact H.
- apply (dist_trans B n _ (pi1 (f n) y)).
+ so (pi2 (f n)) as Hne.
exact (Hne n x y Hdist).
+ apply (dist_symm B n); [].
so (complete_dist B C _ n (fun n => Hconv n y)) as H.
simpl in H.
exact H.
Defined.
Lemma nearrow_extensionality :
forall A B (f g : A -n> B),
(forall x, pi1 f x = pi1 g x)
-> f = g.
Proof.
intros A B f g H.
apply exT_extensionality_prop.
fextensionality 1.
exact H.
Qed.
Lemma nearrow_extensionality_dep :
forall A (B : A -> ofe) (C : ofe) (a a' : A) (f : B a -n> C) (g : B a' -n> C),
eq_dep A (fun a => car (B a) -> car C) a (pi1 f) a' (pi1 g)
-> eq_dep A (fun a => B a -n> C) a f a' g.
Proof.
intros A B C a a' f g Heq.
so (eq_dep_impl_eq_fst _#6 Heq); subst a'.
so (eq_dep_impl_eq_snd _#5 Heq) as Heq'.
apply eq_impl_eq_dep_snd.
apply exT_extensionality_prop; auto.
Qed.
Definition nearrow_ofe (A B : ofe) : ofe.
Proof.
apply
(mk_ofe
(A -n> B)
(@dist_ne A B)).
(* eqrel *)
{
intro n.
do2 2 split.
+ intros f x.
apply (dist_refl B n).
+ intros f g h Hfg Hgh x.
eapply (dist_trans B n); eauto.
+ intros f g H x.
apply (dist_symm B n); [].
apply H.
}
(* limeq *)
{
intros f g Hsim.
destruct f as [f Hnef].
destruct g as [g Hneg].
cut (f = g).
{
intro.
subst g.
f_equal; [].
apply proof_irrelevance.
}
apply functional_extensionality; [].
intro x.
apply (dist_limeq B); [].
intro n.
exact (Hsim n x).
}
(* downward *)
{
intros n f g Hdist.
intro x.
apply (dist_downward B); [].
exact (Hdist x).
}
(* zero *)
{
intros f g x.
apply dist_zero.
}
Defined.
Definition nearrow_complete (A : ofe) (B : ofe) (C : complete B) : complete (nearrow_ofe A B).
Proof.
apply
(mk_complete (nearrow_ofe A B)
(@limit_ne A B C)
(expair (fun _ => inhabitant C) (const_nonexpansive _ _ _))).
intros f n Hconv.
intro x.
cbn.
apply (complete_dist B).
Defined.
Definition nearrow_id (A : ofe) : A -n> A :=
expair _ (ident_nonexpansive A).
Definition nearrow_compose {A B C : ofe} (f : B -n> C) (g : A -n> B) : A -n> C :=
expair _ (compose_ne_ne A B C (pi1 f) (pi1 g) (pi2 f) (pi2 g)).
Lemma nearrow_compose_id_left :
forall A B (f : A -n> B),
nearrow_compose (nearrow_id B) f = f.
Proof.
intros A B f.
destruct f as [f Hne].
unfold nearrow_compose.
f_equal; [].
apply proof_irrelevance; done.
Qed.
Lemma nearrow_compose_id_right :
forall A B (f : A -n> B),
nearrow_compose f (nearrow_id A) = f.
Proof.
intros A B f.
destruct f as [f Hne].
unfold nearrow_compose.
f_equal; [].
apply proof_irrelevance; done.
Qed.
Lemma nearrow_compose_assoc :
forall A B C D (f : C -n> D) (g : B -n> C) (h : A -n> B),
nearrow_compose (nearrow_compose f g) h = nearrow_compose f (nearrow_compose g h).
Proof.
intros A B C D f g h.
destruct f as [f Hnef].
destruct g as [g Hneg].
destruct h as [h Hneh].
unfold nearrow_compose; simpl.
f_equal; [].
apply proof_irrelevance.
Qed.
Lemma nearrow_compose_nonexpansive :
forall A B C n (f f' : B -n> C) (g g' : A -n> B),
dist_ne n f f'
-> dist_ne n g g'
-> dist_ne n (nearrow_compose f g) (nearrow_compose f' g').
Proof.
intros A B C n f f' g g' Hdistf Hdistg.
intro x.
simpl.
destruct f as [f Hnef].
destruct f' as [f' Hnef'].
destruct g as [g Hneg].
destruct g' as [g' Hneg'].
simpl.
apply (dist_trans C n _ (f' (g x))).
- apply Hdistf; done.
- apply Hnef'; [].
apply Hdistg; done.
Qed.
Definition composer (A B C : ofe) (f : B -n> C) : nearrow_ofe A B -n> nearrow_ofe A C.
Proof.
exists (fun g => nearrow_compose f g).
intros n g g' Hg.
refine (nearrow_compose_nonexpansive _#8 _ Hg).
apply (@dist_refl (nearrow_ofe B C)).
Defined.
Lemma eq_nearrow_ne :
forall (A B : ofe) (h : A = B),
nonexpansive (fun x => transport h car x).
Proof.
intros A B h.
subst B.
apply ident_nonexpansive.
Qed.
Definition eq_nearrow {A B : ofe} (h : A = B) : A -n> B
:=
expair (fun x => transport h car x) (eq_nearrow_ne A B h).
Definition transport_ne {A} {a a' : A} (h : a = a') (B : A -> ofe)
: B a -n> B a'
:=
@expair
_ (@nonexpansive (B a) (B a'))
(fun x => transport h (fun z => car (B z)) x)
(fun i m n Hdist => transport_nonexpansive A B i a a' m n h Hdist).
Definition dep_transport_ne {A} {a a' : A} (h : a = a')
(B : A -> Type)
(C : forall a, B a -> ofe)
(b : B a)
: C a b -n> C a' (transport h B b)
:=
match h
as h
in _ = a'
return C a b -n> C a' (transport h B b)
with
| eq_refl _ => idne
end.
Definition dep_transport_ne' {A} {a a' : A} (h : a = a')
(B : A -> Type)
(C : forall a, B a -> ofe)
(b : B a)
: C a' (transport h B b) -n> C a b
:=
match h
as h
in _ = a'
return C a' (transport h B b) -n> C a b
with
| eq_refl _ => idne
end.
(* We could reuse nearrow_compose, but it seems cleaner to do it directly. *)
Definition nearrow_compose2 {A B C D : ofe} (f : A -n> B) (h : C -n> D)
(g : B -n> C) : A -n> D.
Proof.
refine (expair (fun x => pi1 h (pi1 g (pi1 f x))) _).
intros n x y Hxy.
apply (pi2 h).
apply (pi2 g).
apply (pi2 f); auto.
Defined.
Lemma nearrow_compose2_nonexpansive :
forall A B C D (f : A -n> B) (h : C -n> D),
@nonexpansive (nearrow_ofe B C) (nearrow_ofe A D) (nearrow_compose2 f h).
Proof.
intros A B C D f h.
intros n g g' Hg.
cbn.
intros x.
cbn.
apply (pi2 h).
apply Hg.
Qed.
Definition nearrow_compose2_ne {A B C D : ofe} (f : A -n> B) (h : C -n> D)
: nearrow_ofe B C -n> nearrow_ofe A D
:=
expair (nearrow_compose2 f h) (nearrow_compose2_nonexpansive _#4 f h).
Lemma nearrow_compose2_compose :
forall (A B C D E F : ofe)
(f1 : A -n> B) (f2 : B -n> C) (g : C -n> D) (h2 : D -n> E) (h1 : E -n> F),
nearrow_compose2 f1 h1 (nearrow_compose2 f2 h2 g)
=
nearrow_compose2 (nearrow_compose f2 f1) (nearrow_compose h1 h2) g.
Proof.
intros A B C D E F f1 f2 g h2 h1.
apply nearrow_extensionality.
intro x.
cbn.
reflexivity.
Qed.
Lemma nearrow_compose2_split :
forall A B C D (f : C -n> D) (g : B -n> C) (h : A -n> B),
nearrow_compose2 h f g
=
nearrow_compose f (nearrow_compose g h).
Proof.
intros A B C D f g h.
apply nearrow_extensionality.
intro x.
cbn.
reflexivity.
Qed.
(* Product spaces *)
Definition dist_prod {A B : ofe} n (p q : car A * car B) :=
dist n (fst p) (fst q) /\ dist n (snd p) (snd q).
Definition prod_ofe (A B : ofe) : ofe.
Proof.
apply (mk_ofe (car A * car B) (@dist_prod A B)).
(* eqrel *)
{
intro n.
do2 2 split.
+ intros (x, y).
split; apply dist_refl.
+ intros (x1, y1) (x2, y2) (x3, y3) H12 H23.
destruct H12 as (H12x, H12y).
destruct H23 as (H23x, H23y).
split; eapply dist_trans; eauto.
+ intros (x1, y1) (x2, y2) H.
destruct H as (Hx, Hy).
split; eapply dist_symm; eauto.
}
(* limeq *)
{
intros (x, y) (x', y') Hsim.
f_equal.
+ apply dist_limeq; [].
intro n.
destruct (Hsim n); auto.
+ apply dist_limeq; [].
intro n.
destruct (Hsim n); auto.
}
(* downward *)
{
intros n (x, y) (x', y') Hdist.
destruct Hdist.
split; apply dist_downward; eauto.
}
(* zero *)
{
intros (x, y) (x', y').
split; apply dist_zero.
}
Defined.
Definition pair_ne {A B C : ofe} (f : A -n> B) (g : A -n> C)
: A -n> prod_ofe B C.
Proof.
exists (fun x => (pi1 f x, pi1 g x)).
exact (fun n x y H => conj (pi2 f n x y H) (pi2 g n x y H)).
Defined.
Definition fst_ne {A B : ofe} : prod_ofe A B -n> A.
Proof.
exists fst.
intros n x y Hxy.
exact (Hxy andel).
Defined.
Definition snd_ne {A B : ofe} : prod_ofe A B -n> B.
Proof.
exists snd.
intros n x y Hxy.
exact (Hxy ander).
Defined.
Definition mpair_ne {A B C D : ofe} (f : A -n> C) (g : B -n> D)
: prod_ofe A B -n> prod_ofe C D.
Proof.
exists (fun x => (pi1 f (fst x), pi1 g (snd x))).
exact (fun n x y H => conj (pi2 f n _ _ (carp H)) (pi2 g n _ _ (cdrp H))).
Defined.
Lemma dist_prod_fst :
forall A B n (p q : car (prod_ofe A B)),
dist n p q
-> dist n (fst p) (fst q).
Proof.
intros A B n p q Hdist.
destruct Hdist.
auto.
Qed.
Lemma dist_prod_snd :
forall A B n (p q : car (prod_ofe A B)),
dist n p q
-> dist n (snd p) (snd q).
Proof.
intros A B n p q Hdist.
destruct Hdist.
auto.
Qed.
Definition limit_prod (A B : ofe) (C : complete A) (D : complete B) :
forall ch, convergent (@dist_prod A B) ch -> car A * car B.
Proof.
intros ch Hconv.
split.
{
refine (limit C (fun i => fst (ch i)) _); [].
eapply map_convergent_prim; eauto; [].
intros n p q Hdist.
destruct Hdist; auto.
}
{
refine (limit D (fun i => snd (ch i)) _); [].
eapply map_convergent_prim; eauto; [].
intros n p q Hdist.
destruct Hdist; auto.
}
Defined.
Definition prod_complete (A B : ofe) (C : complete A) (D : complete B) : complete (prod_ofe A B).
Proof.
apply (mk_complete (prod_ofe A B) (limit_prod A B C D) (inhabitant C, inhabitant D)).
intros ch n Hconv.
so (Hconv n) as (H1 & H2).
split.
+ apply complete_dist.
+ apply complete_dist.
Defined.
Definition unit_ofe : ofe.
Proof.
apply
(mk_ofe
unit
(fun _ _ _ => True)).
- do2 2 split; intro; auto.
- intros x y _.
destruct x; destruct y.
reflexivity.
- auto.
- auto.
Defined.
Definition unit_complete : complete unit_ofe.
Proof.
apply
(mk_complete unit_ofe
(fun _ _ => tt)
tt).
intros ch n _.
set (x := ch n).
destruct x.
apply dist_refl.
Defined.
|
lemma diff_monom: "monom a n - monom b n = monom (a - b) n"
module Mod_Strings
contains
subroutine parse(str,delims,args,nargs)
! Parses the string 'str' into arguments args(1), ..., args(nargs) based on
! the delimiters contained in the string 'delims'. Preceding a delimiter in
! 'str' by a backslash (\) makes this particular instance not a delimiter.
! The integer output variable nargs contains the number of arguments found.
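! Illustrative example (behavior as implemented below): with str = 'one two,three'
! and delims = ' ,',
!   call parse(str,delims,args,nargs)
! returns nargs = 3 with args(1) = 'one', args(2) = 'two', args(3) = 'three',
! and str is restored to its value on entry.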
character(len=*) :: str,delims
character(len=len_trim(str)) :: strsav
character(len=*),dimension(:) :: args
strsav=str
call compact(str)
na=size(args)
do i=1,na
args(i)=' '
end do
nargs=0
lenstr=len_trim(str)
if(lenstr==0) return
k=0
do
if(len_trim(str) == 0) exit
nargs=nargs+1
call split(str,delims,args(nargs))
call removebksl(args(nargs))
end do
str=strsav
end subroutine parse
!**********************************************************************
subroutine compact(str)
! Converts multiple spaces and tabs to single spaces; deletes control characters;
! removes initial spaces.
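! Illustrative example (behavior as implemented below):
!   '   hello   world'  becomes  'hello world'   (runs of tabs are collapsed as well)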
character(len=*):: str
character(len=1):: ch
character(len=len_trim(str)):: outstr
str=adjustl(str)
lenstr=len_trim(str)
outstr=' '
isp=0
k=0
do i=1,lenstr
ch=str(i:i)
ich=iachar(ch)
select case(ich)
case(9,32) ! space or tab character
if(isp==0) then
k=k+1
outstr(k:k)=' '
end if
isp=1
case(33:) ! not a space, tab, or control character
k=k+1
outstr(k:k)=ch
isp=0
end select
end do
str=adjustl(outstr)
end subroutine compact
!**********************************************************************
subroutine removesp(str)
! Removes spaces, tabs, and control characters in string str
character(len=*):: str
character(len=1):: ch
character(len=len_trim(str))::outstr
str=adjustl(str)
lenstr=len_trim(str)
outstr=' '
k=0
do i=1,lenstr
ch=str(i:i)
ich=iachar(ch)
select case(ich)
case(0:32) ! space, tab, or control character
cycle
case(33:)
k=k+1
outstr(k:k)=ch
end select
end do
str=adjustl(outstr)
end subroutine removesp
!**********************************************************************
subroutine shiftstr(str,n)
! Shifts characters in the string 'str' by n positions (positive values
! denote a right shift and negative values denote a left shift). Characters
! that are shifted off the end are lost. Positions opened up by the shift
! are replaced by spaces.
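! Illustrative example (behavior as implemented below): for str = 'abcde',
!   call shiftstr(str, 2)  gives  str = '  abc'
!   call shiftstr(str,-2)  gives  str = 'cde  '   (each applied to the original 'abcde')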
character(len=*):: str
lenstr=len(str)
nabs=iabs(n)
if(nabs>=lenstr) then
str=repeat(' ',lenstr)
return
end if
if(n<0) str=str(nabs+1:)//repeat(' ',nabs) ! shift left
if(n>0) str=repeat(' ',nabs)//str(:lenstr-nabs) ! shift right
return
end subroutine shiftstr
!**********************************************************************
subroutine insertstr(str,strins,loc)
! Inserts the string 'strins' into the string 'str' at position 'loc'.
! Characters in 'str' starting at position 'loc' are shifted right to
! make room for the inserted string. Trailing spaces of 'strins' are
! removed prior to insertion
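! Illustrative example (behavior as implemented below): if str is declared
! character(len=12) with value 'abcdef' and strins = 'XY', then
!   call insertstr(str,strins,3)   gives   str = 'abXYcdef'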
character(len=*):: str,strins
character(len=len(str))::tempstr
lenstrins=len_trim(strins)
tempstr=str(loc:)
call shiftstr(tempstr,lenstrins)
tempstr(1:lenstrins)=strins(1:lenstrins)
str(loc:)=tempstr
return
end subroutine insertstr
!**********************************************************************
subroutine delsubstr(str,substr)
! Deletes first occurrence of substring 'substr' from string 'str' and
! shifts characters left to fill the hole. Trailing spaces are not considered
! part of 'substr'.
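! Illustrative example (behavior as implemented below):
!   str = 'one-two-three';  call delsubstr(str,'-two')  gives  str = 'one-three'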
character(len=*):: str,substr
lensubstr=len_trim(substr)
ipos=index(str,substr)
if(ipos==0) return
if(ipos == 1) then
str=str(lensubstr+1:)
else
str=str(:ipos-1)//str(ipos+lensubstr:)
end if
return
end subroutine delsubstr
!**********************************************************************
subroutine delall(str,substr)
! Deletes all occurrences of substring 'substr' from string 'str' and
! shifts characters left to fill holes.
character(len=*):: str,substr
lensubstr=len_trim(substr)
do
ipos=index(str,substr)
if(ipos == 0) exit
if(ipos == 1) then
str=str(lensubstr+1:)
else
str=str(:ipos-1)//str(ipos+lensubstr:)
end if
end do
return
end subroutine delall
!**********************************************************************
function uppercase(str) result(ucstr)
! convert string to upper case; text inside single or double quotes is left unchanged
character (len=*):: str
character (len=len_trim(str)):: ucstr
ilen=len_trim(str)
ioffset=iachar('A')-iachar('a')
iquote=0
ucstr=str
do i=1,ilen
iav=iachar(str(i:i))
if(iquote==0 .and. (iav==34 .or.iav==39)) then
iquote=1
iqc=iav
cycle
end if
if(iquote==1 .and. iav==iqc) then
iquote=0
cycle
end if
if (iquote==1) cycle
if(iav >= iachar('a') .and. iav <= iachar('z')) then
ucstr(i:i)=achar(iav+ioffset)
else
ucstr(i:i)=str(i:i)
end if
end do
return
end function uppercase
!**********************************************************************
function lowercase(str) result(lcstr)
! convert string to lower case; text inside single or double quotes is left unchanged
character (len=*):: str
character (len=len_trim(str)):: lcstr
ilen=len_trim(str)
ioffset=iachar('A')-iachar('a')
iquote=0
lcstr=str
do i=1,ilen
iav=iachar(str(i:i))
if(iquote==0 .and. (iav==34 .or.iav==39)) then
iquote=1
iqc=iav
cycle
end if
if(iquote==1 .and. iav==iqc) then
iquote=0
cycle
end if
if (iquote==1) cycle
if(iav >= iachar('A') .and. iav <= iachar('Z')) then
lcstr(i:i)=achar(iav-ioffset)
else
lcstr(i:i)=str(i:i)
end if
end do
return
end function lowercase
!**********************************************************************
subroutine readline(nunitr,line,ios)
! Reads line from unit=nunitr, ignoring blank lines
! and deleting comments beginning with an exclamation point(!)
character (len=*):: line
do
read(nunitr,'(a)', iostat=ios) line ! read input line
if(ios /= 0) return
line=adjustl(line)
ipos=index(line,'!')
if(ipos == 1) cycle
if(ipos /= 0) line=line(:ipos-1)
if(len_trim(line) /= 0) exit
end do
return
end subroutine readline
!**********************************************************************
subroutine match(str,ipos,imatch)
! Sets imatch to the position in string of the delimiter matching the delimiter
! in position ipos. Allowable delimiters are (), [], {}, <>.
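! Illustrative example (behavior as implemented below): for str = 'a*(b+(c-d))',
!   call match(str,3,imatch)  sets  imatch = 11,
! since the '(' in position 3 is matched by the ')' in position 11.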
character(len=*) :: str
character :: delim1,delim2,ch
lenstr=len_trim(str)
delim1=str(ipos:ipos)
select case(delim1)
case('(')
idelim2=iachar(delim1)+1
istart=ipos+1
iend=lenstr
inc=1
case(')')
idelim2=iachar(delim1)-1
istart=ipos-1
iend=1
inc=-1
case('[','{','<')
idelim2=iachar(delim1)+2
istart=ipos+1
iend=lenstr
inc=1
case(']','}','>')
idelim2=iachar(delim1)-2
istart=ipos-1
iend=1
inc=-1
case default
write(*,*) delim1,' is not a valid delimiter'
return
end select
if(istart < 1 .or. istart > lenstr) then
write(*,*) delim1,' has no matching delimiter'
return
end if
delim2=achar(idelim2) ! matching delimiter
isum=1
do i=istart,iend,inc
ch=str(i:i)
if(ch /= delim1 .and. ch /= delim2) cycle
if(ch == delim1) isum=isum+1
if(ch == delim2) isum=isum-1
if(isum == 0) exit
end do
if(isum /= 0) then
write(*,*) delim1,' has no matching delimiter'
return
end if
imatch=i
return
end subroutine match
!***********************************************************************
subroutine trimzero(str)
! Deletes nonsignificant trailing zeroes from number string str. If number
! string ends in a decimal point, one trailing zero is added.
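! Illustrative examples (behavior as implemented below):
!   '12.3400' becomes '12.34',  '5.000' becomes '5.0',  '1.2300e+02' becomes '1.23e+02'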
character(len=*) :: str
character :: ch
character(len=10) :: exp
ipos=scan(str,'eE')
if(ipos>0) then
exp=str(ipos:)
str=str(1:ipos-1)
endif
lstr=len_trim(str)
do i=lstr,1,-1
ch=str(i:i)
if(ch=='0') cycle
if(ch=='.') then
str=str(1:i)//'0'
exit   ! the exponent part, if any, is appended once after the loop
endif
str=str(1:i)
exit
end do
if(ipos>0) str=trim(str)//trim(exp)
end subroutine trimzero
!**********************************************************************
function is_letter(ch) result(res)
! Returns .true. if ch is a letter and .false. otherwise
character :: ch
logical :: res
select case(ch)
case('A':'Z','a':'z')
res=.true.
case default
res=.false.
end select
return
end function is_letter
!**********************************************************************
function is_digit(ch) result(res)
! Returns .true. if ch is a digit (0,1,...,9) and .false. otherwise
character :: ch
logical :: res
select case(ch)
case('0':'9')
res=.true.
case default
res=.false.
end select
return
end function is_digit
!**********************************************************************
subroutine split(str,delims,before,sep)
! Routine finds the first instance of a character from 'delims' in the
! string 'str'. The characters before the found delimiter are
! output in 'before'. The characters after the found delimiter are
! output in 'str'. The optional output character 'sep' contains the
! found delimiter. A delimiter in 'str' is treated like an ordinary
! character if it is preceded by a backslash (\). If the backslash
! character is desired in 'str', then precede it with another backslash.
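! Illustrative example (behavior as implemented below): with str = 'alpha beta,gamma'
! and delims = ' ,',
!   call split(str,delims,before)
! gives before = 'alpha' and str = 'beta,gamma'.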
character(len=*) :: str,delims,before
character,optional :: sep
logical :: pres
character :: ch,cha
pres=present(sep)
str=adjustl(str)
call compact(str)
lenstr=len_trim(str)
if(lenstr == 0) return ! string str is empty
k=0
ibsl=0 ! backslash initially inactive
before=' '
do i=1,lenstr
ch=str(i:i)
if(ibsl == 1) then ! backslash active
k=k+1
before(k:k)=ch
ibsl=0
cycle
end if
if(ch == '\') then ! backslash with backslash inactive
k=k+1
before(k:k)=ch
ibsl=1
cycle
end if
ipos=index(delims,ch)
if(ipos == 0) then ! character is not a delimiter
k=k+1
before(k:k)=ch
cycle
end if
if(ch /= ' ') then ! character is a delimiter that is not a space
str=str(i+1:)
if(pres) sep=ch
exit
end if
cha=str(i+1:i+1) ! character is a space delimiter
iposa=index(delims,cha)
if(iposa > 0) then ! next character is a delimiter
str=str(i+2:)
if(pres) sep=cha
exit
else
str=str(i+1:)
if(pres) sep=ch
exit
end if
end do
if(i >= lenstr) str=''
str=adjustl(str) ! remove initial spaces
return
end subroutine split
!**********************************************************************
subroutine removebksl(str)
! Removes backslash (\) characters. Double backslashes (\\) are replaced
! by a single backslash.
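! Illustrative example (behavior as implemented below):
!   'a\,b\\c'  becomes  'a,b\c'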
character(len=*):: str
character(len=1):: ch
character(len=len_trim(str))::outstr
str=adjustl(str)
lenstr=len_trim(str)
outstr=' '
k=0
ibsl=0 ! backslash initially inactive
do i=1,lenstr
ch=str(i:i)
if(ibsl == 1) then ! backslash active
k=k+1
outstr(k:k)=ch
ibsl=0
cycle
end if
if(ch == '\') then ! backslash with backslash inactive
ibsl=1
cycle
end if
k=k+1
outstr(k:k)=ch ! non-backslash with backslash inactive
end do
str=adjustl(outstr)
end subroutine removebksl
!**********************************************************************
end module Mod_Strings
|
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
⊢ 0 < 1 - ↑K
[PROOFSTEP]
simp [hf.1]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
⊢ 1 - ↑K ≠ ⊤
[PROOFSTEP]
norm_cast
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
⊢ ¬1 - ↑K = ⊤
[PROOFSTEP]
exact ENNReal.coe_ne_top
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
h : edist x y ≠ ⊤
⊢ edist x (f x) + edist (f x) (f y) + edist (f y) y = edist x (f x) + edist y (f y) + edist (f x) (f y)
[PROOFSTEP]
rw [edist_comm y, add_right_comm]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
h : edist x y ≠ ⊤
this : edist x y ≤ edist x (f x) + edist y (f y) + ↑K * edist x y
⊢ edist x y ≤ (edist x (f x) + edist y (f y)) / (1 - ↑K)
[PROOFSTEP]
rwa [ENNReal.le_div_iff_mul_le (Or.inl hf.one_sub_K_ne_zero) (Or.inl one_sub_K_ne_top), mul_comm,
ENNReal.sub_mul fun _ _ ↦ h, one_mul, tsub_le_iff_right]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
h : edist x y ≠ ⊤
hy : IsFixedPt f y
⊢ edist x y ≤ edist x (f x) / (1 - ↑K)
[PROOFSTEP]
simpa only [hy.eq, edist_self, add_zero] using hf.edist_inequality h
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
hx : IsFixedPt f x
hy : IsFixedPt f y
⊢ x = y ∨ edist x y = ⊤
[PROOFSTEP]
refine' or_iff_not_imp_right.2 fun h ↦ edist_le_zero.1 _
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
hx : IsFixedPt f x
hy : IsFixedPt f y
h : ¬edist x y = ⊤
⊢ edist x y ≤ 0
[PROOFSTEP]
simpa only [hx.eq, edist_self, add_zero, ENNReal.zero_div] using hf.edist_le_of_fixedPoint h hy
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
⊢ edist x (efixedPoint f hf x hx) ≤ edist x (f x) / (1 - ↑K)
[PROOFSTEP]
convert hf.apriori_edist_iterate_efixedPoint_le hx 0
[GOAL]
case h.e'_4.h.e'_5
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
⊢ edist x (f x) = edist x (f x) * ↑K ^ 0
[PROOFSTEP]
simp only [pow_zero, mul_one]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
⊢ efixedPoint f hf x hx = efixedPoint f hf y hy
[PROOFSTEP]
refine' (hf.eq_or_edist_eq_top_of_fixedPoints _ _).elim id fun h' ↦ False.elim (ne_of_lt _ h')
[GOAL]
case refine'_1
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint f hf x hx)
[PROOFSTEP]
try apply efixedPoint_isFixedPt
[GOAL]
case refine'_1
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint f hf x hx)
[PROOFSTEP]
apply efixedPoint_isFixedPt
[GOAL]
case refine'_2
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint f hf y hy)
[PROOFSTEP]
try apply efixedPoint_isFixedPt
[GOAL]
case refine'_2
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint f hf y hy)
[PROOFSTEP]
apply efixedPoint_isFixedPt
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) < ⊤
[PROOFSTEP]
try apply efixedPoint_isFixedPt
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) < ⊤
[PROOFSTEP]
apply efixedPoint_isFixedPt
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) < ⊤
[PROOFSTEP]
change edistLtTopSetoid.Rel _ _
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid (efixedPoint f hf x hx) (efixedPoint f hf y hy)
[PROOFSTEP]
trans x
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid (efixedPoint f hf x hx) x
[PROOFSTEP]
apply Setoid.symm'
[GOAL]
case a
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x (efixedPoint f hf x hx)
[PROOFSTEP]
exact hf.edist_efixedPoint_lt_top hx
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x (efixedPoint f hf y hy)
[PROOFSTEP]
trans y
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x y
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x : α
hx : edist x (f x) ≠ ⊤
y : α
hy : edist y (f y) ≠ ⊤
h : edist x y ≠ ⊤
h' : edist (efixedPoint f hf x hx) (efixedPoint f hf y hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid y (efixedPoint f hf y hy)
[PROOFSTEP]
exacts [lt_top_iff_ne_top.2 h, hf.edist_efixedPoint_lt_top hy]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
⊢ ∃ y,
y ∈ s ∧
IsFixedPt f y ∧
Tendsto (fun n => f^[n] x) atTop (𝓝 y) ∧ ∀ (n : ℕ), edist (f^[n] x) y ≤ edist x (f x) * ↑K ^ n / (1 - ↑K)
[PROOFSTEP]
haveI := hsc.completeSpace_coe
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
⊢ ∃ y,
y ∈ s ∧
IsFixedPt f y ∧
Tendsto (fun n => f^[n] x) atTop (𝓝 y) ∧ ∀ (n : ℕ), edist (f^[n] x) y ≤ edist x (f x) * ↑K ^ n / (1 - ↑K)
[PROOFSTEP]
rcases hf.exists_fixedPoint ⟨x, hxs⟩ hx with ⟨y, hfy, h_tendsto, hle⟩
[GOAL]
case intro.intro.intro
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
⊢ ∃ y,
y ∈ s ∧
IsFixedPt f y ∧
Tendsto (fun n => f^[n] x) atTop (𝓝 y) ∧ ∀ (n : ℕ), edist (f^[n] x) y ≤ edist x (f x) * ↑K ^ n / (1 - ↑K)
[PROOFSTEP]
refine' ⟨y, y.2, Subtype.ext_iff_val.1 hfy, _, fun n ↦ _⟩
[GOAL]
case intro.intro.intro.refine'_1
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
⊢ Tendsto (fun n => f^[n] x) atTop (𝓝 ↑y)
[PROOFSTEP]
convert (continuous_subtype_val.tendsto _).comp h_tendsto
[GOAL]
case h.e'_3.h
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
x✝ : ℕ
⊢ f^[x✝] x = (Subtype.val ∘ fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) x✝
[PROOFSTEP]
simp only [(· ∘ ·), MapsTo.iterate_restrict, MapsTo.val_restrict_apply]
[GOAL]
case intro.intro.intro.refine'_2
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
n : ℕ
⊢ edist (f^[n] x) ↑y ≤ edist x (f x) * ↑K ^ n / (1 - ↑K)
[PROOFSTEP]
convert hle n
[GOAL]
case h.e'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
n : ℕ
⊢ edist (f^[n] x) ↑y = edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y
[PROOFSTEP]
rw [MapsTo.iterate_restrict]
[GOAL]
case h.e'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
this : CompleteSpace ↑s
y : ↑s
hfy : IsFixedPt (MapsTo.restrict f s s hsf) y
h_tendsto : Tendsto (fun n => (MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) atTop (𝓝 y)
hle :
∀ (n : ℕ),
edist ((MapsTo.restrict f s s hsf)^[n] { val := x, property := hxs }) y ≤
edist { val := x, property := hxs } (MapsTo.restrict f s s hsf { val := x, property := hxs }) * ↑K ^ n / (1 - ↑K)
n : ℕ
⊢ edist (f^[n] x) ↑y = edist (MapsTo.restrict f^[n] s s (_ : MapsTo f^[n] s s) { val := x, property := hxs }) y
[PROOFSTEP]
rfl
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
⊢ edist x (efixedPoint' f hsc hsf hf x hxs hx) ≤ edist x (f x) / (1 - ↑K)
[PROOFSTEP]
convert hf.apriori_edist_iterate_efixedPoint_le' hsc hsf hxs hx 0
[GOAL]
case h.e'_4.h.e'_5
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hf : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
⊢ edist x (f x) = edist x (f x) * ↑K ^ 0
[PROOFSTEP]
rw [pow_zero, mul_one]
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
⊢ efixedPoint' f hsc hsf hfs x hxs hx = efixedPoint' f htc htf hft y hyt hy
[PROOFSTEP]
refine' (hf.eq_or_edist_eq_top_of_fixedPoints _ _).elim id fun h' ↦ False.elim (ne_of_lt _ h')
[GOAL]
case refine'_1
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint' f hsc hsf hfs x hxs hx)
[PROOFSTEP]
try apply efixedPoint_isFixedPt'
[GOAL]
case refine'_1
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint' f hsc hsf hfs x hxs hx)
[PROOFSTEP]
apply efixedPoint_isFixedPt'
[GOAL]
case refine'_2
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
try apply efixedPoint_isFixedPt'
[GOAL]
case refine'_2
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
⊢ IsFixedPt f (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
apply efixedPoint_isFixedPt'
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) < ⊤
[PROOFSTEP]
try apply efixedPoint_isFixedPt'
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) < ⊤
[PROOFSTEP]
apply efixedPoint_isFixedPt'
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) < ⊤
[PROOFSTEP]
change edistLtTopSetoid.Rel _ _
[GOAL]
case refine'_3
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
trans x
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid (efixedPoint' f hsc hsf hfs x hxs hx) x
[PROOFSTEP]
apply Setoid.symm'
[GOAL]
case a
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x (efixedPoint' f hsc hsf hfs x hxs hx)
[PROOFSTEP]
apply edist_efixedPoint_lt_top'
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
trans y
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid x y
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid y (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
exact lt_top_iff_ne_top.2 hxy
[GOAL]
α : Type u_1
inst✝ : EMetricSpace α
cs : CompleteSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
s : Set α
hsc : IsComplete s
hsf : MapsTo f s s
hfs : ContractingWith K (MapsTo.restrict f s s hsf)
x : α
hxs : x ∈ s
hx : edist x (f x) ≠ ⊤
t : Set α
htc : IsComplete t
htf : MapsTo f t t
hft : ContractingWith K (MapsTo.restrict f t t htf)
y : α
hyt : y ∈ t
hy : edist y (f y) ≠ ⊤
hxy : edist x y ≠ ⊤
h' : edist (efixedPoint' f hsc hsf hfs x hxs hx) (efixedPoint' f htc htf hft y hyt hy) = ⊤
⊢ Setoid.Rel edistLtTopSetoid y (efixedPoint' f htc htf hft y hyt hy)
[PROOFSTEP]
apply edist_efixedPoint_lt_top'
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
this : dist x y ≤ dist x (f x) + dist y (f y) + ↑K * dist x y
⊢ dist x y ≤ (dist x (f x) + dist y (f y)) / (1 - ↑K)
[PROOFSTEP]
rwa [le_div_iff hf.one_sub_K_pos, mul_comm, _root_.sub_mul, one_mul, sub_le_iff_le_add]
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
x y : α
hy : IsFixedPt f y
⊢ dist x y ≤ dist x (f x) / (1 - ↑K)
[PROOFSTEP]
simpa only [hy.eq, dist_self, add_zero] using hf.dist_inequality x y
[GOAL]
α : Type u_1
inst✝ : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
g : α → α
x y : α
hx : IsFixedPt f x
hy : IsFixedPt g y
C : ℝ
hfg : ∀ (z : α), dist (f z) (g z) ≤ C
⊢ dist y (f y) / (1 - ↑K) = dist (f y) (g y) / (1 - ↑K)
[PROOFSTEP]
rw [hy.eq, dist_comm]
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
x : α
n : ℕ
⊢ dist (f^[n] x) (fixedPoint f hf) ≤ dist (f^[n] x) (f^[n + 1] x) / (1 - ↑K)
[PROOFSTEP]
rw [iterate_succ']
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
x : α
n : ℕ
⊢ dist (f^[n] x) (fixedPoint f hf) ≤ dist (f^[n] x) ((f ∘ f^[n]) x) / (1 - ↑K)
[PROOFSTEP]
apply hf.dist_fixedPoint_le
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
x : α
⊢ Tendsto (fun n => f^[n] x) atTop (𝓝 (fixedPoint f hf))
[PROOFSTEP]
convert tendsto_iterate_efixedPoint hf (edist_ne_top x _)
[GOAL]
case h.e'_5.h.e'_3
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
x : α
⊢ fixedPoint f hf = efixedPoint f hf x (_ : edist x (f x) ≠ ⊤)
[PROOFSTEP]
refine' (fixedPoint_unique _ _).symm
[GOAL]
case h.e'_5.h.e'_3
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
x : α
⊢ IsFixedPt f (efixedPoint f hf x (_ : edist x (f x) ≠ ⊤))
[PROOFSTEP]
apply efixedPoint_isFixedPt
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
⊢ IsFixedPt f (fixedPoint f^[n] hf)
[PROOFSTEP]
set x := hf.fixedPoint f^[n]
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
⊢ IsFixedPt f x
[PROOFSTEP]
have hx : f^[n] x = x := hf.fixedPoint_isFixedPt
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
⊢ IsFixedPt f x
[PROOFSTEP]
have := hf.toLipschitzWith.dist_le_mul x (f x)
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
this : dist (f^[n] x) (f^[n] (f x)) ≤ ↑K * dist x (f x)
⊢ IsFixedPt f x
[PROOFSTEP]
rw [← iterate_succ_apply, iterate_succ_apply', hx] at this
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
this : dist x (f x) ≤ ↑K * dist x (f x)
⊢ IsFixedPt f x
[PROOFSTEP]
revert this
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
⊢ dist x (f x) ≤ ↑K * dist x (f x) → IsFixedPt f x
[PROOFSTEP]
contrapose!
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
⊢ ¬IsFixedPt f (fixedPoint f^[n] hf) →
↑K * dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf)) < dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf))
[PROOFSTEP]
intro this
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
this : ¬IsFixedPt f (fixedPoint f^[n] hf)
⊢ ↑K * dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf)) < dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf))
[PROOFSTEP]
have := dist_pos.2 (Ne.symm this)
[GOAL]
α : Type u_1
inst✝² : MetricSpace α
K : ℝ≥0
f : α → α
hf✝ : ContractingWith K f
inst✝¹ : Nonempty α
inst✝ : CompleteSpace α
n : ℕ
hf : ContractingWith K f^[n]
x : α := fixedPoint f^[n] hf
hx : f^[n] x = x
this✝ : ¬IsFixedPt f (fixedPoint f^[n] hf)
this : 0 < dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf))
⊢ ↑K * dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf)) < dist (fixedPoint f^[n] hf) (f (fixedPoint f^[n] hf))
[PROOFSTEP]
simpa only [NNReal.coe_one, one_mul, NNReal.val_eq_coe] using (mul_lt_mul_right this).mpr hf.left
|
Maybe you'll have spotted it, maybe not... but guess what? I was in this morning's Mind Meld!
In case you weren't aware, the Mind Meld is a regular feature on the Hugo Award-winning SF Signal, which asks a bunch of genre fiction's best and brightest to put their heads together to answer a certain question.
In response, I wrote about "the years a friend and I spent butting heads over a couple of comic books. He was a Marvel man; me, a DC devotee. He read The X-Men; I was an unabashed Batman fan. Matter of fact, I still am, and I’d bet my last penny he’s still got the hots for Emma Frost."
As kids we were great mates, he and me. As adults, our friendship fell apart. So whether it’s Star Trek versus Star Wars or the merits of manga as opposed to anime, take heed, dear reader: at the end of the day these debates can be about the people as much as the particular properties.
Click on through, as you do, to read the rest of the Mind Meld in question, which also features Mur Lafferty, Maurice Broaddus, David Lomax and a whole load of other awesome authors.
lemma le_add_iff1: "a *\<^sub>R e + c \<le> b *\<^sub>R e + d \<longleftrightarrow> (a - b) *\<^sub>R e + c \<le> d" for c d e :: "'a::ordered_real_vector"
Formal statement is: lemmas prime_dvd_mult_eq_int = prime_dvd_mult_iff[where ?'a = int] Informal statement is: For a prime $p$ and integers $a, b$, we have $p \mid ab$ if and only if $p \mid a$ or $p \mid b$.
/-
Copyright (c) 2018 Johannes Hölzl. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl
! This file was ported from Lean 3 source module data.finsupp.multiset
! leanprover-community/mathlib commit 4c19a16e4b705bf135cf9a80ac18fcc99c438514
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Data.Finsupp.Basic
import Mathbin.Data.Finsupp.Order
/-!
# Equivalence between `multiset` and `ℕ`-valued finitely supported functions
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This defines `finsupp.to_multiset` the equivalence between `α →₀ ℕ` and `multiset α`, along
with `multiset.to_finsupp` the reverse equivalence and `finsupp.order_iso_multiset` the equivalence
promoted to an order isomorphism.
-/
open Finset
open BigOperators Classical
noncomputable section
variable {α β ι : Type _}
namespace Finsupp
#print Finsupp.toMultiset /-
/-- Given `f : α →₀ ℕ`, `f.to_multiset` is the multiset with multiplicities given by the values of
`f` on the elements of `α`. We define this function as an `add_equiv`. -/
def toMultiset : (α →₀ ℕ) ≃+ Multiset α
where
toFun f := f.Sum fun a n => n • {a}
invFun s := ⟨s.toFinset, fun a => s.count a, fun a => by simp⟩
left_inv f :=
ext fun a =>
by
simp only [Sum, Multiset.count_sum', Multiset.count_singleton, mul_boole, coe_mk,
mem_support_iff, Multiset.count_nsmul, Finset.sum_ite_eq, ite_not, ite_eq_right_iff]
exact Eq.symm
right_inv s := by simp only [Sum, coe_mk, Multiset.toFinset_sum_count_nsmul_eq]
map_add' f g := sum_add_index' (fun a => zero_nsmul _) fun a => add_nsmul _
#align finsupp.to_multiset Finsupp.toMultiset
-/
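-- Illustrative example: for `f = Finsupp.single a 2 + Finsupp.single b 1` with `a ≠ b`,
-- `Finsupp.toMultiset f` is the multiset `{a, a, b}`: each `x : α` occurs with
-- multiplicity `f x`.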
/- warning: finsupp.to_multiset_zero -> Finsupp.toMultiset_zero is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}}, Eq.{succ u1} (Multiset.{u1} α) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) => (Finsupp.{u1, 0} α Nat Nat.hasZero) -> (Multiset.{u1} α)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (Finsupp.toMultiset.{u1} α) (OfNat.ofNat.{u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) 0 (OfNat.mk.{u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) 0 (Zero.zero.{u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Finsupp.zero.{u1, 0} α Nat Nat.hasZero))))) (OfNat.ofNat.{u1} (Multiset.{u1} α) 0 (OfNat.mk.{u1} (Multiset.{u1} α) 0 (Zero.zero.{u1} (Multiset.{u1} α) (Multiset.hasZero.{u1} α))))
but is expected to have type
forall {α : Type.{u1}}, Eq.{succ u1} ((fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) (OfNat.ofNat.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) 0 (Zero.toOfNat0.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.zero.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))))) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun (_x : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => (fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.addZeroClass.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddEquivClass.instAddMonoidHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat 
(LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.addZeroClass.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α)))))) (AddEquiv.instAddEquivClassAddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α))))) (Finsupp.toMultiset.{u1} α) (OfNat.ofNat.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) 0 (Zero.toOfNat0.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.zero.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))))) (OfNat.ofNat.{u1} ((fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) (OfNat.ofNat.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) 0 (Zero.toOfNat0.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.zero.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))))) 0 (Zero.toOfNat0.{u1} ((fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) (OfNat.ofNat.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) 0 (Zero.toOfNat0.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.zero.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))))) (Multiset.instZeroMultiset.{u1} α)))
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_zero Finsupp.toMultiset_zeroₓ'. -/
theorem toMultiset_zero : (0 : α →₀ ℕ).toMultiset = 0 :=
rfl
#align finsupp.to_multiset_zero Finsupp.toMultiset_zero
/- warning: finsupp.to_multiset_add -> Finsupp.toMultiset_add is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} (m : Finsupp.{u1, 0} α Nat Nat.hasZero) (n : Finsupp.{u1, 0} α Nat Nat.hasZero), Eq.{succ u1} (Multiset.{u1} α) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) => (Finsupp.{u1, 0} α Nat Nat.hasZero) -> (Multiset.{u1} α)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (Finsupp.toMultiset.{u1} α) (HAdd.hAdd.{u1, u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Finsupp.{u1, 0} α Nat Nat.hasZero) (Finsupp.{u1, 0} α Nat Nat.hasZero) (instHAdd.{u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) m n)) (HAdd.hAdd.{u1, u1, u1} (Multiset.{u1} α) (Multiset.{u1} α) (Multiset.{u1} α) (instHAdd.{u1} (Multiset.{u1} α) (Multiset.hasAdd.{u1} α)) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) => (Finsupp.{u1, 0} α Nat Nat.hasZero) -> (Multiset.{u1} α)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (Finsupp.toMultiset.{u1} α) m) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) => (Finsupp.{u1, 0} α Nat Nat.hasZero) -> (Multiset.{u1} α)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} α Nat Nat.hasZero) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} α)) (Finsupp.toMultiset.{u1} α) n))
but is expected to have type
forall {α : Type.{u1}} (m : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (n : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)), Eq.{succ u1} ((fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) (HAdd.hAdd.{u1, u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (instHAdd.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) m n)) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun (_x : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => (fun ([email protected]._hyg.403 : Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} α) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddZeroClass.toAdd.{u1} (Multiset.{u1} α) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} α) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} α))))))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.add.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} α)) (Finsupp.{u1, 0} α Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} α) (Finsupp.addZeroClass.{u1, 0} α Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} α) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} α) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} α) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} α) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} 
(remainder of the auto-generated Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_add Finsupp.toMultiset_addₓ'. -/
theorem toMultiset_add (m n : α →₀ ℕ) : (m + n).toMultiset = m.toMultiset + n.toMultiset :=
  toMultiset.map_add m n
#align finsupp.to_multiset_add Finsupp.toMultiset_add
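
-- Illustrative example (a sketch, not from the original mathport output; it assumes the
-- file's ambient `variable {α : Type*}` context): since `toMultiset_add` is just `map_add`
-- of the `AddEquiv`, it chains across repeated sums.
example (m n k : α →₀ ℕ) :
    Finsupp.toMultiset (m + n + k) =
      Finsupp.toMultiset m + Finsupp.toMultiset n + Finsupp.toMultiset k := by
  rw [toMultiset_add, toMultiset_add]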
/- warning: finsupp.to_multiset_apply -> Finsupp.toMultiset_apply is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_apply Finsupp.toMultiset_applyₓ'. -/
theorem toMultiset_apply (f : α →₀ ℕ) : f.toMultiset = f.sum fun a n => n • {a} :=
  rfl
#align finsupp.to_multiset_apply Finsupp.toMultiset_apply
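
-- A small sketch (not part of the original port): unfolding with `toMultiset_apply`
-- re-derives the zero case by hand. It assumes Mathlib's `Finsupp.sum_zero_index` is
-- available in this environment.
example : Finsupp.toMultiset (0 : α →₀ ℕ) = 0 := by
  rw [toMultiset_apply, sum_zero_index]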
/- warning: finsupp.to_multiset_symm_apply -> Finsupp.toMultiset_symm_apply is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_symm_apply Finsupp.toMultiset_symm_applyₓ'. -/
@[simp]
theorem toMultiset_symm_apply [DecidableEq α] (s : Multiset α) (x : α) :
    Finsupp.toMultiset.symm s x = s.count x := by convert rfl
#align finsupp.to_multiset_symm_apply Finsupp.toMultiset_symm_apply
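
-- Illustrative sketch (not part of the original port): membership in `s` yields a positive
-- multiplicity after applying the inverse equivalence. It assumes Mathlib's
-- `Multiset.count_pos` is available in this environment.
example [DecidableEq α] (s : Multiset α) (x : α) (h : x ∈ s) :
    0 < Finsupp.toMultiset.symm s x := by
  rw [toMultiset_symm_apply]
  exact Multiset.count_pos.mpr h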
/- warning: finsupp.to_multiset_single -> Finsupp.toMultiset_single is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_single Finsupp.toMultiset_singleₓ'. -/
@[simp]
theorem toMultiset_single (a : α) (n : ℕ) : toMultiset (single a n) = n • {a} := by
  rw [toMultiset_apply, sum_single_index] <;> apply zero_nsmul
#align finsupp.to_multiset_single Finsupp.toMultiset_single
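
-- Illustrative example (a sketch, not from the original mathport output; it assumes the
-- file's ambient `variable {α : Type*}` context): combining `toMultiset_add` with
-- `toMultiset_single` computes the multiset of a two-point finitely supported function.
example (a b : α) (n k : ℕ) :
    Finsupp.toMultiset (single a n + single b k) = n • {a} + k • {b} := by
  rw [toMultiset_add, toMultiset_single, toMultiset_single]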
/- warning: finsupp.to_multiset_sum -> Finsupp.toMultiset_sum is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_sum Finsupp.toMultiset_sumₓ'. -/
theorem toMultiset_sum {f : ι → α →₀ ℕ} (s : Finset ι) :
    Finsupp.toMultiset (∑ i in s, f i) = ∑ i in s, Finsupp.toMultiset (f i) :=
  AddEquiv.map_sum _ _ _
#align finsupp.to_multiset_sum Finsupp.toMultiset_sum
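
-- A sketch (not part of the original port; it assumes the ambient
-- `variable {α ι : Type*}` context of this file): `toMultiset_sum` and `toMultiset_add`
-- together distribute `toMultiset` over a finite sum of pointwise sums.
example (f g : ι → α →₀ ℕ) (s : Finset ι) :
    Finsupp.toMultiset (∑ i in s, (f i + g i)) =
      ∑ i in s, (Finsupp.toMultiset (f i) + Finsupp.toMultiset (g i)) := by
  simp_rw [toMultiset_sum, toMultiset_add]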
/- warning: finsupp.to_multiset_sum_single -> Finsupp.toMultiset_sum_single is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_sum_single Finsupp.toMultiset_sum_singleₓ'. -/
theorem toMultiset_sum_single (s : Finset ι) (n : ℕ) :
    Finsupp.toMultiset (∑ i in s, single i n) = n • s.val := by
  simp_rw [toMultiset_sum, Finsupp.toMultiset_single, sum_nsmul, sum_multiset_singleton]
#align finsupp.to_multiset_sum_single Finsupp.toMultiset_sum_single
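
-- Illustrative corollary (a sketch, not from the original mathport output): with `n = 1`
-- the previous lemma recovers the underlying multiset of a `Finset`. It assumes Mathlib's
-- `one_nsmul` is available in this environment.
example (s : Finset ι) : Finsupp.toMultiset (∑ i in s, single i 1) = s.val := by
  rw [toMultiset_sum_single, one_nsmul]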
/- warning: finsupp.card_to_multiset -> Finsupp.card_toMultiset is a dubious translation:
(auto-generated Lean 3/Lean 4 type elaboration omitted; it was mangled during extraction)
Case conversion may be inaccurate. Consider using '#align finsupp.card_to_multiset Finsupp.card_toMultisetₓ'. -/
theorem card_toMultiset (f : α →₀ ℕ) : f.toMultiset.card = f.sum fun a => id := by
simp [toMultiset_apply, AddMonoidHom.map_finsupp_sum, Function.id_def]
#align finsupp.card_to_multiset Finsupp.card_toMultiset
/- warning: finsupp.to_multiset_map -> Finsupp.toMultiset_map is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_map Finsupp.toMultiset_mapₓ'. -/
theorem toMultiset_map (f : α →₀ ℕ) (g : α → β) : f.toMultiset.map g = (f.mapDomain g).toMultiset :=
by
refine' f.induction _ _
· rw [toMultiset_zero, Multiset.map_zero, mapDomain_zero, toMultiset_zero]
· intro a n f _ _ ih
rw [toMultiset_add, Multiset.map_add, ih, mapDomain_add, mapDomain_single,
toMultiset_single, toMultiset_add, toMultiset_single, ← Multiset.coe_mapAddMonoidHom,
(Multiset.mapAddMonoidHom g).map_nsmul]
rfl
#align finsupp.to_multiset_map Finsupp.toMultiset_map
/- warning: finsupp.prod_to_multiset -> Finsupp.prod_toMultiset is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align finsupp.prod_to_multiset Finsupp.prod_toMultisetₓ'. -/
@[simp]
theorem prod_toMultiset [CommMonoid α] (f : α →₀ ℕ) : f.toMultiset.prod = f.prod fun a n => a ^ n :=
by
refine' f.induction _ _
· rw [toMultiset_zero, Multiset.prod_zero, Finsupp.prod_zero_index]
· intro a n f _ _ ih
rw [toMultiset_add, Multiset.prod_add, ih, toMultiset_single, Multiset.prod_nsmul,
Finsupp.prod_add_index' pow_zero pow_add, Finsupp.prod_single_index, Multiset.prod_singleton]
· exact pow_zero a
#align finsupp.prod_to_multiset Finsupp.prod_toMultiset
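-- Illustrative sketch, not part of the ported file: specialising the statement above
-- to a single-point finsupp gives the expected power.  The proof only reuses lemmas
-- already invoked in this file (`toMultiset_single`, `Multiset.prod_nsmul`,
-- `Multiset.prod_singleton`).
example [CommMonoid α] (a : α) (n : ℕ) :
    (Finsupp.toMultiset (Finsupp.single a n)).prod = a ^ n := by
  rw [Finsupp.toMultiset_single, Multiset.prod_nsmul, Multiset.prod_singleton]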
/- warning: finsupp.to_finset_to_multiset -> Finsupp.toFinset_toMultiset is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align finsupp.to_finset_to_multiset Finsupp.toFinset_toMultisetₓ'. -/
@[simp]
theorem toFinset_toMultiset [DecidableEq α] (f : α →₀ ℕ) : f.toMultiset.toFinset = f.support :=
by
refine' f.induction _ _
· rw [toMultiset_zero, Multiset.toFinset_zero, support_zero]
· intro a n f ha hn ih
rw [toMultiset_add, Multiset.toFinset_add, ih, toMultiset_single, support_add_eq,
support_single_ne_zero _ hn, Multiset.toFinset_nsmul _ _ hn, Multiset.toFinset_singleton]
refine' Disjoint.mono_left support_single_subset _
rwa [Finset.disjoint_singleton_left]
#align finsupp.to_finset_to_multiset Finsupp.toFinset_toMultiset
/- warning: finsupp.count_to_multiset -> Finsupp.count_toMultiset is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align finsupp.count_to_multiset Finsupp.count_toMultisetₓ'. -/
@[simp]
theorem count_toMultiset [DecidableEq α] (f : α →₀ ℕ) (a : α) : f.toMultiset.count a = f a :=
calc
f.toMultiset.count a = f.sum fun x n => (n • {x} : Multiset α).count a :=
(Multiset.countAddMonoidHom a).map_sum _ f.support
_ = f.sum fun x n => n * ({x} : Multiset α).count a := by simp only [Multiset.count_nsmul]
_ = f a * ({a} : Multiset α).count a :=
(sum_eq_single _
(fun a' _ H => by
simp only [Multiset.count_singleton, if_false, H.symm, MulZeroClass.mul_zero])
fun H => by simp only [not_mem_support_iff.1 H, MulZeroClass.zero_mul])
_ = f a := by rw [Multiset.count_singleton_self, mul_one]
#align finsupp.count_to_multiset Finsupp.count_toMultiset
/- warning: finsupp.mem_to_multiset -> Finsupp.mem_toMultiset is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align finsupp.mem_to_multiset Finsupp.mem_toMultisetₓ'. -/
@[simp]
theorem mem_toMultiset (f : α →₀ ℕ) (i : α) : i ∈ f.toMultiset ↔ i ∈ f.support := by
rw [← Multiset.count_ne_zero, Finsupp.count_toMultiset, Finsupp.mem_support_iff]
#align finsupp.mem_to_multiset Finsupp.mem_toMultiset
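-- Illustrative sketch, not part of the ported file: `count_toMultiset` applied to a
-- single-point finsupp recovers the stored multiplicity; `Finsupp.single_eq_same` is
-- the standard evaluation lemma for `single` and is assumed to be available here.
example [DecidableEq α] (a : α) :
    (Finsupp.toMultiset (Finsupp.single a 3)).count a = 3 := by
  rw [Finsupp.count_toMultiset, Finsupp.single_eq_same]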
end Finsupp
namespace Multiset
#print Multiset.toFinsupp /-
/-- Given a multiset `s`, `s.toFinsupp` returns the finitely supported function `α →₀ ℕ` given by
the multiplicities of the elements of `s`. -/
def toFinsupp : Multiset α ≃+ (α →₀ ℕ) :=
Finsupp.toMultiset.symm
#align multiset.to_finsupp Multiset.toFinsupp
-/
/- warning: multiset.to_finsupp_support -> Multiset.toFinsupp_support is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align multiset.to_finsupp_support Multiset.toFinsupp_supportₓ'. -/
@[simp]
theorem toFinsupp_support [DecidableEq α] (s : Multiset α) : s.toFinsupp.support = s.toFinset := by
convert rfl
#align multiset.to_finsupp_support Multiset.toFinsupp_support
/- warning: multiset.to_finsupp_apply -> Multiset.toFinsupp_apply is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align multiset.to_finsupp_apply Multiset.toFinsupp_applyₓ'. -/
@[simp]
theorem toFinsupp_apply [DecidableEq α] (s : Multiset α) (a : α) : toFinsupp s a = s.count a := by
convert rfl
#align multiset.to_finsupp_apply Multiset.toFinsupp_apply
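-- Illustrative sketch, not part of the ported file: together with
-- `Multiset.count_cons_self`, `toFinsupp_apply` shows that adjoining one more copy of
-- `a` to a multiset bumps the corresponding coefficient by one.
example [DecidableEq α] (s : Multiset α) (a : α) :
    Multiset.toFinsupp (a ::ₘ s) a = s.count a + 1 := by
  rw [Multiset.toFinsupp_apply, Multiset.count_cons_self]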
/- warning: multiset.to_finsupp_zero -> Multiset.toFinsupp_zero is a dubious translation:
lean 3 declaration is
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `Nat.hasZero` and coercions via `coeFn`)
but is expected to have type
  (machine-generated elaborated signature omitted; it spells the zero on `ℕ` as `LinearOrderedCommMonoidWithZero.toZero Nat Nat.linearOrderedCommMonoidWithZero` and coercions via `FunLike.coe`)
Case conversion may be inaccurate. Consider using '#align multiset.to_finsupp_zero Multiset.toFinsupp_zeroₓ'. -/
theorem toFinsupp_zero : toFinsupp (0 : Multiset α) = 0 :=
AddEquiv.map_zero _
#align multiset.to_finsupp_zero Multiset.toFinsupp_zero

/- warning: multiset.to_finsupp_add -> Multiset.toFinsupp_add is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero` and `Multiset.hasAdd` instances,
while the expected Lean 4 type uses `LinearOrderedCommMonoidWithZero.toZero` and
`Multiset.instAddMultiset` instead. Case conversion may be inaccurate.
Consider using '#align multiset.to_finsupp_add Multiset.toFinsupp_addₓ'. -/
theorem toFinsupp_add (s t : Multiset α) : toFinsupp (s + t) = toFinsupp s + toFinsupp t :=
toFinsupp.map_add s t
#align multiset.to_finsupp_add Multiset.toFinsupp_add
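
-- Illustrative usage (editorial sketch, not part of the original file): `toFinsupp_add`
-- and `toFinsupp_zero` combine in the expected way on a sum with the empty multiset.
example (s : Multiset α) : toFinsupp (s + 0) = toFinsupp s + 0 := by
  rw [toFinsupp_add, toFinsupp_zero]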

/- warning: multiset.to_finsupp_singleton -> Multiset.toFinsupp_singleton is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero`, `Nat.hasOne`, `Multiset.hasAdd` and
`Multiset.hasSingleton` instances, while the expected Lean 4 type uses
`LinearOrderedCommMonoidWithZero.toZero`, `instOfNatNat`, `Multiset.instAddMultiset` and
`Multiset.instSingletonMultiset` instead. Case conversion may be inaccurate.
Consider using '#align multiset.to_finsupp_singleton Multiset.toFinsupp_singletonₓ'. -/
@[simp]
theorem toFinsupp_singleton (a : α) : toFinsupp ({a} : Multiset α) = Finsupp.single a 1 :=
Finsupp.toMultiset.symm_apply_eq.2 <| by simp
#align multiset.to_finsupp_singleton Multiset.toFinsupp_singleton
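
-- Illustrative usage (editorial sketch, not part of the original file): a doubleton
-- `{a} + {a}` is sent to the sum of two copies of `Finsupp.single a 1`.
example (a : α) :
    toFinsupp ({a} + {a} : Multiset α) = Finsupp.single a 1 + Finsupp.single a 1 := by
  rw [toFinsupp_add, toFinsupp_singleton]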

/- warning: multiset.to_finsupp_to_multiset -> Multiset.toFinsupp_toMultiset is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero` and `Multiset.hasAdd` instances,
while the expected Lean 4 type uses `LinearOrderedCommMonoidWithZero.toZero` and
`Multiset.instAddMultiset` instead. Case conversion may be inaccurate.
Consider using '#align multiset.to_finsupp_to_multiset Multiset.toFinsupp_toMultisetₓ'. -/
@[simp]
theorem toFinsupp_toMultiset (s : Multiset α) : s.toFinsupp.toMultiset = s :=
Finsupp.toMultiset.apply_symm_apply s
#align multiset.to_finsupp_to_multiset Multiset.toFinsupp_toMultiset
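
-- Illustrative usage (editorial sketch, not part of the original file): the round trip
-- lets facts about a multiset be transported through its finsupp of multiplicities.
example (s : Multiset α) (a : α) (h : a ∈ s) : a ∈ s.toFinsupp.toMultiset := by
  rwa [toFinsupp_toMultiset]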

/- warning: multiset.to_finsupp_eq_iff -> Multiset.toFinsupp_eq_iff is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero` and `Multiset.hasAdd` instances,
while the expected Lean 4 type uses `LinearOrderedCommMonoidWithZero.toZero` and
`Multiset.instAddMultiset` instead. Case conversion may be inaccurate.
Consider using '#align multiset.to_finsupp_eq_iff Multiset.toFinsupp_eq_iffₓ'. -/
theorem toFinsupp_eq_iff {s : Multiset α} {f : α →₀ ℕ} : s.toFinsupp = f ↔ s = f.toMultiset :=
Finsupp.toMultiset.symm_apply_eq
#align multiset.to_finsupp_eq_iff Multiset.toFinsupp_eq_iff
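
-- Illustrative usage (editorial sketch, not part of the original file): reading the
-- equivalence from right to left recovers the other round trip, which is proved as
-- `Finsupp.toMultiset_toFinsupp` below.
example (f : α →₀ ℕ) : f.toMultiset.toFinsupp = f :=
  toFinsupp_eq_iff.mpr rfl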
end Multiset

/- warning: finsupp.to_multiset_to_finsupp -> Finsupp.toMultiset_toFinsupp is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero` and `Multiset.hasAdd` instances,
while the expected Lean 4 type uses `LinearOrderedCommMonoidWithZero.toZero` and
`Multiset.instAddMultiset` instead. Case conversion may be inaccurate.
Consider using '#align finsupp.to_multiset_to_finsupp Finsupp.toMultiset_toFinsuppₓ'. -/
@[simp]
theorem Finsupp.toMultiset_toFinsupp (f : α →₀ ℕ) : f.toMultiset.toFinsupp = f :=
Finsupp.toMultiset.symm_apply_apply f
#align finsupp.to_multiset_to_finsupp Finsupp.toMultiset_toFinsupp
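
-- Illustrative summary (editorial sketch, not part of the original file): the two
-- equivalences above are two-sided inverses of each other.
example (f : α →₀ ℕ) (s : Multiset α) :
    f.toMultiset.toFinsupp = f ∧ s.toFinsupp.toMultiset = s :=
  ⟨Finsupp.toMultiset_toFinsupp f, Multiset.toFinsupp_toMultiset s⟩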
/-! ### As an order isomorphism -/
namespace Finsupp

/- warning: finsupp.order_iso_multiset -> Finsupp.orderIsoMultiset is a dubious translation:
the Lean 3 declaration is stated with the `Nat.hasZero`, `Nat.hasLe` and `Multiset.partialOrder`
instances, while the expected Lean 4 type uses `LinearOrderedCommMonoidWithZero.toZero`,
`instLENat` and `Multiset.instPartialOrderMultiset` instead. Case conversion may be inaccurate.
Consider using '#align finsupp.order_iso_multiset Finsupp.orderIsoMultisetₓ'. -/
/-- `Finsupp.toMultiset` as an order isomorphism. -/
def orderIsoMultiset : (ι →₀ ℕ) ≃o Multiset ι
where
toEquiv := toMultiset.toEquiv
  map_rel_iff' {f g} := by simp [Multiset.le_iff_count, le_def]
#align finsupp.order_iso_multiset Finsupp.orderIsoMultiset
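
-- Illustrative usage (editorial sketch, not part of the original file): the order
-- isomorphism transports `≤` in both directions; this assumes Mathlib's
-- `OrderIso.le_iff_le`.
example (f g : ι →₀ ℕ) (h : f ≤ g) : orderIsoMultiset f ≤ orderIsoMultiset g :=
  orderIsoMultiset.le_iff_le.mpr h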
/- warning: finsupp.coe_order_iso_multiset -> Finsupp.coe_orderIsoMultiset is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}}, Eq.{succ u1} ((Finsupp.{u1, 0} ι Nat Nat.hasZero) -> (Multiset.{u1} ι)) (coeFn.{succ u1, succ u1} (OrderIso.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι)))) (fun (_x : RelIso.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (LE.le.{u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe)) (LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι))))) => (Finsupp.{u1, 0} ι Nat Nat.hasZero) -> (Multiset.{u1} ι)) (RelIso.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (LE.le.{u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe)) (LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι))))) (Finsupp.orderIsoMultiset.{u1} ι)) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) => (Finsupp.{u1, 0} ι Nat Nat.hasZero) -> (Multiset.{u1} ι)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) (Finsupp.toMultiset.{u1} ι))
but is expected to have type
forall {ι : Type.{u1}}, Eq.{succ u1} (forall (ᾰ : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)), (fun ([email protected]._hyg.19 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} ι) ᾰ) (FunLike.coe.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun (_x : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => (fun ([email protected]._hyg.19 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} ι) _x) (EmbeddingLike.toFunLike.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Function.instEmbeddingLikeEmbedding.{succ u1, succ u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι))) (RelEmbedding.toEmbedding.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (fun ([email protected]._hyg.1281 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) ([email protected]._hyg.1283 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => LE.le.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.instLEFinsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) instLENat) [email protected]._hyg.1281 [email protected]._hyg.1283) (fun ([email protected]._hyg.1296 : Multiset.{u1} ι) ([email protected]._hyg.1298 : Multiset.{u1} ι) => LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι))) [email protected]._hyg.1296 [email protected]._hyg.1298) (RelIso.toRelEmbedding.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (fun ([email protected]._hyg.1281 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) ([email protected]._hyg.1283 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => LE.le.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.instLEFinsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) instLENat) [email protected]._hyg.1281 [email protected]._hyg.1283) (fun ([email protected]._hyg.1296 : Multiset.{u1} ι) ([email protected]._hyg.1298 : Multiset.{u1} ι) => LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} 
ι))) [email protected]._hyg.1296 [email protected]._hyg.1298) (Finsupp.orderIsoMultiset.{u1} ι)))) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun (_x : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => (fun ([email protected]._hyg.403 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} ι) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddZeroClass.toAdd.{u1} (Multiset.{u1} ι) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι))))))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (AddEquivClass.instAddMonoidHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} 
ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (AddEquiv.instAddEquivClassAddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι))))) (Finsupp.toMultiset.{u1} ι))
Case conversion may be inaccurate. Consider using '#align finsupp.coe_order_iso_multiset Finsupp.coe_orderIsoMultisetₓ'. -/
@[simp]
theorem coe_orderIsoMultiset : ⇑(@orderIsoMultiset ι) = toMultiset :=
rfl
#align finsupp.coe_order_iso_multiset Finsupp.coe_orderIsoMultiset
/- warning: finsupp.coe_order_iso_multiset_symm -> Finsupp.coe_orderIsoMultiset_symm is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}}, Eq.{succ u1} ((Multiset.{u1} ι) -> (Finsupp.{u1, 0} ι Nat Nat.hasZero)) (coeFn.{succ u1, succ u1} (OrderIso.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι))) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe)) (fun (_x : RelIso.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι)))) (LE.le.{u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe))) => (Multiset.{u1} ι) -> (Finsupp.{u1, 0} ι Nat Nat.hasZero)) (RelIso.hasCoeToFun.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι)))) (LE.le.{u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe))) (OrderIso.symm.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.hasLe.{u1, 0} ι Nat Nat.hasZero Nat.hasLe) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι))) (Finsupp.orderIsoMultiset.{u1} ι))) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (fun (_x : AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) => (Multiset.{u1} ι) -> (Finsupp.{u1, 0} ι Nat Nat.hasZero)) (AddEquiv.hasCoeToFun.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.toFinsupp.{u1} ι))
but is expected to have type
forall {ι : Type.{u1}}, Eq.{succ u1} (forall (ᾰ : Multiset.{u1} ι), (fun ([email protected]._hyg.19 : Multiset.{u1} ι) => Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) ᾰ) (FunLike.coe.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))) (Multiset.{u1} ι) (fun (_x : Multiset.{u1} ι) => (fun ([email protected]._hyg.19 : Multiset.{u1} ι) => Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) _x) (EmbeddingLike.toFunLike.{succ u1, succ u1, succ u1} (Function.Embedding.{succ u1, succ u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Function.instEmbeddingLikeEmbedding.{succ u1, succ u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)))) (RelEmbedding.toEmbedding.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun ([email protected]._hyg.1281 : Multiset.{u1} ι) ([email protected]._hyg.1283 : Multiset.{u1} ι) => LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι))) [email protected]._hyg.1281 [email protected]._hyg.1283) (fun ([email protected]._hyg.1296 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) ([email protected]._hyg.1298 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => LE.le.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.instLEFinsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) instLENat) [email protected]._hyg.1296 [email protected]._hyg.1298) (RelIso.toRelEmbedding.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun ([email protected]._hyg.1281 : Multiset.{u1} ι) ([email protected]._hyg.1283 : Multiset.{u1} ι) => LE.le.{u1} (Multiset.{u1} ι) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι))) [email protected]._hyg.1281 [email protected]._hyg.1283) (fun ([email protected]._hyg.1296 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) ([email protected]._hyg.1298 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => LE.le.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.instLEFinsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) instLENat) [email protected]._hyg.1296 [email protected]._hyg.1298) (OrderIso.symm.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.instLEFinsupp.{u1, 0} ι Nat 
(LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) instLENat) (Preorder.toLE.{u1} (Multiset.{u1} ι) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι))) (Finsupp.orderIsoMultiset.{u1} ι))))) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (fun (_x : Multiset.{u1} ι) => (fun ([email protected]._hyg.403 : Multiset.{u1} ι) => Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddZeroClass.toAdd.{u1} (Multiset.{u1} ι) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι))))))) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddEquivClass.instAddMonoidHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (Finsupp.addZeroClass.{u1, 0} ι Nat 
(AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddEquiv.instAddEquivClassAddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)))))) (Multiset.toFinsupp.{u1} ι))
Case conversion may be inaccurate. Consider using '#align finsupp.coe_order_iso_multiset_symm Finsupp.coe_orderIsoMultiset_symmₓ'. -/
@[simp]
theorem coe_orderIsoMultiset_symm : ⇑(@orderIsoMultiset ι).symm = Multiset.toFinsupp :=
rfl
#align finsupp.coe_order_iso_multiset_symm Finsupp.coe_orderIsoMultiset_symm
/- warning: finsupp.to_multiset_strict_mono -> Finsupp.toMultiset_strictMono is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}}, StrictMono.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.preorder.{u1, 0} ι Nat Nat.hasZero (PartialOrder.toPreorder.{0} Nat (OrderedCancelAddCommMonoid.toPartialOrder.{0} Nat (StrictOrderedSemiring.toOrderedCancelAddCommMonoid.{0} Nat Nat.strictOrderedSemiring)))) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι)) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) (fun (_x : AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) => (Finsupp.{u1, 0} ι Nat Nat.hasZero) -> (Multiset.{u1} ι)) (AddEquiv.hasCoeToFun.{u1, u1} (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.hasAdd.{u1} ι)) (Finsupp.toMultiset.{u1} ι))
but is expected to have type
forall {ι : Type.{u1}}, StrictMono.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.preorder.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) (PartialOrder.toPreorder.{0} Nat (StrictOrderedSemiring.toPartialOrder.{0} Nat Nat.strictOrderedSemiring))) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι)) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (fun (_x : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => (fun ([email protected]._hyg.403 : Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) => Multiset.{u1} ι) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddZeroClass.toAdd.{u1} (Multiset.{u1} ι) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι))))))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (AddEquivClass.instAddMonoidHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι)) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) 
(Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (AddEquiv.instAddEquivClassAddEquiv.{u1, u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (Multiset.instAddMultiset.{u1} ι))))) (Finsupp.toMultiset.{u1} ι))
Case conversion may be inaccurate. Consider using '#align finsupp.to_multiset_strict_mono Finsupp.toMultiset_strictMonoₓ'. -/
theorem toMultiset_strictMono : StrictMono (@toMultiset ι) :=
(@orderIsoMultiset ι).StrictMono
#align finsupp.to_multiset_strict_mono Finsupp.toMultiset_strictMono
#print Finsupp.sum_id_lt_of_lt /-
theorem sum_id_lt_of_lt (m n : ι →₀ ℕ) (h : m < n) : (m.Sum fun _ => id) < n.Sum fun _ => id :=
by
rw [← card_to_multiset, ← card_to_multiset]
apply Multiset.card_lt_of_lt
exact to_multiset_strict_mono h
#align finsupp.sum_id_lt_of_lt Finsupp.sum_id_lt_of_lt
-/
variable (ι)
#print Finsupp.lt_wf /-
/-- The order on `ι →₀ ℕ` is well-founded. -/
theorem lt_wf : WellFounded (@LT.lt (ι →₀ ℕ) _) :=
Subrelation.wf sum_id_lt_of_lt <| InvImage.wf _ Nat.lt_wfRel
#align finsupp.lt_wf Finsupp.lt_wf
-/
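-- For illustration: well-foundedness of `<` gives strong induction on `ι →₀ ℕ`,
-- using the core lemma `WellFounded.induction`.
example (P : (ι →₀ ℕ) → Prop) (ih : ∀ f, (∀ g, g < f → P g) → P f) (f : ι →₀ ℕ) : P f :=
  (lt_wf ι).induction f ih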
end Finsupp
/- warning: multiset.to_finsupp_strict_mono -> Multiset.toFinsupp_strictMono is a dubious translation:
lean 3 declaration is
forall {ι : Type.{u1}}, StrictMono.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.partialOrder.{u1} ι)) (Finsupp.preorder.{u1, 0} ι Nat Nat.hasZero (PartialOrder.toPreorder.{0} Nat (OrderedCancelAddCommMonoid.toPartialOrder.{0} Nat (StrictOrderedSemiring.toOrderedCancelAddCommMonoid.{0} Nat Nat.strictOrderedSemiring)))) (coeFn.{succ u1, succ u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (fun (_x : AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) => (Multiset.{u1} ι) -> (Finsupp.{u1, 0} ι Nat Nat.hasZero)) (AddEquiv.hasCoeToFun.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat Nat.hasZero) (Multiset.hasAdd.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.toFinsupp.{u1} ι))
but is expected to have type
forall {ι : Type.{u1}}, StrictMono.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (PartialOrder.toPreorder.{u1} (Multiset.{u1} ι) (Multiset.instPartialOrderMultiset.{u1} ι)) (Finsupp.preorder.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero) (PartialOrder.toPreorder.{0} Nat (StrictOrderedSemiring.toPartialOrder.{0} Nat Nat.strictOrderedSemiring))) (FunLike.coe.{succ u1, succ u1, succ u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (fun (_x : Multiset.{u1} ι) => (fun ([email protected]._hyg.403 : Multiset.{u1} ι) => Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) _x) (AddHomClass.toFunLike.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddZeroClass.toAdd.{u1} (Multiset.{u1} ι) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι))))))) (AddZeroClass.toAdd.{u1} (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (AddMonoidHomClass.toAddHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) (AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddEquivClass.instAddMonoidHomClass.{u1, u1, u1} (AddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid))) (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (AddMonoid.toAddZeroClass.{u1} (Multiset.{u1} ι) (AddRightCancelMonoid.toAddMonoid.{u1} (Multiset.{u1} ι) (AddCancelMonoid.toAddRightCancelMonoid.{u1} (Multiset.{u1} ι) 
(AddCancelCommMonoid.toAddCancelMonoid.{u1} (Multiset.{u1} ι) (OrderedCancelAddCommMonoid.toCancelAddCommMonoid.{u1} (Multiset.{u1} ι) (Multiset.instOrderedCancelAddCommMonoidMultiset.{u1} ι)))))) (Finsupp.addZeroClass.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)) (AddEquiv.instAddEquivClassAddEquiv.{u1, u1} (Multiset.{u1} ι) (Finsupp.{u1, 0} ι Nat (LinearOrderedCommMonoidWithZero.toZero.{0} Nat Nat.linearOrderedCommMonoidWithZero)) (Multiset.instAddMultiset.{u1} ι) (Finsupp.add.{u1, 0} ι Nat (AddMonoid.toAddZeroClass.{0} Nat Nat.addMonoid)))))) (Multiset.toFinsupp.{u1} ι))
Case conversion may be inaccurate. Consider using '#align multiset.to_finsupp_strict_mono Multiset.toFinsupp_strictMonoₓ'. -/
theorem Multiset.toFinsupp_strictMono : StrictMono (@Multiset.toFinsupp ι) :=
(@Finsupp.orderIsoMultiset ι).symm.StrictMono
#align multiset.to_finsupp_strict_mono Multiset.toFinsupp_strictMono
|
module CS410-Nat where
open import CS410-Prelude
open import CS410-Monoid
data Nat : Set where
zero : Nat
suc : Nat -> Nat
{-# BUILTIN NATURAL Nat #-}
{-# COMPILED_DATA Nat HaskellSetup.Nat HaskellSetup.Zero HaskellSetup.Suc #-}
_+N_ : Nat -> Nat -> Nat
zero +N n = n
suc m +N n = suc (m +N n)
infixr 3 _+N_
+Mon : Monoid Nat
+Mon = record
{ e = 0
; op = _+N_
; lunit = \ m -> refl
; runit = ruHelp
; assoc = asHelp
} where
ruHelp : (m : Nat) -> m +N 0 == m
ruHelp zero = refl
ruHelp (suc m) rewrite ruHelp m = refl
asHelp : (m m' m'' : Nat) -> m +N (m' +N m'') == (m +N m') +N m''
asHelp zero m' m'' = refl
asHelp (suc m) m' m'' rewrite asHelp m m' m'' = refl
_*N_ : Nat -> Nat -> Nat
zero *N n = zero
suc m *N n = m *N n +N n
infixr 4 _*N_
_N>=_ : Nat -> Nat -> Set
m N>= zero = One
zero N>= suc n = Zero
suc m N>= suc n = m N>= n
N>=Unique : (m n : Nat)(p q : m N>= n) -> p == q
N>=Unique m zero p q = refl
N>=Unique zero (suc n) () q
N>=Unique (suc m) (suc n) p q = N>=Unique m n p q
plusSucFact : (m n : Nat) -> m +N suc n == suc m +N n
plusSucFact zero n = refl
plusSucFact (suc m) n rewrite plusSucFact m n = refl
plusCommFact : (m n : Nat) -> m +N n == n +N m
plusCommFact m zero = Monoid.runit +Mon m
plusCommFact m (suc n) rewrite plusSucFact m n | plusCommFact m n = refl
|
{-# OPTIONS --without-K #-}
open import M-types.Base.Core
open import M-types.Base.Sum
open import M-types.Base.Prod
open import M-types.Base.Eq
module M-types.Base.Equi where
Qinv : {X : Ty ℓ₀} {Y : Ty ℓ₁} →
∏[ f ∈ (X → Y)] Ty (ℓ-max ℓ₀ ℓ₁)
Qinv {_} {_} {X} {Y} f = ∑[ g ∈ (Y → X) ]
(∏[ x ∈ X ] g (f x) ≡ x) ×
(∏[ y ∈ Y ] f (g y) ≡ y)
IsEqui : {X : Ty ℓ₀} {Y : Ty ℓ₁} →
∏[ f ∈ (X → Y) ] Ty (ℓ-max ℓ₀ ℓ₁)
IsEqui {_} {_} {X} {Y} f =
(∑[ g ∈ (Y → X) ] ∏[ x ∈ X ] g (f x) ≡ x) ×
(∑[ g ∈ (Y → X) ] ∏[ y ∈ Y ] f (g y) ≡ y)
infixr 8 _≃_
_≃_ : ∏[ X ∈ Ty ℓ₀ ] ∏[ Y ∈ Ty ℓ₁ ] Ty (ℓ-max ℓ₀ ℓ₁)
X ≃ Y = ∑[ f ∈ (X → Y) ] IsEqui f
Qinv→IsEqui : {X : Ty ℓ₀} {Y : Ty ℓ₁} {f : X → Y} →
Qinv f → IsEqui f
Qinv→IsEqui (g , hom₀ , hom₁) = ((g , hom₀) , (g , hom₁))
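-- IsEqui→Qinv keeps the left inverse g₀ and builds the missing right homotopy:
-- ap g₀ of (hom₁ y)⁻¹ identifies g₀ y with g₀ (f (g₁ y)), hom₀ turns that into
-- g₀ y ≡ g₁ y, and applying f followed by hom₁ y yields f (g₀ y) ≡ y.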
IsEqui→Qinv : {X : Ty ℓ₀} {Y : Ty ℓ₁} {f : X → Y} →
IsEqui f → Qinv f
IsEqui→Qinv {_} {_} {_} {_} {f} ((g₀ , hom₀) , (g₁ , hom₁)) =
(
g₀ ,
hom₀ ,
λ y → ap f (ap g₀ (hom₁ y)⁻¹ · hom₀ (g₁ y)) · hom₁ y
)
inv : {X : Ty ℓ₀} {Y : Ty ℓ₁} →
∏[ equi ∈ X ≃ Y ] (Y → X)
inv (fun , isEqui) = pr₀ (IsEqui→Qinv isEqui)
hom₀ : {X : Ty ℓ₀} {Y : Ty ℓ₁} →
∏[ equi ∈ X ≃ Y ] ∏[ x ∈ X ] inv equi (fun equi x) ≡ x
hom₀ (fun , isEqui) = pr₀ (pr₁ (IsEqui→Qinv isEqui))
hom₁ : {X : Ty ℓ₀} {Y : Ty ℓ₁} →
∏[ equi ∈ X ≃ Y ] ∏[ y ∈ Y ] fun equi (inv equi y) ≡ y
hom₁ (fun , isEqui) = pr₁ (pr₁ (IsEqui→Qinv isEqui))
|
The numeral $n$ is equal to the monomial $x^0$ with coefficient $n$. |
function f = local_xy ( x , y )
%*****************************************************************************80
%
%% LOCAL_XY evaluates a test function that has a local minimum distinct from its global minimum.
%
% Discussion:
%
% This function has a local minimum:
%
% X* = ( 0.28581..., 0.27936...), F(X*) = 5.9225...
%
% and a global minimum:
%
% X* = ( -21.026653..., -36.760090...), F(X*) = 0.
%
% Suggested starting point:
%
% X = ( 1, 1 ), F(X) = 3.33 * 10^6.
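%
%    At that starting point the two squared terms are ( 1 + 12 - 1 )^2 = 144
%    and ( 49 + 49 + 84 + 2324 - 681 )^2 = 1825^2 = 3330625, so
%    F(1,1) = 3330769, which is the 3.33 * 10^6 quoted above.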
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 12 February 2008
%
% Author:
%
% John Burkardt
%
% Reference:
%
% David Himmelblau,
% Applied Nonlinear Programming,
% McGraw Hill, 1972,
% ISBN13: 978-0070289215,
% LC: T57.8.H55.
%
% Parameters:
%
%    Input, real X, Y, the arguments of the function.
%
% Output, real F, the value of the function at (X,Y).
%
f = ( x^2 + 12 * y - 1 )^2 ...
+ ( 49 * x^2 + 49 * y^2 + 84 * x + 2324 * y - 681 )^2;
return
end
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Mario Carneiro, Yury G. Kudryashov
! This file was ported from Lean 3 source module data.sum.basic
! leanprover-community/mathlib commit bd9851ca476957ea4549eb19b40e7b5ade9428cc
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Logic.Function.Basic
import Mathbin.Tactic.Basic
/-!
# Disjoint union of types
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
This file proves basic results about the sum type `α ⊕ β`.
`α ⊕ β` is the type made of a copy of `α` and a copy of `β`. It is also called *disjoint union*.
## Main declarations
* `sum.get_left`: Retrieves the left content of `x : α ⊕ β` or returns `none` if it's coming from
the right.
* `sum.get_right`: Retrieves the right content of `x : α ⊕ β` or returns `none` if it's coming from
the left.
* `sum.is_left`: Returns whether `x : α ⊕ β` comes from the left component or not.
* `sum.is_right`: Returns whether `x : α ⊕ β` comes from the right component or not.
* `sum.map`: Maps `α ⊕ β` to `γ ⊕ δ` component-wise.
* `sum.elim`: Nondependent eliminator/induction principle for `α ⊕ β`.
* `sum.swap`: Maps `α ⊕ β` to `β ⊕ α` by swapping components.
* `sum.lex`: Lexicographic order on `α ⊕ β` induced by a relation on `α` and a relation on `β`.
## Notes
The definition of `sum` takes values in `Type*`. This effectively forbids `Prop`-valued sum types.
To this end, we have `psum`, which takes values in `Sort*` and carries a more complicated
universe signature as a consequence. The `Prop` version is `or`.
-/
universe u v w x
variable {α : Type u} {α' : Type w} {β : Type v} {β' : Type x} {γ δ : Type _}
namespace Sum
deriving instance DecidableEq for Sum
#print Sum.forall /-
@[simp]
theorem forall {p : Sum α β → Prop} : (∀ x, p x) ↔ (∀ a, p (inl a)) ∧ ∀ b, p (inr b) :=
⟨fun h => ⟨fun a => h _, fun b => h _⟩, fun ⟨h₁, h₂⟩ => Sum.rec h₁ h₂⟩
#align sum.forall Sum.forall
-/
#print Sum.exists /-
@[simp]
theorem exists {p : Sum α β → Prop} : (∃ x, p x) ↔ (∃ a, p (inl a)) ∨ ∃ b, p (inr b) :=
⟨fun h =>
match h with
| ⟨inl a, h⟩ => Or.inl ⟨a, h⟩
| ⟨inr b, h⟩ => Or.inr ⟨b, h⟩,
fun h =>
match h with
| Or.inl ⟨a, h⟩ => ⟨inl a, h⟩
| Or.inr ⟨b, h⟩ => ⟨inr b, h⟩⟩
#align sum.exists Sum.exists
-/
#print Sum.inl_injective /-
theorem inl_injective : Function.Injective (inl : α → Sum α β) := fun x y => inl.inj
#align sum.inl_injective Sum.inl_injective
-/
#print Sum.inr_injective /-
theorem inr_injective : Function.Injective (inr : β → Sum α β) := fun x y => inr.inj
#align sum.inr_injective Sum.inr_injective
-/
section get
#print Sum.getLeft /-
/-- Check if a sum is `inl` and if so, retrieve its contents. -/
@[simp]
def getLeft : Sum α β → Option α
| inl a => some a
| inr _ => none
#align sum.get_left Sum.getLeft
-/
#print Sum.getRight /-
/-- Check if a sum is `inr` and if so, retrieve its contents. -/
@[simp]
def getRight : Sum α β → Option β
| inr b => some b
| inl _ => none
#align sum.get_right Sum.getRight
-/
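-- Small worked examples (illustrative only): `getLeft`/`getRight` compute by pattern
-- matching, so these equalities hold by `rfl`.
example : (Sum.inl 2 : Sum Nat Bool).getLeft = some 2 :=
  rfl

example : (Sum.inr true : Sum Nat Bool).getRight = some true :=
  rfl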
#print Sum.isLeft /-
/-- Check if a sum is `inl`. -/
@[simp]
def isLeft : Sum α β → Bool
| inl _ => true
| inr _ => false
#align sum.is_left Sum.isLeft
-/
#print Sum.isRight /-
/-- Check if a sum is `inr`. -/
@[simp]
def isRight : Sum α β → Bool
| inl _ => false
| inr _ => true
#align sum.is_right Sum.isRight
-/
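-- Likewise for the Boolean discriminators.
example : (Sum.inl 0 : Sum Nat Bool).isLeft = true :=
  rfl

example : (Sum.inr true : Sum Nat Bool).isRight = true :=
  rfl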
variable {x y : Sum α β}
#print Sum.getLeft_eq_none_iff /-
@[simp]
theorem getLeft_eq_none_iff : x.getLeft = none ↔ x.isRight := by
cases x <;>
simp only [get_left, is_right, Bool.coe_sort_true, Bool.coe_sort_false, eq_self_iff_true]
#align sum.get_left_eq_none_iff Sum.getLeft_eq_none_iff
-/
#print Sum.getRight_eq_none_iff /-
@[simp]
theorem getRight_eq_none_iff : x.getRight = none ↔ x.isLeft := by
cases x <;>
simp only [get_right, is_left, Bool.coe_sort_true, Bool.coe_sort_false, eq_self_iff_true]
#align sum.get_right_eq_none_iff Sum.getRight_eq_none_iff
-/
#print Sum.getLeft_eq_some_iff /-
@[simp]
theorem getLeft_eq_some_iff {a} : x.getLeft = some a ↔ x = inl a := by
cases x <;> simp only [get_left]
#align sum.get_left_eq_some_iff Sum.getLeft_eq_some_iff
-/
#print Sum.getRight_eq_some_iff /-
@[simp]
theorem getRight_eq_some_iff {b} : x.getRight = some b ↔ x = inr b := by
cases x <;> simp only [get_right]
#align sum.get_right_eq_some_iff Sum.getRight_eq_some_iff
-/
#print Sum.not_isLeft /-
@[simp]
theorem not_isLeft (x : Sum α β) : not x.isLeft = x.isRight := by cases x <;> rfl
#align sum.bnot_is_left Sum.not_isLeft
-/
#print Sum.isLeft_eq_false /-
@[simp]
theorem isLeft_eq_false : x.isLeft = false ↔ x.isRight := by cases x <;> simp
#align sum.is_left_eq_ff Sum.isLeft_eq_false
-/
#print Sum.Not_isLeft /-
theorem Not_isLeft : ¬x.isLeft ↔ x.isRight := by simp
#align sum.not_is_left Sum.Not_isLeft
-/
/- warning: sum.bnot_is_right -> Sum.not_isRight is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} (x : Sum.{u1, u2} α β), Eq.{1} Bool (not (Sum.isRight.{u1, u2} α β x)) (Sum.isLeft.{u1, u2} α β x)
but is expected to have type
forall {α : Type.{u1}} {β : Type.{u2}} (x : Sum.{u1, u2} α β), Eq.{1} Bool (not (Decidable.decide (Eq.{1} Bool (Sum.isRight.{u1, u2} α β x) (Sum.isLeft.{u1, u2} α β x)) (instDecidableEqBool (Sum.isRight.{u1, u2} α β x) (Sum.isLeft.{u1, u2} α β x)))) Bool.true
Case conversion may be inaccurate. Consider using '#align sum.bnot_is_right Sum.not_isRightₓ'. -/
@[simp]
theorem not_isRight (x : Sum α β) : not x.isRight = x.isLeft := by cases x <;> rfl
#align sum.bnot_is_right Sum.not_isRight
#print Sum.isRight_eq_false /-
@[simp]
theorem isRight_eq_false : x.isRight = false ↔ x.isLeft := by cases x <;> simp
#align sum.is_right_eq_ff Sum.isRight_eq_false
-/
#print Sum.Not_isRight /-
theorem Not_isRight : ¬x.isRight ↔ x.isLeft := by simp
#align sum.not_is_right Sum.Not_isRight
-/
#print Sum.isLeft_iff /-
theorem isLeft_iff : x.isLeft ↔ ∃ y, x = Sum.inl y := by cases x <;> simp
#align sum.is_left_iff Sum.isLeft_iff
-/
#print Sum.isRight_iff /-
theorem isRight_iff : x.isRight ↔ ∃ y, x = Sum.inr y := by cases x <;> simp
#align sum.is_right_iff Sum.isRight_iff
-/
end get
#print Sum.inl.inj_iff /-
theorem inl.inj_iff {a b} : (inl a : Sum α β) = inl b ↔ a = b :=
⟨inl.inj, congr_arg _⟩
#align sum.inl.inj_iff Sum.inl.inj_iff
-/
#print Sum.inr.inj_iff /-
theorem inr.inj_iff {a b} : (inr a : Sum α β) = inr b ↔ a = b :=
⟨inr.inj, congr_arg _⟩
#align sum.inr.inj_iff Sum.inr.inj_iff
-/
#print Sum.inl_ne_inr /-
theorem inl_ne_inr {a : α} {b : β} : inl a ≠ inr b :=
fun.
#align sum.inl_ne_inr Sum.inl_ne_inr
-/
#print Sum.inr_ne_inl /-
theorem inr_ne_inl {a : α} {b : β} : inr b ≠ inl a :=
fun.
#align sum.inr_ne_inl Sum.inr_ne_inl
-/
#print Sum.elim /-
/-- Define a function on `α ⊕ β` by giving separate definitions on `α` and `β`. -/
protected def elim {α β γ : Sort _} (f : α → γ) (g : β → γ) : Sum α β → γ := fun x =>
Sum.recOn x f g
#align sum.elim Sum.elim
-/
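-- A minimal illustration of the eliminator: on `inl` it applies the first function,
-- on `inr` the second, and both computations are definitional.
example : Sum.elim (fun n : Nat => n + 1) (fun _ : Bool => 0) (Sum.inl 3) = 4 :=
  rfl

example : Sum.elim (fun n : Nat => n + 1) (fun _ : Bool => 0) (Sum.inr true) = 0 :=
  rfl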
/- warning: sum.elim_inl -> Sum.elim_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} (f : α -> γ) (g : β -> γ) (x : α), Eq.{u3} γ (Sum.elim.{u1, u2, u3} α β γ f g (Sum.inl.{u1, u2} α β x)) (f x)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Sort.{u1}} (f : α -> γ) (g : β -> γ) (x : α), Eq.{u1} γ (Sum.elim.{u3, u2, u1} α β γ f g (Sum.inl.{u3, u2} α β x)) (f x)
Case conversion may be inaccurate. Consider using '#align sum.elim_inl Sum.elim_inlₓ'. -/
@[simp]
theorem elim_inl {α β γ : Sort _} (f : α → γ) (g : β → γ) (x : α) : Sum.elim f g (inl x) = f x :=
rfl
#align sum.elim_inl Sum.elim_inl
/- warning: sum.elim_inr -> Sum.elim_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} (f : α -> γ) (g : β -> γ) (x : β), Eq.{u3} γ (Sum.elim.{u1, u2, u3} α β γ f g (Sum.inr.{u1, u2} α β x)) (g x)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Sort.{u1}} (f : α -> γ) (g : β -> γ) (x : β), Eq.{u1} γ (Sum.elim.{u3, u2, u1} α β γ f g (Sum.inr.{u3, u2} α β x)) (g x)
Case conversion may be inaccurate. Consider using '#align sum.elim_inr Sum.elim_inrₓ'. -/
@[simp]
theorem elim_inr {α β γ : Sort _} (f : α → γ) (g : β → γ) (x : β) : Sum.elim f g (inr x) = g x :=
rfl
#align sum.elim_inr Sum.elim_inr
/- warning: sum.elim_comp_inl -> Sum.elim_comp_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} (f : α -> γ) (g : β -> γ), Eq.{imax (succ u1) u3} (α -> γ) (Function.comp.{succ u1, max (succ u1) (succ u2), u3} α (Sum.{u1, u2} α β) γ (Sum.elim.{u1, u2, u3} α β γ f g) (Sum.inl.{u1, u2} α β)) f
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Sort.{u1}} (f : α -> γ) (g : β -> γ), Eq.{imax (succ u3) u1} (α -> γ) (Function.comp.{succ u3, max (succ u2) (succ u3), u1} α (Sum.{u3, u2} α β) γ (Sum.elim.{u3, u2, u1} α β γ f g) (Sum.inl.{u3, u2} α β)) f
Case conversion may be inaccurate. Consider using '#align sum.elim_comp_inl Sum.elim_comp_inlₓ'. -/
@[simp]
theorem elim_comp_inl {α β γ : Sort _} (f : α → γ) (g : β → γ) : Sum.elim f g ∘ inl = f :=
rfl
#align sum.elim_comp_inl Sum.elim_comp_inl
/- warning: sum.elim_comp_inr -> Sum.elim_comp_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} (f : α -> γ) (g : β -> γ), Eq.{imax (succ u2) u3} (β -> γ) (Function.comp.{succ u2, max (succ u1) (succ u2), u3} β (Sum.{u1, u2} α β) γ (Sum.elim.{u1, u2, u3} α β γ f g) (Sum.inr.{u1, u2} α β)) g
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Sort.{u1}} (f : α -> γ) (g : β -> γ), Eq.{imax (succ u2) u1} (β -> γ) (Function.comp.{succ u2, max (succ u2) (succ u3), u1} β (Sum.{u3, u2} α β) γ (Sum.elim.{u3, u2, u1} α β γ f g) (Sum.inr.{u3, u2} α β)) g
Case conversion may be inaccurate. Consider using '#align sum.elim_comp_inr Sum.elim_comp_inrₓ'. -/
@[simp]
theorem elim_comp_inr {α β γ : Sort _} (f : α → γ) (g : β → γ) : Sum.elim f g ∘ inr = g :=
rfl
#align sum.elim_comp_inr Sum.elim_comp_inr
/- warning: sum.elim_inl_inr -> Sum.elim_inl_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}}, Eq.{max (succ u1) (succ u2)} ((Sum.{u1, u2} α β) -> (Sum.{u1, u2} α β)) (Sum.elim.{u1, u2, max (succ u1) (succ u2)} α β (Sum.{u1, u2} α β) (Sum.inl.{u1, u2} α β) (Sum.inr.{u1, u2} α β)) (id.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u1}}, Eq.{max (succ u1) (succ u2)} ((Sum.{u2, u1} α β) -> (Sum.{u2, u1} α β)) (Sum.elim.{u2, u1, max (succ u2) (succ u1)} α β (Sum.{u2, u1} α β) (Sum.inl.{u2, u1} α β) (Sum.inr.{u2, u1} α β)) (id.{max (succ u1) (succ u2)} (Sum.{u2, u1} α β))
Case conversion may be inaccurate. Consider using '#align sum.elim_inl_inr Sum.elim_inl_inrₓ'. -/
@[simp]
theorem elim_inl_inr {α β : Sort _} : @Sum.elim α β _ inl inr = id :=
funext fun x => Sum.casesOn x (fun _ => rfl) fun _ => rfl
#align sum.elim_inl_inr Sum.elim_inl_inr
/- warning: sum.comp_elim -> Sum.comp_elim is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} {δ : Sort.{u4}} (f : γ -> δ) (g : α -> γ) (h : β -> γ), Eq.{imax (max (succ u1) (succ u2)) u4} ((Sum.{u1, u2} α β) -> δ) (Function.comp.{max (succ u1) (succ u2), u3, u4} (Sum.{u1, u2} α β) γ δ f (Sum.elim.{u1, u2, u3} α β γ g h)) (Sum.elim.{u1, u2, u4} α β δ (Function.comp.{succ u1, u3, u4} α γ δ f g) (Function.comp.{succ u2, u3, u4} β γ δ f h))
but is expected to have type
forall {α : Type.{u4}} {β : Type.{u3}} {γ : Sort.{u2}} {δ : Sort.{u1}} (f : γ -> δ) (g : α -> γ) (h : β -> γ), Eq.{imax (max (succ u3) (succ u4)) u1} ((Sum.{u4, u3} α β) -> δ) (Function.comp.{max (succ u3) (succ u4), u2, u1} (Sum.{u4, u3} α β) γ δ f (Sum.elim.{u4, u3, u2} α β γ g h)) (Sum.elim.{u4, u3, u1} α β δ (Function.comp.{succ u4, u2, u1} α γ δ f g) (Function.comp.{succ u3, u2, u1} β γ δ f h))
Case conversion may be inaccurate. Consider using '#align sum.comp_elim Sum.comp_elimₓ'. -/
theorem comp_elim {α β γ δ : Sort _} (f : γ → δ) (g : α → γ) (h : β → γ) :
f ∘ Sum.elim g h = Sum.elim (f ∘ g) (f ∘ h) :=
funext fun x => Sum.casesOn x (fun _ => rfl) fun _ => rfl
#align sum.comp_elim Sum.comp_elim
/- warning: sum.elim_comp_inl_inr -> Sum.elim_comp_inl_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Sort.{u3}} (f : (Sum.{u1, u2} α β) -> γ), Eq.{imax (max (succ u1) (succ u2)) u3} ((Sum.{u1, u2} α β) -> γ) (Sum.elim.{u1, u2, u3} α β γ (Function.comp.{succ u1, max (succ u1) (succ u2), u3} α (Sum.{u1, u2} α β) γ f (Sum.inl.{u1, u2} α β)) (Function.comp.{succ u2, max (succ u1) (succ u2), u3} β (Sum.{u1, u2} α β) γ f (Sum.inr.{u1, u2} α β))) f
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u2}} {γ : Sort.{u1}} (f : (Sum.{u3, u2} α β) -> γ), Eq.{imax (max (succ u2) (succ u3)) u1} ((Sum.{u3, u2} α β) -> γ) (Sum.elim.{u3, u2, u1} α β γ (Function.comp.{succ u3, max (succ u2) (succ u3), u1} α (Sum.{u3, u2} α β) γ f (Sum.inl.{u3, u2} α β)) (Function.comp.{succ u2, max (succ u2) (succ u3), u1} β (Sum.{u3, u2} α β) γ f (Sum.inr.{u3, u2} α β))) f
Case conversion may be inaccurate. Consider using '#align sum.elim_comp_inl_inr Sum.elim_comp_inl_inrₓ'. -/
@[simp]
theorem elim_comp_inl_inr {α β γ : Sort _} (f : Sum α β → γ) : Sum.elim (f ∘ inl) (f ∘ inr) = f :=
funext fun x => Sum.casesOn x (fun _ => rfl) fun _ => rfl
#align sum.elim_comp_inl_inr Sum.elim_comp_inl_inr
#print Sum.map /-
/-- Map `α ⊕ β` to `α' ⊕ β'` sending `α` to `α'` and `β` to `β'`. -/
protected def map (f : α → α') (g : β → β') : Sum α β → Sum α' β' :=
Sum.elim (inl ∘ f) (inr ∘ g)
#align sum.map Sum.map
-/
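-- Illustration: `Sum.map` acts on each component separately.
example :
    Sum.map (fun n : Nat => n + 1) (fun m : Nat => m * 2) (Sum.inl 3) = Sum.inl 4 :=
  rfl

example :
    Sum.map (fun n : Nat => n + 1) (fun m : Nat => m * 2) (Sum.inr 3) = Sum.inr 6 :=
  rfl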
#print Sum.map_inl /-
@[simp]
theorem map_inl (f : α → α') (g : β → β') (x : α) : (inl x).map f g = inl (f x) :=
rfl
#align sum.map_inl Sum.map_inl
-/
#print Sum.map_inr /-
@[simp]
theorem map_inr (f : α → α') (g : β → β') (x : β) : (inr x).map f g = inr (g x) :=
rfl
#align sum.map_inr Sum.map_inr
-/
/- warning: sum.map_map -> Sum.map_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {α' : Type.{u3}} {β : Type.{u2}} {β' : Type.{u4}} {α'' : Type.{u5}} {β'' : Type.{u6}} (f' : α' -> α'') (g' : β' -> β'') (f : α -> α') (g : β -> β') (x : Sum.{u1, u2} α β), Eq.{max (succ u5) (succ u6)} (Sum.{u5, u6} α'' β'') (Sum.map.{u3, u4, u5, u6} α' α'' β' β'' f' g' (Sum.map.{u1, u2, u3, u4} α α' β β' f g x)) (Sum.map.{u1, u2, u5, u6} α α'' β β'' (Function.comp.{succ u1, succ u3, succ u5} α α' α'' f' f) (Function.comp.{succ u2, succ u4, succ u6} β β' β'' g' g) x)
but is expected to have type
forall {α : Type.{u3}} {α' : Type.{u5}} {β : Type.{u4}} {β' : Type.{u6}} {α'' : Type.{u2}} {β'' : Type.{u1}} (f' : α' -> α'') (g' : β' -> β'') (f : α -> α') (g : β -> β') (x : Sum.{u3, u4} α β), Eq.{max (succ u1) (succ u2)} (Sum.{u2, u1} α'' β'') (Sum.map.{u5, u6, u2, u1} α' α'' β' β'' f' g' (Sum.map.{u3, u4, u5, u6} α α' β β' f g x)) (Sum.map.{u3, u4, u2, u1} α α'' β β'' (Function.comp.{succ u3, succ u5, succ u2} α α' α'' f' f) (Function.comp.{succ u4, succ u6, succ u1} β β' β'' g' g) x)
Case conversion may be inaccurate. Consider using '#align sum.map_map Sum.map_mapₓ'. -/
@[simp]
theorem map_map {α'' β''} (f' : α' → α'') (g' : β' → β'') (f : α → α') (g : β → β') :
∀ x : Sum α β, (x.map f g).map f' g' = x.map (f' ∘ f) (g' ∘ g)
| inl a => rfl
| inr b => rfl
#align sum.map_map Sum.map_map
/- warning: sum.map_comp_map -> Sum.map_comp_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {α' : Type.{u3}} {β : Type.{u2}} {β' : Type.{u4}} {α'' : Type.{u5}} {β'' : Type.{u6}} (f' : α' -> α'') (g' : β' -> β'') (f : α -> α') (g : β -> β'), Eq.{max (max (succ u1) (succ u2)) (succ u5) (succ u6)} ((Sum.{u1, u2} α β) -> (Sum.{u5, u6} α'' β'')) (Function.comp.{max (succ u1) (succ u2), max (succ u3) (succ u4), max (succ u5) (succ u6)} (Sum.{u1, u2} α β) (Sum.{u3, u4} α' β') (Sum.{u5, u6} α'' β'') (Sum.map.{u3, u4, u5, u6} α' α'' β' β'' f' g') (Sum.map.{u1, u2, u3, u4} α α' β β' f g)) (Sum.map.{u1, u2, u5, u6} α α'' β β'' (Function.comp.{succ u1, succ u3, succ u5} α α' α'' f' f) (Function.comp.{succ u2, succ u4, succ u6} β β' β'' g' g))
but is expected to have type
forall {α : Type.{u3}} {α' : Type.{u5}} {β : Type.{u4}} {β' : Type.{u6}} {α'' : Type.{u2}} {β'' : Type.{u1}} (f' : α' -> α'') (g' : β' -> β'') (f : α -> α') (g : β -> β'), Eq.{max (max (max (succ u3) (succ u4)) (succ u1)) (succ u2)} ((Sum.{u3, u4} α β) -> (Sum.{u2, u1} α'' β'')) (Function.comp.{max (succ u4) (succ u3), max (succ u6) (succ u5), max (succ u1) (succ u2)} (Sum.{u3, u4} α β) (Sum.{u5, u6} α' β') (Sum.{u2, u1} α'' β'') (Sum.map.{u5, u6, u2, u1} α' α'' β' β'' f' g') (Sum.map.{u3, u4, u5, u6} α α' β β' f g)) (Sum.map.{u3, u4, u2, u1} α α'' β β'' (Function.comp.{succ u3, succ u5, succ u2} α α' α'' f' f) (Function.comp.{succ u4, succ u6, succ u1} β β' β'' g' g))
Case conversion may be inaccurate. Consider using '#align sum.map_comp_map Sum.map_comp_mapₓ'. -/
@[simp]
theorem map_comp_map {α'' β''} (f' : α' → α'') (g' : β' → β'') (f : α → α') (g : β → β') :
Sum.map f' g' ∘ Sum.map f g = Sum.map (f' ∘ f) (g' ∘ g) :=
funext <| map_map f' g' f g
#align sum.map_comp_map Sum.map_comp_map
/- warning: sum.map_id_id -> Sum.map_id_id is a dubious translation:
lean 3 declaration is
forall (α : Type.{u1}) (β : Type.{u2}), Eq.{max (succ u1) (succ u2)} ((Sum.{u1, u2} α β) -> (Sum.{u1, u2} α β)) (Sum.map.{u1, u2, u1, u2} α α β β (id.{succ u1} α) (id.{succ u2} β)) (id.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β))
but is expected to have type
forall (α : Type.{u2}) (β : Type.{u1}), Eq.{max (succ u1) (succ u2)} ((Sum.{u2, u1} α β) -> (Sum.{u2, u1} α β)) (Sum.map.{u2, u1, u2, u1} α α β β (id.{succ u2} α) (id.{succ u1} β)) (id.{max (succ u1) (succ u2)} (Sum.{u2, u1} α β))
Case conversion may be inaccurate. Consider using '#align sum.map_id_id Sum.map_id_idₓ'. -/
@[simp]
theorem map_id_id (α β) : Sum.map (@id α) (@id β) = id :=
funext fun x => Sum.recOn x (fun _ => rfl) fun _ => rfl
#align sum.map_id_id Sum.map_id_id
/- warning: sum.elim_map -> Sum.elim_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {ε : Sort.{u5}} {f₁ : α -> β} {f₂ : β -> ε} {g₁ : γ -> δ} {g₂ : δ -> ε} {x : Sum.{u1, u3} α γ}, Eq.{u5} ε (Sum.elim.{u2, u4, u5} β δ ε f₂ g₂ (Sum.map.{u1, u3, u2, u4} α β γ δ f₁ g₁ x)) (Sum.elim.{u1, u3, u5} α γ ε (Function.comp.{succ u1, succ u2, u5} α β ε f₂ f₁) (Function.comp.{succ u3, succ u4, u5} γ δ ε g₂ g₁) x)
but is expected to have type
forall {α : Type.{u5}} {β : Type.{u4}} {γ : Type.{u3}} {δ : Type.{u2}} {ε : Sort.{u1}} {f₁ : α -> β} {f₂ : β -> ε} {g₁ : γ -> δ} {g₂ : δ -> ε} {x : Sum.{u5, u3} α γ}, Eq.{u1} ε (Sum.elim.{u4, u2, u1} β δ ε f₂ g₂ (Sum.map.{u5, u3, u4, u2} α β γ δ f₁ g₁ x)) (Sum.elim.{u5, u3, u1} α γ ε (Function.comp.{succ u5, succ u4, u1} α β ε f₂ f₁) (Function.comp.{succ u3, succ u2, u1} γ δ ε g₂ g₁) x)
Case conversion may be inaccurate. Consider using '#align sum.elim_map Sum.elim_mapₓ'. -/
theorem elim_map {α β γ δ ε : Sort _} {f₁ : α → β} {f₂ : β → ε} {g₁ : γ → δ} {g₂ : δ → ε} {x} :
Sum.elim f₂ g₂ (Sum.map f₁ g₁ x) = Sum.elim (f₂ ∘ f₁) (g₂ ∘ g₁) x := by cases x <;> rfl
#align sum.elim_map Sum.elim_map
/- warning: sum.elim_comp_map -> Sum.elim_comp_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {ε : Sort.{u5}} {f₁ : α -> β} {f₂ : β -> ε} {g₁ : γ -> δ} {g₂ : δ -> ε}, Eq.{imax (max (succ u1) (succ u3)) u5} ((Sum.{u1, u3} α γ) -> ε) (Function.comp.{max (succ u1) (succ u3), max (succ u2) (succ u4), u5} (Sum.{u1, u3} α γ) (Sum.{u2, u4} β δ) ε (Sum.elim.{u2, u4, u5} β δ ε f₂ g₂) (Sum.map.{u1, u3, u2, u4} α β γ δ f₁ g₁)) (Sum.elim.{u1, u3, u5} α γ ε (Function.comp.{succ u1, succ u2, u5} α β ε f₂ f₁) (Function.comp.{succ u3, succ u4, u5} γ δ ε g₂ g₁))
but is expected to have type
forall {α : Type.{u5}} {β : Type.{u4}} {γ : Type.{u3}} {δ : Type.{u2}} {ε : Sort.{u1}} {f₁ : α -> β} {f₂ : β -> ε} {g₁ : γ -> δ} {g₂ : δ -> ε}, Eq.{imax (max (succ u3) (succ u5)) u1} ((Sum.{u5, u3} α γ) -> ε) (Function.comp.{max (succ u3) (succ u5), max (succ u2) (succ u4), u1} (Sum.{u5, u3} α γ) (Sum.{u4, u2} β δ) ε (Sum.elim.{u4, u2, u1} β δ ε f₂ g₂) (Sum.map.{u5, u3, u4, u2} α β γ δ f₁ g₁)) (Sum.elim.{u5, u3, u1} α γ ε (Function.comp.{succ u5, succ u4, u1} α β ε f₂ f₁) (Function.comp.{succ u3, succ u2, u1} γ δ ε g₂ g₁))
Case conversion may be inaccurate. Consider using '#align sum.elim_comp_map Sum.elim_comp_mapₓ'. -/
theorem elim_comp_map {α β γ δ ε : Sort _} {f₁ : α → β} {f₂ : β → ε} {g₁ : γ → δ} {g₂ : δ → ε} :
Sum.elim f₂ g₂ ∘ Sum.map f₁ g₁ = Sum.elim (f₂ ∘ f₁) (g₂ ∘ g₁) :=
funext fun _ => elim_map
#align sum.elim_comp_map Sum.elim_comp_map
/- warning: sum.is_left_map -> Sum.isLeft_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} (f : α -> β) (g : γ -> δ) (x : Sum.{u1, u3} α γ), Eq.{1} Bool (Sum.isLeft.{u2, u4} β δ (Sum.map.{u1, u3, u2, u4} α β γ δ f g x)) (Sum.isLeft.{u1, u3} α γ x)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} (f : α -> β) (g : γ -> δ) (x : Sum.{u3, u2} α γ), Eq.{1} Bool (Sum.isLeft.{u4, u1} β δ (Sum.map.{u3, u2, u4, u1} α β γ δ f g x)) (Sum.isLeft.{u3, u2} α γ x)
Case conversion may be inaccurate. Consider using '#align sum.is_left_map Sum.isLeft_mapₓ'. -/
@[simp]
theorem isLeft_map (f : α → β) (g : γ → δ) (x : Sum α γ) : isLeft (x.map f g) = isLeft x := by
cases x <;> rfl
#align sum.is_left_map Sum.isLeft_map
/- warning: sum.is_right_map -> Sum.isRight_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} (f : α -> β) (g : γ -> δ) (x : Sum.{u1, u3} α γ), Eq.{1} Bool (Sum.isRight.{u2, u4} β δ (Sum.map.{u1, u3, u2, u4} α β γ δ f g x)) (Sum.isRight.{u1, u3} α γ x)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} (f : α -> β) (g : γ -> δ) (x : Sum.{u3, u2} α γ), Eq.{1} Bool (Sum.isRight.{u4, u1} β δ (Sum.map.{u3, u2, u4, u1} α β γ δ f g x)) (Sum.isRight.{u3, u2} α γ x)
Case conversion may be inaccurate. Consider using '#align sum.is_right_map Sum.isRight_mapₓ'. -/
@[simp]
theorem isRight_map (f : α → β) (g : γ → δ) (x : Sum α γ) : isRight (x.map f g) = isRight x := by
cases x <;> rfl
#align sum.is_right_map Sum.isRight_map
/- warning: sum.get_left_map -> Sum.getLeft_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} (f : α -> β) (g : γ -> δ) (x : Sum.{u1, u3} α γ), Eq.{succ u2} (Option.{u2} β) (Sum.getLeft.{u2, u4} β δ (Sum.map.{u1, u3, u2, u4} α β γ δ f g x)) (Option.map.{u1, u2} α β f (Sum.getLeft.{u1, u3} α γ x))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} (f : α -> β) (g : γ -> δ) (x : Sum.{u3, u2} α γ), Eq.{succ u4} (Option.{u4} β) (Sum.getLeft.{u4, u1} β δ (Sum.map.{u3, u2, u4, u1} α β γ δ f g x)) (Option.map.{u3, u4} α β f (Sum.getLeft.{u3, u2} α γ x))
Case conversion may be inaccurate. Consider using '#align sum.get_left_map Sum.getLeft_mapₓ'. -/
@[simp]
theorem getLeft_map (f : α → β) (g : γ → δ) (x : Sum α γ) : (x.map f g).getLeft = x.getLeft.map f :=
by cases x <;> rfl
#align sum.get_left_map Sum.getLeft_map
/- warning: sum.get_right_map -> Sum.getRight_map is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} (f : α -> β) (g : γ -> δ) (x : Sum.{u1, u3} α γ), Eq.{succ u4} (Option.{u4} δ) (Sum.getRight.{u2, u4} β δ (Sum.map.{u1, u3, u2, u4} α β γ δ f g x)) (Option.map.{u3, u4} γ δ g (Sum.getRight.{u1, u3} α γ x))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} (f : α -> β) (g : γ -> δ) (x : Sum.{u3, u2} α γ), Eq.{succ u1} (Option.{u1} δ) (Sum.getRight.{u4, u1} β δ (Sum.map.{u3, u2, u4, u1} α β γ δ f g x)) (Option.map.{u2, u1} γ δ g (Sum.getRight.{u3, u2} α γ x))
Case conversion may be inaccurate. Consider using '#align sum.get_right_map Sum.getRight_mapₓ'. -/
@[simp]
theorem getRight_map (f : α → β) (g : γ → δ) (x : Sum α γ) :
(x.map f g).getRight = x.getRight.map g := by cases x <;> rfl
#align sum.get_right_map Sum.getRight_map
open Function (update update_eq_iff update_comp_eq_of_injective update_comp_eq_of_forall_ne)
/- warning: sum.update_elim_inl -> Sum.update_elim_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : α -> γ} {g : β -> γ} {i : α} {x : γ}, Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) (Sum.elim.{u1, u2, succ u3} α β γ f g) (Sum.inl.{u1, u2} α β i) x) (Sum.elim.{u1, u2, succ u3} α β γ (Function.update.{succ u1, succ u3} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) f i x) g)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : α -> γ} {g : β -> γ} {i : α} {x : γ}, Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Function.update.{max (succ u3) (succ u2), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) (Sum.elim.{u2, u3, succ u1} α β γ f g) (Sum.inl.{u2, u3} α β i) x) (Sum.elim.{u2, u3, succ u1} α β γ (Function.update.{succ u2, succ u1} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) f i x) g)
Case conversion may be inaccurate. Consider using '#align sum.update_elim_inl Sum.update_elim_inlₓ'. -/
@[simp]
theorem update_elim_inl [DecidableEq α] [DecidableEq (Sum α β)] {f : α → γ} {g : β → γ} {i : α}
{x : γ} : update (Sum.elim f g) (inl i) x = Sum.elim (update f i x) g :=
update_eq_iff.2 ⟨by simp, by simp (config := { contextual := true })⟩
#align sum.update_elim_inl Sum.update_elim_inl
/- warning: sum.update_elim_inr -> Sum.update_elim_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : α -> γ} {g : β -> γ} {i : β} {x : γ}, Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) (Sum.elim.{u1, u2, succ u3} α β γ f g) (Sum.inr.{u1, u2} α β i) x) (Sum.elim.{u1, u2, succ u3} α β γ f (Function.update.{succ u2, succ u3} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) g i x))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u3} β] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : α -> γ} {g : β -> γ} {i : β} {x : γ}, Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Function.update.{max (succ u3) (succ u2), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) (Sum.elim.{u2, u3, succ u1} α β γ f g) (Sum.inr.{u2, u3} α β i) x) (Sum.elim.{u2, u3, succ u1} α β γ f (Function.update.{succ u3, succ u1} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) g i x))
Case conversion may be inaccurate. Consider using '#align sum.update_elim_inr Sum.update_elim_inrₓ'. -/
@[simp]
theorem update_elim_inr [DecidableEq β] [DecidableEq (Sum α β)] {f : α → γ} {g : β → γ} {i : β}
{x : γ} : update (Sum.elim f g) (inr i) x = Sum.elim f (update g i x) :=
update_eq_iff.2 ⟨by simp, by simp (config := { contextual := true })⟩
#align sum.update_elim_inr Sum.update_elim_inr
/- warning: sum.update_inl_comp_inl -> Sum.update_inl_comp_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : α} {x : γ}, Eq.{max (succ u1) (succ u3)} (α -> γ) (Function.comp.{succ u1, max (succ u1) (succ u2), succ u3} α (Sum.{u1, u2} α β) γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) f (Sum.inl.{u1, u2} α β i) x) (Sum.inl.{u1, u2} α β)) (Function.update.{succ u1, succ u3} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) (Function.comp.{succ u1, max (succ u1) (succ u2), succ u3} α (Sum.{u1, u2} α β) γ f (Sum.inl.{u1, u2} α β)) i x)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : α} {x : γ}, Eq.{max (succ u2) (succ u1)} (α -> γ) (Function.comp.{succ u2, max (succ u2) (succ u3), succ u1} α (Sum.{u2, u3} α β) γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) f (Sum.inl.{u2, u3} α β i) x) (Sum.inl.{u2, u3} α β)) (Function.update.{succ u2, succ u1} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) (Function.comp.{succ u2, max (succ u2) (succ u3), succ u1} α (Sum.{u2, u3} α β) γ f (Sum.inl.{u2, u3} α β)) i x)
Case conversion may be inaccurate. Consider using '#align sum.update_inl_comp_inl Sum.update_inl_comp_inlₓ'. -/
@[simp]
theorem update_inl_comp_inl [DecidableEq α] [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : α}
{x : γ} : update f (inl i) x ∘ inl = update (f ∘ inl) i x :=
update_comp_eq_of_injective _ inl_injective _ _
#align sum.update_inl_comp_inl Sum.update_inl_comp_inl
/- warning: sum.update_inl_apply_inl -> Sum.update_inl_apply_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : α} {j : α} {x : γ}, Eq.{succ u3} γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) f (Sum.inl.{u1, u2} α β i) x (Sum.inl.{u1, u2} α β j)) (Function.update.{succ u1, succ u3} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) (Function.comp.{succ u1, max (succ u1) (succ u2), succ u3} α (Sum.{u1, u2} α β) γ f (Sum.inl.{u1, u2} α β)) i x j)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : α} {j : α} {x : γ}, Eq.{succ u1} γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) f (Sum.inl.{u2, u3} α β i) x (Sum.inl.{u2, u3} α β j)) (Function.update.{succ u2, succ u1} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) (Function.comp.{succ u2, max (succ u2) (succ u3), succ u1} α (Sum.{u2, u3} α β) γ f (Sum.inl.{u2, u3} α β)) i x j)
Case conversion may be inaccurate. Consider using '#align sum.update_inl_apply_inl Sum.update_inl_apply_inlₓ'. -/
@[simp]
theorem update_inl_apply_inl [DecidableEq α] [DecidableEq (Sum α β)] {f : Sum α β → γ} {i j : α}
{x : γ} : update f (inl i) x (inl j) = update (f ∘ inl) i x j := by rw [← update_inl_comp_inl]
#align sum.update_inl_apply_inl Sum.update_inl_apply_inl
/- warning: sum.update_inl_comp_inr -> Sum.update_inl_comp_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : α} {x : γ}, Eq.{max (succ u2) (succ u3)} (β -> γ) (Function.comp.{succ u2, max (succ u1) (succ u2), succ u3} β (Sum.{u1, u2} α β) γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_1 a b) f (Sum.inl.{u1, u2} α β i) x) (Sum.inr.{u1, u2} α β)) (Function.comp.{succ u2, max (succ u1) (succ u2), succ u3} β (Sum.{u1, u2} α β) γ f (Sum.inr.{u1, u2} α β))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : α} {x : γ}, Eq.{max (succ u3) (succ u1)} (β -> γ) (Function.comp.{succ u3, max (succ u2) (succ u3), succ u1} β (Sum.{u2, u3} α β) γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_1 a b) f (Sum.inl.{u2, u3} α β i) x) (Sum.inr.{u2, u3} α β)) (Function.comp.{succ u3, max (succ u2) (succ u3), succ u1} β (Sum.{u2, u3} α β) γ f (Sum.inr.{u2, u3} α β))
Case conversion may be inaccurate. Consider using '#align sum.update_inl_comp_inr Sum.update_inl_comp_inrₓ'. -/
@[simp]
theorem update_inl_comp_inr [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : α} {x : γ} :
update f (inl i) x ∘ inr = f ∘ inr :=
update_comp_eq_of_forall_ne _ _ fun _ => inr_ne_inl
#align sum.update_inl_comp_inr Sum.update_inl_comp_inr
/- warning: sum.update_inl_apply_inr -> Sum.update_inl_apply_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : α} {j : β} {x : γ}, Eq.{succ u3} γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_1 a b) f (Sum.inl.{u1, u2} α β i) x (Sum.inr.{u1, u2} α β j)) (f (Sum.inr.{u1, u2} α β j))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : α} {j : β} {x : γ}, Eq.{succ u1} γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_1 a b) f (Sum.inl.{u2, u3} α β i) x (Sum.inr.{u2, u3} α β j)) (f (Sum.inr.{u2, u3} α β j))
Case conversion may be inaccurate. Consider using '#align sum.update_inl_apply_inr Sum.update_inl_apply_inrₓ'. -/
@[simp]
theorem update_inl_apply_inr [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : α} {j : β} {x : γ} :
update f (inl i) x (inr j) = f (inr j) :=
Function.update_noteq inr_ne_inl _ _
#align sum.update_inl_apply_inr Sum.update_inl_apply_inr
/- warning: sum.update_inr_comp_inl -> Sum.update_inr_comp_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : β} {x : γ}, Eq.{max (succ u1) (succ u3)} (α -> γ) (Function.comp.{succ u1, max (succ u1) (succ u2), succ u3} α (Sum.{u1, u2} α β) γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_1 a b) f (Sum.inr.{u1, u2} α β i) x) (Sum.inl.{u1, u2} α β)) (Function.comp.{succ u1, max (succ u1) (succ u2), succ u3} α (Sum.{u1, u2} α β) γ f (Sum.inl.{u1, u2} α β))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : β} {x : γ}, Eq.{max (succ u2) (succ u1)} (α -> γ) (Function.comp.{succ u2, max (succ u2) (succ u3), succ u1} α (Sum.{u2, u3} α β) γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_1 a b) f (Sum.inr.{u2, u3} α β i) x) (Sum.inl.{u2, u3} α β)) (Function.comp.{succ u2, max (succ u2) (succ u3), succ u1} α (Sum.{u2, u3} α β) γ f (Sum.inl.{u2, u3} α β))
Case conversion may be inaccurate. Consider using '#align sum.update_inr_comp_inl Sum.update_inr_comp_inlₓ'. -/
@[simp]
theorem update_inr_comp_inl [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : β} {x : γ} :
update f (inr i) x ∘ inl = f ∘ inl :=
update_comp_eq_of_forall_ne _ _ fun _ => inl_ne_inr
#align sum.update_inr_comp_inl Sum.update_inr_comp_inl
/- warning: sum.update_inr_apply_inl -> Sum.update_inr_apply_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : α} {j : β} {x : γ}, Eq.{succ u3} γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_1 a b) f (Sum.inr.{u1, u2} α β j) x (Sum.inl.{u1, u2} α β i)) (f (Sum.inl.{u1, u2} α β i))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : α} {j : β} {x : γ}, Eq.{succ u1} γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_1 a b) f (Sum.inr.{u2, u3} α β j) x (Sum.inl.{u2, u3} α β i)) (f (Sum.inl.{u2, u3} α β i))
Case conversion may be inaccurate. Consider using '#align sum.update_inr_apply_inl Sum.update_inr_apply_inlₓ'. -/
@[simp]
theorem update_inr_apply_inl [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : α} {j : β} {x : γ} :
update f (inr j) x (inl i) = f (inl i) :=
Function.update_noteq inl_ne_inr _ _
#align sum.update_inr_apply_inl Sum.update_inr_apply_inl
/- warning: sum.update_inr_comp_inr -> Sum.update_inr_comp_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : β} {x : γ}, Eq.{max (succ u2) (succ u3)} (β -> γ) (Function.comp.{succ u2, max (succ u1) (succ u2), succ u3} β (Sum.{u1, u2} α β) γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) f (Sum.inr.{u1, u2} α β i) x) (Sum.inr.{u1, u2} α β)) (Function.update.{succ u2, succ u3} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) (Function.comp.{succ u2, max (succ u1) (succ u2), succ u3} β (Sum.{u1, u2} α β) γ f (Sum.inr.{u1, u2} α β)) i x)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u3} β] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : β} {x : γ}, Eq.{max (succ u3) (succ u1)} (β -> γ) (Function.comp.{succ u3, max (succ u2) (succ u3), succ u1} β (Sum.{u2, u3} α β) γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) f (Sum.inr.{u2, u3} α β i) x) (Sum.inr.{u2, u3} α β)) (Function.update.{succ u3, succ u1} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) (Function.comp.{succ u3, max (succ u2) (succ u3), succ u1} β (Sum.{u2, u3} α β) γ f (Sum.inr.{u2, u3} α β)) i x)
Case conversion may be inaccurate. Consider using '#align sum.update_inr_comp_inr Sum.update_inr_comp_inrₓ'. -/
@[simp]
theorem update_inr_comp_inr [DecidableEq β] [DecidableEq (Sum α β)] {f : Sum α β → γ} {i : β}
{x : γ} : update f (inr i) x ∘ inr = update (f ∘ inr) i x :=
update_comp_eq_of_injective _ inr_injective _ _
#align sum.update_inr_comp_inr Sum.update_inr_comp_inr
/- warning: sum.update_inr_apply_inr -> Sum.update_inr_apply_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u2} β] [_inst_2 : DecidableEq.{max (succ u1) (succ u2)} (Sum.{u1, u2} α β)] {f : (Sum.{u1, u2} α β) -> γ} {i : β} {j : β} {x : γ}, Eq.{succ u3} γ (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => _inst_2 a b) f (Sum.inr.{u1, u2} α β i) x (Sum.inr.{u1, u2} α β j)) (Function.update.{succ u2, succ u3} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) (Function.comp.{succ u2, max (succ u1) (succ u2), succ u3} β (Sum.{u1, u2} α β) γ f (Sum.inr.{u1, u2} α β)) i x j)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u3} β] [_inst_2 : DecidableEq.{max (succ u3) (succ u2)} (Sum.{u2, u3} α β)] {f : (Sum.{u2, u3} α β) -> γ} {i : β} {j : β} {x : γ}, Eq.{succ u1} γ (Function.update.{max (succ u2) (succ u3), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => _inst_2 a b) f (Sum.inr.{u2, u3} α β i) x (Sum.inr.{u2, u3} α β j)) (Function.update.{succ u3, succ u1} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_1 a b) (Function.comp.{succ u3, max (succ u2) (succ u3), succ u1} β (Sum.{u2, u3} α β) γ f (Sum.inr.{u2, u3} α β)) i x j)
Case conversion may be inaccurate. Consider using '#align sum.update_inr_apply_inr Sum.update_inr_apply_inrₓ'. -/
@[simp]
theorem update_inr_apply_inr [DecidableEq β] [DecidableEq (Sum α β)] {f : Sum α β → γ} {i j : β}
{x : γ} : update f (inr i) x (inr j) = update (f ∘ inr) i x j := by rw [← update_inr_comp_inr]
#align sum.update_inr_apply_inr Sum.update_inr_apply_inr
#print Sum.swap /-
/-- Swap the factors of a sum type -/
def swap : Sum α β → Sum β α :=
Sum.elim inr inl
#align sum.swap Sum.swap
-/
#print Sum.swap_inl /-
@[simp]
theorem swap_inl (x : α) : swap (inl x : Sum α β) = inr x :=
rfl
#align sum.swap_inl Sum.swap_inl
-/
#print Sum.swap_inr /-
@[simp]
theorem swap_inr (x : β) : swap (inr x : Sum α β) = inl x :=
rfl
#align sum.swap_inr Sum.swap_inr
-/
#print Sum.swap_swap /-
@[simp]
theorem swap_swap (x : Sum α β) : swap (swap x) = x := by cases x <;> rfl
#align sum.swap_swap Sum.swap_swap
-/
#print Sum.swap_swap_eq /-
@[simp]
theorem swap_swap_eq : swap ∘ swap = @id (Sum α β) :=
funext <| swap_swap
#align sum.swap_swap_eq Sum.swap_swap_eq
-/
#print Sum.swap_leftInverse /-
@[simp]
theorem swap_leftInverse : Function.LeftInverse (@swap α β) swap :=
swap_swap
#align sum.swap_left_inverse Sum.swap_leftInverse
-/
#print Sum.swap_rightInverse /-
@[simp]
theorem swap_rightInverse : Function.RightInverse (@swap α β) swap :=
swap_swap
#align sum.swap_right_inverse Sum.swap_rightInverse
-/
#print Sum.isLeft_swap /-
@[simp]
theorem isLeft_swap (x : Sum α β) : x.swap.isLeft = x.isRight := by cases x <;> rfl
#align sum.is_left_swap Sum.isLeft_swap
-/
#print Sum.isRight_swap /-
@[simp]
theorem isRight_swap (x : Sum α β) : x.swap.isRight = x.isLeft := by cases x <;> rfl
#align sum.is_right_swap Sum.isRight_swap
-/
#print Sum.getLeft_swap /-
@[simp]
theorem getLeft_swap (x : Sum α β) : x.swap.getLeft = x.getRight := by cases x <;> rfl
#align sum.get_left_swap Sum.getLeft_swap
-/
#print Sum.getRight_swap /-
@[simp]
theorem getRight_swap (x : Sum α β) : x.swap.getRight = x.getLeft := by cases x <;> rfl
#align sum.get_right_swap Sum.getRight_swap
-/
section LiftRel
#print Sum.LiftRel /-
/-- Lifts pointwise two relations between `α` and `γ` and between `β` and `δ` to a relation between
`α ⊕ β` and `γ ⊕ δ`. -/
inductive LiftRel (r : α → γ → Prop) (s : β → δ → Prop) : Sum α β → Sum γ δ → Prop
| inl {a c} : r a c → LiftRel (inl a) (inl c)
| inr {b d} : s b d → LiftRel (inr b) (inr d)
#align sum.lift_rel Sum.LiftRel
-/
attribute [protected] LiftRel.inl LiftRel.inr
variable {r r₁ r₂ : α → γ → Prop} {s s₁ s₂ : β → δ → Prop} {a : α} {b : β} {c : γ} {d : δ}
{x : Sum α β} {y : Sum γ δ}
/- warning: sum.lift_rel_inl_inl -> Sum.liftRel_inl_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {a : α} {c : γ}, Iff (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s (Sum.inl.{u1, u2} α β a) (Sum.inl.{u3, u4} γ δ c)) (r a c)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {a : α} {c : γ}, Iff (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s (Sum.inl.{u3, u4} α β a) (Sum.inl.{u2, u1} γ δ c)) (r a c)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel_inl_inl Sum.liftRel_inl_inlₓ'. -/
@[simp]
theorem liftRel_inl_inl : LiftRel r s (inl a) (inl c) ↔ r a c :=
⟨fun h => by
cases h
assumption, LiftRel.inl⟩
#align sum.lift_rel_inl_inl Sum.liftRel_inl_inl
/- warning: sum.not_lift_rel_inl_inr -> Sum.not_liftRel_inl_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {a : α} {d : δ}, Not (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s (Sum.inl.{u1, u2} α β a) (Sum.inr.{u3, u4} γ δ d))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {a : α} {d : δ}, Not (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s (Sum.inl.{u3, u4} α β a) (Sum.inr.{u2, u1} γ δ d))
Case conversion may be inaccurate. Consider using '#align sum.not_lift_rel_inl_inr Sum.not_liftRel_inl_inrₓ'. -/
@[simp]
theorem not_liftRel_inl_inr : ¬LiftRel r s (inl a) (inr d) :=
fun.
#align sum.not_lift_rel_inl_inr Sum.not_liftRel_inl_inr
/- warning: sum.not_lift_rel_inr_inl -> Sum.not_liftRel_inr_inl is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {b : β} {c : γ}, Not (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s (Sum.inr.{u1, u2} α β b) (Sum.inl.{u3, u4} γ δ c))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {b : β} {c : γ}, Not (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s (Sum.inr.{u3, u4} α β b) (Sum.inl.{u2, u1} γ δ c))
Case conversion may be inaccurate. Consider using '#align sum.not_lift_rel_inr_inl Sum.not_liftRel_inr_inlₓ'. -/
@[simp]
theorem not_liftRel_inr_inl : ¬LiftRel r s (inr b) (inl c) :=
fun.
#align sum.not_lift_rel_inr_inl Sum.not_liftRel_inr_inl
/- warning: sum.lift_rel_inr_inr -> Sum.liftRel_inr_inr is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {b : β} {d : δ}, Iff (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s (Sum.inr.{u1, u2} α β b) (Sum.inr.{u3, u4} γ δ d)) (s b d)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {b : β} {d : δ}, Iff (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s (Sum.inr.{u3, u4} α β b) (Sum.inr.{u2, u1} γ δ d)) (s b d)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel_inr_inr Sum.liftRel_inr_inrₓ'. -/
@[simp]
theorem liftRel_inr_inr : LiftRel r s (inr b) (inr d) ↔ s b d :=
⟨fun h => by
cases h
assumption, LiftRel.inr⟩
#align sum.lift_rel_inr_inr Sum.liftRel_inr_inr
instance [∀ a c, Decidable (r a c)] [∀ b d, Decidable (s b d)] :
∀ (ab : Sum α β) (cd : Sum γ δ), Decidable (LiftRel r s ab cd)
| inl a, inl c => decidable_of_iff' _ liftRel_inl_inl
| inl a, inr d => Decidable.isFalse not_liftRel_inl_inr
| inr b, inl c => Decidable.isFalse not_liftRel_inr_inl
| inr b, inr d => decidable_of_iff' _ liftRel_inr_inr
/- warning: sum.lift_rel.mono -> Sum.LiftRel.mono is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r₁ : α -> γ -> Prop} {r₂ : α -> γ -> Prop} {s₁ : β -> δ -> Prop} {s₂ : β -> δ -> Prop} {x : Sum.{u1, u2} α β} {y : Sum.{u3, u4} γ δ}, (forall (a : α) (b : γ), (r₁ a b) -> (r₂ a b)) -> (forall (a : β) (b : δ), (s₁ a b) -> (s₂ a b)) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r₁ s₁ x y) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r₂ s₂ x y)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r₁ : α -> γ -> Prop} {r₂ : α -> γ -> Prop} {s₁ : β -> δ -> Prop} {s₂ : β -> δ -> Prop} {x : Sum.{u3, u4} α β} {y : Sum.{u2, u1} γ δ}, (forall (a : α) (b : γ), (r₁ a b) -> (r₂ a b)) -> (forall (a : β) (b : δ), (s₁ a b) -> (s₂ a b)) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r₁ s₁ x y) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r₂ s₂ x y)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel.mono Sum.LiftRel.monoₓ'. -/
theorem LiftRel.mono (hr : ∀ a b, r₁ a b → r₂ a b) (hs : ∀ a b, s₁ a b → s₂ a b)
(h : LiftRel r₁ s₁ x y) : LiftRel r₂ s₂ x y :=
by
cases h
exacts[LiftRel.inl (hr _ _ ‹_›), LiftRel.inr (hs _ _ ‹_›)]
#align sum.lift_rel.mono Sum.LiftRel.mono
/- warning: sum.lift_rel.mono_left -> Sum.LiftRel.mono_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r₁ : α -> γ -> Prop} {r₂ : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u1, u2} α β} {y : Sum.{u3, u4} γ δ}, (forall (a : α) (b : γ), (r₁ a b) -> (r₂ a b)) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r₁ s x y) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r₂ s x y)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r₁ : α -> γ -> Prop} {r₂ : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u3, u4} α β} {y : Sum.{u2, u1} γ δ}, (forall (a : α) (b : γ), (r₁ a b) -> (r₂ a b)) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r₁ s x y) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r₂ s x y)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel.mono_left Sum.LiftRel.mono_leftₓ'. -/
theorem LiftRel.mono_left (hr : ∀ a b, r₁ a b → r₂ a b) (h : LiftRel r₁ s x y) : LiftRel r₂ s x y :=
h.mono hr fun _ _ => id
#align sum.lift_rel.mono_left Sum.LiftRel.mono_left
/- warning: sum.lift_rel.mono_right -> Sum.LiftRel.mono_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s₁ : β -> δ -> Prop} {s₂ : β -> δ -> Prop} {x : Sum.{u1, u2} α β} {y : Sum.{u3, u4} γ δ}, (forall (a : β) (b : δ), (s₁ a b) -> (s₂ a b)) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s₁ x y) -> (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s₂ x y)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s₁ : β -> δ -> Prop} {s₂ : β -> δ -> Prop} {x : Sum.{u3, u4} α β} {y : Sum.{u2, u1} γ δ}, (forall (a : β) (b : δ), (s₁ a b) -> (s₂ a b)) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s₁ x y) -> (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s₂ x y)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel.mono_right Sum.LiftRel.mono_rightₓ'. -/
theorem LiftRel.mono_right (hs : ∀ a b, s₁ a b → s₂ a b) (h : LiftRel r s₁ x y) :
LiftRel r s₂ x y :=
h.mono (fun _ _ => id) hs
#align sum.lift_rel.mono_right Sum.LiftRel.mono_right
/- warning: sum.lift_rel.swap -> Sum.LiftRel.swap is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u1, u2} α β} {y : Sum.{u3, u4} γ δ}, (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s x y) -> (Sum.LiftRel.{u2, u1, u4, u3} β α δ γ s r (Sum.swap.{u1, u2} α β x) (Sum.swap.{u3, u4} γ δ y))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u2}} {δ : Type.{u1}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u3, u4} α β} {y : Sum.{u2, u1} γ δ}, (Sum.LiftRel.{u3, u4, u2, u1} α β γ δ r s x y) -> (Sum.LiftRel.{u4, u3, u1, u2} β α δ γ s r (Sum.swap.{u3, u4} α β x) (Sum.swap.{u2, u1} γ δ y))
Case conversion may be inaccurate. Consider using '#align sum.lift_rel.swap Sum.LiftRel.swapₓ'. -/
protected theorem LiftRel.swap (h : LiftRel r s x y) : LiftRel s r x.swap y.swap :=
by
cases h
exacts[LiftRel.inr ‹_›, LiftRel.inl ‹_›]
#align sum.lift_rel.swap Sum.LiftRel.swap
/- warning: sum.lift_rel_swap_iff -> Sum.liftRel_swap_iff is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u1, u2} α β} {y : Sum.{u3, u4} γ δ}, Iff (Sum.LiftRel.{u2, u1, u4, u3} β α δ γ s r (Sum.swap.{u1, u2} α β x) (Sum.swap.{u3, u4} γ δ y)) (Sum.LiftRel.{u1, u2, u3, u4} α β γ δ r s x y)
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u1}} {δ : Type.{u2}} {r : α -> γ -> Prop} {s : β -> δ -> Prop} {x : Sum.{u3, u4} α β} {y : Sum.{u1, u2} γ δ}, Iff (Sum.LiftRel.{u4, u3, u2, u1} β α δ γ s r (Sum.swap.{u3, u4} α β x) (Sum.swap.{u1, u2} γ δ y)) (Sum.LiftRel.{u3, u4, u1, u2} α β γ δ r s x y)
Case conversion may be inaccurate. Consider using '#align sum.lift_rel_swap_iff Sum.liftRel_swap_iffₓ'. -/
@[simp]
theorem liftRel_swap_iff : LiftRel s r x.swap y.swap ↔ LiftRel r s x y :=
⟨fun h => by
rw [← swap_swap x, ← swap_swap y]
exact h.swap, LiftRel.swap⟩
#align sum.lift_rel_swap_iff Sum.liftRel_swap_iff
end LiftRel
section Lex
#print Sum.Lex /-
/-- Lexicographic order for sum. Sort all the `inl a` before the `inr b`, otherwise use the
respective order on `α` or `β`. -/
inductive Lex (r : α → α → Prop) (s : β → β → Prop) : Sum α β → Sum α β → Prop
| inl {a₁ a₂} (h : r a₁ a₂) : Lex (inl a₁) (inl a₂)
| inr {b₁ b₂} (h : s b₁ b₂) : Lex (inr b₁) (inr b₂)
| sep (a b) : Lex (inl a) (inr b)
#align sum.lex Sum.Lex
-/
attribute [protected] Sum.Lex.inl Sum.Lex.inr
attribute [simp] Lex.sep
variable {r r₁ r₂ : α → α → Prop} {s s₁ s₂ : β → β → Prop} {a a₁ a₂ : α} {b b₁ b₂ : β}
{x y : Sum α β}
#print Sum.lex_inl_inl /-
@[simp]
theorem lex_inl_inl : Lex r s (inl a₁) (inl a₂) ↔ r a₁ a₂ :=
⟨fun h => by
cases h
assumption, Lex.inl⟩
#align sum.lex_inl_inl Sum.lex_inl_inl
-/
#print Sum.lex_inr_inr /-
@[simp]
theorem lex_inr_inr : Lex r s (inr b₁) (inr b₂) ↔ s b₁ b₂ :=
⟨fun h => by
cases h
assumption, Lex.inr⟩
#align sum.lex_inr_inr Sum.lex_inr_inr
-/
#print Sum.lex_inr_inl /-
@[simp]
theorem lex_inr_inl : ¬Lex r s (inr b) (inl a) :=
fun.
#align sum.lex_inr_inl Sum.lex_inr_inl
-/
instance [DecidableRel r] [DecidableRel s] : DecidableRel (Lex r s)
| inl a, inl c => decidable_of_iff' _ lex_inl_inl
| inl a, inr d => Decidable.isTrue (Lex.sep _ _)
| inr b, inl c => Decidable.isFalse lex_inr_inl
| inr b, inr d => decidable_of_iff' _ lex_inr_inr
#print Sum.LiftRel.lex /-
protected theorem LiftRel.lex {a b : Sum α β} (h : LiftRel r s a b) : Lex r s a b :=
by
cases h
exacts[Lex.inl ‹_›, Lex.inr ‹_›]
#align sum.lift_rel.lex Sum.LiftRel.lex
-/
#print Sum.liftRel_subrelation_lex /-
theorem liftRel_subrelation_lex : Subrelation (LiftRel r s) (Lex r s) := fun a b => LiftRel.lex
#align sum.lift_rel_subrelation_lex Sum.liftRel_subrelation_lex
-/
#print Sum.Lex.mono /-
theorem Lex.mono (hr : ∀ a b, r₁ a b → r₂ a b) (hs : ∀ a b, s₁ a b → s₂ a b) (h : Lex r₁ s₁ x y) :
Lex r₂ s₂ x y := by
cases h
exacts[Lex.inl (hr _ _ ‹_›), Lex.inr (hs _ _ ‹_›), Lex.sep _ _]
#align sum.lex.mono Sum.Lex.mono
-/
#print Sum.Lex.mono_left /-
theorem Lex.mono_left (hr : ∀ a b, r₁ a b → r₂ a b) (h : Lex r₁ s x y) : Lex r₂ s x y :=
h.mono hr fun _ _ => id
#align sum.lex.mono_left Sum.Lex.mono_left
-/
#print Sum.Lex.mono_right /-
theorem Lex.mono_right (hs : ∀ a b, s₁ a b → s₂ a b) (h : Lex r s₁ x y) : Lex r s₂ x y :=
h.mono (fun _ _ => id) hs
#align sum.lex.mono_right Sum.Lex.mono_right
-/
#print Sum.lex_acc_inl /-
theorem lex_acc_inl {a} (aca : Acc r a) : Acc (Lex r s) (inl a) :=
by
induction' aca with a H IH
constructor; intro y h
cases' h with a' _ h'
exact IH _ h'
#align sum.lex_acc_inl Sum.lex_acc_inl
-/
#print Sum.lex_acc_inr /-
theorem lex_acc_inr (aca : ∀ a, Acc (Lex r s) (inl a)) {b} (acb : Acc s b) :
Acc (Lex r s) (inr b) := by
induction' acb with b H IH
constructor; intro y h
cases' h with _ _ _ b' _ h' a
· exact IH _ h'
· exact aca _
#align sum.lex_acc_inr Sum.lex_acc_inr
-/
#print Sum.lex_wf /-
theorem lex_wf (ha : WellFounded r) (hb : WellFounded s) : WellFounded (Lex r s) :=
have aca : ∀ a, Acc (Lex r s) (inl a) := fun a => lex_acc_inl (ha.apply a)
⟨fun x => Sum.recOn x aca fun b => lex_acc_inr aca (hb.apply b)⟩
#align sum.lex_wf Sum.lex_wf
-/
end Lex
end Sum
open Sum
namespace Function
/- warning: function.injective.sum_elim -> Function.Injective.sum_elim is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {f : α -> γ} {g : β -> γ}, (Function.Injective.{succ u1, succ u3} α γ f) -> (Function.Injective.{succ u2, succ u3} β γ g) -> (forall (a : α) (b : β), Ne.{succ u3} γ (f a) (g b)) -> (Function.Injective.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) γ (Sum.elim.{u1, u2, succ u3} α β γ f g))
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} {f : α -> γ} {g : β -> γ}, (Function.Injective.{succ u2, succ u1} α γ f) -> (Function.Injective.{succ u3, succ u1} β γ g) -> (forall (a : α) (b : β), Ne.{succ u1} γ (f a) (g b)) -> (Function.Injective.{max (succ u3) (succ u2), succ u1} (Sum.{u2, u3} α β) γ (Sum.elim.{u2, u3, succ u1} α β γ f g))
Case conversion may be inaccurate. Consider using '#align function.injective.sum_elim Function.Injective.sum_elimₓ'. -/
theorem Injective.sum_elim {f : α → γ} {g : β → γ} (hf : Injective f) (hg : Injective g)
(hfg : ∀ a b, f a ≠ g b) : Injective (Sum.elim f g)
| inl x, inl y, h => congr_arg inl <| hf h
| inl x, inr y, h => (hfg x y h).elim
| inr x, inl y, h => (hfg y x h.symm).elim
| inr x, inr y, h => congr_arg inr <| hg h
#align function.injective.sum_elim Function.Injective.sum_elim
#print Function.Injective.sum_map /-
theorem Injective.sum_map {f : α → β} {g : α' → β'} (hf : Injective f) (hg : Injective g) :
Injective (Sum.map f g)
| inl x, inl y, h => congr_arg inl <| hf <| inl.inj h
| inr x, inr y, h => congr_arg inr <| hg <| inr.inj h
#align function.injective.sum_map Function.Injective.sum_map
-/
#print Function.Surjective.sum_map /-
theorem Surjective.sum_map {f : α → β} {g : α' → β'} (hf : Surjective f) (hg : Surjective g) :
Surjective (Sum.map f g)
| inl y =>
let ⟨x, hx⟩ := hf y
⟨inl x, congr_arg inl hx⟩
| inr y =>
let ⟨x, hx⟩ := hg y
⟨inr x, congr_arg inr hx⟩
#align function.surjective.sum_map Function.Surjective.sum_map
-/
#print Function.Bijective.sum_map /-
theorem Bijective.sum_map {f : α → β} {g : α' → β'} (hf : Bijective f) (hg : Bijective g) :
Bijective (Sum.map f g) :=
⟨hf.Injective.sum_map hg.Injective, hf.Surjective.sum_map hg.Surjective⟩
#align function.bijective.sum_map Function.Bijective.sum_map
-/
end Function
namespace Sum
open Function
/- warning: sum.map_injective -> Sum.map_injective is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {f : α -> γ} {g : β -> δ}, Iff (Function.Injective.{max (succ u1) (succ u2), max (succ u3) (succ u4)} (Sum.{u1, u2} α β) (Sum.{u3, u4} γ δ) (Sum.map.{u1, u2, u3, u4} α γ β δ f g)) (And (Function.Injective.{succ u1, succ u3} α γ f) (Function.Injective.{succ u2, succ u4} β δ g))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u1}} {δ : Type.{u2}} {f : α -> γ} {g : β -> δ}, Iff (Function.Injective.{max (succ u4) (succ u3), max (succ u2) (succ u1)} (Sum.{u3, u4} α β) (Sum.{u1, u2} γ δ) (Sum.map.{u3, u4, u1, u2} α γ β δ f g)) (And (Function.Injective.{succ u3, succ u1} α γ f) (Function.Injective.{succ u4, succ u2} β δ g))
Case conversion may be inaccurate. Consider using '#align sum.map_injective Sum.map_injectiveₓ'. -/
@[simp]
theorem map_injective {f : α → γ} {g : β → δ} :
Injective (Sum.map f g) ↔ Injective f ∧ Injective g :=
⟨fun h =>
⟨fun a₁ a₂ ha => inl_injective <| @h (inl a₁) (inl a₂) (congr_arg inl ha : _), fun b₁ b₂ hb =>
inr_injective <| @h (inr b₁) (inr b₂) (congr_arg inr hb : _)⟩,
fun h => h.1.sum_map h.2⟩
#align sum.map_injective Sum.map_injective
/- warning: sum.map_surjective -> Sum.map_surjective is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {f : α -> γ} {g : β -> δ}, Iff (Function.Surjective.{max (succ u1) (succ u2), max (succ u3) (succ u4)} (Sum.{u1, u2} α β) (Sum.{u3, u4} γ δ) (Sum.map.{u1, u2, u3, u4} α γ β δ f g)) (And (Function.Surjective.{succ u1, succ u3} α γ f) (Function.Surjective.{succ u2, succ u4} β δ g))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u1}} {δ : Type.{u2}} {f : α -> γ} {g : β -> δ}, Iff (Function.Surjective.{max (succ u4) (succ u3), max (succ u2) (succ u1)} (Sum.{u3, u4} α β) (Sum.{u1, u2} γ δ) (Sum.map.{u3, u4, u1, u2} α γ β δ f g)) (And (Function.Surjective.{succ u3, succ u1} α γ f) (Function.Surjective.{succ u4, succ u2} β δ g))
Case conversion may be inaccurate. Consider using '#align sum.map_surjective Sum.map_surjectiveₓ'. -/
@[simp]
theorem map_surjective {f : α → γ} {g : β → δ} :
Surjective (Sum.map f g) ↔ Surjective f ∧ Surjective g :=
⟨fun h =>
⟨fun c => by
obtain ⟨a | b, h⟩ := h (inl c)
· exact ⟨a, inl_injective h⟩
· cases h, fun d => by
obtain ⟨a | b, h⟩ := h (inr d)
· cases h
· exact ⟨b, inr_injective h⟩⟩,
fun h => h.1.sum_map h.2⟩
#align sum.map_surjective Sum.map_surjective
/- warning: sum.map_bijective -> Sum.map_bijective is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} {δ : Type.{u4}} {f : α -> γ} {g : β -> δ}, Iff (Function.Bijective.{max (succ u1) (succ u2), max (succ u3) (succ u4)} (Sum.{u1, u2} α β) (Sum.{u3, u4} γ δ) (Sum.map.{u1, u2, u3, u4} α γ β δ f g)) (And (Function.Bijective.{succ u1, succ u3} α γ f) (Function.Bijective.{succ u2, succ u4} β δ g))
but is expected to have type
forall {α : Type.{u3}} {β : Type.{u4}} {γ : Type.{u1}} {δ : Type.{u2}} {f : α -> γ} {g : β -> δ}, Iff (Function.Bijective.{max (succ u4) (succ u3), max (succ u2) (succ u1)} (Sum.{u3, u4} α β) (Sum.{u1, u2} γ δ) (Sum.map.{u3, u4, u1, u2} α γ β δ f g)) (And (Function.Bijective.{succ u3, succ u1} α γ f) (Function.Bijective.{succ u4, succ u2} β δ g))
Case conversion may be inaccurate. Consider using '#align sum.map_bijective Sum.map_bijectiveₓ'. -/
@[simp]
theorem map_bijective {f : α → γ} {g : β → δ} :
Bijective (Sum.map f g) ↔ Bijective f ∧ Bijective g :=
(map_injective.And map_surjective).trans <| and_and_and_comm _ _ _ _
#align sum.map_bijective Sum.map_bijective
/- warning: sum.elim_const_const -> Sum.elim_const_const is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} (c : γ), Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Sum.elim.{u1, u2, succ u3} α β γ (Function.const.{succ u3, succ u1} γ α c) (Function.const.{succ u3, succ u2} γ β c)) (Function.const.{succ u3, max (succ u1) (succ u2)} γ (Sum.{u1, u2} α β) c)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} (c : γ), Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Sum.elim.{u2, u3, succ u1} α β γ (Function.const.{succ u1, succ u2} γ α c) (Function.const.{succ u1, succ u3} γ β c)) (Function.const.{succ u1, max (succ u2) (succ u3)} γ (Sum.{u2, u3} α β) c)
Case conversion may be inaccurate. Consider using '#align sum.elim_const_const Sum.elim_const_constₓ'. -/
theorem elim_const_const (c : γ) : Sum.elim (const _ c : α → γ) (const _ c : β → γ) = const _ c :=
by
ext x
cases x <;> rfl
#align sum.elim_const_const Sum.elim_const_const
/- warning: sum.elim_lam_const_lam_const -> Sum.elim_lam_const_lam_const is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} (c : γ), Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Sum.elim.{u1, u2, succ u3} α β γ (fun (_x : α) => c) (fun (_x : β) => c)) (fun (_x : Sum.{u1, u2} α β) => c)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} (c : γ), Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Sum.elim.{u2, u3, succ u1} α β γ (fun (_x : α) => c) (fun (_x : β) => c)) (fun (_x : Sum.{u2, u3} α β) => c)
Case conversion may be inaccurate. Consider using '#align sum.elim_lam_const_lam_const Sum.elim_lam_const_lam_constₓ'. -/
@[simp]
theorem elim_lam_const_lam_const (c : γ) :
(Sum.elim (fun _ : α => c) fun _ : β => c) = fun _ => c :=
Sum.elim_const_const c
#align sum.elim_lam_const_lam_const Sum.elim_lam_const_lam_const
/- warning: sum.elim_update_left -> Sum.elim_update_left is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : DecidableEq.{succ u2} β] (f : α -> γ) (g : β -> γ) (i : α) (c : γ), Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Sum.elim.{u1, u2, succ u3} α β γ (Function.update.{succ u1, succ u3} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) f i c) g) (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => Sum.decidableEq.{u1, u2} α (fun (a : α) (b : α) => _inst_1 a b) β (fun (a : β) (b : β) => _inst_2 a b) a b) (Sum.elim.{u1, u2, succ u3} α β γ f g) (Sum.inl.{u1, u2} α β i) c)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] [_inst_2 : DecidableEq.{succ u3} β] (f : α -> γ) (g : β -> γ) (i : α) (c : γ), Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Sum.elim.{u2, u3, succ u1} α β γ (Function.update.{succ u2, succ u1} α (fun (ᾰ : α) => γ) (fun (a : α) (b : α) => _inst_1 a b) f i c) g) (Function.update.{max (succ u3) (succ u2), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => Sum.instDecidableEqSum.{u2, u3} α β (fun (a : α) (b : α) => _inst_1 a b) (fun (a : β) (b : β) => _inst_2 a b) a b) (Sum.elim.{u2, u3, succ u1} α β γ f g) (Sum.inl.{u2, u3} α β i) c)
Case conversion may be inaccurate. Consider using '#align sum.elim_update_left Sum.elim_update_leftₓ'. -/
theorem elim_update_left [DecidableEq α] [DecidableEq β] (f : α → γ) (g : β → γ) (i : α) (c : γ) :
Sum.elim (Function.update f i c) g = Function.update (Sum.elim f g) (inl i) c :=
by
ext x; cases x
· by_cases h : x = i
· subst h
simp
· simp [h]
· simp
#align sum.elim_update_left Sum.elim_update_left
/- warning: sum.elim_update_right -> Sum.elim_update_right is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} {β : Type.{u2}} {γ : Type.{u3}} [_inst_1 : DecidableEq.{succ u1} α] [_inst_2 : DecidableEq.{succ u2} β] (f : α -> γ) (g : β -> γ) (i : β) (c : γ), Eq.{max (max (succ u1) (succ u2)) (succ u3)} ((Sum.{u1, u2} α β) -> γ) (Sum.elim.{u1, u2, succ u3} α β γ f (Function.update.{succ u2, succ u3} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_2 a b) g i c)) (Function.update.{max (succ u1) (succ u2), succ u3} (Sum.{u1, u2} α β) (fun (ᾰ : Sum.{u1, u2} α β) => γ) (fun (a : Sum.{u1, u2} α β) (b : Sum.{u1, u2} α β) => Sum.decidableEq.{u1, u2} α (fun (a : α) (b : α) => _inst_1 a b) β (fun (a : β) (b : β) => _inst_2 a b) a b) (Sum.elim.{u1, u2, succ u3} α β γ f g) (Sum.inr.{u1, u2} α β i) c)
but is expected to have type
forall {α : Type.{u2}} {β : Type.{u3}} {γ : Type.{u1}} [_inst_1 : DecidableEq.{succ u2} α] [_inst_2 : DecidableEq.{succ u3} β] (f : α -> γ) (g : β -> γ) (i : β) (c : γ), Eq.{max (max (succ u2) (succ u3)) (succ u1)} ((Sum.{u2, u3} α β) -> γ) (Sum.elim.{u2, u3, succ u1} α β γ f (Function.update.{succ u3, succ u1} β (fun (ᾰ : β) => γ) (fun (a : β) (b : β) => _inst_2 a b) g i c)) (Function.update.{max (succ u3) (succ u2), succ u1} (Sum.{u2, u3} α β) (fun (ᾰ : Sum.{u2, u3} α β) => γ) (fun (a : Sum.{u2, u3} α β) (b : Sum.{u2, u3} α β) => Sum.instDecidableEqSum.{u2, u3} α β (fun (a : α) (b : α) => _inst_1 a b) (fun (a : β) (b : β) => _inst_2 a b) a b) (Sum.elim.{u2, u3, succ u1} α β γ f g) (Sum.inr.{u2, u3} α β i) c)
Case conversion may be inaccurate. Consider using '#align sum.elim_update_right Sum.elim_update_rightₓ'. -/
theorem elim_update_right [DecidableEq α] [DecidableEq β] (f : α → γ) (g : β → γ) (i : β) (c : γ) :
Sum.elim f (Function.update g i c) = Function.update (Sum.elim f g) (inr i) c :=
by
ext x; cases x
· simp
· by_cases h : x = i
· subst h
simp
· simp [h]
#align sum.elim_update_right Sum.elim_update_right
end Sum
/-!
### Ternary sum
Abbreviations for the maps from the summands to `α ⊕ β ⊕ γ`. This is useful for pattern-matching.
-/
namespace Sum3
#print Sum3.in₀ /-
/-- The map from the first summand into a ternary sum. -/
@[match_pattern, simp, reducible]
def in₀ (a) : Sum α (Sum β γ) :=
inl a
#align sum3.in₀ Sum3.in₀
-/
#print Sum3.in₁ /-
/-- The map from the second summand into a ternary sum. -/
@[match_pattern, simp, reducible]
def in₁ (b) : Sum α (Sum β γ) :=
inr <| inl b
#align sum3.in₁ Sum3.in₁
-/
#print Sum3.in₂ /-
/-- The map from the third summand into a ternary sum. -/
@[match_pattern, simp, reducible]
def in₂ (c) : Sum α (Sum β γ) :=
inr <| inr c
#align sum3.in₂ Sum3.in₂
-/
end Sum3
|
module Sixel.Output
import System.FFI
import Sixel.Library
import Sixel.Symbols
import Sixel.Allocator
%default total
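-- Bindings to libsixel's sixel_output_* API. The Nat index on `Output`
-- tracks how many extra references (`ref`/`unref`) are currently held,
-- so `unref` can only be applied to an output that still has at least
-- one outstanding reference.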
public export
PrimOutput : Type
PrimOutput = GCAnyPtr
public export
WriteFn : Type
WriteFn = (toWrite : AnyPtr) -> (size : Int) -> (priv : AnyPtr) -> PrimIO Int
MkOutputRes : Type
MkOutputRes = Struct "sixel_utils_output_t" [("output", AnyPtr), ("status", Int)]
export
data Output : (0 n : Nat) -> Type where
PointOutput : PrimOutput -> Output Z
RootOutput : Output n -> Output (S n)
%foreign (sixelutils "sixel_utils_output_ptr_new")
sixel_utils_output_ptr_new : PrimIO (Ptr AnyPtr)
%foreign (sixelutils "sixel_utils_output_ptr_deref")
sixel_utils_output_ptr_deref : Ptr AnyPtr -> AnyPtr
%foreign (sixel "sixel_output_new")
sixel_output_new : Ptr AnyPtr -> WriteFn -> (priv : AnyPtr) -> PrimAllocator -> PrimIO Int
%foreign (sixel "sixel_output_ref")
sixel_output_ref : PrimOutput -> ()
%foreign (sixel "sixel_output_unref")
sixel_output_unref : PrimOutput -> ()
%foreign (sixel "sixel_output_destroy")
sixel_output_destroy : AnyPtr -> PrimIO ()
%foreign (sixel "sixel_output_get_8bit_availability")
sixel_output_get_8bit_availability : PrimOutput -> Int
%foreign (sixel "sixel_output_set_8bit_availability")
sixel_output_set_8bit_availability : PrimOutput -> Int -> ()
%foreign (sixel "sixel_output_set_gri_arg_limit")
sixel_output_set_gri_arg_limit : PrimOutput -> Int -> ()
%foreign (sixel "sixel_output_set_penetrate_multiplexer")
sixel_output_set_penetrate_multiplexer : PrimOutput -> Int -> ()
%foreign (sixel "sixel_output_set_skip_dcs_envelope")
sixel_output_set_skip_dcs_envelope : PrimOutput -> Int -> ()
%foreign (sixel "sixel_output_set_palette_type")
sixel_output_set_palette_type : PrimOutput -> Int -> ()
%foreign (sixel "sixel_output_set_encode_policy")
sixel_output_set_encode_policy : PrimOutput -> Int -> ()
export
MkOutput : {auto n : Nat} ->
WriteFn ->
{default prim__getNullAnyPtr priv : AnyPtr} ->
{default (getNullAllocator n) alloc : Allocator n} ->
IO (Either Status (Output Z))
MkOutput writefn {priv} {alloc} = do
ppoutput <- fromPrim sixel_utils_output_ptr_new
status <- MkStatus <$> fromPrim (sixel_output_new ppoutput writefn priv (toPrim alloc))
poutput <- onCollectAny (sixel_utils_output_ptr_deref ppoutput) (\po => fromPrim $ sixel_output_destroy po)
pure $ if succeeded status then Right (PointOutput poutput) else Left status
export
ref : Output n -> IO (Output (S n))
ref (RootOutput output) = RootOutput <$> ref output
ref output@(PointOutput poutput) = let _ = sixel_output_ref poutput in pure $ RootOutput output
export
unref : Output (S n) -> IO (Output n)
unref (RootOutput output@(RootOutput _)) = RootOutput <$> unref output
unref (RootOutput output@(PointOutput poutput)) = let _ = sixel_output_unref poutput in pure output
export
refs : Output n -> Nat
refs (RootOutput output) = S (refs output)
refs (PointOutput _) = Z
export
toPrim : Output n -> PrimOutput
toPrim (RootOutput output) = toPrim output
toPrim (PointOutput poutput) = poutput
export %inline
get8BitAvailability : Output n -> IO Bool
get8BitAvailability output =
pure $ intToBool $ sixel_output_get_8bit_availability $ toPrim output
export %inline
set8BitAvailability : Output n -> (setting : Bool) -> IO ()
set8BitAvailability output setting =
pure $ sixel_output_set_8bit_availability (toPrim output) (ifThenElse setting 1 0)
export %inline
setGriArgLimit : Output n -> (setting : Bool) -> IO ()
setGriArgLimit output setting =
pure $ sixel_output_set_gri_arg_limit (toPrim output) (ifThenElse setting 1 0)
export %inline
setPenetrateMultiplexer : Output n -> (setting : Bool) -> IO ()
setPenetrateMultiplexer output setting =
pure $ sixel_output_set_penetrate_multiplexer (toPrim output) (ifThenElse setting 1 0)
export %inline
setSkipDcsEnvelope : Output n -> (setting : Bool) -> IO ()
setSkipDcsEnvelope output setting =
pure $ sixel_output_set_skip_dcs_envelope (toPrim output) (ifThenElse setting 1 0)
export %inline
setPaletteType : Output n -> PaletteType -> IO ()
setPaletteType output type =
pure $ sixel_output_set_palette_type (toPrim output) (binding type)
export %inline
setEncodePolicy : Output n -> EncodePolicy -> IO ()
setEncodePolicy output policy =
pure $ sixel_output_set_encode_policy (toPrim output) (binding policy)
|
After declaring the caves a World Heritage Site, UNESCO granted $100,000 to document the site's history and draw up a site plan. A part of the grant was used for conservation of the caves. Based on assessments by UNESCO, management plans include: better communication and collaboration between the ASI, on-site staff, and other responsible government departments; improved public information and awareness programs; monitoring environmental impact of tourists on the cave and island environment; greater attention to the maintenance of the rocks to address water leakages into the caves; and daily monitoring of both structural and chemical conservation measures.
|
module Main
-- Print one row of the pattern: x stars followed by a newline.
row : Integer -> IO ()
row x = do traverse_ (\_ => putStr "*") [1..x]
           putStrLn ""

-- Print a triangular star pattern with rows of 1 to 4 stars.
pattern : IO ()
pattern = do putStrLn "Pattern:"
             traverse_ row [1..4]

main : IO ()
main = pattern
--Run:
--$ idris pattern.idr -o pattern
--$ ./pattern |
#local h2 estimation
#set the library path first so that all packages are loaded from it
.libPaths("~/R/rlib-3.4_new")
library('RiskPortfolios')
library('MASS')
library('Matrix')
library('pdist')
library('corpcor')
#segment position
start_pos=45859651
stop_pos=45909024
#sample size (ascertained)
n_cases=1610
n_controls=2205
#load esimated effect size from MR results
dataset='covid_nejm'
df<-read.csv(paste0('/data/g*/z*/covid19_neanderthals/mr/result/',dataset,'_r2_0.1.csv'),header = T,stringsAsFactors = F)
df<-df[df$mrjti_sig=='sig',]
df<-df[!is.na(df$mrjti_beta_mean),]
#gene annotation
anno<-read.table('/data/coxvgi/z*/anno/gencode/37/gencode.v32.GRCh37.txt',header = T,stringsAsFactors = F)
df<-merge(df,anno,by='geneid')
#sample size
ascertained_prevalence=n_cases/(n_cases+n_controls)
N_effective = 4*ascertained_prevalence*(1-ascertained_prevalence)*(n_cases+n_controls)
i=1 #for test
for (i in 1:nrow(df)){
print(i)
geneid=df[i,'geneid']
tissue=df[i,'tissue']
alpha=df[i,'mrjti_beta_mean']
alpha_se=df[i,'mrjti_beta_se']
#get chr pos
chr=as.numeric(sub('chr','',anno[which(anno$geneid == geneid),'chr']))
left=max(anno[which(anno$geneid == geneid),'left']-1e6,1)
right=anno[which(anno$geneid == geneid),'right']+1e6
#extract snp using plink
cmd<-paste0('plink --bfile /data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/geno --chr ',chr,' --from-bp ',left,' --to-bp ',right,' --recode A --out /data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/tmp/',geneid,'_1m; plink --bfile /data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/geno --chr ',chr,' --from-bp ',left,' --to-bp ',right,' --make-bed --out /data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/tmp/',geneid,'_1m')
system(cmd,wait = T)
#load dosage for
dosage_raw<-try(read.table(paste0('/data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/tmp/',geneid,'_1m.raw'),header = T,stringsAsFactors = F))
if('try-error' %in% class(dosage_raw)){next}
dosage<-dosage_raw[,-c(1,3:6)] #rm useless cols
#load info
info_raw<-read.table(paste0('/data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/tmp/',geneid,'_1m.bim'),stringsAsFactors = F)
#rm raw file
cmd<-paste0('rm /data/g*/z*/covid19_neanderthals/expression_mediated_h2/geno/tmp/',geneid,'_1m*'); system(cmd,wait = T)
#info
info_raw$counted_allele<-sapply(colnames(dosage)[-1],function(x) strsplit(x,"[_]")[[1]][2])
info_raw$ref_allele<-ifelse(info_raw$counted_allele==info_raw$V5,info_raw$V6,info_raw$V5)
info_raw$chr_bp<-paste0(info_raw$V1,'_',info_raw$V4)
colnames(info_raw)[2]<-'rsid'
snp_info<-info_raw[,c('rsid','chr_bp','ref_allele','counted_allele')]
snp_info$pos=sapply(snp_info$chr_bp,function(x) strsplit(x,"[_]")[[1]][2])
snp_info$in_segment<-ifelse((snp_info$pos>start_pos & snp_info$pos<stop_pos),1,0)
in_segment_pos<-which(snp_info$in_segment==1)
#dosage
dosage$IID<-sub('^.....','',dosage$IID)
colnames(dosage)<-c('sampleid',sapply(colnames(dosage)[-1],function(x) strsplit(x,"[_]")[[1]][1]))
#post-imputation: replace remaining missing dosages with the SNP mean
if(ncol(dosage)>2){
dosage[,-1]<-round(apply(dosage[,-1], 2, function(x) ifelse(is.na(x),mean(x,na.rm=T),x)),3)
}
#scale genotype in dosage
dosage[,-1]<-sapply(dosage[,-1], function(x) scale(x))
dosage<-dosage[,c('sampleid',snp_info$rsid)]
#load expression file (residual)
exp<-readRDS(paste0('/data/coxvgi/z*/data/gtex/exp/v8/weighted/',tissue,'/',geneid,'.rds'))
exp<-exp[exp$tissue==tissue,]
exp$sampleid=sub('GTEX.','',exp$sampleid)
#estimate beta_eQTL
eqtl_df<-merge(exp[,c('sampleid','exp')],dosage,by='sampleid')
for (j in 1:nrow(snp_info)){
rsid=snp_info[j,'rsid']
ans<-summary(lm(eqtl_df[,'exp']~eqtl_df[,rsid]))
snp_info[j,'beta_eqtl']=ans$coefficients['eqtl_df[, rsid]','Estimate']
snp_info[j,'se_eqtl']=ans$coefficients['eqtl_df[, rsid]','Std. Error']
}
#---full---
full<-snp_info
#estimate snp-snp var-cov matrix
full_c <- covEstimation(as.matrix(dosage[,-1]), control = list(type = 'cor'))
#Moore-Penrose pseudoinverse
inverse_full_c<-ginv(full_c)
#q=rank(C) the maximum number of linearly independent vectors in rows or columns
q=rankMatrix(inverse_full_c)
q=as.numeric(q)
#pseudo gwas beta
alpha=df[i,'mrjti_beta_mean']
full$pseudo_gwas_beta = alpha * full$beta_eqtl
#pseudo gwas se
alpha_se=df[i,'mrjti_beta_se']
full$pseudo_gwas_se = ((alpha_se)^2 * (full$se_eqtl)^2 +
(alpha_se)^2 * (full$beta_eqtl)^2 +
(alpha)^2 * (full$se_eqtl)^2)^0.5
#n
df[i,'full_n'] = n = N_effective
#p
df[i,'full_p'] = p = nrow(full)
#h2
df[i,'full_h2_local'] = (t(full$pseudo_gwas_beta) %*% inverse_full_c %*% full$pseudo_gwas_beta - q/n) / (n-q) *n
#---segment---
segment<-full[full$in_segment==1,]
#estimate snp-snp var-cov matrix
segment_c<-full_c[which(full$in_segment==1),which(full$in_segment==1)]
#Moore-Penrose pseudoinverse
inverse_segment_c<-ginv(segment_c)
#q=rank(C) the maximum number of linearly independent vectors in rows or columns
q=rankMatrix(inverse_segment_c)
q=as.numeric(q)
#pseudo gwas beta
alpha=df[i,'mrjti_beta_mean']
segment$pseudo_gwas_beta = alpha * segment$beta_eqtl
#pseudo gwas se
alpha_se=df[i,'mrjti_beta_se']
segment$pseudo_gwas_se = ((alpha_se)^2 * (segment$se_eqtl)^2 +
(alpha_se)^2 * (segment$beta_eqtl)^2 +
(alpha)^2 * (segment$se_eqtl)^2)^0.5
#n
df[i,'segment_n']=n=N_effective
#p
df[i,'segment_p']=p=nrow(segment)
#h2
df[i,'segment_h2_local']=(t(segment$pseudo_gwas_beta) %*% inverse_segment_c %*% segment$pseudo_gwas_beta - q/n) / (n-q) *n
}
df$pi_c=df$segment_h2_local/df$full_h2_local
df$concentration_ratio=df$segment_h2_local*df$full_p/df$segment_p/df$full_h2_local
write.table(df,paste0('/data/g*/z*/covid19_neanderthals/expression_mediated_h2/result/h2_segment_',dataset,'.txt'),quote = F,sep='\t',row.names = F)
|
# Data Assimilation examples used during Bingewatch Academy
## Bingewatch Academy
In my Bingewatch Academy talk I explored the question: "Why is Daredevil better at predicting events than normal humans?" I explained how data assimilation is used in weather forecasting to continuously combine observations and model predictions to get the best possible forecast of the future. In this notebook I will generate the figures that were used in my presentation.
## Data assimilation
Data assimilation is the science of combining observational data and knowledge of system behavior to get an optimal estimate, including an estimate of its uncertainty, of a system's past, current and/or future states. There are many different data assimilation variants: some focus on a specific family of systems, some on specific use cases. In this example the data assimilation method that I use is the Ensemble Kalman Filter. The 'system', or 'model', that I use is the Lorenz-96 model.
## the Lorenz 96 model
The Lorenz 96 model <cite data-cite="2916206/TVEEWNWX"></cite> is a typical chaotic dynamical system that is often used as a benchmark model in data assimilation studies. It was designed by Lorenz as a toy model for atmospheric circulation. It is defined for $i=1,...,N$ by
\begin{equation}
\frac{dx_{i}}{dt}=\left(x_{i+1} - x_{i-2}\right)x_{i-1} - x_{i} + F
\end{equation}
where $i$ is cyclic, i.e. $x_{0}=x_{N}$ and $x_{-1} = x_{N-1}$. $F$ is an external force acting on the system. A value of $F=8$ is known to create chaotic behavior and is often used. The dimension $N$ can be freely chosen and is typically $40$, but for testing very high-dimensional systems, higher values can be used. The Lorenz 96 model is a typical chaotic model: although the model is deterministic, slight variations in the input state will over time result in completely different states.
## Numerical implementation of the Lorenz 96 model
A fourth-order Runge-Kutta scheme is used to implement the Lorenz 96 model. Writing the entire state vector as $\vec{x}$ and using $f\left(\vec{x}\right)$ as the right-hand side of the model, i.e.:
\begin{eqnarray}
f\left(x_{i}\right) = \left(x_{i+1} - x_{i-2}\right)x_{i-1} - x_{i} + F
\\
f\left(\vec{x}\right) = \left\{f\left(x_{1}\right),...,f\left(x_{N}\right)\right\}
\end{eqnarray}
the implementation is given by:
\begin{eqnarray}
\vec{k}_{1}=f\left(\vec{x}\left(t\right)\right)
\\
\vec{k}_{2}=f\left(\vec{x}\left(t\right) + \frac{1}{2}\vec{k}_{1}\Delta t\right)
\\
\vec{k}_{3}=f\left(\vec{x}\left(t\right) + \frac{1}{2}\vec{k}_{2}\Delta t\right)
\\
\vec{k}_{4}=f\left(\vec{x}\left(t\right) + \vec{k}_{3}\Delta t\right)
\end{eqnarray}
and finally
\begin{equation}
\vec{x}\left(t + \Delta t\right) = \vec{x}\left(t\right) + \frac{\Delta t}{6}\left(\vec{k}_{1} + 2\vec{k}_{2} + 2 \vec{k}_{3} + \vec{k}_{4}\right)
\end{equation}
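To make this concrete, below is a minimal stand-alone NumPy sketch of the Lorenz-96 right-hand side and one RK4 step. It is only an illustration of the scheme above and is not the `BMILorenz` module used later in this notebook; the function names are my own.
```python
import numpy as np

def lorenz96_rhs(x, F=8.0):
    """Right-hand side f(x) of the Lorenz-96 model with cyclic indexing."""
    # np.roll takes care of the cyclic boundary: x_0 = x_N and x_-1 = x_N-1
    return (np.roll(x, -1) - np.roll(x, 2)) * np.roll(x, 1) - x + F

def rk4_step(x, dt, F=8.0):
    """Advance the state vector x over one time step dt with classic RK4."""
    k1 = lorenz96_rhs(x, F)
    k2 = lorenz96_rhs(x + 0.5 * dt * k1, F)
    k3 = lorenz96_rhs(x + 0.5 * dt * k2, F)
    k4 = lorenz96_rhs(x + dt * k3, F)
    return x + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)

# tiny example: J = 40 states, a small perturbation in state 5 (as in the settings below)
x = np.zeros(40)
x[5] = 0.01
for _ in range(1000):  # 1000 steps of dt = 1e-3
    x = rk4_step(x, dt=1e-3)
```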
## The Basic Model Interface (BMI)
The basic model interface allows communicating with models in a generic fashion. It requires a few standard methods to be available, such as 'initialize()' and 'update()'. Methods that are not relevant for the model still need to be implemented, but can simply raise a one-line exception. See <cite data-cite="2916206/VXTQPCA7"></cite> for more information. Implementing the BMI allows easy interaction with the model. The cells below instantiate one instance of the model. For reasons that will become clear we will call this instance "truthModel".
BMI models are typically initialized with a settings-file. This is overkill here, but for completeness, we generate the settings-file first and then pass it to the model.
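As a rough illustration of the interface only, the skeleton below shows the shape of the BMI calls this notebook relies on (`initialize`, `update`, `get_value`, `set_value`, and the `_at_indices` variants). It is a hypothetical minimal wrapper that reuses the `rk4_step` sketch above; it is not the actual `BMILorenz` implementation, and the attribute names are assumptions.
```python
import numpy as np
import yaml

class MinimalBmiLorenz:
    """Hypothetical BMI-style wrapper around the Lorenz-96 model (sketch)."""

    def initialize(self, settings_file):
        with open(settings_file) as f:          # read the YAML settings-file
            cfg = yaml.safe_load(f)
        self.state = np.array(cfg['startState'], dtype=float)
        self.F, self.dt = cfg['F'], cfg['dt']
        self.t, self.end_time = cfg['startTime'], cfg['endTime']

    def update(self):                           # advance the model one time step
        self.state = rk4_step(self.state, self.dt, self.F)
        self.t += self.dt

    def get_current_time(self):
        return self.t

    def get_end_time(self):
        return self.end_time

    def get_value(self, name):                  # full state vector
        return self.state.copy()

    def get_value_at_indices(self, name, indices):
        return self.state[indices]

    def set_value(self, name, values):          # overwrite the full state
        self.state = np.array(values, dtype=float)

    def set_value_at_indices(self, name, indices, values):
        self.state[indices] = values
```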
## Ensemble Kalman Filter example using Lorenz-96 model and BMI
The Ensemble Kalman Filter (EnKF) is a variant of the Kalman Filter used when dealing with models for which it is hard to define a tangent linear model. Data assimilation methods, including all variants of the Kalman filter family, set out to provide the (mathematically) optimal estimate of the true state of a system, given that an (often physical/physically based) model is available that can project the current state of the model into the future and that at the same time observations are available that measure (parts of) the state, either directly or indirectly.
A mathematical overview of the EnKF is given in <cite data-cite="2916206/GVM9N4GZ"></cite>. This notebook is intended as an introduction to doing data assimilation within the eWaterCycle framework, with models that communicate through BMI. It is not intended as an in-depth explanation of the EnKF.
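For reference, a stochastic-EnKF analysis step could look like the sketch below. This is my own illustrative version under simplifying assumptions (observation-error covariance estimated from the perturbed-observation ensemble, no localization or inflation); the `EnKF.EnKF` function imported later may differ in its details.
```python
import numpy as np

def enkf_analysis(forecast, obs_ensemble, H):
    """Stochastic EnKF analysis step (sketch).

    forecast:     (n_state, N) forecast ensemble
    obs_ensemble: (n_obs, N)   perturbed-observation ensemble
    H:            callable mapping a state vector to observation space
    """
    n_state, N = forecast.shape
    # map every forecast member to observation space
    HX = np.column_stack([H(forecast[:, i]) for i in range(N)])
    # ensemble anomalies (deviations from the ensemble mean)
    A = forecast - forecast.mean(axis=1, keepdims=True)
    HA = HX - HX.mean(axis=1, keepdims=True)
    # observation-error covariance estimated from the perturbed observations
    D = obs_ensemble - obs_ensemble.mean(axis=1, keepdims=True)
    R = D @ D.T / (N - 1)
    # Kalman gain K = P_f H^T (H P_f H^T + R)^-1, with covariances from the ensemble
    PHT = A @ HA.T / (N - 1)
    S = HA @ HA.T / (N - 1) + R
    K = PHT @ np.linalg.inv(S)
    # update every member towards its own perturbed observation
    return forecast + K @ (obs_ensemble - HX)
```
In this notebook the ensembles are small (N=100 members of dimension J=40, half of which are observed), so the direct matrix inverse above is unproblematic.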
## data assimilation jargon
The following terms are often used in data assimilation:
- **ensemble** is a collection of model-instances. Often these are multiple instances of the same model where the spread in the model state represents the uncertainty in our knowledge of that model state.
- **model** a mathematical and/or computer code representation of how the state of the system evolves in time.
- **observation** a measurement (or set of measurements, including images) that relates to (part of) the state of the system
- **observation model** a mathematical and/or computer code representation of how the state relates to the observations. Often denoted by $\mathbf{H}$.
- **forecast** The forecasted state using the model and a previous state
- **analysis** The best estimate of the state using both a forecast and an observation. The analysis (or analysis ensemble) is the output of a data assimilation method.
```python
#required libraries and settings
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import yaml
import io
import math
import BMILorenz
import EnKF
```
## settings
The settings for this experiment are split between settings for the model, for the observations, for the data assimilation method and, finally, for the experiment itself.
```python
##model
J = 40 #dimension of Lorenz-96 Model
commonStartState = [0 for i in range(J)] #start-vector
commonStartState[5]=0.01
#settings data in dict for export to YAML file
settings = {'J': J,
'F': 8.0,
'startTime': 0.0,
'endTime': 10.0,
'dt':1e-3,
'startState': commonStartState}
##Observations
observationSigma = [0.05,0.5] #standard deviation of the observations. I'm running two different versions!
obsVector = range(math.floor(J/2)) #only observe half of the state
obsSize = len(obsVector); #size of the observations vector
def H(state): #the "observation model" that links model-space to observation-space
return state[obsVector]
##Ensemble Kalman Filter
N = 100 #number of ensemble members, needs to be higher than dimension of the
# model for stability, since no inflation is implemented.
## Experiment
spinUpTime = 3 #time that the ensemble is run before data assimilation starts
updateInterval = 1 #how often is the ensemble updated with observations
plotState = 5 #which state of the model (both truth and ensemble) to plot
obsPlotState = 5 #which state of the observations to plot
```
```python
# Write YAML setting file for BMI model
with io.open('settings.yaml', 'w', encoding='utf8') as outfile:
yaml.safe_dump(settings, outfile, default_flow_style=False, allow_unicode=True)
```
```python
#start with two empty ensembles. create one ensemble for the "high observational error" case and one for low errors.
ensembleLow = []
ensembleHigh = []
#create and initialize an instance of the BMILorenz class
truthModel = BMILorenz.BMILorenz ()
truthModel.initialize('settings.yaml')
output = pd.DataFrame(columns = ['truth','observation'])
for n in range (N):
    #add and initialize an ensemble member
ensembleLow.append(BMILorenz.BMILorenz ())
ensembleLow[n].initialize('settings.yaml')
ensembleLow[n].set_value_at_indices('state',5,ensembleLow[n].get_value_at_indices('state',5) + np.random.randn(1)*0.01)
ensembleHigh.append(BMILorenz.BMILorenz ())
ensembleHigh[n].initialize('settings.yaml')
ensembleHigh[n].set_value_at_indices('state',5,ensembleHigh[n].get_value_at_indices('state',5) + np.random.randn(1)*0.01)
#also add a column to the output dataframe to store the output
output['ensembleLow' + str(n)]= np.nan
output['ensembleHigh' + str(n)]= np.nan
```
```python
#spin up the Ensemble.
while truthModel.get_current_time()< spinUpTime:
truthModel.update()
output.loc[truthModel.get_current_time(),'truth'] = truthModel.get_value_at_indices('state',plotState)
#observationLow = truthModel.get_value('state') + observationSigma[0] * np.random.randn(J)
#output.at[truthModel.get_current_time(),'observationLow'] = observationLow[plotState]
#observationHigh = truthModel.get_value('state') + observationSigma[1] * np.random.randn(J)
    #output.at[truthModel.get_current_time(),'observationHigh'] = observationHigh[plotState]
#loop through the ensemble members and store the state after each update
for n in range (N):
ensembleLow[n].update()
output.at[ensembleLow[n].get_current_time(),'ensembleLow' + str(n)] = ensembleLow[n].get_value_at_indices('state',plotState)
ensembleHigh[n].update()
output.at[ensembleHigh[n].get_current_time(),'ensembleHigh' + str(n)] = ensembleHigh[n].get_value_at_indices('state',plotState)
updateTime = spinUpTime
```
```python
#run
foreCastEnsembleLow = np.zeros([J,N])
foreCastEnsembleHigh = np.zeros([J,N])
observationEnsembleLow = np.zeros([obsSize,N])
observationEnsembleHigh = np.zeros([obsSize,N])
while truthModel.get_current_time()<truthModel.get_end_time():
truthModel.update()
output.loc[truthModel.get_current_time(),'truth'] = truthModel.get_value_at_indices('state',plotState)
#loop through the ensemble members and store the state after each update
for n in range (N):
ensembleLow[n].update()
ensembleHigh[n].update()
#observationEnsemble[:,n] = observation + observationSigma*np.random.randn(obsSize)
output.at[ensembleLow[n].get_current_time(),'ensembleLow' + str(n)] = ensembleLow[n].get_value_at_indices('state',plotState)
output.at[ensembleHigh[n].get_current_time(),'ensembleHigh' + str(n)] = ensembleHigh[n].get_value_at_indices('state',plotState)
    #update ensemble on the basis of the observation
if truthModel.get_current_time() > updateTime:
observationLow = H(truthModel.get_value('state')) + observationSigma[0] * np.random.randn(obsSize)
output.at[truthModel.get_current_time(),'observationLow'] = observationLow[plotState]
observationHigh = H(truthModel.get_value('state')) + observationSigma[1] * np.random.randn(obsSize)
output.at[truthModel.get_current_time(),'observationHigh'] = observationHigh[plotState]
for n in range (N):
observationEnsembleHigh[:,n] = observationHigh + observationSigma[1]*np.random.randn(obsSize)
observationEnsembleLow[:,n] = observationLow + observationSigma[0]*np.random.randn(obsSize)
foreCastEnsembleLow[:,n] = ensembleLow[n].get_value('state')
foreCastEnsembleHigh[:,n] = ensembleHigh[n].get_value('state')
analysesEnsembleLow = EnKF.EnKF(foreCastEnsembleLow,observationEnsembleLow,H)
np.clip(analysesEnsembleLow, -10, 20, out=analysesEnsembleLow)
for n in range (N):
ensembleLow[n].set_value('state',analysesEnsembleLow[:,n])
analysesEnsembleHigh = EnKF.EnKF(foreCastEnsembleHigh,observationEnsembleHigh,H)
np.clip(analysesEnsembleHigh, -10, 20, out=analysesEnsembleHigh)
for n in range (N):
ensembleHigh[n].set_value('state',analysesEnsembleHigh[:,n])
updateTime = updateTime + updateInterval
```
## Here come the plots
```python
plt.plot(output.loc[output.index < 3,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('truth.eps')
plt.show()
```
```python
plt.plot(output.loc[output.index < 3,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 3,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('truthEnsemble.eps')
plt.show()
```
```python
plt.plot(output.loc[output.index < 4,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 4,'observationHigh'],'r.',output.loc[output.index < 4,'observationHigh']+2*observationSigma[1],'r*',output.loc[output.index < 4,'observationHigh']-2*observationSigma[1],'r*')
plt.plot(output.loc[output.index < 4,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFHigh1step.eps')
plt.show()
```
```python
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleHigh')],'k')
plt.plot(output.loc[output.index < 6,'observationHigh'],'r.',output.loc[:,'observationHigh']+2*observationSigma[1],'r*',output.loc[:,'observationHigh']-2*observationSigma[1],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFHigh.eps')
plt.show()
```
```python
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleLow')],'k')
plt.plot(output.loc[output.index < 6,'observationLow'],'r.',output.loc[:,'observationLow']+2*observationSigma[0],'r*',output.loc[:,'observationLow']-2*observationSigma[0],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFLow.eps')
plt.show()
```
```python
plt.plot(output.loc[output.index < 6,output.columns.str.startswith('ensembleLow1')],'k')
plt.plot(output.loc[output.index < 6,'observationLow'],'r.',output.loc[:,'observationLow']+2*observationSigma[0],'r*',output.loc[:,'observationLow']-2*observationSigma[0],'r*')
plt.plot(output.loc[output.index < 6,'truth'],'r')
plt.xlim([0,6])
plt.ylim([-10,15])
plt.xlabel('time')
plt.savefig('EnKFLow10ens.eps')
plt.show()
```
<div class="cite2c-biblio"></div>
|
theory Failures_TickTock
imports
Failures_BasicOps
TickTock.TickTock
begin
text \<open> In calculating the failures, we drop tock events, both in the trace
and the refusals? We could still include it as part of the refusals
considering it as a regular event.. but that's probably unnecessary? \<close>
primrec ttevt2F :: "'e evt \<Rightarrow> 'e ttevent" where
"ttevt2F (evt a) = Event a" |
"ttevt2F tick = Tick"
lemma
"ttevt2F`(A \<union> B) = ttevt2F`A \<union> ttevt2F`B"
by auto
lemma ttevt2F_evt_set:
"ttevt2F`evt ` A = (Event`A)"
by (auto simp add: image_iff)
fun tt2T :: "'a tttrace \<Rightarrow> 'a trace" where
"tt2T [[Tick]\<^sub>E] = [tick]" |
"tt2T ([Event e]\<^sub>E # \<sigma>) = evt e # tt2T \<sigma>" |
"tt2T \<sigma> = []"
lemma tt2T_tocks_simp [simp]:
assumes "\<rho> \<in> tocks P" "\<rho> \<noteq> []"
shows "tt2T (\<rho> @ \<sigma>) = []"
using assms
using tocks.simps by fastforce
lemma tt2T_empty_concat [simp]:
assumes "\<rho> = []"
shows "tt2T (\<rho> @ \<sigma>) = tt2T \<sigma>"
using assms by auto
fun tt2F :: "'a tttrace \<Rightarrow> 'a failure option" where
"tt2F [[X]\<^sub>R] = Some ([],{x. ttevt2F x \<in> X})" |
"tt2F ([Event e]\<^sub>E # \<sigma>) = (case (tt2F \<sigma>) of (Some fl) \<Rightarrow> Some (evt e # fst fl,snd fl) | None \<Rightarrow> None)" |
"tt2F \<sigma> = None"
text \<open> Below is an attempt at breaking down the definition of tt2F over concatenations. \<close>
fun tt2Fconcat :: "'a failure option \<Rightarrow> 'a failure option \<Rightarrow> 'a failure option" (infix "@\<^sub>F" 56) where
"None @\<^sub>F x = None" |
"x @\<^sub>F None = None" |
"(Some fl1) @\<^sub>F (Some fl2) = Some (fst fl1 @ fst fl2,snd fl2)"
lemma tt2F_Event_dist_tt2Fconcat:
"tt2F ([Event x1]\<^sub>E # x) = Some([evt x1],Z) @\<^sub>F tt2F(x)"
apply (induct x rule:tt2F.induct, auto)
by (simp add: option.case_eq_if)
lemma tt2Fconcat_assoc:
"x @\<^sub>F (y @\<^sub>F z) = (x @\<^sub>F y) @\<^sub>F z"
apply (induct x, auto)
apply (induct y, auto)
by (induct z, auto)
lemma tt2F_ev_neq_None:
assumes "tt2F ([ev]\<^sub>E # x) \<noteq> None"
shows "tt2F x \<noteq> None"
using assms
apply (cases ev, auto)
by (smt option.exhaust option.simps(4) surj_pair)
lemma tt2F_dist_tt2Fcontact:
assumes "set x \<inter> {[X]\<^sub>R | X. True} = {}" "(tt2F x) \<noteq> None" "ttWF(x@y)"
shows "tt2F (x@y) = (tt2F x) @\<^sub>F (tt2F y)"
using assms
proof (induct x)
case Nil
then show ?case by auto
next
case (Cons a x)
then show ?case
proof (cases a)
case (ObsEvent ev)
then have "tt2F x \<noteq> None"
using Cons.prems(2) tt2F_ev_neq_None by blast
then have tt2F_xy:"tt2F (x @ y) = tt2F x @\<^sub>F tt2F y"
using Cons ObsEvent
by (smt Cons.hyps Cons.prems Cons.prems(2) Set.is_empty_def append_Cons empty_set insert_disjoint(1) is_empty_set list.inject list.simps(15) null_rec(1) ttWF.elims(2) ttWF.simps(1) ttobs.distinct(1))
then show ?thesis
proof (cases ev)
case (Event x1)
then obtain Z where "tt2F ([Event x1]\<^sub>E # (x @ y)) = Some([evt x1],Z) @\<^sub>F tt2F(x @ y)"
using tt2F_Event_dist_tt2Fconcat by force
then have "Some([evt x1],Z) @\<^sub>F tt2F(x @ y) = Some([evt x1],Z) @\<^sub>F ((tt2F x) @\<^sub>F (tt2F y))"
using tt2F_xy by simp
then show ?thesis
proof (cases "tt2F x = None")
case True
then show ?thesis
using Event ObsEvent tt2F_xy by auto
next
case False
then show ?thesis
by (metis Cons_eq_appendI Event ObsEvent tt2F_Event_dist_tt2Fconcat tt2F_xy tt2Fconcat_assoc)
qed
next
case Tock
then show ?thesis
using Cons.prems(2) ObsEvent by auto
next
case Tick
then show ?thesis
by (metis Cons.prems(2) Nil_is_append_conv ObsEvent append_Cons list.exhaust tt2F.simps(3) tt2F.simps(5) tt2Fconcat.simps(1) ttWF.simps(10))
qed
next
case (Ref x2)
then show ?thesis
using Cons.prems(1) by auto
qed
qed
lemma tt2F_refusal_eq:
assumes "tt2F [[X]\<^sub>R] = tt2F [[Y]\<^sub>R]" "Tock \<in> X \<longleftrightarrow> Tock \<in> Y"
shows "[[X]\<^sub>R] = [[Y]\<^sub>R]"
using assms apply auto
by (metis mem_Collect_eq ttevent.exhaust ttevt2F.simps(1) ttevt2F.simps(2))+
lemma tt2F_eq_eqsets_or_Tock:
assumes "(\<forall>e. (e \<in> X) = (e \<in> Y) \<or> e = Tock)"
shows "tt2F [[X]\<^sub>R] = tt2F [[Y]\<^sub>R]"
using assms apply auto
by (metis evt.exhaust ttevent.distinct(1) ttevent.distinct(5) ttevt2F.simps(1) ttevt2F.simps(2))+
lemma tt2F_some_exists:
assumes "Some ([], b) = tt2F \<sigma>"
shows "\<exists>X. \<sigma> = [[X]\<^sub>R]"
using assms apply (cases \<sigma> rule:tt2F.cases, auto)
by (metis (no_types, lifting) Pair_inject list.simps(3) not_Some_eq option.case(1) option.inject option.simps(5))
lemma tt2F_tocks_simp [simp]:
assumes "\<rho> \<in> tocks P" "\<rho> \<noteq> []"
shows "tt2F (\<rho> @ \<sigma>) = None"
using assms
using tocks.simps by fastforce
lemma tt2F_refusal_without_Tock: "tt2F [[X]\<^sub>R] = tt2F [[X-{Tock}]\<^sub>R]"
apply auto
by (metis evt.exhaust ttevent.distinct(1) ttevent.distinct(5) ttevt2F.simps(1) ttevt2F.simps(2))
lemma tt2F_refusal_no_Tock: "tt2F [[X\<union>{Tock}]\<^sub>R] = tt2F [[X]\<^sub>R]"
apply auto
by (metis evt.exhaust ttevent.distinct(1) ttevent.distinct(5) ttevt2F.simps(1) ttevt2F.simps(2))
text \<open> The function mapping tick-tock processes to failures is then defined as follows. \<close>
definition ttproc2F :: "'a ttprocess \<Rightarrow> 'a process" where
"ttproc2F P = ({(s,X). \<exists>y. Some (s,X) = tt2F y \<and> y \<in> P},{s. \<exists>y. s = tt2T y \<and> y \<in> P})"
lemma Some_tt2F_set:
"Some ([], b) = tt2F [[{y. \<exists>x. y = ttevt2F x \<and> x \<in> b}]\<^sub>R]"
apply auto
by (metis evt.exhaust ttevent.distinct(3) ttevent.inject ttevt2F.simps(1) ttevt2F.simps(2))
lemma TT1_subset_single_ref:
assumes "TT1 P" "[[X]\<^sub>R] \<in> P"
shows "[[X-Y]\<^sub>R] \<in> P"
proof -
have "X-Y \<subseteq> X" by auto
then have "[[X-Y]\<^sub>R] \<lesssim>\<^sub>C [[X]\<^sub>R]"
by auto
then show ?thesis
using assms unfolding TT1_def by blast
qed
lemma
shows "tt2T ([Event x]\<^sub>E # ys) = (tt2T [[Event x]\<^sub>E]) @ (tt2T ys)"
by auto
lemma Some_tt2F_imp_tt2T:
assumes "Some (a, b) = tt2F y"
shows "tt2T y = a"
using assms apply (induct a y arbitrary:b rule:list_induct2', auto)
using tt2F_some_exists tt2T.simps(5) apply blast
apply (case_tac ya, auto, case_tac x1, auto)
apply (metis (mono_tags, lifting) Pair_inject list.inject option.case_eq_if option.inject option.simps(3))
apply (smt Pair_inject list.inject option.case_eq_if option.collapse option.inject option.simps(3) prod.collapse)
by (metis neq_Nil_conv not_Some_eq option.inject prod.inject tt2F.simps(1) tt2F.simps(8))
lemma tt2F_None_merge_traces:
assumes "([] \<lbrakk>A\<rbrakk>\<^sup>T\<^sub>C q) \<noteq> {}"
shows "tt2F`([] \<lbrakk>A\<rbrakk>\<^sup>T\<^sub>C q) = {None}"
using assms apply (induct q arbitrary:A rule:ttWF.induct, auto)
apply (metis (no_types, lifting) Set.set_insert equals0D image_insert insertI1 option.case_eq_if singletonD)
by (metis (mono_tags, lifting) equals0D image_eqI mem_Collect_eq option.simps(4) singleton_iff tt2F.simps(2))
lemma tt2F_None_merge_traces':
assumes "y \<in> ([] \<lbrakk>A\<rbrakk>\<^sup>T\<^sub>C q)"
shows "tt2F y = None"
using assms tt2F_None_merge_traces by blast
lemma tt2F_ending_Event_eq_None:
"tt2F (xs @ [[Event e]\<^sub>E]) = None"
apply (induct xs, auto)
by (metis list.exhaust rotate1.simps(2) rotate1_is_Nil_conv tt2F.simps(8) tt2F_ev_neq_None ttobs.exhaust)
lemma ttWF_tt2F_last_refusal_concat:
assumes "ttWF (xs@[[R]\<^sub>R])" "[Tock]\<^sub>E \<notin> set xs"
shows "tt2F (xs@[[R]\<^sub>R]) = Some(tt2T xs,{x. ttevt2F x \<in> R})"
using assms apply (induct xs, auto)
apply (case_tac a, auto, case_tac x1, auto)
using ttWF.elims(2) apply auto[1]
by (smt append_eq_append_conv2 list.distinct(1) list.inject list.set_intros(1) same_append_eq ttWF.elims(2) tt_prefix.elims(2) tt_prefix_concat ttobs.distinct(1))
lemma Some_tt2F_no_Tock:
assumes "Some (s, Y) = tt2F y"
shows "[Tock]\<^sub>E \<notin> set y"
using assms apply(induct y arbitrary:s Y, auto)
apply (case_tac a, auto)
apply (smt option.collapse option.simps(4) prod.collapse tt2F.simps(2) tt2F.simps(4) tt2F.simps(5) ttevent.exhaust)
by (metis list.set_cases option.distinct(1) tt2F.simps(8))
lemma Some_tt2F_no_Tick:
assumes "Some (s, Y) = tt2F y"
shows "[Tick]\<^sub>E \<notin> set y"
using assms apply(induct y arbitrary:s Y, auto)
apply (case_tac a, auto)
apply (smt option.collapse option.simps(4) prod.collapse tt2F.simps(2) tt2F.simps(4) tt2F.simps(5) ttevent.exhaust)
by (metis list.set_cases option.distinct(1) tt2F.simps(8))
lemma some_tt2F_ref_trace:
assumes "Some (s, Y) = tt2F y" "ttWF y"
shows "\<exists>ys R. y = ys@[[R]\<^sub>R] \<and> Y = {x. ttevt2F x \<in> R} \<and> tt2T ys = s"
using assms
proof (induct y rule:rev_induct)
case Nil
then show ?case by auto
next
case (snoc x xs)
then show ?case
proof (cases x)
case (ObsEvent ev)
then show ?thesis
proof (cases ev)
case (Event x1)
then have "tt2F (xs @ [x]) = None"
using ObsEvent snoc
by (simp add: tt2F_ending_Event_eq_None)
then show ?thesis
using snoc.prems(1) by auto
next
case Tock
then show ?thesis
using ObsEvent Some_tt2F_no_Tock snoc.prems(1) by fastforce
next
case Tick
then show ?thesis
using ObsEvent Some_tt2F_no_Tick snoc.prems(1) by fastforce
qed
next
case (Ref x2)
then have "[Tock]\<^sub>E \<notin> set xs"
by (metis Some_tt2F_no_Tock Un_iff set_append snoc.prems(1))
then show ?thesis using ttWF_tt2F_last_refusal_concat assms
by (metis Ref old.prod.inject option.inject snoc.prems(1) snoc.prems(2))
qed
qed
lemma Some_tt2F_imp_tt2T':
assumes "Some (a, b) = tt2F y"
shows "tt2T y = a"
using assms apply (induct a y arbitrary:b rule:list_induct2', auto)
using tt2F_some_exists tt2T.simps(5) apply blast
apply (case_tac ya, auto, case_tac x1, auto)
apply (metis (mono_tags, lifting) Pair_inject list.inject option.case_eq_if option.inject option.simps(3))
apply (smt Pair_inject list.inject option.case_eq_if option.collapse option.inject option.simps(3) prod.collapse)
by (metis neq_Nil_conv not_Some_eq option.inject prod.inject tt2F.simps(1) tt2F.simps(8))
lemma tocks_Some_prefix_tt2F:
assumes "x\<in>tocks P" "x \<le>\<^sub>C y" "Some (a, b) = tt2F y"
shows "x = []"
using assms
apply (induct y rule:tt2F.induct, auto)
using tocks.simps apply fastforce
using tt2F_tocks_simp tt_prefix_decompose by fastforce
lemma Some_tt2F_tail:
assumes "Some (a # s, b) = tt2F y"
shows "Some (s,b) = tt2F (tl y)"
using assms apply (induct y arbitrary:a b, auto)
apply (case_tac aa, auto)
apply (case_tac x1, auto)
apply (metis (no_types, lifting) Pair_inject list.inject option.case_eq_if option.expand option.sel option.simps(3) prod.collapse)
using Some_tt2F_imp_tt2T' by fastforce
lemma Some_no_tt2F_tick:
assumes "Some (a # s, b) = tt2F y"
shows "a \<noteq> tick"
using assms apply (induct y arbitrary:s b, auto)
apply (case_tac aa, auto)
apply (case_tac x1, auto)
apply (metis Some_tt2F_imp_tt2T' evt.distinct(1) list.sel(1) tt2F.simps(2) tt2T.simps(2))
using Some_tt2F_imp_tt2T' by fastforce
lemma Some_tt2F_exists_filter:
assumes "Some (s, b) = tt2F y"
shows "\<exists>z. Some (filter (\<lambda>e. e \<notin> X) s, b) = tt2F z"
using assms
proof (induct s arbitrary:b y X)
case Nil
then show ?case by auto
next
case (Cons a s)
then obtain z where z:"Some (filter (\<lambda>e. e \<notin> X) s, b) = tt2F z"
using Some_tt2F_tail by blast
then show ?case using Cons
proof (cases a)
case tick
then have "a \<noteq> tick"
using Cons Some_no_tt2F_tick by blast
then show ?thesis
using tick by auto
next
case (evt x2)
then show ?thesis
proof (cases "evt x2 \<in> X")
case True
then show ?thesis
using Cons.hyps Cons.prems Some_tt2F_tail evt by fastforce
next
case False
then have "filter (\<lambda>e. e \<notin> X) (a # s) = (a # filter (\<lambda>e. e \<notin> X) s)"
using evt by auto
then have "Some ((evt x2 # filter (\<lambda>e. e \<notin> X) s), b) = tt2F ([Event x2]\<^sub>E # z)"
apply auto
by (metis (no_types, lifting) fst_conv option.simps(5) snd_conv z)
then show ?thesis
by (metis \<open>filter (\<lambda>e. e \<notin> X) (a # s) = a # filter (\<lambda>e. e \<notin> X) s\<close> evt)
qed
qed
qed
lemma Some_tt2T_exists_filter:
assumes "Some (s, b) = tt2F y"
shows "\<exists>z. tt2T z = filter (\<lambda>e. e \<notin> X) s \<and> z \<noteq> []"
using assms
proof (induct s arbitrary:b y X)
case Nil
then show ?case
apply auto
using tt2T.simps(5) by blast
next
case (Cons a s)
then obtain c z where cz:"Some (s, c) = tt2F z"
using Cons
apply (induct y, auto)
using Some_tt2F_tail by blast
then obtain z2 where z2:"tt2T z2 = filter (\<lambda>e. e \<notin> X) s"
using Cons
by blast
then show ?case
proof (cases a)
case tick
then have "a \<noteq> tick"
using Cons Some_no_tt2F_tick by blast
then show ?thesis
using tick by auto
next
case (evt x2)
then show ?thesis
by (metis Cons.hyps \<open>\<And>thesis. (\<And>c z. Some (s, c) = tt2F z \<Longrightarrow> thesis) \<Longrightarrow> thesis\<close> filter.simps(2) list.distinct(1) tt2T.simps(2))
qed
qed
lemma filter_empty_iff:
"filter (\<lambda>e. e \<notin> HS) s = [] \<longleftrightarrow> (s = [] \<or> set s \<subseteq> HS)"
apply auto
by (auto simp add: filter_empty_conv)+
lemma Some_tt2F_event_tl:
assumes "Some (s, X) = tt2F ([Event e]\<^sub>E # t)"
shows "Some(tl s,X) = tt2F t"
using assms apply (induct t arbitrary:e X, auto)
by (metis (no_types, lifting) list.sel(3) option.case_eq_if option.distinct(1) option.expand option.sel prod.collapse prod.inject)
lemma tt2T_tl_evt:
assumes "tt2T z = (evt e # xs)"
shows "tt2T (tl z) = xs"
using assms apply (induct z, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
using tt2T.elims by auto
lemma tt2T_hd_evt:
assumes "tt2T z = (evt e # xs)"
shows "hd z = [Event e]\<^sub>E"
using assms apply (induct z, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
using tt2T.elims by auto
lemma Some_tt2F_concat_refusal:
assumes "Some (s, X) = tt2F y"
shows "\<exists>xs R. y = xs@[[R]\<^sub>R] \<and> tt2T xs = s \<and> X = {x. ttevt2F x \<in> R} \<and> [Tock]\<^sub>E \<notin> set xs \<and> ttWF(xs@[[R]\<^sub>R])"
using assms
proof (induct y arbitrary:s X rule:tt2F.induct)
case (1 X)
then show ?case by auto
next
case (2 e \<sigma>)
then obtain t Z where s_R:"Some (t, Z) = tt2F \<sigma>"
apply auto
by (meson "2.prems" Some_tt2F_event_tl)
then have "\<exists>xs R. \<sigma> = xs @ [[R]\<^sub>R] \<and> tt2T xs = t \<and> Z = {x. ttevt2F x \<in> R} \<and> [Tock]\<^sub>E \<notin> set xs \<and> ttWF(xs@[[R]\<^sub>R])"
using 2 by auto
then have "\<exists>xs R. [Event e]\<^sub>E # \<sigma> = [Event e]\<^sub>E # xs @ [[R]\<^sub>R] \<and> tt2T ([Event e]\<^sub>E # xs @ [[R]\<^sub>R]) = evt e # t \<and> Z = {x. ttevt2F x \<in> R} \<and> [Tock]\<^sub>E \<notin> set ([Event e]\<^sub>E # xs) \<and> ttWF ([Event e]\<^sub>E # xs @ [[R]\<^sub>R])"
apply auto
using ttWF_prefix_is_ttWF Some_tt2F_imp_tt2T' s_R by blast
then show ?case
proof -
obtain tts :: "'a ttobs list" and TT :: "'a ttevent set" where
f1: "[Event e]\<^sub>E # \<sigma> = [Event e]\<^sub>E # tts @ [[TT]\<^sub>R] \<and> tt2T ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) = evt e # t \<and> Z = {e. ttevt2F e \<in> TT} \<and> [Tock]\<^sub>E \<notin> set ([Event e]\<^sub>E # tts) \<and> ttWF ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R])"
using \<open>\<exists>xs R. [Event e]\<^sub>E # \<sigma> = [Event e]\<^sub>E # xs @ [[R]\<^sub>R] \<and> tt2T ([Event e]\<^sub>E # xs @ [[R]\<^sub>R]) = evt e # t \<and> Z = {x. ttevt2F x \<in> R} \<and> [Tock]\<^sub>E \<notin> set ([Event e]\<^sub>E # xs) \<and> ttWF ([Event e]\<^sub>E # xs @ [[R]\<^sub>R])\<close> by blast
have f2: "\<forall>es E ts. (Some (es, E) \<noteq> tt2F ts \<or> \<not> ttWF ts) \<or> (\<exists>tsa T. ts = tsa @ [[T]\<^sub>R] \<and> E = {e. ttevt2F (e::'a evt) \<in> T} \<and> tt2T tsa = es)"
by (simp add: some_tt2F_ref_trace)
obtain ttsa :: "'a ttobs list \<Rightarrow> 'a evt set \<Rightarrow> 'a evt list \<Rightarrow> 'a ttobs list" and TTa :: "'a ttobs list \<Rightarrow> 'a evt set \<Rightarrow> 'a evt list \<Rightarrow> 'a ttevent set" where
"\<forall>x0 x1 x2. (\<exists>v3 v4. x0 = v3 @ [[v4]\<^sub>R] \<and> x1 = {uua. ttevt2F uua \<in> v4} \<and> tt2T v3 = x2) = (x0 = ttsa x0 x1 x2 @ [[TTa x0 x1 x2]\<^sub>R] \<and> x1 = {uua. ttevt2F uua \<in> TTa x0 x1 x2} \<and> tt2T (ttsa x0 x1 x2) = x2)"
by moura
then have f3: "[Event e]\<^sub>E # tts @ [[TT]\<^sub>R] = ttsa ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) X s @ [[TTa ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) X s]\<^sub>R] \<and> X = {ea. ttevt2F ea \<in> TTa ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) X s} \<and> tt2T (ttsa ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) X s) = s"
using f2 f1 "2.prems" by presburger
then have "[Tock]\<^sub>E \<notin> set (ttsa ([Event e]\<^sub>E # tts @ [[TT]\<^sub>R]) X s)"
using f1 by simp
then show ?thesis
using f3 f1 by metis
qed
next
case "3_1"
then show ?case by auto
next
case ("3_2" va)
then show ?case by auto
next
case ("3_3" va)
then show ?case by auto
next
case ("3_4" vb vc)
then show ?case by auto
next
case ("3_5" vb vc)
then show ?case by auto
next
case ("3_6" va vb vc)
then show ?case by auto
qed
lemma
assumes "Some (s, b) = tt2F (xs@[[X]\<^sub>R])"
shows "s = tt2T xs \<and> b = {x. ttevt2F x \<in> X}"
using assms
using Some_tt2F_concat_refusal by force
lemma tt2F_Some_concat_Nil:
assumes "[] = tt2T xs" "Some (s, b) = tt2F (xs@[[X]\<^sub>R])"
shows "xs = []"
using assms
by (induct xs rule:ttWF.induct, auto)
lemma ttWF_Some_tt2F:
assumes "ttWF (xs@[[X]\<^sub>R])" "[Tock]\<^sub>E \<notin> set xs"
shows "Some (tt2T xs, {x. ttevt2F x \<in> X}) = tt2F (xs@[[X]\<^sub>R])"
using assms
apply (induct xs, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (smt fst_conv option.simps(5) snd_conv)
apply (metis list.exhaust_sel option.distinct(1) tt2F.simps(3) ttWF.simps(1) ttWF.simps(8))
by (case_tac xsa, auto, case_tac a, auto, case_tac x1, auto)
lemma Some_tt2F_subset:
assumes "Some (s, b \<union> HS) = tt2F y"
shows "\<exists>z. Some (s, b) = tt2F z \<and> z \<lesssim>\<^sub>C y"
proof -
obtain xs X where xs_X:"y = xs@[[X]\<^sub>R] \<and> b \<union> HS = {x. ttevt2F x \<in> X} \<and> [Tock]\<^sub>E \<notin> set xs \<and> ttWF(xs@[[X]\<^sub>R])"
using Some_tt2F_concat_refusal assms by blast
then have "ttevt2F`(b \<union> HS) \<subseteq> X"
by auto
then have "xs@[[ttevt2F`b]\<^sub>R] \<lesssim>\<^sub>C xs@[[X]\<^sub>R]"
apply auto
by (simp add: image_Un tt_prefix_common_concat)
then have "Some (tt2T xs, b \<union> HS) = tt2F (xs@[[X]\<^sub>R])"
apply auto
using Some_tt2F_concat_refusal assms xs_X by blast
have "ttWF (xs@[[ttevt2F`b]\<^sub>R])"
using \<open>xs @ [[ttevt2F ` b]\<^sub>R] \<lesssim>\<^sub>C xs @ [[X]\<^sub>R]\<close> tt_prefix_subset_ttWF xs_X by blast
have Tock_not_in_xs_b:"[Tock]\<^sub>E \<notin> set (xs@[[ttevt2F`b]\<^sub>R])"
by (simp add: xs_X)
have b_ttevt2F:"b = {x. ttevt2F x \<in> ttevt2F`b}"
using Some_tt2F_set by fastforce
then have "Some (tt2T xs, b) = tt2F (xs@[[ttevt2F`b]\<^sub>R])"
using Tock_not_in_xs_b ttWF_Some_tt2F b_ttevt2F
using \<open>ttWF (xs @ [[ttevt2F ` b]\<^sub>R])\<close> by fastforce
then show ?thesis
by (metis Pair_inject \<open>Some (tt2T xs, b \<union> HS) = tt2F (xs @ [[X]\<^sub>R])\<close> \<open>xs @ [[ttevt2F ` b]\<^sub>R] \<lesssim>\<^sub>C xs @ [[X]\<^sub>R]\<close> assms option.inject xs_X)
qed
lemma Some_no_tick_trace[simp]:
assumes "Some (a, b) = tt2F y"
shows "tick \<notin> set a"
using assms apply (induct a arbitrary:b y, auto)
using Some_no_tt2F_tick apply blast
using Some_tt2F_tail by blast
lemma tt2T_concat_dist:
assumes "[Tick]\<^sub>E \<notin> set s" "[Tock]\<^sub>E \<notin> set s" "\<not>(\<exists>R. [R]\<^sub>R \<in> set s)"
shows "tt2T (s @ t) = (tt2T s) @ (tt2T t)"
using assms apply (induct s arbitrary: t, auto)
apply (case_tac a, auto)
by (case_tac x1, auto)
lemma Some_tt2F_no_prev_refusals:
assumes "Some (a, b) = tt2F (s @ [[R]\<^sub>R])"
shows "\<not>(\<exists>R. [R]\<^sub>R \<in> set s)"
using assms apply (induct s arbitrary:a b R, auto)
apply (metis list.exhaust_sel option.distinct(1) snoc_eq_iff_butlast tt2F.simps(8))
by (metis (no_types, hide_lams) Some_tt2F_tail append_Cons append_Nil list.sel(3) neq_Nil_conv tt2F_some_exists)
lemma tt2T_tick_butlast:
assumes "s @ [tick] = tt2T y"
shows "tt2T (butlast y) = s"
using assms apply (induct y arbitrary:s, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (metis (no_types, lifting) append_eq_Cons_conv evt.distinct(1) list.inject)
by (metis list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma tt2T_tick_exists_Cons:
assumes "s @ [tick] = tt2T y"
shows "\<exists>z. z@[[Tick]\<^sub>E] = y"
using assms apply (induct y arbitrary:s, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (metis Cons_eq_append_conv evt.distinct(1) list.inject)
by (metis append_Nil list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma
assumes "s @ [tick] = tt2T (z @ [[Tick]\<^sub>E])"
shows "s = tt2T z"
using assms
using tt2T_tick_butlast by fastforce
lemma tick_tt2T_concat_TickE[intro?]:
assumes "[tick] = tt2T (za @ [[Tick]\<^sub>E])"
shows "za = []"
using assms apply (induct za, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
by (metis list.distinct(1) list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma Some_concat_extend:
assumes "Some (t, b) = tt2F ya" "[Tick]\<^sub>E \<notin> set z" "[Tock]\<^sub>E \<notin> set z" "\<not>(\<exists>R. [R]\<^sub>R \<in> set z)" (* *)
shows "Some (tt2T z @ t, b) = tt2F (z @ ya)"
using assms apply (induct z arbitrary:t ya b rule:tt2F.induct , auto)
by (smt fst_conv option.simps(5) snd_conv)
lemma tt2T_concat_Tick_no_Tick_set:
assumes "s @ [tick] = tt2T (z @ [[Tick]\<^sub>E])"
shows "[Tick]\<^sub>E \<notin> set z"
using assms apply (induct z arbitrary:s, auto)
apply (metis list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (metis append_Nil evt.distinct(1) list.sel(1) list.sel(3) tl_append2)
by (metis list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma tt2T_concat_Tick_no_Ref_set:
assumes "s @ [tick] = tt2T (z @ [[Tick]\<^sub>E])"
shows "\<not>(\<exists>R. [R]\<^sub>R \<in> set z)"
using assms apply (induct z arbitrary:s, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (metis append_Nil evt.distinct(1) list.sel(1) list.sel(3) tl_append2)
by (metis list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma tt2T_concat_Tick_no_Tock_set:
assumes "s @ [tick] = tt2T (z @ [[Tick]\<^sub>E])"
shows "[Tock]\<^sub>E \<notin> set z"
using assms apply (induct z arbitrary:s, auto)
apply (case_tac a, auto)
apply (case_tac x1, auto)
apply (metis append_Nil evt.distinct(1) list.sel(1) list.sel(3) tl_append2)
by (metis list.exhaust_sel snoc_eq_iff_butlast tt2T.simps(7))
lemma Some_concat_extend':
assumes "Some (t, b) = tt2F ya" "s @ [tick] = tt2T (z @ [[Tick]\<^sub>E])"
shows "Some (tt2T z @ t, b) = tt2F (z @ ya)"
using assms Some_concat_extend tt2T_concat_Tick_no_Tick_set tt2T_concat_Tick_no_Ref_set tt2T_concat_Tick_no_Tock_set
by blast
lemma Tick_no_eq:
assumes "[Tick]\<^sub>E \<notin> set y"
shows "\<forall>s. y \<noteq> s @ [[Tick]\<^sub>E]"
using assms by (induct y rule:rev_induct, auto)
lemma Tick_set_tt2T_in:
assumes "tick \<in> set (tt2T y)"
shows "[Tick]\<^sub>E \<in> set y"
using assms apply (induct y, auto)
apply (case_tac a, auto)
by (case_tac x1, auto)
lemma Tick_set_ends_in_Tick:
assumes "[Tick]\<^sub>E \<in> set y" "ttWF y"
shows "\<exists>xs. y = xs@[[Tick]\<^sub>E]"
using assms apply (induct y, auto)
using ttWF.elims(2) apply auto[1]
by (metis append_Cons append_Nil list.exhaust_sel split_list ttWF.simps(8) ttWF_dist_notTock_cons ttevent.distinct(5))
lemma Tock_in_trace_Tick_no_Tick:
assumes "[Tock]\<^sub>E \<in> set s" "ttWF (s @ [[Tick]\<^sub>E])"
shows "tick \<notin> set (tt2T (s @ t))"
using assms by (induct s rule:tt2T.induct, auto)
lemma Tock_in_trace_Refusal_no_Tick:
assumes "(\<exists>R. [R]\<^sub>R \<in> set s)" "ttWF (s @ [[Tick]\<^sub>E])"
shows "tick \<notin> set (tt2T (s @ t))"
using assms by (induct s rule:tt2T.induct, auto)
lemma Tock_in_concat_lhs:
assumes "[Tock]\<^sub>E \<in> set s"
shows "tt2T (s @ t) = tt2T s"
using assms by (induct s rule:tt2T.induct, auto)
lemma Ref_in_concat_lhs:
assumes "(\<exists>R. [R]\<^sub>R \<in> set s)"
shows "tt2T (s @ t) = tt2T s"
using assms by (induct s rule:tt2T.induct, auto)
fun F2tt_trace :: "'a failure \<Rightarrow> 'a tttrace set" where
"F2tt_trace ([], X) = {[[ttevt2F ` X]\<^sub>R], [[(ttevt2F ` X) \<union> {Tock}]\<^sub>R]}" |
"F2tt_trace (e # t, X) = {s. \<exists>s'. s = [ttevt2F e]\<^sub>E # s' \<and> s' \<in> F2tt_trace (t, X)}"
definition "F2tt" :: "'a process \<Rightarrow> 'a ttprocess" where
"F2tt P = \<Union>(F2tt_trace ` (fst P)) \<union> map (\<lambda>e. [ttevt2F e]\<^sub>E) ` (snd P)"
lemma F2tt_ttproc2F_no_tocks:
assumes P_no_tock: "\<forall>t\<in>P. [Tock]\<^sub>E \<notin> set t" and P_wf: "\<forall>x\<in>P. ttWF x" and TT1_P: "TT1 P" and TT2_P: "TT2 P"
shows "P = F2tt (ttproc2F P)"
unfolding F2tt_def ttproc2F_def image_def
proof auto
fix x :: "'a tttrace"
have "\<And>P. ttWF x \<Longrightarrow> [Tock]\<^sub>E \<notin> set x \<Longrightarrow> x \<in> P \<Longrightarrow> \<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> P) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> x \<notin> xa \<Longrightarrow>
\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> P) \<and> x = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
proof (induct x rule:ttWF.induct, auto)
fix P :: "'a ttprocess"
show "[] \<in> P \<Longrightarrow> \<exists>y. [] = tt2T y \<and> y \<in> P"
by (rule_tac x="[]" in exI, auto)
next
fix X and P :: "'a ttprocess"
show "[[X]\<^sub>R] \<in> P \<Longrightarrow>
\<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> P) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> [[X]\<^sub>R] \<notin> xa \<Longrightarrow>
\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> P) \<and> [[X]\<^sub>R] = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
apply (erule_tac x="{[[X\<union>{Tock}]\<^sub>R], [[{e\<in>X. e \<noteq> Tock}]\<^sub>R]}" in allE, auto)
proof (erule_tac x="[]" in allE, erule_tac x="{e. ttevt2F e \<in> X}" in allE, safe, simp_all)
have "insert Tock X = insert Tock (ttevt2F ` {e. ttevt2F e \<in> X})"
by (auto, smt image_eqI mem_Collect_eq ttevent.exhaust ttevt2F.simps(1) ttevt2F.simps(2))
then show "insert Tock X = ttevt2F ` {e. ttevt2F e \<in> X} \<or> insert Tock X = insert Tock (ttevt2F ` {e. ttevt2F e \<in> X})"
by auto
next
have "{e \<in> X. e \<noteq> Tock} = ttevt2F ` {e. ttevt2F e \<in> X}"
apply (auto, metis (no_types, lifting) image_iff mem_Collect_eq ttevent.exhaust ttevt2F.simps(1) ttevt2F.simps(2))
by (metis evt.exhaust ttevent.distinct(1) ttevent.distinct(5) ttevt2F.simps(1) ttevt2F.simps(2))
then show "{e \<in> X. e \<noteq> Tock} = ttevt2F ` {e. ttevt2F e \<in> X} \<or> {e \<in> X. e \<noteq> Tock} = insert Tock (ttevt2F ` {e. ttevt2F e \<in> X})"
by auto
next
fix x
assume "x = [[ttevt2F ` {e. ttevt2F e \<in> X}]\<^sub>R] \<or> x = [[insert Tock (ttevt2F ` {e. ttevt2F e \<in> X})]\<^sub>R]"
then show "x \<noteq> [[insert Tock X]\<^sub>R] \<Longrightarrow> x = [[{e \<in> X. e \<noteq> Tock}]\<^sub>R]"
by (auto, (smt image_iff mem_Collect_eq ttevent.exhaust ttevt2F.simps(1) ttevt2F.simps(2))+)
qed
next
fix e \<sigma> and P :: "'a ttprocess"
assume case_assms: "ttWF \<sigma>" "[Event e]\<^sub>E # \<sigma> \<in> P"
assume ind_hyp: "\<And>P. \<sigma> \<in> P \<Longrightarrow>
\<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> P) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> \<sigma> \<notin> xa \<Longrightarrow>
\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> P) \<and> \<sigma> = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
assume "\<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> P) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> [Event e]\<^sub>E # \<sigma> \<notin> xa"
then have "\<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> {t. [Event e]\<^sub>E # t \<in> P}) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> \<sigma> \<notin> xa"
apply (auto, erule_tac x="F2tt_trace (evt e # a, b)" in allE, auto)
apply (erule_tac x="evt e # a" in allE, erule_tac x=b in allE, auto)
by (erule_tac x="[Event e]\<^sub>E # y" in allE, auto, case_tac "tt2F y", auto)
then have "\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> {t. [Event e]\<^sub>E # t \<in> P}) \<and> \<sigma> = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
using ind_hyp[where P="{t. [Event e]\<^sub>E # t \<in> P}"] case_assms by auto
then show "\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> P) \<and> [Event e]\<^sub>E # \<sigma> = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
by auto
qed
then show "x \<in> P \<Longrightarrow>
\<forall>xa. (\<forall>a b. (\<forall>y. Some (a, b) = tt2F y \<longrightarrow> y \<notin> P) \<or> xa \<noteq> F2tt_trace (a, b)) \<or> x \<notin> xa \<Longrightarrow>
\<exists>xa. (\<exists>y. xa = tt2T y \<and> y \<in> P) \<and> x = map (\<lambda>e. [ttevt2F e]\<^sub>E) xa"
by (simp add: P_no_tock P_wf)
next
fix a b and x y :: "'a tttrace"
have "\<And> P x a. ttWF y \<Longrightarrow> y @ [[Tock]\<^sub>E] \<notin> P \<Longrightarrow> TT1 P \<Longrightarrow> TT2 P \<Longrightarrow> x \<in> F2tt_trace (a, b) \<Longrightarrow>
Some (a, b) = tt2F y \<Longrightarrow> y \<in> P \<Longrightarrow> x \<in> P"
proof (induct y rule:ttWF.induct, auto)
fix X and P :: "'a ttprocess"
show "TT1 P \<Longrightarrow> [[X]\<^sub>R] \<in> P \<Longrightarrow> [[ttevt2F ` {x. ttevt2F x \<in> X}]\<^sub>R] \<in> P"
unfolding TT1_def apply auto
by (metis (no_types, lifting) image_Collect_subsetI tt_prefix_subset.simps(1) tt_prefix_subset.simps(2))
next
fix X and P :: "'a ttprocess"
assume "TT2 P" "[[X]\<^sub>R] \<in> P" "[[X]\<^sub>R, [Tock]\<^sub>E] \<notin> P"
then have "[[insert Tock X]\<^sub>R] \<in> P"
unfolding TT2_def
apply (erule_tac x="[]" in allE, erule_tac x="[]" in allE)
by (erule_tac x=X in allE, erule_tac x="{Tock}" in allE, auto)
also have "insert Tock (ttevt2F ` {x. ttevt2F x \<in> X}) = insert Tock X"
unfolding image_def by (auto, case_tac x, auto, metis ttevt2F.simps(1), metis ttevt2F.simps(2))
then show "[[insert Tock (ttevt2F ` {x. ttevt2F x \<in> X})]\<^sub>R] \<in> P"
using calculation by auto
next
fix e \<sigma> x a and P :: "'a ttprocess"
assume case_assms: "ttWF \<sigma>" "[Event e]\<^sub>E # \<sigma> @ [[Tock]\<^sub>E] \<notin> P" "TT1 P" "TT2 P" "x \<in> F2tt_trace (a, b)"
"Some (a, b) = (case tt2F \<sigma> of None \<Rightarrow> None | Some fl \<Rightarrow> Some (evt e # fst fl, snd fl))" "[Event e]\<^sub>E # \<sigma> \<in> P"
assume ind_hyp: "\<And>P x a. \<sigma> @ [[Tock]\<^sub>E] \<notin> P \<Longrightarrow> TT1 P \<Longrightarrow> TT2 P \<Longrightarrow> x \<in> F2tt_trace (a, b) \<Longrightarrow> Some (a, b) = tt2F \<sigma> \<Longrightarrow> \<sigma> \<in> P \<Longrightarrow> x \<in> P"
obtain a' where a'_assms: "Some (a', b) = tt2F \<sigma> \<and> a = evt e # a'"
using case_assms(6) by (cases "tt2F \<sigma>", safe, simp_all)
obtain x' where x'_assms: "x = [Event e]\<^sub>E # x' \<and> x' \<in> F2tt_trace (a', b)"
using case_assms(5) a'_assms by auto
thm ind_hyp[where P="{t. [Event e]\<^sub>E # t \<in> P}", where x=x, where a=a']
have 1: "\<sigma> @ [[Tock]\<^sub>E] \<notin> {t. [Event e]\<^sub>E # t \<in> P}"
using case_assms(2) by blast
have 2: "TT1 {t. [Event e]\<^sub>E # t \<in> P}"
by (simp add: TT1_init_event case_assms(3))
have 3: "TT2 {t. [Event e]\<^sub>E # t \<in> P}"
by (simp add: TT2_init_event case_assms(4))
have "x' \<in> {t. [Event e]\<^sub>E # t \<in> P}"
using ind_hyp[where P="{t. [Event e]\<^sub>E # t \<in> P}"] 1 2 3 case_assms x'_assms a'_assms by auto
then show "x \<in> P"
using x'_assms by blast
qed
then show "x \<in> F2tt_trace (a, b) \<Longrightarrow> Some (a, b) = tt2F y \<Longrightarrow> y \<in> P \<Longrightarrow> x \<in> P"
by (meson P_no_tock P_wf TT1_P TT2_P in_set_conv_decomp)
next
fix y :: "'a tttrace"
have "ttWF y \<Longrightarrow> map (\<lambda>e. [ttevt2F e]\<^sub>E) (tt2T y) \<lesssim>\<^sub>C y"
by (induct y rule:ttWF.induct, auto)
then show "y \<in> P \<Longrightarrow> map (\<lambda>e. [ttevt2F e]\<^sub>E) (tt2T y) \<in> P"
using P_wf TT1_P TT1_def by blast
qed
lemma ttproc2F_eq_no_tocks_imp_F2tt_eq:
assumes "ttproc2F P = Q"
assumes "\<forall>t\<in>P. [Tock]\<^sub>E \<notin> set t" "\<forall>x\<in>P. ttWF x" "TT1 P" "TT2 P"
shows "P = F2tt Q"
using assms F2tt_ttproc2F_no_tocks by auto
end |
{-# OPTIONS --allow-unsolved-metas #-}
module FLutil where
open import Level hiding ( suc ; zero )
open import Data.Fin hiding ( _<_ ; _≤_ ; _-_ ; _+_ ; _≟_)
open import Data.Fin.Properties hiding ( <-trans ; ≤-refl ; ≤-trans ; ≤-irrelevant ; _≟_ ) renaming ( <-cmp to <-fcmp )
open import Data.Fin.Permutation -- hiding ([_,_])
open import Data.Nat -- using (ℕ; suc; zero; s≤s ; z≤n )
open import Data.Nat.Properties as DNP
open import Relation.Binary.PropositionalEquality hiding ( [_] )
open import Data.List using (List; []; _∷_ ; length ; _++_ ; tail ) renaming (reverse to rev )
open import Data.Product
open import Relation.Nullary
open import Data.Empty
open import Relation.Binary.Core
open import Relation.Binary.Definitions
open import logic
open import nat
infixr 100 _::_
data FL : (n : ℕ )→ Set where
f0 : FL 0
_::_ : { n : ℕ } → Fin (suc n ) → FL n → FL (suc n)
data _f<_ : {n : ℕ } (x : FL n ) (y : FL n) → Set where
f<n : {m : ℕ } {xn yn : Fin (suc m) } {xt yt : FL m} → xn Data.Fin.< yn → (xn :: xt) f< ( yn :: yt )
f<t : {m : ℕ } {xn : Fin (suc m) } {xt yt : FL m} → xt f< yt → (xn :: xt) f< ( xn :: yt )
FLeq : {n : ℕ } {xn yn : Fin (suc n)} {x : FL n } {y : FL n} → xn :: x ≡ yn :: y → ( xn ≡ yn ) × (x ≡ y )
FLeq refl = refl , refl
FLpos : {n : ℕ} → FL (suc n) → Fin (suc n)
FLpos (x :: _) = x
f-<> : {n : ℕ } {x : FL n } {y : FL n} → x f< y → y f< x → ⊥
f-<> (f<n x) (f<n x₁) = nat-<> x x₁
f-<> (f<n x) (f<t lt2) = nat-≡< refl x
f-<> (f<t lt) (f<n x) = nat-≡< refl x
f-<> (f<t lt) (f<t lt2) = f-<> lt lt2
f-≡< : {n : ℕ } {x : FL n } {y : FL n} → x ≡ y → y f< x → ⊥
f-≡< refl (f<n x) = nat-≡< refl x
f-≡< refl (f<t lt) = f-≡< refl lt
FLcmp : {n : ℕ } → Trichotomous {Level.zero} {FL n} _≡_ _f<_
FLcmp f0 f0 = tri≈ (λ ()) refl (λ ())
FLcmp (xn :: xt) (yn :: yt) with <-fcmp xn yn
... | tri< a ¬b ¬c = tri< (f<n a) (λ eq → nat-≡< (cong toℕ (proj₁ (FLeq eq)) ) a) (λ lt → f-<> lt (f<n a) )
... | tri> ¬a ¬b c = tri> (λ lt → f-<> lt (f<n c) ) (λ eq → nat-≡< (cong toℕ (sym (proj₁ (FLeq eq)) )) c) (f<n c)
... | tri≈ ¬a refl ¬c with FLcmp xt yt
... | tri< a ¬b ¬c₁ = tri< (f<t a) (λ eq → ¬b (proj₂ (FLeq eq) )) (λ lt → f-<> lt (f<t a) )
... | tri≈ ¬a₁ refl ¬c₁ = tri≈ (λ lt → f-≡< refl lt ) refl (λ lt → f-≡< refl lt )
... | tri> ¬a₁ ¬b c = tri> (λ lt → f-<> lt (f<t c) ) (λ eq → ¬b (proj₂ (FLeq eq) )) (f<t c)
f<-trans : {n : ℕ } { x y z : FL n } → x f< y → y f< z → x f< z
f<-trans {suc n} (f<n x) (f<n x₁) = f<n ( Data.Fin.Properties.<-trans x x₁ )
f<-trans {suc n} (f<n x) (f<t y<z) = f<n x
f<-trans {suc n} (f<t x<y) (f<n x) = f<n x
f<-trans {suc n} (f<t x<y) (f<t y<z) = f<t (f<-trans x<y y<z)
infixr 250 _f<?_
_f<?_ : {n : ℕ} → (x y : FL n ) → Dec (x f< y )
x f<? y with FLcmp x y
... | tri< a ¬b ¬c = yes a
... | tri≈ ¬a refl ¬c = no ( ¬a )
... | tri> ¬a ¬b c = no ( ¬a )
_f≤_ : {n : ℕ } (x : FL n ) (y : FL n) → Set
_f≤_ x y = (x ≡ y ) ∨ (x f< y )
FL0 : {n : ℕ } → FL n
FL0 {zero} = f0
FL0 {suc n} = zero :: FL0
fmax : { n : ℕ } → FL n
fmax {zero} = f0
fmax {suc n} = fromℕ< a<sa :: fmax {n}
fmax< : { n : ℕ } → {x : FL n } → ¬ (fmax f< x )
fmax< {suc n} {x :: y} (f<n lt) = nat-≤> (fmax1 x) lt where
fmax1 : {n : ℕ } → (x : Fin (suc n)) → toℕ x ≤ toℕ (fromℕ< {n} a<sa)
fmax1 {zero} zero = z≤n
fmax1 {suc n} zero = z≤n
fmax1 {suc n} (suc x) = s≤s (fmax1 x)
fmax< {suc n} {x :: y} (f<t lt) = fmax< {n} {y} lt
fmax¬ : { n : ℕ } → {x : FL n } → ¬ ( x ≡ fmax ) → x f< fmax
fmax¬ {zero} {f0} ne = ⊥-elim ( ne refl )
fmax¬ {suc n} {x} ne with FLcmp x fmax
... | tri< a ¬b ¬c = a
... | tri≈ ¬a b ¬c = ⊥-elim ( ne b)
... | tri> ¬a ¬b c = ⊥-elim (fmax< c)
x≤fmax : {n : ℕ } → {x : FL n} → x f≤ fmax
x≤fmax {n} {x} with FLcmp x fmax
... | tri< a ¬b ¬c = case2 a
... | tri≈ ¬a b ¬c = case1 b
... | tri> ¬a ¬b c = ⊥-elim ( fmax< c )
open import Data.Nat.Properties using ( ≤-trans ; <-trans )
fsuc : { n : ℕ } → (x : FL n ) → x f< fmax → FL n
fsuc {n} (x :: y) (f<n lt) = fromℕ< fsuc1 :: y where
fsuc1 : suc (toℕ x) < n
fsuc1 = Data.Nat.Properties.≤-trans (s≤s lt) ( s≤s ( toℕ≤pred[n] (fromℕ< a<sa)) )
fsuc (x :: y) (f<t lt) = x :: fsuc y lt
open import fin
flist1 : {n : ℕ } (i : ℕ) → i < suc n → List (FL n) → List (FL n) → List (FL (suc n))
flist1 zero i<n [] _ = []
flist1 zero i<n (a ∷ x ) z = ( zero :: a ) ∷ flist1 zero i<n x z
flist1 (suc i) (s≤s i<n) [] z = flist1 i (Data.Nat.Properties.<-trans i<n a<sa) z z
flist1 (suc i) i<n (a ∷ x ) z = ((fromℕ< i<n ) :: a ) ∷ flist1 (suc i) i<n x z
flist : {n : ℕ } → FL n → List (FL n)
flist {zero} f0 = f0 ∷ []
flist {suc n} (x :: y) = flist1 n a<sa (flist y) (flist y)
FL1 : List ℕ → List ℕ
FL1 [] = []
FL1 (x ∷ y) = suc x ∷ FL1 y
FL→plist : {n : ℕ} → FL n → List ℕ
FL→plist {0} f0 = []
FL→plist {suc n} (zero :: y) = zero ∷ FL1 (FL→plist y)
FL→plist {suc n} (suc x :: y) with FL→plist y
... | [] = zero ∷ []
... | x1 ∷ t = suc x1 ∷ FL2 x t where
FL2 : {n : ℕ} → Fin n → List ℕ → List ℕ
FL2 zero y = zero ∷ FL1 y
FL2 (suc i) [] = zero ∷ []
FL2 (suc i) (x ∷ y) = suc x ∷ FL2 i y
tt0 = (# 2) :: (# 1) :: (# 0) :: zero :: f0
tt1 = FL→plist tt0
open _∧_
find-zero : {n i : ℕ} → List ℕ → i < n → Fin n ∧ List ℕ
find-zero [] i<n = record { proj1 = fromℕ< i<n ; proj2 = [] }
find-zero x (s≤s z≤n) = record { proj1 = fromℕ< (s≤s z≤n) ; proj2 = x }
find-zero (zero ∷ y) (s≤s (s≤s i<n)) = record { proj1 = fromℕ< (s≤s (s≤s i<n)) ; proj2 = y }
find-zero (suc x ∷ y) (s≤s (s≤s i<n)) with find-zero y (s≤s i<n)
... | record { proj1 = i ; proj2 = y1 } = record { proj1 = suc i ; proj2 = suc x ∷ y1 }
plist→FL : {n : ℕ} → List ℕ → FL n -- wrong implementation
plist→FL {zero} [] = f0
plist→FL {suc n} [] = zero :: plist→FL {n} []
plist→FL {zero} x = f0
plist→FL {suc n} x with find-zero x a<sa
... | record { proj1 = i ; proj2 = y } = i :: plist→FL y
tt2 = 2 ∷ 1 ∷ 0 ∷ 3 ∷ []
tt3 : FL 4
tt3 = plist→FL tt2
tt4 = FL→plist tt3
tt5 = plist→FL {4} (FL→plist tt0)
-- maybe FL→iso can be easier using this ...
-- FL→plist-iso : {n : ℕ} → (f : FL n ) → plist→FL (FL→plist f ) ≡ f
-- FL→plist-iso = {!!}
-- FL→plist-inject : {n : ℕ} → (f g : FL n ) → FL→plist f ≡ FL→plist g → f ≡ g
-- FL→plist-inject = {!!}
open import Relation.Binary as B hiding (Decidable; _⇔_)
open import Data.Sum.Base as Sum -- inj₁
open import Relation.Nary using (⌊_⌋)
open import Data.List.Fresh hiding ([_])
FList : (n : ℕ ) → Set
FList n = List# (FL n) ⌊ _f<?_ ⌋
fr1 : FList 3
fr1 =
((# 0) :: ((# 0) :: ((# 0 ) :: f0))) ∷#
((# 0) :: ((# 1) :: ((# 0 ) :: f0))) ∷#
((# 1) :: ((# 0) :: ((# 0 ) :: f0))) ∷#
((# 2) :: ((# 0) :: ((# 0 ) :: f0))) ∷#
((# 2) :: ((# 1) :: ((# 0 ) :: f0))) ∷#
[]
open import Data.Product
open import Relation.Nullary.Decidable hiding (⌊_⌋)
-- open import Data.Bool hiding (_<_ ; _≤_ )
open import Data.Unit.Base using (⊤ ; tt)
-- fresh a [] = ⊤
-- fresh a (x ∷# xs) = R a x × fresh a xs
-- toWitness
-- ttf< : {n : ℕ } → {x a : FL n } → x f< a → T (isYes (x f<? a))
-- ttf< {n} {x} {a} x<a with x f<? a
-- ... | yes y = subst (λ k → Data.Bool.T k ) refl tt
-- ... | no nn = ⊥-elim ( nn x<a )
ttf : {n : ℕ } {x a : FL (n)} → x f< a → (y : FList (n)) → fresh (FL (n)) ⌊ _f<?_ ⌋ a y → fresh (FL (n)) ⌊ _f<?_ ⌋ x y
ttf _ [] fr = Level.lift tt
ttf {_} {x} {a} lt (cons a₁ y x1) (lift lt1 , x2 ) = (Level.lift (fromWitness (ttf1 lt1 lt ))) , ttf (ttf1 lt1 lt) y x1 where
ttf1 : True (a f<? a₁) → x f< a → x f< a₁
ttf1 t x<a = f<-trans x<a (toWitness t)
-- by https://gist.github.com/aristidb/1684202
FLinsert : {n : ℕ } → FL n → FList n → FList n
FLfresh : {n : ℕ } → (a x : FL (suc n) ) → (y : FList (suc n) ) → a f< x
→ fresh (FL (suc n)) ⌊ _f<?_ ⌋ a y → fresh (FL (suc n)) ⌊ _f<?_ ⌋ a (FLinsert x y)
FLinsert {zero} f0 y = f0 ∷# []
FLinsert {suc n} x [] = x ∷# []
FLinsert {suc n} x (cons a y x₁) with FLcmp x a
... | tri≈ ¬a b ¬c = cons a y x₁
... | tri< lt ¬b ¬c = cons x ( cons a y x₁) ( Level.lift (fromWitness lt ) , ttf lt y x₁)
FLinsert {suc n} x (cons a [] x₁) | tri> ¬a ¬b lt = cons a ( x ∷# [] ) ( Level.lift (fromWitness lt) , Level.lift tt )
FLinsert {suc n} x (cons a y yr) | tri> ¬a ¬b a<x = cons a (FLinsert x y) (FLfresh a x y a<x yr )
FLfresh a x [] a<x (Level.lift tt) = Level.lift (fromWitness a<x) , Level.lift tt
FLfresh a x (cons b [] (Level.lift tt)) a<x (Level.lift a<b , a<y) with FLcmp x b
... | tri< x<b ¬b ¬c = Level.lift (fromWitness a<x) , Level.lift a<b , Level.lift tt
... | tri≈ ¬a refl ¬c = Level.lift (fromWitness a<x) , Level.lift tt
... | tri> ¬a ¬b b<x = Level.lift a<b , Level.lift (fromWitness (f<-trans (toWitness a<b) b<x)) , Level.lift tt
FLfresh a x (cons b y br) a<x (Level.lift a<b , a<y) with FLcmp x b
... | tri< x<b ¬b ¬c = Level.lift (fromWitness a<x) , Level.lift a<b , ttf (toWitness a<b) y br
... | tri≈ ¬a refl ¬c = Level.lift (fromWitness a<x) , ttf a<x y br
FLfresh a x (cons b [] br) a<x (Level.lift a<b , a<y) | tri> ¬a ¬b b<x =
Level.lift a<b , Level.lift (fromWitness (f<-trans (toWitness a<b) b<x)) , Level.lift tt
FLfresh a x (cons b (cons a₁ y x₁) br) a<x (Level.lift a<b , a<y) | tri> ¬a ¬b b<x =
Level.lift a<b , FLfresh a x (cons a₁ y x₁) a<x a<y
fr6 = FLinsert ((# 1) :: ((# 1) :: ((# 0 ) :: f0))) fr1
open import Data.List.Fresh.Relation.Unary.Any
open import Data.List.Fresh.Relation.Unary.All
x∈FLins : {n : ℕ} → (x : FL n ) → (xs : FList n) → Any (x ≡_) (FLinsert x xs)
x∈FLins {zero} f0 [] = here refl
x∈FLins {zero} f0 (cons f0 xs x) = here refl
x∈FLins {suc n} x [] = here refl
x∈FLins {suc n} x (cons a xs x₁) with FLcmp x a
... | tri< x<a ¬b ¬c = here refl
... | tri≈ ¬a b ¬c = here b
x∈FLins {suc n} x (cons a [] x₁) | tri> ¬a ¬b a<x = there ( here refl )
x∈FLins {suc n} x (cons a (cons a₁ xs x₂) x₁) | tri> ¬a ¬b a<x = there ( x∈FLins x (cons a₁ xs x₂) )
nextAny : {n : ℕ} → {x h : FL n } → {L : FList n} → {hr : fresh (FL n) ⌊ _f<?_ ⌋ h L } → Any (x ≡_) L → Any (x ≡_) (cons h L hr )
nextAny (here x₁) = there (here x₁)
nextAny (there any) = there (there any)
insAny : {n : ℕ} → {x h : FL n } → (xs : FList n) → Any (x ≡_) xs → Any (x ≡_) (FLinsert h xs)
insAny {zero} {f0} {f0} (cons a L xr) (here refl) = here refl
insAny {zero} {f0} {f0} (cons a L xr) (there any) = insAny {zero} {f0} {f0} L any
insAny {suc n} {x} {h} (cons a L xr) any with FLcmp h a
... | tri< x<a ¬b ¬c = there any
... | tri≈ ¬a b ¬c = any
insAny {suc n} {a} {h} (cons a [] (Level.lift tt)) (here refl) | tri> ¬a ¬b c = here refl
insAny {suc n} {x} {h} (cons a (cons a₁ L x₁) xr) (here refl) | tri> ¬a ¬b c = here refl
insAny {suc n} {x} {h} (cons a (cons a₁ L x₁) xr) (there any) | tri> ¬a ¬b c = there (insAny (cons a₁ L x₁) any)
-- FLinsert membership
module FLMB { n : ℕ } where
FL-Setoid : Setoid Level.zero Level.zero
FL-Setoid = record { Carrier = FL n ; _≈_ = _≡_ ; isEquivalence = record { sym = sym ; refl = refl ; trans = trans }}
open import Data.List.Fresh.Membership.Setoid FL-Setoid
FLinsert-mb : (x : FL n ) → (xs : FList n) → x ∈ FLinsert x xs
FLinsert-mb x xs = x∈FLins {n} x xs
|
section\<open>Collapsing the levels\<close>
theory Level_Collapse
imports Conc_Impl
begin
text\<open>
The theory up to this point is implemented in a way that separates the different aspects into different levels.
This is highly beneficial for us, since it allows us to tackle the difficulties arising in small chunks.
However, exporting this to the user would be highly impractical.
Thus, this theory collapses all the different levels (i.e. refinement steps) and relates the computations in the heap monad to
@{type boolfunc}.
\<close>
definition "bddmi_rel cs \<equiv> {(a,c)|a b c. (a,b) \<in> bf_ifex_rel \<and> (c,b) \<in> Rmi cs}"
definition bdd_relator :: "(nat boolfunc \<times> nat) set \<Rightarrow> bddi \<Rightarrow> assn" where
"bdd_relator p s \<equiv> \<exists>\<^sub>Acs. is_bdd_impl cs s * \<up>(p \<subseteq> (bddmi_rel cs) \<and> bdd_sane cs) * true"
text\<open>
The @{type assn} predicate @{term bdd_relator} is the interface that is exposed to the user.
(The contents of the definition are not exposed.)
\<close>
lemma bdd_relator_mono[intro!]: "q \<subseteq> p \<Longrightarrow> bdd_relator p s \<Longrightarrow>\<^sub>A bdd_relator q s" unfolding bdd_relator_def by sep_auto
lemma bdd_relator_absorb_true[simp]: "bdd_relator p s * true = bdd_relator p s" unfolding bdd_relator_def by simp
thm bdd_relator_def[unfolded bddmi_rel_def, simplified]
lemma join_hlp1: "is_bdd_impl a s * is_bdd_impl b s \<Longrightarrow>\<^sub>A is_bdd_impl a s * is_bdd_impl b s * \<up>(a = b)"
apply clarsimp
apply(rule preciseD[where p=s and R="is_bdd_impl" and F="is_bdd_impl b s" and F'="is_bdd_impl a s"])
apply(rule is_bdd_impl_prec)
apply(unfold mod_and_dist)
apply(rule conjI)
apply assumption
apply(simp add: star_aci(2))
done
lemma join_hlp: "is_bdd_impl a s * is_bdd_impl b s = is_bdd_impl b s * is_bdd_impl a s * \<up>(a = b)"
apply(rule ent_iffI[rotated])
apply(simp; fail)
apply(rule ent_trans)
apply(rule join_hlp1)
apply(simp; fail)
done
definition node_relator where "node_relator x y \<longleftrightarrow> x \<in> y"
text \<open>\<open>sep_auto\<close> behaves sub-optimally when it has @{term "(bf,bdd) \<in> computed_pointer_relation"} as an assumption in our cases. Using @{const node_relator} instead, together with a custom solver for \<open>simp\<close>, fixes this behavior.\<close>
lemma node_relatorI: "x \<in> y \<Longrightarrow> node_relator x y" unfolding node_relator_def .
lemma node_relatorD: "node_relator x y \<Longrightarrow> x \<in> y" unfolding node_relator_def .
ML\<open>fun TRY' tac = tac ORELSE' K all_tac\<close>
setup \<open>map_theory_simpset (fn ctxt =>
ctxt addSolver (Simplifier.mk_solver "node_relator"
(fn ctxt => fn n =>
let
val tac =
resolve_tac ctxt @{thms node_relatorI} THEN'
REPEAT_ALL_NEW (resolve_tac ctxt @{thms Set.insertI1 Set.insertI2}) THEN'
TRY' (dresolve_tac ctxt @{thms node_relatorD} THEN' assume_tac ctxt)
in
SOLVED' tac n
end))
)\<close>
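text\<open>A small, purely illustrative sanity check (the concrete numbers are arbitrary): goals of
this shape are what the solver above is meant to discharge when they pop up as side conditions
during \<open>simp\<close> and \<open>sep_auto\<close>. Here we solve one by hand via @{thm node_relatorI}.\<close>
lemma "node_relator (2::nat) {1, 2, 3}"
  by (rule node_relatorI) simp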
text\<open>
This is the general form one wants to work with:
if a function on the bdd is called with a set of already existing and valid pointers, the arguments to the function have to be in that set.
The result is that one more pointer is added to the set of existing and valid pointers; @{text iteci_rule} below is the prototypical instance.
\<close>
thm iteci_rule[THEN mp] mi.ite_impl_R ifex_ite_rel_bf
lemma iteci_rule[sep_heap_rules]: "
\<lbrakk>node_relator (ib, ic) rp; node_relator (tb, tc) rp; node_relator (eb, ec) rp\<rbrakk> \<Longrightarrow>
<bdd_relator rp s>
iteci_lu ic tc ec s
<\<lambda>(r,s'). bdd_relator (insert (bf_ite ib tb eb,r) rp) s'>"
apply(unfold bdd_relator_def node_relator_def)
apply(intro norm_pre_ex_rule)
apply(clarsimp)
apply(unfold bddmi_rel_def)
apply(drule (1) rev_subsetD)+
apply(clarsimp)
apply(drule (3) mi.ite_impl_lu_R[where ii=ic and ti=tc and ei=ec, unfolded in_rel_def])
apply(drule ospecD2)
apply(clarsimp simp del: ifex_ite.simps)
apply(rule cons_post_rule)
apply(rule cons_pre_rule[rotated])
apply(rule iteci_lu_rule[THEN mp, THEN add_true])
apply(assumption)
apply(sep_auto; fail)
apply(clarsimp simp del: ifex_ite.simps)
apply(rule ent_ex_postI)
apply(subst ent_pure_post_iff)
apply(rule conjI[rotated])
apply(sep_auto; fail)
apply(clarsimp simp del: ifex_ite.simps)
apply(rule conjI[rotated])
apply(force simp add: mi.les_def)
apply(rule exI)
apply(rule conjI)
apply(erule (2) ifex_ite_opt_rel_bf[unfolded in_rel_def])
apply assumption
done
lemma tci_rule[sep_heap_rules]:
"<bdd_relator rp s>
tci s
<\<lambda>(r,s'). bdd_relator (insert (bf_True,r) rp) s'>"
apply(unfold bdd_relator_def)
apply(intro norm_pre_ex_rule)
apply(clarsimp)
apply(frule mi.Timpl_rule)
apply(drule ospecD2)
apply(clarify)
apply(sep_auto)
apply(unfold bddmi_rel_def)
apply(clarsimp)
apply(force simp add: mi.les_def)
done
lemma fci_rule[sep_heap_rules]:
"<bdd_relator rp s>
fci s
<\<lambda>(r,s'). bdd_relator (insert (bf_False,r) rp) s'>"
apply(unfold bdd_relator_def)
apply(intro norm_pre_ex_rule)
apply(clarsimp)
apply(frule mi.Fimpl_rule)
apply(drule ospecD2)
apply(clarify)
apply(sep_auto)
apply(unfold bddmi_rel_def)
apply(clarsimp)
apply(force simp add: mi.les_def)
done
text\<open>IFC/ifmi/ifci require that the variable order is ensured by the user.
Instead of using ifci, a combination of litci and iteci has to be used.\<close>
lemma [sep_heap_rules]:
"\<lbrakk>(tb, tc) \<in> rp; (eb, ec) \<in> rp\<rbrakk> \<Longrightarrow>
<bdd_relator rp s>
ifci v tc ec s
<\<lambda>(r,s'). bdd_relator (insert (bf_if v tb eb,r) rp) s'>"
text\<open>This probably doesn't hold.\<close>
oops
lemma notci_rule[sep_heap_rules]:
assumes "node_relator (tb, tc) rp"
shows "<bdd_relator rp s> notci tc s <\<lambda>(r,s'). bdd_relator (insert (bf_not tb,r) rp) s'>"
using assms
by(sep_auto simp: notci_def)
lemma cirules1[sep_heap_rules]:
assumes "node_relator (tb, tc) rp" "node_relator (eb, ec) rp"
shows
"<bdd_relator rp s> andci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_and tb eb,r) rp) s'>"
"<bdd_relator rp s> orci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_or tb eb,r) rp) s'>"
"<bdd_relator rp s> biimpci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_biimp tb eb,r) rp) s'>"
"<bdd_relator rp s> xorci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_xor tb eb,r) rp) s'>"
(* actually, these functions would allow for more inserts. I think that would be inconvenient, though. *)
using assms
by (sep_auto simp: andci_def orci_def biimpci_def xorci_def)+
lemma cirules2[sep_heap_rules]:
assumes "node_relator (tb, tc) rp" "node_relator (eb, ec) rp"
shows
"<bdd_relator rp s> nandci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_nand tb eb,r) rp) s'>"
"<bdd_relator rp s> norci tc ec s <\<lambda>(r,s'). bdd_relator (insert (bf_nor tb eb,r) rp) s'>"
using assms
by(sep_auto simp: nandci_def norci_def)+
lemma litci_rule[sep_heap_rules]:
"<bdd_relator rp s> litci v s <\<lambda>(r,s'). bdd_relator (insert (bf_lit v,r) rp) s'>"
using assms
apply(unfold litci_def)
apply(subgoal_tac "\<And>t ab bb. (* introducing some vars\<dots> *)
<bdd_relator (insert (bf_False, ab) (insert (bf_True, t) rp)) bb * true>
ifci v t ab bb
<\<lambda>r. case r of (r, x) \<Rightarrow> bdd_relator (insert (bf_lit v, r) rp) x>")
apply(sep_auto; fail)
apply(rename_tac tc fc sc)
apply(unfold bdd_relator_def[abs_def])
apply(clarsimp)
apply(intro norm_pre_ex_rule)
apply(clarsimp)
apply(unfold bddmi_rel_def)
apply(clarsimp simp only: bf_ifex_rel_consts_ensured)
apply(frule mi.IFimpl_rule)
apply(rename_tac tc fc sc sm a aa b ba fm tm)
apply(thin_tac "(fm, Falseif) \<in> Rmi sm")
apply(assumption) (* hack: instantiate the first premise of mi.IFimpl_rule with the second assumption that matches. The other way around would be fine, too. *)
apply(assumption)
apply(clarsimp)
apply(drule ospecD2)
apply(clarify)
apply(sep_auto)
apply(force simp add: mi.les_def)
done
lemma tautci_rule[sep_heap_rules]:
shows "node_relator (tb, tc) rp \<Longrightarrow> <bdd_relator rp s> tautci tc s <\<lambda>r. bdd_relator rp s * \<up>(r \<longleftrightarrow> tb = bf_True)>"
apply(unfold node_relator_def)
apply(unfold tautci_def)
apply(unfold bdd_relator_def)
apply(intro norm_pre_ex_rule; clarsimp)
apply(unfold bddmi_rel_def)
apply(drule (1) rev_subsetD)
apply(clarsimp)
apply(rename_tac sm ti)
apply(frule (1) mi.DESTRimpl_rule; drule ospecD2; clarify)
apply(sep_auto split: ifex.splits)
done
lemma emptyci_rule[sep_heap_rules]:
shows "<emp> emptyci <\<lambda>r. bdd_relator {} r>"
by(sep_auto simp: bdd_relator_def)
(* TODO: make sure that emptyci_rule and friends don't show up in duplicate, once in concrete-impl style and once level-collapsed. *)
lemmas [simp] = bf_ite_def
text\<open>Efficient comparison of two nodes.\<close>
definition "eqci a b \<equiv> return (a = b)" (* wrapping definition so sep_auto does not run into nowhere *)
lemma iteeq_rule[sep_heap_rules]: "
\<lbrakk>node_relator (xb, xc) rp; node_relator (yb, yc) rp\<rbrakk> \<Longrightarrow>
<bdd_relator rp s>
eqci xc yc
<\<lambda>r. \<up>(r \<longleftrightarrow> xb = yb)>\<^sub>t"
apply(unfold bdd_relator_def node_relator_def eqci_def)
apply(intro norm_pre_ex_rule)
apply(clarsimp)
apply(unfold bddmi_rel_def)
apply(drule (1) rev_subsetD)+
apply(rule return_cons_rule)
apply(clarsimp)
apply(rule iffI)
using bf_ifex_eq mi.cmp_rule_eq apply(blast)
using bf_ifex_eq mi.cmp_rule_eq apply(blast)
done
end
|
(* Author: Tobias Nipkow *)
theory Abs_Int3_ITP
imports Abs_Int2_ivl_ITP
begin
subsection "Widening and Narrowing"
class WN = SL_top +
fixes widen :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<nabla>" 65)
assumes widen1: "x \<sqsubseteq> x \<nabla> y"
assumes widen2: "y \<sqsubseteq> x \<nabla> y"
fixes narrow :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infix "\<triangle>" 65)
assumes narrow1: "y \<sqsubseteq> x \<Longrightarrow> y \<sqsubseteq> x \<triangle> y"
assumes narrow2: "y \<sqsubseteq> x \<Longrightarrow> x \<triangle> y \<sqsubseteq> x"
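text{* Widening (@{text "\<nabla>"}) over-approximates the least upper bound: both arguments lie
below the result. Narrowing (@{text "\<triangle>"}) moves back down again: if @{text "y \<sqsubseteq> x"}, then
@{text "x \<triangle> y"} lies between @{text "y"} and @{text "x"}. Termination of the iterations built
from these operators is established further below via the measures @{text "m_ivl"},
@{text "n_ivl"}, @{text "m_st"} etc. *}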
subsubsection "Intervals"
instantiation ivl :: WN
begin
definition "widen_ivl ivl1 ivl2 =
((*if is_empty ivl1 then ivl2 else
if is_empty ivl2 then ivl1 else*)
case (ivl1,ivl2) of (I l1 h1, I l2 h2) \<Rightarrow>
I (if le_option False l2 l1 \<and> l2 \<noteq> l1 then None else l1)
(if le_option True h1 h2 \<and> h1 \<noteq> h2 then None else h1))"
definition "narrow_ivl ivl1 ivl2 =
((*if is_empty ivl1 \<or> is_empty ivl2 then empty else*)
case (ivl1,ivl2) of (I l1 h1, I l2 h2) \<Rightarrow>
I (if l1 = None then l2 else l1)
(if h1 = None then h2 else h1))"
instance
proof qed
(auto simp add: widen_ivl_def narrow_ivl_def le_option_def le_ivl_def empty_def split: ivl.split option.split if_splits)
end
subsubsection "Abstract State"
instantiation st :: (WN)WN
begin
definition "widen_st F1 F2 =
FunDom (\<lambda>x. fun F1 x \<nabla> fun F2 x) (inter_list (dom F1) (dom F2))"
definition "narrow_st F1 F2 =
FunDom (\<lambda>x. fun F1 x \<triangle> fun F2 x) (inter_list (dom F1) (dom F2))"
instance
proof
case goal1 thus ?case
by(simp add: widen_st_def le_st_def lookup_def widen1)
next
case goal2 thus ?case
by(simp add: widen_st_def le_st_def lookup_def widen2)
next
case goal3 thus ?case
by(auto simp: narrow_st_def le_st_def lookup_def narrow1)
next
case goal4 thus ?case
by(auto simp: narrow_st_def le_st_def lookup_def narrow2)
qed
end
subsubsection "Option"
instantiation option :: (WN)WN
begin
fun widen_option where
"None \<nabla> x = x" |
"x \<nabla> None = x" |
"(Some x) \<nabla> (Some y) = Some(x \<nabla> y)"
fun narrow_option where
"None \<triangle> x = None" |
"x \<triangle> None = None" |
"(Some x) \<triangle> (Some y) = Some(x \<triangle> y)"
instance
proof
case goal1 show ?case
by(induct x y rule: widen_option.induct) (simp_all add: widen1)
next
case goal2 show ?case
by(induct x y rule: widen_option.induct) (simp_all add: widen2)
next
case goal3 thus ?case
by(induct x y rule: narrow_option.induct) (simp_all add: narrow1)
next
case goal4 thus ?case
by(induct x y rule: narrow_option.induct) (simp_all add: narrow2)
qed
end
subsubsection "Annotated commands"
fun map2_acom :: "('a \<Rightarrow> 'a \<Rightarrow> 'a) \<Rightarrow> 'a acom \<Rightarrow> 'a acom \<Rightarrow> 'a acom" where
"map2_acom f (SKIP {a1}) (SKIP {a2}) = (SKIP {f a1 a2})" |
"map2_acom f (x ::= e {a1}) (x' ::= e' {a2}) = (x ::= e {f a1 a2})" |
"map2_acom f (c1;;c2) (c1';;c2') = (map2_acom f c1 c1';; map2_acom f c2 c2')" |
"map2_acom f (IF b THEN c1 ELSE c2 {a1}) (IF b' THEN c1' ELSE c2' {a2}) =
(IF b THEN map2_acom f c1 c1' ELSE map2_acom f c2 c2' {f a1 a2})" |
"map2_acom f ({a1} WHILE b DO c {a2}) ({a3} WHILE b' DO c' {a4}) =
({f a1 a3} WHILE b DO map2_acom f c c' {f a2 a4})"
abbreviation widen_acom :: "('a::WN)acom \<Rightarrow> 'a acom \<Rightarrow> 'a acom" (infix "\<nabla>\<^sub>c" 65)
where "widen_acom == map2_acom (op \<nabla>)"
abbreviation narrow_acom :: "('a::WN)acom \<Rightarrow> 'a acom \<Rightarrow> 'a acom" (infix "\<triangle>\<^sub>c" 65)
where "narrow_acom == map2_acom (op \<triangle>)"
lemma widen1_acom: "strip c = strip c' \<Longrightarrow> c \<sqsubseteq> c \<nabla>\<^sub>c c'"
by(induct c c' rule: le_acom.induct)(simp_all add: widen1)
lemma widen2_acom: "strip c = strip c' \<Longrightarrow> c' \<sqsubseteq> c \<nabla>\<^sub>c c'"
by(induct c c' rule: le_acom.induct)(simp_all add: widen2)
lemma narrow1_acom: "y \<sqsubseteq> x \<Longrightarrow> y \<sqsubseteq> x \<triangle>\<^sub>c y"
by(induct y x rule: le_acom.induct) (simp_all add: narrow1)
lemma narrow2_acom: "y \<sqsubseteq> x \<Longrightarrow> x \<triangle>\<^sub>c y \<sqsubseteq> x"
by(induct y x rule: le_acom.induct) (simp_all add: narrow2)
subsubsection "Post-fixed point computation"
definition iter_widen :: "('a acom \<Rightarrow> 'a acom) \<Rightarrow> 'a acom \<Rightarrow> ('a::WN)acom option"
where "iter_widen f = while_option (\<lambda>c. \<not> f c \<sqsubseteq> c) (\<lambda>c. c \<nabla>\<^sub>c f c)"
definition iter_narrow :: "('a acom \<Rightarrow> 'a acom) \<Rightarrow> 'a acom \<Rightarrow> 'a::WN acom option"
where "iter_narrow f = while_option (\<lambda>c. \<not> c \<sqsubseteq> c \<triangle>\<^sub>c f c) (\<lambda>c. c \<triangle>\<^sub>c f c)"
definition pfp_wn ::
"(('a::WN)option acom \<Rightarrow> 'a option acom) \<Rightarrow> com \<Rightarrow> 'a option acom option"
where "pfp_wn f c = (case iter_widen f (\<bottom>\<^sub>c c) of None \<Rightarrow> None
| Some c' \<Rightarrow> iter_narrow f c')"
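text{* @{text pfp_wn} proceeds in two phases: starting from @{text "\<bottom>\<^sub>c c"} it first iterates
the widened step @{text "\<lambda>c. c \<nabla>\<^sub>c f c"} until a post-fixed point of @{text f} is reached,
and then iterates the narrowed step @{text "\<lambda>c. c \<triangle>\<^sub>c f c"} to improve that post-fixed point. *}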
lemma strip_map2_acom:
"strip c1 = strip c2 \<Longrightarrow> strip(map2_acom f c1 c2) = strip c1"
by(induct f c1 c2 rule: map2_acom.induct) simp_all
lemma iter_widen_pfp: "iter_widen f c = Some c' \<Longrightarrow> f c' \<sqsubseteq> c'"
by(auto simp add: iter_widen_def dest: while_option_stop)
lemma strip_while: fixes f :: "'a acom \<Rightarrow> 'a acom"
assumes "\<forall>c. strip (f c) = strip c" and "while_option P f c = Some c'"
shows "strip c' = strip c"
using while_option_rule[where P = "\<lambda>c'. strip c' = strip c", OF _ assms(2)]
by (metis assms(1))
lemma strip_iter_widen: fixes f :: "'a::WN acom \<Rightarrow> 'a acom"
assumes "\<forall>c. strip (f c) = strip c" and "iter_widen f c = Some c'"
shows "strip c' = strip c"
proof-
have "\<forall>c. strip(c \<nabla>\<^sub>c f c) = strip c" by (metis assms(1) strip_map2_acom)
from strip_while[OF this] assms(2) show ?thesis by(simp add: iter_widen_def)
qed
lemma iter_narrow_pfp: assumes "mono f" and "f c0 \<sqsubseteq> c0"
and "iter_narrow f c0 = Some c"
shows "f c \<sqsubseteq> c \<and> c \<sqsubseteq> c0" (is "?P c")
proof-
{ fix c assume "?P c"
note 1 = conjunct1[OF this] and 2 = conjunct2[OF this]
let ?c' = "c \<triangle>\<^sub>c f c"
have "?P ?c'"
proof
have "f ?c' \<sqsubseteq> f c" by(rule monoD[OF `mono f` narrow2_acom[OF 1]])
also have "\<dots> \<sqsubseteq> ?c'" by(rule narrow1_acom[OF 1])
finally show "f ?c' \<sqsubseteq> ?c'" .
have "?c' \<sqsubseteq> c" by (rule narrow2_acom[OF 1])
also have "c \<sqsubseteq> c0" by(rule 2)
finally show "?c' \<sqsubseteq> c0" .
qed
}
with while_option_rule[where P = ?P, OF _ assms(3)[simplified iter_narrow_def]]
assms(2) le_refl
show ?thesis by blast
qed
lemma pfp_wn_pfp:
"\<lbrakk> mono f; pfp_wn f c = Some c' \<rbrakk> \<Longrightarrow> f c' \<sqsubseteq> c'"
unfolding pfp_wn_def
by (auto dest: iter_widen_pfp iter_narrow_pfp split: option.splits)
lemma strip_pfp_wn:
"\<lbrakk> \<forall>c. strip(f c) = strip c; pfp_wn f c = Some c' \<rbrakk> \<Longrightarrow> strip c' = c"
apply(auto simp add: pfp_wn_def iter_narrow_def split: option.splits)
by (metis (mono_tags) strip_map2_acom strip_while strip_bot_acom strip_iter_widen)
locale Abs_Int2 = Abs_Int1_mono
where \<gamma>=\<gamma> for \<gamma> :: "'av::{WN,L_top_bot} \<Rightarrow> val set"
begin
definition AI_wn :: "com \<Rightarrow> 'av st option acom option" where
"AI_wn = pfp_wn (step' \<top>)"
lemma AI_wn_sound: "AI_wn c = Some c' \<Longrightarrow> CS c \<le> \<gamma>\<^sub>c c'"
proof(simp add: CS_def AI_wn_def)
assume 1: "pfp_wn (step' \<top>) c = Some c'"
from pfp_wn_pfp[OF mono_step'2 1]
have 2: "step' \<top> c' \<sqsubseteq> c'" .
have 3: "strip (\<gamma>\<^sub>c (step' \<top> c')) = c" by(simp add: strip_pfp_wn[OF _ 1])
have "lfp (step UNIV) c \<le> \<gamma>\<^sub>c (step' \<top> c')"
proof(rule lfp_lowerbound[simplified,OF 3])
show "step UNIV (\<gamma>\<^sub>c (step' \<top> c')) \<le> \<gamma>\<^sub>c (step' \<top> c')"
proof(rule step_preserves_le[OF _ _])
show "UNIV \<subseteq> \<gamma>\<^sub>o \<top>" by simp
show "\<gamma>\<^sub>c (step' \<top> c') \<le> \<gamma>\<^sub>c c'" by(rule mono_gamma_c[OF 2])
qed
qed
from this 2 show "lfp (step UNIV) c \<le> \<gamma>\<^sub>c c'"
by (blast intro: mono_gamma_c order_trans)
qed
end
global_interpretation Abs_Int2
where \<gamma> = \<gamma>_ivl and num' = num_ivl and plus' = plus_ivl
and test_num' = in_ivl
and filter_plus' = filter_plus_ivl and filter_less' = filter_less_ivl
defines AI_ivl' = AI_wn
..
subsubsection "Tests"
definition "step_up_ivl n = ((\<lambda>c. c \<nabla>\<^sub>c step_ivl \<top> c)^^n)"
definition "step_down_ivl n = ((\<lambda>c. c \<triangle>\<^sub>c step_ivl \<top> c)^^n)"
text{* For @{const test3_ivl}, @{const AI_ivl} needed as many iterations as
the loop took to execute. In contrast, @{const AI_ivl'} converges in a
constant number of steps: *}
value "show_acom (step_up_ivl 1 (\<bottom>\<^sub>c test3_ivl))"
value "show_acom (step_up_ivl 2 (\<bottom>\<^sub>c test3_ivl))"
value "show_acom (step_up_ivl 3 (\<bottom>\<^sub>c test3_ivl))"
value "show_acom (step_up_ivl 4 (\<bottom>\<^sub>c test3_ivl))"
value "show_acom (step_up_ivl 5 (\<bottom>\<^sub>c test3_ivl))"
value "show_acom (step_down_ivl 1 (step_up_ivl 5 (\<bottom>\<^sub>c test3_ivl)))"
value "show_acom (step_down_ivl 2 (step_up_ivl 5 (\<bottom>\<^sub>c test3_ivl)))"
value "show_acom (step_down_ivl 3 (step_up_ivl 5 (\<bottom>\<^sub>c test3_ivl)))"
text{* Now all the analyses terminate: *}
value "show_acom_opt (AI_ivl' test4_ivl)"
value "show_acom_opt (AI_ivl' test5_ivl)"
value "show_acom_opt (AI_ivl' test6_ivl)"
subsubsection "Termination: Intervals"
definition m_ivl :: "ivl \<Rightarrow> nat" where
"m_ivl ivl = (case ivl of I l h \<Rightarrow>
(case l of None \<Rightarrow> 0 | Some _ \<Rightarrow> 1) + (case h of None \<Rightarrow> 0 | Some _ \<Rightarrow> 1))"
lemma m_ivl_height: "m_ivl ivl \<le> 2"
by(simp add: m_ivl_def split: ivl.split option.split)
lemma m_ivl_anti_mono: "(y::ivl) \<sqsubseteq> x \<Longrightarrow> m_ivl x \<le> m_ivl y"
by(auto simp: m_ivl_def le_option_def le_ivl_def
split: ivl.split option.split if_splits)
lemma m_ivl_widen:
"~ y \<sqsubseteq> x \<Longrightarrow> m_ivl(x \<nabla> y) < m_ivl x"
by(auto simp: m_ivl_def widen_ivl_def le_option_def le_ivl_def
split: ivl.splits option.splits if_splits)
lemma Top_less_ivl: "\<top> \<sqsubseteq> x \<Longrightarrow> m_ivl x = 0"
by(auto simp: m_ivl_def le_option_def le_ivl_def empty_def Top_ivl_def
split: ivl.split option.split if_splits)
definition n_ivl :: "ivl \<Rightarrow> nat" where
"n_ivl ivl = 2 - m_ivl ivl"
lemma n_ivl_mono: "(x::ivl) \<sqsubseteq> y \<Longrightarrow> n_ivl x \<le> n_ivl y"
unfolding n_ivl_def by (metis diff_le_mono2 m_ivl_anti_mono)
lemma n_ivl_narrow:
"~ x \<sqsubseteq> x \<triangle> y \<Longrightarrow> n_ivl(x \<triangle> y) < n_ivl x"
by(auto simp: n_ivl_def m_ivl_def narrow_ivl_def le_option_def le_ivl_def
split: ivl.splits option.splits if_splits)
subsubsection "Termination: Abstract State"
definition "m_st m st = (\<Sum>x\<in>set(dom st). m(fun st x))"
lemma m_st_height: assumes "finite X" and "set (dom S) \<subseteq> X"
shows "m_st m_ivl S \<le> 2 * card X"
proof(auto simp: m_st_def)
have "(\<Sum>x\<in>set(dom S). m_ivl (fun S x)) \<le> (\<Sum>x\<in>set(dom S). 2)" (is "?L \<le> _")
by(rule sum_mono)(simp add:m_ivl_height)
also have "\<dots> \<le> (\<Sum>x\<in>X. 2)"
by(rule sum_mono3[OF assms]) simp
also have "\<dots> = 2 * card X" by(simp add: sum_constant)
finally show "?L \<le> \<dots>" .
qed
lemma m_st_anti_mono:
"S1 \<sqsubseteq> S2 \<Longrightarrow> m_st m_ivl S2 \<le> m_st m_ivl S1"
proof(auto simp: m_st_def le_st_def lookup_def split: if_splits)
let ?X = "set(dom S1)" let ?Y = "set(dom S2)"
let ?f = "fun S1" let ?g = "fun S2"
assume asm: "\<forall>x\<in>?Y. (x \<in> ?X \<longrightarrow> ?f x \<sqsubseteq> ?g x) \<and> (x \<in> ?X \<or> \<top> \<sqsubseteq> ?g x)"
hence 1: "\<forall>y\<in>?Y\<inter>?X. m_ivl(?g y) \<le> m_ivl(?f y)" by(simp add: m_ivl_anti_mono)
have 0: "\<forall>x\<in>?Y-?X. m_ivl(?g x) = 0" using asm by (auto simp: Top_less_ivl)
have "(\<Sum>y\<in>?Y. m_ivl(?g y)) = (\<Sum>y\<in>(?Y-?X) \<union> (?Y\<inter>?X). m_ivl(?g y))"
by (metis Un_Diff_Int)
also have "\<dots> = (\<Sum>y\<in>?Y-?X. m_ivl(?g y)) + (\<Sum>y\<in>?Y\<inter>?X. m_ivl(?g y))"
by(subst sum.union_disjoint) auto
also have "(\<Sum>y\<in>?Y-?X. m_ivl(?g y)) = 0" using 0 by simp
also have "0 + (\<Sum>y\<in>?Y\<inter>?X. m_ivl(?g y)) = (\<Sum>y\<in>?Y\<inter>?X. m_ivl(?g y))" by simp
also have "\<dots> \<le> (\<Sum>y\<in>?Y\<inter>?X. m_ivl(?f y))"
by(rule sum_mono)(simp add: 1)
also have "\<dots> \<le> (\<Sum>y\<in>?X. m_ivl(?f y))"
by(simp add: sum_mono3[of "?X" "?Y Int ?X", OF _ Int_lower2])
finally show "(\<Sum>y\<in>?Y. m_ivl(?g y)) \<le> (\<Sum>x\<in>?X. m_ivl(?f x))"
by (metis add_less_cancel_left)
qed
lemma m_st_widen:
assumes "\<not> S2 \<sqsubseteq> S1" shows "m_st m_ivl (S1 \<nabla> S2) < m_st m_ivl S1"
proof-
{ let ?X = "set(dom S1)" let ?Y = "set(dom S2)"
let ?f = "fun S1" let ?g = "fun S2"
fix x assume "x \<in> ?X" "\<not> lookup S2 x \<sqsubseteq> ?f x"
have "(\<Sum>x\<in>?X\<inter>?Y. m_ivl(?f x \<nabla> ?g x)) < (\<Sum>x\<in>?X. m_ivl(?f x))" (is "?L < ?R")
proof cases
assume "x : ?Y"
have "?L < (\<Sum>x\<in>?X\<inter>?Y. m_ivl(?f x))"
proof(rule sum_strict_mono_ex1, simp)
show "\<forall>x\<in>?X\<inter>?Y. m_ivl(?f x \<nabla> ?g x) \<le> m_ivl (?f x)"
by (metis m_ivl_anti_mono widen1)
next
show "\<exists>x\<in>?X\<inter>?Y. m_ivl(?f x \<nabla> ?g x) < m_ivl(?f x)"
using `x:?X` `x:?Y` `\<not> lookup S2 x \<sqsubseteq> ?f x`
by (metis IntI m_ivl_widen lookup_def)
qed
also have "\<dots> \<le> ?R" by(simp add: sum_mono3[OF _ Int_lower1])
finally show ?thesis .
next
assume "x ~: ?Y"
have "?L \<le> (\<Sum>x\<in>?X\<inter>?Y. m_ivl(?f x))"
proof(rule sum_mono, simp)
fix x assume "x:?X \<and> x:?Y" show "m_ivl(?f x \<nabla> ?g x) \<le> m_ivl (?f x)"
by (metis m_ivl_anti_mono widen1)
qed
also have "\<dots> < m_ivl(?f x) + \<dots>"
using m_ivl_widen[OF `\<not> lookup S2 x \<sqsubseteq> ?f x`]
by (metis Nat.le_refl add_strict_increasing gr0I not_less0)
also have "\<dots> = (\<Sum>y\<in>insert x (?X\<inter>?Y). m_ivl(?f y))"
using `x ~: ?Y` by simp
also have "\<dots> \<le> (\<Sum>x\<in>?X. m_ivl(?f x))"
by(rule sum_mono3)(insert `x:?X`, auto)
finally show ?thesis .
qed
} with assms show ?thesis
by(auto simp: le_st_def widen_st_def m_st_def Int_def)
qed
definition "n_st m X st = (\<Sum>x\<in>X. m(lookup st x))"
lemma n_st_mono: assumes "set(dom S1) \<subseteq> X" "set(dom S2) \<subseteq> X" "S1 \<sqsubseteq> S2"
shows "n_st n_ivl X S1 \<le> n_st n_ivl X S2"
proof-
have "(\<Sum>x\<in>X. n_ivl(lookup S1 x)) \<le> (\<Sum>x\<in>X. n_ivl(lookup S2 x))"
apply(rule sum_mono) using assms
by(auto simp: le_st_def lookup_def n_ivl_mono split: if_splits)
thus ?thesis by(simp add: n_st_def)
qed
lemma n_st_narrow:
assumes "finite X" and "set(dom S1) \<subseteq> X" "set(dom S2) \<subseteq> X"
and "S2 \<sqsubseteq> S1" "\<not> S1 \<sqsubseteq> S1 \<triangle> S2"
shows "n_st n_ivl X (S1 \<triangle> S2) < n_st n_ivl X S1"
proof-
have 1: "\<forall>x\<in>X. n_ivl (lookup (S1 \<triangle> S2) x) \<le> n_ivl (lookup S1 x)"
using assms(2-4)
by(auto simp: le_st_def narrow_st_def lookup_def n_ivl_mono narrow2
split: if_splits)
have 2: "\<exists>x\<in>X. n_ivl (lookup (S1 \<triangle> S2) x) < n_ivl (lookup S1 x)"
using assms(2-5)
by(auto simp: le_st_def narrow_st_def lookup_def intro: n_ivl_narrow
split: if_splits)
have "(\<Sum>x\<in>X. n_ivl(lookup (S1 \<triangle> S2) x)) < (\<Sum>x\<in>X. n_ivl(lookup S1 x))"
apply(rule sum_strict_mono_ex1[OF `finite X`]) using 1 2 by blast+
thus ?thesis by(simp add: n_st_def)
qed
subsubsection "Termination: Option"
definition "m_o m n opt = (case opt of None \<Rightarrow> n+1 | Some x \<Rightarrow> m x)"
lemma m_o_anti_mono: "finite X \<Longrightarrow> domo S2 \<subseteq> X \<Longrightarrow> S1 \<sqsubseteq> S2 \<Longrightarrow>
m_o (m_st m_ivl) (2 * card X) S2 \<le> m_o (m_st m_ivl) (2 * card X) S1"
apply(induction S1 S2 rule: le_option.induct)
apply(auto simp: domo_def m_o_def m_st_anti_mono le_SucI m_st_height
split: option.splits)
done
lemma m_o_widen: "\<lbrakk> finite X; domo S2 \<subseteq> X; \<not> S2 \<sqsubseteq> S1 \<rbrakk> \<Longrightarrow>
m_o (m_st m_ivl) (2 * card X) (S1 \<nabla> S2) < m_o (m_st m_ivl) (2 * card X) S1"
by(auto simp: m_o_def domo_def m_st_height less_Suc_eq_le m_st_widen
split: option.split)
definition "n_o n opt = (case opt of None \<Rightarrow> 0 | Some x \<Rightarrow> n x + 1)"
lemma n_o_mono: "domo S1 \<subseteq> X \<Longrightarrow> domo S2 \<subseteq> X \<Longrightarrow> S1 \<sqsubseteq> S2 \<Longrightarrow>
n_o (n_st n_ivl X) S1 \<le> n_o (n_st n_ivl X) S2"
apply(induction S1 S2 rule: le_option.induct)
apply(auto simp: domo_def n_o_def n_st_mono
split: option.splits)
done
lemma n_o_narrow:
"\<lbrakk> finite X; domo S1 \<subseteq> X; domo S2 \<subseteq> X; S2 \<sqsubseteq> S1; \<not> S1 \<sqsubseteq> S1 \<triangle> S2 \<rbrakk>
\<Longrightarrow> n_o (n_st n_ivl X) (S1 \<triangle> S2) < n_o (n_st n_ivl X) S1"
apply(induction S1 S2 rule: narrow_option.induct)
apply(auto simp: n_o_def domo_def n_st_narrow)
done
lemma domo_widen_subset: "domo (S1 \<nabla> S2) \<subseteq> domo S1 \<union> domo S2"
apply(induction S1 S2 rule: widen_option.induct)
apply (auto simp: domo_def widen_st_def)
done
lemma domo_narrow_subset: "domo (S1 \<triangle> S2) \<subseteq> domo S1 \<union> domo S2"
apply(induction S1 S2 rule: narrow_option.induct)
apply (auto simp: domo_def narrow_st_def)
done
subsubsection "Termination: Commands"
lemma strip_widen_acom[simp]:
"strip c' = strip (c::'a::WN acom) \<Longrightarrow> strip (c \<nabla>\<^sub>c c') = strip c"
by(induction "widen::'a\<Rightarrow>'a\<Rightarrow>'a" c c' rule: map2_acom.induct) simp_all
lemma strip_narrow_acom[simp]:
"strip c' = strip (c::'a::WN acom) \<Longrightarrow> strip (c \<triangle>\<^sub>c c') = strip c"
by(induction "narrow::'a\<Rightarrow>'a\<Rightarrow>'a" c c' rule: map2_acom.induct) simp_all
lemma annos_widen_acom[simp]: "strip c1 = strip (c2::'a::WN acom) \<Longrightarrow>
annos(c1 \<nabla>\<^sub>c c2) = map (%(x,y).x\<nabla>y) (zip (annos c1) (annos(c2::'a::WN acom)))"
by(induction "widen::'a\<Rightarrow>'a\<Rightarrow>'a" c1 c2 rule: map2_acom.induct)
(simp_all add: size_annos_same2)
lemma annos_narrow_acom[simp]: "strip c1 = strip (c2::'a::WN acom) \<Longrightarrow>
annos(c1 \<triangle>\<^sub>c c2) = map (%(x,y).x\<triangle>y) (zip (annos c1) (annos(c2::'a::WN acom)))"
by(induction "narrow::'a\<Rightarrow>'a\<Rightarrow>'a" c1 c2 rule: map2_acom.induct)
(simp_all add: size_annos_same2)
lemma widen_acom_Com[simp]: "strip c2 = strip c1 \<Longrightarrow>
c1 : Com X \<Longrightarrow> c2 : Com X \<Longrightarrow> (c1 \<nabla>\<^sub>c c2) : Com X"
apply(auto simp add: Com_def)
apply(rename_tac S S' x)
apply(erule in_set_zipE)
apply(auto simp: domo_def split: option.splits)
apply(case_tac S)
apply(case_tac S')
apply simp
apply fastforce
apply(case_tac S')
apply fastforce
apply (fastforce simp: widen_st_def)
done
lemma narrow_acom_Com[simp]: "strip c2 = strip c1 \<Longrightarrow>
c1 : Com X \<Longrightarrow> c2 : Com X \<Longrightarrow> (c1 \<triangle>\<^sub>c c2) : Com X"
apply(auto simp add: Com_def)
apply(rename_tac S S' x)
apply(erule in_set_zipE)
apply(auto simp: domo_def split: option.splits)
apply(case_tac S)
apply(case_tac S')
apply simp
apply fastforce
apply(case_tac S')
apply fastforce
apply (fastforce simp: narrow_st_def)
done
definition "m_c m c = (let as = annos c in \<Sum>i=0..<size as. m(as!i))"
lemma measure_m_c: "finite X \<Longrightarrow> {(c, c \<nabla>\<^sub>c c') |c c'::ivl st option acom.
strip c' = strip c \<and> c : Com X \<and> c' : Com X \<and> \<not> c' \<sqsubseteq> c}\<inverse>
\<subseteq> measure(m_c(m_o (m_st m_ivl) (2*card(X))))"
apply(auto simp: m_c_def Let_def Com_def)
apply(subgoal_tac "length(annos c') = length(annos c)")
prefer 2 apply (simp add: size_annos_same2)
apply (auto)
apply(rule sum_strict_mono_ex1)
apply simp
apply (clarsimp)
apply(erule m_o_anti_mono)
apply(rule subset_trans[OF domo_widen_subset])
apply fastforce
apply(rule widen1)
apply(auto simp: le_iff_le_annos listrel_iff_nth)
apply(rule_tac x=n in bexI)
prefer 2 apply simp
apply(erule m_o_widen)
apply (simp)+
done
lemma measure_n_c: "finite X \<Longrightarrow> {(c, c \<triangle>\<^sub>c c') |c c'.
strip c = strip c' \<and> c \<in> Com X \<and> c' \<in> Com X \<and> c' \<sqsubseteq> c \<and> \<not> c \<sqsubseteq> c \<triangle>\<^sub>c c'}\<inverse>
\<subseteq> measure(m_c(n_o (n_st n_ivl X)))"
apply(auto simp: m_c_def Let_def Com_def)
apply(subgoal_tac "length(annos c') = length(annos c)")
prefer 2 apply (simp add: size_annos_same2)
apply (auto)
apply(rule sum_strict_mono_ex1)
apply simp
apply (clarsimp)
apply(rule n_o_mono)
using domo_narrow_subset apply fastforce
apply fastforce
apply(rule narrow2)
apply(fastforce simp: le_iff_le_annos listrel_iff_nth)
apply(auto simp: le_iff_le_annos listrel_iff_nth strip_narrow_acom)
apply(rule_tac x=n in bexI)
prefer 2 apply simp
apply(erule n_o_narrow)
apply (simp)+
done
subsubsection "Termination: Post-Fixed Point Iterations"
lemma iter_widen_termination:
fixes c0 :: "'a::WN acom"
assumes P_f: "\<And>c. P c \<Longrightarrow> P(f c)"
assumes P_widen: "\<And>c c'. P c \<Longrightarrow> P c' \<Longrightarrow> P(c \<nabla>\<^sub>c c')"
and "wf({(c::'a acom,c \<nabla>\<^sub>c c')|c c'. P c \<and> P c' \<and> ~ c' \<sqsubseteq> c}^-1)"
and "P c0" and "c0 \<sqsubseteq> f c0" shows "EX c. iter_widen f c0 = Some c"
proof(simp add: iter_widen_def, rule wf_while_option_Some[where P = "P"])
show "wf {(cc', c). (P c \<and> \<not> f c \<sqsubseteq> c) \<and> cc' = c \<nabla>\<^sub>c f c}"
apply(rule wf_subset[OF assms(3)]) by(blast intro: P_f)
next
show "P c0" by(rule `P c0`)
next
fix c assume "P c" thus "P (c \<nabla>\<^sub>c f c)" by(simp add: P_f P_widen)
qed
lemma iter_narrow_termination:
assumes P_f: "\<And>c. P c \<Longrightarrow> P(c \<triangle>\<^sub>c f c)"
and wf: "wf({(c, c \<triangle>\<^sub>c f c)|c c'. P c \<and> ~ c \<sqsubseteq> c \<triangle>\<^sub>c f c}^-1)"
and "P c0" shows "EX c. iter_narrow f c0 = Some c"
proof(simp add: iter_narrow_def, rule wf_while_option_Some[where P = "P"])
show "wf {(c', c). (P c \<and> \<not> c \<sqsubseteq> c \<triangle>\<^sub>c f c) \<and> c' = c \<triangle>\<^sub>c f c}"
apply(rule wf_subset[OF wf]) by(blast intro: P_f)
next
show "P c0" by(rule `P c0`)
next
fix c assume "P c" thus "P (c \<triangle>\<^sub>c f c)" by(simp add: P_f)
qed
lemma iter_winden_step_ivl_termination:
"EX c. iter_widen (step_ivl \<top>) (\<bottom>\<^sub>c c0) = Some c"
apply(rule iter_widen_termination[where
P = "%c. strip c = c0 \<and> c : Com(vars c0)"])
apply (simp_all add: step'_Com bot_acom)
apply(rule wf_subset)
apply(rule wf_measure)
apply(rule subset_trans)
prefer 2
apply(rule measure_m_c[where X = "vars c0", OF finite_cvars])
apply blast
done
lemma iter_narrow_step_ivl_termination:
"c0 \<in> Com (vars(strip c0)) \<Longrightarrow> step_ivl \<top> c0 \<sqsubseteq> c0 \<Longrightarrow>
EX c. iter_narrow (step_ivl \<top>) c0 = Some c"
apply(rule iter_narrow_termination[where
P = "%c. strip c = strip c0 \<and> c : Com(vars(strip c0)) \<and> step_ivl \<top> c \<sqsubseteq> c"])
apply (simp_all add: step'_Com)
apply(clarify)
apply(frule narrow2_acom, drule mono_step'[OF le_refl], erule le_trans[OF _ narrow1_acom])
apply assumption
apply(rule wf_subset)
apply(rule wf_measure)
apply(rule subset_trans)
prefer 2
apply(rule measure_n_c[where X = "vars(strip c0)", OF finite_cvars])
apply auto
by (metis bot_least domo_Top order_refl step'_Com strip_step')
(* FIXME: simplify type system: Combine Com(X) and vars <= X?? *)
lemma while_Com:
fixes c :: "'a st option acom"
assumes "while_option P f c = Some c'"
and "!!c. strip(f c) = strip c"
and "\<forall>c::'a st option acom. c : Com(X) \<longrightarrow> vars(strip c) \<subseteq> X \<longrightarrow> f c : Com(X)"
and "c : Com(X)" and "vars(strip c) \<subseteq> X" shows "c' : Com(X)"
using while_option_rule[where P = "\<lambda>c'. c' : Com(X) \<and> vars(strip c') \<subseteq> X", OF _ assms(1)]
by(simp add: assms(2-))
lemma iter_widen_Com: fixes f :: "'a::WN st option acom \<Rightarrow> 'a st option acom"
assumes "iter_widen f c = Some c'"
and "\<forall>c. c : Com(X) \<longrightarrow> vars(strip c) \<subseteq> X \<longrightarrow> f c : Com(X)"
and "!!c. strip(f c) = strip c"
and "c : Com(X)" and "vars (strip c) \<subseteq> X" shows "c' : Com(X)"
proof-
have "\<forall>c. c : Com(X) \<longrightarrow> vars(strip c) \<subseteq> X \<longrightarrow> c \<nabla>\<^sub>c f c : Com(X)"
by (metis (full_types) widen_acom_Com assms(2,3))
from while_Com[OF assms(1)[simplified iter_widen_def] _ this assms(4,5)]
show ?thesis using assms(3) by(simp)
qed
context Abs_Int2
begin
lemma iter_widen_step'_Com:
"iter_widen (step' \<top>) c = Some c' \<Longrightarrow> vars(strip c) \<subseteq> X \<Longrightarrow> c : Com(X)
\<Longrightarrow> c' : Com(X)"
apply(subgoal_tac "strip c'= strip c")
prefer 2 apply (metis strip_iter_widen strip_step')
apply(drule iter_widen_Com)
prefer 3 apply assumption
prefer 3 apply assumption
apply (auto simp: step'_Com)
done
end
theorem AI_ivl'_termination:
"EX c'. AI_ivl' c = Some c'"
apply(auto simp: AI_wn_def pfp_wn_def iter_winden_step_ivl_termination split: option.split)
apply(rule iter_narrow_step_ivl_termination)
apply (metis bot_acom_Com iter_widen_step'_Com[OF _ subset_refl] strip_iter_widen strip_step')
apply(erule iter_widen_pfp)
done
end
(* interesting(?) relic
lemma widen_assoc:
"~ (y::ivl) \<sqsubseteq> x \<Longrightarrow> ~ z \<sqsubseteq> x \<nabla> y \<Longrightarrow> ((x::ivl) \<nabla> y) \<nabla> z = x \<nabla> (y \<nabla> z)"
apply(cases x)
apply(cases y)
apply(cases z)
apply(rename_tac x1 x2 y1 y2 z1 z2)
apply(simp add: le_ivl_def)
apply(case_tac x1)
apply(case_tac x2)
apply(simp add:le_option_def widen_ivl_def split: if_splits option.splits)
apply(simp add:le_option_def widen_ivl_def split: if_splits option.splits)
apply(case_tac x2)
apply(simp add:le_option_def widen_ivl_def split: if_splits option.splits)
apply(case_tac y1)
apply(case_tac y2)
apply(simp add:le_option_def widen_ivl_def split: if_splits option.splits)
apply(case_tac z1)
apply(auto simp add:le_option_def widen_ivl_def split: if_splits option.splits ivl.splits)[1]
apply(auto simp add:le_option_def widen_ivl_def split: if_splits option.splits ivl.splits)[1]
apply(case_tac y2)
apply(auto simp add:le_option_def widen_ivl_def split: if_splits option.splits ivl.splits)[1]
apply(case_tac z1)
apply(auto simp add:le_option_def widen_ivl_def split: if_splits ivl.splits option.splits)[1]
apply(case_tac z2)
apply(auto simp add:le_option_def widen_ivl_def split: if_splits option.splits)[1]
apply(auto simp add:le_option_def widen_ivl_def split: if_splits option.splits)[1]
done
lemma widen_step_trans:
"~ (y::ivl) \<sqsubseteq> x \<Longrightarrow> ~ z \<sqsubseteq> x \<nabla> y \<Longrightarrow> EX u. (x \<nabla> y) \<nabla> z = x \<nabla> u \<and> ~ u \<sqsubseteq> x"
by (metis widen_assoc preord_class.le_trans widen1)
*)
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_sort_nat_HSort2Count
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
datatype Heap = Node "Heap" "Nat" "Heap" | Nil
fun plus :: "Nat => Nat => Nat" where
"plus (Z) y = y"
| "plus (S z) y = S (plus z y)"
fun le :: "Nat => Nat => bool" where
"le (Z) y = True"
| "le (S z) (Z) = False"
| "le (S z) (S x2) = le z x2"
fun hmerge :: "Heap => Heap => Heap" where
"hmerge (Node z x2 x3) (Node x4 x5 x6) =
(if le x2 x5 then Node (hmerge x3 (Node x4 x5 x6)) x2 z else
Node (hmerge (Node z x2 x3) x6) x5 x4)"
| "hmerge (Node z x2 x3) (Nil) = Node z x2 x3"
| "hmerge (Nil) y = y"
(* the fun command could not finish the (termination) proof automatically, hence the use of function with pat_completeness below *)
function toList :: "Heap => Nat list" where
"toList (Node q y r) = cons2 y (toList (hmerge q r))"
| "toList (Nil) = nil2"
by pat_completeness auto
fun hinsert :: "Nat => Heap => Heap" where
"hinsert x y = hmerge (Node Nil x Nil) y"
fun toHeap2 :: "Nat list => Heap" where
"toHeap2 (nil2) = Nil"
| "toHeap2 (cons2 y xs) = hinsert y (toHeap2 xs)"
fun hsort2 :: "Nat list => Nat list" where
"hsort2 x = toList (toHeap2 x)"
fun count :: "'a => 'a list => Nat" where
"count x (nil2) = Z"
| "count x (cons2 z ys) =
(if (x = z) then plus (S Z) (count x ys) else count x ys)"
theorem property0 :
"((count x (hsort2 xs)) = (count x xs))"
oops
end
|
[STATEMENT]
lemma dvd_associated2:
fixes a::"'a::idom"
assumes ab: "a dvd b" and ba: "b dvd a" and a: "a\<noteq>0"
shows "\<exists>u. u dvd 1 \<and> a = u*b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
obtain k where a_kb: "a = k*b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>k. a = k * b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ab
[PROOF STATE]
proof (prove)
using this:
a dvd b
goal (1 subgoal):
1. (\<And>k. a = k * b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding dvd_def
[PROOF STATE]
proof (prove)
using this:
\<exists>k. b = a * k
goal (1 subgoal):
1. (\<And>k. a = k * b \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis Groups.mult_ac(2) ba dvdE)
[PROOF STATE]
proof (state)
this:
a = k * b
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
obtain q where b_qa: "b = q*a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>q. b = q * a \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using ba
[PROOF STATE]
proof (prove)
using this:
b dvd a
goal (1 subgoal):
1. (\<And>q. b = q * a \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding dvd_def
[PROOF STATE]
proof (prove)
using this:
\<exists>k. a = b * k
goal (1 subgoal):
1. (\<And>q. b = q * a \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis Groups.mult_ac(2) ab dvdE)
[PROOF STATE]
proof (state)
this:
b = q * a
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
have 1: "a = k*q*a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a = k * q * a
[PROOF STEP]
using a_kb b_qa
[PROOF STATE]
proof (prove)
using this:
a = k * b
b = q * a
goal (1 subgoal):
1. a = k * q * a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
a = k * q * a
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
hence "k*q = 1"
[PROOF STATE]
proof (prove)
using this:
a = k * q * a
goal (1 subgoal):
1. k * q = (1::'a)
[PROOF STEP]
using a
[PROOF STATE]
proof (prove)
using this:
a = k * q * a
a \<noteq> (0::'a)
goal (1 subgoal):
1. k * q = (1::'a)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
k * q = (1::'a)
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
k * q = (1::'a)
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
using 1
[PROOF STATE]
proof (prove)
using this:
k * q = (1::'a)
a = k * q * a
goal (1 subgoal):
1. \<exists>u. u dvd (1::'a) \<and> a = u * b
[PROOF STEP]
by (metis a_kb dvd_triv_left)
[PROOF STATE]
proof (state)
this:
\<exists>u. u dvd (1::'a) \<and> a = u * b
goal:
No subgoals!
[PROOF STEP]
qed
|
From stdpp Require Import fin_maps.
From iris.proofmode Require Import coq_tactics reduction.
From iris.proofmode Require Import tactics.
From iris.algebra Require Import auth gmap.
From iris.base_logic Require Export gen_heap.
From iris.base_logic.lib Require Export proph_map invariants wsat.
From iris.program_logic Require Import atomic.
From iris.program_logic Require Export weakestpre total_weakestpre.
From iris.program_logic Require Import ectx_lifting total_ectx_lifting.
From iris_cf.cf_lang Require Export lang.
From iris_cf.cf_lang Require Import notation.
Set Default Proof Using "Type".
Class heapG Σ := HeapG {
heapG_invG : invG Σ;
heapG_gen_heapG :> gen_heapG loc val Σ;
heapG_proph_mapG :> proph_mapG proph_id (val * val) Σ
}.
(* MARK: *)
Instance heapG_irisG `{!heapG Σ} : irisG cf_lang Σ := {
iris_invG := heapG_invG;
state_interp σ κs _ :=
(gen_heap_ctx σ.(heap) ∗ proph_map_ctx κs σ.(used_proph_id) ∗ ⌜no_continue_state σ⌝)%I;
fork_post _ := True%I;
}.
(** Override the notations so that scopes and coercions work out *)
Notation "l ↦{ q } v" := (mapsto (L:=loc) (V:=val) l q v%V)
(at level 20, q at level 50, format "l ↦{ q } v") : bi_scope.
Notation "l ↦ v" :=
(mapsto (L:=loc) (V:=val) l 1 v%V) (at level 20) : bi_scope.
Notation "l ↦{ q } -" := (∃ v, l ↦{q} v)%I
(at level 20, q at level 50, format "l ↦{ q } -") : bi_scope.
Notation "l ↦ -" := (l ↦{1} -)%I (at level 20) : bi_scope.
(* MARK: weakest pre with multi-post *)
Section multi_post.
Context `{!heapG Σ}.
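(* multi_post dispatches on the four ways an expression can terminate:
   a normal value, a break, a continue, or a return, each with its own postcondition. *)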
Definition multi_post (φn φb φc φr : val -> iPropI Σ) v : iPropI Σ :=
match v with
| SVal v => φn v
| SBreak v => φb v
| SContinue => φc $ LitV LitUnit
| SReturn v => φr v
end.
Definition top (v:val) : iPropI Σ := bi_pure True.
Definition bot (v:val) : iPropI Σ := bi_pure False.
Notation "'WP' e {{ φn } } {{ φb } } {{ φc } } {{ φr } }" :=
(wp NotStuck ⊤ e%E (multi_post φn φb φc φr)) (at level 20, e, φn, φb, φc, φr at level 200).
Notation "'{{' P } } e {{ φn } } {{ φb } } {{ φc } } {{ φr } }" :=
(uPred_persistently (P -∗ WP e {{ φn }} {{ φb }} {{ φc }} {{ φr }})) (at level 20).
(* MARK: control flow terminals *)
Lemma tac_wp_break v φ:
(φ v) ⊢
WP (EBreak $ Val v) {{ bot }} {{ φ }} {{ bot }} {{ bot }}.
Proof.
assert ((EBreak $ Val v) = (of_sval $ SBreak v)); auto.
rewrite H.
iIntros "Hφ".
rewrite wp_unfold /wp_pre language.to_of_val.
auto.
Qed.
Lemma tac_wp_continue φ:
(φ $ LitV LitUnit) ⊢
WP (EContinue) {{ bot }} {{ bot }} {{ φ }} {{ bot }}.
Proof.
assert (EContinue = (of_sval SContinue)); auto.
rewrite H.
iIntros "Hφ".
rewrite wp_unfold /wp_pre language.to_of_val.
auto.
Qed.
Lemma tac_wp_return v φ:
(φ v) ⊢
WP (EReturn $ Val v) {{ bot }} {{ bot }} {{ bot }} {{ φ }}.
Proof.
assert ((EReturn $ Val v) = (of_sval $ SReturn v)); auto.
rewrite H.
iIntros "Hφ".
rewrite wp_unfold /wp_pre language.to_of_val.
auto.
Qed.
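(* A hypothetical companion to the three lemmas above (the name tac_wp_val and this
   statement are not part of the original development): the normal-value case, handled
   by the same script as tac_wp_break, assuming of_sval (SVal v) computes to Val v. *)
Lemma tac_wp_val v φ:
  (φ v) ⊢
  WP (Val v) {{ φ }} {{ bot }} {{ bot }} {{ bot }}.
Proof.
  assert ((Val v) = (of_sval $ SVal v)); auto.
  rewrite H.
  iIntros "Hφ".
  rewrite wp_unfold /wp_pre language.to_of_val.
  auto.
Qed.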
(* MARK: loop *)
Lemma wp_loop_sval e0 e I φb φr s:
to_sval e = Some s ->
▷ {{ I }} LoopB e0 e0 {{φb}}{{bot}}{{bot}}{{φr}} -∗
WP e {{ λ v, I }} {{ φb }} {{ λ v, I }} {{ φr }} -∗
WP LoopB e0 e {{φb}}{{bot}}{{bot}}{{φr}}.
Proof.
iIntros (eq) "#IH Hwp".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
(* value case *)
- repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
iIntros (σ0 κ0 κs0 _) "Hs".
iApply fupd_frame_l.
iSplit.
+ iPureIntro. unfold reducible. simpl.
exists nil, (LoopB e0 e0), σ0, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (LoopB e0 v) (LoopB e0 e0)); auto.
apply LoopBS.
+ unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("Hwp" with "Hw").
repeat iMod "Hwp".
iDestruct "Hwp" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iIntros (? ? ? Hstep') "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw _]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (a = (LoopB e0 e0) /\ a0 = σ0 /\ κ0 = [] /\ a1 = []) as [? [? [? ?]]].
{
inversion Hstep'.
destruct K; inversion H; subst.
- simpl in *; subst.
inversion H1; subst; auto.
unfold expr_depth.singleton_ectx in H3.
destruct K; inversion H; subst; [inversion H3 |].
destruct K; inversion H7; simpl in *; subst.
inversion H0.
- destruct K; inversion H4; simpl in *; subst.
inversion H1; subst.
unfold expr_depth.singleton_ectx in H4.
destruct K; inversion H2; simpl in *; subst; inversion H4.
}
subst.
iFrame "Hs".
iSplitL; auto.
iApply "IH"; auto.
(* break case *)
- repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
iIntros (σ0 κ0 κs0 _) "Hs".
iApply fupd_frame_l.
iSplit.
+ iPureIntro. unfold reducible. simpl.
exists nil, (Val v), σ0, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (LoopB e0 (EBreak v)) (Val v)); auto.
apply LoopBBreakS.
+ unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("Hwp" with "Hw").
repeat iMod "Hwp".
iDestruct "Hwp" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iIntros (? ? ? Hstep') "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw _]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (a = (Val v) /\ a0 = σ0 /\ κ0 = [] /\ a1 = []) as [? [? [? ?]]].
{
inversion Hstep'.
destruct K; inversion H; subst.
- simpl in *; subst.
inversion H1; subst; auto.
unfold expr_depth.singleton_ectx in H3.
destruct K; inversion H; subst; [inversion H3 |].
destruct K; inversion H7; simpl in *; subst.
+ exfalso; apply H4, BreakImpenLoop.
+ destruct K; inversion H7; subst. inversion H0.
- destruct K; inversion H4; simpl in *; subst.
+ inversion H1; subst.
unfold expr_depth.singleton_ectx in H4.
destruct K; inversion H2; simpl in *; subst; [inversion H4 |].
destruct K; inversion H7; simpl in *; subst. inversion H3.
+ destruct K; inversion H2; simpl in *; subst.
inversion H1; subst.
unfold expr_depth.singleton_ectx in H5.
destruct K; inversion H2; simpl in *; subst; inversion H5.
}
subst.
iFrame "Hs".
iSplitL; auto.
iClear "#".
rewrite wp_unfold /wp_pre; simpl.
auto.
(* continue case *)
- repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
iIntros (σ0 κ0 κs0 _) "Hs".
iApply fupd_frame_l.
iSplit.
+ iPureIntro. unfold reducible. simpl.
exists nil, (LoopB e0 e0), σ0, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (LoopB e0 EContinue) (LoopB e0 e0)); auto.
apply LoopBContinueS.
+ unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("Hwp" with "Hw").
repeat iMod "Hwp".
iDestruct "Hwp" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iIntros (? ? ? Hstep') "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw _]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (a = (LoopB e0 e0) /\ a0 = σ0 /\ κ0 = [] /\ a1 = []) as [? [? [? ?]]].
{
inversion Hstep'.
destruct K; inversion H; subst.
- simpl in *; subst.
inversion H1; subst; auto.
unfold expr_depth.singleton_ectx in H3.
destruct K; inversion H; subst; [inversion H3 |].
destruct K; inversion H7; simpl in *; subst.
exfalso; apply H4, ContinueImpenLoop.
- destruct K; inversion H4; simpl in *; subst.
+ inversion H1; subst.
unfold expr_depth.singleton_ectx in H4.
destruct K; inversion H2; simpl in *; subst; inversion H4.
}
subst.
iFrame "Hs".
iSplitL; auto.
iApply "IH"; auto.
- repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
iIntros (σ0 κ0 κs0 _) "Hs".
iApply fupd_frame_l.
iSplit.
+ iPureIntro. unfold reducible. simpl.
exists nil, (EReturn v), σ0, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (LoopB e0 (EReturn v)) (EReturn v)); auto.
apply (CFCtxS (EReturn v) (LoopBCtx e0 EmptyCtx));
[apply return_is_cft | unfold expr_depth.singleton_ectx; auto |].
unfold not. intros.
inversion H; subst.
destruct K'; simpl in *; inversion H0; subst; try congruence.
destruct K; destruct K'; simpl in *; inversion H5; subst.
inversion H3; subst.
destruct K; destruct K'; simpl in *; inversion H2; subst; congruence.
+ unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("Hwp" with "Hw").
repeat iMod "Hwp".
iDestruct "Hwp" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iIntros (? ? ? Hstep') "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw _]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (a = (EReturn v) /\ a0 = σ0 /\ κ0 = [] /\ a1 = []) as [? [? [? ?]]].
{
inversion Hstep'.
destruct K; inversion H; subst.
- simpl in *; subst.
inversion H1; subst; auto.
unfold expr_depth.singleton_ectx in H3.
destruct K; inversion H; subst; [inversion H3 |].
destruct K; inversion H7; simpl in *; subst; auto.
+ destruct K; inversion H7; subst. inversion H0.
- destruct K; inversion H4; simpl in *; subst.
+ inversion H1; subst.
unfold expr_depth.singleton_ectx in H4.
destruct K; inversion H2; simpl in *; subst; [inversion H4 |].
destruct K; inversion H7; simpl in *; subst. inversion H3.
+ destruct K; inversion H2; simpl in *; subst.
inversion H1; subst.
unfold expr_depth.singleton_ectx in H5.
destruct K; inversion H2; simpl in *; subst; inversion H5.
}
subst.
iFrame "Hs".
iSplitL; auto.
iClear "#".
rewrite wp_unfold /wp_pre; simpl.
auto.
Qed.
Lemma tac_wp_loop e I φb φr:
{{ I }} e {{ λ v, I }} {{ φb }} {{ λ v, I }} {{ φr }} ⊢
{{ I }} (loop e) {{ φb }} {{ bot }} {{ bot }} {{ φr }}.
Proof.
iIntros "#Hbdy !# H".
iLöb as "IH0".
destruct (to_sval e) eqn:eq.
{
iSpecialize ("Hbdy" with "H").
iApply wp_loop_sval; [apply eq | auto | auto].
}
{
(* Main proof for the preservation case *)
iPoseProof ("Hbdy" with "H") as "H1".
remember e as e0.
rewrite -> Heqe0 in *.
rewrite <- Heqe0 at 2.
rewrite <- Heqe0 at 2.
rewrite <- Heqe0 at 3.
clear Heqe0.
iClear "Hbdy".
iRevert (e eq) "H1".
iLöb as "IH".
iIntros (e eq) "H1".
repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
rewrite eq.
iIntros (σ1 κ κs ?) "Hs".
iSpecialize ("H1" $! σ1 κ κs a with "Hs").
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("H1" with "Hw").
repeat iMod "H1".
repeat iModIntro.
iDestruct "H1" as "[Hw [Hphi [% H1]]]".
iFrame "Hw". iFrame "Hphi".
iSplitR; [iPureIntro | clear H].
{
destruct H as [κ' [e' [σ' [efs Hred]]]].
exists κ', (LoopB e0 e'), σ', efs.
inversion Hred; subst.
apply Ectx_step with (LoopBCtx e0 (comp_ectx K EmptyCtx)) e1' e2'; simpl;
[rewrite <- fill_comp | rewrite <- fill_comp |]; auto.
}
iIntros (e2 σ2 efs0 Hstep) "Hw".
(* MARK: use fill_step_inv instead of the proof below when using the singleton version *)
replace (LoopB e0 e) with (fill (LoopBCtx e0 EmptyCtx) e) in Hstep; auto.
pose proof fill_step_inv _ _ _ _ _ _ _ eq Hstep as [e1 [? Hred]]; subst; simpl.
(* DONE: may be used instead of congruence lemma *)
(* assert (exists e1, prim_step e σ1 κ e1 σ2 efs0 /\
(e2 = LoopB e0 e1 \/ exists v, e1 = e2 /\ e1 = EReturn $ Val v)) as [e1 [Hred ?]].
{
inversion Hstep.
destruct K; inversion H; simpl in *; subst.
- inversion H1; subst; inversion eq.
unfold expr_depth.singleton_ectx in H3.
destruct K; inversion H; subst; [inversion H3 |].
clear H H3 H6.
destruct e2'; inversion H0; subst.
+ exfalso. apply H4. apply BreakImpenLoop.
+ exfalso. apply H4. apply ContinueImpenLoop.
+ assert (¬ impenetrable_ectx (return v) K).
{
intros H. apply H4.
replace (LoopBCtx e0 K) with (comp_ectx (LoopBCtx e0 EmptyCtx) K); auto.
eapply CompImpenCtx; auto.
}
exists (EReturn v).
split.
* apply Ectx_step with EmptyCtx (fill K (EReturn v)) (EReturn v); auto.
apply CFCtxS; auto.
unfold expr_depth.singleton_ectx.
destruct_inversion K eq; eauto.
inversion eq.
* right. eauto.
- exists (fill K e2').
split.
+ apply Ectx_step with K e1' e2'; auto.
+ auto.
} *)
iSpecialize ("H1" $! e1 σ2 efs0 Hred with "Hw").
(* DONE: the congruence lemma is no longer needed
assert (κ' = κ /\ e2 = LoopB e0 e' /\ σ2 = σ' /\ efs0 = efs) as [? [? [? ?]]].
{
apply (fill_step _ _ _ _ _ _ (LoopBCtx e0 EmptyCtx)) in Hred.
simpl in Hred.
pose proof prim_step_congruence _ _ _ _ _ _ _ _ _ _ Hred Hstep.
naive_solver.
}
subst.
iSpecialize ("H1" $! e' σ' efs Hred with "Hw"). *)
repeat iMod "H1".
repeat iModIntro.
iDestruct "H1" as "[Hw [Hphi H1]]".
iFrame "Hw". iFrame "Hphi".
iNext.
iIntros "Hw".
iSpecialize ("H1" with "Hw").
repeat iMod "H1".
repeat iModIntro.
iDestruct "H1" as "[Hw [Hphi H1]]".
iFrame "Hw". iFrame "Hphi".
iDestruct "H1" as "[Hs [Hwp Hefs]]".
iFrame "Hs". iSplitR "Hefs"; auto.
destruct (to_sval e1) eqn:eq'; [| iApply "IH"; auto].
iApply wp_loop_sval; [apply eq' | auto | auto].
(* destruct H.
- subst.
destruct (to_sval e1) eqn:eq'; [| iApply "IH"; auto].
iApply wp_loop_sval; [apply eq' | auto | auto].
- destruct H as [v [? ?]]; subst.
repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
auto. *)
}
Qed.
Lemma wp_bind_sval s e K φn φb φc φr:
to_sval e = Some s ->
WP e {{ λ v, (WP (fill K (Val v)) {{ φn }} {{ φb }} {{ φc }} {{ φr }}) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EBreak v) K)) (φb v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx EContinue K)) (φc v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EReturn v) K)) (φr v) }}
⊢ WP (fill K e) {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (eq) "H".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
{
rewrite wp_unfold /wp_pre; simpl.
iMod "H". auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
iMod "H".
iDestruct "H" as "[% H]".
iRevert (K H).
iLöb as "IH".
iIntros (K H).
rewrite wp_unfold /wp_pre; simpl.
destruct (to_sval (fill K (break v))) eqn:eq;
[destruct K; inversion eq; try congruence; try (destruct K; inversion eq); auto |].
iIntros (σ1 κ κs _) "Hs".
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "[Hw Htop]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
apply cf_reducible; auto.
apply break_is_cft.
(* unfold reducible.
exists nil, (EBreak $ Val v), σ1, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill K (break v)) (break v)); auto.
apply CFCtxS; auto.
- apply break_is_cft.
- destruct K; inversion eq; unfold expr_depth.singleton_ectx; eauto. *)
}
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
pose proof break_penetrable_preservation _ _ _ _ _ _ _ H Hstep as [? [? [? [K' [? ?]]]]].
subst.
iFrame "Hs".
iSplitL; auto.
iApply ("IH" with "[H] []"); auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
iMod "H".
iDestruct "H" as "[% H]".
iRevert (K H).
iLöb as "IH".
iIntros (K H).
rewrite wp_unfold /wp_pre; simpl.
destruct (to_sval (fill K EContinue)) eqn:eq;
[destruct K; inversion eq; try congruence; try (destruct K; inversion eq); auto |].
iIntros (σ1 κ κs _) "Hs".
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "[Hw Htop]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
apply cf_reducible; auto.
apply continue_is_cft.
(* unfold reducible.
exists nil, (EContinue), σ1, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill K EContinue) EContinue); auto.
apply CFCtxS; auto.
- apply continue_is_cft.
- unfold not. intros; subst.
inversion eq. *)
}
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
pose proof continue_penetrable_preservation _ _ _ _ _ _ H Hstep as [? [? [? [K' [? ?]]]]].
subst.
iFrame "Hs".
iSplitL; auto.
iApply ("IH" with "[H] []"); auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
iMod "H".
iDestruct "H" as "[% H]".
iRevert (K H).
iLöb as "IH".
iIntros (K H).
rewrite wp_unfold /wp_pre; simpl.
destruct (to_sval (fill K (EReturn v))) eqn:eq;
[destruct K; inversion eq; try congruence; try (destruct K; inversion eq); auto |].
iIntros (σ1 κ κs _) "Hs".
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "[Hw Htop]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
apply cf_reducible; auto.
apply return_is_cft.
(* unfold reducible.
exists nil, (EReturn v), σ1, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill K (EReturn v)) (EReturn v)); auto.
apply CFCtxS; auto.
- apply return_is_cft.
- unfold not. intros; subst.
inversion eq. *)
}
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
pose proof return_penetrable_preservation _ _ _ _ _ _ _ H Hstep as [? [? [? [K' [? ?]]]]].
subst.
iFrame "Hs".
iSplitL; auto.
iApply ("IH" with "[H] []"); auto.
}
Qed.
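(* Converse direction of [wp_bind_sval]; the proof is left unfinished
   (it ends with [Abort]) and the statement is not used below. *)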
Lemma tac_wp_bind_inv_sval s e K φn φb φc φr:
to_sval e = Some s ->
WP (fill K e) {{ φn }} {{ φb }} {{ φc }} {{ φr }} ⊢
WP e {{ λ v, (WP (fill K (Val v)) {{ φn }} {{ φb }} {{ φc }} {{ φr }}) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EBreak v) K)) (φb v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx EContinue K)) (φc v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EReturn v) K)) (φr v) }}.
Proof.
iIntros (eq) "H".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
{
repeat rewrite wp_unfold.
rewrite <- wp_unfold.
rewrite /wp_pre; simpl.
auto.
}
{
iRevert (K) "H".
iLöb as "IH".
iIntros (K) "H".
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
destruct (to_sval (fill K (break v))) eqn:eq.
{
destruct_inversion K eq; admit.
}
{
admit.
}
Abort.
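(* General bind rule: proved by Löb induction over the reduction of [e],
   falling back on [wp_bind_sval] once [e] has reduced to a structured value. *)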
Lemma tac_wp_bind e K φn φb φc φr:
WP e {{ λ v, (WP (fill K (Val v)) {{ φn }} {{ φb }} {{ φc }} {{ φr }}) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EBreak v) K)) (φb v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx EContinue K)) (φc v) }}
{{ λ v, uPred_and (uPred_pure (~ impenetrable_ectx (EReturn v) K)) (φr v) }}
⊢ WP (fill K e) {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros "H".
destruct (to_sval e) eqn:eq.
{
iApply (wp_bind_sval s); auto.
}
{
iRevert (e eq) "H".
iLöb as "IH".
iIntros (e eq) "H".
repeat rewrite wp_unfold.
rewrite /wp_pre; simpl.
rewrite eq.
pose proof fill_not_sval K _ eq as eq'.
rewrite eq'.
iIntros (σ1 κ κs ?) "Hs".
iSpecialize ("H" $! σ1 κ κs a with "Hs").
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Hphi [% H]]]".
iFrame "Hw".
iFrame "Hphi".
iSplitR;
[iPureIntro; apply my_reducible_fill; auto |].
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
assert (exists e1, e2 = fill K e1 /\ prim_step e σ1 κ e1 σ2 efs) as [e1 [? Hred]].
{ apply fill_step_inv; auto. } subst.
(* destruct H as [κ' [e' [σ' [efs' H]]]].
(* DONE: congruence lemma is no longer needed *)
pose proof prim_step_congruence _ _ _ _ _ _ _ _ _ _ Hstep
(fill_step _ _ _ _ _ _ K H) as [? [? [? ?]]].
subst. *)
iCombine ("Hw Hphi") as "Hw".
iSpecialize ("H" $! e1 σ2 efs Hred with "Hw").
repeat iMod "H".
iDestruct "H" as "[Hw [Hphi H]]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iNext.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Htop [Hs [Hwp Hefs]]]]".
iFrame "Hw". iFrame "Htop". iFrame "Hs".
iSplitR "Hefs"; auto.
destruct (to_sval e1) eqn:eq'';
[iApply (wp_bind_sval s); auto |].
iApply "IH"; auto.
}
Qed.
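(* Call rule for structured values: [Call] acts as a function boundary, so a
   [return v] of the callee becomes normal termination of [Call e] (the
   callee's return postcondition turns into the caller's normal one), while
   break and continue must be impossible ([bot]). *)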
Lemma wp_call_sval s e φ:
to_sval e = Some s ->
WP e {{ φ }} {{ bot }} {{ bot }} {{ φ }} ⊢
WP Call e {{ φ }} {{ bot }} {{ bot }} {{ bot }}.
Proof.
iIntros (eq) "H".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros (σ1 κ κs _) "Hs Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
iDestruct "H" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
exists nil, (Val v), σ1, nil.
apply Ectx_step with EmptyCtx (Call v) (Val v); eauto.
constructor.
}
iIntros (e2 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (e2 = Val v /\ κ = [] /\ σ2 = σ1 /\ efs = []) as [? [? [? ?]]]; subst.
{
inversion H; subst.
destruct K; simpl in *; inversion H0; subst.
- inversion H2; subst; auto.
destruct_inversion K H0.
+ inversion H3.
+ destruct_inversion K H7; inversion H3.
- destruct_inversion K H3. inversion H2; subst.
destruct_inversion K H3. inversion H4.
}
iFrame "Hs".
iSplitL; auto.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros (? ? ? ?) "? Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
iDestruct "H" as "[? [? H]]".
iExFalso.
auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros (? ? ? ?) "? Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
iDestruct "H" as "[? [? H]]".
iExFalso.
auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros (σ1 κ κs _) "Hs Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
iDestruct "H" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
exists nil, (Val v), σ1, nil.
apply Ectx_step with EmptyCtx (Call (return v)) (Val v); eauto.
constructor.
}
iIntros (e2 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (e2 = Val v /\ κ = [] /\ σ2 = σ1 /\ efs = []) as [? [? [? ?]]]; subst.
{
inversion H; subst.
destruct K; simpl in *; inversion H0; subst.
- inversion H2; subst; auto.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; [inversion H4 |].
destruct_inversion K H7.
+ exfalso. apply H5; constructor.
+ destruct_inversion K H8. inversion H3.
- destruct_inversion K H3.
+ inversion H2; subst.
unfold expr_depth.singleton_ectx in H5.
destruct_inversion K H3; [inversion H5 |].
destruct_inversion K H8. inversion H4.
+ destruct_inversion K H4. inversion H2; subst.
destruct_inversion K H4. inversion H5.
}
iFrame "Hs".
iSplitL; auto.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
Qed.
Lemma tac_wp_call e φ:
WP e {{ φ }} {{ bot }} {{ bot }} {{ φ }} ⊢
WP Call e {{ φ }} {{ bot }} {{ bot }} {{ bot }}.
Proof.
iIntros "H".
destruct (to_sval e) eqn:eq.
{
iApply (wp_call_sval s); auto.
}
{
iRevert (e eq) "H".
iLöb as "IH".
iIntros (e eq) "H".
repeat rewrite wp_unfold.
rewrite /wp_pre; simpl.
rewrite eq.
iIntros (σ1 κ κs ?) "Hs".
iSpecialize ("H" $! σ1 κ κs a with "Hs").
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Hphi [% H]]]".
iFrame "Hw".
iFrame "Hphi".
iSplitR.
{
iPureIntro.
replace (Call e) with (fill (CallCtx EmptyCtx) e); auto.
apply my_reducible_fill; auto.
}
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
assert (exists e1, e2 = fill (CallCtx EmptyCtx) e1 /\ prim_step e σ1 κ e1 σ2 efs) as [e1 [? Hred]]; subst.
{
replace (Call e) with (fill (CallCtx EmptyCtx) e); auto.
apply fill_step_inv; auto.
}
simpl in *.
(* destruct H as [κ' [e' [σ' [efs' H]]]].
(* DONE: congruence lemma is no longer needed *)
pose proof prim_step_congruence _ _ _ _ _ _ _ _ _ _ Hstep
(fill_step _ _ _ _ _ _ K H) as [? [? [? ?]]].
subst. *)
iCombine ("Hw Hphi") as "Hw".
iSpecialize ("H" $! e1 σ2 efs Hred with "Hw").
repeat iMod "H".
iDestruct "H" as "[Hw [Hphi H]]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iNext.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Htop [Hs [Hwp Hefs]]]]".
iFrame "Hw". iFrame "Htop". iFrame "Hs".
iSplitR "Hefs"; auto.
destruct (to_sval e1) eqn:eq'';
[iApply (wp_call_sval s); auto |].
iApply "IH"; auto.
}
Qed.
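(* Rule of consequence: each of the four postconditions may be weakened
   pointwise; first for structured values, then for arbitrary expressions by
   Löb induction in [tac_wp_consequence]. *)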
Lemma wp_consequence_sval s e φn φb φc φr φn' φb' φc' φr':
to_sval e = Some s ->
(forall v, φn v ⊢ φn' v) ->
(forall v, φb v ⊢ φb' v) ->
(forall v, φc v ⊢ φc' v) ->
(forall v, φr v ⊢ φr' v) ->
WP e {{ φn }} {{ φb }} {{ φc }} {{ φr }}
⊢ WP e {{ φn' }} {{ φb' }} {{ φc' }} {{ φr' }}.
Proof.
iIntros (eq ????) "H".
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
rewrite eq.
iMod "H". iModIntro.
destruct s; apply of_to_sval in eq; simpl in eq; subst; simpl;
[by iApply H | by iApply H0 | by iApply H1 | by iApply H2].
Qed.
Lemma tac_wp_consequence e φn φb φc φr φn' φb' φc' φr':
(forall v, φn v ⊢ φn' v) ->
(forall v, φb v ⊢ φb' v) ->
(forall v, φc v ⊢ φc' v) ->
(forall v, φr v ⊢ φr' v) ->
WP e {{ φn }} {{ φb }} {{ φc }} {{ φr }}
⊢ WP e {{ φn' }} {{ φb' }} {{ φc' }} {{ φr' }}.
Proof.
iIntros (????) "H".
iRevert (e) "H".
iLöb as "IH".
iIntros (e) "H".
destruct (to_sval e) eqn:eq.
{
pose proof wp_consequence_sval _ _ _ _ _ _ _ _ _ _ eq H H0 H1 H2.
iApply H3; auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
rewrite eq.
iIntros (σ1 κ κs n) "Hs".
iSpecialize ("H" $! _ _ _ n with "Hs").
iMod "H" as "[Hred H]". iModIntro.
iFrame "Hred".
iIntros (e2 σ2 efs) "Hstep".
iSpecialize ("H" $! _ _ _ with "Hstep").
iMod "H". iModIntro. iNext.
iMod "H" as "[Hs [Hw Hefs]]". iModIntro.
iFrame "Hs". iSplitR "Hefs"; auto.
iApply "IH"; auto.
}
Qed.
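(* Lifting lemmas analogous to Iris' standard [wp_lift_step] rules, adapted to
   the four postconditions and to a state interpretation that additionally
   carries the pure fact [no_continue_state]. *)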
Lemma wp_lift_step e1 φn φb φc φr:
to_sval e1 = None →
(∀ (σ1 : state) (κ κs : list observation),
gen_heap_ctx (heap σ1)
∗ proph_map_ctx (κ ++ κs) (used_proph_id σ1) ∗ ⌜no_continue_state σ1⌝
={⊤,∅}=∗ ⌜reducible e1 σ1⌝
∗ ▷ (∀ (e2 : expr) (σ2 : state) (efs : list expr),
⌜prim_step e1 σ1 κ e2 σ2 efs⌝
={∅,⊤}=∗ (gen_heap_ctx (heap σ2)
∗ proph_map_ctx κs (used_proph_id σ2)
∗ ⌜no_continue_state σ2⌝)
            ∗ WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }}
∗ ([∗ list] ef ∈ efs, WP ef {{ _, True }})))
⊢ WP e1 {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (?) "H".
rewrite wp_unfold /wp_pre; simpl.
destruct (to_sval e1); subst; [inversion H |].
iIntros (????) "Hσ".
iMod ("H" with "Hσ") as "[$ H]".
iIntros "!> * % !> !>".
by iApply "H".
Qed.
Lemma wp_lift_pure_step_no_fork E' e1 φn φb φc φr:
(forall σ1, reducible e1 σ1) →
(∀ κ σ1 e2 σ2 efs, prim_step e1 σ1 κ e2 σ2 efs → κ = [] ∧ σ2 = σ1 ∧ efs = []) →
(|={⊤,E'}▷=> ∀ κ e2 efs σ, ⌜prim_step e1 σ κ e2 σ efs⌝ → WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }})
⊢ WP e1 {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (Hsafe Hstep) "H". iApply wp_lift_step.
{ specialize (Hsafe inhabitant). eauto using reducible_not_val. }
iIntros (σ1 κ κs) "Hσ". iMod "H".
iMod fupd_intro_mask' as "Hclose"; last iModIntro; first by set_solver. iSplit.
{ iPureIntro. auto. }
iNext.
iIntros (e2 σ2 efs ?).
destruct (Hstep κ σ1 e2 σ2 efs) as (-> & <- & ->); auto.
iMod "Hclose" as "_". iMod "H". iModIntro.
iDestruct ("H" with "[//]") as "H". simpl. iFrame.
Qed.
Lemma wp_lift_pure_det_step_no_fork E' e1 e2 φn φb φc φr:
(forall σ1, reducible e1 σ1) →
(∀ σ1 κ e2' σ2 efs', prim_step e1 σ1 κ e2' σ2 efs' →
κ = [] ∧ σ2 = σ1 ∧ e2' = e2 ∧ efs' = []) →
(|={⊤,E'}▷=> WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }})
⊢ WP e1 {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (? Hpuredet) "H". iApply (wp_lift_pure_step_no_fork E'); try done.
{ naive_solver. }
iApply (step_fupd_wand with "H"); iIntros "H".
iIntros (κ e' efs' σ (_&?&->&?)%Hpuredet); auto.
Qed.
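(* Pure-step rules: given [PureExec φ n e1 e2], [n] laters (or step-taking
   updates) may be stripped before reasoning about [e2]. *)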
Lemma wp_pure_step_fupd E' e1 e2 φ n φn φb φc φr :
PureExec φ n e1 e2 →
φ →
(|={⊤,E'}▷=>^n WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }})
⊢ WP e1 {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (Hexec Hφ) "Hwp".
unfold PureExec in Hexec.
specialize (Hexec Hφ).
iInduction Hexec as [e|n e1 e2 e3 [Hsafe ?]] "IH"; simpl; first done.
simpl in *.
iApply wp_lift_pure_det_step_no_fork.
- intros σ. specialize (Hsafe σ).
unfold reducible_no_obs in Hsafe.
unfold reducible.
destruct Hsafe as [e' [sigma' [efs H]]].
exists nil, e', sigma', efs. auto.
- done.
- by iApply (step_fupd_wand with "Hwp").
Qed.
Lemma wp_pure_step_later e1 e2 φ n φn φb φc φr :
PureExec φ n e1 e2 →
φ →
▷^n WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }} ⊢ WP e1 {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
intros Hexec ?. rewrite -wp_pure_step_fupd //. clear Hexec.
induction n as [|n IH]; by rewrite //= -step_fupd_intro // IH.
Qed.
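(* Sequencing for structured values. [Seq e1 e2] is sugar for
   [App (Rec BAnon BAnon e2) e1]; once [e1] is a value the redex reaches [e2]
   in two pure steps, which is why the normal postcondition carries two
   laters. Break/continue/return of [e1] simply discard [e2]. *)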
Lemma wp_seq_sval s e1 e2 φn φb φc φr:
to_sval e1 = Some s ->
WP e1 {{ λ v, ▷ ▷ WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }} }} {{ φb }} {{ φc }} {{ φr }} ⊢
WP (Seq e1 e2) {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros (eq) "H".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
{
rewrite wp_unfold /wp_pre; simpl.
repeat rewrite wp_unfold.
rewrite <- wp_unfold at 1.
rewrite /wp_pre; simpl.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros (σ1 κ κs _) "Hs Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
iDestruct "H" as "[Hw [Htop H]]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
exists nil, (App (Val $ RecV BAnon BAnon e2) (Val v)), σ1, nil.
apply Ectx_step with (AppLCtx EmptyCtx v) (Rec BAnon BAnon e2) (Val $ RecV BAnon BAnon e2); auto.
constructor.
}
iIntros (e0 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (σ1 = σ2 /\ κ = [] /\ efs = [] /\ e0 = (App (Val $ RecV BAnon BAnon e2) (Val v))) as [? [? [? ?]]]; subst.
{
inversion H.
destruct_inversion K H0.
- inversion H2; subst.
destruct_inversion K H0.
+ inversion H1.
+ unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H4. simpl in *; subst.
inversion H1.
+ unfold expr_depth.singleton_ectx in H4. simpl in H4.
destruct_inversion K H4. simpl in *; subst. inversion H1.
- destruct_inversion K H4.
inversion H2; subst.
+ simpl. auto.
+ destruct_inversion K H3.
unfold expr_depth.singleton_ectx in H5. inversion H5.
- destruct_inversion K H5.
inversion H2; subst.
destruct_inversion K H3.
inversion H4.
}
iFrame.
iSplitL; auto.
assert (PureExec True 1 ((λ: <>, e2)%V v) e2).
{
unfold PureExec.
intros _.
eapply relations.nsteps_l; [| apply relations.nsteps_O].
eapply Build_pure_step; simpl.
- intros. unfold reducible_no_obs.
exists e2, σ1, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx ((λ: <>, e2)%V v) e2); auto.
constructor; auto.
- intros.
inversion H0.
destruct_inversion K H1.
+ inversion H3; subst; auto.
unfold expr_depth.singleton_ectx in H5.
destruct_inversion K H1; simpl in *; try congruence.
* destruct_inversion K H5. simpl in *. subst.
inversion H2.
* destruct_inversion K H5. simpl in *. subst.
inversion H2.
+ destruct_inversion K H5.
inversion H3; subst.
destruct_inversion K H4.
inversion H5.
+ destruct_inversion K H6.
inversion H3; subst.
destruct_inversion K H4.
inversion H5.
}
iApply wp_pure_step_later; auto.
}
1, 2, 3:
rewrite wp_unfold /wp_pre; simpl;
rewrite wp_unfold /wp_pre; simpl;
unfold fupd;
unfold bi_fupd_fupd; simpl;
unfold uPred_fupd;
rewrite seal_eq;
unfold uPred_fupd_def;
iIntros (σ1 κ κs _) "Hs Hw";
iSpecialize ("H" with "Hw");
repeat iMod "H";
iDestruct "H" as "[Hw [Htop H]]";
iApply except_0_bupd;
iModIntro;
iApply bupd_frame_l;
iFrame "Hw";
iApply bupd_frame_r;
iPoseProof ownE_empty as "Hown_phi";
iFrame "Hown_phi".
{
iSplitR.
{
iPureIntro.
exists nil, (EBreak v), σ1, nil.
pose proof Ectx_step.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill (AppRCtx (Rec BAnon BAnon e2) EmptyCtx) (EBreak v)) (EBreak v)); auto.
constructor.
- apply break_is_cft.
- unfold expr_depth.singleton_ectx. auto.
- unfold not; inversion 1; subst; simpl in *.
destruct_inversion K' H1; try congruence.
destruct_inversion K' H6.
inversion H4; subst; simpl in *.
destruct_inversion K' H5; simpl in *; try congruence.
}
iIntros (e0 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (σ1 = σ2 /\ κ = [] /\ efs = [] /\ e0 = EBreak (Val v)) as [? [? [? ?]]]; subst.
{
inversion H; subst; simpl in *.
destruct_inversion K H0.
+ inversion H2; subst; simpl in *.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; simpl in *; try congruence.
destruct_inversion K H8; auto.
destruct_inversion K H7. inversion H3.
+ destruct_inversion K H4.
- sval_head_step_inv H2.
- destruct_inversion K H3.
sval_head_step_inv H2.
}
iFrame.
iSplitL; auto.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
{
iSplitR.
{
iPureIntro.
exists nil, EContinue, σ1, nil.
pose proof Ectx_step.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill (AppRCtx (Rec BAnon BAnon e2) EmptyCtx) EContinue) EContinue); auto.
constructor.
- apply continue_is_cft.
- unfold expr_depth.singleton_ectx. auto.
- unfold not; inversion 1; subst; simpl in *.
destruct_inversion K' H1; try congruence.
destruct_inversion K' H6.
inversion H4; subst; simpl in *.
destruct_inversion K' H5; simpl in *; try congruence.
}
iIntros (e0 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (σ1 = σ2 /\ κ = [] /\ efs = [] /\ e0 = EContinue) as [? [? [? ?]]]; subst.
{
inversion H; subst; simpl in *.
destruct_inversion K H0.
+ inversion H2; subst; simpl in *.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; simpl in *; try congruence.
destruct_inversion K H8; auto.
+ destruct_inversion K H4.
sval_head_step_inv H2.
}
iFrame.
iSplitL; auto.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
{
iSplitR.
{
iPureIntro.
exists nil, (EReturn v), σ1, nil.
pose proof Ectx_step.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx (fill (AppRCtx (Rec BAnon BAnon e2) EmptyCtx) (EReturn v)) (EReturn v)); auto.
constructor.
- apply return_is_cft.
- unfold expr_depth.singleton_ectx. auto.
- unfold not; inversion 1; subst; simpl in *.
destruct_inversion K' H1; try congruence.
destruct_inversion K' H6.
inversion H4; subst; simpl in *.
destruct_inversion K' H5; simpl in *; try congruence.
}
iIntros (e0 σ2 efs H) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Hphi".
iIntros "!# [Hw Hphi]".
repeat iModIntro.
iFrame "Hw". iFrame "Htop".
assert (σ1 = σ2 /\ κ = [] /\ efs = [] /\ e0 = EReturn (Val v)) as [? [? [? ?]]]; subst.
{
inversion H; subst; simpl in *.
destruct_inversion K H0.
+ inversion H2; subst; simpl in *.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; simpl in *; try congruence.
destruct_inversion K H8; auto.
destruct_inversion K H7. inversion H3.
+ destruct_inversion K H4.
- sval_head_step_inv H2.
- destruct_inversion K H3.
sval_head_step_inv H2.
}
iFrame.
iSplitL; auto.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
Qed.
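(* (v ;; e2) reduces to [e2] in exactly two deterministic pure steps: the
   [Rec] is first evaluated to a value inside the [AppLCtx], then the
   application β-reduces. *)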
Lemma seq_pure_exec (v : val) e2:
PureExec True 2 (v;; e2) e2.
Proof.
unfold PureExec.
intros _.
eapply relations.nsteps_l with ((λ: <>, e2)%V v).
{
apply Build_pure_step.
+ intros. unfold reducible_no_obs.
exists ((λ: <>, e2)%V v), σ1, nil.
apply (Ectx_step _ _ _ _ _ _ (AppLCtx EmptyCtx v) (Rec BAnon BAnon e2) (RecV BAnon BAnon e2)); auto.
constructor.
+ intros.
inversion H; subst; simpl in *.
destruct_inversion K H0; simpl in *.
- inversion H2; subst; simpl in *.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; simpl in *; try congruence.
* destruct_inversion K H7. inversion H3.
* destruct_inversion K H8; inversion H3.
- destruct_inversion K H0.
inversion H2; subst; simpl in *; auto.
unfold expr_depth.singleton_ectx in H5.
destruct_inversion K H1; simpl in *; try congruence.
- destruct_inversion K H4.
sval_head_step_inv H2.
}
eapply relations.nsteps_l; [| apply relations.nsteps_O].
{
apply Build_pure_step.
+ intros. unfold reducible_no_obs.
exists e2, σ1, nil.
apply (Ectx_step _ _ _ _ _ _ EmptyCtx ((λ: <>, e2)%V v) e2); auto.
constructor. auto.
+ intros.
inversion H; subst; simpl in *.
destruct_inversion K H0; simpl in *.
- inversion H2; subst; simpl in *; auto.
unfold expr_depth.singleton_ectx in H4.
destruct_inversion K H0; simpl in *; try congruence.
* destruct_inversion K H7. inversion H3.
* destruct_inversion K H8; inversion H3.
- destruct_inversion K H0.
inversion H2; subst; simpl in *; auto.
unfold expr_depth.singleton_ectx in H5.
destruct_inversion K H1; simpl in *; try congruence.
- destruct_inversion K H4.
sval_head_step_inv H2.
}
Qed.
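(* General sequencing rule, obtained from the bind rule with evaluation
   context [AppRCtx (Rec BAnon BAnon e2) EmptyCtx], the rule of consequence,
   and the two pure steps established above. *)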
Lemma tac_wp_seq e1 e2 φn φb φc φr:
WP e1 {{ λ v, ▷ ▷ WP e2 {{ φn }} {{ φb }} {{ φc }} {{ φr }} }} {{ φb }} {{ φc }} {{ φr }} ⊢
WP (Seq e1 e2) {{ φn }} {{ φb }} {{ φc }} {{ φr }}.
Proof.
iIntros "H".
destruct (to_sval e1) eqn:eq.
{
iApply (wp_seq_sval s); auto.
}
{
replace (App (Rec BAnon BAnon e2) e1) with (fill (AppRCtx (Rec BAnon BAnon e2) EmptyCtx) e1); auto.
iApply tac_wp_bind.
iApply (tac_wp_consequence with "H").
{
iIntros (?) "H".
pose proof seq_pure_exec v e2. simpl.
pose proof wp_pure_step_later (v;; e2) e2 _ 2 φn φb φc φr H.
simpl in H0.
iApply H0; auto.
}
{
simpl.
iIntros (v) "H".
iSplit; auto.
iPureIntro.
unfold not.
intros H; inversion H; subst; simpl in *.
destruct_inversion K' H0; try congruence.
destruct_inversion K' H5; try congruence.
inversion H3; subst; simpl in *.
destruct_inversion K' H4; simpl in H5; congruence.
}
{
simpl.
iIntros (v) "H".
iSplit; auto.
iPureIntro.
unfold not.
intros H; inversion H; subst; simpl in *.
destruct_inversion K' H0; try congruence.
destruct_inversion K' H5; try congruence.
inversion H3; subst; simpl in *.
destruct_inversion K' H4; simpl in H5; congruence.
}
{
simpl.
iIntros (v) "H".
iSplit; auto.
iPureIntro.
unfold not.
intros H; inversion H; subst; simpl in *.
destruct_inversion K' H0; try congruence.
destruct_inversion K' H5; try congruence.
inversion H3; subst; simpl in *.
destruct_inversion K' H4; simpl in H5; congruence.
}
}
Qed.
Section strong_no_continue.
Import NoContinueHeadPreserve.
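(* Reduction preserves the absence of [continue], both in the expression and
   in the heap; this justifies [wp_no_continue] below, which lets the continue
   postcondition of a continue-free expression be replaced arbitrarily. *)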
Lemma no_continue_fill K e:
no_continue (fill K e) ->
no_continue e.
Proof.
induction K; intros; simpl; auto;
try (destruct H; eauto);
try (destruct H0; eauto).
Qed.
Lemma no_continue_fill_subst K e1 e2:
no_continue (fill K e1) ->
no_continue e2 ->
no_continue (fill K e2).
Proof.
induction K; intros; simpl; auto;
try (destruct H; eauto);
try (destruct H1; eauto).
Qed.
Lemma no_continue_preserve e1 σ1 κ e2 σ2 efs :
no_continue_state σ1 ->
prim_step e1 σ1 κ e2 σ2 efs ->
no_continue e1 -> no_continue e2 /\ no_continue_state σ2.
Proof.
intro HStateNoContinue.
intros.
inversion H; subst.
pose proof no_continue_fill _ _ H0.
split.
- apply no_continue_preserve_head in H3; auto.
apply (no_continue_fill_subst _ _ _ H0 H3).
- apply no_continue_state_preserve_head in H3; auto.
Qed.
Lemma wp_no_continue_sval e s φn φb φr φ1 φ2:
to_sval e = Some s ->
no_continue e ->
WP e {{ φn }} {{ φb }} {{ φ1 }} {{ φr }} ⊢
WP e {{ φn }} {{ φb }} {{ φ2 }} {{ φr }}.
Proof.
iIntros (eq H) "H".
destruct s; apply of_to_sval in eq; simpl in eq; subst.
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
{
inversion H.
}
{
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
auto.
}
Qed.
(* Definition state_has_no_continue v : iProp Σ :=
(∀ l: loc, l ↦ v). *)
Lemma wp_no_continue e φn φb φr φ1 φ2:
no_continue e ->
(* (∀ l v, l ↦ v -∗ ⌜no_continue_val v⌝) ∗ *)
(* (∀ σ, gen_heap_ctx (heap σ) -∗ (gen_heap_ctx (heap σ) ∗ ⌜no_continue_state σ⌝ ))%I ∗ *)
(* also require heap to be well formed *)
WP e {{ φn }} {{ φb }} {{ φ1 }} {{ φr }} ⊢
WP e {{ φn }} {{ φb }} {{ φ2 }} {{ φr }}.
Proof.
iIntros (H) "H".
destruct (to_sval e) eqn:eq.
{
iApply (wp_no_continue_sval e _ φn φb φr φ1 φ2 eq H with "H").
}
{
iRevert (e eq H) "H".
iLöb as "IH".
iIntros (e eq H) "H".
rewrite wp_unfold /wp_pre; simpl.
rewrite wp_unfold /wp_pre; simpl.
rewrite eq.
iIntros (σ1 κ κs ?) "(Hheap & Hproph & #HNCS)".
(* iSpecialize ("HNCS" $! σ1 with "Hheap"). *)
(* iDestruct "HNCS" as "[Hheap %]". *)
iCombine "Hheap Hproph HNCS" as "Hs".
iDestruct "HNCS" as %HNCS.
iSpecialize ("H" $! σ1 κ κs a with "Hs").
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Hphi [% H]]]".
iFrame "Hw".
iFrame "Hphi".
iSplitR; auto.
iIntros (e2 σ2 efs Hstep) "Hw".
iSpecialize ("H" $! e2 σ2 efs Hstep with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Hphi H]]".
iFrame "Hw".
iFrame "Hphi".
iNext.
iIntros "Hw".
iSpecialize ("H" with "Hw").
repeat iMod "H".
repeat iModIntro.
iDestruct "H" as "[Hw [Hphi [Hs [H Hefs]]]]".
iFrame "Hw".
iFrame "Hphi".
iFrame "Hs".
iFrame "Hefs".
apply no_continue_preserve in Hstep as [? ?]; auto.
destruct (to_sval e2) eqn:eq'.
{
iApply (wp_no_continue_sval e2 _ φn φb φr φ1 φ2 eq' H1 with "H").
}
{
iApply ("IH" $! e2 eq' H1 with "H").
}
}
Qed.
End strong_no_continue.
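(* Store rule: the precondition [no_continue w] ensures that writing [w]
   preserves the state invariant [no_continue_state]; the points-to assertion
   for [l] is updated from [v] to [w]. *)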
Lemma wp_store l (v w : val) (φ : val -> iPropI Σ) :
no_continue w ->
(l ↦ v)%I ∗ ▷ (l ↦ w -∗ φ #()) ⊢
WP (Store (LitV l) (Val w)) {{ φ }} {{ bot }} {{ bot }} {{ bot }}.
Proof.
iIntros (?) "[H Hφ]".
rewrite wp_unfold /wp_pre; simpl.
iIntros (σ1 κ κs _) "(Hs & Hp & %)".
iDestruct (gen_heap_valid with "Hs H") as %?.
unfold fupd.
unfold bi_fupd_fupd. simpl.
unfold uPred_fupd.
rewrite seal_eq.
unfold uPred_fupd_def.
iIntros "[Hw Ht]".
iApply except_0_bupd.
iModIntro.
iApply bupd_frame_l.
iFrame "Hw".
iApply bupd_frame_r.
iPoseProof ownE_empty as "Hown_phi".
iFrame "Hown_phi".
iSplitR.
{
iPureIntro.
unfold reducible.
exists nil, (Val $ LitV LitUnit), (state_upd_heap <[l:=w]> σ1), nil.
eapply Ectx_step with (K := EmptyCtx); auto.
apply StoreS; auto. exists v; auto.
}
iIntros (e2 σ2 efs Hstep) "[Hw Hphi]".
repeat iModIntro.
iFrame "Hw Hphi".
iModIntro.
iIntros "[Hw Hphi]".
assert (κ = nil /\ e2 = (Val $ LitV LitUnit) /\ σ2 = (state_upd_heap <[l:=w]> σ1) /\ efs = nil) as (? & ? & ? & ?); subst; simpl in *.
{
inversion Hstep.
destruct_inversion K H2.
- inversion H4; subst; auto.
unfold expr_depth.singleton_ectx in H6.
destruct_inversion K H2.
+ simpl in H6; congruence.
+ destruct_inversion K H9. inversion H3.
+ destruct_inversion K H10. inversion H3.
- destruct_inversion K H6. inversion H4.
unfold expr_depth.singleton_ectx in H7.
destruct_inversion K H5. simpl in H7.
congruence.
- destruct_inversion K H7. inversion H4.
unfold expr_depth.singleton_ectx in H7.
destruct_inversion K H5. simpl in H7.
congruence.
}
iMod (gen_heap_update with "Hs H") as "[Hs Hl]".
repeat iModIntro.
iFrame "Hw Ht".
iSplitL "Hs Hp".
{
iFrame "Hs Hp".
iPureIntro.
assert (no_continue (#l <- w)); simpl; auto.
pose proof no_continue_preserve _ _ _ _ _ _ H0 Hstep H2 as [_ ?].
auto.
}
rewrite wp_unfold /wp_pre; simpl.
iSplit; auto.
iModIntro.
iApply "Hφ".
iApply "Hl".
Qed.
Print Assumptions tac_wp_break.
Print Assumptions tac_wp_continue.
Print Assumptions tac_wp_return.
Print Assumptions tac_wp_bind.
Print Assumptions tac_wp_loop.
Print Assumptions tac_wp_call.
Print Assumptions tac_wp_consequence.
Print Assumptions tac_wp_seq.
Print Assumptions wp_no_continue.
End multi_post. |
#Install MSstats Packages
#a <- installed.packages()
#packages <- a[, 1]
install.packages(c("gplots", "lme4", "reshape", "reshape2",
"ggplot2", "ggrepel", "data.table", "dplyr", "tidyr",
"survival", "doSNOW", "snow", "foreach", 'stringr',
"randomForest", "minpack.lm"),
repos='http://cran.us.r-project.org')
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager", repos='http://cran.us.r-project.org')
BiocManager::install("MSstats")
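# After installation the packages can be loaded with, e.g., library(MSstats);
# this script only installs them.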
#if (!is.element("MSstats",packages) || packageVersion("MSstats") < "3.3.11" ){
#directory <- tempdir()
#directory<-gsub("\\", "/", directory, fixed = TRUE)
#filename <- "MSstats_3.3.11.tar.gz"
#path <- file.path(directory, filename)
#MSstatsPackage <-normalizePath(path)
#download.file("http://www.stat.purdue.edu/~choi67/MSstats_3.3.11.tar.gz",path)
#install.packages(path, repos = NULL, type="source")
#}
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas64_2e130m5_3limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
Formal statement is: lemma adjoint_unique: assumes "\<forall>x y. inner (f x) y = inner x (g y)" shows "adjoint f = g" Informal statement is: If $\langle f(x), y\rangle = \langle x, g(y)\rangle$ for all $x$ and $y$, then $g$ is the adjoint of $f$; in other words, the adjoint is uniquely determined by this property. |
function [results] = test_all_complements(models,final,grid,friend_classes,savegtids,saverecs)
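% For each friend class: initialize the models' friend boxes
% (initialize_with_friends), transfer them onto the detections
% (transfer_friends), clip to the image, write a PASCAL VOC results file, and
% score it with VOCevaldet; results{z} records recall, precision, and AP.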
VOCinit;
keep_thresh = 0.1;
max_os = 1.0;
for exid = 1:length(models)
fprintf(1,'!');
Iex = im2double(imread(sprintf(VOCopts.imgpath, ...
models{exid}.curid)));
models{exid}.sizeI = size(Iex);
end
%compute all friends
% classes = VOCopts.classes;
% countvec = zeros(1,20);
% for i = 1:length(models)
% fprintf(1,'.');
% recs = PASreadrecord(sprintf(VOCopts.annopath,models{i}.curid));
% bbs = cat(1,recs.objects.bbox);
% osmat = getosmatrix_bb(bbs(models{i}.objectid,:), bbs);
% c = {recs.objects.class};
% goodname = ismember(c,friend_classes);
% targets = find(osmat>keep_thresh & osmat <=max_os & goodname);
% if length(targets) > 1
% fprintf(1,'got multiple d, keeping best\n',length(targets));
% [aa,bb] = sort(osmat(targets),'descend');
% targets = targets(bb);
% targets = targets(bb(1));
% end
% targetc = c(targets);
% models{i}.friendbb = bbs(targets,:);
% models{i}.friendc = targetc;
% [aa,bb] = ismember(targetc,classes);
% countvec(bb) = countvec(bb)+1;
% end
% frac_covered = countvec ./ length(models)*100;
%models_with_friends = models;
%friend_classes = find(countvec > 10);
%friend_classes = classes(friend_classes);
for z = 1:length(friend_classes)
targetcls = friend_classes{z};
models = cellfun2(@(x)initialize_with_friends(x,targetcls), ...
models);
% for aaa = 1:length(models_with_friends)
% oks = find(ismember(models_with_friends{z}.friendc,targetcls));
% models{z}.friendbb = models_with_friends{z}.friendbb(oks,:);
% models{z}.friendc = models_with_friends{z}.friendc(oks);
% end
bboxes = final.unclipped_boxes;
minoverlap = .5;
if 1 %~exist('minoverlap','var')
fprintf(1,'transferring friends\n');
%oldbboxes = bboxes;
bboxes = transfer_friends(models, bboxes);
end
if 0
maxdet = cellfun2(@(x)max(x(:,end)),bboxes);
lens = cellfun(@(x)length(x),maxdet);
goods = find(lens>0);
[alpha,beta] = sort([maxdet{goods}],'descend');
for i = 1:length(beta)
curid = grid{goods(beta(i))}.curid;
Ibase = imread(sprintf(VOCopts.imgpath,curid));
figure(1)
clf
imagesc(Ibase)
plot_bbox(bboxes{goods(beta(i))});
pause
end
end
for i = 1:length(bboxes)
bboxes{i} = clip_to_image(bboxes{i},grid{i}.imbb);
end
filer=sprintf(VOCopts.detrespath,'comp3',targetcls);
fprintf(1,'Writing File %s\n',filer);
fid = fopen(filer,'w');
for i = 1:length(bboxes)
curid = grid{i}.curid;
for q = 1:size(bboxes{i},1)
fprintf(fid,'%s %f %f %f %f %f\n',curid,...
bboxes{i}(q,end), bboxes{i}(q,1:4));
end
end
fclose(fid);
if exist('minoverlap','var')
VOCopts.minoverlap = minoverlap;
end
[recall,prec,ap,apold] = VOCevaldet(VOCopts,'comp3',targetcls, ...
true,savegtids,saverecs);
results{z}.recall = recall;
results{z}.prc = prec;
results{z}.ap = ap;
results{z}.apold = apold;
results{z}.targetcls = targetcls;
figure;
plot(recall,prec)
title(sprintf('%s to predict %s: apold=%.3f',models{1}.cls,targetcls,apold));
targetcls
drawnow
end
|
(* Title: JinjaThreads/J/Threaded.thy
Author: Andreas Lochbihler
*)
section \<open>The source language as an instance of the framework\<close>
theory Threaded
imports
SmallStep
JWellForm
"../Common/ConformThreaded"
"../Common/ExternalCallWF"
"../Framework/FWLiftingSem"
"../Framework/FWProgressAux"
begin
context heap_base begin \<comment> \<open>Move to ?? - also used in BV\<close>
lemma wset_Suspend_ok_start_state:
fixes final r convert_RA
assumes "start_state f P C M vs \<in> I"
shows "start_state f P C M vs \<in> multithreaded_base.wset_Suspend_ok final r convert_RA I"
using assms
by(rule multithreaded_base.wset_Suspend_okI)(simp add: start_state_def split_beta)
end
abbreviation final_expr :: "'addr expr \<times> 'addr locals \<Rightarrow> bool" where
"final_expr \<equiv> \<lambda>(e, x). final e"
lemma final_locks: "final e \<Longrightarrow> expr_locks e l = 0"
by(auto elim: finalE)
context J_heap_base begin
abbreviation mred
:: "'addr J_prog \<Rightarrow> ('addr, 'thread_id, 'addr expr \<times> 'addr locals, 'heap, 'addr, ('addr, 'thread_id) obs_event) semantics"
where
"mred P t \<equiv> (\<lambda>((e, l), h) ta ((e', l'), h'). P,t \<turnstile> \<langle>e, (h, l)\<rangle> -ta\<rightarrow> \<langle>e', (h', l')\<rangle>)"
lemma red_new_thread_heap:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; NewThread t'' ex'' h'' \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> h'' = hp s'"
and reds_new_thread_heap:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; NewThread t'' ex'' h'' \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> h'' = hp s'"
apply(induct rule: red_reds.inducts)
apply(fastforce dest: red_ext_new_thread_heap simp add: ta_upd_simps)+
done
lemma red_ta_Wakeup_no_Join_no_Lock_no_Interrupt:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; Notified \<in> set \<lbrace>ta\<rbrace>\<^bsub>w\<^esub> \<or> WokenUp \<in> set \<lbrace>ta\<rbrace>\<^bsub>w\<^esub> \<rbrakk>
\<Longrightarrow> collect_locks \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = {} \<and> collect_cond_actions \<lbrace>ta\<rbrace>\<^bsub>c\<^esub> = {} \<and> collect_interrupts \<lbrace>ta\<rbrace>\<^bsub>i\<^esub> = {}"
and reds_ta_Wakeup_no_Join_no_Lock_no_Interrupt:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; Notified \<in> set \<lbrace>ta\<rbrace>\<^bsub>w\<^esub> \<or> WokenUp \<in> set \<lbrace>ta\<rbrace>\<^bsub>w\<^esub> \<rbrakk>
\<Longrightarrow> collect_locks \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = {} \<and> collect_cond_actions \<lbrace>ta\<rbrace>\<^bsub>c\<^esub> = {} \<and> collect_interrupts \<lbrace>ta\<rbrace>\<^bsub>i\<^esub> = {}"
apply(induct rule: red_reds.inducts)
apply(auto simp add: ta_upd_simps dest: red_external_Wakeup_no_Join_no_Lock_no_Interrupt del: conjI)
done
lemma final_no_red:
"final e \<Longrightarrow> \<not> P,t \<turnstile> \<langle>e, (h, l)\<rangle> -ta\<rightarrow> \<langle>e', (h', l')\<rangle>"
by(auto elim: red.cases finalE)
lemma red_mthr: "multithreaded final_expr (mred P)"
by(unfold_locales)(auto dest: red_new_thread_heap)
end
sublocale J_heap_base < red_mthr: multithreaded
"final_expr"
"mred P"
convert_RA
for P
by(rule red_mthr)
context J_heap_base begin
abbreviation
mredT ::
"'addr J_prog \<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state
\<Rightarrow> ('thread_id \<times> ('addr, 'thread_id, 'addr expr \<times> 'addr locals,'heap) Jinja_thread_action)
\<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state \<Rightarrow> bool"
where
"mredT P \<equiv> red_mthr.redT P"
abbreviation
mredT_syntax1 :: "'addr J_prog \<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state
\<Rightarrow> 'thread_id \<Rightarrow> ('addr, 'thread_id, 'addr expr \<times> 'addr locals,'heap) Jinja_thread_action
\<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state \<Rightarrow> bool"
("_ \<turnstile> _ -_\<triangleright>_\<rightarrow> _" [50,0,0,0,50] 80)
where
"mredT_syntax1 P s t ta s' \<equiv> mredT P s (t, ta) s'"
abbreviation
mRedT_syntax1 ::
"'addr J_prog
\<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state
\<Rightarrow> ('thread_id \<times> ('addr, 'thread_id, 'addr expr \<times> 'addr locals,'heap) Jinja_thread_action) list
\<Rightarrow> ('addr,'thread_id,'addr expr \<times> 'addr locals,'heap,'addr) state \<Rightarrow> bool"
("_ \<turnstile> _ -\<triangleright>_\<rightarrow>* _" [50,0,0,50] 80)
where
"P \<turnstile> s -\<triangleright>ttas\<rightarrow>* s' \<equiv> red_mthr.RedT P s ttas s'"
end
context J_heap begin
lemma redT_hext_incr:
"P \<turnstile> s -t\<triangleright>ta\<rightarrow> s' \<Longrightarrow> shr s \<unlhd> shr s'"
by(erule red_mthr.redT.cases)(auto dest!: red_hext_incr intro: hext_trans)
lemma RedT_hext_incr:
assumes "P \<turnstile> s -\<triangleright>tta\<rightarrow>* s'"
shows "shr s \<unlhd> shr s'"
using assms unfolding red_mthr.RedT_def
by(induct)(auto dest: redT_hext_incr intro: hext_trans)
end
subsection \<open>Lifting @{term "tconf"} to multithreaded states\<close>
context J_heap begin
lemma red_NewThread_Thread_Object:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; NewThread t' x m \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk>
\<Longrightarrow> \<exists>C. typeof_addr (hp s') (thread_id2addr t') = \<lfloor>Class_type C\<rfloor> \<and> P \<turnstile> C \<preceq>\<^sup>* Thread"
and reds_NewThread_Thread_Object:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; NewThread t' x m \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk>
\<Longrightarrow> \<exists>C. typeof_addr (hp s') (thread_id2addr t') = \<lfloor>Class_type C\<rfloor> \<and> P \<turnstile> C \<preceq>\<^sup>* Thread"
apply(induct rule: red_reds.inducts)
apply(fastforce dest: red_external_new_thread_exists_thread_object simp add: ta_upd_simps)+
done
lemma lifting_wf_tconf:
"lifting_wf final_expr (mred P) (\<lambda>t ex h. P,h \<turnstile> t \<surd>t)"
by(unfold_locales)(fastforce dest: red_hext_incr red_NewThread_Thread_Object elim!: tconf_hext_mono intro: tconfI)+
end
sublocale J_heap < red_tconf: lifting_wf final_expr "mred P" convert_RA "\<lambda>t ex h. P,h \<turnstile> t \<surd>t"
by(rule lifting_wf_tconf)
subsection \<open>Towards agreement between the framework semantics' lock state and the locks stored in the expressions\<close>
primrec sync_ok :: "('a,'b,'addr) exp \<Rightarrow> bool"
and sync_oks :: "('a,'b,'addr) exp list \<Rightarrow> bool"
where
"sync_ok (new C) = True"
| "sync_ok (newA T\<lfloor>i\<rceil>) = sync_ok i"
| "sync_ok (Cast T e) = sync_ok e"
| "sync_ok (e instanceof T) = sync_ok e"
| "sync_ok (Val v) = True"
| "sync_ok (Var v) = True"
| "sync_ok (e \<guillemotleft>bop\<guillemotright> e') = (sync_ok e \<and> sync_ok e' \<and> (contains_insync e' \<longrightarrow> is_val e))"
| "sync_ok (V := e) = sync_ok e"
| "sync_ok (a\<lfloor>i\<rceil>) = (sync_ok a \<and> sync_ok i \<and> (contains_insync i \<longrightarrow> is_val a))"
| "sync_ok (AAss a i e) = (sync_ok a \<and> sync_ok i \<and> sync_ok e \<and> (contains_insync i \<longrightarrow> is_val a) \<and> (contains_insync e \<longrightarrow> is_val a \<and> is_val i))"
| "sync_ok (a\<bullet>length) = sync_ok a"
| "sync_ok (e\<bullet>F{D}) = sync_ok e"
| "sync_ok (FAss e F D e') = (sync_ok e \<and> sync_ok e' \<and> (contains_insync e' \<longrightarrow> is_val e))"
| "sync_ok (e\<bullet>compareAndSwap(D\<bullet>F, e', e'')) = (sync_ok e \<and> sync_ok e' \<and> sync_ok e'' \<and> (contains_insync e' \<longrightarrow> is_val e) \<and> (contains_insync e'' \<longrightarrow> is_val e \<and> is_val e'))"
| "sync_ok (e\<bullet>m(pns)) = (sync_ok e \<and> sync_oks pns \<and> (contains_insyncs pns \<longrightarrow> is_val e))"
| "sync_ok ({V : T=vo; e}) = sync_ok e"
| "sync_ok (sync\<^bsub>V\<^esub> (o') e) = (sync_ok o' \<and> \<not> contains_insync e)"
| "sync_ok (insync\<^bsub>V\<^esub> (a) e) = sync_ok e"
| "sync_ok (e;;e') = (sync_ok e \<and> \<not> contains_insync e')"
| "sync_ok (if (b) e else e') = (sync_ok b \<and> \<not> contains_insync e \<and> \<not> contains_insync e')"
| "sync_ok (while (b) e) = (\<not> contains_insync b \<and> \<not> contains_insync e)"
| "sync_ok (throw e) = sync_ok e"
| "sync_ok (try e catch(C v) e') = (sync_ok e \<and> \<not> contains_insync e')"
| "sync_oks [] = True"
| "sync_oks (x # xs) = (sync_ok x \<and> sync_oks xs \<and> (contains_insyncs xs \<longrightarrow> is_val x))"
lemma sync_oks_append [simp]:
"sync_oks (xs @ ys) \<longleftrightarrow> sync_oks xs \<and> sync_oks ys \<and> (contains_insyncs ys \<longrightarrow> (\<exists>vs. xs = map Val vs))"
by(induct xs)(auto simp add: Cons_eq_map_conv)
lemma fixes e :: "('a,'b,'addr) exp" and es :: "('a,'b,'addr) exp list"
shows not_contains_insync_sync_ok: "\<not> contains_insync e \<Longrightarrow> sync_ok e"
and not_contains_insyncs_sync_oks: "\<not> contains_insyncs es \<Longrightarrow> sync_oks es"
by(induct e and es rule: sync_ok.induct sync_oks.induct)(auto)
lemma expr_locks_sync_ok: "(\<And>ad. expr_locks e ad = 0) \<Longrightarrow> sync_ok e"
and expr_lockss_sync_oks: "(\<And>ad. expr_lockss es ad = 0) \<Longrightarrow> sync_oks es"
by(auto intro!: not_contains_insync_sync_ok not_contains_insyncs_sync_oks
simp add: contains_insync_conv contains_insyncs_conv)
lemma sync_ok_extRet2J [simp, intro!]: "sync_ok e \<Longrightarrow> sync_ok (extRet2J e va)"
by(cases va) auto
abbreviation
sync_es_ok :: "('addr,'thread_id,('a,'b,'addr) exp\<times>'c) thread_info \<Rightarrow> 'heap \<Rightarrow> bool"
where
"sync_es_ok \<equiv> ts_ok (\<lambda>t (e, x) m. sync_ok e)"
lemma sync_es_ok_blocks [simp]:
"\<lbrakk> length pns = length Ts; length Ts = length vs \<rbrakk> \<Longrightarrow> sync_ok (blocks pns Ts vs e) = sync_ok e"
by(induct pns Ts vs e rule: blocks.induct) auto
context J_heap_base begin
lemma assumes wf: "wf_J_prog P"
shows red_preserve_sync_ok: "\<lbrakk> extTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; sync_ok e \<rbrakk> \<Longrightarrow> sync_ok e'"
and reds_preserve_sync_oks: "\<lbrakk> extTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; sync_oks es \<rbrakk> \<Longrightarrow> sync_oks es'"
proof(induct rule: red_reds.inducts)
case (RedCall s a U M Ts T pns body D vs)
from wf \<open>P \<turnstile> class_type_of U sees M: Ts\<rightarrow>T = \<lfloor>(pns, body)\<rfloor> in D\<close>
have "wf_mdecl wf_J_mdecl P D (M,Ts,T,\<lfloor>(pns,body)\<rfloor>)"
by(rule sees_wf_mdecl)
then obtain T where "P,[this\<mapsto>Class D,pns[\<mapsto>]Ts] \<turnstile> body :: T"
by(auto simp add: wf_mdecl_def)
hence "expr_locks body = (\<lambda>ad. 0)" by(rule WT_expr_locks)
with \<open>length vs = length pns\<close> \<open>length Ts = length pns\<close>
have "expr_locks (blocks pns Ts vs body) = (\<lambda>ad. 0)"
by(simp add: expr_locks_blocks)
thus ?case by(auto intro: expr_locks_sync_ok)
qed(fastforce intro: not_contains_insync_sync_ok)+
lemma assumes wf: "wf_J_prog P"
shows expr_locks_new_thread:
"\<lbrakk> P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; NewThread t'' (e'', x'') h \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> expr_locks e'' = (\<lambda>ad. 0)"
and expr_locks_new_thread':
"\<lbrakk> P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; NewThread t'' (e'', x'') h \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> expr_locks e'' = (\<lambda>ad. 0)"
proof(induct rule: red_reds.inducts)
case (RedCallExternal s a U M Ts T D vs ta va h' ta' e' s')
then obtain C fs a where subThread: "P \<turnstile> C \<preceq>\<^sup>* Thread" and ext: "extNTA2J P (C, run, a) = (e'', x'')"
by(fastforce dest: red_external_new_thread_sub_thread)
from sub_Thread_sees_run[OF wf subThread] obtain D pns body
where sees: "P \<turnstile> C sees run: []\<rightarrow>Void = \<lfloor>(pns, body)\<rfloor> in D" by auto
from sees_wf_mdecl[OF wf this] obtain T where "P,[this \<mapsto> Class D] \<turnstile> body :: T"
by(auto simp add: wf_mdecl_def)
hence "expr_locks body = (\<lambda>ad. 0)" by(rule WT_expr_locks)
with sees ext show ?case by(auto simp add: extNTA2J_def)
qed(auto simp add: ta_upd_simps)
lemma assumes wf: "wf_J_prog P"
shows red_new_thread_sync_ok: "\<lbrakk> P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; NewThread t'' (e'', x'') h'' \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> sync_ok e''"
and reds_new_thread_sync_ok: "\<lbrakk> P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; NewThread t'' (e'', x'') h'' \<in> set \<lbrace>ta\<rbrace>\<^bsub>t\<^esub> \<rbrakk> \<Longrightarrow> sync_ok e''"
by(auto dest!: expr_locks_new_thread[OF wf] expr_locks_new_thread'[OF wf] intro: expr_locks_sync_ok expr_lockss_sync_oks)
lemma lifting_wf_sync_ok: "wf_J_prog P \<Longrightarrow> lifting_wf final_expr (mred P) (\<lambda>t (e, x) m. sync_ok e)"
by(unfold_locales)(auto intro: red_preserve_sync_ok red_new_thread_sync_ok)
lemma redT_preserve_sync_ok:
assumes red: "P \<turnstile> s -t\<triangleright>ta\<rightarrow> s'"
shows "\<lbrakk> wf_J_prog P; sync_es_ok (thr s) (shr s) \<rbrakk> \<Longrightarrow> sync_es_ok (thr s') (shr s')"
by(rule lifting_wf.redT_preserves[OF lifting_wf_sync_ok red])
lemma RedT_preserves_sync_ok:
"\<lbrakk>wf_J_prog P; P \<turnstile> s -\<triangleright>ttas\<rightarrow>* s'; sync_es_ok (thr s) (shr s)\<rbrakk>
\<Longrightarrow> sync_es_ok (thr s') (shr s')"
by(rule lifting_wf.RedT_preserves[OF lifting_wf_sync_ok])
lemma sync_es_ok_J_start_state:
"\<lbrakk> wf_J_prog P; P \<turnstile> C sees M:Ts\<rightarrow>T=\<lfloor>(pns, body)\<rfloor> in D; length Ts = length vs \<rbrakk>
\<Longrightarrow> sync_es_ok (thr (J_start_state P C M vs)) m"
apply(rule ts_okI)
apply(clarsimp simp add: start_state_def split_beta split: if_split_asm)
apply(drule (1) sees_wf_mdecl)
apply(clarsimp simp add: wf_mdecl_def)
apply(drule WT_expr_locks)
apply(rule expr_locks_sync_ok)
apply simp
done
end
text \<open>Framework lock state agrees with locks stored in the expression\<close>
definition lock_ok :: "('addr,'thread_id) locks \<Rightarrow> ('addr,'thread_id,('a, 'b,'addr) exp \<times> 'x) thread_info \<Rightarrow> bool" where
"\<And>ln. lock_ok ls ts \<equiv> \<forall>t. (case (ts t) of None \<Rightarrow> (\<forall>l. has_locks (ls $ l) t = 0)
| \<lfloor>((e, x), ln)\<rfloor> \<Rightarrow> (\<forall>l. has_locks (ls $ l) t + ln $ l = expr_locks e l))"
lemma lock_okI:
"\<lbrakk> \<And>t l. ts t = None \<Longrightarrow> has_locks (ls $ l) t = 0; \<And>t e x ln l. ts t = \<lfloor>((e, x), ln)\<rfloor> \<Longrightarrow> has_locks (ls $ l) t + ln $ l= expr_locks e l \<rbrakk> \<Longrightarrow> lock_ok ls ts"
apply(fastforce simp add: lock_ok_def)
done
lemma lock_okE:
"\<lbrakk> lock_ok ls ts;
\<forall>t. ts t = None \<longrightarrow> (\<forall>l. has_locks (ls $ l) t = 0) \<Longrightarrow> Q;
\<forall>t e x ln. ts t = \<lfloor>((e, x), ln)\<rfloor> \<longrightarrow> (\<forall>l. has_locks (ls $ l) t + ln $ l = expr_locks e l) \<Longrightarrow> Q \<rbrakk>
\<Longrightarrow> Q"
by(fastforce simp add: lock_ok_def)
lemma lock_okD1:
"\<lbrakk> lock_ok ls ts; ts t = None \<rbrakk> \<Longrightarrow> \<forall>l. has_locks (ls $ l) t = 0"
apply(simp add: lock_ok_def)
apply(erule_tac x="t" in allE)
apply(auto)
done
lemma lock_okD2:
"\<And>ln. \<lbrakk> lock_ok ls ts; ts t = \<lfloor>((e, x), ln)\<rfloor> \<rbrakk> \<Longrightarrow> \<forall>l. has_locks (ls $ l) t + ln $ l = expr_locks e l"
apply(fastforce simp add: lock_ok_def)
done
lemma lock_ok_lock_thread_ok:
assumes lock: "lock_ok ls ts"
shows "lock_thread_ok ls ts"
proof(rule lock_thread_okI)
fix l t
assume lsl: "has_lock (ls $ l) t"
show "\<exists>xw. ts t = \<lfloor>xw\<rfloor>"
proof(cases "ts t")
case None
with lock have "has_locks (ls $ l) t = 0"
by(auto dest: lock_okD1)
with lsl show ?thesis by simp
next
case (Some a) thus ?thesis by blast
qed
qed
lemma (in J_heap_base) lock_ok_J_start_state:
"\<lbrakk> wf_J_prog P; P \<turnstile> C sees M:Ts\<rightarrow>T=\<lfloor>(pns, body)\<rfloor> in D; length Ts = length vs \<rbrakk>
\<Longrightarrow> lock_ok (locks (J_start_state P C M vs)) (thr (J_start_state P C M vs))"
apply(rule lock_okI)
apply(auto simp add: start_state_def split: if_split_asm)
apply(drule (1) sees_wf_mdecl)
apply(clarsimp simp add: wf_mdecl_def)
apply(drule WT_expr_locks)
apply(simp add: expr_locks_blocks)
done
subsection \<open>Preservation of lock state agreement\<close>
fun upd_expr_lock_action :: "int \<Rightarrow> lock_action \<Rightarrow> int"
where
"upd_expr_lock_action i Lock = i + 1"
| "upd_expr_lock_action i Unlock = i - 1"
| "upd_expr_lock_action i UnlockFail = i"
| "upd_expr_lock_action i ReleaseAcquire = i"
fun upd_expr_lock_actions :: "int \<Rightarrow> lock_action list \<Rightarrow> int" where
"upd_expr_lock_actions n [] = n"
| "upd_expr_lock_actions n (L # Ls) = upd_expr_lock_actions (upd_expr_lock_action n L) Ls"
lemma upd_expr_lock_actions_append [simp]:
"upd_expr_lock_actions n (Ls @ Ls') = upd_expr_lock_actions (upd_expr_lock_actions n Ls) Ls'"
by(induct Ls arbitrary: n, auto)
definition upd_expr_locks :: "('l \<Rightarrow> int) \<Rightarrow> 'l lock_actions \<Rightarrow> 'l \<Rightarrow> int"
where "upd_expr_locks els las \<equiv> \<lambda>l. upd_expr_lock_actions (els l) (las $ l)"
lemma upd_expr_locks_iff [simp]:
"upd_expr_locks els las l = upd_expr_lock_actions (els l) (las $ l)"
by(simp add: upd_expr_locks_def)
lemma upd_expr_lock_action_add [simp]:
"upd_expr_lock_action (l + l') L = upd_expr_lock_action l L + l'"
by(cases L, auto)
lemma upd_expr_lock_actions_add [simp]:
"upd_expr_lock_actions (l + l') Ls = upd_expr_lock_actions l Ls + l'"
by(induct Ls arbitrary: l, auto)
lemma upd_expr_locks_add [simp]:
"upd_expr_locks (\<lambda>a. x a + y a) las = (\<lambda>a. upd_expr_locks x las a + y a)"
by(auto intro: ext)
lemma expr_locks_extRet2J [simp, intro!]: "expr_locks e = (\<lambda>ad. 0) \<Longrightarrow> expr_locks (extRet2J e va) = (\<lambda>ad. 0)"
by(cases va) auto
lemma (in J_heap_base)
assumes wf: "wf_J_prog P"
shows red_update_expr_locks:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; sync_ok e \<rbrakk>
\<Longrightarrow> upd_expr_locks (int o expr_locks e) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int o expr_locks e'"
and reds_update_expr_lockss:
"\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; sync_oks es \<rbrakk>
\<Longrightarrow> upd_expr_locks (int o expr_lockss es) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int o expr_lockss es'"
proof -
have "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; sync_ok e \<rbrakk>
\<Longrightarrow> upd_expr_locks (\<lambda>ad. 0) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = (\<lambda>ad. (int o expr_locks e') ad - (int o expr_locks e) ad)"
and "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; sync_oks es \<rbrakk>
\<Longrightarrow> upd_expr_locks (\<lambda>ad. 0) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = (\<lambda>ad. (int o expr_lockss es') ad - (int o expr_lockss es) ad)"
proof(induct rule: red_reds.inducts)
case (RedCall s a U M Ts T pns body D vs)
from wf \<open>P \<turnstile> class_type_of U sees M: Ts\<rightarrow>T = \<lfloor>(pns, body)\<rfloor> in D\<close>
have "wf_mdecl wf_J_mdecl P D (M,Ts,T,\<lfloor>(pns,body)\<rfloor>)"
by(rule sees_wf_mdecl)
then obtain T where "P,[this\<mapsto>Class D,pns[\<mapsto>]Ts] \<turnstile> body :: T"
by(auto simp add: wf_mdecl_def)
hence "expr_locks body = (\<lambda>ad. 0)" by(rule WT_expr_locks)
with \<open>length vs = length pns\<close> \<open>length Ts = length pns\<close>
have "expr_locks (blocks pns Ts vs body) = (\<lambda>ad. 0)"
by(simp add: expr_locks_blocks)
thus ?case by(auto intro: expr_locks_sync_ok)
next
case RedCallExternal thus ?case
by(auto simp add: fun_eq_iff contains_insync_conv contains_insyncs_conv finfun_upd_apply ta_upd_simps elim!: red_external.cases)
qed(fastforce simp add: fun_eq_iff contains_insync_conv contains_insyncs_conv finfun_upd_apply ta_upd_simps)+
hence "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; sync_ok e \<rbrakk>
\<Longrightarrow> upd_expr_locks (\<lambda>ad. 0 + (int \<circ> expr_locks e) ad) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int \<circ> expr_locks e'"
and "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; sync_oks es \<rbrakk>
\<Longrightarrow> upd_expr_locks (\<lambda>ad. 0 + (int \<circ> expr_lockss es) ad) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int \<circ> expr_lockss es'"
by(auto intro: ext simp only: upd_expr_locks_add)
thus "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>e, s\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>; sync_ok e \<rbrakk>
\<Longrightarrow> upd_expr_locks (int o expr_locks e) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int o expr_locks e'"
and "\<lbrakk> convert_extTA extNTA,P,t \<turnstile> \<langle>es, s\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>; sync_oks es \<rbrakk>
\<Longrightarrow> upd_expr_locks (int o expr_lockss es) \<lbrace>ta\<rbrace>\<^bsub>l\<^esub> = int o expr_lockss es'"
by(auto simp add: o_def)
qed
definition lock_expr_locks_ok :: "'t FWState.lock \<Rightarrow> 't \<Rightarrow> nat \<Rightarrow> int \<Rightarrow> bool" where
"lock_expr_locks_ok l t n i \<equiv> (i = int (has_locks l t) + int n) \<and> i \<ge> 0"
lemma upd_lock_upd_expr_lock_action_preserve_lock_expr_locks_ok:
assumes lao: "lock_action_ok l t L"
and lelo: "lock_expr_locks_ok l t n i"
shows "lock_expr_locks_ok (upd_lock l t L) t (upd_threadR n l t L) (upd_expr_lock_action i L)"
proof -
from lelo have i: "i \<ge> 0"
and hl: "i = int (has_locks l t) + int n"
by(auto simp add: lock_expr_locks_ok_def)
from lelo
show ?thesis
proof(cases L)
case Lock
with lao have "may_lock l t" by(simp)
with hl have "has_locks (lock_lock l t) t = (Suc (has_locks l t))" by(auto)
with Lock i hl show ?thesis
by(simp add: lock_expr_locks_ok_def)
next
case Unlock
with lao have "has_lock l t" by simp
then obtain n'
where hl': "has_locks l t = Suc n'"
by(auto dest: has_lock_has_locks_Suc)
hence "has_locks (unlock_lock l) t = n'" by simp
with Unlock i hl hl' show ?thesis
by(simp add: lock_expr_locks_ok_def)
qed(auto simp add: lock_expr_locks_ok_def)
qed
lemma upd_locks_upd_expr_lock_preserve_lock_expr_locks_ok:
"\<lbrakk> lock_actions_ok l t Ls; lock_expr_locks_ok l t n i \<rbrakk>
\<Longrightarrow> lock_expr_locks_ok (upd_locks l t Ls) t (upd_threadRs n l t Ls) (upd_expr_lock_actions i Ls)"
by(induct Ls arbitrary: l i n)(auto intro: upd_lock_upd_expr_lock_action_preserve_lock_expr_locks_ok)
definition ls_els_ok :: "('addr,'thread_id) locks \<Rightarrow> 'thread_id \<Rightarrow> ('addr \<Rightarrow>f nat) \<Rightarrow> ('addr \<Rightarrow> int) \<Rightarrow> bool" where
"\<And>ln. ls_els_ok ls t ln els \<equiv> \<forall>l. lock_expr_locks_ok (ls $ l) t (ln $ l) (els l)"
lemma ls_els_okI:
"\<And>ln. (\<And>l. lock_expr_locks_ok (ls $ l) t (ln $ l) (els l)) \<Longrightarrow> ls_els_ok ls t ln els"
by(auto simp add: ls_els_ok_def)
lemma ls_els_okE:
"\<And>ln. \<lbrakk> ls_els_ok ls t ln els; \<forall>l. lock_expr_locks_ok (ls $ l) t (ln $ l) (els l) \<Longrightarrow> P \<rbrakk> \<Longrightarrow> P"
by(auto simp add: ls_els_ok_def)
lemma ls_els_okD:
"\<And>ln. ls_els_ok ls t ln els \<Longrightarrow> lock_expr_locks_ok (ls $ l) t (ln $ l) (els l)"
by(auto simp add: ls_els_ok_def)
lemma redT_updLs_upd_expr_locks_preserves_ls_els_ok:
"\<And>ln. \<lbrakk> ls_els_ok ls t ln els; lock_ok_las ls t las \<rbrakk>
\<Longrightarrow> ls_els_ok (redT_updLs ls t las) t (redT_updLns ls t ln las) (upd_expr_locks els las)"
by(auto intro!: ls_els_okI upd_locks_upd_expr_lock_preserve_lock_expr_locks_ok elim!: ls_els_okE simp add: redT_updLs_def lock_ok_las_def)
lemma sync_ok_redT_updT:
assumes "sync_es_ok ts h"
and nt: "\<And>t e x h''. ta = NewThread t (e, x) h'' \<Longrightarrow> sync_ok e"
shows "sync_es_ok (redT_updT ts ta) h'"
using assms
proof(cases ta)
case (NewThread T x m)
obtain E X where [simp]: "x = (E, X)" by (cases x, auto)
with NewThread have "sync_ok E" by(simp)(rule nt)
with NewThread \<open>sync_es_ok ts h\<close> show ?thesis
apply -
apply(rule ts_okI)
apply(case_tac "t=T")
by(auto dest: ts_okD)
qed(auto intro: ts_okI dest: ts_okD)
lemma sync_ok_redT_updTs:
"\<lbrakk> sync_es_ok ts h; \<And>t e x h. NewThread t (e, x) h \<in> set tas \<Longrightarrow> sync_ok e \<rbrakk>
\<Longrightarrow> sync_es_ok (redT_updTs ts tas) h'"
proof(induct tas arbitrary: ts)
case Nil thus ?case by(auto intro: ts_okI dest: ts_okD)
next
case (Cons TA TAS TS)
note IH = \<open>\<And>ts. \<lbrakk>sync_es_ok ts h; \<And>t e x h''. NewThread t (e, x) h'' \<in> set TAS \<Longrightarrow> sync_ok e\<rbrakk>
\<Longrightarrow> sync_es_ok (redT_updTs ts TAS) h'\<close>
note nt = \<open>\<And>t e x h. NewThread t (e, x) h \<in> set (TA # TAS) \<Longrightarrow> sync_ok e\<close>
from \<open>sync_es_ok TS h\<close> nt
have "sync_es_ok (redT_updT TS TA) h"
by(auto elim!: sync_ok_redT_updT)
hence "sync_es_ok (redT_updTs (redT_updT TS TA) TAS) h'"
by(rule IH)(auto intro: nt)
thus ?case by simp
qed
lemma lock_ok_thr_updI:
"\<And>ln. \<lbrakk> lock_ok ls ts; ts t = \<lfloor>((e, xs), ln)\<rfloor>; expr_locks e = expr_locks e' \<rbrakk>
\<Longrightarrow> lock_ok ls (ts(t \<mapsto> ((e', xs'), ln)))"
by(rule lock_okI)(auto split: if_split_asm dest: lock_okD2 lock_okD1)
context J_heap_base begin
lemma invariant3p_sync_es_ok_lock_ok:
assumes wf: "wf_J_prog P"
shows "invariant3p (mredT P) {s. sync_es_ok (thr s) (shr s) \<and> lock_ok (locks s) (thr s)}"
apply(rule invariant3pI)
apply clarify
apply(rule conjI)
apply(rule lifting_wf.redT_preserves[OF lifting_wf_sync_ok[OF wf]], blast)
apply(assumption)
apply(erule (2) redT_preserves_lock_ok[OF wf])
done
lemma RedT_preserves_lock_ok:
assumes wf: "wf_J_prog P"
and Red: "P \<turnstile> s -\<triangleright>ttas\<rightarrow>* s'"
and ae: "sync_es_ok (thr s) (shr s)"
and loes: "lock_ok (locks s) (thr s)"
shows "lock_ok (locks s') (thr s')"
using invariant3p_rtrancl3p[OF invariant3p_sync_es_ok_lock_ok[OF wf] Red[unfolded red_mthr.RedT_def]] ae loes
by simp
end
subsection \<open>Determinism\<close>
context J_heap_base begin
lemma
fixes final
assumes det: "deterministic_heap_ops"
shows red_deterministic:
"\<lbrakk> convert_extTA extTA,P,t \<turnstile> \<langle>e, (shr s, xs)\<rangle> -ta\<rightarrow> \<langle>e', s'\<rangle>;
convert_extTA extTA,P,t \<turnstile> \<langle>e, (shr s, xs)\<rangle> -ta'\<rightarrow> \<langle>e'', s''\<rangle>;
final_thread.actions_ok final s t ta; final_thread.actions_ok final s t ta' \<rbrakk>
\<Longrightarrow> ta = ta' \<and> e' = e'' \<and> s' = s''"
and reds_deterministic:
"\<lbrakk> convert_extTA extTA,P,t \<turnstile> \<langle>es, (shr s, xs)\<rangle> [-ta\<rightarrow>] \<langle>es', s'\<rangle>;
convert_extTA extTA,P,t \<turnstile> \<langle>es, (shr s, xs)\<rangle> [-ta'\<rightarrow>] \<langle>es'', s''\<rangle>;
final_thread.actions_ok final s t ta; final_thread.actions_ok final s t ta' \<rbrakk>
\<Longrightarrow> ta = ta' \<and> es' = es'' \<and> s' = s''"
proof(induct e "(shr s, xs)" ta e' s' and es "(shr s, xs)" ta es' s' arbitrary: e'' s'' xs and es'' s'' xs rule: red_reds.inducts)
case RedNew
thus ?case by(auto elim!: red_cases dest: deterministic_heap_ops_allocateD[OF det])
next
case RedNewArray
thus ?case by(auto elim!: red_cases dest: deterministic_heap_ops_allocateD[OF det])
next
case RedCall thus ?case
by(auto elim!: red_cases dest: sees_method_fun simp add: map_eq_append_conv)
next
case RedCallExternal thus ?case
by(auto elim!: red_cases dest: red_external_deterministic[OF det] simp add: final_thread.actions_ok_iff map_eq_append_conv dest: sees_method_fun)
next
case RedCallNull thus ?case by(auto elim!: red_cases dest: sees_method_fun simp add: map_eq_append_conv)
next
case CallThrowParams thus ?case
by(auto elim!: red_cases dest: sees_method_fun simp add: map_eq_append_conv append_eq_map_conv append_eq_append_conv2 reds_map_Val_Throw Cons_eq_append_conv append_eq_Cons_conv)
qed(fastforce elim!: red_cases reds_cases dest: deterministic_heap_ops_readD[OF det] deterministic_heap_ops_writeD[OF det] iff: reds_map_Val_Throw)+
lemma red_mthr_deterministic:
assumes det: "deterministic_heap_ops"
shows "red_mthr.deterministic P UNIV"
proof(rule red_mthr.determisticI)
fix s t x ta' x' m' ta'' x'' m''
assume "thr s t = \<lfloor>(x, no_wait_locks)\<rfloor>"
and red: "mred P t (x, shr s) ta' (x', m')" "mred P t (x, shr s) ta'' (x'', m'')"
and aok: "red_mthr.actions_ok s t ta'" "red_mthr.actions_ok s t ta''"
moreover obtain e xs where [simp]: "x = (e, xs)" by(cases x)
moreover obtain e' xs' where [simp]: "x' = (e', xs')" by(cases x')
moreover obtain e'' xs'' where [simp]: "x'' = (e'', xs'')" by(cases x'')
ultimately have "extTA2J P,P,t \<turnstile> \<langle>e,(shr s, xs)\<rangle> -ta'\<rightarrow> \<langle>e',(m', xs')\<rangle>"
and "extTA2J P,P,t \<turnstile> \<langle>e,(shr s, xs)\<rangle> -ta''\<rightarrow> \<langle>e'',(m'', xs'')\<rangle>"
by simp_all
from red_deterministic[OF det this aok]
show "ta' = ta'' \<and> x' = x'' \<and> m' = m''" by simp
qed simp
end
end
|
If $d \leq e$, then the ball of radius $d$ centered at $x$ is contained in the ball of radius $e$ centered at $x$. |
[+] And lots more!
Is it only to profit from clients or local businesses?
=>> Build 1 eCom Store, or even 100+ .. It's Up to You!
Simply enter the website information and then with literally ONE CLICK of your mouse, the eCom site will be installed - no WordPress installs needed.
PS. They put the demo video in the middle of the page; scroll down a little and I'm sure you'll find it. |
% Quadratic interpolation function
% Determines the coefficients of y = a*x^2 + b*x + c passing exactly
% through three data points given in the input arrays x and y
% by: Dr. Sherif Omran
%
%
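% Example (hypothetical data): the points (1,1), (2,4), (3,9) lie on y = x^2,
% so [a,b,c] = Quadratic([1 2 3],[1 4 9]) returns a = 1, b = 0, c = 0.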
function [a,b,c]=Quadratic(x,y)
p1=x(2)-x(3);
p2=x(3)-x(1);
p3=x(1)-x(2);
p4=x(3)^2-x(2)^2;
p5=x(1)^2-x(3)^2;
p6=x(2)^2-x(1)^2;
p7=x(2)^2*x(3)-x(2)*x(3)^2;
p8=x(1)*x(3)^2-x(1)^2*x(3);
p9=x(1)^2*x(2)-x(1)*x(2)^2;
delta=x(1)^2*(x(2)-x(3))-x(1)*(x(2)^2-x(3)^2)+1*(x(2)^2*x(3)-x(2)*x(3)^2);
a=(1/delta)*(p1*y(1)+p2*y(2)+p3*y(3));
b=(1/delta)*(p4*y(1)+p5*y(2)+p6*y(3));
c=(1/delta)*(p7*y(1)+p8*y(2)+p9*y(3));
return; |
module EKF
( KF
, kf_x
, kf_p
, kf_t
, makeFilter
, predict
, update
) where
import Prelude hiding ((<>))
import App (Sensor (..))
import Numeric.LinearAlgebra
data KF = KF
{ kf_x :: Vector Double
, kf_p :: Matrix Double
, kf_t :: Word
} deriving Show
makeFilter :: Sensor -> KF
makeFilter (Laser px py t) =
KF { kf_x = 4 |> [px,
py,
0,
0]
, kf_p = (4><4) [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1000, 0,
0, 0, 0, 1000]
, kf_t = t }
makeFilter (Radar rho phi rho' t) =
KF { kf_x = 4 |> [rho * cos phi,
rho * sin phi,
rho' * cos phi,
rho' * sin phi]
, kf_p = (4><4) [1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1000, 0,
0, 0, 0, 1000]
, kf_t = t }
predict :: Word -> KF -> KF
predict t kf =
KF { kf_x = x
, kf_p = p
, kf_t = t }
where
-- Time inside the filter.
filterTime =
kf_t kf
-- Delta-time. Essentially a timestep.
dt =
(fromIntegral (t - filterTime)) / 1000000.0
dt2 = dt ** 2
dt3 = dt ** 3
dt4 = dt ** 4
-- Acceleration noise tuning parameters
ax = 3
ay = 3
noiseAx = ax ** 2
noiseAy = ay ** 2
-- State transition matrix
f = (4><4) [1, 0, dt, 0,
0, 1, 0, dt,
0, 0, 1, 0,
0, 0, 0, 1]
-- Process noise covariance matrix
q = (4><4) [-- Row 1
noiseAx * 0.25 * dt4, 0, noiseAx * 0.5 * dt3, 0,
-- Row 2
0, noiseAy * 0.25 * dt4, 0, noiseAy * 0.5 * dt3,
-- Row 3
noiseAx * 0.5 * dt3, 0, noiseAx * dt2, 0,
-- Row 4
0, noiseAy * 0.5 * dt3, 0, noiseAy * dt2]
-- Noise vector
v = 4 |> [ax * 0.5 * dt2,
ay * 0.5 * dt2,
ax * dt,
ay * dt]
-- Estimated step based on constant velocity (CV) motion model
x = f #> kf_x kf + v
p = f <> kf_p kf <> (tr f) + q
update :: Sensor -> KF -> KF
update (Laser px py t) kf =
KF { kf_x = x'
, kf_p = p'
, kf_t = t }
where
kf' = predict t kf
-- Measurement transition matrices
h = (2><4) [1, 0, 0, 0,
0, 1, 0, 0]
r = (2><2) [0.0225, 0.0000,
0.0000, 0.0225]
y = 2 |> [px, py] - (h #> kf_x kf')
-- Kalman Filter Equations
i = ident 4
ht = tr h
s = h <> kf_p kf' <> ht + r
si = inv s
k = kf_p kf' <> ht <> si
-- Estimation based on the Kalman filter gain
x' = kf_x kf' + (k #> y)
p' = (i - k <> h) <> kf_p kf'
update (Radar rho phi rho' t) kf =
KF { kf_x = x'
, kf_p = p'
, kf_t = t }
where
kf' = predict t kf
-- Calculate the Jacobian matrix values
px = kf_x kf' ! 0
py = kf_x kf' ! 1
vx = kf_x kf' ! 2
vy = kf_x kf' ! 3
c1 = px ** 2 + py ** 2
c2 = sqrt c1
c3 = c1 * c2
zRho = c2
zPhi = (atan2 py px)
zRho' = (px*vx + py*vy) / zRho
-- Measurement transition matrices
h = (3><4) [-- Row 1
px / c2, py / c2, 0, 0,
-- Row 2
-py / c1, px / c1, 0, 0,
-- Row 3
py*(vx * py - vy * px)/c3,
px*(vy * px - vx * py)/c3,
px/c2,
py/c2]
r = (3><3) [0.09, 0.0000, 0.00,
0.00, 0.0009, 0.00,
0.00, 0.0000, 0.09]
z = 3 |> [rho, phi, rho'] - 3 |> [zRho, zPhi, zRho']
y = 3 |> [z ! 0, atan2 (sin (z ! 1)) (cos (z ! 1)), z ! 2]
-- Kalman filter equations
i = ident 4
ht = tr h
s = h <> kf_p kf' <> ht + r
si = inv s
k = kf_p kf' <> ht <> si
-- Estimation based on the Kalman filter gain
x' = kf_x kf' + (k #> y)
p' = (i - k <> h) <> kf_p kf'
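-- A minimal usage sketch (the measurement values and timestamps below are made
-- up for illustration; only the 'Laser'/'Radar' constructors imported from App
-- and the functions defined in this module are used):
--
--   let kf0 = makeFilter (Laser 1.0 2.0 0)
--       kf1 = update (Laser 1.1 2.05 50000) kf0
--       kf2 = update (Radar 2.3 1.1 0.4 100000) kf1
--   in kf_x kf2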
|
Great basic spur straps perform as well as they look. Constructed from refined black bridle leather, these spur straps feature durable nickel plated buckles.
Great basic spur straps perform as well as they look. Constructed from refined rich brown bridle leather, these spur straps feature durable nickel plated buckles. Ladies' size. Fits youth sizes too!
Great basic spur straps perform as well as they look. Constructed from rugged russet harness leather, these spur straps feature durable nickel plated buckles. Ladies' size. Fits youth sizes too!
Great basic spur straps perform as well as they look. Constructed from refined honey bridle leather, these spur straps feature durable nickel plated buckles. Ladies' size. Fits youth sizes too!
Constructed from single-ply burgundy latigo leather, these spur straps are a very economical choice. Durable nickel plated buckles. Ladies' size. Fits youth sizes too!
The ProTack® Collection is the ideal choice for serious horse people and professionals who demand maximum performance. Constructed from durable, weather resistant Hermann Oak harness leather, these spur straps offer rugged dependability. The harness leather is stuffed with extra tallows for a great feel. Non-rust stainless steel hardware.
This is a must-have training item! This martingale is constructed from soft, pliable burgundy latigo leather with variable positioning dees on the roper reins. The design helps to teach correct head position and promote flexion at the poll. Durable nickel plated hardware.
Special reinforced rawhide ends add extra stability to these Hermann Oak russet harness leather or burgundy latigo leather spur straps. Durable nickel plated hardware.
This stainless steel draft bit has a 6" snaffle mouth and 2-1/2" rings.
This stainless steel draft bit features a 6" mule mouth.
These stainless steel spurs have a 2-1/4" shank, a 1-1/4" band and a 12 point rowel. Engraved German silver trim offers understated style. Men's size.
These stainless steel spurs have a 2" shank, a 3/4" band and a 10 point rowel. Men's size.
These stainless steel spurs feature a 1-3/4" shank, a 1/2" band and a 10 point rowel. Ladies' size.
These stainless steel spurs have a 1-3/4" shank, a 1/2" band and a 10 point rowel. Men's size.
These stainless steel spurs feature a 2" shank, a 1" band and a 10 point rowel. Engraved German silver trim adorns these spurs. Men's size.
These nickel plated spurs feature a 3/4" band. Ladies' size. These spurs will fit children's sizes too!
These stainless steel spurs are wire formed. Ladies' size.
These chrome plated spurs feature a 1-1/2" shank and a 10 point rowel. Ladies' size.
These chrome plated quick-on spurs feature a 1-1/2" shank and a 10 point rowel. Men's size.
These chrome plated spurs feature a 3/4" shank. Ladies' size. |
lemma divideR_right: fixes x y :: "'a::real_normed_vector" shows "r \<noteq> 0 \<Longrightarrow> y = x /\<^sub>R r \<longleftrightarrow> r *\<^sub>R y = x" |
/-
Copyright (c) 2023 Kevin Buzzard. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author : Kevin Buzzard
-/
import tactic
import combinatorics.simple_graph.basic -- definition of graph
/-
# Graph theory
A year ago Lean's graph theory was a bit patchy, but now I think
it's robust enough to be taken seriously as a topic for a final project.
So, how do graphs work in Lean? Actually it took a long time
to come up with a definition people were happy with. One issue
is that different people mean different things by "graph". In this
section we're going to stick to "simple graphs", which means
that you have a type of vertices `V`, and edges go between two
distinct vertices in `V`. The rules for a simple graph:
1) Edges are undirected (so they don't have a source and a target, they
just have two ends, which are vertices)
2) You can't have more than one edge between two distinct vertices.
3) You can't have an edge going from a vertex to itself.
Because of rule 2, you can represent an edge as a yes/no question:
"is there an edge between `v` and `w` or not?". In other words
you can represent edges as a function `adj: V → V → Prop`, and you
don't need a separate set or type `E` for edges. `adj` is short
for "adjacent", so `adj v w` means "there's an edge between `v` and `w`,
i.e. "`v` is adjacent to `w`".
Rule 1 means that `adj` is symmetric (if `v` is adjacent to `w` then
`w` is adjacent to `v`), and rule 3 means that it is irreflexive,
i.e. `∀ v, ¬ adj v v`.
Here's how to say "let `G` be a (simple) graph with vertex set `V`"
-/
variables (V : Type) (G : simple_graph V)
-- Here's how to say two edges are adjacent
example (v w : V) : Prop := G.adj v w
-- If v is adjacent to w then w is adjacent to v
example (v w : V) : G.adj v w → G.adj w v := G.adj_symm
-- v isn't adjacent to itself
example (v : V) : ¬ G.adj v v := G.irrefl
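-- A small consequence of the facts above (a sketch proved only from `G.irrefl`):
-- adjacent vertices are always distinct.
example (v w : V) (h : G.adj v w) : v ≠ w :=
begin
  rintro rfl,
  exact G.irrefl h,
end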
/-
Longish interlude: here's how to make a square graph. It's quite laborious.
Lean is better at proving theorems than making explicit examples!
v1 -- v2
| |
| |
v3 -- v4
-/
section square_graph
-- the vertex set of the square graph; we make it a type with four terms
inductive sqV : Type
| v1 : sqV
| v2 : sqV
| v3 : sqV
| v4 : sqV
open sqV -- so I can write `v1` not `sqV.v1`
-- here's one boring way of making the edges -- an inductive proposition
inductive sqE : sqV → sqV → Prop
| e12 : sqE v1 v2
| e21 : sqE v2 v1
| e24 : sqE v2 v4
| e42 : sqE v4 v2
| e34 : sqE v3 v4
| e43 : sqE v4 v3
| e13 : sqE v1 v3
| e31 : sqE v3 v1
-- Now let's make the graph
def sqG : simple_graph sqV :=
{ adj := sqE,
symm := begin
-- do all the cases for the two vertices and the edge
rintro (_ | _ | _ | _) (_ | _ | _ | _) (_ | _ | _ | _ | _ | _ | _ | _),
-- now 8 goals; find the right constructor for sqE in all cases
repeat {constructor},
end,
loopless := begin
rintro (_ | _ | _ | _) (_ | _ | _ | _ | _ | _ | _ | _),
end }
end square_graph
-- Here's how to make a triangle graph; it's rather easier
-- Here `fin 3` is the "canonical" type with 3 terms; to give a term of type `fin 3`
-- is to give a pair consisting of a natural `n` and a proof that `n < 3`.
-- Here the `complete_graph` function is doing all the work for you.
example : simple_graph (fin 3) := complete_graph (fin 3)
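-- For example (a sketch; in current mathlib `complete_graph` is definitionally
-- the `ne` relation, so a proof of `v ≠ w` already proves adjacency):
example (v w : fin 3) (h : v ≠ w) : (complete_graph (fin 3)).adj v w := h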
-- The collection of all simple graphs on a fixed vertex set `V` forms a Boolean algebra
-- (whatever that is)
example : boolean_algebra (simple_graph V) := by apply_instance
-- and in particular they form a lattice, so you can do stuff like this:
example : simple_graph V := ⊥ -- empty graph
example : simple_graph V := ⊤ -- complete graph
example (G H : simple_graph V) : simple_graph V := G ⊔ H -- union of edge sets: v~w in G ⊔ H iff v~w in G or in H
-- etc etc, and you can even do this
example (G : simple_graph V) : simple_graph V := Gᶜ -- complement, i.e. an edge exists in `Gᶜ` between
-- distinct vertices `v` and `w` iff it doesn't
-- exist in `G`
-- The *support* of a graph is the set of vertices that have at least one edge coming out of them.
example (v : V) : v ∈ G.support ↔ ∃ w, G.adj v w :=
begin
refl, -- true by definition
end
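-- For example (a sketch, relying on the membership above being definitional):
-- any edge out of `v` witnesses that `v` lies in the support.
example (v w : V) (h : G.adj v w) : v ∈ G.support :=
⟨w, h⟩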
-- The `neighbor_set` of a vertex is all the vertices connected to it by an edge.
example (v : V) : set V := G.neighbor_set v
example (v w : V) : w ∈ G.neighbor_set v ↔ G.adj v w := iff.rfl -- true by defn
-- The type `sym2 V` is the type of unordered pairs of elements of `V`, i.e. `V × V`
-- modulo the equivalence relation generated by `(v,w)~(w,v)`.
-- So you can regard the edges as a subset of `sym2 V`, and that's `G.edge_set`
example : set (sym2 V) := G.edge_set
-- You can use `v ∈ e` notation if `e : sym2 V`
-- For example, `G.incidence_set v` is the set of edges coming out of `v`,
-- regarded as elements of `sym2 V`
example (v : V) : G.incidence_set v = {e ∈ G.edge_set | v ∈ e} := rfl
-- You can delete a set of edges from `G` using `G.delete_edges`
example (E : set (sym2 V)) : simple_graph V := G.delete_edges E
-- if E contains edges not in G then this doesn't matter, they're just ignored.
-- You can push a graph forward along an injection
example (W : Type) (f : V ↪ W) : simple_graph W := G.map f
-- and pull it back along an arbitrary map
example (U : Type) (g : U → V) : simple_graph U := G.comap g
-- The degree of a vertex is the size of its neighbor_set.
-- Better assume some finiteness conditions to make this work.
variable [G.locally_finite]
-- now we have `finset` versions of some `set` things. For example
example (v : V) : G.degree v = finset.card (G.neighbor_finset v) := rfl
-- If `H` is another graph on a vertex set `W`
variables (W : Type) (H : simple_graph W)
-- then we can consider types of various maps between graphs
example : Type := G →g H -- maps f:V → W such that v₁~v₂ -> f(v₁)~f(v₂)
example : Type := G ↪g H -- injections f : V → W such that v₁~v₂ ↔ f(v₁)~f(v₂)
example : Type := G ≃g H -- isomorphisms of graphs |
context("Calibration")
library("NSmetabolism")
library(rstan)
# library(lubridate)
test_that("One station calibration working", {
skip_on_cran()
dat = readRDS(system.file("testdata/do_calib.rds", package="NSmetabolism"))
expect_error(mod <- onestation(dat$DO, dat$temp, dat$light, dat$pressure, dat$z, dat$delta_t, nsamples = 1000), regexp = NA)
pars = as.matrix(mod)
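# Sanity checks on the posterior draws: the 'er' (ecosystem respiration) draws
# should be non-positive and the 'gpp' (gross primary production) draws
# non-negative.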
expect_true(all(pars[,'er'] <= 0))
expect_true(all(pars[, 'gpp'] >= 0))
})
|
\subsection{Cardinality}
\subsubsection{Cardinality of finite sets}
The cardinality of a set \(s\) is written \(|s|\). For a finite set this is the number of elements in the set. We define it formally below.
\subsubsection{Injections, surjections and bijections}
Consider two sets \(a\) and \(b\). An injection from \(a\) to \(b\) maps distinct elements of \(a\) to distinct elements of \(b\).
If such an injection exists then we say \(|a|\le |b|\).
Similarly, a surjection from \(a\) onto \(b\) is a map under which every element of \(b\) is the image of at least one element of \(a\); if such a surjection exists then \(|a|\ge |b|\).
Therefore, if there is a bijection then \(|a|=|b|\). If there is an injection from \(a\) to \(b\) but no bijection then \(|a|<|b|\), and if there is a surjection from \(a\) onto \(b\) but no bijection then \(|a|>|b|\).
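For example, take \(a=\{1,2,3\}\) and \(b=\{x,y,z\}\). The map \(f\) with \(f(1)=x\), \(f(2)=y\), \(f(3)=z\) is a bijection, so \(|a|=|b|=3\). By contrast, the inclusion of \(\{1,2\}\) into \(\{1,2,3\}\) is an injection, but no bijection between these sets exists, and indeed \(|\{1,2\}|=2<3=|\{1,2,3\}|\).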
\subsubsection{Cardinality as a function}
Every set has a cardinality, so a single cardinality function would need the collection of all sets as its domain. Such a function cannot be well-defined, for the same reason that there is no set of all sets.
Cardinality functions can, however, be defined on any fixed set of sets.
|
(* Title: HPProg.thy
Author: Michikazu Hirata, Tokyo Institute of Technology
*)
section \<open> Formalization of PPV \<close>
text \<open> We formalize PPV (Probabilistic Programming Verification framework)~\cite{Sato_2019}.\<close>
subsection \<open> Language: HPProg \<close>
theory HPProg
imports "../Quasi_Borel_Spaces/Measure_as_QuasiBorel_Measure"
begin
definition hpprog_typing :: "['cont quasi_borel, 'cont \<Rightarrow> 'typ, 'typ quasi_borel] \<Rightarrow> bool" where
"hpprog_typing \<Gamma> e T \<equiv> e \<in> \<Gamma> \<rightarrow>\<^sub>Q T"
syntax
"_hpprog_typing" :: "any \<Rightarrow> 'cont quasi_borel \<Rightarrow> ('cont \<Rightarrow> 'typ) \<Rightarrow> 'typ quasi_borel \<Rightarrow> bool" ("_ \<turnstile>\<^sub>t _ ;; _" 60)
translations
"\<Gamma> \<turnstile>\<^sub>t e ;; T" \<rightleftharpoons> "CONST hpprog_typing \<Gamma> e T"
definition hpprog_context :: "['a quasi_borel,'b quasi_borel] \<Rightarrow> ('a \<times> 'b) quasi_borel" (infixl ",," 78) where
"hpprog_context \<equiv> pair_qbs"
definition empty_context :: "unit quasi_borel" ("\<emptyset>\<^sub>C") where
"empty_context \<equiv> unit_quasi_borel"
abbreviation monadP_qbs_type ("P\<^sub>t") where "monadP_qbs_type \<equiv> monadP_qbs"
declare empty_context_def[simplified]
definition unit_x :: "'a quasi_borel \<Rightarrow> (unit \<times> 'a) quasi_borel" (",_" [79] 79)where
"unit_x X \<equiv> unit_quasi_borel \<Otimes>\<^sub>Q X"
lemma hp_unit_context[simp] :
",X = unit_quasi_borel ,, X"
by(simp add: unit_x_def hpprog_context_def)
definition hp_lift :: "['a \<Rightarrow> 'b, 'cont \<Rightarrow> 'a] \<Rightarrow> 'cont \<Rightarrow> 'b" where
"hp_lift f x \<equiv> (\<lambda>env. f (x env))"
definition hp_lift2 :: "['a \<Rightarrow> 'b \<Rightarrow> 'c, 'cont \<Rightarrow> 'a, 'cont \<Rightarrow> 'b] \<Rightarrow> 'cont \<Rightarrow> 'c" where
"hp_lift2 f x y \<equiv> (\<lambda>env. f (x env) (y env))"
declare hp_lift_def[simp]
declare hp_lift2_def[simp]
lemma hpt_addcont:
assumes "t' = (\<lambda>(env,x). t env)"
and "\<Gamma> \<turnstile>\<^sub>t t ;; T"
shows "\<Gamma>,,X \<turnstile>\<^sub>t t' ;; T"
using qbs_morphism_fst''[OF assms(2)[simplified hpprog_typing_def],of X]
by(simp add: assms(1) hpprog_typing_def hpprog_context_def split_beta')
subsubsection \<open> Variables \<close>
definition var1 :: "'a \<times> 'b \<Rightarrow> 'b" where
"var1 \<equiv> snd"
definition var2 :: "('a \<times> 'b) \<times> 'c \<Rightarrow> 'b" where
"var2 \<equiv> snd \<circ> fst"
definition var3 :: "(('a \<times> 'b) \<times> 'c) \<times> 'd \<Rightarrow> 'b" where
"var3 \<equiv> snd \<circ> fst \<circ> fst"
definition var4 :: "((('a \<times> 'b) \<times> 'c) \<times> 'd) \<times> 'e \<Rightarrow> 'b" where
"var4 \<equiv> snd \<circ> fst \<circ> fst \<circ> fst"
definition var5 :: "(((('a \<times> 'b) \<times> 'c) \<times> 'd) \<times> 'e) \<times> 'f \<Rightarrow> 'b" where
"var5 \<equiv> snd \<circ> fst \<circ> fst \<circ> fst \<circ> fst"
definition var6 :: "((((('a \<times> 'b) \<times> 'c) \<times> 'd) \<times> 'e) \<times> 'f) \<times> 'g \<Rightarrow> 'b" where
"var6 = snd \<circ> fst \<circ> fst \<circ> fst \<circ> fst \<circ> fst"
definition var7 :: "(((((('a \<times> 'b) \<times> 'c) \<times> 'd) \<times> 'e) \<times> 'f) \<times> 'g) \<times> 'h \<Rightarrow> 'b" where
"var7 = snd \<circ> fst \<circ> fst \<circ> fst \<circ> fst \<circ> fst \<circ> fst"
lemma hpt_var1:
"\<Gamma>,,Z \<turnstile>\<^sub>t var1 ;; Z"
using snd_qbs_morphism
by(auto simp add: hpprog_typing_def hpprog_context_def var1_def)
lemma hpt_var2:
"\<Gamma>,,Z,,Y \<turnstile>\<^sub>t var2 ;; Z"
unfolding var2_def hpprog_typing_def hpprog_context_def
using qbs_morphism_comp[OF fst_qbs_morphism snd_qbs_morphism]
by auto
lemma hpt_var3:
"\<Gamma>,,Z,,Y,,X \<turnstile>\<^sub>t var3 ;; Z"
unfolding var3_def hpprog_typing_def hpprog_context_def
using qbs_morphism_comp[OF fst_qbs_morphism qbs_morphism_comp[OF fst_qbs_morphism snd_qbs_morphism]]
by auto
lemma hpt_var4:
"\<Gamma>,,Z,,Y,,X,,W \<turnstile>\<^sub>t var4 ;; Z"
unfolding var4_def hpprog_typing_def hpprog_context_def
by (meson fst_qbs_morphism qbs_morphism_comp snd_qbs_morphism)
lemma hpt_var5:
"\<Gamma>,,Z,,Y,,X,,W,,V \<turnstile>\<^sub>t var5 ;; Z"
unfolding var5_def hpprog_typing_def hpprog_context_def
by (meson fst_qbs_morphism qbs_morphism_comp snd_qbs_morphism)
lemma hpt_var6:
"\<Gamma>,,Z,,Y,,X,,W,,V,,U \<turnstile>\<^sub>t var6 ;; Z"
unfolding var6_def hpprog_typing_def hpprog_context_def
by (meson fst_qbs_morphism qbs_morphism_comp snd_qbs_morphism)
lemma hpt_var7:
"\<Gamma>,,Z,,Y,,X,,W,,V,,U,,T \<turnstile>\<^sub>t var7 ;; Z"
unfolding var7_def hpprog_typing_def hpprog_context_def
by (meson fst_qbs_morphism qbs_morphism_comp snd_qbs_morphism)
definition "hp_context \<equiv> fst"
subsubsection \<open> Constants \<close>
definition hp_const :: "'a \<Rightarrow> 'env \<Rightarrow> 'a" where
"hp_const k \<equiv> (\<lambda>env. k)"
(*
--------------------
\<Gamma> |- n : \<nat> *)
lemma hpt_natc:
"\<Gamma> \<turnstile>\<^sub>t (hp_const (n :: nat)) ;; \<nat>\<^sub>Q"
using qbs_morphism_const[of n nat_quasi_borel \<Gamma>]
by(simp add: hpprog_typing_def hp_const_def)
(*
--------------------
\<Gamma> |- r : \<real> *)
lemma hpt_realc:
"\<Gamma> \<turnstile>\<^sub>t (hp_const (r :: real)) ;; \<real>\<^sub>Q"
using qbs_morphism_const[of r "\<real>\<^sub>Q" \<Gamma>]
by(simp add: hpprog_typing_def hp_const_def)
(*
----------------------
\<Gamma> |- r : ennreal *)
lemma hpt_ennrealc:
"\<Gamma> \<turnstile>\<^sub>t (hp_const (r :: ennreal)) ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using qbs_morphism_const[of r ennreal_quasi_borel \<Gamma>]
by(simp add: hpprog_typing_def hp_const_def)
(*
---------------------
\<Gamma> |- () : unit *)
lemma hpt_unitc:
"\<Gamma> \<turnstile>\<^sub>t (hp_const ()) ;; unit_quasi_borel"
using to_unit_quasi_borel_morphism[of \<Gamma>]
by(simp add: hpprog_typing_def hp_const_def to_unit_quasi_borel_def)
definition "hp_constf \<equiv> hp_lift hp_const"
(* \<Gamma> |- t : Y
----------------------(x \<notin> t)
\<Gamma> |- \<lambda>x. t : X \<Rightarrow> Y *)
lemma hpt_constf:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; Y"
shows "\<Gamma> \<turnstile>\<^sub>t hp_constf t ;; exp_qbs X Y"
proof -
have 1:"case_prod (hp_constf t) = t \<circ> fst"
by(auto simp add: hp_constf_def hp_const_def)
have "case_prod (hp_constf t) \<in> \<Gamma> \<Otimes>\<^sub>Q X \<rightarrow>\<^sub>QY"
using qbs_morphism_comp[of fst "\<Gamma> \<Otimes>\<^sub>Q X" \<Gamma> t Y] assms fst_qbs_morphism[of \<Gamma> X]
by(simp add: hpprog_typing_def 1)
thus ?thesis
using curry_preserves_morphisms[of "case_prod (hp_constf t)" \<Gamma> X Y]
by(simp add: hpprog_typing_def)
qed
subsubsection \<open> Lambda Abstraction \<close>
definition hp_lambda ("\<lambda>\<^sub>t")
where "hp_lambda \<equiv> curry"
(* x : X, \<Gamma> |- t : T
-----------------------
\<Gamma> |- (\<lambda>x. t) : X \<Rightarrow> T *)
lemma hpt_abs:
assumes "\<Gamma>,,X \<turnstile>\<^sub>t t ;; T"
shows "\<Gamma> \<turnstile>\<^sub>t hp_lambda t ;; X \<Rightarrow>\<^sub>Q T"
using curry_preserves_morphisms[of t] assms
by(simp add: hpprog_typing_def hpprog_context_def hp_lambda_def)
subsubsection \<open> Function Application \<close>
definition hp_app :: "['cont \<Rightarrow> 'a \<Rightarrow> 'b, 'cont \<Rightarrow> 'a] \<Rightarrow> 'cont \<Rightarrow> 'b" (infixr "$\<^sub>t" 1) where
"hp_app f x \<equiv> qbs_eval \<circ> (\<lambda>env. (f env,x env))"
(* \<Gamma> |- f : T1 \<Rightarrow> T2 \<Gamma> |- x : T1
---------------------------------
\<Gamma> |- f x : T2 *)
lemma hpt_app:
assumes "\<Gamma> \<turnstile>\<^sub>t f ;; (exp_qbs X Y)"
and "\<Gamma> \<turnstile>\<^sub>t x ;; X"
shows "\<Gamma> \<turnstile>\<^sub>t hp_app f x ;; Y"
unfolding hp_app_def hpprog_typing_def
by(rule qbs_morphism_comp[OF qbs_morphism_tuple[OF assms[simplified hpprog_typing_def]] qbs_eval_morphism])
subsubsection \<open> Pair \<close>
definition "hp_pair \<equiv> hp_lift2 Pair"
(* \<Gamma> |- t1 : X \<Gamma> |- t2 : Y
---------------------------
\<Gamma> |- (t1,t2) : X \<times> Y *)
lemma hpt_pair:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; X"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; Y"
shows "\<Gamma> \<turnstile>\<^sub>t hp_pair t1 t2 ;; X \<Otimes>\<^sub>Q Y"
using assms qbs_morphism_tuple[of t1 \<Gamma> X t2 Y]
by(simp add: hpprog_typing_def hp_pair_def)
subsubsection \<open> Projections \<close>
definition "hp_fst \<equiv> hp_lift fst"
definition "hp_snd \<equiv> hp_lift snd"
(* \<Gamma> |- s : X \<times> Y
------------------
\<Gamma> |- fst s : X *)
lemma hpt_fst:
assumes "\<Gamma> \<turnstile>\<^sub>t s ;; X \<Otimes>\<^sub>Q Y"
shows "\<Gamma> \<turnstile>\<^sub>t hp_fst s ;; X"
using assms fst_qbs_morphism qbs_morphism_comp[of s \<Gamma> _ fst X]
by(auto simp: hp_fst_def hpprog_typing_def o_def)
(* \<Gamma> |- s : X \<times> Y
------------------
\<Gamma> |- snd s : Y *)
lemma hpt_snd:
assumes "\<Gamma> \<turnstile>\<^sub>t s ;; X \<Otimes>\<^sub>Q Y"
shows "\<Gamma> \<turnstile>\<^sub>t hp_snd s ;; Y"
using assms snd_qbs_morphism qbs_morphism_comp[of s \<Gamma> "X \<Otimes>\<^sub>Q Y" snd Y]
by(auto simp: hp_snd_def hpprog_typing_def o_def)
subsubsection \<open> Copair \<close>
definition "hp_inl \<equiv> hp_lift Inl"
definition "hp_inr \<equiv> hp_lift Inr"
(* \<Gamma> |- t : X
---------------------
\<Gamma> |- inl t : X + Y *)
lemma hpt_inl:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; X"
shows "\<Gamma> \<turnstile>\<^sub>t hp_inl t ;; X <+>\<^sub>Q Y"
using assms Inl_qbs_morphism[of X Y] qbs_morphism_comp[of t \<Gamma> X Inl]
by(auto simp: hp_inl_def hpprog_typing_def o_def)
(* \<Gamma> |- t : Y
---------------------
\<Gamma> |- inr t : X + Y *)
lemma hpt_inr:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; Y"
shows "\<Gamma> \<turnstile>\<^sub>t hp_inr t ;; X <+>\<^sub>Q Y"
using assms qbs_morphism_comp[OF _ Inr_qbs_morphism[of Y X]]
by(auto simp: hp_inr_def hpprog_typing_def comp_def)
subsubsection \<open> Cases \<close>
definition hp_case :: "[ 'cont \<Rightarrow> 'a + 'b, 'cont \<times> 'a \<Rightarrow> 'c, 'cont \<times> 'b \<Rightarrow> 'c] \<Rightarrow> 'cont \<Rightarrow> 'c" where
"hp_case t t1 t2 \<equiv> (\<lambda>env. case_sum (curry t1 env) (curry t2 env) (t env))"
(* \<Gamma> |- t : X + Y x : X, \<Gamma> |- t1 : Z y : Y, \<Gamma> |- t2 : Z
------------------------------------------------------------
\<Gamma> |- case t with
| inl x \<Rightarrow> t1
| inr y \<Rightarrow> t2 : Z *)
lemma hpt_case:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; X <+>\<^sub>Q Y"
"\<Gamma>,,X \<turnstile>\<^sub>t t1 ;; Z"
and "\<Gamma>,,Y \<turnstile>\<^sub>t t2 ;; Z"
shows "\<Gamma> \<turnstile>\<^sub>t hp_case t t1 t2 ;; Z"
proof -
have "(\<lambda>env. case_sum (curry t1 env) (curry t2 env) (t env)) = case_prod (case_prod case_sum) \<circ> (\<lambda>env. ((curry t1 env,curry t2 env),t env))"
by auto
also have "... \<in> \<Gamma> \<rightarrow>\<^sub>Q Z"
using assms
by(auto intro!: qbs_morphism_comp[where Y="(exp_qbs X Z \<Otimes>\<^sub>Q exp_qbs Y Z) \<Otimes>\<^sub>Q (X <+>\<^sub>Q Y)"] qbs_morphism_tuple curry_preserves_morphisms uncurry_preserves_morphisms case_sum_morphism
simp: hpprog_typing_def hpprog_context_def)
finally show ?thesis
by(simp add: hp_case_def hpprog_typing_def)
qed
subsubsection \<open> List \<close>
abbreviation "list \<equiv> list_qbs"
definition hp_nil :: "'env \<Rightarrow> 'a list" ("[]\<^sub>t") where
"hp_nil \<equiv> hp_const []"
lemma hpt_nil:
"\<Gamma> \<turnstile>\<^sub>t hp_nil ;; list X"
unfolding hp_nil_def hpprog_typing_def hp_const_def
by(auto intro!: qbs_morphismI qbs_closed2_dest simp: list_qbs_space)
definition hp_cons' :: "['env, 'a, 'a list] \<Rightarrow> 'a list" where
"hp_cons' \<equiv> hp_const Cons"
lemma hpt_cons':
"\<Gamma> \<turnstile>\<^sub>t hp_cons' ;; exp_qbs X (exp_qbs (list X) (list X))"
unfolding hpprog_typing_def hp_cons'_def hp_const_def
by(rule qbs_morphism_const,simp add: cons_qbs_morphism)
definition "hp_cons hx hl \<equiv> hp_app (hp_app hp_cons' hx) hl"
lemma hpt_cons:
assumes "\<Gamma> \<turnstile>\<^sub>t l ;; list X"
and "\<Gamma> \<turnstile>\<^sub>t x ;; X"
shows "\<Gamma> \<turnstile>\<^sub>t hp_cons x l ;; list X"
unfolding hp_cons_def
apply(rule hpt_app)+
apply(rule hpt_cons')
apply fact+
done
syntax
"_hp_list" :: "args => 'env \<Rightarrow> nat \<times> (nat \<Rightarrow> 'a)" ("[(_)]\<^sub>t")
translations
"[x, xs]\<^sub>t" == "CONST hp_cons x [xs]\<^sub>t"
"[x]\<^sub>t" == "CONST hp_cons x []\<^sub>t"
lemma
"\<Gamma>,,\<real>\<^sub>Q,,\<real>\<^sub>Q \<turnstile>\<^sub>t [var1, hp_const 1, var2]\<^sub>t ;; list \<real>\<^sub>Q"
apply(rule hpt_cons)+
apply(rule hpt_nil)
apply(rule hpt_var2)
apply(rule hpt_realc)
apply(rule hpt_var1)
done
subsubsection \<open> Return \<close>
definition hp_return :: "[ 'a quasi_borel,'cont \<Rightarrow> 'a] \<Rightarrow> 'cont \<Rightarrow> 'a qbs_prob_space" where
"hp_return X t \<equiv> (\<lambda>env. qbs_return X (t env))"
(* \<Gamma> |- t : T
-------------------------
\<Gamma> |- return T t : P T *)
lemma hpt_return:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; T"
shows "\<Gamma> \<turnstile>\<^sub>t hp_return T t ;; (monadP_qbs T)"
using assms qbs_return_morphism[of T] qbs_morphism_comp[of t \<Gamma> T "qbs_return T" "monadP_qbs T"]
by(simp add: hpprog_typing_def o_def hp_return_def)
subsubsection \<open> Bind \<close>
definition hp_bind (infixl "\<bind>\<^sub>t" 54) where "hp_bind \<equiv> hp_lift2 qbs_bind"
adhoc_overloading Monad_Syntax.bind hp_bind
(* \<Gamma> |- s : P T1 \<Gamma> |- f : T1 \<Rightarrow> P T2
--------------------------------------
\<Gamma> |- s \<bind> f : P T2 *)
lemma hpt_bind:
assumes "\<Gamma> \<turnstile>\<^sub>t s ;; (monadP_qbs T1)"
and "\<Gamma> \<turnstile>\<^sub>t f ;; (exp_qbs T1 (monadP_qbs T2))"
shows "\<Gamma> \<turnstile>\<^sub>t s \<bind>\<^sub>t f ;; (monadP_qbs T2)"
using qbs_bind_morphism[of s \<Gamma> T1] assms
by (simp add: hpprog_typing_def hp_bind_def)
subsubsection \<open> Normal Distribution \<close>
definition "hp_normal' \<equiv> hp_const qbs_normal_distribution"
definition "hp_normal \<equiv> (\<lambda>\<mu> \<sigma>. hp_app (hp_app hp_normal' \<mu>) \<sigma>)"
lemma hpt_normal':
"\<Gamma> \<turnstile>\<^sub>t hp_normal' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q (monadP_qbs \<real>\<^sub>Q))"
using qbs_morphism_const[of qbs_normal_distribution "exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q (monadP_qbs \<real>\<^sub>Q))" \<Gamma>]
by(simp add: hp_normal'_def hpprog_typing_def hp_const_def qbs_normal_distribution_morphism)
lemma hpt_normal:
assumes "\<Gamma> \<turnstile>\<^sub>t \<mu> ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t \<sigma> ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_normal \<mu> \<sigma> ;; monadP_qbs \<real>\<^sub>Q"
unfolding hp_normal_def
apply(rule hpt_app,rule hpt_app,rule hpt_normal')
by fact+
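(* A small composite example (a sketch combining the rules above): draw from a
   normal distribution with constant parameters and return the drawn value. *)
lemma "\<Gamma> \<turnstile>\<^sub>t hp_normal (hp_const 0) (hp_const 1) \<bind>\<^sub>t \<lambda>\<^sub>t (hp_return \<real>\<^sub>Q var1) ;; monadP_qbs \<real>\<^sub>Q"
apply(rule hpt_bind)
apply(rule hpt_normal)
apply(rule hpt_realc)
apply(rule hpt_realc)
apply(rule hpt_abs)
apply(rule hpt_return)
apply(rule hpt_var1)
done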
subsubsection \<open> Uniform Distribution \<close>
definition "hp_uniform' \<equiv> hp_const qbs_interval_uniform_distribution"
definition "hp_uniform \<equiv> (\<lambda>a b. hp_app (hp_app hp_uniform' a) b)"
lemma hpt_uniform':
"\<Gamma> \<turnstile>\<^sub>t hp_uniform' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q (monadP_qbs \<real>\<^sub>Q))"
unfolding hp_uniform'_def hp_const_def hpprog_typing_def
using qbs_morphism_const[of qbs_interval_uniform_distribution "exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q (monadP_qbs \<real>\<^sub>Q))" \<Gamma>] qbs_interval_uniform_distribution_morphism
by simp
lemma hpt_uniform:
assumes "\<Gamma> \<turnstile>\<^sub>t a ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t b ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_uniform a b ;; monadP_qbs \<real>\<^sub>Q"
unfolding hp_uniform_def
apply(rule hpt_app,rule hpt_app,rule hpt_uniform')
by fact+
subsubsection \<open>Bernoulli Distribution\<close>
definition "hp_bernoulli' \<equiv> hp_const qbs_bernoulli"
definition "hp_bernoulli \<equiv> (\<lambda>x. hp_app hp_bernoulli' x)"
lemma hpt_bernoulli':
"\<Gamma> \<turnstile>\<^sub>t hp_bernoulli' ;; exp_qbs \<real>\<^sub>Q (monadP_qbs \<bool>\<^sub>Q)"
using qbs_morphism_const[of qbs_bernoulli " exp_qbs \<real>\<^sub>Q (monadP_qbs \<bool>\<^sub>Q)"]
by(auto simp add: hp_bernoulli'_def hpprog_typing_def hp_const_def qbs_bernoulli_morphism)
lemma hpt_bernoulli:
assumes "\<Gamma> \<turnstile>\<^sub>t r ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_bernoulli r ;; monadP_qbs \<bool>\<^sub>Q"
unfolding hp_bernoulli_def
by(rule hpt_app,rule hpt_bernoulli',fact)
subsubsection \<open> Numeral Functions \<close>
definition "hp_suc' \<equiv> hp_const Suc"
definition "hp_suc \<equiv> hp_app hp_suc'"
definition "hp_real' \<equiv> hp_const real"
definition "hp_real \<equiv> hp_app hp_real'"
definition "hp_enn2real' \<equiv> hp_const enn2real"
definition "hp_enn2real \<equiv> hp_app hp_enn2real'"
definition "hp_abs' \<equiv> hp_const abs"
definition hp_abs :: "('cont \<Rightarrow> 'b::abs) \<Rightarrow> 'cont \<Rightarrow> 'b" ("\<bar>_\<bar>\<^sub>t") where
"hp_abs \<equiv> hp_app hp_abs'"
definition "hp_ennreal' \<equiv> hp_const ennreal"
definition "hp_ennreal \<equiv> hp_app hp_ennreal'"
consts
hp_plus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "+\<^sub>t" 65)
consts
hp_minus :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "-\<^sub>t" 65)
consts
hp_uminus :: "'a \<Rightarrow> 'a" ("-\<^sub>t _" [81] 80)
consts
hp_times :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "*\<^sub>t" 70)
consts
hp_div :: "'a \<Rightarrow> 'a \<Rightarrow> 'a" (infixl "'/\<^sub>t" 70)
definition "hp_plus'' \<equiv> hp_const plus"
definition "hp_plus' x y \<equiv> (hp_app (hp_app hp_plus'' x) y)"
definition "hp_minus'' \<equiv> hp_const minus"
definition "hp_minus' x y \<equiv> (hp_app (hp_app hp_minus'' x) y)"
definition "hp_uminus'' \<equiv> hp_const uminus"
definition "hp_uminus' \<equiv> hp_app hp_uminus''"
definition "hp_times'' \<equiv> hp_const times"
definition "hp_times' x y \<equiv> hp_app (hp_app hp_times'' x) y"
definition "hp_div'' \<equiv> hp_const inverse_divide"
definition "hp_div' x y \<equiv> hp_app (hp_app hp_div'' x) y"
definition "hp_power'' \<equiv> hp_const power"
fun hp_power' :: "['env \<Rightarrow> ('a :: monoid_mult), nat] \<Rightarrow> 'env \<Rightarrow> 'a" (infixr "^\<^sup>t" 80) where
"hp_power' hn 0 = hp_const 1" |
"hp_power' hn (Suc n) = hp_times' hn (hp_power' hn n)"
definition "hp_funplus' \<equiv> hp_lift2 hp_plus'"
definition "hp_funtimes' \<equiv> hp_lift2 hp_times'"
definition "hp_fundiv' \<equiv> hp_lift2 hp_div'"
adhoc_overloading hp_plus hp_plus'
adhoc_overloading hp_plus hp_funplus'
adhoc_overloading hp_minus hp_minus'
adhoc_overloading hp_uminus hp_uminus'
adhoc_overloading hp_times hp_times'
adhoc_overloading hp_times hp_funtimes'
adhoc_overloading hp_div hp_div'
adhoc_overloading hp_div hp_fundiv'
lemma hp_power_one:
"t^\<^sup>t1 = t"
by(simp add: hp_times'_def hp_const_def hp_app_def qbs_eval_def hp_times''_def comp_def)
lemma hp_power_square:
"t^\<^sup>t2 = t *\<^sub>t t"
by(simp add: hp_times'_def numeral_2_eq_2 hp_const_def hp_app_def qbs_eval_def hp_times''_def comp_def)
lemma hpt_suc':
"\<Gamma> \<turnstile>\<^sub>t hp_suc' ;; exp_qbs \<nat>\<^sub>Q \<nat>\<^sub>Q"
unfolding hp_suc'_def hpprog_typing_def hp_const_def
by(rule qbs_morphism_const,simp add: nat_qbs_morphism)
lemma hpt_suc:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_suc t ;; \<nat>\<^sub>Q"
unfolding hp_suc_def
by(rule hpt_app,rule hpt_suc',fact)
lemma hpt_plusn':
"\<Gamma> \<turnstile>\<^sub>t hp_plus'' ;; exp_qbs \<nat>\<^sub>Q (exp_qbs \<nat>\<^sub>Q \<nat>\<^sub>Q)"
unfolding hp_plus''_def hpprog_typing_def hp_const_def
apply(rule qbs_morphism_const,simp)
apply(rule nat_qbs_morphism,simp)
apply(rule nat_qbs_morphism,simp)
done
lemma hpt_plusn:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 +\<^sub>t t2 ;; \<nat>\<^sub>Q"
unfolding hp_plus'_def
apply(rule hpt_app,rule hpt_app,rule hpt_plusn')
by fact+
lemma hpt_minusn':
"\<Gamma> \<turnstile>\<^sub>t hp_minus'' ;; exp_qbs \<nat>\<^sub>Q (exp_qbs \<nat>\<^sub>Q \<nat>\<^sub>Q)"
unfolding hp_minus''_def hpprog_typing_def hp_const_def
apply(rule qbs_morphism_const,simp)
apply(rule nat_qbs_morphism,simp)
apply(rule nat_qbs_morphism,simp)
done
lemma hpt_minusn:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 -\<^sub>t t2 ;; \<nat>\<^sub>Q"
unfolding hp_minus'_def
apply(rule hpt_app,rule hpt_app,rule hpt_minusn')
by fact+
lemma hpt_timesn':
"\<Gamma> \<turnstile>\<^sub>t hp_times'' ;; exp_qbs \<nat>\<^sub>Q (exp_qbs \<nat>\<^sub>Q \<nat>\<^sub>Q)"
unfolding hp_times''_def hpprog_typing_def hp_const_def
apply(rule qbs_morphism_const,simp)
apply(rule nat_qbs_morphism,simp)
apply(rule nat_qbs_morphism,simp)
done
lemma hpt_timesn:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 *\<^sub>t t2 ;; \<nat>\<^sub>Q"
unfolding hp_times'_def
apply(rule hpt_app,rule hpt_app,rule hpt_timesn')
by fact+
lemma hpt_powern:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t^\<^sup>t n ;; \<nat>\<^sub>Q"
by(induction n; simp add: hpt_natc hpt_timesn assms)
lemma hpt_real':
"\<Gamma> \<turnstile>\<^sub>t hp_real' ;; exp_qbs \<nat>\<^sub>Q \<real>\<^sub>Q"
unfolding hp_real'_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule nat_qbs_morphism,simp)
done
lemma hpt_real:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_real t ;; \<real>\<^sub>Q"
unfolding hp_real_def
apply(rule hpt_app,rule hpt_real')
by fact
lemma hpt_ennreal':
"\<Gamma> \<turnstile>\<^sub>t hp_ennreal' ;; exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_ennreal'_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const)
by auto
lemma hpt_ennreal:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t hp_ennreal t ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_ennreal_def
apply(rule hpt_app,rule hpt_ennreal')
by fact
lemma hpt_absr':
"\<Gamma> \<turnstile>\<^sub>t hp_abs' ;; exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q"
unfolding hp_abs'_def hpprog_typing_def hp_const_def
apply(rule qbs_morphism_const)
by auto
lemma hpt_absr:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t \<bar>t\<bar>\<^sub>t ;; \<real>\<^sub>Q"
unfolding hp_abs_def
apply(rule hpt_app,rule hpt_absr')
by fact
lemma hpt_plusr':
"\<Gamma> \<turnstile>\<^sub>t hp_plus'' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q)"
unfolding hp_plus''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (+)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_plusr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 +\<^sub>t t2 ;; \<real>\<^sub>Q"
unfolding hp_plus'_def
apply(rule hpt_app,rule hpt_app, rule hpt_plusr')
by fact+
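(* A small derived example (a sketch built only from the rules above):
   in a context whose last variable has type real, the variable plus a real
   constant again has type real. *)
lemma "\<Gamma>,,\<real>\<^sub>Q \<turnstile>\<^sub>t var1 +\<^sub>t hp_const r ;; \<real>\<^sub>Q"
apply(rule hpt_plusr)
apply(rule hpt_var1)
apply(rule hpt_realc)
done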
lemma hpt_minusr':
"\<Gamma> \<turnstile>\<^sub>t hp_minus'' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q)"
unfolding hp_minus''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (-)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_minusr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 -\<^sub>t t2 ;; \<real>\<^sub>Q"
unfolding hp_minus'_def
apply(rule hpt_app,rule hpt_app, rule hpt_minusr')
by fact+
lemma hpt_uminusr':
"\<Gamma> \<turnstile>\<^sub>t hp_uminus'' ;; exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q"
unfolding hp_uminus''_def hpprog_typing_def hp_const_def
apply(rule qbs_morphism_const)
by auto
lemma hp_uminusr:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q"
shows"\<Gamma> \<turnstile>\<^sub>t -\<^sub>t t ;; \<real>\<^sub>Q"
unfolding hp_uminus'_def
apply(rule hpt_app,rule hpt_uminusr')
by fact
lemma hpt_timesr':
"\<Gamma> \<turnstile>\<^sub>t hp_times'' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q)"
unfolding hp_times''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (*)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_timesr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 *\<^sub>t t2 ;; \<real>\<^sub>Q"
unfolding hp_times'_def
apply(rule hpt_app,rule hpt_app,rule hpt_timesr')
by fact+
lemma hpt_divr':
"\<Gamma> \<turnstile>\<^sub>t hp_div'' ;; exp_qbs \<real>\<^sub>Q (exp_qbs \<real>\<^sub>Q \<real>\<^sub>Q)"
unfolding hp_div''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (/)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_divr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t1 /\<^sub>t t2 ;; \<real>\<^sub>Q"
unfolding hp_div'_def
apply(rule hpt_app,rule hpt_app,rule hpt_divr')
by fact+
lemma hpt_powerr:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t t^\<^sup>tn ;; \<real>\<^sub>Q"
by(induction n; simp add: hpt_realc assms hpt_timesr)
lemma hpt_enn2real':
"\<Gamma> \<turnstile>\<^sub>t hp_enn2real' ;; exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 \<real>\<^sub>Q"
unfolding hpprog_typing_def hp_enn2real'_def hp_const_def
apply(rule qbs_morphism_const)
by auto
lemma hpt_enn2real:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t hp_enn2real t ;; \<real>\<^sub>Q"
unfolding hp_enn2real_def
apply(rule hpt_app,rule hpt_enn2real')
by fact
interpretation ennreal_ennreal : pair_standard_borel_space_UNIV ennreal_borel ennreal_borel
by standard
lemma hpt_plusennr':
"\<Gamma> \<turnstile>\<^sub>t hp_plus'' ;; exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 (exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0)"
unfolding hp_plus''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (+)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_plusennr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t t1 +\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_plus'_def
apply(rule hpt_app,rule hpt_app,rule hpt_plusennr')
by fact+
lemma hpt_minusennr':
"\<Gamma> \<turnstile>\<^sub>t hp_minus'' ;; exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 (exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0)"
unfolding hp_minus''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (-)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_minusennr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t t1 -\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_minus'_def
apply(rule hpt_app,rule hpt_app,rule hpt_minusennr')
by fact+
lemma hpt_timesennr':
"\<Gamma> \<turnstile>\<^sub>t hp_times'' ;; exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 (exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0)"
unfolding hp_times''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (*)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_timesennr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t t1 *\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_times'_def
apply(rule hpt_app,rule hpt_app,rule hpt_timesennr')
by fact+
lemma hpt_divennr':
"\<Gamma> \<turnstile>\<^sub>t hp_div'' ;; exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 (exp_qbs \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0 \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0)"
unfolding hp_div''_def hp_const_def hpprog_typing_def
apply(rule qbs_morphism_const,simp)
apply(rule curry_preserves_morphisms[where f="case_prod (/)",simplified curry_def split_beta',simplified])
by auto
lemma hpt_divennr:
assumes "\<Gamma> \<turnstile>\<^sub>t t1 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t t1 /\<^sub>t t2 ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
unfolding hp_div'_def
apply(rule hpt_app,rule hpt_app,rule hpt_divennr')
by fact+
lemma hpt_powerennr:
assumes "\<Gamma> \<turnstile>\<^sub>t t ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t t^\<^sup>tn ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
by(induction n; simp add: hpt_ennrealc assms hpt_timesennr)
lemma hpt_function:
assumes "\<And>(\<Gamma>::('a \<times> 'b) quasi_borel) t1 t2.
(\<Gamma> \<turnstile>\<^sub>t t1 ;; T) \<Longrightarrow> (\<Gamma> \<turnstile>\<^sub>t t2 ;; T) \<Longrightarrow> (\<Gamma> \<turnstile>\<^sub>t hp_lift2 f t1 t2 ;; T)"
"(\<Gamma>::'a quasi_borel) \<turnstile>\<^sub>t f1 ;; exp_qbs (K:: 'b quasi_borel) T "
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs K T"
shows "\<Gamma> \<turnstile>\<^sub>t hp_lift2 (hp_lift2 f) f1 f2 ;; exp_qbs K T"
using assms
proof -
have "\<Gamma>,,K \<turnstile>\<^sub>t (\<lambda>(l,k). f1 l k) ;; T"
"\<Gamma>,,K \<turnstile>\<^sub>t (\<lambda>(l,k). f2 l k) ;; T"
using assms(2,3) uncurry_preserves_morphisms[of f1 \<Gamma> K T] uncurry_preserves_morphisms[of f2 \<Gamma> K T]
by(auto simp add: hpprog_typing_def hpprog_context_def)
hence "\<Gamma>,,K \<turnstile>\<^sub>t hp_lift2 f (\<lambda>(l,k). f1 l k) (\<lambda>(l,k). f2 l k) ;; T"
using assms(1)[of "\<Gamma>,,K"] by simp
hence "\<Gamma> \<turnstile>\<^sub>t curry (hp_lift2 f (\<lambda>(l,k). f1 l k) (\<lambda>(l,k). f2 l k)) ;; exp_qbs K T"
using curry_preserves_morphisms[of _ \<Gamma> K T]
by(simp add: hpprog_typing_def hpprog_context_def)
moreover have "curry (hp_lift2 f (\<lambda>(l,k). f1 l k) (\<lambda>(l,k). f2 l k)) = hp_lift2 (hp_lift2 f) f1 f2"
by(rule ext,auto)
ultimately show ?thesis by simp
qed
lemma hpt_funplusn:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 +\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
using hpt_function[OF _ assms] hpt_plusn
by(auto simp add: hp_funplus'_def hp_plus'_def hp_plus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funplusr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 +\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2)] hpt_plusr
by(auto simp add: hp_funplus'_def hp_plus'_def hp_plus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funplusennr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t f1 +\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using hpt_function[OF _ assms(1) assms(2)] hpt_plusennr
by(auto simp add: hp_funplus'_def hp_plus'_def hp_plus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funminusn:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 -\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2),of minus] hpt_minusn
by(auto simp add: hp_minus'_def fun_diff_def hp_minus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funminusr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 -\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2)] hpt_minusr
by(auto simp add: hp_minus'_def fun_diff_def hp_minus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funuminusr:
assumes "\<Gamma> \<turnstile>\<^sub>t f ;; exp_qbs T \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t -\<^sub>t f ;; exp_qbs T \<real>\<^sub>Q"
proof -
have "(\<lambda>z. f (fst z) (snd z)) \<in> borel_measurable (qbs_to_measure (\<Gamma> \<Otimes>\<^sub>Q T))"
using uncurry_preserves_morphisms[OF assms[simplified hpprog_typing_def]]
by(auto simp: case_prod_beta')
hence "(\<lambda>z. - f (fst z) (snd z)) \<in> borel_measurable (qbs_to_measure (\<Gamma> \<Otimes>\<^sub>Q T))"
by simp
hence "(\<lambda>z. - f (fst z) (snd z)) \<in> \<Gamma> \<Otimes>\<^sub>Q T \<rightarrow>\<^sub>Q \<real>\<^sub>Q"
by auto
thus ?thesis
using curry_preserves_morphisms[of "\<lambda>z. - f (fst z) (snd z)" \<Gamma> T "\<real>\<^sub>Q",simplified curry_def,simplified]
by(simp add: hpprog_typing_def hp_uminus'_def fun_Compl_def hp_uminus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
qed
lemma hpt_funminusennr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t f1 -\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using hpt_function[OF _ assms(1) assms(2)] hpt_minusennr
by(auto simp add: hp_minus'_def fun_diff_def hp_minus''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funtimesn:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<nat>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 *\<^sub>t f2 ;; exp_qbs T \<nat>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2)] hpt_timesn
by(auto simp add: hp_funtimes'_def hp_times'_def hp_times''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funtimesr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 *\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2)] hpt_timesr
by(auto simp add: hp_funtimes'_def hp_times'_def hp_times''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_funtimesennr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t f1 *\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using hpt_function[OF _ assms(1) assms(2)] hpt_timesennr
by(auto simp add: hp_funtimes'_def hp_times'_def hp_times''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_fundivr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
shows "\<Gamma> \<turnstile>\<^sub>t f1 /\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q"
using hpt_function[OF _ assms(1) assms(2)] hpt_divr
by(auto simp add: hp_fundiv'_def hp_div'_def hp_div''_def hp_const_def hp_app_def qbs_eval_def comp_def)
lemma hpt_fundivennr:
assumes "\<Gamma> \<turnstile>\<^sub>t f1 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
and "\<Gamma> \<turnstile>\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t f1 /\<^sub>t f2 ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using hpt_function[OF _ assms(1) assms(2)] hpt_divennr
by(auto simp add: hp_fundiv'_def hp_div'_def hp_div''_def hp_const_def hp_app_def qbs_eval_def comp_def)
subsubsection \<open> Primitive Recursive Functions \<close>
definition "hp_rec_nat \<equiv> hp_lift2 rec_nat"
lemma hp_rec_nat_simp:
"hp_rec_nat t0 f k 0 = t0 k"
"hp_rec_nat t0 f k (Suc n) = f k n (hp_rec_nat t0 f k n)"
by(auto simp add: hp_rec_nat_def)
lemma hp_rec_nat_simp':
"hp_app (hp_rec_nat t0 f) (hp_const 0) = t0"
"hp_app (hp_rec_nat t0 f) (hp_suc e) = hp_app (hp_app f e) (hp_app (hp_rec_nat t0 f) e) "
by(auto simp add: hp_app_def hp_rec_nat_def hp_const_def qbs_eval_def hp_suc_def hp_suc'_def)
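(* A quick illustrative unfolding (a sketch added for clarity; it uses only hp_rec_nat_simp above):
   two steps of the recursor expand as expected. *)
lemma "hp_rec_nat t0 f k (Suc (Suc 0)) = f k (Suc 0) (f k 0 (t0 k))"
by(simp add: hp_rec_nat_simp)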
(* \<Gamma> |- t0 : T \<Gamma> |- f : \<nat> \<Rightarrow> T \<Rightarrow> T
---------------------------------------
\<Gamma> |- rec t0 f : \<nat> \<Rightarrow> T *)
lemma hpt_recnat:
assumes "\<Gamma> \<turnstile>\<^sub>t t0 ;; T"
and "\<Gamma> \<turnstile>\<^sub>t f ;; (exp_qbs \<nat>\<^sub>Q (exp_qbs T T))"
shows "\<Gamma> \<turnstile>\<^sub>t hp_rec_nat t0 f ;; (exp_qbs \<nat>\<^sub>Q T)"
unfolding hpprog_typing_def
proof(rule arg_swap_morphism,rule nat_qbs_morphism,auto)
fix n
show "(\<lambda>y. hp_rec_nat t0 f y n) \<in> \<Gamma> \<rightarrow>\<^sub>Q T"
unfolding hp_rec_nat_def hp_lift2_def
proof(induction n)
case 0
then show ?case
using assms(1) by(simp add: hpprog_typing_def)
next
case ih:(Suc n')
have "(\<lambda>y. f y n' (rec_nat (t0 y) (f y) n')) = case_prod (case_prod f) \<circ> (\<lambda>y. ((y,n'),rec_nat (t0 y) (f y) n'))"
by auto
also have "... \<in> \<Gamma> \<rightarrow>\<^sub>Q T"
apply(rule qbs_morphism_comp[of _ _ "(\<Gamma> \<Otimes>\<^sub>Q \<nat>\<^sub>Q) \<Otimes>\<^sub>Q T"],rule qbs_morphism_tuple,rule qbs_morphism_tuple)
using qbs_morphism_ident[of \<Gamma>] qbs_morphism_const[of n' "\<nat>\<^sub>Q"] ih uncurry_preserves_morphisms[of f \<Gamma>] uncurry_preserves_morphisms[of "case_prod f"] assms(2)
by(auto simp add: id_def hpprog_typing_def)
finally show ?case by simp
qed
qed
lemma hpt_recnat':
assumes "\<Gamma> \<turnstile>\<^sub>t t0 ;; T"
and "\<Gamma> ,, \<nat>\<^sub>Q,, T \<turnstile>\<^sub>t e ;; T"
shows "\<Gamma> \<turnstile>\<^sub>t hp_rec_nat t0 (hp_lambda (hp_lambda e)) ;; (exp_qbs \<nat>\<^sub>Q T)"
by(rule hpt_recnat[OF assms(1) hpt_abs[OF hpt_abs[OF assms(2)]]])
definition "hp_rec_list \<equiv> hp_lift2 rec_list"
(* \<Gamma> |- t0 : T \<Gamma> |- f : X \<Rightarrow> list X \<Rightarrow> T \<Rightarrow> T
---------------------------------------
\<Gamma> |- rec t0 f : list X \<Rightarrow> T *)
lemma hpt_reclist:
assumes "\<Gamma> \<turnstile>\<^sub>t t0 ;; T"
and "\<Gamma> \<turnstile>\<^sub>t f ;; (exp_qbs X (exp_qbs (list X) (exp_qbs T T)))"
shows "\<Gamma> \<turnstile>\<^sub>t hp_rec_list t0 f ;; (exp_qbs (list X) T)"
unfolding hpprog_typing_def hp_rec_list_def
by(auto intro!: qbs_morphism_comp[OF qbs_morphism_tuple[OF assms(1)[simplified hpprog_typing_def] assms(2)[simplified hpprog_typing_def]],of "case_prod rec_list",simplified comp_def split_beta fst_conv snd_conv] uncurry_preserves_morphisms rec_list_morphism[simplified])
lemma hpt_reclist':
assumes "\<Gamma> \<turnstile>\<^sub>t t0 ;; T"
and "\<Gamma> ,, X ,, list X ,, T \<turnstile>\<^sub>t e ;; T"
shows "\<Gamma> \<turnstile>\<^sub>t hp_rec_list t0 (hp_lambda (hp_lambda (hp_lambda e))) ;; (exp_qbs (list X) T)"
by(rule hpt_reclist[OF assms(1) hpt_abs[OF hpt_abs[OF hpt_abs[OF assms(2)]]]])
subsubsection \<open> Enriched Expressions \<close>
definition "hp_integrable \<equiv> hp_lift2 qbs_integrable"
definition "hp_expect \<equiv> hp_lift2 qbs_prob_integral"
definition "hp_ennexpect \<equiv> hp_lift2 qbs_prob_ennintegral"
definition "hp_var \<equiv> hp_lift2 qbs_prob_var"
term hp_powerr
lemma hp_var_def2:
"hp_var t e = hp_expect t ( (e -\<^sub>t hp_constf (hp_expect t e)) *\<^sub>t (e -\<^sub>t hp_constf (hp_expect t e)))"
by(simp add: hp_var_def hp_expect_def hp_constf_def hp_funtimes'_def qbs_prob_var_def power2_eq_square hp_const_def hp_times'_def hp_minus'_def hp_app_def hp_times''_def hp_minus''_def qbs_eval_def comp_def)
(* \<Gamma> |- m : P T \<Gamma> |- f : T \<Rightarrow> ennreal
---------------------------------------
\<Gamma> |- E_x~m [f x] : ennreal *)
lemma hpt_ennexpect:
assumes "\<Gamma> \<turnstile>\<^sub>t m ;; monadP_qbs T"
and "\<Gamma> \<turnstile>\<^sub>t f ;; exp_qbs T \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
shows "\<Gamma> \<turnstile>\<^sub>t hp_ennexpect m f ;; \<real>\<^sub>Q\<^sub>\<ge>\<^sub>0"
using qbs_prob_ennintegral_morphism[OF assms(1)[simplified hpprog_typing_def] assms(2)[simplified hpprog_typing_def]]
by(simp add: hpprog_typing_def hp_ennexpect_def)
(* \<Gamma> |- m : P T \<Gamma> |- f : T \<Rightarrow> \<real> f : integrable w.r.t. m
-----------------------------------------------------------
\<Gamma> |- E_x~m [f x] : \<real> *)
lemma hpt_expect:
assumes "\<Gamma> \<turnstile>\<^sub>t m ;; monadP_qbs T"
"\<Gamma> \<turnstile>\<^sub>t f ;; exp_qbs T \<real>\<^sub>Q"
and "\<And>x. x \<in> qbs_space \<Gamma> \<Longrightarrow> hp_integrable m f x"
shows "\<Gamma> \<turnstile>\<^sub>t hp_expect m f ;; \<real>\<^sub>Q"
using qbs_prob_integral_morphism[OF assms(1)[simplified hpprog_typing_def] assms(2)[simplified hpprog_typing_def]] assms(3)
by(simp add: hpprog_typing_def hp_expect_def hp_integrable_def)
subsubsection \<open> Product Measure \<close>
definition hp_pair_measure (infixr "\<Otimes>\<^sub>Q\<^sub>t" 80) where
"hp_pair_measure \<equiv> hp_lift2 qbs_prob_pair_measure"
lemma hpt_pair_measure:
assumes "\<Gamma> \<turnstile>\<^sub>t m ;; monadP_qbs M"
and "\<Gamma> \<turnstile>\<^sub>t n ;; monadP_qbs N"
shows "\<Gamma> \<turnstile>\<^sub>t hp_pair_measure m n ;; monadP_qbs (M \<Otimes>\<^sub>Q N)"
using qbs_morphism_comp[OF qbs_morphism_tuple[OF assms[simplified hpprog_typing_def]] qbs_prob_pair_measure_morphism]
by(simp add: hpprog_typing_def comp_def hp_pair_measure_def)
definition "hp_id \<equiv> hp_const id"
lemma hpt_id:
"\<Gamma> \<turnstile>\<^sub>t hp_id ;; exp_qbs X X"
unfolding hpprog_typing_def hp_id_def hp_const_def
by(rule qbs_morphism_const[of _ "exp_qbs X X",simplified,OF qbs_morphism_ident])
definition "hp_comp \<equiv> hp_lift2 comp"
lemma hpt_comp:
assumes "\<Gamma> \<turnstile>\<^sub>t f ;; exp_qbs X Y"
and "\<Gamma> \<turnstile>\<^sub>t g ;; exp_qbs Y Z"
shows "\<Gamma> \<turnstile>\<^sub>t hp_comp g f ;; exp_qbs X Z"
using exp_qbs_comp_morphism[of f \<Gamma> X Y g Z] assms
by(simp add: hpprog_typing_def hp_comp_def)
text \<open> The following are examples. \<close>
lemma "\<Gamma>,, Y \<turnstile>\<^sub>t \<lambda>\<^sub>t var1 $\<^sub>t var1 ;; Y"
apply(rule hpt_app)
apply(rule hpt_abs)
apply(rule hpt_var1)
apply(rule hpt_var1)
done
lemma "\<Gamma> \<turnstile>\<^sub>t \<lambda>\<^sub>t (\<lambda>\<^sub>t (hp_normal var2 var1)) ;; \<real>\<^sub>Q \<Rightarrow>\<^sub>Q \<real>\<^sub>Q \<Rightarrow>\<^sub>Q P\<^sub>t \<real>\<^sub>Q"
apply(rule hpt_abs)
apply(rule hpt_abs)
apply(rule hpt_normal)
apply(rule hpt_var2)
apply(rule hpt_var1)
done
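(* One more example (a sketch for illustration; it assumes, as in the examples above, that
   hpt_var1 types the innermost context variable): a primitive-recursive term built only from
   variables, typed using hpt_recnat'. *)
lemma "\<Gamma>,, \<nat>\<^sub>Q \<turnstile>\<^sub>t hp_rec_nat var1 (\<lambda>\<^sub>t (\<lambda>\<^sub>t var1)) ;; exp_qbs \<nat>\<^sub>Q \<nat>\<^sub>Q"
apply(rule hpt_recnat')
apply(rule hpt_var1)
apply(rule hpt_var1)
done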
end |
-- Copyright (c) 2017 Scott Morrison. All rights reserved.
-- Released under Apache 2.0 license as described in the file LICENSE.
-- Authors: Patrick Massot, Scott Morrison, Mario Carneiro
import category_theory.concrete_category
import category_theory.full_subcategory
import category_theory.functor_category
import category_theory.adjunction
import category_theory.limits.types
import category_theory.natural_isomorphism
import category_theory.eq_to_hom
import topology.basic
import topology.opens
import order.galois_connection
open category_theory
open category_theory.nat_iso
open topological_space
universe u
namespace category_theory.instances
/-- The category of topological spaces and continuous maps. -/
@[reducible] def Top : Type (u+1) := bundled topological_space
instance (x : Top) : topological_space x := x.str
namespace Top
instance : concrete_category @continuous := ⟨@continuous_id, @continuous.comp⟩
-- local attribute [class] continuous
-- instance {R S : Top} (f : R ⟶ S) : continuous (f : R → S) := f.2
section
open category_theory.limits
variables {J : Type u} [small_category J]
def limit (F : J ⥤ Top.{u}) : cone F :=
{ X := ⟨limit (F ⋙ forget), ⨆ j, (F.obj j).str.induced (limit.π (F ⋙ forget) j)⟩,
π :=
{ app := λ j, ⟨limit.π (F ⋙ forget) j, continuous_iff_induced_le.mpr (lattice.le_supr _ j)⟩,
naturality' := λ j j' f, subtype.eq ((limit.cone (F ⋙ forget)).π.naturality f) } }
def limit_is_limit (F : J ⥤ Top.{u}) : is_limit (limit F) :=
by refine is_limit.of_faithful forget (limit.is_limit _) (λ s, ⟨_, _⟩) (λ s, rfl);
exact continuous_iff_le_coinduced.mpr (lattice.supr_le $ λ j,
induced_le_iff_le_coinduced.mpr $ continuous_iff_le_coinduced.mp (s.π.app j).property)
instance : has_limits.{u} Top.{u} :=
λ J 𝒥 F, by exactI { cone := limit F, is_limit := limit_is_limit F }
instance : preserves_limits (forget : Top.{u} ⥤ Type u) :=
λ J 𝒥 F, by exactI preserves_limit_of_preserves_limit_cone
(limit.is_limit F) (limit.is_limit (F ⋙ forget))
def colimit (F : J ⥤ Top.{u}) : cocone F :=
{ X := ⟨colimit (F ⋙ forget), ⨅ j, (F.obj j).str.coinduced (colimit.ι (F ⋙ forget) j)⟩,
ι :=
{ app := λ j, ⟨colimit.ι (F ⋙ forget) j, continuous_iff_le_coinduced.mpr (lattice.infi_le _ j)⟩,
naturality' := λ j j' f, subtype.eq ((colimit.cocone (F ⋙ forget)).ι.naturality f) } }
def colimit_is_colimit (F : J ⥤ Top.{u}) : is_colimit (colimit F) :=
by refine is_colimit.of_faithful forget (colimit.is_colimit _) (λ s, ⟨_, _⟩) (λ s, rfl);
exact continuous_iff_induced_le.mpr (lattice.le_infi $ λ j,
induced_le_iff_le_coinduced.mpr $ continuous_iff_le_coinduced.mp (s.ι.app j).property)
instance : has_colimits.{u} Top.{u} :=
λ J 𝒥 F, by exactI { cocone := colimit F, is_colimit := colimit_is_colimit F }
instance : preserves_colimits (forget : Top.{u} ⥤ Type u) :=
λ J 𝒥 F, by exactI preserves_colimit_of_preserves_colimit_cocone
(colimit.is_colimit F) (colimit.is_colimit (F ⋙ forget))
end
def discrete : Type u ⥤ Top.{u} :=
{ obj := λ X, ⟨X, ⊤⟩,
map := λ X Y f, ⟨f, continuous_top⟩ }
def trivial : Type u ⥤ Top.{u} :=
{ obj := λ X, ⟨X, ⊥⟩,
map := λ X Y f, ⟨f, continuous_bot⟩ }
def adj₁ : adjunction discrete forget :=
{ hom_equiv := λ X Y,
{ to_fun := λ f, f,
inv_fun := λ f, ⟨f, continuous_top⟩,
left_inv := by tidy,
right_inv := by tidy },
unit := { app := λ X, id },
counit := { app := λ X, ⟨id, continuous_top⟩ } }
def adj₂ : adjunction forget trivial :=
{ hom_equiv := λ X Y,
{ to_fun := λ f, ⟨f, continuous_bot⟩,
inv_fun := λ f, f,
left_inv := by tidy,
right_inv := by tidy },
unit := { app := λ X, ⟨id, continuous_bot⟩ },
counit := { app := λ X, id } }
end Top
variables {X : Top.{u}}
instance : small_category (opens X) := by apply_instance
def nbhd (x : X.α) := { U : opens X // x ∈ U }
def nbhds (x : X.α) : small_category (nbhd x) := begin unfold nbhd, apply_instance end
end category_theory.instances
open category_theory.instances
namespace topological_space.opens
/-- `opens.map f` gives the functor from open sets in Y to open sets in X,
given by taking preimages under f. -/
def map
{X Y : Top.{u}} (f : X ⟶ Y) : opens Y ⥤ opens X :=
{ obj := λ U, ⟨ f.val ⁻¹' U, f.property _ U.property ⟩,
map := λ U V i, ⟨ ⟨ λ a b, i.down.down b ⟩ ⟩ }.
@[simp] lemma map_id_obj (X : Top.{u}) (U : opens X) : (map (𝟙 X)).obj U = U := by tidy
@[simp] def map_id (X : Top.{u}) : map (𝟙 X) ≅ functor.id (opens X) :=
{ hom := { app := λ U, 𝟙 U },
inv := { app := λ U, 𝟙 U } }
-- We could make f g implicit here, but it's nice to be able to see when
-- they are the identity (often!)
def map_iso {X Y : Top.{u}} (f g : X ⟶ Y) (h : f = g) : map f ≅ map g :=
nat_iso.of_components (λ U, eq_to_iso (congr_fun (congr_arg _ (congr_arg _ h)) _) ) (by obviously)
@[simp] def map_iso_id {X : Top.{u}} (h) : map_iso (𝟙 X) (𝟙 X) h = iso.refl (map _) := rfl
end topological_space.opens
|
#ifndef __INS_C_H__
#define __INS_C_H__
/********************************* TRICK HEADER *******************************
PURPOSE:
(Describe the INS Module On Board, Error equations based on Zipfel, Figure 10.27, space stabilized INS with GPS updates)
LIBRARY DEPENDENCY:
((../src/gnc_var.c)
(../src/Ins_c.c)
(../../cad/src/global_constants.c)
(../src/dm_delta_ut.c))
*******************************************************************************/
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_vector.h>
#include <math.h>
#include <stdio.h>
#include "cad_utility_c.h"
#include "math_utility_c.h"
#include "time_utility_c.h"
extern const double __DM_sec2r;
extern const double __DM_arcsec2r;
extern const double __WEII3;
extern const double __SMAJOR_AXIS;
extern const double __GM;
extern const double ___PI;
extern const double __EPS;
extern const double __DEG;
extern const double __RAD;
/** INS Variables **/
/* Matrix */
extern gsl_matrix *WEII;
extern gsl_matrix *TBIC;
extern gsl_matrix *TDCI;
extern gsl_matrix *TEIC;
extern gsl_matrix *TBDC;
extern gsl_matrix *TBICI;
extern gsl_matrix *TLI;
/* Vector */
extern gsl_vector *EVBI;
extern gsl_vector *EVBID;
extern gsl_vector *ESBI;
extern gsl_vector *ESBID;
extern gsl_vector *RICI;
extern gsl_vector *RICID;
extern gsl_vector *TBIC_Q;
extern gsl_vector *TBIDC_Q;
extern gsl_vector *SBIIC;
extern gsl_vector *VBIIC;
extern gsl_vector *SBEEC;
extern gsl_vector *VBEEC;
extern gsl_vector *WBICI;
extern gsl_vector *EGRAVI;
extern gsl_vector *VBECD;
extern gsl_vector *INS_I_ATT_ERR;
extern gsl_vector *TESTV;
extern gsl_vector *TMP_old;
extern gsl_vector *VBIIC_old;
extern gsl_vector *POS_ERR;
extern gsl_vector *GRAVGI;
extern gsl_vector *TBDCQ;
extern gsl_vector *VBIIC_old_old;
extern gsl_vector *PHI_C;
extern gsl_vector *DELTA_VEL_C;
extern gsl_vector *PHI_LOW_C;
extern gsl_vector *PHI_HIGH_C;
/* Double */
extern double dbic;
extern double dvbec;
extern double alphacx;
extern double betacx;
extern double thtvdcx;
extern double psivdcx;
extern double alppcx;
extern double phipcx;
extern double loncx;
extern double latcx;
extern double altc;
extern double phibdcx;
extern double thtbdcx;
extern double psibdcx;
extern double ins_pos_err;
extern double ins_vel_err;
extern double ins_tilt_err;
extern double ins_pose_err;
extern double ins_vele_err;
extern double ins_phi_err;
extern double ins_tht_err;
extern double ins_psi_err;
/* Unsigned int */
extern unsigned int gpsupdate;
extern unsigned int liftoff;
extern unsigned int ideal;
extern GPS_TIME gpstime;
extern UTC_TIME utctime;
/*******************/
#ifdef __cplusplus
extern "C" {
#endif
int load_location(double lonx, double latx, double alt);
int load_angle(double yaw, double roll, double pitch, GPS_TIME gps_time);
int load_geodetic_velocity(double alpha0x, double beta0x, double dvbe);
int calculate_INS_derived_TEI(GPS_TIME gps, gsl_matrix *TEIC);
int AccelHarmonic(const gsl_vector *SBII, double CS[21][21], int n_max, int m_max, const gsl_matrix *TEIC, gsl_vector *acc_out);
int DCM_2_Euler_angle(const gsl_matrix *TBD, double *phibdc, double *thtbdc, double *psibdc);
int calculate_INS_derived_phip(gsl_vector *VBECB, double *phipc);
int calculate_INS_derived_thtvd(gsl_vector *VBECD, double *thtvd);
int calculate_INS_derived_psivd(gsl_vector *VBECD, double *psivd);
int calculate_INS_derived_alpp(gsl_vector *VBECB, double *alpp);
int calculate_INS_derived_beta(gsl_vector *VBECB, double *beta);
int calculate_INS_derived_alpha(gsl_vector *VBECB, double *alpha);
int build_VBEB(double _alpha0x, double _beta0x, double _dvbe, gsl_vector *VBEB);
int INS_update(const double int_step, double *dvbec, unsigned int liftoff, double *alphacx, double *betacx
, double *alppcx, double *phipcx, double *loncx, double *latcx, double *altc, double *psivdcx, double *thtvdcx
, double *phibdc, double *thtbdc, double *psibdc
, gsl_vector *PHI, gsl_vector *DELTA_VEL
, gsl_vector *PHI_HIGH, gsl_vector *PHI_LOW, GPS_TIME gps, gsl_matrix *TEIC
, gsl_vector *SBIIC, gsl_vector *VBIIC, gsl_vector *VBIIC_old, gsl_vector *GRAVGI, gsl_matrix *TBIC, gsl_vector *SBEEC
, gsl_vector *VBEEC, gsl_matrix *WEII, gsl_matrix *TLI, gsl_matrix *TDCI
, gsl_matrix *TBICI, gsl_matrix *TBDC, gsl_vector *TBDCQ);
int INS_init(GPS_TIME gps_time);
int INS_alloc();
#ifdef __cplusplus
}
#endif
#endif
|
export rule
@rule AR(:x, Marginalisation) (m_y::NormalDistributionsFamily, q_θ::NormalDistributionsFamily, q_γ::GammaShapeRate, meta::ARMeta) = begin
mθ, Vθ = mean(q_θ), cov(q_θ)
my, Vy = mean(m_y), cov(m_y)
mγ = mean(q_γ)
mA = as_companion_matrix(mθ)
mV = ar_transition(getvform(meta), getorder(meta), mγ)
D = mA'*inv(Vy + mV)*mA + mγ*Vθ
Vx = inv(D)
mx = inv(D)*mA'*inv(Vy + mV)*my
return convert(promote_variate_type(getvform(meta), NormalMeanVariance), mx, Vx)
end
|
module RationalUtils where
open import Algebra
import Algebra.Morphism.RingMonomorphism as RingMonomorphisms
open import Data.Rational
open import Data.Rational.Properties
open import Data.Nat using (z≤n; s≤s)
open import Data.Integer using (+≤+; +<+; +_; -[1+_])
open import Data.Product using (_×_; _,_)
open import Data.Sum
open import Function.Base using (_∘_; _∘′_)
open import Level using (0ℓ)
open import Relation.Binary
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary using (yes; no)
open import Relation.Nullary.Negation using (contradiction)
open import Vehicle.Data.Tensor
open ≤-Reasoning
private
variable
p q : ℚ
private
module +-*-Monomorphism = RingMonomorphisms toℚᵘ-isRingMonomorphism-+-*
------------------------------------------------------------------------
-- Things to work out how to put in standard library
2ℚ = + 2 / 1
3ℚ = + 3 / 1
≰⇒≥ : _≰_ ⇒ _≥_
≰⇒≥ = <⇒≤ ∘′ ≰⇒>
p≤0⇒∣p∣≡-p : p ≤ 0ℚ → ∣ p ∣ ≡ - p
p≤0⇒∣p∣≡-p {mkℚ +[1+ n ] _ _} p≤0 = contradiction (nonPositive p≤0) λ()
p≤0⇒∣p∣≡-p {mkℚ +0 _ _} _ = refl
p≤0⇒∣p∣≡-p {mkℚ -[1+ n ] _ _} _ = refl
neg-involutive : Involutive _≡_ (-_)
neg-involutive (mkℚ +[1+ n ] _ _) = refl
neg-involutive (mkℚ +0 _ _) = refl
neg-involutive (mkℚ -[1+ n ] _ _) = refl
p≤∣p∣ : ∀ p → p ≤ ∣ p ∣
p≤∣p∣ p with 0ℚ ≤? p
... | yes 0≤p = ≤-reflexive (sym (0≤p⇒∣p∣≡p 0≤p))
... | no 0≰p = ≤-trans (≰⇒≥ 0≰p) (0≤∣p∣ p)
-p≤∣p∣ : ∀ p → - p ≤ ∣ p ∣
-p≤∣p∣ p with 0ℚ ≤? p
... | yes 0≤p = ≤-trans (neg-antimono-≤ 0≤p) (0≤∣p∣ p)
... | no 0≰p = ≤-reflexive (sym (p≤0⇒∣p∣≡-p (≰⇒≥ 0≰p)))
-p≤q⇒-q≤p : - p ≤ q → - q ≤ p
-p≤q⇒-q≤p {p} {q} = subst (- q ≤_) (neg-involutive p) ∘ neg-antimono-≤
p≤-q⇒q≤-p : p ≤ - q → q ≤ - p
p≤-q⇒q≤-p {p} {q} = subst (_≤ - p) (neg-involutive q) ∘ neg-antimono-≤
-p<q⇒-q<p : - p < q → - q < p
-p<q⇒-q<p {p} {q} = subst (- q <_) (neg-involutive p) ∘ neg-antimono-<
p<-q⇒q<-p : p < - q → q < - p
p<-q⇒q<-p {p} {q} = subst (_< - p) (neg-involutive q) ∘ neg-antimono-<
postulate p-[p+q]≡q : ∀ p q → p - (p + q) ≡ q
postulate p+q-p≡q : ∀ p q → p + q - p ≡ q
postulate p-q+q≡p : ∀ p q → p - q + q ≡ p
postulate *-monoʳ-≤ : ∀ r → Positive r → (r *_) Preserves _≤_ ⟶ _≤_
postulate p<r-q⇒p+q<r : ∀ p q r → p < r - q → p + q < r
∣p∣≤q⇒-q≤p≤q : ∀ p → ∣ p ∣ ≤ q → - q ≤ p × p ≤ q
∣p∣≤q⇒-q≤p≤q p ∣p∣≤q =
-p≤q⇒-q≤p (≤-trans (-p≤∣p∣ p) ∣p∣≤q)
, ≤-trans (p≤∣p∣ p) ∣p∣≤q
-p≤q≤p⇒∣q∣≤p : - p ≤ q → q ≤ p → ∣ q ∣ ≤ p
-p≤q≤p⇒∣q∣≤p {p} {q} -q≤p q≤p with ∣p∣≡p∨∣p∣≡-p q
... | inj₁ ∣q∣≡q = subst (_≤ p) (sym ∣q∣≡q) q≤p
... | inj₂ ∣q∣≡-q = subst (_≤ p) (sym ∣q∣≡-q) (-p≤q⇒-q≤p -q≤p)
∣p∣<q⇒-q<p<q : ∀ p → ∣ p ∣ < q → - q < p × p < q
∣p∣<q⇒-q<p<q p ∣p∣<q =
-p<q⇒-q<p (≤-<-trans (-p≤∣p∣ p) ∣p∣<q)
, ≤-<-trans (p≤∣p∣ p) ∣p∣<q
-p<q<p⇒∣q∣<p : - p < q → q < p → ∣ q ∣ < p
-p<q<p⇒∣q∣<p {p} {q} -p<q q<p with ∣p∣≡p∨∣p∣≡-p q
... | inj₁ ∣q∣≡q = subst (_< p) (sym ∣q∣≡q) q<p
... | inj₂ ∣q∣≡-q = subst (_< p) (sym ∣q∣≡-q) (-p<q⇒-q<p -p<q)
p+q-q≡p : ∀ p q → p + q - q ≡ p
p+q-q≡p p q = begin-equality
p + q - q ≡⟨ +-assoc p q (- q) ⟩
p + (q - q) ≡⟨ cong (λ v → p + v) (+-inverseʳ q) ⟩
p + 0ℚ ≡⟨ +-identityʳ p ⟩
p ∎
+-isCommutativeSemigroup : IsCommutativeSemigroup _≡_ _+_
+-isCommutativeSemigroup = isCommutativeSemigroup
where open IsCommutativeMonoid +-0-isCommutativeMonoid
+-commutativeSemigroup : CommutativeSemigroup 0ℓ 0ℓ
+-commutativeSemigroup = record
{ isCommutativeSemigroup = +-isCommutativeSemigroup
}
2*p≡p+p : ∀ p → 2ℚ * p ≡ p + p
2*p≡p+p p = begin-equality
2ℚ * p ≡⟨⟩
(1ℚ + 1ℚ) * p ≡⟨ *-distribʳ-+ p 1ℚ 1ℚ ⟩
1ℚ * p + 1ℚ * p ≡⟨ cong₂ _+_ (*-identityˡ p) (*-identityˡ p) ⟩
p + p ∎
|
//
// Copyright (c) 2019 Vinnie Falco ([email protected])
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/vinniefalco/json
//
#ifndef BOOST_JSON_IMPL_ERROR_IPP
#define BOOST_JSON_IMPL_ERROR_IPP
#include <boost/json/error.hpp>
namespace boost {
namespace json {
type_error::
type_error(char const* what)
: std::invalid_argument(what)
{
}
number_required_error::
number_required_error(
char const* what)
: type_error(what)
{
}
//----------------------------------------------------------
array_index_error::
array_index_error()
: std::out_of_range(
"array index error")
{
}
void
array_index_error::
raise()
{
BOOST_THROW_EXCEPTION(
array_index_error());
}
//---
array_required_error::
array_required_error()
: type_error(
"array required")
{
}
void
array_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
array_required_error());
}
//---
array_too_large::
array_too_large()
: std::length_error(
"array too large")
{
}
void
array_too_large::
raise()
{
BOOST_THROW_EXCEPTION(
array_too_large());
}
//---
bool_required_error::
bool_required_error()
: type_error(
"bool required")
{
}
void
bool_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
bool_required_error());
}
//---
char_pos_error::
char_pos_error()
: std::out_of_range(
"char index error")
{
}
void
char_pos_error::
raise()
{
BOOST_THROW_EXCEPTION(
char_pos_error());
}
//---
double_required_error::
double_required_error()
: number_required_error(
"double required")
{
}
void
double_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
double_required_error());
}
//---
int64_required_error::
int64_required_error()
: number_required_error(
"int64 required")
{
}
void
int64_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
int64_required_error());
}
//---
key_not_found::
key_not_found()
: std::invalid_argument(
"key not found")
{
}
void
key_not_found::
raise()
{
BOOST_THROW_EXCEPTION(
key_not_found());
}
//---
key_too_large::
key_too_large()
: std::length_error(
"key too large")
{
}
void
key_too_large::
raise()
{
BOOST_THROW_EXCEPTION(
key_too_large());
}
//---
object_required_error::
object_required_error()
: type_error(
"object required")
{
}
void
object_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
object_required_error());
}
//---
object_too_large::
object_too_large()
: std::length_error(
"object too large")
{
}
void
object_too_large::
raise()
{
BOOST_THROW_EXCEPTION(
object_too_large());
}
//---
stack_overflow::
stack_overflow()
: std::runtime_error(
"stack overflow")
{
}
void
stack_overflow::
raise()
{
BOOST_THROW_EXCEPTION(
stack_overflow());
}
//---
string_required_error::
string_required_error()
: type_error(
"string required")
{
}
void
string_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
string_required_error());
}
//---
string_too_large::
string_too_large()
: std::length_error(
"string too large")
{
}
void
string_too_large::
raise()
{
BOOST_THROW_EXCEPTION(
string_too_large());
}
//---
uint64_required_error::
uint64_required_error()
: number_required_error(
"uint64 required")
{
}
void
uint64_required_error::
raise()
{
BOOST_THROW_EXCEPTION(
uint64_required_error());
}
//----------------------------------------------------------
error_code
make_error_code(error e)
{
struct codes : error_category
{
const char*
name() const noexcept override
{
return "boost.json";
}
std::string
message(int ev) const override
{
switch(static_cast<error>(ev))
{
default:
case error::syntax: return "syntax error";
case error::extra_data: return "extra data";
case error::incomplete: return "incomplete";
case error::mantissa_overflow: return "mantissa overflow";
case error::exponent_overflow: return "exponent overflow";
case error::too_deep: return "too deep";
case error::illegal_char: return "illegal character for value";
case error::illegal_control_char: return "illegal control character";
case error::illegal_escape_char: return "illegal character in escape sequence";
case error::illegal_extra_digits: return "illegal extra digits in number";
case error::illegal_leading_surrogate: return "illegal leading surrogate";
case error::illegal_trailing_surrogate: return "illegal trailing surrogate";
case error::need_start: return "parser needs start";
case error::expected_comma: return "expected comma";
case error::expected_colon: return "expected colon";
case error::expected_quotes: return "expected quotes";
case error::expected_hex_digit: return "expected hex digit";
case error::expected_utf16_escape: return "expected utf16 escape";
case error::expected_mantissa: return "expected mantissa";
case error::expected_fraction: return "expected mantissa fraction";
case error::expected_exponent: return "expected exponent";
case error::expected_true: return "expected 'true'";
case error::expected_false: return "expected 'false'";
case error::expected_null: return "expected 'null'";
case error::not_object: return "not an object";
case error::not_array: return "not an array";
case error::not_string: return "not a string";
case error::not_number: return "not a number";
case error::not_bool: return "not a boolean";
case error::not_null: return "not a null";
case error::integer_overflow: return "integer overflowed";
case error::not_exact: return "not exact";
case error::test_failure: return "test failure";
}
}
error_condition
default_error_condition(
int ev) const noexcept override
{
switch(static_cast<error>(ev))
{
default:
return {ev, *this};
case error::syntax:
case error::extra_data:
case error::incomplete:
case error::mantissa_overflow:
case error::exponent_overflow:
case error::too_deep:
case error::illegal_char:
case error::illegal_control_char:
case error::illegal_escape_char:
case error::illegal_extra_digits:
case error::illegal_leading_surrogate:
case error::illegal_trailing_surrogate:
case error::expected_comma:
case error::expected_colon:
case error::expected_quotes:
case error::expected_hex_digit:
case error::expected_utf16_escape:
case error::expected_mantissa:
case error::expected_fraction:
case error::expected_exponent:
case error::expected_true:
case error::expected_false:
case error::expected_null:
return condition::parse_error;
case error::not_object:
case error::not_array:
case error::not_string:
case error::not_number:
case error::not_bool:
case error::not_null:
case error::integer_overflow:
case error::not_exact:
return condition::assign_error;
}
}
};
static codes const cat{};
return error_code{static_cast<
std::underlying_type<error>::type>(e), cat};
}
error_condition
make_error_condition(condition c)
{
struct codes : error_category
{
const char*
name() const noexcept override
{
return "boost.json";
}
std::string
message(int cv) const override
{
switch(static_cast<condition>(cv))
{
default:
case condition::parse_error:
return "A JSON parsing error occurred";
case condition::assign_error:
return "An error occurred during assignment";
}
}
};
static codes const cat{};
return error_condition{static_cast<
std::underlying_type<condition>::type>(c), cat};
}
} // json
} // boost
#endif
|
'''
Converts between string and numeric representations of cards.
'''
import numpy as np
class CardToStringConversion():
def __init__(self):
CC, SC = 52, 4
self.suit_table = ['c', 'd', 'h', 's']
self.rank_table = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
# card -> rank, suit
self.card_to_suit_table = np.zeros([CC], dtype=np.int)
self.card_to_rank_table = np.zeros([CC], dtype=np.int)
for card in range(CC):
self.card_to_suit_table[card] = card % SC
self.card_to_rank_table[card] = np.floor(card / SC)
# card -> string table
self.card_to_string_table = {}
for card in range(CC):
rank_name = self.rank_table[self.card_to_rank(card)]
suit_name = self.suit_table[self.card_to_suit(card)]
self.card_to_string_table[card] = rank_name + suit_name
# string -> card table
self.string_to_card_table = {}
for card in range(CC):
self.string_to_card_table[self.card_to_string_table[card]] = card
def card_to_suit(self, card):
''' Gets the suit of a card (int) '''
return self.card_to_suit_table[card]
def card_to_rank(self, card):
''' Gets the rank of a card (int) '''
return self.card_to_rank_table[card]
def card_to_string(self, card):
''' Converts a card's numeric representation to its string representation.
@param: int :numeric representation of a card
@return str :string representation of the card
'''
assert(card >= 0 and card < 52)
return self.card_to_string_table[card]
def cards_to_string(self, cards):
''' Does self.card_to_string, just for list of cards '''
if cards.ndim == 0:
return ''
out = ''
for card in range(cards.shape[0]):
out += self.card_to_string(cards[card])
return out
def string_to_card(self, card_string):
''' Converts a card's string representation to its numeric representation
@param: str :string representation of the card
@return int :numeric representation of a card
'''
CC = 52
card = self.string_to_card_table[card_string]
assert(card >= 0 and card < CC)
return card
def string_to_board(self, card_string):
''' Converts a string representing zero or more board cards to a vector of numeric representations
@param: str :string representation of the board (ex: 'AhKsQdJhTs9c')
@return [int,...] :tensor containing the numeric representation of the board
'''
if card_string == '':
return np.zeros([], dtype=np.int)
else:
num_cards = len(card_string) // 2
board = np.zeros([num_cards], dtype=np.int)
for i in range(1, num_cards+1):
board[i-1] = self.string_to_card(card_string[ (i-1)*2:i*2 ])
return board
def street_to_name(self, street):
''' converts street/round (int) to name (str) '''
if street == 1:
return 'preflop'
elif street == 2:
return 'flop'
elif street == 3:
return 'turn'
elif street == 4:
return 'river'
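# A minimal usage sketch (for illustration only; 'conv' is a hypothetical name, and the expected
# values assume the 52-card indexing defined above):
#   conv = CardToStringConversion()
#   conv.string_to_card('As')                 # -> 51
#   conv.card_to_string(0)                    # -> '2c'
#   conv.cards_to_string(np.array([0, 51]))   # -> '2cAs'
#   conv.string_to_board('2cAs')              # -> array([ 0, 51])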
card_to_string = CardToStringConversion() |
(*
* Copyright 2014, NICTA
*
* This software may be distributed and modified according to the terms of
* the BSD 2-Clause license. Note that NO WARRANTY is provided.
* See "LICENSE_BSD2.txt" for details.
*
* @TAG(NICTA_BSD)
*)
(* License: BSD, terms see file ./LICENSE *)
theory Addr_Type
imports "~~/src/HOL/Word/Word"
begin
type_synonym addr_bitsize = "32"
type_synonym addr = "addr_bitsize word"
definition addr_bitsize :: nat where "addr_bitsize \<equiv> 32"
definition addr_align :: nat where "addr_align \<equiv> 2"
declare addr_align_def[simp]
definition addr_card :: nat where
"addr_card \<equiv> card (UNIV::addr set)"
declare addr_bitsize_def[simp]
lemma addr_card:
"addr_card = 2^addr_bitsize"
by (simp add: addr_card_def card_word)
lemma len_of_addr_card:
"2 ^ len_of TYPE(addr_bitsize) = addr_card"
by (simp add: addr_card)
lemma of_nat_addr_card [simp]:
"of_nat addr_card = (0::addr)"
by (simp add: addr_card)
end
|
State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
⊢ ↑(map f) (mapRange g hg φ) = φ ↔ ∀ (d : σ →₀ ℕ), ↑f (g (coeff d φ)) = coeff d φ State After: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
⊢ (∀ (m : σ →₀ ℕ), coeff m (↑(map f) (mapRange g hg φ)) = coeff m φ) ↔ ∀ (d : σ →₀ ℕ), ↑f (g (coeff d φ)) = coeff d φ Tactic: rw [MvPolynomial.ext_iff] State Before: R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
⊢ (∀ (m : σ →₀ ℕ), coeff m (↑(map f) (mapRange g hg φ)) = coeff m φ) ↔ ∀ (d : σ →₀ ℕ), ↑f (g (coeff d φ)) = coeff d φ State After: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
⊢ ∀ (a : σ →₀ ℕ), coeff a (↑(map f) (mapRange g hg φ)) = coeff a φ ↔ ↑f (g (coeff a φ)) = coeff a φ Tactic: apply forall_congr' State Before: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
⊢ ∀ (a : σ →₀ ℕ), coeff a (↑(map f) (mapRange g hg φ)) = coeff a φ ↔ ↑f (g (coeff a φ)) = coeff a φ State After: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ coeff m (↑(map f) (mapRange g hg φ)) = coeff m φ ↔ ↑f (g (coeff m φ)) = coeff m φ Tactic: intro m State Before: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ coeff m (↑(map f) (mapRange g hg φ)) = coeff m φ ↔ ↑f (g (coeff m φ)) = coeff m φ State After: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ ↑f (coeff m (mapRange g hg φ)) = coeff m φ ↔ ↑f (g (coeff m φ)) = coeff m φ Tactic: rw [coeff_map] State Before: case h
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ ↑f (coeff m (mapRange g hg φ)) = coeff m φ ↔ ↑f (g (coeff m φ)) = coeff m φ State After: case h.a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ ↑f (coeff m (mapRange g hg φ)) = ↑f (g (coeff m φ)) Tactic: apply eq_iff_eq_cancel_right.mpr State Before: case h.a
R : Type u
S₁ : Type v
S₂ : Type w
S₃ : Type x
σ : Type u_1
a a' a₁ a₂ : R
e : ℕ
n m✝ : σ
s : σ →₀ ℕ
inst✝¹ : CommSemiring R
inst✝ : CommSemiring S₁
p q : MvPolynomial σ R
f✝ f : R →+* S₁
g : S₁ → R
hg : g 0 = 0
φ : MvPolynomial σ S₁
m : σ →₀ ℕ
⊢ ↑f (coeff m (mapRange g hg φ)) = ↑f (g (coeff m φ)) State After: no goals Tactic: rfl |
module Data.Time.Calendar.Julian
import Data.Time.Calendar.Days
import public Data.Time.Calendar.Types
%default total
-- ---------------------------------------------------------------------------
||| Is this year a leap year according to the proleptic Julian calendar?
isJulianLeapYear : Year -> Bool
isJulianLeapYear y = y `mod` 4 == 0
||| Convert to proleptic Julian year and day format.
toJulianYearAndDay : Day -> (Year, DayOfYear)
toJulianYearAndDay (ModifiedJulianDay mjd) =
let a = mjd + 678577
quad = a `div` 1461
d = a `mod` 1461
y = min (d `div` 365) 3
in (quad * 4 + y + 1, fromInteger $ d - (y * 365) + 1)
||| Convert from proleptic Julian year and day format.
||| Invalid day numbers will be clipped to the correct range (1 to 365 or 366).
fromJulianYearAndDay : Year -> DayOfYear -> Day
fromJulianYearAndDay year day =
let y = year - 1
yd = if isJulianLeapYear year then 366 else 365
mjd = cast (if day < 1 then 1
else if day > yd then yd
else day) + (365 * y) + (y `div` 4) - 678578
in ModifiedJulianDay mjd
||| Convert from proleptic Julian year and day format.
||| Invalid day numbers will return Nothing
fromJulianYearAndDayValid : Year -> DayOfYear -> Maybe Day
fromJulianYearAndDayValid year day =
let y = year - 1
yd = if isJulianLeapYear year then 366 else 365
in if day < 1 then Nothing
else if day > yd then Nothing
else Just $ ModifiedJulianDay $ cast day + (365 * y) + (y `div` 4) - 678578
{- -- FIXME:
||| Show in proleptic Julian year and day format (yyyy-ddd)
showJulianYearAndDay : Day -> String
showJulianYearAndDay date =
let (y, d) = toJulianYearAndDay
in (show4 y) ++ "-" ++ (show3 d)
-}
-- --------------------------------------------------------------------------
-- vim: tw=80 sw=2 expandtab : |
#ifndef EXAMPLES_RB_COLLISIONINTERFACE_HPP_
#define EXAMPLES_RB_COLLISIONINTERFACE_HPP_
#include <vector>
#include <map>
#include <Eigen/Dense>
#include "dart/dart.hpp"
class RigidBody;
struct RigidContact {
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
Eigen::Vector3d point;
Eigen::Vector3d normal;
RigidBody* rb1;
RigidBody* rb2;
Eigen::Vector3d pinataVelocity;
};
class CollisionInterface {
public:
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
CollisionInterface();
virtual ~CollisionInterface();
void addSkeleton(dart::dynamics::SkeletonPtr _skel);
void addRigidBody(RigidBody *_rb, const std::string& name);
// Run the collision detector
void checkCollision();
int getNumContacts() {
return mContacts.size();
}
// Retrieve the information from the collision detector:
// For example, get the position and the normal of the fifth contact point
// Vector3d v = mWorld->getCollisionDetector()->getContact(5).point;
// Vector3d n = mWorld->getCollisionDetector()->getContact(5).normal;
RigidContact& getContact(int _index) {
return mContacts[_index];
}
private:
void updateBodyNodes();
void postProcess();
std::shared_ptr<dart::collision::CollisionDetector> mCollisionChecker;
//std::shared_ptr<dart::collision::CollisionDetector>* mCollisionChecker;
//dart::collision::CollisionDetector* mCollisionChecker;
std::vector<RigidContact> mContacts;
std::vector<dart::dynamics::SkeletonPtr> mSkeletons;
std::map<dart::dynamics::BodyNode*, RigidBody*> mNodeMap;
};
#endif // EXAMPLES_RB_COLLISIONINTERFACE_HPP_
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
fId.canTensorSplit := (self, div) >> self.domain() mod div = 0;
fId.tensorSplit := (self, div) >> [fId(div), fId(self.size/div)];
fBase.canTensorSplit := (self, div) >> self.range() mod div = 0;
fBase.tensorSplit := (self, div) >> [ fBase(div, idiv(self.params[2], self.range()/div)),
fBase(self.range()/div, imod(self.params[2], self.range()/div)) ];
CanTensorSplit := (what, div) -> When(not IsRec(what) or not IsBound(what.canTensorSplit), false,
what.canTensorSplit(div));
TensorSplit := (what, div) -> what.tensorSplit(div);
try_split := function(i, flist, fdim, N)
local ffrun, next, len, split;
len := Length(flist);
ffrun := fdim(flist[i]);
while i < len and ffrun < N do
next := flist[i+1];
if ffrun * fdim(next) > N then
if N mod ffrun <> 0 or not CanTensorSplit(next, N/ffrun) then
return false;
#Error("can't merge incompatible tensor chains (could not split)",
# "N=", N, " ffrun=", ffrun, " next=", next);
else
split := TensorSplit(next, N/ffrun);
flist := Concat(flist{[1..i]}, split, flist{[i+2..len]});
ffrun := ffrun * N/ffrun; # = N
fi;
else
ffrun := ffrun * fdim(next);
fi;
i := i+1;
od;
#Print([i, flist], "\n");
return [i, flist];
end;
full_merge_tensor_chains := function(target, ff, gg, compose, ftensor, gtensor, fid, gid, fdom, gran)
local i, j, nf, ng, res, ffrun, ggrun, ibegin, jbegin, split;
nf := Length(ff); ng := Length(gg);
res := []; i:=1; j:=1;
while i <= nf or j <= ng do
# handle domain=1 or range=1, which are always mergeable
if (i<=nf and fdom(ff[i])=1) or (j<=ng and gran(gg[j])=1) then
if i<=nf and fdom(ff[i]) = 1 then
Add(res, fid(ff[i]));
i := i + 1;
fi;
if j<=ng and gran(gg[j]) = 1 then
Add(res, gid(gg[j]));
j := j + 1;
fi;
# try to combine terms to get a match
elif (i <= nf and j <= ng) then
if AnySyms(fdom(ff[i]), gran(gg[j])) then
return false;
elif fdom(ff[i]) = gran(gg[j]) then
Add(res, compose(ff[i], gg[j]));
else
ibegin := i;
jbegin := j;
if fdom(ff[i]) < gran(gg[j]) then
if CanTensorSplit(gg[j], fdom(ff[i])) then
split := TensorSplit(gg[j], fdom(ff[i]));
gg := Concat(gg{[1..j-1]}, split, gg{[j+1..ng]});
ng := ng + 1;
else
split := try_split(i, ff, fdom, gran(gg[j]));
if split=false then return false; fi;
i := split[1];
ff := split[2]; nf := Length(ff);
fi;
else
if CanTensorSplit(ff[i], gran(gg[j])) then
split := TensorSplit(ff[i], gran(gg[j]));
if split=false then return false; fi;
ff := Concat(ff{[1..i-1]}, split, ff{[i+1..nf]});
nf := nf + 1;
else
split := try_split(j, gg, gran, fdom(ff[i]));
if split=false then return false; fi;
j := split[1];
gg := split[2]; ng := Length(gg);
fi;
fi;
Add(res, compose(ftensor(ff{[ibegin..i]}), gtensor(gg{[jbegin..j]})));
fi;
i := i+1;
j := j+1;
else
return false;
fi;
od;
target.val := res;
return res;
end;
# *******************************************************************
# make sure ff[i] is not a diag function, where merging chains doesn't make sense (does it?)
fully_compat_tensor_chains := (ff,gg,fdom,gran) ->
Length(ff) = Length(gg) and
ForAll([1..Length(ff)], i -> ff[i].range()<>false and
fdom(ff[i])=gran(gg[i]));
merge_fc_tensor_chains := (ff,gg,combine) ->
List([1..Length(ff)], i -> combine(ff[i], gg[i]));
# this assumes compat_domain_range compatibility
compat_tensor_chains := (f,g,fdom,gran) -> let(
ff := Filtered(f, c->let(d:=fdom(c), IsSymbolic(d) or d > 1)),
gg := Filtered(g, c->let(r:=gran(c), IsSymbolic(r) or r > 1)),
fully_compat_tensor_chains(ff, gg, fdom, gran));
# this assumes compat_domain_range compatibility
merge_tensor_chains := function(ff, gg, compose, fidentity, gidentity, fdom, gran)
local i, j, nf, ng, res;
nf := Length(ff); ng := Length(gg);
res := []; i:=1; j:=1;
while i <= nf or j <= ng do
if (i <= nf and j <= ng) and fdom(ff[i]) = gran(gg[j]) then
Add(res, compose(ff[i], gg[j]));
i := i+1;
j := j+1;
else
if i<=nf and fdom(ff[i]) = 1 then
Add(res, fidentity(ff[i]));
i := i + 1;
elif j<=ng and gran(gg[j]) = 1 then
Add(res, gidentity(gg[j]));
j := j + 1;
else Error("can't merge incompatible tensor chains");
fi;
fi;
od;
return res;
end;
|
\clearpage % so that table of contents mentions correct page
\phantomsection % so that hyperref makes correct reference
\addcontentsline{toc}{chapter}{Conclusion}
\chapter*{Conclusion}
\vspace{-3mm} % HACK so that the conclusion fits on a single page
The theory of Bayesian filtering is introduced in the first chapter and the \emph{optimal
Bayesian solution} of the problem of recursive estimation is derived. A survey of well-known
Bayesian filtering methods follows --- Kalman filtering, particle filtering and marginalized
particle filtering are described and the properties of the individual algorithms are discussed.
The second chapter contains a software analysis performed with the aim of identifying the best
software development approach and programming language for the desired library for Bayesian
filtering. An object-oriented approach is chosen along with the Python programming language, which
is found optimal except for its potentially significant computational overhead. Cython is evaluated
for the task of improving Python performance, with great success: a simple Python algorithm was
60\x\ faster when compiled using Cython.
The last chapter presents the PyBayes library that was developed as a part of this thesis. PyBayes
builds on the software analysis performed in the previous chapter: it is therefore object-oriented,
uses the Python/Cython combination as its implementation environment, and implements all presented
Bayesian filtering methods. To compare the performance of the Python/Cython combination in a
real-world example, the Kalman filter from PyBayes is benchmarked against the MATLAB and C++
implementations from BDM~\cite{BDM}, with favourable results.
\noindent{}We believe that the \textbf{key contributions} of this thesis are:
\begin{itemize}
\item The performed software analysis, that can be reused for a wide variety of software
projects. In particular, we have shown that the choice of a high-level and convenient
language such as Python is \emph{not necessarily} the enemy of speed. The analysis includes
benchmarks with quite surprising results that show that Cython and PyPy are great speed
boosters of Python.
\item The PyBayes library itself. While it is not yet feature-complete, it provides a solid base
for future development and is unique due to its dual-mode approach: it can either be treated
as an ordinary Python package, with all the convenience that brings, or compiled using Cython
for performance gains.
\end{itemize}
\textbf{Future work} includes extending PyBayes with more filtering algorithms (non-linear Kalman
filter variants etc.) in the long term and fixing the little inconveniences that currently exist in
PyBayes in the short term; version 0.4, which would incorporate all future changes mentioned in the
third chapter, is planned to be released within a few months. We are also looking forward to
incorporating emerging projects into our software analysis; for example, the PyPy project looks
very promising.
|
State Before: α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
⊢ card s ≤ 1 ↔ ∃ x, s ⊆ {x} State After: case refine'_1
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card s ≤ 1
⊢ ∃ x, s ⊆ {x}
case refine'_2
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
⊢ (∃ x, s ⊆ {x}) → card s ≤ 1 Tactic: refine' ⟨fun H => _, _⟩ State Before: case refine'_1
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card s ≤ 1
⊢ ∃ x, s ⊆ {x} State After: case refine'_1.inl
α : Type u_1
β : Type ?u.54709
t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card ∅ ≤ 1
⊢ ∃ x, ∅ ⊆ {x}
case refine'_1.inr.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card s ≤ 1
x : α
hx : x ∈ s
⊢ ∃ x, s ⊆ {x} Tactic: obtain rfl | ⟨x, hx⟩ := s.eq_empty_or_nonempty State Before: case refine'_1.inl
α : Type u_1
β : Type ?u.54709
t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card ∅ ≤ 1
⊢ ∃ x, ∅ ⊆ {x} State After: no goals Tactic: exact ⟨Classical.arbitrary α, empty_subset _⟩ State Before: case refine'_1.inr.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card s ≤ 1
x : α
hx : x ∈ s
⊢ ∃ x, s ⊆ {x} State After: no goals Tactic: exact ⟨x, fun y hy => by rw [card_le_one.1 H y hy x hx, mem_singleton]⟩ State Before: α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
H : card s ≤ 1
x : α
hx : x ∈ s
y : α
hy : y ∈ s
⊢ y ∈ {x} State After: no goals Tactic: rw [card_le_one.1 H y hy x hx, mem_singleton] State Before: case refine'_2
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
⊢ (∃ x, s ⊆ {x}) → card s ≤ 1 State After: case refine'_2.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
x : α
hx : s ⊆ {x}
⊢ card s ≤ 1 Tactic: rintro ⟨x, hx⟩ State Before: case refine'_2.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
x : α
hx : s ⊆ {x}
⊢ card s ≤ 1 State After: case refine'_2.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
x : α
hx : s ⊆ {x}
⊢ card s ≤ card {x} Tactic: rw [← card_singleton x] State Before: case refine'_2.intro
α : Type u_1
β : Type ?u.54709
s t : Finset α
f : α → β
n : ℕ
inst✝ : Nonempty α
x : α
hx : s ⊆ {x}
⊢ card s ≤ card {x} State After: no goals Tactic: exact card_le_of_subset hx |
= = Death of Clement XIII = =
|
(* Author: Andreas Lochbihler, Digital Asset
Author: Ognjen Maric, Digital Asset *)
theory Inclusion_Proof_Construction imports
ADS_Construction
begin
primrec blind_blindable :: "('a\<^sub>m \<Rightarrow> 'a\<^sub>h) \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) blindable\<^sub>m \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) blindable\<^sub>m" where
"blind_blindable h (Blinded x) = Blinded x"
| "blind_blindable h (Unblinded x) = Blinded (Content (h x))"
lemma hash_blind_blindable [simp]: "hash_blindable h (blind_blindable h x) = hash_blindable h x"
by(cases x) simp_all
subsection \<open>Inclusion proof construction for rose trees\<close>
(************************************************************)
subsubsection \<open> Hashing, embedding and blinding source trees \<close>
(************************************************************)
context fixes h :: "'a \<Rightarrow> 'a\<^sub>h" begin
fun hash_source_tree :: "'a rose_tree \<Rightarrow> 'a\<^sub>h rose_tree\<^sub>h" where
"hash_source_tree (Tree (data, subtrees)) = Tree\<^sub>h (Content (h data, map hash_source_tree subtrees))"
end
context fixes e :: "'a \<Rightarrow> 'a\<^sub>m" begin
fun embed_source_tree :: "'a rose_tree \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m" where
"embed_source_tree (Tree (data, subtrees)) =
Tree\<^sub>m (Unblinded (e data, map embed_source_tree subtrees))"
end
context fixes h :: "'a \<Rightarrow> 'a\<^sub>h" begin
fun blind_source_tree :: "'a rose_tree \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m" where
"blind_source_tree (Tree (data, subtrees)) = Tree\<^sub>m (Blinded (Content (h data, map (hash_source_tree h) subtrees)))"
end
case_of_simps blind_source_tree_cases: blind_source_tree.simps
fun is_blinded :: "('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m \<Rightarrow> bool" where
"is_blinded (Tree\<^sub>m (Blinded _)) = True"
| "is_blinded _ = False"
lemma hash_blinded_simp: "hash_tree h' (blind_source_tree h st) = hash_source_tree h st"
by(cases st rule: blind_source_tree.cases)(simp_all add: hash_rt_F\<^sub>m_def)
lemma hash_embedded_simp:
"hash_tree h (embed_source_tree e st) = hash_source_tree (h \<circ> e) st"
by(induction st rule: embed_source_tree.induct)(simp add: hash_rt_F\<^sub>m_def)
lemma blinded_embedded_same_hash:
"hash_tree h'' (blind_source_tree (h o e) st) = hash_tree h (embed_source_tree e st)"
by(simp add: hash_blinded_simp hash_embedded_simp)
lemma blinding_blinds [simp]:
"is_blinded (blind_source_tree h t)"
by(simp add: blind_source_tree_cases split: rose_tree.split)
lemma blinded_blinds_embedded:
"blinding_of_tree h bo (blind_source_tree (h o e) st) (embed_source_tree e st)"
by(cases st rule: blind_source_tree.cases)(simp_all add: hash_embedded_simp)
fun embed_hash_tree :: "'ha rose_tree\<^sub>h \<Rightarrow> ('a, 'ha) rose_tree\<^sub>m" where
"embed_hash_tree (Tree\<^sub>h h) = Tree\<^sub>m (Blinded h)"
(************************************************************)
subsubsection \<open>Auxiliary definitions: selectors and list splits\<close>
(************************************************************)
fun children :: "'a rose_tree \<Rightarrow> 'a rose_tree list" where
"children (Tree (data, subtrees)) = subtrees"
fun children\<^sub>m :: "('a, 'a\<^sub>h) rose_tree\<^sub>m \<Rightarrow> ('a, 'a\<^sub>h) rose_tree\<^sub>m list" where
"children\<^sub>m (Tree\<^sub>m (Unblinded (data, subtrees))) = subtrees"
| "children\<^sub>m _ = undefined"
fun splits :: "'a list \<Rightarrow> ('a list \<times> 'a \<times> 'a list) list" where
"splits [] = []"
| "splits (x#xs) = ([], x, xs) # map (\<lambda>(l, y, r). (x # l, y, r)) (splits xs)"
lemma splits_iff: "(l, a, r) \<in> set (splits ll) = (ll = l @ a # r)"
by(induction ll arbitrary: l a r)(auto simp add: Cons_eq_append_conv)
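(* A small sanity check of splits, added for illustration only (the concrete
   list below is ours, not part of the original development): each element is
   paired with the portion of the list to its left and to its right. *)
lemma "splits [a, b, c] = [([], a, [b, c]), ([a], b, [c]), ([a, b], c, [])]"
by simp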
(************************************************************)
subsubsection \<open> Zippers \<close>
(************************************************************)
text \<open> Zippers provide a neat representation of tree-like ADSs when they have only a single
unblinded subtree. The zipper path provides the "inclusion proof" that the unblinded subtree is
included in a larger structure. \<close>
type_synonym 'a path_elem = "'a \<times> 'a rose_tree list \<times> 'a rose_tree list"
type_synonym 'a path = "'a path_elem list"
type_synonym 'a zipper = "'a path \<times> 'a rose_tree"
definition zipper_of_tree :: "'a rose_tree \<Rightarrow> 'a zipper" where
"zipper_of_tree t \<equiv> ([], t)"
fun tree_of_zipper :: "'a zipper \<Rightarrow> 'a rose_tree" where
"tree_of_zipper ([], t) = t"
| "tree_of_zipper ((a, l, r) # z, t) = tree_of_zipper (z, (Tree (a, (l @ t # r))))"
case_of_simps tree_of_zipper_cases: tree_of_zipper.simps
lemma tree_of_zipper_id[iff]: "tree_of_zipper (zipper_of_tree t) = t"
by(simp add: zipper_of_tree_def)
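(* A concrete instance, ours and purely illustrative: focusing on the middle
   child of Tree (a, [t1, t2, t3]) gives the zipper ([(a, [t1], [t3])], t2),
   and tree_of_zipper reassembles the original tree from it. *)
lemma "tree_of_zipper ([(a, [t1], [t3])], t2) = Tree (a, [t1, t2, t3])"
by simp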
fun zipper_children :: "'a zipper \<Rightarrow> 'a zipper list" where
"zipper_children (p, Tree (a, ts)) = map (\<lambda>(l, t, r). ((a, l, r) # p, t)) (splits ts)"
lemma zipper_children_same_tree:
assumes "z' \<in> set (zipper_children z)"
shows "tree_of_zipper z' = tree_of_zipper z"
proof-
obtain p a ts where z: "z = (p, Tree (a, ts))"
using assms
by(cases z rule: zipper_children.cases) (simp_all)
then obtain l t r where ltr: "z' = ((a, l, r) # p, t)" and "(l, t, r) \<in> set (splits ts)"
using assms
by(auto)
with z show ?thesis
by(simp add: splits_iff)
qed
type_synonym ('a\<^sub>m, 'a\<^sub>h) path_elem\<^sub>m = "'a\<^sub>m \<times> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m list \<times> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m list"
type_synonym ('a\<^sub>m, 'a\<^sub>h) path\<^sub>m = "('a\<^sub>m, 'a\<^sub>h) path_elem\<^sub>m list"
type_synonym ('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m = "('a\<^sub>m, 'a\<^sub>h) path\<^sub>m \<times> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m"
definition zipper_of_tree\<^sub>m :: "('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m" where
"zipper_of_tree\<^sub>m t \<equiv> ([], t)"
fun tree_of_zipper\<^sub>m :: "('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) rose_tree\<^sub>m" where
"tree_of_zipper\<^sub>m ([], t) = t"
| "tree_of_zipper\<^sub>m ((m, l, r) # z, t) = tree_of_zipper\<^sub>m (z, Tree\<^sub>m (Unblinded (m, l @ t # r)))"
lemma tree_of_zipper\<^sub>m_append:
"tree_of_zipper\<^sub>m (p @ p', t) = tree_of_zipper\<^sub>m (p', tree_of_zipper\<^sub>m (p, t))"
by(induction p arbitrary: p' t) auto
fun zipper_children\<^sub>m :: "('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m list" where
"zipper_children\<^sub>m (p, Tree\<^sub>m (Unblinded (a, ts))) = map (\<lambda>(l, t, r). ((a, l, r) # p, t)) (splits ts) "
| "zipper_children\<^sub>m _ = []"
lemma zipper_children_same_tree\<^sub>m:
assumes "z' \<in> set (zipper_children\<^sub>m z)"
shows "tree_of_zipper\<^sub>m z' = tree_of_zipper\<^sub>m z"
proof-
obtain p a ts where z: "z = (p, Tree\<^sub>m (Unblinded (a, ts)))"
using assms
by(cases z rule: zipper_children\<^sub>m.cases) (simp_all)
then obtain l t r where ltr: "z' = ((a, l, r) # p, t)" and "(l, t, r) \<in> set (splits ts)"
using assms
by(auto)
with z show ?thesis
by(simp add: splits_iff)
qed
fun blind_path_elem :: "('a \<Rightarrow> 'a\<^sub>m) \<Rightarrow> ('a\<^sub>m \<Rightarrow> 'a\<^sub>h) \<Rightarrow> 'a path_elem \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) path_elem\<^sub>m" where
"blind_path_elem e h (x, l, r) = (e x, map (blind_source_tree (h \<circ> e)) l, map (blind_source_tree (h \<circ> e)) r)"
case_of_simps blind_path_elem_cases: blind_path_elem.simps
definition blind_path :: "('a \<Rightarrow> 'a\<^sub>m) \<Rightarrow> ('a\<^sub>m \<Rightarrow> 'a\<^sub>h) \<Rightarrow> 'a path \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) path\<^sub>m" where
"blind_path e h \<equiv> map (blind_path_elem e h)"
fun embed_path_elem :: "('a \<Rightarrow> 'a\<^sub>m) \<Rightarrow> 'a path_elem \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) path_elem\<^sub>m" where
"embed_path_elem e (d, l, r) = (e d, map (embed_source_tree e) l, map (embed_source_tree e) r)"
definition embed_path :: "('a \<Rightarrow> 'a\<^sub>m) \<Rightarrow> 'a path \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) path\<^sub>m" where
"embed_path embed_elem \<equiv> map (embed_path_elem embed_elem)"
lemma hash_tree_of_zipper_same_path:
"hash_tree h (tree_of_zipper\<^sub>m (p, v)) = hash_tree h (tree_of_zipper\<^sub>m (p, v'))
\<longleftrightarrow> hash_tree h v = hash_tree h v'"
by(induction p arbitrary: v v')(auto simp add: hash_rt_F\<^sub>m_def)
fun hash_path_elem :: "('a\<^sub>m \<Rightarrow> 'a\<^sub>h) \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) path_elem\<^sub>m \<Rightarrow> ('a\<^sub>h \<times> 'a\<^sub>h rose_tree\<^sub>h list \<times> 'a\<^sub>h rose_tree\<^sub>h list)" where
"hash_path_elem h (e, l, r) = (h e, map (hash_tree h) l, map (hash_tree h) r)"
lemma hash_view_zipper_eqI:
"\<lbrakk> hash_list (hash_path_elem h) p = hash_list (hash_path_elem h') p';
hash_tree h v = hash_tree h' v' \<rbrakk> \<Longrightarrow>
hash_tree h (tree_of_zipper\<^sub>m (p, v)) = hash_tree h' (tree_of_zipper\<^sub>m (p', v'))"
by(induction p arbitrary: p' v v')(auto simp add: hash_rt_F\<^sub>m_def)
lemma blind_embed_path_same_hash:
"hash_tree h (tree_of_zipper\<^sub>m (blind_path e h p, t)) = hash_tree h (tree_of_zipper\<^sub>m (embed_path e p, t))"
proof -
have "hash_path_elem h \<circ> blind_path_elem e h = hash_path_elem h \<circ> embed_path_elem e"
by(clarsimp simp add: hash_blinded_simp hash_embedded_simp fun_eq_iff intro!: arg_cong2[where f=hash_source_tree, OF _ refl])
then show ?thesis
by(intro hash_view_zipper_eqI)(simp_all add: embed_path_def blind_path_def list.map_comp)
qed
lemma tree_of_embed_commute:
"tree_of_zipper\<^sub>m (embed_path e p, embed_source_tree e t) = embed_source_tree e (tree_of_zipper (p, t))"
by(induction "(p, t)" arbitrary: p t rule: tree_of_zipper.induct)(simp_all add: embed_path_def)
lemma childz_same_tree:
"(l, t, r) \<in> set (splits ts) \<Longrightarrow>
tree_of_zipper\<^sub>m (embed_path e p, embed_source_tree e (Tree (d, ts)))
= tree_of_zipper\<^sub>m (embed_path e ((d, l, r) # p), embed_source_tree e t)"
by(simp add: tree_of_embed_commute splits_iff del: embed_source_tree.simps)
lemma blinding_of_same_path:
assumes bo: "blinding_of_on UNIV h bo"
shows
"blinding_of_tree h bo (tree_of_zipper\<^sub>m (p, t)) (tree_of_zipper\<^sub>m (p, t'))
\<longleftrightarrow> blinding_of_tree h bo t t'"
proof -
interpret a: blinding_of_on UNIV h bo by fact
interpret tree: blinding_of_on UNIV "hash_tree h" "blinding_of_tree h bo" ..
show ?thesis
by(induction p arbitrary: t t')(auto simp add: list_all2_append list.rel_refl a.refl tree.refl)
qed
lemma zipper_children_size_change [termination_simp]: "(a, b) \<in> set (zipper_children (p, v)) \<Longrightarrow> size b < size v"
by(cases v)(clarsimp simp add: splits_iff Set.image_iff)
subsection \<open>All zippers of a rose tree\<close>
context fixes e :: "'a \<Rightarrow> 'a\<^sub>m" and h :: "'a\<^sub>m \<Rightarrow> 'a\<^sub>h" begin
fun zippers_rose_tree :: "'a zipper \<Rightarrow> ('a\<^sub>m, 'a\<^sub>h) zipper\<^sub>m list" where
"zippers_rose_tree (p, t) = (blind_path e h p, embed_source_tree e t) #
concat (map zippers_rose_tree (zipper_children (p, t)))"
end
lemmas [simp del] = zippers_rose_tree.simps zipper_children.simps
lemma zippers_rose_tree_same_hash':
assumes "z \<in> set (zippers_rose_tree e h (p, t))"
shows "hash_tree h (tree_of_zipper\<^sub>m z) =
hash_tree h (tree_of_zipper\<^sub>m (embed_path e p, embed_source_tree e t))"
using assms(1)
proof(induction "(p, t)" arbitrary: p t rule: zippers_rose_tree.induct)
case (1 p t)
from "1.prems"[unfolded zippers_rose_tree.simps]
consider (find) "z = (blind_path e h p, embed_source_tree e t)"
| (rec) x ts l t' r where "t = Tree (x, ts)" "(l, t', r) \<in> set (splits ts)" "z \<in> set (zippers_rose_tree e h ((x, l, r) # p, t'))"
by(cases t)(auto simp add: zipper_children.simps)
then show ?case
proof cases
case rec
then show ?thesis
apply(subst "1.hyps"[of "(x, l, r) # p" "t'"])
apply(simp_all add: rev_image_eqI zipper_children.simps)
by (metis (no_types) childz_same_tree comp_apply embed_source_tree.simps rec(2))
qed(simp add: blind_embed_path_same_hash)
qed
lemma zippers_rose_tree_blinding_of:
assumes "blinding_of_on UNIV h bo"
and z: "z \<in> set (zippers_rose_tree e h (p, t))"
shows "blinding_of_tree h bo (tree_of_zipper\<^sub>m z) (tree_of_zipper\<^sub>m (blind_path e h p, embed_source_tree e t))"
using z
proof(induction "(p, t)" arbitrary: p t rule: zippers_rose_tree.induct)
case (1 p t)
interpret a: blinding_of_on UNIV h bo by fact
interpret rt: blinding_of_on UNIV "hash_tree h" "blinding_of_tree h bo" ..
from "1.prems"[unfolded zippers_rose_tree.simps]
consider (find) "z = (blind_path e h p, embed_source_tree e t)"
| (rec) x ts l t' r where "t = Tree (x, ts)" "(l, t', r) \<in> set (splits ts)" "z \<in> set (zippers_rose_tree e h ((x, l, r) # p, t'))"
by(cases t)(auto simp add: zipper_children.simps)
then show ?case
proof cases
case find
then show ?thesis by(simp add: rt.refl)
next
case rec
then have "blinding_of_tree h bo
(tree_of_zipper\<^sub>m z)
(tree_of_zipper\<^sub>m (blind_path e h ((x, l, r) # p), embed_source_tree e t'))"
by(intro 1)(simp add: rev_image_eqI zipper_children.simps)
also have "blinding_of_tree h bo
(tree_of_zipper\<^sub>m (blind_path e h ((x, l, r) # p), embed_source_tree e t'))
(tree_of_zipper\<^sub>m (blind_path e h p, embed_source_tree e (Tree (x, ts))))"
using rec
by(simp add: blind_path_def splits_iff blinding_of_same_path[OF assms(1)] a.refl list_all2_append list_all2_same list.rel_map blinded_blinds_embedded rt.refl)
finally (rt.trans) show ?thesis using rec by simp
qed
qed
lemma zippers_rose_tree_neq_Nil: "zippers_rose_tree e h (p, t) \<noteq> []"
by(simp add: zippers_rose_tree.simps)
lemma (in comp_fun_idem) fold_set_union:
assumes "finite A" "finite B"
shows "Finite_Set.fold f z (A \<union> B) = Finite_Set.fold f (Finite_Set.fold f z A) B"
using assms(2,1) by induct simp_all
context merkle_interface begin
lemma comp_fun_idem_merge: "comp_fun_idem (\<lambda>x yo. yo \<bind> m x)"
apply(unfold_locales; clarsimp simp add: fun_eq_iff split: bind_split)
subgoal by (metis assoc bind.bind_lunit bind.bind_lzero idem option.distinct(1))
subgoal by (simp add: join)
done
interpretation merge: comp_fun_idem "\<lambda>x yo. yo \<bind> m x" by(rule comp_fun_idem_merge)
definition Merge :: "'a\<^sub>m set \<Rightarrow> 'a\<^sub>m option" where
"Merge A = (if A = {} \<or> infinite A then None else Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some (SOME x. x \<in> A)) A)"
lemma Merge_empty [simp]: "Merge {} = None"
by(simp add: Merge_def)
lemma Merge_infinite [simp]: "infinite A \<Longrightarrow> Merge A = None"
by(simp add: Merge_def)
lemma Merge_cong_start:
"Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some x) A = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some y) A" (is "?lhs = ?rhs")
if "x \<in> A" "y \<in> A" "finite A"
proof -
have "?lhs = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some x) (insert y A)" using that by(simp add: insert_absorb)
also have "\<dots> = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (m x y) A" using that
by(simp only: merge.fold_insert_idem2)(simp add: commute)
also have "\<dots> = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some y) (insert x A)" using that
by(simp only: merge.fold_insert_idem2)(simp)
also have "\<dots> = ?rhs" using that by(simp add: insert_absorb)
finally show ?thesis .
qed
lemma Merge_insert [simp]: "Merge (insert x A) = (if A = {} then Some x else Merge A \<bind> m x)" (is "?lhs = ?rhs")
proof(cases "finite A \<and> A \<noteq> {}")
case True
then have "?lhs = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some (SOME x. x \<in> A)) (insert x A)"
unfolding Merge_def by(subst Merge_cong_start[where y="SOME x. x \<in> A", OF someI])(auto intro: someI)
also have "\<dots> = ?rhs" using True by(simp add: Merge_def)
finally show ?thesis .
qed(auto simp add: Merge_def idem)
lemma Merge_insert_alt:
"Merge (insert x A) = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some x) A" (is "?lhs = ?rhs") if "finite A"
proof -
have "?lhs = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some x) (insert x A)" using that
unfolding Merge_def by(subst Merge_cong_start[where y=x, OF someI]) auto
also have "\<dots> = ?rhs" using that by(simp only: merge.fold_insert_idem2)(simp add: idem)
finally show ?thesis .
qed
lemma Merge_None [simp]: "Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) None A = None"
proof(cases "finite A")
case True
then show ?thesis by(induction) auto
qed simp
lemma Merge_union:
"Merge (A \<union> B) = (if A = {} then Merge B else if B = {} then Merge A else (Merge A \<bind> (\<lambda>a. Merge B \<bind> m a)))"
(is "?lhs = ?rhs")
proof(cases "finite (A \<union> B) \<and> A \<noteq> {} \<and> B \<noteq> {}")
case True
then have "?lhs = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some (SOME x. x \<in> B)) (B \<union> A)"
unfolding Merge_def by(subst Merge_cong_start[where y="SOME x. x \<in> B", OF someI])(auto intro: someI simp add: Un_commute)
also have "\<dots> = Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Merge B) A" using True
by(simp add: Merge_def merge.fold_set_union)
also have "\<dots> = Merge A \<bind> (\<lambda>a. Merge B \<bind> m a)"
proof(cases "Merge B")
case (Some b)
thus ?thesis using True
by simp(subst Merge_insert_alt[symmetric]; simp add: commute; metis commute)
qed simp
finally show ?thesis using True by simp
qed auto
lemma Merge_upper:
assumes m: "Merge A = Some x" and y: "y \<in> A"
shows "bo y x"
proof -
have "Merge A = Merge (insert y A)" using y by(simp add: insert_absorb)
also have "\<dots> = Merge A \<bind> m y" using y by auto
finally have "m y x = Some x" using m by simp
thus ?thesis by(simp add: bo_def)
qed
lemma Merge_least:
assumes m: "Merge A = Some x" and u[rule_format]: "\<forall>a\<in>A. bo a u"
shows "bo x u"
proof -
define a where "a \<equiv> SOME x. x \<in> A"
from m have A: "finite A" "A \<noteq> {}"
and *: "Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some a) A = Some x"
by(auto simp add: Merge_def a_def split: if_splits)
from A have "bo a u" by(auto intro: someI u simp add: a_def)
with A * u show ?thesis
proof(induction A arbitrary: a)
case (insert x A)
then show ?case
by(cases "m x a"; cases "A = {}"; simp only: merge.fold_insert_idem2; simp)(auto simp add: join)
qed simp
qed
lemma Merge_defined:
assumes "finite A" "A \<noteq> {}" "\<forall>a\<in>A. \<forall>b \<in> A. h a = h b"
shows "Merge A \<noteq> None"
proof
define a where "a \<equiv> SOME a. a \<in> A"
have a: "a \<in> A" unfolding a_def using assms by(auto intro: someI)
hence ha: "\<forall>b \<in> A. h b = h a" using assms by blast
assume m: "Merge A = None"
hence "Finite_Set.fold (\<lambda>x yo. yo \<bind> m x) (Some a) A = None"
using assms by(simp add: Merge_def a_def)
with assms(1) show False using ha
proof(induction arbitrary: a)
case (insert x A)
thus ?case
apply(cases "m x a"; use nothing in \<open>simp only: merge.fold_insert_idem2\<close>)
apply(simp add: merge_respects_hashes)
apply(fastforce simp add: join vimage2p_def dest: hash[THEN predicate2D])
done
qed simp
qed
lemma Merge_hash:
assumes "Merge A = Some x" "a \<in> A"
shows "h a = h x"
using Merge_upper[OF assms] hash by(auto simp add: vimage2p_def)
end
end |
open import Issue2229Base public
|
import data.nat.parity
namespace nat
variables {n : ℕ}
@[parity_simps] lemma odd_succ : odd (succ n) ↔ ¬ odd n :=
by rw [succ_eq_add_one, odd_add]; simp [not_even_one]
lemma not_even : ¬ even n ↔ odd n := nat.odd_iff_not_even.symm
lemma not_odd : ¬ odd n ↔ even n := nat.even_iff_not_odd.symm
end nat
|
[STATEMENT]
lemma FreeGroupD: "x \<in> FreeGroup S \<Longrightarrow> fst ` set (freeword x) \<subseteq> S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. x \<in> FreeGroup S \<Longrightarrow> fst ` set (freeword x) \<subseteq> S
[PROOF STEP]
using FreeGroup_def
[PROOF STATE]
proof (prove)
using this:
FreeGroup ?S \<equiv> {x. fst ` set (freeword x) \<subseteq> ?S}
goal (1 subgoal):
1. x \<in> FreeGroup S \<Longrightarrow> fst ` set (freeword x) \<subseteq> S
[PROOF STEP]
by fast |
plotxts <- function(xts, legendloc='topleft', main=NULL) {
## plots every column in the XTS object vs. date and adds a legend
ylabel <- deparse(substitute(xts))
xts::plot.xts(xts, ylab=ylabel, main=main)
xts::addLegend(legendloc,
legend.names = names(xts),
lty=1,
col=1:ncol(xts))
}
plotzoo <- function(zoo, legendloc='topleft') {
## plots every column in the ZOO object vs. date and adds a legend
## seems to work on zoo and xts objects
ylabel <- deparse(substitute(zoo))
zoo::plot.zoo(zoo,
ylab=ylabel,
screens=1,
lty=1,
col=1:ncol(zoo))
legend(legendloc,
legend = names(zoo),
lty = 1,
col = 1:ncol(zoo))
}
plotmat <- function(mat, xx=NULL, legendloc='topleft') {
## work in progress
## plot matrix vs. xx
## strip out xx column if specified and part of mat
## Add argument axes=F to omit the axes
if (!is.null(xx)) {
xlabel <- deparse(substitute(xx))
} else {
xlabel <- 'Index'
xx <- 1:nrow(mat)
}
if (xts::is.xts(mat) == TRUE | zoo::is.zoo(mat)) {
## xts or zoo passed in rather than matrix
## use dates for x-axis (override xx if specified)
xx <- zoo::index(mat)
xlabel <- 'Date'
mat <- as.matrix(mat)
} else if (is.data.frame(mat) == TRUE) {
## dataframe passed in rather than matrix
if (!is.null(xx)) {
## xx is specified so handle it
xlabel <- deparse(substitute(xx))
ncolxx <- which(grepl(xx, names(mat)))
if (length(ncolxx) > 0) {
## xx matches a column of mat, so use that column and remove it from mat
xlabel <- names(mat)[ncolxx[1]]
xx <- mat[[ncolxx[1]]]
mat[[ncolxx[1]]] <- NULL
}
}
mat <- as.matrix(mat)
}
ylabel <- deparse(substitute(mat))
graphics::matplot(x=xx, y=mat, type='l',
xlab = xlabel,
ylab = ylabel)
grid(col='grey70')
legend(legendloc,
legend = colnames(mat),
col = 1:ncol(mat),
lty = 1)
}
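## ## example for plotmat (illustrative only; data and column names are made up)
## df <- data.frame(time=1:10, a=sqrt(1:10), b=(1:10)^2)
## plotmat(df, xx='time')       # uses (and drops) the 'time' column for the x-axis
## plotmat(as.matrix(df[-1]))   # plain matrix plotted against an index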
## ## test that does not use any of my functions
## quantmod::getSymbols(c('SPY','IWM'),
## src = 'yahoo',
## from = '2010-01-01',
## to = '2021-02-01',
## auto.assign = TRUE,
## warnings = FALSE)
## close <- cbind(SPY$SPY.Close, IWM$IWM.Close)
## plotxts(close)
## ## test that relies on my function equityget
## out <- equityget(c('SPY', 'IWM', 'EFA', 'AGG', 'SHV'), from='1995-01-01', period='years')
## close <- out$close
## plotxts(close)
##-----------------------------------------------------------------------------
## following were earlier attempts that I can probably get rid of
##-----------------------------------------------------------------------------
## ## FOLLOWING SUBMITTED TO STACKEXCHANGE
## ## https://stackoverflow.com/questions/69498375/in-r-how-can-i-add-a-legend-to-a-plot-of-an-xts-object
## ## create XTS object with two columns
## quantmod::getSymbols(c('SPY','IWM'),
## src = 'yahoo',
## from = '2010-01-01',
## to = '2021-02-01',
## auto.assign = TRUE,
## warnings = FALSE)
## close <- cbind(SPY$SPY.Close, IWM$IWM.Close)
##
## ## the following works except for the legend
## qualityTools::plot(close, col=1:ncol(close))
## legend('topleft', legend=colnames(close), col=1:ncol(close), lty=1, cex=0.5)
##
## ## the following works but destroys the time axis
## closets <- stats::as.ts(close)
## qualityTools::plot(closets, plot.type='single', col = 1:ncol(closets))
## legend("topleft", legend=colnames(closets), col=1:ncol(closets), lty=1, cex=0.5)
##
## ##-----------------------------------------------------------------------------
## plot(twr, plot.type="single", col = 1:ncol(twr))
## legend("bottomleft", legend=colnames(twr), col=1:ncol(twr), lty=1, cex=.65)
## plot(twr[,which(grepl('SPY', colnames(twr)))], plot.type="single", col = 1:ncol(twr))
## # All countries in one plot... colorful, common scale, and so on
## twrts <- as.ts(twr)
## plot(twrts, plot.type="single", col = 1:ncol(twrts))
## legend("topleft", legend=colnames(twrts), col=1:ncol(twrts), lty=1, cex=.65)
## grid(col='grey70')
## plotxts <- function(xts) {
##
## ## ## try matplot
## ## date <- zoo::index(xts)
## ## graphics::matplot(x=date, y=xts, type='l')
## ## grid(col='grey70')
## ## legend('topleft',
## ## legend = names(xts),
## ## col = 1:ncol(xts),
## ## lty = 1)
##
## ## ## try matplot
## ## date <- zoo::index(xts)
## ## graphics::matplot(x=date, y=xts, type='l', xaxt='n')
## ## axis(side=1, at=1:nrow(xts), labels=date)
## ## grid(col='grey70')
## ## legend('topleft',
## ## legend = names(xts),
## ## col = 1:ncol(xts),
## ## lty = 1)
##
## ## try matplot (THIS ONE IS NOT BAD)
## ## Add argument axes=F to omit the axes
## date <- zoo::index(xts)
## ylabel <- deparse(substitute(xts))
## graphics::matplot(xts, type='l', lty=1,
## main=NULL, xlab='date', ylab=ylabel,
## axes=FALSE)
## axis(side=1, at=1:nrow(xts), labels=date) # x-axis
## axis(side=2) # y-axis
## grid(col='grey70')
## legend('topleft',
## legend = names(xts),
## col = 1:ncol(xts),
## lty = 1)
##
## ## ## the following works except for the legend
## ## ## also, it only works interatively (i.e., not in a function)
## ## qualityTools::plot(xts, col=1:ncol(xts))
## ## legend('topleft', legend=colnames(xts), col=1:ncol(xts), lty=1, cex=0.5)
## ## data.frame(symbols=colnames(twr), color=palette()[1:ncol(twr)])
## ## mtext(text='test', side=3)
##
## ## ## the following works but uses a fake time axis
## ## ## (1 increment for each period; may not be bad)
## ## xtsts <- stats::as.ts(xts)
## ## qualityTools::plot(xtsts, plot.type='single', col = 1:ncol(xtsts), xaxt='n')
## ## legend("topleft", legend=colnames(xtsts), col=1:ncol(xtsts), lty=1, cex=0.5)
## ## axis(1, at=1:nrow(xtsts), labels=date)
## ## grid(col='grey70')
##
##
## }
## test
## quantmod::getSymbols(c('SPY','IWM'),
## src = 'yahoo',
## from = '2010-01-01',
## to = '2021-02-01',
## auto.assign = TRUE,
## warnings = FALSE)
## close <- cbind(SPY$SPY.Close, IWM$IWM.Close)
## plotxts(close)
|
(**
CoLoR, a Coq library on rewriting and termination.
See the COPYRIGHTS and LICENSE files.
- Frédéric Blanqui, 2014-02-18
Some structures on tropical numbers. *)
Set Implicit Arguments.
Require Import Arith OrdSemiRing2 Omega LogicUtil RelUtil Morphisms
SemiRing EqUtil Max.
Instance Tropical_as_Setoid : Setoid := LeibnizSetoid TropicalDom.
Instance Tropical_as_DS : DecidableSetoid.
Proof.
apply Build_DecidableSetoid with (ds_setoid := Tropical_as_Setoid).
simpl. intros.
apply dec_beq with (beq := beq_TropicalDom). apply beq_TropicalDom_ok.
Defined.
(***********************************************************************)
(** Tropical ordered setoid *)
Definition gt m n :=
match m, n with
| PlusInf, PlusInf => False
| PlusInf, _ => True
| TPos _, PlusInf => False
| TPos m, TPos n => m > n
end.
Definition ge m n := gt m n \/ m = n.
Instance Tropical_as_OS : OrderedSetoid.
Proof.
apply Build_OrderedSetoid with
(os_setoid := Tropical_as_Setoid)
(os_gt := gt).
simpl. intros x x' xx' y y' yy' xy. rewrite <- xx'.
rewrite <- yy'. apply xy.
Defined.
(***********************************************************************)
(** Tropical semi-ring *)
Definition A0 := PlusInf.
Definition A1 := TPos 0.
Definition Aplus m n :=
match m, n with
| PlusInf, n => n
| m, PlusInf => m
| TPos m, TPos n => TPos (min m n)
end.
Definition Amult m n :=
match m, n with
| PlusInf, _ => PlusInf
| _, PlusInf => PlusInf
| TPos m, TPos n => TPos (m + n)
end.
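(* Two quick sanity checks of the tropical operations (illustrative only):
   addition takes the minimum of finite values and multiplication adds them,
   while PlusInf is neutral for Aplus and absorbing for Amult. *)
Example Aplus_example : Aplus (TPos 2) (TPos 5) = TPos 2.
Proof. reflexivity. Qed.
Example Amult_example : Amult (TPos 2) (TPos 5) = TPos 7.
Proof. reflexivity. Qed.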
Require Import Min.
Lemma A_plus_comm : forall m n, Aplus m n = Aplus n m.
Proof.
intros. unfold Aplus. destruct m; destruct n; trivial.
rewrite min_comm. trivial.
Qed.
Lemma A_plus_assoc : forall m n p,
Aplus m (Aplus n p) = Aplus (Aplus m n) p.
Proof.
intros. unfold Aplus.
destruct m; destruct n; destruct p; trivial.
rewrite min_assoc. trivial.
Qed.
Lemma A_mult_comm : forall m n, Amult m n = Amult n m.
Proof.
intros. unfold Amult. destruct m; destruct n; trivial.
rewrite plus_comm. trivial.
Qed.
Lemma A_mult_assoc : forall m n p,
Amult m (Amult n p) = Amult (Amult m n) p.
Proof.
intros. unfold Amult.
destruct m; destruct n; destruct p; trivial.
rewrite plus_assoc. trivial.
Qed.
Import Compare. Import Min.
Lemma A_mult_plus_distr : forall m n p,
Amult (Aplus m n) p = Aplus (Amult m p) (Amult n p).
Proof.
intros. unfold Amult, Aplus.
destruct m; destruct n; destruct p; trivial.
destruct (le_dec n0 n).
rewrite min_l. rewrite min_l. trivial.
auto with arith. trivial.
rewrite min_r. rewrite min_r. trivial.
auto with arith. trivial.
Qed.
Instance Tropical_as_SR : SemiRing.
Proof.
apply Build_SemiRing with
(sr_ds := Tropical_as_DS)
(sr_0 := PlusInf)
(sr_1 := TPos 0)
(sr_add := Aplus)
(sr_mul := Amult).
(* Aplus s_eq as proper. *)
intros x y z xx yy H. rewrite H. rewrite z. refl.
(* Amult s_eq as proper. *)
intros x y z xx yy H. rewrite H. rewrite z. refl.
(* Semi-ring-theory. *)
constructor; intros.
compute; trivial.
apply A_plus_comm.
apply A_plus_assoc.
destruct n; compute; trivial.
compute; trivial.
apply A_mult_comm.
apply A_mult_assoc.
apply A_mult_plus_distr.
Defined.
(***********************************************************************)
(** Tropical ordered semi-ring *)
Notation "x + y" := (Aplus x y).
Notation "x * y" := (Amult x y).
Notation "x >=_t y" := (ge x y) (at level 70).
Notation "x >_t y" := (gt x y) (at level 70).
Lemma gt_trans : transitive gt.
Proof.
intros x y z xy yz.
destruct x; destruct y; destruct z; try solve [ auto | contr ].
apply gt_trans with n0; auto.
Qed.
Lemma ge_trans : transitive ge.
Proof.
intros x y z xy yz. destruct xy. destruct yz.
left. apply (gt_trans x y z); hyp.
subst y. left. hyp.
subst x. hyp.
Qed.
Lemma gt_dec : rel_dec gt.
Proof.
unfold rel_dec. intros.
destruct x; destruct y; simpl; auto.
destruct (gt_dec n n0); auto.
Defined.
Lemma ge_dec : rel_dec ge.
Proof.
intros x y. destruct (gt_dec x y).
left. left. hyp.
destruct (@ds_eq_dec Tropical_as_DS x y).
left. right. hyp.
right. intro xy. destruct xy; auto.
Defined.
(* ge @ gt << gt : ge_gt_compat *)
Lemma ge_gt_compat: forall x y z, x >=_t y -> y >_t z -> x >_t z.
Proof with simpl; intuition.
intros.
destruct y; destruct x; destruct z; auto...
destruct H. simpl in *. omega. injection H. intros. subst...
destruct H. contr. discr.
Qed.
(* gt @ ge << gt : gt_ge_compat *)
Lemma gt_ge_compat: forall x y z, x >_t y -> y >=_t z -> x >_t z.
Proof.
unfold ge, gt. destruct x; destruct y; destruct z; simpl; intuition;
try discr.
inversion H1. subst. hyp.
Qed.
Lemma ge_impl_pos_ge : forall m n, (m >= n)%nat -> TPos m >=_t TPos n.
Proof.
intros. destruct (lt_eq_lt_dec m n) as [[m_n | m_n] | m_n].
elimtype False. omega.
subst m. right. refl.
left. trivial.
Qed.
Lemma pos_ge_impl_ge : forall m n, TPos m >=_t TPos n -> (m >= n)%nat.
Proof.
intros. destruct H. auto with arith.
injection H. intro. subst m. auto with arith.
Qed.
Ltac tropical_ord :=
match goal with
| H: _ >_t PlusInf |- _ => contr
| H: TPos _ >=_t PlusInf |- _ =>
destruct H; [ contr | discr ]
| H: TPos ?m >=_t TPos ?n |- _ =>
assert ((m >= n)%nat);
[ apply pos_ge_impl_ge; hyp
| clear H; tropical_ord
]
| |- PlusInf >=_t TPos _ => left; simpl; trivial
| |- TPos ?m >=_t TPos ?n => apply ge_impl_pos_ge
| _ => try solve [contr | discr]
end.
Lemma plus_gt_compat: Proper (gt ==> gt ==> gt) Aplus.
Proof.
intros m m' H n n' H0.
destruct m; destruct n; destruct m'; destruct n';
simpl; trivial; tropical_ord.
apply NatUtil.min_gt_compat; hyp.
unfold Peano.gt. apply NatUtil.lt_min_intro_l. hyp.
unfold Peano.gt. apply NatUtil.lt_min_intro_r. hyp.
Qed.
Lemma plus_ge_compat: Proper (ge ==> ge ==> ge) Aplus.
Proof.
intros m m' H n n' H0.
destruct m; destruct n; destruct m'; destruct n';
simpl; trivial; tropical_ord.
apply NatUtil.min_ge_compat; hyp.
unfold Peano.ge. apply NatUtil.le_min_intro_l. hyp.
unfold Peano.ge. apply NatUtil.le_min_intro_r. hyp.
Qed.
Lemma mult_ge_compat: Proper (ge ==> ge ==> ge) Amult.
Proof.
intros m m' H n n' H0.
destruct m; destruct n; destruct m'; destruct n';
simpl; trivial; tropical_ord.
omega.
Qed.
Instance Tropical_as_OSR : OrderedSemiRing.
Proof.
apply Build_OrderedSemiRing with
(osr_sr := Tropical_as_SR)
(osr_gt := gt)
(osr_ge := ge); simpl.
fo. fo.
(* Transitive ge *)
apply ge_trans.
(* Transitive gt *)
apply gt_trans.
(* rel_dec ge *)
apply ge_dec.
(* rel_dec gt *)
apply gt_dec.
(* ge @ gt << gt *)
apply ge_gt_compat.
(* gt @ ge << gt *)
apply gt_ge_compat.
(* Aplus gt proper: plus_gt_compat *)
apply plus_gt_compat.
(* Aplus ge proper: plus_ge_compat *)
apply plus_ge_compat.
(* Mult ge proper: mult_ge_compat *)
apply mult_ge_compat.
Defined.
Lemma tropical_plus_notInf_left :forall a b,
a <> PlusInf -> Aplus a b <> PlusInf.
Proof.
intros. destruct a.
destruct b; simpl; discr.
auto.
Qed.
Lemma tropical_mult_notInf : forall a b,
a <> PlusInf -> b <> PlusInf -> Amult a b <> PlusInf.
Proof.
intros.
destruct a; auto.
destruct b; auto.
simpl. discr.
Qed.
Lemma tropical_plus_inf_max : forall x, x <> PlusInf -> PlusInf >_t x.
Proof.
intros. destruct x. simpl. auto.
elimtype False. apply H. trivial.
Qed.
Lemma A_plus_0_r : forall n, Aplus n PlusInf = n.
Proof.
intros. unfold Aplus. destruct n; auto.
Qed.
Lemma A_plus_0_l : forall n, Aplus PlusInf n = n.
Proof.
intros. rewrite A_plus_comm. apply A_plus_0_r.
Qed.
Lemma gt_irrefl : irreflexive gt.
Proof.
intros x xx. destruct x.
unfold gt in xx. omega.
auto.
Qed.
Require Import SN.
Lemma gt_Fin_WF x : Acc (transp gt) (TPos x).
Proof.
induction x using lt_wf_ind; apply Acc_intro; destruct y;
auto || contr.
Qed.
Hint Resolve gt_Fin_WF.
Lemma gt_WF : WF gt.
Proof with auto; try contr.
apply wf_transp_WF. intro x.
destruct x...
apply Acc_intro. intros. destruct y...
Qed.
Lemma ge_gt_eq : forall x y, x >=_t y -> x >_t y \/ x = y.
Proof.
destruct 1; auto.
Qed. |
\section{Cyclic Groups}
Recall that $C_n$ is the set of $n^{th}$ roots of unity.
If we write $\xi=e^{2\pi i/n}$, the group is actually generated by $\xi$, that is, every element is of the form $\xi^k$ for some $k$.
Note that $\xi^n=\xi^0=1$.
\begin{definition}
A group $G$ is called cyclic if there is an $a\in G$ such that every element is of the form $a^k$ for some $k$.\\
The element $a$ is called the generator of $G$.
\end{definition}
\begin{example}
1. The integers under addition is cyclic with generator $1$.\\
2. The group $\mathbb Z_n$ under addition modulo $n$ is cyclic with generator $1$.
But in fact, the map $\phi:\mathbb Z_n\to C_n$ given by $\phi(k)=\xi^k$ is an isomorphism, and hence $C_n\cong\mathbb Z_n$.
\end{example}
\begin{theorem}[Classification of Cyclic Groups]
A cyclic group is isomorphic to either $C_n$ for some $n\in\mathbb N$ or $\mathbb Z$.
\end{theorem}
\begin{proof}
Let $G$ be a cyclic group and $a$ be its generator.
Consider $S=\{k\in\mathbb N\setminus\{0\}:a^k=e\}$.
If $S\neq\varnothing$, then let $n$ be the smallest element of $S$.
	Consider the function $\phi:C_n\to G$ given by $\phi(\xi^k)=a^k$ for $0\le k<n$.
	We want to show that it is an isomorphism.\\
	If $k,l<n$ are such that $k+l<n$, then $\phi(\xi^k\xi^l)=\phi(\xi^{k+l})=a^{k+l}=a^ka^l=\phi(\xi^k)\phi(\xi^l)$.
	On the other hand, if $k+l=n+r$ with $0\le r<n$, then $\xi^k\xi^l=\xi^r$, so $\phi(\xi^k\xi^l)=a^r=a^na^r=a^{k+l}=a^ka^l=\phi(\xi^k)\phi(\xi^l)$, using $a^n=e$.
	As $G$ is generated by $a$ and $a^n=e$, every element of $G$ is of the form $a^k$ for some $0\le k<n$, and $\phi(\xi^k)=a^k$, so $\phi$ is surjective.
	For injectivity, consider the kernel of $\phi$: if $\phi(\xi^k)=e$ with $0\le k<n$, then $a^k=e$, so $k=0$ by minimality of $n$; hence $\ker\phi=\{1\}$.
	So $\phi$ is injective and $G\cong C_n$.\\
	Now if $S=\varnothing$, then we shall show that $G\cong\mathbb Z$.
	Consider the map $\phi:\mathbb Z\to G$ given by $\phi(k)=a^k$; then $\phi(k+l)=a^{k+l}=a^ka^l=\phi(k)\phi(l)$.
	This is surjective since $a$ generates $G$.
	Its kernel consists of the integers $k$ with $a^k=e$; if some $k\neq 0$ lay in the kernel, then $a^{|k|}=e$ as well, contradicting $S=\varnothing$, so the kernel is trivial and $\phi$ is injective.
	Therefore $G\cong\mathbb Z$.
\end{proof}
Because of this theorem, it is convenient to write $\mathbb Z=C_\infty$.
\begin{definition}
	Let $G$ be a group and $g\in G$. The order of $g$ is the smallest positive integer $n$ such that $g^n=e$, if such an $n$ exists.
If there isn't such an $n$, then we say that $g$ has infinite order.\\
We write $\operatorname{ord}(g)$ to denote the order of $g$.
\end{definition}
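For concreteness, we compute a few orders in a small cyclic group.
\begin{example}
	In $\mathbb Z_6$ under addition modulo $6$ we have $\operatorname{ord}(1)=6$, $\operatorname{ord}(2)=3$, $\operatorname{ord}(3)=2$ and $\operatorname{ord}(0)=1$.
	More generally, the order of $k$ in $\mathbb Z_n$ is $n/\gcd(n,k)$.
\end{example}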
Consider the set of all powers of $g$.
It follows easily that this set is a subgroup of $G$; we denote it by $\langle g\rangle$, the subgroup generated by $g$.
It is cyclic, so it is isomorphic to $C_n$ where $n=\operatorname{ord}(g)$ (with $C_\infty=\mathbb Z$ when $g$ has infinite order).
[STATEMENT]
lemma carrier_chain_group [simp]: "carrier(chain_group p X) = singular_chain_set p X"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier (chain_group p X) = singular_chain_set p X
[PROOF STEP]
by (auto simp: chain_group_def singular_chain_def free_Abelian_group_def) |
function Q = cumsummat(N)
%CUMSUMMAT Trigonometric Fourier integration matrix.
% Q = CUMSUMMAT(N) is the matrix that maps function values at N equi-spaced
% points to values of the integral of the interpolating trigonometric
% polynomial at those points.
% [TODO]: Add support.
error('CHEBFUN:TRIGCOLLOC:cumsummat:notSupported', ...
['Indefinite integration is currently not supported for ' ...
'TRIGCOLLOC discretization.\nPlease consider using ' ....
'CHEBCOLLOC2 or ULTRAS discretization.']);
end
|
Formal statement is: lemma irreducibleD: "irreducible p \<Longrightarrow> p = a * b \<Longrightarrow> a dvd 1 \<or> b dvd 1" Informal statement is: If $p$ is irreducible and $p = ab$, then either $a$ divides $1$ or $b$ divides $1$. |
module Text.CSS.Selector
import Data.List
import Data.String
import Text.CSS.Property
import Text.CSS.Render
import Web.Dom
%default total
||| CSS [pseudo classes](https://developer.mozilla.org/en-US/docs/Learn/CSS/Building_blocks/Selectors/Pseudo-classes_and_pseudo-elements#pseudo-classes)
||| Docstrings taken from the linked resource.
public export
data PseudoClass : Type where
||| Matches when the user activates (for example clicks on) an element.
Active : PseudoClass
||| Matches both the :link and :visited states of a link.
AnyLink : PseudoClass
||| Matches an <input> element whose input value is empty.
Blank : PseudoClass
||| Matches a radio button or checkbox in the selected state.
Checked : PseudoClass
||| Matches the element, or an ancestor of the element, that is currently being displayed.
Current : PseudoClass
||| Matches the one or more UI elements that are the default among a set of similar elements.
Default : PseudoClass
||| Select an element based on its directionality (value of the HTML dir attribute or CSS direction property).
Dir : Direction -> PseudoClass
||| Matches user interface elements that are in an disabled state.
Disabled : PseudoClass
||| Matches an element that has no children except optionally white space.
Empty : PseudoClass
||| Matches user interface elements that are in an enabled state.
Enabled : PseudoClass
||| In Paged Media, matches the first page.
First : PseudoClass
||| Matches an element that is first among its siblings.
FirstChild : PseudoClass
||| Matches an element which is first of a certain type among its siblings.
FirstOfType : PseudoClass
||| Matches when an element has focus.
Focus : PseudoClass
||| Matches when an element has focus and the focus should be visible to the user.
FocusVisible : PseudoClass
||| Matches an element with focus plus an element with a descendent that has focus.
FocusWithin : PseudoClass
||| Matches the elements after the current element.
Future : PseudoClass
||| Matches when the user hovers over an element.
Hover : PseudoClass
||| Matches UI elements whose value is in an indeterminate state, usually checkboxes.
Indeterminate : PseudoClass
||| Matches an element with a range when its value is in-range.
InRange : PseudoClass
||| Matches an element, such as an <input>, in an invalid state.
Invalid : PseudoClass
||| Matches an element based on language (value of the HTML lang attribute).
Lang : String -> PseudoClass
||| Matches an element which is last among its siblings.
LastChild : PseudoClass
||| Matches an element of a certain type that is last among its siblings.
LastOfType : PseudoClass
||| In Paged Media, matches left-hand pages.
Left : PseudoClass
||| Matches unvisited links.
Link : PseudoClass
||| Matches links pointing to pages that are in the same site as the current document.
LocalLink : PseudoClass
|||Matches elements from a list of siblings — the siblings are matched by a formula of the form an+b (e.g. 2n + 1 would match elements 1, 3, 5, 7, etc. All the odd ones.)
NthChild : String -> PseudoClass
||| Matches elements from a list of siblings that are of a certain type (e.g. <p> elements) — the siblings are matched by a formula of the form an+b (e.g. 2n + 1 would match elements 1, 3, 5, 7, etc. All the odd ones.)
NthOfType : String -> PseudoClass
||| Matches elements from a list of siblings, counting backwards from the end. The siblings are matched by a formula of the form an+b (e.g. 2n + 1 would match the last element in the sequence, then two elements before that, then two elements before that, etc. All the odd ones, counting from the end.)
NthLastChild : String -> PseudoClass
||| Matches elements from a list of siblings that are of a certain type (e.g. <p> elements), counting backwards from the end. The siblings are matched by a formula of the form an+b (e.g. 2n + 1 would match the last element of that type in the sequence, then two elements before that, then two elements before that, etc. All the odd ones, counting from the end.)
NthLastOfType : String -> PseudoClass
||| Matches an element that has no siblings.
OnlyChild : PseudoClass
||| Matches an element that is the only one of its type among its siblings.
OnlyOfType : PseudoClass
||| Matches form elements that are not required.
Optional : PseudoClass
||| Matches an element with a range when its value is out of range.
OutOfRange : PseudoClass
||| Matches the elements before the current element.
Past : PseudoClass
||| Matches an input element that is showing placeholder text.
PlaceholderShown : PseudoClass
||| Matches an element representing an audio, video, or similar resource that is capable of being “played” or “paused”, when that element is “playing”.
Playing : PseudoClass
||| Matches an element representing an audio, video, or similar resource that is capable of being “played” or “paused”, when that element is “paused”.
Paused : PseudoClass
||| Matches an element if it is not user-alterable.
ReadOnly : PseudoClass
||| Matches an element if it is user-alterable.
ReadWrite : PseudoClass
||| Matches form elements that are required.
Required : PseudoClass
||| In Paged Media, matches right-hand pages.
Right : PseudoClass
||| Matches an element that is the root of the document.
Root : PseudoClass
||| Matches any element that is a scope element.
Scope : PseudoClass
||| Matches an element such as an <input> element, in a valid state.
Valid : PseudoClass
||| Matches an element if it is the target of the current URL (i.e. if it has an ID matching the current URL fragment).
Target : PseudoClass
||| Matches visited links.
Visited : PseudoClass
export
Render PseudoClass where
render Active = "active"
render AnyLink = "any-link"
render Blank = "blank"
render Checked = "checked"
render Current = "current"
render Default = "default"
render (Dir x) = "dir(\{render x})"
render Disabled = "disabled"
render Empty = "empty"
render Enabled = "enabled"
render First = "first"
render FirstChild = "first-child"
render FirstOfType = "first-of-type"
render Focus = "focus"
render FocusVisible = "focus-visible"
render FocusWithin = "focus-within"
render Future = "future"
render Hover = "hover"
render Indeterminate = "indeterminate"
render InRange = "in-range"
render Invalid = "invalid"
render (Lang x) = "lang(\{x})"
render LastChild = "last-child"
render LastOfType = "last-of-type"
render Left = "left"
render Link = "link"
render LocalLink = "local-link"
render (NthChild x) = "nth-child(\{x})"
render (NthOfType x) = "nth-of-type(\{x})"
render (NthLastChild x) = "nth-last-child(\{x})"
render (NthLastOfType x) = "nth-last-of-type(\{x})"
render OnlyChild = "only-child"
render OnlyOfType = "only-of-type"
render Optional = "optional"
render OutOfRange = "out-of-range"
render Past = "past"
render PlaceholderShown = "placeholder-shown"
render Playing = "playing"
render Paused = "paused"
render ReadOnly = "read-only"
render ReadWrite = "read-write"
render Required = "required"
render Right = "right"
render Root = "root"
render Scope = "scope"
render Valid = "valid"
render Target = "target"
render Visited = "visited"
public export
data Selector : (dept : Nat)
-> (hasPseudoClass : Bool)
-> (hasPseudoElem : Bool)
-> Type where
Star : Selector 0 b1 b2
Id : String -> Selector 0 b1 b2
Class : String -> Selector 0 b1 b2
Elem : {str : _} -> (0 tpe : ElementType str t) -> Selector 0 b1 b2
Many : List (Selector 0 True True) -> Selector 1 True True
Pseudo : Selector 0 False False -> PseudoClass -> Selector 0 True b2
export %inline
class : String -> Selector 0 False False
class = Class
export %inline
classes : List String -> Selector 1 True True
classes = Many . map Class
export %inline
elem : {str : _} -> (0 tpe : ElementType str t) -> Selector 0 False False
elem = Elem
export %inline
id : String -> Selector 0 False False
id = Id
export
Render (Selector n b1 b2) where
render Star = "*"
render (Id x) = "#" ++ x
render (Class x) = "." ++ x
render (Elem {str} _) = str
render (Many ss) = fastConcat . intersperse ", " $ map render ss
render (Pseudo s p) = "\{render s}:\{render p}"
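-- Quick illustration (example values of ours, not part of the API): with the
-- constructors above, `render (Id "app")` is "#app", `render (Class "menu")`
-- is ".menu", and `render (Pseudo (Class "menu") Hover)` is ".menu:hover".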
|
//"Copyright [year] <Copyright Owner>"
//
// Created by mrbgn on 5/5/21.
//
#include <rocksdb/db.h>
#include <rocksdb/options.h>
#include <rocksdb/slice.h>
#include <boost/log/common.hpp>
#include <boost/log/core.hpp>
#include <boost/log/expressions.hpp>
#include <boost/log/expressions/keyword.hpp>
#include <boost/log/sinks.hpp>
#include <boost/log/sinks/text_file_backend.hpp>
#include <boost/log/sources/logger.hpp>
#include <boost/log/sources/record_ostream.hpp>
#include <boost/log/sources/severity_logger.hpp>
#include <boost/log/trivial.hpp>
#include <boost/log/utility/setup/common_attributes.hpp>
#include <boost/log/utility/setup/console.hpp>
#include <ctime>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>
#include "PicoSHA.hpp"
#include "ThreadPool.hpp"
#ifndef INCLUDE_DBCS_HPP_
#define INCLUDE_DBCS_HPP_
/**
* @brief Helper that cycles an index through [0, length)
* @param length Number of slots to cycle over (e.g. output column families)
* @param position Current index within the cycle
*/
struct CycledList {
uint32_t length;
uint32_t position;
explicit CycledList(uint32_t len) : length(len) { position = 0; }
uint32_t NextIndex() {
position = (position + 1) % length;
return position;
}
};
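// Example (illustrative): with CycledList cl(3), successive cl.NextIndex()
// calls return 1, 2, 0, 1, 2, ...; a simple round-robin counter, presumably
// used to spread key-value pairs across the output column families.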
/**
* @brief Class to operate on an existing (previously created) rocksdb database
* @class Dbcs
*
* @param log_level Log level passed to Boost.Log; unrecognised values fall
* back to fatal
* @param thread_count Number of worker threads, equal to the number of column
* families in the output database
* @param output Path to the output database
* @param input Path to the input database
*
*/
class Dbcs {
public:
Dbcs(std::string &log_level, uint32_t &thread_count, std::string &output,
std::string &input);
/**
* @brief Reads all column families of the input database
*/
void ReadDatabaseFamilies();
/**
* @brief Reads all key-value pairs of a single column family
* @param iterator Iterator positioned at the first pair of that family
*/
void ReadFromFamily(rocksdb::Iterator *iterator);
/**
* @brief Creates a new output database with thread_count column families
*/
void CreateOutputDb();
/**
* @brief Main routine that reads all data from the input database
*/
void ReadData();
/**
* @brief Hashes a key-value pair and adds it to the output database
* @param key Key string
* @param value Value string
* @param handle_number Index of the column family to put the pair in
*/
void HashPair(std::string key, std::string value, uint32_t handle_number);
/**
* @brief Enables logging via Boost.Log
*/
void EnableLogging();
/**
* @brief Logs a message
* @param message String to log via Boost.Log
*/
void Log(std::string message);
private:
std::string _logLevel;
uint32_t _threadWriteCount;
std::string _inputPath;
std::string _outputPath;
ThreadPool _poolWrite;
std::vector<std::string> _families;
};
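// Minimal usage sketch (illustrative; argument values and call order are
// assumptions, not taken from this header):
//
//   std::string log_level = "info";
//   uint32_t threads = 4;
//   std::string output = "/tmp/out_db";
//   std::string input = "/tmp/in_db";
//   Dbcs dbcs(log_level, threads, output, input);
//   dbcs.EnableLogging();
//   dbcs.CreateOutputDb();
//   dbcs.ReadData();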
#endif // INCLUDE_DBCS_HPP_
|
/-
Copyright (c) 2020 Aaron Anderson. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Aaron Anderson
! This file was ported from Lean 3 source module order.lattice_intervals
! leanprover-community/mathlib commit c3291da49cfa65f0d43b094750541c0731edc932
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.Order.Bounds.Basic
/-!
# Intervals in Lattices
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
In this file, we provide instances of lattice structures on intervals within lattices.
Some of them depend on the order of the endpoints of the interval, and thus are not made
global instances. These are probably not all of the lattice instances that could be placed on these
intervals, but more can be added easily along the same lines when needed.
## Main definitions
In the following, `*` can represent either `c`, `o`, or `i`.
* `set.Ic*.order_bot`
* `set.Ii*.semillatice_inf`
* `set.I*c.order_top`
* `set.I*c.semillatice_inf`
* `set.I**.lattice`
* `set.Iic.bounded_order`, within an `order_bot`
* `set.Ici.bounded_order`, within an `order_top`
-/
variable {α : Type _}
namespace Set
namespace Ico
instance [SemilatticeInf α] {a b : α} : SemilatticeInf (Ico a b) :=
Subtype.semilatticeInf fun x y hx hy => ⟨le_inf hx.1 hy.1, lt_of_le_of_lt inf_le_left hx.2⟩
#print Set.Ico.orderBot /-
/-- `Ico a b` has a bottom element whenever `a < b`. -/
@[reducible]
protected def orderBot [PartialOrder α] {a b : α} (h : a < b) : OrderBot (Ico a b) :=
(isLeast_Ico h).OrderBot
#align set.Ico.order_bot Set.Ico.orderBot
-/
end Ico
namespace Iio
instance [SemilatticeInf α] {a : α} : SemilatticeInf (Iio a) :=
Subtype.semilatticeInf fun x y hx hy => lt_of_le_of_lt inf_le_left hx
end Iio
namespace Ioc
instance [SemilatticeSup α] {a b : α} : SemilatticeSup (Ioc a b) :=
Subtype.semilatticeSup fun x y hx hy => ⟨lt_of_lt_of_le hx.1 le_sup_left, sup_le hx.2 hy.2⟩
#print Set.Ioc.orderTop /-
/-- `Ioc a b` has a top element whenever `a < b`. -/
@[reducible]
protected def orderTop [PartialOrder α] {a b : α} (h : a < b) : OrderTop (Ioc a b) :=
(isGreatest_Ioc h).OrderTop
#align set.Ioc.order_top Set.Ioc.orderTop
-/
end Ioc
namespace Ioi
instance [SemilatticeSup α] {a : α} : SemilatticeSup (Ioi a) :=
Subtype.semilatticeSup fun x y hx hy => lt_of_lt_of_le hx le_sup_left
end Ioi
namespace Iic
instance [SemilatticeInf α] {a : α} : SemilatticeInf (Iic a) :=
Subtype.semilatticeInf fun x y hx hy => le_trans inf_le_left hx
instance [SemilatticeSup α] {a : α} : SemilatticeSup (Iic a) :=
Subtype.semilatticeSup fun x y hx hy => sup_le hx hy
instance [Lattice α] {a : α} : Lattice (Iic a) :=
{ Iic.semilatticeInf, Iic.semilatticeSup with }
instance [Preorder α] {a : α} : OrderTop (Iic a)
where
top := ⟨a, le_refl a⟩
le_top x := x.Prop
/- warning: set.Iic.coe_top -> Set.Iic.coe_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] {a : α}, Eq.{succ u1} α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Iic.{u1} α _inst_1 a)))))) (Top.top.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) (OrderTop.toHasTop.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) (Subtype.hasLe.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Iic.{u1} α _inst_1 a))) (Set.Iic.orderTop.{u1} α _inst_1 a)))) a
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] {a : α}, Eq.{succ u1} α (Subtype.val.{succ u1} α (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Iic.{u1} α _inst_1 a)) (Top.top.{u1} (Set.Elem.{u1} α (Set.Iic.{u1} α _inst_1 a)) (OrderTop.toTop.{u1} (Set.Elem.{u1} α (Set.Iic.{u1} α _inst_1 a)) (Subtype.le.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Iic.{u1} α _inst_1 a))) (Set.Iic.orderTop.{u1} α _inst_1 a)))) a
Case conversion may be inaccurate. Consider using '#align set.Iic.coe_top Set.Iic.coe_topₓ'. -/
@[simp]
theorem coe_top [Preorder α] {a : α} : ↑(⊤ : Iic a) = a :=
rfl
#align set.Iic.coe_top Set.Iic.coe_top
instance [Preorder α] [OrderBot α] {a : α} : OrderBot (Iic a)
where
bot := ⟨⊥, bot_le⟩
bot_le := fun ⟨_, _⟩ => Subtype.mk_le_mk.2 bot_le
/- warning: set.Iic.coe_bot -> Set.Iic.coe_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] {a : α}, Eq.{succ u1} α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Iic.{u1} α _inst_1 a)))))) (Bot.bot.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) (OrderBot.toHasBot.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Iic.{u1} α _inst_1 a)) (Subtype.hasLe.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Iic.{u1} α _inst_1 a))) (Set.Iic.orderBot.{u1} α _inst_1 _inst_2 a)))) (Bot.bot.{u1} α (OrderBot.toHasBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderBot.{u1} α (Preorder.toLE.{u1} α _inst_1)] {a : α}, Eq.{succ u1} α (Subtype.val.{succ u1} α (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Iic.{u1} α _inst_1 a)) (Bot.bot.{u1} (Set.Elem.{u1} α (Set.Iic.{u1} α _inst_1 a)) (OrderBot.toBot.{u1} (Set.Elem.{u1} α (Set.Iic.{u1} α _inst_1 a)) (Subtype.le.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Iic.{u1} α _inst_1 a))) (Set.Iic.orderBot.{u1} α _inst_1 _inst_2 a)))) (Bot.bot.{u1} α (OrderBot.toBot.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))
Case conversion may be inaccurate. Consider using '#align set.Iic.coe_bot Set.Iic.coe_botₓ'. -/
@[simp]
theorem coe_bot [Preorder α] [OrderBot α] {a : α} : ↑(⊥ : Iic a) = (⊥ : α) :=
rfl
#align set.Iic.coe_bot Set.Iic.coe_bot
instance [Preorder α] [OrderBot α] {a : α} : BoundedOrder (Iic a) :=
{ Iic.orderTop, Iic.orderBot with }
end Iic
namespace Ici
instance [SemilatticeInf α] {a : α} : SemilatticeInf (Ici a) :=
Subtype.semilatticeInf fun x y hx hy => le_inf hx hy
instance [SemilatticeSup α] {a : α} : SemilatticeSup (Ici a) :=
Subtype.semilatticeSup fun x y hx hy => le_trans hx le_sup_left
instance [Lattice α] {a : α} : Lattice (Ici a) :=
{ Ici.semilatticeInf, Ici.semilatticeSup with }
instance [DistribLattice α] {a : α} : DistribLattice (Ici a) :=
{ Ici.lattice with le_sup_inf := fun a b c => le_sup_inf }
instance [Preorder α] {a : α} : OrderBot (Ici a)
where
bot := ⟨a, le_refl a⟩
bot_le x := x.Prop
/- warning: set.Ici.coe_bot -> Set.Ici.coe_bot is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] {a : α}, Eq.{succ u1} α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Ici.{u1} α _inst_1 a)))))) (Bot.bot.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) (OrderBot.toHasBot.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) (Subtype.hasLe.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Ici.{u1} α _inst_1 a))) (Set.Ici.orderBot.{u1} α _inst_1 a)))) a
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] {a : α}, Eq.{succ u1} α (Subtype.val.{succ u1} α (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Ici.{u1} α _inst_1 a)) (Bot.bot.{u1} (Set.Elem.{u1} α (Set.Ici.{u1} α _inst_1 a)) (OrderBot.toBot.{u1} (Set.Elem.{u1} α (Set.Ici.{u1} α _inst_1 a)) (Subtype.le.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Ici.{u1} α _inst_1 a))) (Set.Ici.orderBot.{u1} α _inst_1 a)))) a
Case conversion may be inaccurate. Consider using '#align set.Ici.coe_bot Set.Ici.coe_botₓ'. -/
@[simp]
theorem coe_bot [Preorder α] {a : α} : ↑(⊥ : Ici a) = a :=
rfl
#align set.Ici.coe_bot Set.Ici.coe_bot
instance [Preorder α] [OrderTop α] {a : α} : OrderTop (Ici a)
where
top := ⟨⊤, le_top⟩
le_top := fun ⟨_, _⟩ => Subtype.mk_le_mk.2 le_top
/- warning: set.Ici.coe_top -> Set.Ici.coe_top is a dubious translation:
lean 3 declaration is
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] {a : α}, Eq.{succ u1} α ((fun (a : Type.{u1}) (b : Type.{u1}) [self : HasLiftT.{succ u1, succ u1} a b] => self.0) (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (HasLiftT.mk.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (CoeTCₓ.coe.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (coeBase.{succ u1, succ u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) α (coeSubtype.{succ u1} α (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Ici.{u1} α _inst_1 a)))))) (Top.top.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) (OrderTop.toHasTop.{u1} (coeSort.{succ u1, succ (succ u1)} (Set.{u1} α) Type.{u1} (Set.hasCoeToSort.{u1} α) (Set.Ici.{u1} α _inst_1 a)) (Subtype.hasLe.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.Mem.{u1, u1} α (Set.{u1} α) (Set.hasMem.{u1} α) x (Set.Ici.{u1} α _inst_1 a))) (Set.Ici.orderTop.{u1} α _inst_1 _inst_2 a)))) (Top.top.{u1} α (OrderTop.toHasTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))
but is expected to have type
forall {α : Type.{u1}} [_inst_1 : Preorder.{u1} α] [_inst_2 : OrderTop.{u1} α (Preorder.toLE.{u1} α _inst_1)] {a : α}, Eq.{succ u1} α (Subtype.val.{succ u1} α (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Ici.{u1} α _inst_1 a)) (Top.top.{u1} (Set.Elem.{u1} α (Set.Ici.{u1} α _inst_1 a)) (OrderTop.toTop.{u1} (Set.Elem.{u1} α (Set.Ici.{u1} α _inst_1 a)) (Subtype.le.{u1} α (Preorder.toLE.{u1} α _inst_1) (fun (x : α) => Membership.mem.{u1, u1} α (Set.{u1} α) (Set.instMembershipSet.{u1} α) x (Set.Ici.{u1} α _inst_1 a))) (Set.Ici.orderTop.{u1} α _inst_1 _inst_2 a)))) (Top.top.{u1} α (OrderTop.toTop.{u1} α (Preorder.toLE.{u1} α _inst_1) _inst_2))
Case conversion may be inaccurate. Consider using '#align set.Ici.coe_top Set.Ici.coe_topₓ'. -/
@[simp]
theorem coe_top [Preorder α] [OrderTop α] {a : α} : ↑(⊤ : Ici a) = (⊤ : α) :=
rfl
#align set.Ici.coe_top Set.Ici.coe_top
instance [Preorder α] [OrderTop α] {a : α} : BoundedOrder (Ici a) :=
{ Ici.orderTop, Ici.orderBot with }
end Ici
namespace Icc
instance [SemilatticeInf α] {a b : α} : SemilatticeInf (Icc a b) :=
Subtype.semilatticeInf fun x y hx hy => ⟨le_inf hx.1 hy.1, le_trans inf_le_left hx.2⟩
instance [SemilatticeSup α] {a b : α} : SemilatticeSup (Icc a b) :=
Subtype.semilatticeSup fun x y hx hy => ⟨le_trans hx.1 le_sup_left, sup_le hx.2 hy.2⟩
instance [Lattice α] {a b : α} : Lattice (Icc a b) :=
{ Icc.semilatticeInf, Icc.semilatticeSup with }
#print Set.Icc.orderBot /-
/-- `Icc a b` has a bottom element whenever `a ≤ b`. -/
@[reducible]
protected def orderBot [Preorder α] {a b : α} (h : a ≤ b) : OrderBot (Icc a b) :=
(isLeast_Icc h).OrderBot
#align set.Icc.order_bot Set.Icc.orderBot
-/
#print Set.Icc.orderTop /-
/-- `Icc a b` has a top element whenever `a ≤ b`. -/
@[reducible]
protected def orderTop [Preorder α] {a b : α} (h : a ≤ b) : OrderTop (Icc a b) :=
(isGreatest_Icc h).OrderTop
#align set.Icc.order_top Set.Icc.orderTop
-/
#print Set.Icc.boundedOrder /-
/-- `Icc a b` is a `bounded_order` whenever `a ≤ b`. -/
@[reducible]
protected def boundedOrder [Preorder α] {a b : α} (h : a ≤ b) : BoundedOrder (Icc a b) :=
{ Icc.orderTop h, Icc.orderBot h with }
#align set.Icc.bounded_order Set.Icc.boundedOrder
-/
end Icc
end Set
|
flavor 1
srclang 3
id 65535
numfuncs 2
import "huaweitest.mplt"
import "/home/bravewtz/Desktop/openarkcompiler/libjava-core/libjava-core.mplt"
entryfunc &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V
fileinfo {
@INFO_filename "huaweitest.jar"}
srcfileinfo {
1 "huaweitest.java"}
type $__class_meta__ <struct {
@shadow <* void> final,
@monitor i32 final,
@classloader u16 final,
@objsize u16 final,
@itab <* void> final,
@vtab <* void> final,
@gctib <* void> final,
@classinforo <* void> final,
@clinitbridge <* void> final}>
type $__class_meta_ro__ <struct {
@classname <* void> final,
@ifields <* void> final,
@methods <* void> final,
@superclass_or_componentclass <* void> final,
@numoffields u16 final,
@numofmethods u16 final,
@flag u16 final,
@numofsuperclasses u16 final,
@padding u32 final,
@mod i32 final,
@annotation i32 final,
@clinitAddr i32 final}>
type $__method_info__ <struct {
@method_in_vtab_index i64 final,
@declaringclass i64 final,
@addr i64 final,
@mod i32 final,
@methodname i32 final,
@signaturename i32 final,
@annotationvalue i32 final,
@flag u16 final,
@argsize u16 final,
@padding u32 final}>
type $__method_info_compact__ <struct {
@method_in_vtab_index i32 final,
@addr i32 final,
@lebPadding0 u8 final}>
type $__field_info__ <struct {
@offset u64 final,
@mod u32 final,
@flag u16 final,
@index u16 final,
@typeName i64 final,
@fieldname u32 final,
@annotation u32 final,
@declaringclass <* void> final}>
type $__field_info_compact__ <struct {
@offset u32 final,
@lebPadding0 u8 final}>
type $__superclass_meta__ <struct {
@superclassinfo <* void> final}>
type $MUIDFuncDefTabEntry <struct {
@funcUnifiedAddr u64}>
type $MUIDFuncInfTabEntry <struct {
@funcSize u32,
@funcName u32}>
type $MUIDFuncDefMuidTabEntry <struct {
@muidLow u64,
@muidHigh u64}>
type $MUIDDataDefTabEntry <struct {
@dataUnifiedAddr u64}>
type $MUIDDataDefMuidTabEntry <struct {
@muidLow u64,
@muidHigh u64}>
type $MUIDUnifiedUndefTabEntry <struct {
@globalAddress u64}>
type $MUIDUnifiedUndefMuidTabEntry <struct {
@muidLow u64,
@muidHigh u64}>
type $MUIDRangeTabEntry <struct {
@tabBegin <* void>,
@tabEnd <* void>}>
javaclass $Lhuaweitest_3B <$Lhuaweitest_3B> public
var $_C_STR_907ba9a10d33c74f1c37be3b41544334 fstatic <[10] u64> readonly = [0, 0x6b00000000, 0x20490949984357f2, 0x4909490909490920, 0x909494949494949, 0x2020202020202049, 0x2020200909492020, 0x4949494920204920, 0x4949202049494949, 73]
var $_C_STR_194a027f7d649763903a7dcda50cf5a0 fstatic <[9] u64> readonly = [0, 0x5b00000000, 0x94909491faf02dc, 0x2009490949090949, 0x2020492009094920, 0x4920492020202020, 0x4920202020202020, 0x2020090949202020, 73]
var $_C_STR_901191d3899ab5fc7be3d97fe762f978 fstatic <[9] u64> readonly = [0, 0x5f00000000, 0x49494949fbe6d2fb, 0x4909490909490949, 0x2020090949202009, 0x2049202020202049, 0x2020202020492020, 0x909492020200949, 0x492020]
var $_C_STR_4a20a28a5a71422be90661b1072ca29e fstatic <[9] u64> readonly = [0, 0x6500000000, 0x94909494f3904e4, 0x2009490949090949, 0x4920202009094920, 0x2020202049202020, 0x2009492020204920, 0x4949494949492020, 0x492020204949]
var $_C_STR_e4c52869a15445bee0db530b07e9b98a fstatic <[8] u64> readonly = [0, 0x5100000000, 0x949094949ddeb72, 0x2009490949090949, 0x4920490909094920, 0x4920202020202020, 0x949202020094920, 0x49202009]
var $_C_STR_58a8c3b0d70b92b64324906ef9c02c90 fstatic <[10] u64> readonly = [0, 0x6d00000000, 0x94909497041ce40, 0x4949494949494949, 0x4949494949490949, 0x2049200909494949, 0x2020202020202020, 0x4949492020200949, 0x4920094949494949, 0x4949]
var $Ljava_2Flang_2FSystem_3B_7Cout extern <* <$Ljava_2Fio_2FPrintStream_3B>> final public static
var $__cinf_Ljava_2Flang_2FString_3B <$__class_meta__>
func &MCC_GetOrInsertLiteral () <* <$Ljava_2Flang_2FString_3B>>
var $__vtb_Lhuaweitest_3B fstatic <[11] <* void>> = [16, 24, 32, 36, 8, 4, 48, 12, 20, 44, 28]
var $__cinf_Lhuaweitest_3B <$__class_meta__> public
var $__methods_info__Lhuaweitest_3B fstatic <[2] <$__method_info__>> public = [[1= 0xfff6, 2= addrof ptr $__cinf_Lhuaweitest_3B, 3= addroffunc ptr &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V, 4= 9, 5= 56, 6= 76, 7= 168, 8= 0xa201, 9= 1, 10= 0], [1= 0xfff6, 2= addrof ptr $__cinf_Lhuaweitest_3B, 3= addroffunc ptr &Lhuaweitest_3B_7C_3Cinit_3E_7C_28_29V, 4= 0x10001, 5= 184, 6= 212, 7= 168, 8= 0xad81, 9= 1, 10= 0]]
var $__cinf_Ljava_2Flang_2FObject_3B extern <$__class_meta__> public
var $__superclasses__Lhuaweitest_3B fstatic <[1] <$__superclass_meta__>> public = [[1= 0x4000000000000000]]
var $__classinforo__Lhuaweitest_3B fstatic <$__class_meta_ro__> public = [1= 4, 2= 0, 3= addrof ptr $__methods_info__Lhuaweitest_3B, 4= addrof ptr $__superclasses__Lhuaweitest_3B, 5= 0, 6= 2, 7= 0, 8= 1, 9= 0, 10= 33, 11= 168, 12= 0]
var $MCC_GCTIB__Lhuaweitest_3B fstatic <* void> public
var $classStateInitialized u64
var $__cinf_Lhuaweitest_3B <$__class_meta__> public = [1= 0x1bf435e944cf11ae, 2= 0, 3= 0xffff, 4= 0, 5= 0, 6= addrof ptr $__vtb_Lhuaweitest_3B, 7= addrof ptr $MCC_GCTIB__Lhuaweitest_3B, 8= addrof ptr $__classinforo__Lhuaweitest_3B, 9= addrof ptr $classStateInitialized]
var $__muid_classmetadata_bucket$$huaweitest_jar <[1] <* void>> public = [addrof ptr $__cinf_Lhuaweitest_3B]
func &MCC_Reflect_ThrowCastException nosideeffect () void
func &MCC_Reflect_Check_Casting_NoArray nosideeffect () void
func &MCC_Reflect_Check_Casting_Array nosideeffect () void
func &MCC_CheckThrowPendingException nosideeffect () void
func &MCC_PreNativeCall (var %caller ref) <* void>
func &MCC_PostNativeCall (var %env <* void>) void
func &MCC_DecodeReference nosideeffect (var %obj ref) ref
func &MCC_CallFastNative (var %func <* void>) <* void>
func &MCC_CallSlowNative0 (var %func <* void>) <* void>
func &MCC_CallSlowNative1 (var %func <* void>) <* void>
func &MCC_CallSlowNative2 (var %func <* void>) <* void>
func &MCC_CallSlowNative3 (var %func <* void>) <* void>
func &MCC_CallSlowNative4 (var %func <* void>) <* void>
func &MCC_CallSlowNative5 (var %func <* void>) <* void>
func &MCC_CallSlowNative6 (var %func <* void>) <* void>
func &MCC_CallSlowNative7 (var %func <* void>) <* void>
func &MCC_CallSlowNative8 (var %func <* void>) <* void>
func &MCC_CallFastNativeExt (var %func <* void>) <* void>
func &MCC_CallSlowNativeExt (var %func <* void>) <* void>
func &MCC_SetReliableUnwindContext nosideeffect () void
var $__reg_jni_func_tab$$huaweitest_jar <[0] <* void>>
var $__cinf_Ljava_2Flang_2FSystem_3B extern <$__class_meta__>
func &MCC_getFuncPtrFromItabSecondHash64 nosideeffect () ptr
var $__muid_func_def_tab$$huaweitest_jar fstatic <[2] <$MUIDFuncDefTabEntry>> = [[1= addroffunc ptr &Lhuaweitest_3B_7C_3Cinit_3E_7C_28_29V], [1= addroffunc ptr &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V]]
var $__muid_func_inf_tab$$huaweitest_jar fstatic <[2] <$MUIDFuncInfTabEntry>> = [[1= addroffunc ptr &Lhuaweitest_3B_7C_3Cinit_3E_7C_28_29V, 2= addroffunc ptr &Lhuaweitest_3B_7C_3Cinit_3E_7C_28_29V], [1= addroffunc ptr &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V, 2= addroffunc ptr &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V]]
var $__muid_func_def_muid_tab$$huaweitest_jar fstatic <[2] <$MUIDFuncDefMuidTabEntry>> = [[1= -1722806870279642204, 2= -946015414860200947], [1= -1343471055290971758, 2= -2119983759179258436]]
var $__muid_func_muid_idx_tab$$huaweitest_jar fstatic <[2] u32> = [1, 0]
var $__muid_data_def_tab$$huaweitest_jar fstatic <[1] <$MUIDDataDefTabEntry>> = [[1= addrof ptr $__cinf_Lhuaweitest_3B]]
var $__muid_data_def_muid_tab$$huaweitest_jar fstatic <[1] <$MUIDDataDefMuidTabEntry>> = [[1= -6287124398786487729, 2= -1011506809175871035]]
var $__muid_func_undef_tab$$huaweitest_jar fstatic <[12] <$MUIDUnifiedUndefTabEntry>> = [[1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0], [1= 0]]
var $__muid_func_undef_muid_tab$$huaweitest_jar fstatic <[12] <$MUIDUnifiedUndefMuidTabEntry>> = [[1= 0x3e32352aee789835, 2= -3887705395317205813], [1= -4187412136968710015, 2= -3800091941095621250], [1= 0x6742c234127e0a27, 2= -3762262047879347071], [1= 0x7230554331c55d92, 2= -3676689525926909155], [1= -715372855679083712, 2= -2647497990906227723], [1= -7464356948810446352, 2= -2259485500590180091], [1= 0x783627f2afd1cbde, 2= -2046851302095768916], [1= -2701934576591406938, 2= -1693831364093527548], [1= 0x126777a7fe39e1fb, 2= -1314856249532362766], [1= 0xbf40578f3343f7a, 2= -1198421541845410999], [1= 0x7ca2bdf69a6c7c94, 2= -801329978528900548], [1= 0x477aafa4d7dd102b, 2= -442561182569419835]]
var $__muid_data_undef_tab$$huaweitest_jar fstatic <[3] <$MUIDUnifiedUndefTabEntry>> = [[1= addrof ptr $__cinf_Ljava_2Flang_2FObject_3B], [1= addrof ptr $Ljava_2Flang_2FSystem_3B_7Cout], [1= addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B]]
var $__muid_data_undef_muid_tab$$huaweitest_jar fstatic <[3] <$MUIDUnifiedUndefMuidTabEntry>> = [[1= -567417612161374449, 2= -3298852447504547670], [1= 0x191283ac418c4bb9, 2= -1676204161023949463], [1= -5921653145571052587, 2= -171150348656858163]]
var $__muid_range_tab$$huaweitest_jar fstatic <[29] <$MUIDRangeTabEntry>> = [[1= 0x5b15548b999b8d93, 2= -222031886891092138], [1= -5449160688458699489, 2= -3307888487811147755], [1= 2, 2= 2], [1= 3, 2= 3], [1= 4, 2= 4], [1= 5, 2= 5], [1= 6, 2= 6], [1= 7, 2= 7], [1= 8, 2= 8], [1= 9, 2= 9], [1= 0, 2= 0], [1= 11, 2= 11], [1= 12, 2= 12], [1= 13, 2= 13], [1= 14, 2= 14], [1= 15, 2= 15], [1= addrof ptr $__muid_func_def_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_def_tab$$huaweitest_jar], [1= 0, 2= 0], [1= addrof ptr $__muid_func_inf_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_inf_tab$$huaweitest_jar], [1= addrof ptr $__muid_func_undef_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_undef_tab$$huaweitest_jar], [1= addrof ptr $__muid_data_def_tab$$huaweitest_jar, 2= addrof ptr $__muid_data_def_tab$$huaweitest_jar], [1= 0, 2= 0], [1= addrof ptr $__muid_data_undef_tab$$huaweitest_jar, 2= addrof ptr $__muid_data_undef_tab$$huaweitest_jar], [1= addrof ptr $__muid_func_def_muid_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_def_muid_tab$$huaweitest_jar], [1= addrof ptr $__muid_func_undef_muid_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_undef_muid_tab$$huaweitest_jar], [1= addrof ptr $__muid_data_def_muid_tab$$huaweitest_jar, 2= addrof ptr $__muid_data_def_muid_tab$$huaweitest_jar], [1= addrof ptr $__muid_data_undef_muid_tab$$huaweitest_jar, 2= addrof ptr $__muid_data_undef_muid_tab$$huaweitest_jar], [1= addrof ptr $__muid_func_muid_idx_tab$$huaweitest_jar, 2= addrof ptr $__muid_func_muid_idx_tab$$huaweitest_jar], [1= 0, 2= 0]]
var $__reflection_strtab$$huaweitest_jar fstatic <[57] u8> = [0, 76, 104, 117, 97, 119, 101, 105, 116, 101, 115, 116, 59, 0, 109, 97, 105, 110, 0, 40, 91, 76, 106, 97, 118, 97, 47, 108, 97, 110, 103, 47, 83, 116, 114, 105, 110, 103, 59, 41, 86, 0, 48, 33, 48, 0, 60, 105, 110, 105, 116, 62, 0, 40, 41, 86, 0]
var $__compilerVersionNum$$huaweitest_jar <[0] <* void>> = [1, 0]
func &Lhuaweitest_3B_7C_3Cinit_3E_7C_28_29V public constructor (var %_this <* <$Lhuaweitest_3B>>) void {
funcid 48153
var %Reg1_R43694 <* <$Lhuaweitest_3B>> localrefvar
var %Reg1_R57 <* <$Ljava_2Flang_2FObject_3B>> localrefvar
var %__muid_symptr <* void>
intrinsiccall MCCIncRef (dread ref %_this)
intrinsiccall MCCDecRef (dread ref %Reg1_R43694)
dassign %Reg1_R43694 0 (dread ref %_this)
#INSTIDX : 0||0000: aload_0
#INSTIDX : 1||0001: invokespecial
regassign ptr %1 (dread ref %Reg1_R57)
dassign %Reg1_R57 0 (retype ref <* <$Ljava_2Flang_2FObject_3B>> (dread ref %Reg1_R43694))
intrinsiccall MCCIncRef (dread ref %Reg1_R57)
intrinsiccall MCCDecRef (regread ptr %1)
#Call function:Ljava_2Flang_2FObject_3B_7C_3Cinit_3E_7C_28_29V
dassign %__muid_symptr 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[12] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_func_undef_tab$$huaweitest_jar, constval i64 9)))
icallassigned (dread ptr %__muid_symptr, dread ref %Reg1_R57) {}
#INSTIDX : 4||0004: return
intrinsiccall MPL_CLEANUP_LOCALREFVARS (dread ref %Reg1_R43694, dread ref %Reg1_R57)
return ()
}
func &Lhuaweitest_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V public static (var %Reg2_R743 <* <[] <* <$Ljava_2Flang_2FString_3B>>>>) void {
funcid 48154
var %Reg0_R562 <* <$Ljava_2Fio_2FPrintStream_3B>> localrefvar
var %Reg1_R43 <* <$Ljava_2Flang_2FString_3B>> localrefvar
var %L_STR_161334 <* <$Ljava_2Flang_2FString_3B>>
var %L_STR_161335 <* <$Ljava_2Flang_2FString_3B>>
var %L_STR_161336 <* <$Ljava_2Flang_2FString_3B>>
var %L_STR_161337 <* <$Ljava_2Flang_2FString_3B>>
var %L_STR_161338 <* <$Ljava_2Flang_2FString_3B>>
var %L_STR_161339 <* <$Ljava_2Flang_2FString_3B>>
#INSTIDX : 0||0000: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %1 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %1)
#INSTIDX : 3||0003: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_907ba9a10d33c74f1c37be3b41544334) { dassign %L_STR_161334 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161334)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161334)
#INSTIDX : 5||0005: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 8||0008: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %2 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %2)
#INSTIDX : 11||000b: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_194a027f7d649763903a7dcda50cf5a0) { dassign %L_STR_161335 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161335)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161335)
#INSTIDX : 13||000d: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 16||0010: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %3 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %3)
#INSTIDX : 19||0013: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_901191d3899ab5fc7be3d97fe762f978) { dassign %L_STR_161336 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161336)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161336)
#INSTIDX : 21||0015: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 24||0018: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %4 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %4)
#INSTIDX : 27||001b: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_4a20a28a5a71422be90661b1072ca29e) { dassign %L_STR_161337 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161337)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161337)
#INSTIDX : 29||001d: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 32||0020: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %5 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %5)
#INSTIDX : 35||0023: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_e4c52869a15445bee0db530b07e9b98a) { dassign %L_STR_161338 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161338)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161338)
#INSTIDX : 37||0025: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 40||0028: getstatic
intrinsiccall MPL_CLINIT_CHECK (addrof ptr $__cinf_Ljava_2Flang_2FSystem_3B)
regassign ptr %6 (dread ref %Reg0_R562)
#Read from: Ljava_2Flang_2FSystem_3B_7Cout
dassign %Reg0_R562 0 (iread ref <* <* <$Ljava_2Fio_2FPrintStream_3B>>> 0 (iread ptr <* <$MUIDUnifiedUndefTabEntry>> 1 (array 0 ptr <* <[3] <$MUIDUnifiedUndefTabEntry>>> (addrof ptr $__muid_data_undef_tab$$huaweitest_jar, constval i64 1))))
intrinsiccall MCCIncRef (dread ref %Reg0_R562)
intrinsiccall MCCDecRef (regread ptr %6)
#INSTIDX : 43||002b: ldc
callassigned &MCC_GetOrInsertLiteral (addrof ptr $_C_STR_58a8c3b0d70b92b64324906ef9c02c90) { dassign %L_STR_161339 0 }
intrinsiccall MCCIncRef (dread ptr %L_STR_161339)
intrinsiccall MCCDecRef (dread ref %Reg1_R43)
dassign %Reg1_R43 0 (dread ptr %L_STR_161339)
#INSTIDX : 45||002d: invokevirtual
icallassigned (
iread u64 <* u64> 0 (add ptr (
iread ptr <* <$__class_meta__>> 6 (iread ref <* <$Ljava_2Flang_2FObject_3B>> 1 (dread ref %Reg0_R562)),
constval u32 312)),
dread ref %Reg0_R562,
dread ref %Reg1_R43) {}
#INSTIDX : 48||0030: return
intrinsiccall MPL_CLEANUP_LOCALREFVARS (dread ref %Reg0_R562, dread ref %Reg1_R43)
return ()
}
|
# Scrapes turnout data for the Aug. 9, 2016 Minnesota primary election from the Minnesota Secretary of State's FTP file, processes that data and saves it.
# Requires countycode-fips converter.csv, which the script reads from one level above the working directory ("../countycode-fips converter.csv"). It should have been provided if you downloaded this entire folder.
# Also requires a username and password to access the Secretary of State's FTP server. This is not provided. It should be placed in a file called "sospw.txt" in the working directory; that file should be a single line with the format "username:password" with no quotes.
# Note that this will calculate turnout percentage as a share of registered voters. The Secretary of State's formal turnout percentages are based on total eligible voters in the state, but there's no available figure on the congressional district level.
# Will output three csv files: "mnturnout.csv", "cdturnout.csv" and "hdturnout.csv"
# Code by David H. Montgomery for the Pioneer Press.
library(RCurl)
library(tidyr)
library(plyr)
library(dplyr)
# Load the password from the file. You need a working password for this to work.
pw <- scan("sospw.txt", what = character(), quiet = TRUE)
# Read the precinct results file in from the FTP server.
turnout <- read.table(
textConnection(
getURL("ftp://ftp.sos.state.mn.us/20160809/pctstats.txt",userpwd = pw)),
sep=";",
quote = "\"", # Don't count apostrophes as strings, otherwise things like "Mary's Point" will break the data.
colClasses=c("factor","numeric","factor","factor",rep("numeric",8)) #Set classes for each column. Important to make precinct IDs show up as strings, so you get "0005" instead of "5".
)
colnames(turnout) <- c("State","CountyID","PrecinctID","PrecinctName","HasReported","amReg","SameDayReg","Num.sigs","AbsenteeBallots","FederalAbsentees","PresAbsentees","TotalVoted") # Assign column names
turnout <- turnout[,-c(10:11)] # Drop some unneeded columns.
# Load a table that converts between the Secretary of State's county codes and formal FIPS codes.
fips <- read.csv("../countycode-fips converter.csv")
fips <- fips[,-3] # Drop an unnecessary column
colnames(fips) <- c("CountyID","County","FIPS") # Label columns
turnout <- merge(fips,turnout, by = "CountyID") # Merge with the voting data by County IDs
turnout$VTD <- paste0(turnout$FIPS,turnout$PrecinctID) # Combine the FIPs code and the Precinct IDs to create a nine-digit VTD code.
turnout$turnout.pct <- as.numeric(turnout$TotalVoted/(turnout$amReg + turnout$SameDayReg)) # Calculate turnout: total voters divided by total registered, i.e. those registered before Election Day plus those who registered on it.
turnout$absentee.rate <- as.numeric(turnout$AbsenteeBallots/turnout$TotalVoted) # Calculate the percentage of voters who voted absentee.
# Load in a second table with details about each precinct, which we'll merge with our turnout results for a more readable and informative table. Same format as above.
precincts <- read.table(
textConnection(
getURL("ftp://ftp.sos.state.mn.us/20160809/PrctTbl.txt",userpwd = pw)),
sep=";",
quote = "\"",
colClasses=c("numeric",rep("factor",2),"numeric",rep("factor",6))
)
precincts <- precincts[,-c(6:10)] # Drop some unneeded columns
colnames(precincts) <- c("CountyID","PrecinctID","PrecinctName","CongressionalDistrict","LegislativeDistrict")
precincts <- merge(fips, precincts, by = "CountyID") # Merge with the FIPS table by County ID
precincts$VTD <- paste0(precincts$FIPS,precincts$PrecinctID) # Concatenate nine-digit VTD votes for the precinct data.
turnout <- merge(precincts,turnout[,c(7:15)], by = "VTD") # Merge the turnout data with the precinct data by VTDs.
write.csv(turnout,"mnturnout.csv", row.names=FALSE) # Save this turnout data to disk.
# Create a table showing turnout results by congressional district.
cdturnout <- turnout %>% group_by(CongressionalDistrict) %>% summarise(
votes = sum(TotalVoted), # Add a column for total votes.
RV = sum(amReg) + sum(SameDayReg), # Add a column for registered voters combining Election Day and pre-election registrations
Reporting = sum(HasReported)) # Add a column for the number of precincts reporting.
cdturnout$pct <- cdturnout$votes / cdturnout$RV # Calculate turnout percentage as a share of registered voters
# Repeat the same calculation but for state house district.
hdturnout <- turnout %>% group_by(LegislativeDistrict) %>% summarise(
votes = sum(TotalVoted),
RV = sum(amReg) + sum(SameDayReg))
hdturnout$pct <- hdturnout$votes / hdturnout$RV
# Write the congressional district and legislative district turnout tables to CSV files
write.csv(cdturnout,"cdturnout.csv", row.names=FALSE)
write.csv(hdturnout,"hdturnout.csv", row.names=FALSE)
# Return some summary stats on the command line.
print(paste0("Precincts reporting: ",sum(turnout[,9]),"/",nrow(turnout)," (",round(sum(turnout[,9])/nrow(turnout),4),")"))
print(paste0("Total votes: ",sum(turnout$TotalVoted)))
print(paste0("Total registered: ",sum(turnout$amReg) + sum(turnout$SameDayReg)))
print(paste0("Percent of RVs voting: ",round(sum(turnout$TotalVoted)/(sum(turnout$amReg)+sum(turnout$SameDayReg)),4)))
print(paste0("Percent of eligibles voting: ",round(sum(turnout$TotalVoted)/3967061,4)))
|
theory MultiWorkerWithQueue
imports Main "../spec/SQS"
begin
datatype Node
= Worker nat
| Queue
| Acceptor
datatype 'val Message
= SQSRequest "'val SQSRequest"
| SQSResponse "'val SQSResponse"
| Accept 'val
record 'val WorkerState =
alive :: bool
process :: "(MessageId \<times> 'val) list"
record 'val State =
workers :: "nat \<rightharpoonup> 'val WorkerState"
accepted :: "'val set"
queue :: "(Node, 'val) SQSState"
datatype ('proc,'msg) Send = Send (msg_recipient: 'proc) (send_payload: 'msg)
datatype 'msg Event
= Received (msg_sender: Node) (received_message: 'msg)
| WorkerStep
| QueueStep
fun valid_event :: "(Node, 'val Message) Send Event \<Rightarrow> Node \<Rightarrow> (Node \<times> (Node, 'val Message) Send) set \<Rightarrow> bool" where
"valid_event (Received sender msg) _ msgs = ((sender, msg) \<in> msgs)"
| "valid_event WorkerStep _ _ = True"
| "valid_event _ _ _ = False"
type_synonym ('st, 'val) StepStateFunction = "'st \<Rightarrow> (Node, 'val Message) Send Event \<Rightarrow> ('st \<times> (Node, 'val Message) Send set)"
type_synonym 'val StepFunction = "Node \<Rightarrow> ('val State, 'val) StepStateFunction"
fun worker_step :: "('val WorkerState, 'val) StepStateFunction" where
"worker_step st WorkerStep = (if List.null (process st)
then (st, {Send Queue (SQSRequest (Receive 10))})
else (st \<lparr> process := tl (process st) \<rparr>, {Send Acceptor (Accept (snd (hd (process st)))), Send Queue (SQSRequest (Delete (fst (hd (process st)))))}))"
| "worker_step st (Received _ (Send _ (SQSResponse (Returned xs)))) = (st \<lparr> process := xs \<rparr>, {})"
| "worker_step st _ = (st, {})"
fun queue_step :: "((Node, 'val) SQSState, 'val) StepStateFunction" where
"queue_step st (Received proc (Send _ (SQSRequest req))) = (st \<lparr> request := Some (proc,req) \<rparr>, {})"
| "queue_step st QueueStep = (let st' = SQS_step st in (st', case response st' of None \<Rightarrow> {} | Some (proc,resp) \<Rightarrow> {Send proc (SQSResponse resp)}))"
| "queue_step st _ = (st, {})"
fun step :: "'val StepFunction" where
"step (Worker proc) st (Received target msg) =
(if Worker proc = target then (let (w, ms) = worker_step (the (workers st proc)) (Received target msg) in (st \<lparr> workers := workers st (proc \<mapsto> w) \<rparr>, ms)) else (st, {}))
"
| "step Acceptor st (Received _ (Send _ (Accept val))) = (st \<lparr> accepted := accepted st \<union> {val} \<rparr>, {})"
| "step _ st _ = (st, {})"
record 'val world =
world_state :: "'val State"
world_events :: "(Node \<times> (Node, 'val Message) Send Event) list"
world_messages :: "(Node \<times> (Node, 'val Message) Send) set"
inductive execute_step :: "'val StepFunction \<Rightarrow> 'val world \<Rightarrow> 'val world \<Rightarrow> bool" where
exec_step: "\<lbrakk> valid_event event proc msgs;
step' proc st event = (st', ns);
events' = events @ [(proc, event)];
msgs' = msgs \<union> (\<lambda>msg. (proc,msg)) ` ns
\<rbrakk> \<Longrightarrow> execute_step step' \<lparr> world_state = st, world_events = events, world_messages = msgs \<rparr> \<lparr> world_state = st', world_events = events', world_messages = msgs' \<rparr>"
definition execute where
"execute step' \<equiv> rtranclp (execute_step step')"
inductive execute_traced :: "'val StepFunction \<Rightarrow> 'val world \<Rightarrow> 'val world \<Rightarrow> 'val world list \<Rightarrow> bool" where
execute_traced_empty: "execute_traced step' w w []"
| execute_traced_cons: "\<lbrakk> execute_traced step' w1 w2 path; execute_step step' w2 w3 \<rbrakk> \<Longrightarrow> execute_traced step' w1 w3 (path @ [w2])"
lemma execute_induct: "execute step' w w' \<Longrightarrow> (\<And>w. P w w) \<Longrightarrow> (\<And>w1 w2 w3. execute step' w1 w2 \<Longrightarrow> P w1 w2 \<Longrightarrow> execute_step step' w2 w3 \<Longrightarrow> P w1 w3) \<Longrightarrow> P w w'"
apply (simp add: execute_def)
apply (erule rtranclp_induct)
apply auto
done
lemma execute_imps_trace:
assumes "execute step' w w'"
obtains path where "execute_traced step' w w' path"
apply (induct rule: execute_induct)
apply (rule assms)
using execute_traced_empty apply blast
using execute_traced_cons by blast
lemma trace_imps_execute:
assumes "execute_traced step' w w' path"
shows "execute step' w w'"
apply (rule execute_traced.induct)
apply (rule assms)
apply (simp add: execute_def)
by (simp add: execute_def)
lemma execute_traced_coherence_0: "\<lbrakk> execute_traced step w w' path; length path = 0 \<rbrakk> \<Longrightarrow> w = w'"
apply (induct rule: execute_traced.induct)
apply simp
apply simp
done
lemma execute_traced_coherence_Suc:
assumes "execute_traced step w w' path" "length path = Suc n"
obtains w0 path' where "path = path' @ [w0]" "execute_traced step w w0 path'" "execute_step step w0 w'" "length path' = n"
using assms
apply (induct arbitrary: rule: execute_traced.induct)
apply simp
apply simp
done
lemma step_accepted_monotonic: "step proc st event = (st',ns) \<Longrightarrow> accepted st \<subseteq> accepted st'"
apply (cases proc)
apply auto
proof-
fix x1 x
assume hyp: "step (Worker x1) st event = (st', ns)" "proc = Worker x1" "x \<in> accepted st"
have "step (Worker x1) st event = (st',ns) \<Longrightarrow> accepted st \<subseteq> accepted st'"
apply (cases event)
apply auto
proof-
fix x11 x12 x
assume "(if Worker x1 = x11 then let (w, y) = worker_step (the (workers st x1)) (Received x11 x12) in (st\<lparr>workers := workers st(x1 \<mapsto> w)\<rparr>, y) else (st, {})) = (st', ns)"
and "event = Received x11 x12" "x \<in> accepted st"
have "
(if Worker x1 = x11 then let (w, y) = worker_step (the (workers st x1)) (Received x11 x12) in (st\<lparr>workers := workers st(x1 \<mapsto> w)\<rparr>, y) else (st, {})) =
(st', ns) \<Longrightarrow>
event = Received x11 x12 \<Longrightarrow> x \<in> accepted st \<Longrightarrow> x \<in> accepted st'"
apply (cases x11)
apply auto
proof-
fix x1a
show "(if x1 = x1a then let (w, y) = worker_step (the (workers st x1)) (Received (Worker x1a) x12) in (st\<lparr>workers := workers st(x1 \<mapsto> w)\<rparr>, y) else (st, {})) =
(st', ns) \<Longrightarrow>
event = Received (Worker x1a) x12 \<Longrightarrow> x \<in> accepted st \<Longrightarrow> x11 = Worker x1a \<Longrightarrow> x \<in> accepted st'"
apply (cases "x1 = x1a")
apply auto
apply (cases "worker_step (the (workers st x1a)) (Received (Worker x1a) x12)")
apply auto
done
qed
show "x \<in> accepted st'"
by (simp add: \<open>(if Worker x1 = x11 then let (w, y) = worker_step (the (workers st x1)) (Received x11 x12) in (st\<lparr>workers := workers st(x1 \<mapsto> w)\<rparr>, y) else (st, {})) = (st', ns)\<close> \<open>\<lbrakk>(if Worker x1 = x11 then let (w, y) = worker_step (the (workers st x1)) (Received x11 x12) in (st\<lparr>workers := workers st(x1 \<mapsto> w)\<rparr>, y) else (st, {})) = (st', ns); event = Received x11 x12; x \<in> accepted st\<rbrakk> \<Longrightarrow> x \<in> accepted st'\<close> \<open>event = Received x11 x12\<close> \<open>x \<in> accepted st\<close>)
qed
show "x \<in> accepted st'"
using \<open>step (Worker x1) st event = (st', ns) \<Longrightarrow> accepted st \<subseteq> accepted st'\<close> hyp(1) hyp(3) by blast
next
fix x
show "step Acceptor st event = (st', ns) \<Longrightarrow> proc = Acceptor \<Longrightarrow> x \<in> accepted st \<Longrightarrow> x \<in> accepted st'"
apply (cases event)
apply auto
proof-
fix x11 x12
show "step Acceptor st (Received x11 x12) = (st', ns) \<Longrightarrow> proc = Acceptor \<Longrightarrow> x \<in> accepted st \<Longrightarrow> event = Received x11 x12 \<Longrightarrow> x \<in> accepted st'"
apply (cases x12)
apply (cases "send_payload x12")
apply auto
done
qed
qed
lemma accepted_step_monotonic: "execute_step step w w' \<Longrightarrow> accepted (world_state w) \<subseteq> accepted (world_state w')"
apply (erule execute_step.cases)
apply simp
using step_accepted_monotonic by blast
lemma accepted_monotonic:
fixes w :: "'val world"
shows "execute step w w' \<Longrightarrow> accepted (world_state w) \<subseteq> accepted (world_state w')"
proof-
assume "execute step w w'"
obtain path where "execute_traced step w w' path"
using \<open>execute step w w'\<close> execute_imps_trace by blast
{
fix n :: nat
have "\<lbrakk> length path = n; execute_traced step w w' path \<rbrakk> \<Longrightarrow> accepted (world_state w) \<subseteq> accepted (world_state w')"
apply (induct n arbitrary: path w w')
using execute_traced_coherence_0 apply blast
proof-
fix n :: nat and path :: "'val world list" and w w'
assume hyp: "(\<And>(path :: 'val world list) w w'. length path = n \<Longrightarrow> execute_traced step w w' path \<Longrightarrow> accepted (world_state w) \<subseteq> accepted (world_state w'))"
and "length path = Suc n" "execute_traced step w w' path"
obtain w0 :: "'val world" and path' where "path = path' @ [w0]" "execute_traced step w w0 path'" "execute_step step w0 w'" "length path' = n"
using \<open>execute_traced step w w' path\<close> \<open>length path = Suc n\<close> execute_traced_coherence_Suc by blast
have "accepted (world_state w) \<subseteq> accepted (world_state w0)"
using \<open>execute_traced step w w0 path'\<close> \<open>length path' = n\<close> hyp by auto
moreover have "accepted (world_state w0) \<subseteq> accepted (world_state w')"
by (simp add: \<open>execute_step step w0 w'\<close> accepted_step_monotonic)
ultimately show "accepted (world_state w) \<subseteq> accepted (world_state w')"
by simp
qed
}
thus ?thesis
using \<open>execute_traced step w w' path\<close> by blast
qed
definition initialState :: "nat \<Rightarrow> 'val State" where
"initialState W \<equiv> \<lparr> workers = map_of (map (\<lambda>i. (i, \<lparr> alive = True, process = [] \<rparr>)) (map nat [0..int W])), accepted = {}, queue = initialSQSState \<rparr>"
definition initialWorld :: "nat \<Rightarrow> 'val world" where
"initialWorld W \<equiv> \<lparr> world_state = initialState W, world_events = [], world_messages = {} \<rparr>"
lemma worker_step_wont_change_accepted:
"step (Worker w) st event = (st',ns) \<Longrightarrow> accepted st' = accepted st"
apply (cases event)
apply simp_all
proof-
fix x11 x12
show "(if Worker w = x11 then let (wa, y) = worker_step (the (workers st w)) (Received x11 x12) in (st\<lparr>workers := workers st(w \<mapsto> wa)\<rparr>, y) else (st, {})) =
(st', ns) \<Longrightarrow>
event = Received x11 x12 \<Longrightarrow> accepted st' = accepted st"
apply (cases "Worker w = x11")
apply simp_all
apply (cases "worker_step (the (workers st w)) (Received x11 x12)")
apply simp
apply (cases st, cases st')
apply simp
done
qed
lemma step_accepted_change_onlyif:
assumes "step proc st event = (st', ns)" "val \<in> accepted st' - accepted st"
obtains r s where "event = Received r (Send s (Accept val))"
using assms
apply (cases event)
apply simp_all
apply (cases proc)
apply simp_all
using assms(1) worker_step_wont_change_accepted apply blast
proof-
fix x11 x12
show "(\<And>r s. x11 = r \<and> x12 = Send.Send s (Accept val) \<Longrightarrow> thesis) \<Longrightarrow>
step Acceptor st (Received x11 x12) = (st', ns) \<Longrightarrow> val \<in> accepted st' \<and> val \<notin> accepted st \<Longrightarrow> event = Received x11 x12 \<Longrightarrow> proc = Acceptor \<Longrightarrow> thesis"
apply (cases x12)
apply (cases "send_payload x12")
apply auto
done
qed
lemma execute_step_accepted_change_onlyif:
assumes "execute_step step w w'" "val \<in> accepted (world_state w') - accepted (world_state w)"
obtains p r s where "world_events w' = world_events w @ [(p, Received r (Send s (Accept val)))]"
using assms
apply (cases rule: execute_step.cases)
apply simp
by (meson Diff_iff step_accepted_change_onlyif)
lemma execute_step_events_increasing_as_set:
assumes "execute_step step w w'"
shows "set (world_events w) \<subseteq> set (world_events w')"
using assms
apply (cases rule: execute_step.cases)
apply auto
done
lemma execute_step_backward_Accept:
assumes "execute step (initialWorld W) w'" "val \<in> accepted (world_state w')"
obtains p r s where "(p, Received r (Send s (Accept val))) \<in> set (world_events w')"
proof-
obtain path where "execute_traced step (initialWorld W) w' path"
using assms(1) execute_imps_trace by blast
{ fix n
have "\<lbrakk> length path = n; execute_traced step (initialWorld W) w' path; val \<in> accepted (world_state w') \<rbrakk> \<Longrightarrow> \<exists>p r s. (p, Received r (Send s (Accept val))) \<in> set (world_events w')"
apply (induct n arbitrary: path w')
proof-
fix path w'
assume "length path = 0"
and "execute_traced step (initialWorld W) w' path"
and "val \<in> accepted (world_state w')"
have "w' = initialWorld W"
using \<open>execute_traced step (initialWorld W) w' path\<close> \<open>length path = 0\<close> execute_traced_coherence_0 by fastforce
hence "accepted (world_state w') = {}"
by (simp add: initialWorld_def initialState_def)
show "\<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')"
using \<open>accepted (world_state w') = {}\<close> \<open>val \<in> accepted (world_state w')\<close> by blast
next
fix n path w'
assume "(\<And>path w'.
length path = n \<Longrightarrow>
execute_traced step (initialWorld W) w' path \<Longrightarrow>
val \<in> accepted (world_state w') \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w'))"
and "length path = Suc n" "execute_traced step (initialWorld W) w' path" "val \<in> accepted (world_state w')"
obtain w'' path' where "path = path' @ [w'']" "length path' = n" "execute_traced step (initialWorld W) w'' path'" "execute_step step w'' w'"
using \<open>execute_traced step (initialWorld W) w' path\<close> \<open>length path = Suc n\<close> execute_traced_coherence_Suc by blast
have "accepted (world_state w'') \<subseteq> accepted (world_state w')"
using \<open>execute_step step w'' w'\<close> accepted_step_monotonic by blast
have "val \<in> accepted (world_state w'') \<or> val \<in> accepted (world_state w') - accepted (world_state w'')"
by (simp add: \<open>val \<in> accepted (world_state w')\<close>)
moreover have "val \<in> accepted (world_state w'') \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')"
proof-
have "val \<in> accepted (world_state w'') \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w'')"
using \<open>\<And>w' path. \<lbrakk>length path = n; execute_traced step (initialWorld W) w' path; val \<in> accepted (world_state w')\<rbrakk> \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')\<close> \<open>execute_traced step (initialWorld W) w'' path'\<close> \<open>length path' = n\<close> by blast
moreover have "set (world_events w'') \<subseteq> set (world_events w')"
using \<open>execute_step step w'' w'\<close> execute_step_events_increasing_as_set by blast
ultimately show "val \<in> accepted (world_state w'') \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')"
by blast
qed
moreover have "val \<in> accepted (world_state w') - accepted (world_state w'') \<Longrightarrow> \<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')"
proof-
assume "val \<in> accepted (world_state w') - accepted (world_state w'')"
have "execute_step step w'' w'"
by (simp add: \<open>execute_step step w'' w'\<close>)
obtain p r s where "world_events w' = world_events w'' @ [(p, Received r (Send s (Accept val)))]"
by (meson \<open>execute_step step w'' w'\<close> \<open>val \<in> accepted (world_state w') - accepted (world_state w'')\<close> execute_step_accepted_change_onlyif)
show ?thesis
using \<open>world_events w' = world_events w'' @ [(p, Received r (Send.Send s (Accept val)))]\<close> by auto
qed
ultimately show "\<exists>p r s. (p, Received r (Send.Send s (Accept val))) \<in> set (world_events w')"
by blast
qed
}
thus ?thesis
using \<open>execute_traced step (initialWorld W) w' path\<close> assms(2) that by blast
qed
(*
lemma exists_consume_queue_tasks_execution:
obtains w' df xs where
"execute step w w'"
"world_events w' = world_events w @ df"
"map snd df = Received (Worker 0) (SQSRequest (Receive 1)) # Received Queue (Send (Worker 0) (SQSResponse (Returned xs))) # []"
*)
theorem liveness:
assumes "execute step (initialWorld W) w"
obtains w' where "execute step w w'" "ran (messages (SQS.queue (queue (world_state w')))) = {}" "accepted (world_state w') = {0..W}"
sorry
end
|
(* Property from Productive Use of Failure in Inductive Proof,
Andrew Ireland and Alan Bundy, JAR 1996.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_prop_40
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
fun z :: "bool => bool => bool" where
"z True y2 = y2"
| "z False y2 = False"
fun y :: "Nat => Nat => bool" where
"y (Z) (Z) = True"
| "y (Z) (S z2) = False"
| "y (S x22) (Z) = False"
| "y (S x22) (S y22) = y x22 y22"
fun x :: "bool => bool => bool" where
"x True y2 = True"
| "x False y2 = y2"
fun elem :: "Nat => Nat list => bool" where
"elem x2 (nil2) = False"
| "elem x2 (cons2 z2 xs) = x (y x2 z2) (elem x2 xs)"
fun subset :: "Nat list => Nat list => bool" where
"subset (nil2) y2 = True"
| "subset (cons2 z2 xs) y2 = z (elem z2 y2) (subset xs y2)"
fun union :: "Nat list => Nat list => Nat list" where
"union (nil2) y2 = y2"
| "union (cons2 z2 xs) y2 =
(if elem z2 y2 then union xs y2 else cons2 z2 (union xs y2))"
theorem property0 :
"((subset x2 y2) ==> ((union x2 y2) = y2))"
oops
end
|
{-
This type ℕ₋₂ was originally used as the index to n-truncation in order to
be consistent with the notation in the HoTT book. However, ℕ was already
being used as an analogous index in Foundations.HLevels, and it became
clear that having two different indexing schemes for truncation levels was
very inconvenient. In the end, having slightly nicer notation was not worth
the hassle of having to use this type everywhere where truncation levels
were needed. So for this library, use the type `HLevel = ℕ` instead.
See the discussions below for more context:
- https://github.com/agda/cubical/issues/266
- https://github.com/agda/cubical/pull/238
-}
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Experiments.NatMinusTwo where
open import Cubical.Experiments.NatMinusTwo.Base public
open import Cubical.Experiments.NatMinusTwo.Properties public
open import Cubical.Experiments.NatMinusTwo.ToNatMinusOne using (1+_; ℕ₋₁→ℕ₋₂; -1+Path) public
|
If $0 \leq x \leq 1$, then the sequence $x^n$ is monotone: since $x^n \geq 0$ and $x \leq 1$, we have $x^{n+1} = x \cdot x^n \leq x^n$ for every $n$, so the sequence is decreasing. |
!Boilerplate:
Drive = 'O'
*DIM,BaseFolder,STRING,200
BaseFolder(1) = '%Drive%:\Afstuderen\Ansys\_AnsysModel'
*DIM, Scriptfolder, STRING, 200
Scriptfolder(1) = '%BaseFolder(1)%\Scripts\Shared'
!Load global folder and filenames:
*USE, '%Scriptfolder(1)%/LoadGlobals.MAC'
!Create rail and gather cross-sectional properties:
*USE, 'O:\Afstuderen\Ansys\_AnsysModel\Geometries\Rail_54E1\CreateRail.MAC'
ASUM !https://www.mm.bme.hu/~gyebro/files/ans_help_v182/ans_cmd/Hlp_C_ASUM.html |
(* Title: HOL/Auth/n_german_lemma_on_inv__37.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__37 imports n_german_base
begin
section{*All lemmas on causal relation between inv__37 and some rule r*}
lemma n_RecvReqVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReq N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReq N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvEVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvSVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv)) (eqn (IVar (Ident ''CurCmd'')) (Const ReqS))) (eqn (IVar (Ident ''ExGntd'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv4) ''Cmd'')) (Const Inv)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv4)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__37:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__37 p__Inv4" apply fastforce done
have "(i=p__Inv4)\<or>(i~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_StoreVsinv__37:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqESVsinv__37:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqES i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__37:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__37:
assumes a1: "\<exists> j. j\<le>N\<and>r=n_SendReqS j" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqEIVsinv__37:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqEI i" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__37 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
{-# OPTIONS --cubical --safe #-}
module Categories.Functor where
open import Prelude
open import Categories
record Functor {ℓ₁ ℓ₂ ℓ₃ ℓ₄} (C : PreCategory ℓ₁ ℓ₂) (D : PreCategory ℓ₃ ℓ₄) : Type (ℓ₁ ℓ⊔ ℓ₂ ℓ⊔ ℓ₃ ℓ⊔ ℓ₄) where
private module C = PreCategory C
private module D = PreCategory D
field
F₀ : C.Ob → D.Ob
F₁ : ∀ {X Y} → (X C.⟶ Y) → (F₀ X D.⟶ F₀ Y)
identity : ∀ {X} → F₁ (C.Id {X}) ≡ D.Id
homomorphism : ∀ {X Y Z} → (f : X C.⟶ Y) (g : Y C.⟶ Z) →
F₁ (g C.· f) ≡ F₁ g D.· F₁ f
|
%
% documentation-en.tex
% Copyright 2018 HEC Montréal
%
% This work may be distributed and/or modified under the
% conditions of the LaTeX Project Public License, either version 1.3c
% of this license or (at your option) any later version.
% The latest version of this license is in
% https://www.latex-project.org/lppl/lppl-1-3c.txt
% and version 1.3c or later is part of all distributions of LaTeX
% version 2008/05/04 or later.
%
% This work has the LPPL maintenance status `maintained'.
%
% The Current Maintainer of this work is Benoit Hamel
% <[email protected]>.
%
% This work consists of the files actuecon.cls, template-en.tex,
% template-fr.tex, documentation-en.tex, documentation-fr.tex
% and the derived files documentation-en.pdf and documentation-fr.pdf.
%
\documentclass[english]{article}
\usepackage[utf8]{inputenc}
\usepackage[T1]{fontenc}
\usepackage{natbib}
\usepackage{babel}
\usepackage{xcolor}
\usepackage{fontawesome5}
\usepackage{enumitem}
\usepackage{metalogo}
\usepackage{framed}
\usepackage{changes}
\usepackage{hyperref}
\setlength{\parskip}{1ex}
\definecolor{shadecolor}{rgb}{0.93,0.97,0.99}
\hypersetup{%
breaklinks=true,%
colorlinks=true,%
allcolors=blue
}
\newlist{repertoires}{itemize}{2}
\setlist[repertoires]{label=\faIcon[regular]{folder-open}~}
\newlist{fichiers}{itemize}{1}
\setlist[fichiers]{label=\faIcon[regular]{file}~}
\newcommand{\cmd}[1]{%
\texttt{\textbackslash#1\{\}}
}
\newcommand{\dec}[1]{%
\texttt{\textbackslash#1}
}
\newcommand{\lien}[2]{%
\href{#1}{#2 \faIcon{external-link-alt}}
}
\title{Journal Article Template for \emph{L'Actualité économique}}
\author{Benoit Hamel \\ Library, HEC Montréal}
\date{\today}
\definechangesauthor[color=red, name={Benoit Hamel}]{BH}
\begin{document}
\maketitle
\begin{abstract}
	The goal of this documentation is to explain the package's features to those who will use these
	templates. It is divided into two parts, depending on a person's role in the writing process.
\end{abstract}
\tableofcontents
\section{For authors}
\label{sec:auteurs}
\subsection{Files included in the package}
On opening the package's archive file, you'll find the following structure:
\begin{repertoires}
\item \textbf{actuecon}
\begin{repertoires}
\item \textbf{doc}: documentation directory
\item \textbf{img}: graphics and images directory
\end{repertoires}
\begin{fichiers}
\item \textbf{actuecon.cls}: document class file
\item \textbf{bibliographie.bib}: references file
\item \textbf{econometrica.bst}: english bibliography style
\item \textbf{econometrica-fr.bst}: french bibliography style
\item \textbf{template-en.tex}: english template file
\item \textbf{template-fr.tex}: french template file
\end{fichiers}
\end{repertoires}
Using this package is pretty straightforward:
\begin{itemize}
\item You write your article in your language's template file
(\textbf{template-en.tex} or \textbf{template-fr.tex});
\item You write your bibliographic references in \textbf{Bib\TeX} format in the
\textbf{bibliographie.bib} file;
\item You save all your graphics and images in the \textbf{img/} directory.
\end{itemize}
\subsection{How to use the template file}
This section explains the template file's usage, from the first to (almost) the last line.
\subsubsection{The document class}
The template file uses the \textbf{actuecon} document class, as you can see in the
\cmd{documentclass} command:
\begin{shaded*}
\verb|\documentclass[10pt,twoside,fleqn,french,english]{actuecon}|
\end{shaded*}
This class, based on the \textbf{article} class, was built to fully comply with the journal's
presentation standards. \textbf{You must not modify the options} in the \cmd{documentclass}
command nor in the \textbf{actuecon.cls} document class file.
\subsubsection{The class' required packages}
In the template's preamble, you will find the list of all required packages needed to use
the \textbf{actuecon} document class and the templates. These packages are
\textbf{already loaded} in the class file. There is no need to load them in the template file.
When loading additional packages for your article, please make sure that there are no
compatibility issues with the class' packages.
\subsubsection{Article metadata}
The only metadata that you'll need to enter in the template file are the title
(\cmd{AEtitre} command), the authors' list (\cmd{author} command)
and their affiliation to an institution (\cmd{affil} command).
You can add as many authors and affiliations as is necessary, as long as you alternate
between \cmd{author} and \cmd{affil} commands.
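As an illustration, the metadata block could look like the following (the names and the
exact layout of \cmd{AEtitre}'s short title argument are fictitious; refer to the comments
in the template file for the authoritative syntax):
\begin{shaded*}
\begin{verbatim}
\AEtitre[Short running title]{Full title of the article}
\author{First Author}
\affil{HEC Montréal}
\author{Second Author}
\affil{Université de Montréal}
\end{verbatim}
\end{shaded*}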
All of the article's other metadata (volume, issue number, publication date and revisers) have
to be entered by the journal's staff.
\subsubsection{The article's body}
The article's different sections are clearly defined using comments. Write your article
while following these guidelines:
\begin{itemize}
\item Write your french and english \textbf{abstracts} in the \texttt{AEresume} and
\texttt{AEabstract} environments, making sure not to leave blank lines between
the \cmd{begin} and \cmd{end} commands and your abstract's text.
\item Write your \textbf{introduction}, your \textbf{main content}, your \textbf{conclusion}
and your appendices in their respective sections,
making sure not to delete the \dec{AEintroduction},
\dec{AEsectionsDeveloppement}, \dec{AEconclusion}, \dec{AEannexe} and \dec{AEbibliographie} commands.
\item In your article's sectioning hierarchy, do not go beyond the subsubsection.
\end{itemize}
\subsubsection{Bibliography}
The package comes with two bibliography style files: \textbf{econometrica.bst} and
\textbf{econometrica-fr.bst}. These correspond to the english and french versions
of the bibliography style used by the journal for references. You have to use either one
of them, depending on the default language of your article.
\subsection{Editorial details to consider}
\subsubsection{Greek letters in math mode}
According to the presentation standards written by HEC Montréal's
\emph{Studio de design graphique}, Greek letters must be written upright, not in italics. To comply
with this standard, the \textbf{actuecon} document class uses the
\lien{http://mirrors.ctan.org/macros/latex/contrib/was/upgreek.pdf}{upgreek} package. Please read
the package's documentation in order to replace the traditional Greek letter commands with \textbf{upgreek}'s
commands.
\section{For the journal's staff}
\label{sec:personnel}
This section explains what the journal staff has to do in order to work on an author's draft.
\subsection{The document class}
An ``editorial'' choice was made to separate the content from the container in the journal's
article templates. Since it is very likely that an author will add their own packages, commands
and environments, it was important to separate those customizations from the journal's in order
to preserve the article's layout.
This is why the \textbf{actuecon} document class was built: all of the article's layout is there.
An author only has to provide the template and references files, as well as the \texttt{img} folder
as source code. You will be able to compile an article with a ``vanilla'' version of the class, and in
doing so make sure that the article meets all the presentation standards.
\subsection{Article metadata}
The draft's author having already entered the article's title, authors' list and affiliations, all you have
to enter is the metadata related to the journal issue number.
In the template's preamble, you'll find the \cmd{AEvolume} and \cmd{AEnumero} commands in which you must enter
the volume and issue number of the journal to be published.
The \cmd{AEdateParution} command is used to enter the journal's \textbf{publication date}. It doesn't
require a particular date format: it is just a plain text ``field'' in which you can enter a date as you see fit
(``march-june 2017'', for example). As per the presentation standards written by the
\emph{Studio de design graphique}, you \textbf{must write the publication date's month in lowercase}.
The \cmd{date} command \textbf{must always stay empty} if you don't want another date to show up
in the title section -- try inserting a date in the command to see what happens if you don't
leave it blank.
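For instance, a spring issue's preamble could contain something like the following
(the values are fictitious):
\begin{shaded*}
\begin{verbatim}
\AEvolume{96}
\AEnumero{2}
\AEdateParution{march-june 2020}
\date{} % must stay empty
\end{verbatim}
\end{shaded*}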
In the \textbf{document body}, you must enter the article's \textbf{first page} number in the
\cmd{setcounter} command, as shown in the following example.
\begin{shaded*}
\begin{verbatim}
% Article beginning at page 3.
\setcounter{page}{3}
\end{verbatim}
\end{shaded*}
This is necessary because it is difficult to merge many \LaTeX\ documents, written
by as many authors, into a single document without running into compatibility issues. At the end of the
revision process, each document must be recompiled with its definitive first page number.
\subsection{Draft revision}
Revisers must first enter their name and initials in the \cmd{AEreviseur} command found in the draft's
preamble, as shown in the following example. There can be as many revisers as necessary, as
long as there is a \cmd{AEreviseur} command for each one.
\begin{shaded*}
\verb|\AEreviseur{Benoit Hamel}{BH}|
\end{shaded*}
The revision process is made possible with the \LaTeX\ \emph{changes} package. It was made to
copy Microsoft Word's behaviour in document revision. You can read the package's
\lien{http://mirrors.ctan.org/macros/latex/contrib/changes/changes.english.pdf}{whole documentation}
if you wish.
In this documentation, we'll only look at three commands:
\begin{itemize}
\item \dec{added} to add text;
\item \dec{deleted} to delete text;
\item \dec{replaced} to replace text.
\end{itemize}
\subsubsection{Adding text}
The \cmd{added} command has the following syntax:
\begin{shaded*}
\verb|\added[id=<initials>, remark=<remark>]{new text}|
\end{shaded*}
Reviser initials and the remark are optional. However, initials are useful to identify a
reviser when there is more than one -- a draft's author can also
be a ``reviser''. Remarks must be short. The text to be added must be written between the
curly braces, as in the following example.
\begin{shaded*}
\begin{verbatim}
Ad aliquet amet commodo convallis dictum dignissim eu facilisis
faucibus fermentum hendrerit himenaeos inceptos massa ornare
purus quis risus sapien senectus taciti tempor torquent turpis
ultrices varius velit vestibulum.
\added[id=BH]{Alea jacta est!}
\end{verbatim}
\end{shaded*}
\begin{leftbar}
Ad aliquet amet commodo convallis dictum dignissim eu facilisis
faucibus fermentum hendrerit himenaeos inceptos massa ornare
purus quis risus sapien senectus taciti tempor torquent turpis
ultrices varius velit vestibulum.
\added[id=BH]{Alea jacta est!}
\end{leftbar}
\subsubsection{Deleting text}
The \cmd{deleted} command has the following syntax:
\begin{shaded*}
\begin{verbatim}
\deleted[id=<initials>, remark=<remark>]{deleted text}
\end{verbatim}
\end{shaded*}
For the command to work properly, you must cut and paste the text to be deleted
into the curly braces, as in the following example.
\begin{shaded*}
\begin{verbatim}
The \deleted[id=BH]{failing} @nytimes set \deleted[id=BH]{%
liddle'} Bob Corker up by recording his conversation.
Was \deleted[id=BH]{made to sound} a fool and that's what
I am dealing with!
\end{verbatim}
\end{shaded*}
\begin{leftbar}
The \deleted[id=BH]{failing} @nytimes set \deleted[id=BH]{liddle'} Bob Corker
up by recording his conversation.
Was \deleted[id=BH]{made to sound} a fool and that's what
I am dealing with!
\end{leftbar}
\subsubsection{Replacing text}
The \cmd{replaced} command has the following syntax:
\begin{shaded*}
\begin{verbatim}
\replaced[id=<initials>, remark=<remark>]{new text}{%
old text}
\end{verbatim}
\end{shaded*}
As with the \cmd{deleted} command, you must cut and paste the text to be replaced into
the second pair of curly braces and write the new text in the first pair. The
following example shows how to use the command.
\begin{shaded*}
\begin{verbatim}
Democrat congresswoman totally \replaced[id=BH]{quoted}{%
fabricated} what I said to the wife of a soldier who died
in action (and \replaced[id=BH]{she has}{I have} proof).
\end{verbatim}
\end{shaded*}
\begin{leftbar}
Democrat congresswoman totally \replaced[id=BH]{quoted}{%
fabricated} what I said to the wife of a soldier who died
in action (and \replaced[id=BH]{she has}{I have} proof).
\end{leftbar}
\subsection{Editorial guidelines to consider}
The following editorial guidelines are imposed by the
\emph{Studio de design graphique}. You have to follow these guidelines
in order to accelerate the publication process.
\subsubsection{Article's page headers}
An article's title cannot be longer than one line in the pages' headers.
When an article's title is too long, you will have to truncate the
\textbf{short title argument} of the \cmd{AEtitre} command so it fits in
one line.
\subsubsection{Block equations inside a sentence}
It can happen that an author writes a block equation (meaning inside an environment) in the
middle of a sentence. Doing so isolates the equation and splits the paragraph and the
sentence in two.
\LaTeX\ will consider that a new paragraph starts right after the equation and will indent
that ``new'' paragraph's first line. \textbf{This line cannot be indented}. To remove the
indentation, just insert the \dec{noindent} command before the line.
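For example, with fictitious article text:
\begin{shaded*}
\begin{verbatim}
... the estimated elasticity is
\begin{equation}
  \varepsilon = \beta_1 \frac{x}{y}.
\end{equation}
\noindent This value is significantly different from zero.
\end{verbatim}
\end{shaded*}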
\subsubsection{Font case for an article's authors' last names}
An article's authors' last names must be typed in uppercase in the article title block. If authors
have written their last name with only the first letter in uppercase, you must manually retype
the last names in uppercase.
\subsubsection{Proper nouns hyphenation in citations}
When a citation is inserted in the text, proper nouns cannot be hyphenated at the end of a
line. If you see such a case, you have to embed the citation command in the
\cmd{nohyphens} command provided by the \textbf{hyphenat} package, which is preloaded in the
class file. The citation will then be reformatted without hyphenation.
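For example, with a fictitious citation key:
\begin{shaded*}
\verb|... as shown by \nohyphens{\cite{tremblay2015}} in a recent study.|
\end{shaded*}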
\subsubsection{Font for URL links}
Some authors will use the \cmd{url} command to insert URL links in their document. This command
typesets links in a \texttt{fixed width} font. URL links must be typed in the same font as the rest
of the document, so you'll have to replace all \cmd{url} commands with the \verb|\href{URL}{text}|
command.
\end{document} |
function [error_M,error_A,P] = compare_2_endmembers(M1,M2,A1,A2,m,n,...
names,wl,show_figure,options)
%COMPARE_2_ENDMEMBERS Compare estimated endmembers/abundances with ground truth.
%   Permutes the estimated endmembers M2 and abundances A2 to best match the
%   ground-truth M1 and A1, optionally plots them, and returns the endmember
%   error (RMSE), the abundance error and the permutation matrix P.
if nargin < 9
show_figure = 0;
end
if nargin < 10
options = [];
end
A1 = double(A1);
% use_uncertainty = 0;
var_dirs = [];
var_amts = [];
permute_criterion = 'auto';
if isstruct(options)
arg_set = fieldnames(options);
for i = 1:length(arg_set)
eval([arg_set{i},'=options.',arg_set{i},';']);
end
end
use_uncertainty = ~isempty(var_amts);
if isempty(M1) || isempty(A1) || size(A1,2) < size(A2,2)
show_endmembers(M2,wl,names);
show_abundances(A2,m,n);
if use_uncertainty
show_uncertainty_range([], M2, var_amts, var_dirs, wl, names, options);
end
error_M = [];
error_A = [];
P = eye(size(M2,1));
return;
end
[M,B] = size(M1);
is_real_dataset = 0;
if length(unique(A1(:))) == 2
is_real_dataset = 1;
end
if strcmp(permute_criterion, 'auto')
if is_real_dataset
permute_criterion = 'abundance';
else
permute_criterion = 'endmember';
end
end
%% Permute the endmembers and abundances from the algorithm to accord with
% the ground truth ones.
if strcmp(permute_criterion, 'abundance')
P = permute_abundances(A1,A2);
M2_1 = P*M2;
elseif strcmp(permute_criterion, 'endmember')
[P,M2_1] = permute_endmembers(M1,M2);
end
A2_1 = (P*A2')';
if use_uncertainty
var_amts = P*var_amts;
var_dirs = P*var_dirs;
end
%% Plot 2 groups of endmembers and show uncertainty range if possible
if show_figure
show_uncertainty_range(M1, M2_1, var_amts, var_dirs, wl, names, options);
end
%% Calculate the error
error_A = calc_abundance_error(A1,A2_1);
error_M = nanmean(sqrt(nanmean(abs(M1-M2_1).^2, 2)));
if show_figure
disp(['Error for A: ',num2str(error_A)]);
disp(['Error for M: ',num2str(error_M)]);
end
if show_figure
show_abundances(A2_1,m,n);
end
%% If size(A1,2) < size(A2,2), make the permutation matrix P square
if size(P,1) < size(P,2)
missings = [];
for i = 1:size(P,2)
if all(P(:,i) == 0)
missings = [missings,i];
end
end
new_P1 = zeros(length(missings), size(P,2));
for i = 1:length(missings)
new_P1(i, missings(i)) = 1;
end
P = [P;new_P1];
end
|
# Discrete Choice
Discrete choice models model a single choice among mutually exclusive alternatives, in contrast to standard regression models, where a continuous quantity is estimated.
In this notebook we will familiarize you with discrete choice, the difference between logit and probit, and how to implement them (and more advanced models) using the module [statsmodels](https://www.statsmodels.org/stable/index.html).
### Linear Regression
As a lazy student we want to study as little as possible, but still pass the final test. Let's pretend we have a dataset of last year's students, with the hours they studied for the test, and whether or not they passed. From this dataset we can estimate how many hours we have to study ourselves to pass. If we tried a linear regression approach, we would fit the function:
\begin{equation}
Y = \beta_0 + \beta_1 X_1
\end{equation}
where $Y$ equals the chance of passing, $\beta_0$ the base chance of passing, $\beta_1$ the increase of chance of passing per hour we study, and $X_1$ the hours a student studied.
First: We install statsmodels and test if we can import it.
```python
from IPython.display import clear_output
!pip install statsmodels
import statsmodels
clear_output()
print("Everything A-Okay!")
```
Everything A-Okay!
```python
%matplotlib inline
import matplotlib.pyplot as plt
import statsmodels.api as sm
import numpy as np
# the hours each student studied for the test, and whether they passed or failed
students = {'hours': [0, 2, 3, 4, 4.5, 4.5, 5, 6, 6.5, 7, 8, 8, 9, 9.5, 10, 10.5, 12, 13.5],
'passed': [False, False, False, False, False, False, False, False, False, True, True, True, False, True, True, False, True, True]}
# use ordinary least squares (OLS) to fit our function Y
intercept, slope = sm.OLS(students['passed'],
sm.add_constant(students['hours'])).fit().params
# plot the results of the students
plt.scatter(students['hours'], students['passed'])
plt.xlabel('hours studied'); plt.ylabel('pass/fail')
# plot the results of the fit
x_vals = np.array(plt.gca().get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, '--')
# set proper axes
plt.xlim([-1, 14]); plt.ylim([-0.1, 1.1])
plt.show()
```
We fitted our $Y$ function with a simple least squares approach, using the method [sm.OLS](https://www.statsmodels.org/dev/generated/statsmodels.regression.linear_model.OLS.html) from statsmodels. Its first argument is the $Y$ value we try to fit, and the second argument contains the explanatory variables ($X_1$) whose $\beta$ coefficients are estimated. Note that we have to add a constant column (`sm.add_constant`) if we want a $\beta_0$ value.
The obvious problem with the linear regression approach is that we try to model the chance of passing (or failing), but our model can give values outside of the range (0, 1). If a student did not study, the model gives a chance of success lower than 0%, and after more than 14 hours of study the chance of passing is higher than 100%! To solve this problem we need discrete models.
### Binary Discrete Choice
Discrete models are similar to our previous approach, except the $Y$ value is not modelled on a continuous scale, but is limited to a set of discrete alternatives. To solve these models we need a **utility** function, which closely resembles the function we tried to fit using linear regression, but with some added noise $\epsilon$:
\begin{equation}
U = \beta_0 + \beta_1 X_1 + \epsilon \\
\begin{cases}
pass & U > 0\\
fail & else\\
\end{cases}
\end{equation}
This utility function represents the preference for an outcome. In our case, if the utility is above zero the student passes, otherwise the student fails. To get a probability from our utility we need a function $F$ which maps the utility to a probability in the range (0, 1).
\begin{equation}
P_{n1} = F(U)
\end{equation}
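To make this concrete, here is a small simulation sketch with made-up coefficients: we draw the noise $\epsilon$ from a logistic distribution and check that the fraction of positive utilities matches $F$ applied to the deterministic part of the utility (here $F$ is the logistic CDF, the Logit case discussed below).
```python
import numpy as np

rng = np.random.default_rng(0)

# made-up coefficients: base utility -4, +0.6 per hour studied
beta_0, beta_1 = -4.0, 0.6
hours = 8.0

# simulate many students whose unobserved noise follows a standard logistic distribution
eps = rng.logistic(size=100_000)
U = beta_0 + beta_1 * hours + eps

# fraction of positive utilities vs. the logistic CDF of the deterministic part
V = beta_0 + beta_1 * hours
print((U > 0).mean(), 1 / (1 + np.exp(-V)))
```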
Here we will discuss two of the most common $F$ functions, **Logit** & **Probit**.
### Logit (Logistic regression)
When using the Logit approach we assume that the log-odds of pass/failure can be expressed as a linear function of our input (the utility), and our unobserved fraction of the utility ($\epsilon$) follows a logistic distribution:
\begin{equation}
log (\frac{P_{n1}}{1 - P_{n1}}) = U \\
\frac{P_{n1}}{1 - P_{n1}} = e^U
\end{equation}
which we can rewrite to:
\begin{equation}
P_{n1} = \frac{e^U}{1 + e^U}
\end{equation}
In the Logit case our function $F$ is just the sigmoid/logistic function!
So what did we gain from this approach? Our values are now limited to the range (0, 1), but more importantly, we can interpret our coefficients in terms of odds! If, for instance, our fitted $\beta_1$ has a value of $1.1$, it means that each extra hour of study multiplies the odds of passing by $e^{1.1} \approx 3$!
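A tiny numeric check of this interpretation (using a hypothetical coefficient, not one fitted on the data above):
```python
import numpy as np

beta_1 = 1.1                 # hypothetical fitted coefficient: log-odds added per hour of study
odds_ratio = np.exp(beta_1)  # each extra hour multiplies the odds of passing by this factor
print(odds_ratio)            # ~3.0
```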
### Probit
The probit model assumes that the unobserved fraction of the utility ($\epsilon$) follows a standard normal distribution:
\begin{equation}
P_{n1} = \Phi(\beta_0 + \beta_1 X_1)
\end{equation}
where $\Phi$ is the cumulative distribution function of the (standard) normal distribution.
### Difference
So what is the difference between a normal distribution and a logit distribution? Let's plot them both:
```python
from scipy.stats import norm, logistic
import math
# standard normal distribution
mu = 0; std = 1
x = np.linspace(-4, 4, 100)
# plot the normal pdf & cdf
normal = norm.pdf(x, loc=mu, scale=std)
plt.plot(x, normal, label='normal distribution')
plt.plot(x, np.cumsum(normal) / sum(normal), label='cumulative normal distribution')
# plot the logistic pdf & cdf
logist = logistic.pdf(x, loc=mu, scale=std * math.sqrt(3) / math.pi)
plt.plot(x, logist, label='logistic distribution')
plt.plot(x, np.cumsum(logist) / sum(logist), label='cumulative logistic distribution')
plt.ylabel('probability')
plt.legend()
plt.show()
```
They are very similar! Note that the logistic distribution has fatter tails, so it will produce more extreme values than the normal distribution. Now let's see how they differ in the performance of the fit:
```python
# plot the results of the students
plt.scatter(students['hours'], students['passed'])
plt.xlabel('hours studied'); plt.ylabel('pass/fail')
# set proper axes
plt.xlim([-1, 14]); plt.ylim([-0.1, 1.1])
x_vals = sm.add_constant(np.linspace(-1, 14, 100))
# use probit to fit our function
probit = sm.Probit(students['passed'], sm.add_constant(students['hours']))
pr_model = probit.fit(disp=0) # disp=0 to silence the verbose function
pseudo_r_p = pr_model.prsquared
# plot the results of probit
y_vals = pr_model.predict(x_vals)
plt.plot(x_vals[:, 1], y_vals, '--', label='probit')
# use logit to fit our function
logit = sm.Logit(students['passed'], sm.add_constant(students['hours']))
lo_model = logit.fit(disp=0) # disp=0 to silence the verbose function
pseudo_r_l = lo_model.prsquared
# plot the results of logit
y_vals = lo_model.predict(x_vals)
plt.plot(x_vals[:, 1], y_vals, '--', label='logit')
plt.legend()
plt.show()
# show summary of both models
print(pr_model.summary())
print(lo_model.summary())
```
```python
# so what is the probability of passing the course if you study 9 hours for the test?
# your answer has to be correct for at least two significant digits
chance = None
def calc_utility(model, hours):
"""
Determines the utility of a certain regression model
and input of hours studied
"""
constant, slope = model.params
return constant + slope * hours
def prob_Logit(utility):
"""
Determines the probability of passing the exam
given a Logistic regression and utility
"""
return 1 / (1 + math.exp(-utility))
def prob_Probit(utility):
"""
Determines the probability of passing the exam
given a Probit regression and utility
"""
return norm.cdf(utility, loc=mu, scale=std)
utility = calc_utility(lo_model, 9)
chance = prob_Logit(utility)
chance_lo = lo_model.predict([1, 9])[0]
assert round(chance, 2) == round(chance_lo, 2), "Probability is not significant for at least two digits"
utility = calc_utility(pr_model, 9)
chance = prob_Probit(utility)
chance_pr = pr_model.predict([1, 9])[0]
assert round(chance, 2) == round(chance_pr, 2), "Probability is not significant for at least two digits"
```
```python
assert 0 <= chance <= 1
```
Even though the fitted parameters of both models are quite different, the actual fits are extremely close, and differ little in their predictions, pseudo R squares, or looks.
### Multinomial logit
When dealing with multiple discrete alternatives, we have to make use of multinomial discrete choice. We rewrite our original utility function into one utility function per choice, where the probability of choosing alternative $i$ is defined as:
\begin{equation}
P_i = Prob(U_i > U_j \quad \forall j \neq i)
\end{equation}
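Under the usual logit assumption for the noise terms, this probability reduces to a softmax over the deterministic parts of the utilities. A minimal sketch with made-up utilities:
```python
import numpy as np

def mnl_probabilities(utilities):
    """Choice probabilities implied by multinomial logit: a softmax over the utilities."""
    exp_u = np.exp(utilities - np.max(utilities))  # subtract the max for numerical stability
    return exp_u / exp_u.sum()

# made-up utilities for three alternatives; the probabilities sum to 1
print(mnl_probabilities(np.array([1.0, 0.5, -0.2])))
```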
We generated a dataset of 250 students, which contains the students' income, distance to university, how lazy they are, and what transport (either bike, car, bus 40 or bus 240) they use to get to university.
Implement multinomial logit yourself; take a look at [MNLogit](https://www.statsmodels.org/dev/generated/statsmodels.discrete.discrete_model.MNLogit.html). Remember to add a constant (`sm.add_constant`) to the observed variables. Also note that you should use the numeric labels, not the text labels.
```python
import pandas as pd
# load our dataset
df = pd.read_csv('transport.csv', sep=';', usecols=['income', 'distance', 'lazy', 'transport', 'transport_id'])
# print the 'head' of the dataframe to get a feel for the data
print(df.head())
# implement multinomial logit
df_exogeneous = df.drop(columns=["transport", "transport_id"])
df_endogeneous = df["transport_id"]
x_vals = sm.add_constant(df_exogeneous)
MNL = sm.MNLogit(df_endogeneous, x_vals)
model = MNL.fit(disp=0)
# let's see how it predicts on our own dataset (you should get at least 200 out of 250 predictions correct!)
# the predict function returns a dataframe shape (250, 4), where each column is the chance of that choice.
# Assume that the option with the highest chance is chosen
MNL_predict = model.predict(x_vals)
MNL_predict["choice"] = MNL_predict.idxmax(axis=1)
correct_predictions = np.where(MNL_predict["choice"] == df["transport_id"], 1, 0)
correct_predictions = correct_predictions.sum()
assert correct_predictions > 200, "Number of correct predictions should at least be 200"
```
income distance lazy transport transport_id
0 0.0 11.0 7.0 bus 240 3
1 433.0 6.0 7.0 bus 40 2
2 450.0 8.0 7.0 bus 240 3
3 662.0 9.0 5.0 car 1
4 168.0 5.0 1.0 bike 0
```python
assert type(model).__name__ == 'MultinomialResultsWrapper'
```
### Logit limitations
- **Taste variation**: every student shares the same $\beta$ values, while this does not necessarily have to be true. Some students might have taken earlier courses that cover much of the subject matter, so they have a higher $\beta_0$ value, and some students might just be more efficient learners, resulting in a higher $\beta_1$ value. Logit does not allow different $\beta$ values across choice makers.
- **Independence of Irrelevant Alternatives (IIA)**: If we make people choose between two options (e.g. bulbasaur and squirtle), adding a third option (charmander) should not change people's original ranking of the two options. For example: if someone prefers a squirtle over a bulbasaur, also giving them the choice of a charmander should not suddenly make them like bulbasaur more. Multinomial logit builds this assumption in, even though real choices often violate it (see the sketch after this list).
- **Repeated choice**: Logit assumes no correlation in repeated choices. If a person takes the bike to work one day, it might influence whether they take the bike the next day. Maybe they got lost, so they won't take the bike again. Or they get to know the road better, so biking the next day becomes faster.
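To make the IIA point concrete, here is a small numeric sketch with made-up utilities: under multinomial logit the ratio of the probabilities of two alternatives does not change when a third alternative is added, which is exactly the assumption that can be unrealistic.
```python
import numpy as np

def softmax(u):
    e = np.exp(u - np.max(u))
    return e / e.sum()

# made-up utilities for squirtle and bulbasaur
two_options = softmax(np.array([1.0, 0.2]))
# add charmander: under multinomial logit the ratio P(squirtle)/P(bulbasaur) is unchanged
three_options = softmax(np.array([1.0, 0.2, 0.7]))
print(two_options[0] / two_options[1], three_options[0] / three_options[1])  # both ~2.23
```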
### Nested logit
When we look closer at the data we see that bus 240 and bus 40 are similar choices, and after a quick questionnaire we realize that if bus 40 does not run, all students will use bus 240, and vice versa. This bus dependency conflicts with multinomial logit's independence of irrelevant alternatives assumption. If we implement nested logit, however, this dependency is captured:
Your task now is to finish the NestedLogit class, which incorporates this logic. It should fit the choices bike, car, and bus using `sm.MNLogit` and the two different buses by `sm.Logit`.
```python
class NestedLogit():
def __init__(self, labels, variables):
self.labels = labels
self.variables = variables
# Makes a binary choice problem for the different busses
self.bus = self.labels[self.labels.isin([2, 3])]
self.bus.where(self.bus == 2, 0, inplace = True)
self.bus.where(self.bus == 0, 1, inplace = True)
self.variables_bus = self.variables[self.variables.index.isin(self.bus.index)]
def fit(self):
"""
Method that fits the predictions of the NestedLogit.
"""
# use logit to fit our function for the different busses
logit_bus = sm.Logit(self.bus, sm.add_constant(self.variables_bus))
self.model_bus = logit_bus.fit(disp=0) # disp=0 to silence the verbose function
# use Multinomial Logit to fit our function to choose the transport
labels_others = self.labels.where(self.labels != 3, 2)
MNL_others = sm.MNLogit(labels_others, sm.add_constant(self.variables))
self.model_others = MNL_others.fit(disp=0)
def predict(self, variables):
"""
Method that returns the predictions of the NestedLogit, based on the fit, shape (N, 4)
"""
predict_others = self.model_others.predict(variables)
predict_busses = self.model_bus.predict(variables)
prob_bus = predict_others[2]
prob_bus40 = prob_bus.multiply(predict_busses)
prob_bus240 = prob_bus.multiply(1 - predict_busses)
predict_others[2] = prob_bus40
predict_others[3] = prob_bus240
return predict_others
# Calls to NestedLogit
nlogit = NestedLogit(df['transport_id'], sm.add_constant(df[['income', 'distance', 'lazy']]))
nlogit.fit()
y_vals = nlogit.predict(sm.add_constant(df[['income', 'distance', 'lazy']]))
# How does nested logit compare to multinomial logit? You should get at least 170 predictions correct!
# (y_vals already holds the nested logit predictions computed above)
y_vals["choice"] = y_vals.idxmax(axis=1)
correct_predictions = np.where(y_vals["choice"] == df["transport_id"], 1, 0)
correct_predictions = correct_predictions.sum()
assert correct_predictions > 170, "Number of correct predictions should at least be 170"
```
```python
nlogit = NestedLogit(df['transport_id'], sm.add_constant(df[['income', 'distance', 'lazy']]))
nlogit.fit()
y_vals = nlogit.predict(sm.add_constant(df[['income', 'distance', 'lazy']]))
assert y_vals.shape == (250, 4)
```
The data is generated in the file generate_data.py. Can you design a dataset where NestedLogit outperforms multinomial logit? Why does nested logit not outperform multinomial logit here?
### Advanced models
For more complex logit models, such as mixed logit (which allows for taste variation), take a look at [PyLogit](https://github.com/timothyb0912/pylogit)!
|
[STATEMENT]
lemma (in PolynRg) scalar_times_pol_expr:"\<lbrakk>a \<in> carrier S; pol_coeff S c;
n \<le> fst c\<rbrakk> \<Longrightarrow>
a \<cdot>\<^sub>r (polyn_expr R X n c) = polyn_expr R X n (sp_cf S a c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a \<in> carrier S; pol_coeff S c; n \<le> fst c\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n c = polyn_expr R X n (sp_cf S a c)
[PROOF STEP]
apply (cases c)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>aa b. \<lbrakk>a \<in> carrier S; pol_coeff S c; n \<le> fst c; c = (aa, b)\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n c = polyn_expr R X n (sp_cf S a c)
[PROOF STEP]
apply (simp only:)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>aa b. \<lbrakk>a \<in> carrier S; pol_coeff S (aa, b); n \<le> fst (aa, b); c = (aa, b)\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (aa, b) = polyn_expr R X n (sp_cf S a (aa, b))
[PROOF STEP]
apply (rename_tac m g)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> fst (m, g); c = (m, g)\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (m, g) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (thin_tac "c = (m, g)")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> fst (m, g)\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (m, g) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (frule_tac c = "(m, g)" and k = n in polyn_expr_short, simp,
simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; polyn_expr R X n (m, g) = polyn_expr R X n (n, g)\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (n, g) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (frule scalar_times_polynTr[of a n],
drule_tac x = g in spec)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; polyn_expr R X n (m, g) = polyn_expr R X n (n, g); pol_coeff S (n, g) \<longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (n, g) = polyn_expr R X n (sp_cf S a (n, g))\<rbrakk> \<Longrightarrow> a \<cdot>\<^sub>r polyn_expr R X n (n, g) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (frule_tac c = "(m, g)" and n = n in pol_coeff_le, simp, simp,
thin_tac "polyn_expr R X n (m, g) = polyn_expr R X n (n, g)",
thin_tac "a \<cdot>\<^sub>r polyn_expr R X n (n, g) =
polyn_expr R X n (sp_cf S a (n, g))")
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; pol_coeff S (n, g)\<rbrakk> \<Longrightarrow> polyn_expr R X n (sp_cf S a (n, g)) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (frule_tac c = "(m, g)" and n = n in pol_coeff_le, simp, simp,
frule_tac c = "(n, g)" and a = a in sp_cf_pol_coeff, assumption,
frule_tac c = "(m, g)" and a = a in sp_cf_pol_coeff, assumption)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; pol_coeff S (n, g); pol_coeff S (sp_cf S a (n, g)); pol_coeff S (sp_cf S a (m, g))\<rbrakk> \<Longrightarrow> polyn_expr R X n (sp_cf S a (n, g)) = polyn_expr R X n (sp_cf S a (m, g))
[PROOF STEP]
apply (rule_tac c = "sp_cf S a (n, g)" and d = "sp_cf S a (m, g)" and
k = n in polyn_exprs_eq, assumption+)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; pol_coeff S (n, g); pol_coeff S (sp_cf S a (n, g)); pol_coeff S (sp_cf S a (m, g))\<rbrakk> \<Longrightarrow> n \<le> min (fst (sp_cf S a (n, g))) (fst (sp_cf S a (m, g)))
2. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; pol_coeff S (n, g); pol_coeff S (sp_cf S a (n, g)); pol_coeff S (sp_cf S a (m, g))\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. snd (sp_cf S a (n, g)) j = snd (sp_cf S a (m, g)) j
[PROOF STEP]
apply (simp add:sp_cf_len)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>m g. \<lbrakk>a \<in> carrier S; pol_coeff S (m, g); n \<le> m; pol_coeff S (n, g); pol_coeff S (sp_cf S a (n, g)); pol_coeff S (sp_cf S a (m, g))\<rbrakk> \<Longrightarrow> \<forall>j\<le>n. snd (sp_cf S a (n, g)) j = snd (sp_cf S a (m, g)) j
[PROOF STEP]
apply (rule allI, (subst sp_cf_def)+, simp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done |
(* Title: HOL/Auth/n_flash_nodata_cub_lemma_on_inv__140.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_flash_nodata_cub Protocol Case Study*}
theory n_flash_nodata_cub_lemma_on_inv__140 imports n_flash_nodata_cub_base
begin
section{*All lemmas on causal relation between inv__140 and some rule r*}
lemma n_PI_Remote_GetVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_Get src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_Get src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Remote_GetXVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_PI_Remote_GetX src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_PI_Remote_GetX src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_NakVsinv__140:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Nak dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Nak dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Nak__part__2Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Get__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Get__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_HeadVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Head N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''Dirty'')) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_PutVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_Get_Put_DirtyVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_Get_Put_Dirty src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_NakVsinv__140:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_Get_PutVsinv__140:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_Get_Put src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_Nak__part__2Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_Nak__part__2 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__0 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_GetX__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_GetX__part__1 src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_2Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_2 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_3Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_3 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_4Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_4 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_5Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_5 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_6Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_6 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_7_NODE_Get__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_7_NODE_Get__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_HomeVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_Home_NODE_GetVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_8_Home_NODE_Get N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8Vsinv__140:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_8_NODE_GetVsinv__140:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_8_NODE_Get N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__0Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__0 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_9__part__1Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_9__part__1 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10_HomeVsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_10_Home N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_10Vsinv__140:
assumes a1: "(\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src pp where a1:"src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_Local_GetX_PutX_10 N src pp" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>pp~=p__Inv4)\<or>(src~=p__Inv4\<and>pp=p__Inv4)\<or>(src~=p__Inv4\<and>pp~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>pp~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Local_GetX_PutX_11Vsinv__140:
assumes a1: "(\<exists> src. src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src where a1:"src\<le>N\<and>r=n_NI_Local_GetX_PutX_11 N src" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4)\<or>(src~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_NakVsinv__140:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_Nak src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_GetX_PutXVsinv__140:
assumes a1: "(\<exists> src dst. src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain src dst where a1:"src\<le>N\<and>dst\<le>N\<and>src~=dst\<and>r=n_NI_Remote_GetX_PutX src dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(src=p__Inv4\<and>dst~=p__Inv4)\<or>(src~=p__Inv4\<and>dst=p__Inv4)\<or>(src~=p__Inv4\<and>dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(src=p__Inv4\<and>dst~=p__Inv4)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''Proc'') dst) ''CacheState'')) (Const CACHE_E)) (eqn (IVar (Field (Field (Ident ''Sta'') ''Dir'') ''ShrVld'')) (Const true))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(src~=p__Inv4\<and>dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutVsinv__140:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Put dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_Put dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_NI_Remote_PutXVsinv__140:
assumes a1: "(\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_PutX dst)" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain dst where a1:"dst\<le>N\<and>r=n_NI_Remote_PutX dst" apply fastforce done
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "(dst=p__Inv4)\<or>(dst~=p__Inv4)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(dst=p__Inv4)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(dst~=p__Inv4)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__0Vsinv__140:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__0 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_PI_Local_GetX_PutX_HeadVld__part__1Vsinv__140:
assumes a1: "(r=n_PI_Local_GetX_PutX_HeadVld__part__1 N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "?P1 s"
proof(cut_tac a1 a2 , auto) qed
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_ShWbVsinv__140:
assumes a1: "(r=n_NI_ShWb N )" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a2 obtain p__Inv4 where a2:"p__Inv4\<le>N\<and>f=inv__140 p__Inv4" apply fastforce done
have "?P3 s"
apply (cut_tac a1 a2 , simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Field (Ident ''Sta'') ''UniMsg'') p__Inv4) ''Cmd'')) (Const UNI_PutX)) (eqn (IVar (Field (Field (Ident ''Sta'') ''ShWbMsg'') ''Cmd'')) (Const SHWB_ShWb))))" in exI, auto) done
then show "invHoldForRule s f r (invariants N)" by auto
qed
lemma n_NI_Remote_GetX_PutX_HomeVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_PutX_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__0Vsinv__140:
assumes a1: "r=n_PI_Local_GetX_PutX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_WbVsinv__140:
assumes a1: "r=n_NI_Wb " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_3Vsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_3 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_1Vsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_1 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__1Vsinv__140:
assumes a1: "r=n_PI_Local_GetX_GetX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_GetX__part__0Vsinv__140:
assumes a1: "r=n_PI_Local_GetX_GetX__part__0 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_ReplaceVsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_PI_Remote_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_ReplaceVsinv__140:
assumes a1: "r=n_PI_Local_Replace " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_existsVsinv__140:
assumes a1: "\<exists> src pp. src\<le>N\<and>pp\<le>N\<and>src~=pp\<and>r=n_NI_InvAck_exists src pp" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Remote_PutXVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_PI_Remote_PutX dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Put_HomeVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Put_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Inv dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_PutXVsinv__140:
assumes a1: "r=n_PI_Local_PutX " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_PutVsinv__140:
assumes a1: "r=n_PI_Local_Get_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_ReplaceVsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_Replace src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_GetX_Nak_HomeVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_GetX_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutXAcksDoneVsinv__140:
assumes a1: "r=n_NI_Local_PutXAcksDone " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_GetX_PutX__part__1Vsinv__140:
assumes a1: "r=n_PI_Local_GetX_PutX__part__1 " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Remote_Get_Nak_HomeVsinv__140:
assumes a1: "\<exists> dst. dst\<le>N\<and>r=n_NI_Remote_Get_Nak_Home dst" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_exists_HomeVsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_exists_Home src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Replace_HomeVsinv__140:
assumes a1: "r=n_NI_Replace_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Local_PutVsinv__140:
assumes a1: "r=n_NI_Local_Put " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_ClearVsinv__140:
assumes a1: "r=n_NI_Nak_Clear " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_PI_Local_Get_GetVsinv__140:
assumes a1: "r=n_PI_Local_Get_Get " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_Nak_HomeVsinv__140:
assumes a1: "r=n_NI_Nak_Home " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_InvAck_2Vsinv__140:
assumes a1: "\<exists> src. src\<le>N\<and>r=n_NI_InvAck_2 N src" and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_NI_FAckVsinv__140:
assumes a1: "r=n_NI_FAck " and
a2: "(\<exists> p__Inv4. p__Inv4\<le>N\<and>f=inv__140 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
module Main
import Syntax.Lexer
import Syntax.Tokens
import Error
import Loc
main : IO ()
main = case lex "(test \"test something\" 10 [1 2 3] {a b c d} (inside test \"kek\")) ;simple comment" of
Right res => putStr (concatMap ((++ "\n") . show) res)
Left _ => putStrLn "Error" |
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
# Cross-compatibility
#
# MAJOR FORWARD DECLARATION HACK
#
# the current module is loaded prior to paradigms.cache, but we rely on the ACache tag
# declared there. To get around this we create a function, which is evaluated on
# execution, to map ACache to a shorter name. This is necessary so that ObjId comparisons
# work at the tag level.
#
# NOTE: this cannot be done with
#
# _ACache := spiral.paradigms.cache.ACache
#
# because assignments are evaluated immediately, and paradigms.cache does not yet exist.
#
# NOTE: we do the same with the CacheSpec and AInplace
_ACache := () -> spiral.paradigms.cache.ACache;
_CacheDesc := () -> spiral.paradigms.cache.CacheDesc;
_AInplace := () -> spiral.paradigms.cache.AInplace;
_AExpRight := () -> spiral.paradigms.cache.AExpRight;
_AIO := () -> spiral.paradigms.common.AIO;
_ABuf := () -> spiral.paradigms.loops.ABuf;
#
## _divisorCacheTags
#
# takes the ACache(N, ...) with the largest N from 't' and
# tests whether the divisor pair 'd' would work well with the cache
#
# We assume a (W x I) (I x W) expansion, so the left side is
# strided. We also assume out-of-place expansion.
#
# At some point, support for an 'Inplace' tag will be added.
#
# the criteria for working well with cache are as follows:
#
# let m = d[1], n = d[2]
#
# LEFT side: kernel size 'm', stride 'n'
# RIGHT side: kernel size 'n', stride 1
#
# cache specs are [B,S,A,R]
# T is the datatype, e.g. float, double
#
# let E = B / sizeof(T)
#
# criteria:
#
# --> see the comments in the code
#
# NOTE: additions to support Inplace tag added, but changes incomplete.
_divisorCacheTags := function(d, t)
local taglist, N, tag, cs, m, n, elems, csize, cond, inplace;
taglist := t.getTag(_ACache());
# tagged for inplace?
inplace := t.getTag(_AInplace());
# if we don't have any ACache tags, accept this divisor pair.
if taglist = false then
return true;
fi;
# we need taglist to be a list
taglist := When(IsList(taglist), taglist, [taglist]);
# find the max cache level amongst the tags.
N := Maximum(List(taglist, e -> e.params[1]));
# use the first tag which matches it.
tag := Filtered(taglist, e -> e.params[1] = N)[1];
# extract the cache spec from the tag
cs := tag.params[2];
# paranoia
Constraint(ObjId(cs) = _CacheDesc());
# d[1] LEFT side expansion
# d[2] RIGHT side expansion
m := d[1]; n := d[2];
# cache sizes
elems := cs.blksize;
csize := elems * cs.nsets * cs.assoc;
# associativity must be at least 2 for outofplace, otherwise read and
# write overwrite each other
if inplace = false and cs.assoc < 2 then
return false;
fi;
# if any of these conditions are true -- we allow this split
cond := [];
# if the whole thing fits in cache, let it through
Add(cond, n * m < csize);
# if right side is larger than cache, left side only uses the assoc so
# size must be smaller than it.. (also accounting for read/write)
Add(cond, (n > elems*cs.nsets) and (m <= (cs.assoc / 2)));
# if right side fits in cache, left side strides partially into the cache,
# and the size constraint on 'm' is a function of 'n'
Add(cond, (n <= csize / 2) and (m <= (csize / (2 * n))));
#PrintLine(d, ": ", cond);
return ForAny(cond, e -> e);
end;
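#
# Illustrative numbers for the conditions above (the cache parameters are
# assumptions for this example only): with blksize = 8 elements, nsets = 64
# and assoc = 8 we get csize = 4096 elements. The pair d = [4, 2048] is
# accepted via condition 2 (2048 > 8*64 and 4 <= 8/2), while d = [64, 128]
# is rejected: 64*128 = 8192 is not < 4096, 128 is not > 8*64, and
# 64 > csize/(2*128) = 16.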
#
## _dropCacheTags
#
# drop the cache tag if the problem size fits entirely into cache.
#
_dropCacheTags := (t, size, inplace) ->
(ObjId(t) <> _ACache()) # let other tags through
or When(inplace = true, # inplace?
((t.params[2].csize) < size), # only let through ACache tags smaller than problem size
(t.params[2].csize/2 < size) # (div 2 accounts for rd/wr)
)
;
#
## _expandRight
#
_expandRight := function(d, t)
local tag, leftmax;
tag := t.getTag(_AExpRight());
# if the tag isn't present, pass through
if tag = false then
return true;
fi;
# get the limit on left side size, handle multiple tags
leftmax := When(IsList(tag),
Minimum(List(tag, e -> e.params[1])),
tag.params[1]
);
# if left expansion exceeds our max, don't allow this breakdown
return d[1] <= 2^leftmax;
end;
#
## _allowedExpRight
#
#
_allowedExpRight := (nt) -> let(
t := nt.getTag(_AExpRight()),
tt := When(IsList(t), t, [t]),
maxx := Minimum(List(tt, e -> e.params[1])),
minn := Maximum(List(tt, e -> When(IsBound(e.params[2]) and nt.params[1] > e.params[2], e.params[2], 0))), # optional 2nd param is minimum
# debugout := Chain(Print(nt.params[1], ": ", minn, "-", maxx, "\n"), 0),
[minn..maxx]
);
#
## _dropExpRightTags
#
# drop all expand-right tags at the smallest size.
#
# NOTE: this function returns TRUE if we should keep the tag, and FALSE if the
# tag is to be dropped.
#
_dropExpRightTags := (t, size) ->
ObjId(t) <> _AExpRight()
or (
size > 2^Minimum(t.params)
and not IsBound(t.params[3]) # the presence of a third param says "drop this tag asap"
)
;
#
#
## DivisorProds
#
# returns lists made up of divisors of N (DivisorsInt) which are <= M and whose product is N
#
Declare(_getdivisors);
DivisorProds := function(N, M)
local l, r;
# list of valid divisors.
l := Filtered(DivisorsInt(N), e -> e <= M and e <> 1);
r := _getdivisors(N, l, [1]);
# the last element is a 1, we don't want it in the output.
return List(r, e -> DropLast(e, 1));
end;
_getdivisors := function(N, list, prod)
local result, e, p;
p := Product(prod);
result := List(
Filtered(list, e -> e * p = N),
ee -> Concat([ee], prod)
);
for e in Filtered(list, i -> i * p < N) do
Append(result, _getdivisors(N, list, Concat([e], prod)));
od;
return result;
end;
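#
# Worked example (traced from the two functions above): DivisorProds(8, 4)
# keeps the divisors [2, 4] of 8 and returns its factorisations into such
# factors, i.e. [ [4, 2], [2, 2, 2], [2, 4] ] (order as produced by the
# recursion; treat it as a set of factorisations).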
_loop := function(p, i)
local idx, ii, a, c, size;
# paranoia, 'i' in range.
Constraint(i >=1 and i <= Length(p));
idx := [
XChain([0]),
GTPar,
GTVec,
XChain([1,0,2])
];
#
a := Product(p{[1..(i-1)]});
c := Product(p{[i+1..Length(p)]});
ii := When(a > 1, 1, 0) + When(c > 1, 2, 0) + 1;
size := [
[],
[a],
[c],
[a,c]
];
return [idx[ii], size[ii]];
end;
#
## WHT RULES ###############
#
NewRulesFor(WHT, rec(
#F WHT_BinSplit:
#F
#F same as WHT_GeneralSplit, but only allows for binary splits
#F
#F WHT_(2^k) =
#F WHT_(2^k1) tensor I_(2^(k-k1)) *
#F I_(2^(k-k2)) tensor WHT_(2^k2)
#F
#F We use this rule only when WHT_GeneralSplit is turned off.
#F
#F NOTE: propagates tags, handles expand-right tag.
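#F For instance, with k = 3 and k1 = 1 (so k2 = 2) the formula above reads
#F WHT_(2^3) = (WHT_(2^1) tensor I_(2^2)) * (I_(2^1) tensor WHT_(2^2)),
#F i.e. WHT_8 = (WHT_2 tensor I_4) (I_2 tensor WHT_4).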
WHT_BinSplit := rec (
info := "WHT_(2^k) -> (WHT_(2^k1) tensor I) (I tensor WHT_(2^k2))",
forTransposition := false,
minSize := 2,
maxRadix := 32,
applicable := (self, nt) >> nt.params[1] >= self.minSize,
requiredFirstTag := ANoTag,
children := (self, nt) >> List([1.. Minimum(self.maxRadix, nt.params[1]-1)],
i -> [ WHT(i), WHT(nt.params[1]-i) ]
),
apply := (nt, c, cnt) ->
Tensor(c[1], I(Rows(c[2])))
* Tensor(I(Rows(c[1])), c[2])
),
# handle the right expanded WHT.
WHT_ExpandRight := rec (
info := "WHT_(2^k) -> (WHT_(2^k1) tensor I) (I tensor WHT_(2^k2))",
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.params[1] >= self.minSize and nt.hasTag(_AExpRight()),
children := nt -> List(Intersection([1..nt.params[1]-1], _allowedExpRight(nt)),
i -> [
WHT(i).withTags(
Filtered(nt.getTags(), e ->
_dropExpRightTags(e, 2^i)
)
),
WHT(nt.params[1]-i).withTags(
Filtered(nt.getTags(), e ->
_dropExpRightTags(e, 2^(nt.params[1]-i))
)
)
]
),
apply := (nt, c, cnt) ->
Tensor(c[1], I(Rows(c[2])))
* Tensor(I(Rows(c[1])), c[2])
),
# top-level bin split rule (requires the ATopLevel tag) which inserts permutations that are
# linear on the bits into the ruletree.
# This rule expects ATopLevel param1 to be the size at which to stop
# propagating the ATopLevel tag, and param2 to be a cache specification
# of the form [E,S,A,R].
WHT_BinSplitB := rec (
info := "WHT_(2^k) -> (WHT_(2^k1) tensor I) (I tensor WHT_(2^k2))",
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.hasTag(ATopLevel) and Rows(nt) > self.minSize,
children := nt -> let(t := nt.getTag(ATopLevel).params[1],
List([1..(nt.params[1]-1)], i ->
When(i >= t or (nt.params[1]-i) >= t,
[ WHT(i).withTags([ATopLevel(t)]), WHT(nt.params[1]-i).withTags([ATopLevel(t)]) ],
[ WHT(i), WHT(nt.params[1]-i) ]
)
)
),
apply := (nt, c, cnt) ->
Tensor(c[1], I(Rows(c[2])))
* PushL(
CL(Rows(nt), Rows(c[2]), nt.getTag(ATopLevel).params[2]))
* PushR(
CL(Rows(nt), Rows(c[2]), nt.getTag(ATopLevel).params[2]).transpose())
* Tensor(I(Rows(c[1])), c[2]),
switch := true
),
WHT_BinSplitB_binloops := rec (
info := "WHT_(2^k) -> (WHT_(2^k1) tensor I) (I tensor WHT_(2^k2))",
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.hasTag(ATopLevel) and Rows(nt) > self.minSize,
children := nt -> let(t := nt.getTag(ATopLevel).params[1],
List([1..(nt.params[1]-1)], i ->
When(i >= t or (nt.params[1]-i) >= t,
[ WHT(i).withTags([ATopLevel(t)]), WHT(nt.params[1]-i).withTags([ATopLevel(t)]) ],
[ WHT(i), WHT(nt.params[1]-i) ]
)
)
),
apply := function(nt, c, cnt)
local a, b;
a := WHT_BinSplit_binloops.apply(nt, c, cnt);
b := CL(Rows(nt), Rows(c[2]), nt.getTag(ATopLevel).params[2]);
if b = I(Rows(nt)) then
return a;
else
return Grp(a._children[1] * b) * Grp(b.transpose() * a._children[2]);
fi;
end,
switch := true,
),
#######################################################################################################
# tSPL WHT rule
# tSPL WHT_(2^k) -> (WHT_(2^k1) tensor I) (I tensor WHT_(2^k2))
WHT_tSPL_BinSplit := rec (
forTransposition := false,
minSize := 2,
applicable := (self, nt) >>
nt.hasTags()
and nt.params[1] >= self.minSize
and not nt.hasTag(_AInplace()),
children := nt -> List([1..nt.params[1] - 1], i -> [ TTensor(WHT(i), WHT(nt.params[1]-i)).withTags(nt.getTags()) ]),
apply := (nt, c, cnt) -> c[1],
switch := true,
),
#F WHT_Base: WHT_1 = F_2
#F
WHT_tSPL_Base := rec(
switch := false,
applicable := (t) -> Rows(t) = 2 and t.hasTags(),
children := t -> [[ TTensorI(F(2), 1, AVec, AVec).withTags(t.getTags()) ]],
apply := (t, C, Nonterms) -> C[1]
),
#######################################################################################################
# tSPL Pease WHT rule
# Pease tSPL WHT_(2^k) -> \Prod((I tensor F_2)L)
WHT_tSPL_Pease := rec (
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.hasTags()
and nt.params[1] >= self.minSize
and IsBound(nt.firstTag().legal_kernel)
and ForAny(self.radices, i -> nt.firstTag().legal_kernel(2^i)),
radices := [1, 2, 3, 4, 5],
children := (self, nt) >>
let(
k := nt.params[1],
streamsize := Cond(
nt.hasTags() and IsBound(nt.firstTag().bs),
nt.firstTag().bs,
NULL
),
ap := Filtered(self.radices, n -> IsInt(k/n) and streamsize >= 2^n),
List(ap, i -> let(
r := i,
Cond(
IsInt(k/r) and nt.hasTags() and IsBound(nt.firstTag().bs)
and nt.firstTag().bs >= 2^r,
[ TICompose(var("j"), k/r, TTensorI(WHT(i), 2^(k-r), APar, AVec)).withTags(nt.getTags())],
[]
)
))
),
apply := (nt, c, cnt) -> c[1],
switch := false
),
#######################################################################################################
# tSPL Korn-Lambiotte WHT rule
WHT_tSPL_KornLambiotte := rec (
forTransposition := false,
minSize := 2,
applicable := (self, nt) >> nt.hasTags()
and nt.params[1] >= self.minSize
and ForAny(self.radices, i->IsInt(nt.params[1]/i)
and IsBound(nt.firstTag().legal_kernel)
and nt.firstTag().legal_kernel(2^i)
),
radices := [1, 2, 3, 4, 5],
children := (self, nt) >> let(
k := nt.params[1],
ap := Filtered(self.radices, n -> IsInt(k/n) and nt.firstTag().legal_kernel(2^n)),
List(ap, i ->
[ TICompose(var("j"), k/i,
TTensorI(WHT(i), 2^(k-i), AVec, APar),
nt.getTags()
)]
)
),
apply := (nt, c, cnt) -> c[1],
switch := false
),
# WHT GT rule. Direct copy of DFT_GT_CT minus the twiddles
#
# WHT_nm -> GT(WHT_n, ...) * GT(WHT_m, ...)
#
# supports the ACache tag.
#
WHT_GT_CT := rec(
switch := true,
maxSize := false,
minSize := false,
forTransposition := false,
applicable := (self, t) >> let(
n := Rows(t),
n > 2
and (self.maxSize=false or n <= self.maxSize)
and (self.minSize=false or n >= self.minSize)
and not IsPrime(n))
and not t.hasTag(_AInplace()),
children := (self, t) >> Map2(
# _divisorCacheTags determines if the split is allowed (according to the cache params)
Filtered(
DivisorPairs(Rows(t)),
d -> _divisorCacheTags(d, t) and _expandRight(d, t)
),
(m,n) -> [
GT(WHT(Log2Int(m)), XChain([0, 1]), XChain([0, 1]), [n]).withTags(
Filtered(t.getTags(), e ->
_dropCacheTags(e, m, false) and _dropExpRightTags(e, m)
)
),
GT(WHT(Log2Int(n)), XChain([1, 0]), XChain([1, 0]), [m]).withTags(
Filtered(t.getTags(), e ->
_dropCacheTags(e, n, false) and _dropExpRightTags(e, n)
)
)
]
),
apply := (self, t, C, Nonterms) >> C[1] * C[2]
),
#
## WHT_TopInplace
#
# the input and output strides must be the same, and intermediate
# arrays are *full length*, which means the code generated is not
# recursive.
#
# each 'stage' is <= kernel size.
#
WHT_TopInplace := rec(
switch := true,
maxSize := false,
minSize := false,
applicable := (self, t) >> let(
n := Rows(t),
t.hasTag(_AInplace())
and n > 2
and (self.maxSize=false or n <= self.maxSize)
and (self.minSize=false or n >= self.minSize)
and not IsPrime(n)),
children := meth(self, t)
local iptag, divs, wa, w, p, i, loopdata, newtags;
iptag := t.getTag(_AInplace());
divs := DivisorProds(
Rows(t),
When(iptag <> false, iptag.params[1], Rows(t))
);
wa := [];
newtags := Filtered(t.getTags(), e -> ObjId(e) <> _AInplace());
for p in divs do
w := [];
for i in [1..Length(p)] do
loopdata := _loop(p, i);
Add(w, Inplace(
GT(
WHT(Log2Int(p[i])),
loopdata[1],
loopdata[1],
loopdata[2]
).withTags(newtags)
));
od;
Add(wa, w);
od;
return wa;
end,
apply := (self, t, C, Nonterms) >> ApplyFunc(Compose, C)
),
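# Example of the children produced above (sizes chosen only for illustration):
# with Rows(t) = 16 and an AInplace(4) tag, DivisorProds(16, 4) yields
# factorisations such as [4, 4] and [2, 2, 4]; the [4, 4] factorisation becomes
# the child [ Inplace(GT(WHT(2), GTVec, GTVec, [4])), Inplace(GT(WHT(2), GTPar, GTPar, [4])) ]
# (with the remaining tags attached), which `apply` composes into a single
# non-recursive chain of full-length stages.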
#
## WHT_GT_Inplace
#
# standard Binsplit CT rule except in place, tags ignored and not
# propagated.
WHT_GT_Inplace := rec(
switch := true,
maxSize := false,
minSize := false,
applicable := (self, t) >> let(
n := Rows(t),
t.hasTag(_AInplace()) <> false
and n > 2
and not IsPrime(n)
),
children := (self, t) >> Map2(
DivisorPairs(Rows(t)),
(m,n) -> [
GT(WHT(Log2Int(m)), XChain([0, 1]), XChain([0, 1]), [n]).withTags(t.getTags()),
GT(WHT(Log2Int(n)), XChain([1, 0]), XChain([1, 0]), [m]).withTags(t.getTags())
]
),
apply := (self, t, C, nt) >> Inplace(ApplyFunc(Compose, C))
)
));
#
## GT-WHT RULES ########################
#
# rules for GT(WHT, ...)
# these rules are a copy of the rules for the DFT
NewRulesFor(GT, rec(
# copy and simplification of GT_DFT_Base2
# matches GT(WHT(n), ...) where n = 1
GT_WHT_Base := rec(
forTransposition := false,
switch := false,
applicable := (self, t) >> let(
rank := Length(t.params[4]), # rank is number of outer loops
rank = 0 # means no outer loops
and PatternMatch(t, [GT, WHT, @, @, @, @, @], empty_cx()) # t.rChildren(), used by patternmatch appends t.transposed and t.tags, so GT, with its 4 params, actually has 6.
and Rows(t.params[1])=2 # we're looking for a WHT of size 2
),
apply := (t, C, Nonterms) -> F(2)
),
# matches GT(WHT(n), ...) where n > 1
GT_WHT_Inplace := rec(
maxSize := false,
minSize := false,
minRank := 0,
maxRank := 3,
forTransposition := false,
switch := false,
applicable := (self, t) >> let(
rank := Length(t.params[4]),
wht := t.params[1],
t.getTag(_AInplace()) <> false
and rank >= self.minRank
and rank <= self.maxRank
and rank = 0 # MRT: we use NthLoop for rank > 0
and (self.maxSize=false or Rows(wht) <= self.maxSize)
and (self.minSize=false or Rows(wht) >= self.minSize)
and PatternMatch(t, [GT, WHT, XChain, XChain, @, @, @], empty_cx())
and WHT_GT_CT.applicable(wht)
),
children := (self, t) >> let(
wht := t.params[1],
g := t.params[2],
s := t.params[3],
loop_dims := t.params[4],
nloops := Length(loop_dims),
tags := t.getTags(),
Map2(
DivisorPairs(Rows(wht)),
(m,n) -> [
GT(
WHT(Log2Int(m)),
s.composeWith(XChain([0, 1])),
s.composeWith(XChain([0, 1])),
Concatenation([n], loop_dims)
).withTags(Filtered(t.getTags(), e ->
ObjId(e) <> _AInplace() or m > e.params[1]
)),
GT(
WHT(Log2Int(n)),
g.composeWith(XChain([1, 0])),
s.composeWith(XChain([1, 0])),
Concatenation([m], loop_dims)
).withTags(Filtered(t.getTags(), e ->
ObjId(e) <> _AInplace() or n > e.params[1]
))
]
)
),
apply := (self, t, C, Nonterms) >> Inplace(C[1] * C[2])
),
GT_WHT_CT := rec(
maxSize := false,
minSize := false,
minRank := 0,
maxRank := 3,
codeletSize := 32,
forTransposition := false,
switch := false,
applicable := (self, t) >> let(
rank := Length(t.params[4]),
wht := t.params[1],
# not t.hasTag(_ACache())
# and
not t.hasTag(_AInplace())
and not t.hasTag(_ABuf())
and rank >= self.minRank
and rank <= self.maxRank
# and When(rank>0, t.hasTags(), true)
and rank = 0 # MRT: we use NthLoop for rank > 0
and (self.maxSize=false or Rows(wht) <= self.maxSize)
and (self.minSize=false or Rows(wht) >= self.minSize)
and PatternMatch(t, [GT, WHT, XChain, XChain, @, @, @], empty_cx())
and WHT_GT_CT.applicable(wht)
),
children := (self, t) >> let(
wht := t.params[1],
g := t.params[2],
s := t.params[3],
loop_dims := t.params[4],
nloops := Length(loop_dims),
tags := t.getTags(),
inp := t.getTag(_AInplace()) <> false,
Map2(
Filtered(
DivisorPairs(Rows(wht)),
d -> ( d[1] <= self.codeletSize) and _divisorCacheTags(d, t) and _expandRight(d, t)
),
(m,n) -> [
GT(
WHT(Log2Int(m)),
s.composeWith(XChain([0, 1])),
s.composeWith(XChain([0, 1])),
Concatenation([n], loop_dims)
).withTags(
Filtered(t.getTags(), e ->
_dropCacheTags(e, m, inp) and _dropExpRightTags(e, m)
)
),
GT(
WHT(Log2Int(n)),
g.composeWith(XChain([1, 0])),
s.composeWith(XChain([1, 0])),
Concatenation([m], loop_dims)
).withTags(
Filtered(t.getTags(), e ->
_dropCacheTags(e, n, inp) and _dropExpRightTags(e, n)
)
)
]
)
),
apply := (self, t, C, Nonterms) >> C[1] * C[2]
),
GT_NthLoop_PassTags := rec(
switch := false,
applicable := t -> let(
rank := Length(t.params[4]),
rank > 0 and t.hasTags()
),
# restrict to innermost first (i.e. loop interchange)
# to search over loop orders use [1..nloops]
# Limit tag reduces the number of loop interchanges.
# it is useful when you don't want the number of potential
# ruletrees to explode, and you are not overly concerned
# with having access to all the potential ones.
freedoms := t -> let(
fr := [1..Length(t.params[4])],
When(t.hasTag(ALimitNthLoop),
[[1]],
[fr]
)
),
child := (t, fr) -> let(
# codeletSize := 32,
spl := t.params[1],
g := t.params[2],
s := t.params[3],
loopid := fr[1],
gt := GT(spl, g.without(loopid),
s.without(loopid), ListWithout(t.params[4], loopid)
),
[
gt.withTags(t.getTags()),
# Filtered(t.getTags(), e ->
# ObjId(e) <> _AInplace() or Rows(gt) >= e.params[1]
# )),
InfoNt(loopid)
]
),
apply := (t, C, Nonterms) -> let(
loopid := Nonterms[2].params[1],
dft := Nonterms[1].params[1],
g := t.params[2],
s := t.params[3],
loop_dims := t.params[4],
i := Ind(loop_dims[loopid]),
ISum(i, Scat(s.part(loopid, i, Rows(dft), loop_dims))
* C[1]
* Gath(g.part(loopid, i, Cols(dft), loop_dims)))
)
),
));
# (WHTmn x Ik) -> (WHTm x Ikn) (Im x (WHTn x Ik))
NewRulesFor(TTensorI, rec(
WHTxI_vecrec := rec(
switch := false,
forTransposition := false,
minKernel := false,
maxKernel := false,
applicable := (self, nt) >> nt.hasTags() and IsVecVec(nt.params) and
(not IsInt(self.minKernel) or nt.params[1].dims()[2] > self.minKernel) and
(not IsInt(self.maxKernel) or nt.params[1].dims()[2] <= self.maxKernel),
children := nt -> let(mn := nt.params[1].dims()[2], k := nt.params[2],
List(Flat(List(DivisorPairs(mn), i -> let(m := i[1], n := i[2],
[
TCompose([
TTensorI(WHT(Log2Int(m)), n*k, AVec, AVec),
TTensorI(TTensorI(WHT(Log2Int(n)), k, AVec, AVec), m, APar, APar)
]).withTags(nt.getTags()),
TCompose([
TTensorI(TTensorI(WHT(Log2Int(n)), k, AVec, AVec), m, APar, APar),
TTensorI(WHT(Log2Int(m)), n*k, AVec, AVec)
]).withTags(nt.getTags()),
])
)), j->[j])
),
apply := (nt, c, cnt) -> c[1]
)
));
|
Formal statement is: lemma space_Sup_eq_UN: "space (Sup M) = (\<Union>x\<in>M. space x)" Informal statement is: The space of the supremum of a family of measures is the union of the spaces of the measures in the family.
tmpdir/stub_shared.so: file format elf32-metag
DYNAMIC RELOCATION RECORDS
OFFSET TYPE VALUE
.* R_METAG_JMP_SLOT _far2
|
[GOAL]
C : Type u
inst✝ : Groupoid C
c d : C
f : c ⟶ d
γ : c ⟶ c
⊢ (fun δ => f ≫ δ ≫ inv f) ((fun γ => inv f ≫ γ ≫ f) γ) = γ
[PROOFSTEP]
simp_rw [Category.assoc, comp_inv, Category.comp_id, ← Category.assoc, comp_inv, Category.id_comp]
[GOAL]
C : Type u
inst✝ : Groupoid C
c d : C
f : c ⟶ d
δ : d ⟶ d
⊢ (fun γ => inv f ≫ γ ≫ f) ((fun δ => f ≫ δ ≫ inv f) δ) = δ
[PROOFSTEP]
simp_rw [Category.assoc, inv_comp, ← Category.assoc, inv_comp, Category.id_comp, Category.comp_id]
[GOAL]
C : Type u
inst✝ : Groupoid C
c d : C
f : c ⟶ d
γ₁ γ₂ : c ⟶ c
⊢ Equiv.toFun
{ toFun := fun γ => inv f ≫ γ ≫ f, invFun := fun δ => f ≫ δ ≫ inv f,
left_inv := (_ : ∀ (γ : c ⟶ c), (fun δ => f ≫ δ ≫ inv f) ((fun γ => inv f ≫ γ ≫ f) γ) = γ),
right_inv := (_ : ∀ (δ : d ⟶ d), (fun γ => inv f ≫ γ ≫ f) ((fun δ => f ≫ δ ≫ inv f) δ) = δ) }
(γ₁ * γ₂) =
Equiv.toFun
{ toFun := fun γ => inv f ≫ γ ≫ f, invFun := fun δ => f ≫ δ ≫ inv f,
left_inv := (_ : ∀ (γ : c ⟶ c), (fun δ => f ≫ δ ≫ inv f) ((fun γ => inv f ≫ γ ≫ f) γ) = γ),
right_inv := (_ : ∀ (δ : d ⟶ d), (fun γ => inv f ≫ γ ≫ f) ((fun δ => f ≫ δ ≫ inv f) δ) = δ) }
γ₁ *
Equiv.toFun
{ toFun := fun γ => inv f ≫ γ ≫ f, invFun := fun δ => f ≫ δ ≫ inv f,
left_inv := (_ : ∀ (γ : c ⟶ c), (fun δ => f ≫ δ ≫ inv f) ((fun γ => inv f ≫ γ ≫ f) γ) = γ),
right_inv := (_ : ∀ (δ : d ⟶ d), (fun γ => inv f ≫ γ ≫ f) ((fun δ => f ≫ δ ≫ inv f) δ) = δ) }
γ₂
[PROOFSTEP]
simp only [vertexGroup_mul, inv_eq_inv, Category.assoc, IsIso.hom_inv_id_assoc]
|
#' LibstableR: Fast and accurate evaluation, random number generation
#' and parameter estimation of skew stable distributions.
#'
#' LibstableR provides functions to work with skew stable distributions
#' in a fast and accurate way \[1]. It performs:
#'
#' * Fast and accurate evaluation of the probability density function (PDF) and cumulative distribution function (CDF).
#' * Fast and accurate evaluation of the quantile function (CDF^{-1}).
#' * Random numbers generation \[2].
#' * Skew stable parameter estimation with:
#' * McCulloch's method of quantiles \[3].
#' * Koutrouvellis' method based on the characteristic function \[4].
#' * Maximum likelihood estimation.
#' * Modified maximum likelihood estimation as described in \[1].
#'
#' The evaluation of the PDF and CDF is based on the formulas provided by John P Nolan in \[5].
#'
#' @md
#' @author Javier Royuela del Val, Federico Simmross Wattenberg and Carlos Alberola López;\cr\cr
#' Maintainer: Javier Royuela del Val <jroyval@@lpi.tel.uva.es>
#' @references
#' * \[1] Royuela-del-Val J, Simmross-Wattenberg F, Alberola López C (2017). libstable: Fast, Parallel and High-Precision Computation of alpha-stable Distributions in R, C/C++ and MATLAB. Journal of Statistical Software, 78(1), 1-25. doi:10.18637/jss.v078.i01
#' * \[2] Chambers JM, Mallows CL, Stuck BW (1976). A Method for Simulating Stable Random Variables. Journal of the American Statistical Association, 71(354), 340-344. doi:10.1080/01621459.1976.10480344
#' * \[3] McCulloch JH (1986). Simple Consistent Estimators of Stable Distribution Parameters. Communications in Statistics - Simulation and Computation, 15(4), 1109-1136. doi:10.1080/03610918608812563
#' * \[4] Koutrouvelis IA (1981). An Iterative Procedure for the Estimation of the Parameters of Stable Laws. Communications in Statistics - Simulation and Computation, 10(1), 17-28. doi:10.1080/03610918108812189
#' * \[5] Nolan JP (1997). Numerical Calculation of Stable Densities and Distribution Functions. Stochastic Models, 13(4), 759-774. doi:10.1080/15326349708807450
#' @name libstableR
#' @docType package
#' @keywords package
#' @useDynLib libstableR, .registration=TRUE
#' @importFrom Rcpp sourceCpp evalCpp
#' @examples
#' # Set alpha, beta, sigma and mu stable parameters in a vector
#' pars <- c(1.5, 0.9, 1, 0)
#'
#' # Generate an abscissas axis and probabilities vector
#' x <- seq(-5, 10, 0.05)
#' p <- seq(0.01, 0.99, 0.01)
#'
#' # Calculate pdf, cdf and quantiles
#' pdf <- stable_pdf(x, pars)
#' cdf <- stable_cdf(x, pars)
#' xq <- stable_q(p, pars)
#'
#' # Generate 300 random values
#' rnd <- stable_rnd(300, pars)
#'
#' # Estimate the parameters of the skew stable distribution given
#' # the generated sample:
#'
#' # Using the McCulloch's estimator:
#' pars_est_M <- stable_fit_init(rnd)
#'
#' # Using the Koutrouvelis' estimator:
#' pars_est_K <- stable_fit_koutrouvelis(rnd, pars_est_M)
#'
#' # Using maximum likelihood estimator, with McCulloch estimation
#' # as a starting point:
#' # pars_est_ML <- stable_fit_mle(rnd, pars_est_M)
#'
#' # Using modified maximum likelihood estimator (See [1]):
#' # pars_est_ML2 <- stable_fit_mle2d(rnd, pars_est_M)
NULL
|
import numpy as np
def get_contact_force(model: dict, fqp: np.ndarray, fpd: np.ndarray, flag_contact: np.ndarray):
fqp = fqp.flatten()
fpd = fpd.flatten()
nc = int(model["nc"])
nf = int(model["nf"])
fc = np.zeros((3*nc,))
fcqp = np.zeros((3*nc,))
fcpd = np.zeros((3*nc,))
k = 0
for i in range(nc):
if flag_contact[i] != 0:
if nf == 2: # Only x/z direction
fc[3*i:3*i+3] = np.array([fqp[k*nf] + fpd[k*nf], 0.0, fqp[k*nf+nf-1] + fpd[k*nf+nf-1]])
fcqp[3*i:3*i+3] = np.array([fqp[k*nf], 0.0, fqp[k*nf+nf-1]])
fcpd[3*i:3*i+3] = np.array([fpd[k*nf], 0.0, fpd[k*nf+nf-1]])
else:
fc[3*i:3*i+3] = fqp[k*nf:k*nf+nf] + fpd[k*nf:k*nf+nf]
fcqp[3*i:3*i+3] = fqp[k*nf:k*nf+nf]
fcpd[3*i:3*i+3] = fpd[k*nf:k*nf+nf]
k = k+1
fc = fc.reshape(-1, 1)
fcqp = fcqp.reshape(-1, 1)
fcpd = fcpd.reshape(-1, 1)
return fc, fcqp, fcpd
def get_contact_fcqp(fqp, flag_contact, nc, nf):
fqp = fqp.flatten()
fcqp = np.zeros((3*nc,))
k = 0
for i in range(nc):
if flag_contact[i] != 0:
if nf == 2: # Only x/z direction
fcqp[3*i:3*i+3] = np.array([fqp[k*nf], 0.0, fqp[k*nf+nf-1]])
else:
fcqp[3*i:3*i+3] = fqp[k*nf:k*nf+nf]
k = k+1
fcqp = fcqp.reshape(-1, 1)
return fcqp
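
# Minimal usage sketch (hypothetical values; the `model` dict is assumed here to
# need only the "nc" and "nf" keys read by get_contact_force above):
if __name__ == "__main__":
    model = {"nc": 2, "nf": 3}
    fqp = np.ones((6, 1))             # packed QP force components, nf per active contact
    fpd = np.zeros((6, 1))            # packed PD force components
    flag_contact = np.array([1, 0])   # only the first contact is active
    fc, fcqp, fcpd = get_contact_force(model, fqp, fpd, flag_contact)
    print(fc.T)  # rows 0..2 hold [1, 1, 1]; rows 3..5 stay zero for the inactive contact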
|