State Before: α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
⊢ f =ᶠ[ae μ] const α (⨍ (x : α), f x ∂μ) ∨ (⨍ (x : α), f x ∂μ) ∈ interior s State After: α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
⊢ f =ᶠ[ae μ] const α (⨍ (x : α), f x ∂μ) ∨ (⨍ (x : α), f x ∂μ) ∈ interior s Tactic: have : ∀ {t}, μ t ≠ 0 → (⨍ x in t, f x ∂μ) ∈ s := fun ht =>
hs.convex.set_average_mem hsc ht (measure_ne_top _ _) (ae_restrict_of_ae hfs) hfi.integrableOn State Before: α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
⊢ f =ᶠ[ae μ] const α (⨍ (x : α), f x ∂μ) ∨ (⨍ (x : α), f x ∂μ) ∈ interior s State After: α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
⊢ (∃ t, MeasurableSet t ∧ ↑↑μ t ≠ 0 ∧ ↑↑μ (tᶜ) ≠ 0 ∧ (⨍ (x : α) in t, f x ∂μ) ≠ ⨍ (x : α) in tᶜ, f x ∂μ) →
(⨍ (x : α), f x ∂μ) ∈ interior s Tactic: refine' (ae_eq_const_or_exists_average_ne_compl hfi).imp_right _ State Before: α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
⊢ (∃ t, MeasurableSet t ∧ ↑↑μ t ≠ 0 ∧ ↑↑μ (tᶜ) ≠ 0 ∧ (⨍ (x : α) in t, f x ∂μ) ≠ ⨍ (x : α) in tᶜ, f x ∂μ) →
(⨍ (x : α), f x ∂μ) ∈ interior s State After: case intro.intro.intro.intro
α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t✝ : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
t : Set α
hm : MeasurableSet t
h₀ : ↑↑μ t ≠ 0
h₀' : ↑↑μ (tᶜ) ≠ 0
hne : (⨍ (x : α) in t, f x ∂μ) ≠ ⨍ (x : α) in tᶜ, f x ∂μ
⊢ (⨍ (x : α), f x ∂μ) ∈ interior s Tactic: rintro ⟨t, hm, h₀, h₀', hne⟩ State Before: case intro.intro.intro.intro
α : Type u_1
E : Type u_2
F : Type ?u.2067161
m0 : MeasurableSpace α
inst✝⁶ : NormedAddCommGroup E
inst✝⁵ : NormedSpace ℝ E
inst✝⁴ : CompleteSpace E
inst✝³ : NormedAddCommGroup F
inst✝² : NormedSpace ℝ F
inst✝¹ : CompleteSpace F
μ : MeasureTheory.Measure α
s : Set E
t✝ : Set α
f : α → E
g : E → ℝ
C : ℝ
inst✝ : IsFiniteMeasure μ
hs : StrictConvex ℝ s
hsc : IsClosed s
hfs : ∀ᵐ (x : α) ∂μ, f x ∈ s
hfi : Integrable f
this : ∀ {t : Set α}, ↑↑μ t ≠ 0 → (⨍ (x : α) in t, f x ∂μ) ∈ s
t : Set α
hm : MeasurableSet t
h₀ : ↑↑μ t ≠ 0
h₀' : ↑↑μ (tᶜ) ≠ 0
hne : (⨍ (x : α) in t, f x ∂μ) ≠ ⨍ (x : α) in tᶜ, f x ∂μ
⊢ (⨍ (x : α), f x ∂μ) ∈ interior s State After: no goals Tactic: exact
hs.openSegment_subset (this h₀) (this h₀') hne
(average_mem_openSegment_compl_self hm.nullMeasurableSet h₀ h₀' hfi)
|
function Y = symmetrize(X,grps)
%SYMMETRIZE Symmetrize a tensor X in specified modes.
%
% Y = symmetrize(X) will symmetrize a tensor X with respect to all
% modes so that Y is symmetric with respect to any permutation of
% indices.
%
% Y = symmetrize(X,MODES) will symmetrize a tensor X with respect to the
% modes specified by the vector MODES of mode indices. The second
% argument may alternatively be a cell array of vectors of modes to,
% e.g., specify that it should be symmetric with respect to mode [1 3] as
% well as [2 4].
%
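%   Example (a hypothetical usage sketch, assuming the Tensor Toolbox is
%   on the MATLAB path):
%
%     X = tensor(rand(4,4,4));
%     Y = symmetrize(X);          %<-- symmetric in all modes
%     Z = symmetrize(X,[1 2]);    %<-- symmetric in modes 1 and 2 only
%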
% See also TENSOR, TENSOR/ISSYMMETRIC.
%
%MATLAB Tensor Toolbox.
%Copyright 2015, Sandia Corporation.
% This is the MATLAB Tensor Toolbox by T. Kolda, B. Bader, and others.
% http://www.sandia.gov/~tgkolda/TensorToolbox.
% Copyright (2015) Sandia Corporation. Under the terms of Contract
% DE-AC04-94AL85000, there is a non-exclusive license for use of this
% work by or on behalf of the U.S. Government. Export of this data may
% require a license from the United States Government.
% The full license terms can be found in the file LICENSE.txt
%T. Kolda, April 2011
n = ndims(X);
sz = size(X);
% Check that grps exists; if not, create it.
if ~exist('grps','var')
grps = {1:n};
end
% Check that grps is a cell array.
if ~iscell(grps)
grps = {grps};
end
if ~isnumeric(grps{1})
error('MODES must be numeric');
end
% Check tensor dimensions for compatibility with symmetrization
ngrps = length(grps);
for i = 1:ngrps
dims = grps{i};
for j = dims(2:end)
if sz(j) ~= sz(dims(1))
error('Dimension mismatch for symmetrization');
end
end
end
% Check for no overlap in the sets
for i = 1:ngrps
for j = i+1:ngrps
if ~isempty(intersect(grps{i},grps{j}))
error('Cannot have overlapping symmetries');
end
end
end
% Create the combinations for each symmetrized subset
combos = cell(ngrps,1);
for i = 1:ngrps
combos{i} = perms(grps{i});
end
% Create all the permutations to be averaged
total_perms = prod(cellfun(@length,combos));
sym_perms = repmat(1:n, total_perms, 1);
for i = 1:ngrps
ntimes = prod(cellfun(@length,combos(1:i-1)));
ncopies = prod(cellfun(@length,combos(i+1:end)));
nelems = length(combos{i});
idx = 1;
for j = 1:ntimes
for k = 1:nelems
for l = 1:ncopies
sym_perms(idx,grps{i}) = combos{i}(k,:);
idx = idx + 1;
end
end
end
end
% Create an average tensor
Y = tenzeros(size(X));
for i = 1:total_perms
Y = Y + permute(X,sym_perms(i,:));
end
Y = Y / total_perms;
% It's not *exactly* symmetric due to oddities in differently ordered
% summations and so on, so let's fix that.
% Idea borrowed from Gergana Bounova:
% http://www.mit.edu/~gerganaa/downloads/matlab/symmetrize.m
for i = 1:total_perms
Z = permute(Y,sym_perms(i,:));
Y.data(:) = max(Y.data(:),Z.data(:));
end
|
Formal statement is: lemma comp: "bounded_linear f \<Longrightarrow> bounded_linear g \<Longrightarrow> bounded_bilinear (\<lambda>x y. f x ** g y)" Informal statement is: If $f$ and $g$ are bounded linear maps, then the map $(x, y) \mapsto f(x) \cdot g(y)$ is a bounded bilinear map.
|
module Main
import Test.Golden
prefixFolder : String -> List String -> List String
prefixFolder pref paths = map ((pref ++ "/") ++) paths
tests : TestPool
tests = MkTestPool "Tests" [] Nothing ["uri"]
main : IO ()
main = runner [ tests ]
|
SUBROUTINE GDSTIN ( stream, filtst, filtar, ststop,
+ dispc, displ, iret )
C************************************************************************
C* GDSTIN *
C* *
C* This subroutine processes the STREAM input parameter. *
C* *
C* GDSTIN ( STREAM, FILTST, FILTAR, STSTOP, DISPC, DISPL, IRET ) *
C* *
C* Input parameters: *
C* STREAM CHAR* Input string *
C* *
C* Output parameters: *
C* FILTST REAL Filter to thin strmlines *
C* FILTAR REAL Filter to thin strmline arrows *
C* STSTOP REAL Controls stopping of strmline *
C* near another strmline *
C* DISPC REAL Controls stopping of strmline *
C* when wind speed is small*
C* DISPL REAL Controls pre-scaling of vectors *
C* IRET INTEGER Return code *
C** *
C* Log: *
C* D.W.Plummer/NCEP 5/96 *
C************************************************************************
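C
C  Example (derived from the defaults below): STREAM = 2/1/.5/2/1
C  yields FILTST = 0.5, FILTAR = 0.75, STSTOP = 0.25, DISPC = 1.34
C  and DISPL = 0.33.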
C
CHARACTER*(*) stream
REAL a(5)
C
CALL ST_RLST ( stream, '/', 1.0, 5, a, num, iret )
C
filtst = 1.0
if ( a(1) .gt. 0.0 ) filtst = 1.0 / a(1)
filtar = 1.5 * filtst
if ( a(2) .gt. 0.0 ) filtar = filtar / a(2)
ststop = 0.5
if ( a(3) .gt. 0.0 ) ststop = ststop * a(3)
dispc = 0.67
if ( a(4) .gt. 0.0 ) dispc = dispc * a(4)
displ = 0.33
if ( a(5) .gt. 0.0 ) displ = displ * a(5)
C
RETURN
END
|
[STATEMENT]
lemma ENF_offending_imp_not_P:
assumes "sinvar_all_edges_normal_form P" "F \<in> set_offending_flows G nP" "(e1, e2) \<in> F"
shows "\<not> P (nP e1) (nP e2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> P (nP e1) (nP e2)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
sinvar_all_edges_normal_form P
F \<in> set_offending_flows G nP
(e1, e2) \<in> F
goal (1 subgoal):
1. \<not> P (nP e1) (nP e2)
[PROOF STEP]
unfolding sinvar_all_edges_normal_form_def set_offending_flows_def is_offending_flows_min_set_def is_offending_flows_def
[PROOF STATE]
proof (prove)
using this:
\<forall>G nP. sinvar G nP = (\<forall>(e1, e2)\<in>edges G. P (nP e1) (nP e2))
F \<in> {F. F \<subseteq> edges G \<and> (\<not> sinvar G nP \<and> sinvar (delete_edges G F) nP) \<and> (\<forall>(e1, e2)\<in>F. \<not> sinvar (add_edge e1 e2 (delete_edges G F)) nP)}
(e1, e2) \<in> F
goal (1 subgoal):
1. \<not> P (nP e1) (nP e2)
[PROOF STEP]
by (fastforce simp: graph_ops)
|
theory TLS_cert_auto
imports
"../ESPLogic"
begin
role C
where "C =
[ Send ''1'' {| sLAV ''C'', sLN ''nc'', sLN ''sid'', sLN ''pc'' |}
, Recv ''2'' {| sLNV ''ns'', sLN ''sid'', sLNV ''ps'' |}
, Send ''3'' {| Enc {| sLC ''TT0'', sLN ''pms'' |} ( sPK ''S'' ),
Enc {| sLC ''TT1'',
Hash {| sLC ''TT2'', sLNV ''ns'', sLAV ''S'', sLN ''pms'' |}
|}
( sSK ''C'' ),
Enc {| sLC ''TT3'', sLN ''sid'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLN ''pms'', sLN ''nc'',
sLNV ''ns''
|},
sLN ''nc'', sLN ''pc'', sLAV ''C'', sLNV ''ns'', sLNV ''ps'',
sLAV ''S''
|}
( Hash {| sLC ''clientKey'', sLN ''nc'', sLNV ''ns'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLN ''pms'', sLN ''nc'',
sLNV ''ns''
|}
|}
)
|}
, Recv ''4'' ( Enc {| sLC ''TT3'', sLN ''sid'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLN ''pms'', sLN ''nc'',
sLNV ''ns''
|},
sLN ''nc'', sLN ''pc'', sLAV ''C'', sLNV ''ns'', sLNV ''ps'',
sLAV ''S''
|}
( Hash {| sLC ''serverKey'', sLN ''nc'', sLNV ''ns'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLN ''pms'', sLN ''nc'',
sLNV ''ns''
|}
|}
)
)
]"
role S
where "S =
[ Recv ''1'' {| sLAV ''C'', sLNV ''nc'', sLNV ''sid'', sLNV ''pc''
|}
, Send ''2'' {| sLN ''ns'', sLNV ''sid'', sLN ''ps'' |}
, Recv ''3'' {| Enc {| sLC ''TT0'', sLNV ''pms'' |} ( sPK ''S'' ),
Enc {| sLC ''TT1'',
Hash {| sLC ''TT2'', sLN ''ns'', sLAV ''S'', sLNV ''pms'' |}
|}
( sSK ''C'' ),
Enc {| sLC ''TT3'', sLNV ''sid'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLNV ''pms'', sLNV ''nc'',
sLN ''ns''
|},
sLNV ''nc'', sLNV ''pc'', sLAV ''C'', sLN ''ns'', sLN ''ps'',
sLAV ''S''
|}
( Hash {| sLC ''clientKey'', sLNV ''nc'', sLN ''ns'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLNV ''pms'', sLNV ''nc'',
sLN ''ns''
|}
|}
)
|}
, Send ''4'' ( Enc {| sLC ''TT3'', sLNV ''sid'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLNV ''pms'', sLNV ''nc'',
sLN ''ns''
|},
sLNV ''nc'', sLNV ''pc'', sLAV ''C'', sLN ''ns'', sLN ''ps'',
sLAV ''S''
|}
( Hash {| sLC ''serverKey'', sLNV ''nc'', sLN ''ns'',
Hash {| sLC ''TT4'', sLC ''PRF'', sLNV ''pms'', sLNV ''nc'',
sLN ''ns''
|}
|}
)
)
]"
protocol TLS
where "TLS = { C, S }"
locale atomic_TLS_state = atomic_state TLS
locale TLS_state = reachable_state TLS
lemma (in atomic_TLS_state) C_sk_S_sec:
assumes facts:
"roleMap r tid0 = Some C"
"s(|AV ''S'' tid0|) ~: Compromised"
"SK ( s(|AV ''S'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! " SK ( s(|AV ''S'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) C_pms_sec:
assumes facts:
"roleMap r tid0 = Some C"
"s(|AV ''S'' tid0|) ~: Compromised"
"LN ''pms'' tid0 : knows t"
shows "False"
using facts proof(sources! " LN ''pms'' tid0 ")
case C_3_pms note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_sk_S_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) C_PRF_sec:
assumes facts:
"roleMap r tid0 = Some C"
"s(|AV ''S'' tid0|) ~: Compromised"
"Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|} : knows t"
shows "False"
using facts proof(sources! "
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_pms_sec intro: event_predOrdI)
next
case C_3_hash note facts = facts this[simplified]
thus ?thesis proof(sources! "
Hash {| LC ''clientKey'', LN ''nc'' tid0, s(|NV ''ns'' tid0|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|}
|} ")
qed (insert facts, ((clarsimp, order?))+)?
next
case (S_4_hash tid1) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Hash {| LC ''serverKey'', LN ''nc'' tid0, LN ''ns'' tid1,
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
LN ''ns'' tid1
|}
|} ")
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) C_clientKey_sec:
assumes facts:
"roleMap r tid0 = Some C"
"s(|AV ''S'' tid0|) ~: Compromised"
"Hash {| LC ''clientKey'', LN ''nc'' tid0, s(|NV ''ns'' tid0|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|}
|} : knows t"
shows "False"
using facts proof(sources! "
Hash {| LC ''clientKey'', LN ''nc'' tid0, s(|NV ''ns'' tid0|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|}
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_PRF_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) C_serverKey_sec:
assumes facts:
"roleMap r tid0 = Some C"
"s(|AV ''S'' tid0|) ~: Compromised"
"Hash {| LC ''serverKey'', LN ''nc'' tid0, s(|NV ''ns'' tid0|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|}
|} : knows t"
shows "False"
using facts proof(sources! "
Hash {| LC ''serverKey'', LN ''nc'' tid0, s(|NV ''ns'' tid0|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid0, LN ''nc'' tid0,
s(|NV ''ns'' tid0|)
|}
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_PRF_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) S_sk_C_sec:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''C'' tid0|) ~: Compromised"
"SK ( s(|AV ''C'' tid0|) ) : knows t"
shows "False"
using facts proof(sources! " SK ( s(|AV ''C'' tid0|) ) ")
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) S_pms_sec:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''C'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, S_4) : steps t"
"s(|NV ''pms'' tid0|) : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT1'',
Hash {| LC ''TT2'', LN ''ns'' tid0, s(|AV ''S'' tid0|),
s(|NV ''pms'' tid0|)
|}
|}
( SK ( s(|AV ''C'' tid0|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: S_sk_C_sec intro: event_predOrdI)
next
case (C_3_enc tid1) note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_pms_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_TLS_state) S_PRF_sec:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''C'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, S_4) : steps t"
"Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|} : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: S_pms_sec intro: event_predOrdI)
next
case (C_3_hash tid1) note facts = facts this[simplified]
thus ?thesis proof(sources! "
Hash {| LC ''clientKey'', LN ''nc'' tid1, LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid1, LN ''nc'' tid1,
LN ''ns'' tid0
|}
|} ")
qed (insert facts, ((clarsimp, order?))+)?
next
case S_4_hash note facts = facts this[simplified]
thus ?thesis proof(sources! "
Hash {| LC ''serverKey'', s(|NV ''nc'' tid0|), LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|}
|} ")
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_TLS_state) S_clientKey_sec:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''C'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, S_4) : steps t"
"Hash {| LC ''clientKey'', s(|NV ''nc'' tid0|), LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|}
|} : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Hash {| LC ''clientKey'', s(|NV ''nc'' tid0|), LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|}
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: S_PRF_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_TLS_state) S_serverKey_sec:
assumes facts:
"roleMap r tid0 = Some S"
"s(|AV ''C'' tid0|) ~: Compromised"
"s(|AV ''S'' tid0|) ~: Compromised"
"(tid0, S_4) : steps t"
"Hash {| LC ''serverKey'', s(|NV ''nc'' tid0|), LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|}
|} : knows t"
shows "False"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Hash {| LC ''serverKey'', s(|NV ''nc'' tid0|), LN ''ns'' tid0,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid0|),
s(|NV ''nc'' tid0|), LN ''ns'' tid0
|}
|} ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: S_PRF_sec intro: event_predOrdI)
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_TLS_state) nc_first_send:
assumes facts:
"roleMap r tid1 = Some C"
"LN ''nc'' tid1 : knows t"
shows "predOrd t (St(tid1, C_1)) (Ln(LN ''nc'' tid1))"
using facts proof(sources! " LN ''nc'' tid1 ")
case C_1_nc note facts = facts this[simplified]
thus ?thesis by force
next
case C_3_nc note facts = facts this[simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) ns_first_send:
assumes facts:
"roleMap r tid1 = Some S"
"LN ''ns'' tid1 : knows t"
shows "predOrd t (St(tid1, S_2)) (Ln(LN ''ns'' tid1))"
using facts proof(sources! " LN ''ns'' tid1 ")
case S_2_ns note facts = facts this[simplified]
thus ?thesis by force
next
case S_4_ns note facts = facts this[simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
lemma (in atomic_TLS_state) C_ni_synch:
assumes facts:
"roleMap r tid1 = Some C"
"s(|AV ''C'' tid1|) ~: Compromised"
"s(|AV ''S'' tid1|) ~: Compromised"
"(tid1, C_4) : steps t"
shows
"? tid2.
roleMap r tid2 = Some S &
s(|AV ''C'' tid2|) = s(|AV ''C'' tid1|) &
s(|AV ''S'' tid2|) = s(|AV ''S'' tid1|) &
s(|NV ''nc'' tid2|) = LN ''nc'' tid1 &
s(|NV ''ns'' tid1|) = LN ''ns'' tid2 &
s(|NV ''pc'' tid2|) = LN ''pc'' tid1 &
s(|NV ''pms'' tid2|) = LN ''pms'' tid1 &
s(|NV ''ps'' tid1|) = LN ''ps'' tid2 &
s(|NV ''sid'' tid2|) = LN ''sid'' tid1 &
predOrd t (St(tid1, C_1)) (St(tid2, S_1)) &
predOrd t (St(tid1, C_3)) (St(tid2, S_3)) &
predOrd t (St(tid1, C_2)) (St(tid1, C_3)) &
predOrd t (St(tid2, S_2)) (St(tid1, C_2)) &
predOrd t (St(tid2, S_4)) (St(tid1, C_4)) &
predOrd t (St(tid2, S_1)) (St(tid2, S_2)) &
predOrd t (St(tid2, S_3)) (St(tid2, S_4))"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT3'', LN ''sid'' tid1,
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid1, LN ''nc'' tid1,
s(|NV ''ns'' tid1|)
|},
LN ''nc'' tid1, LN ''pc'' tid1, s(|AV ''C'' tid1|),
s(|NV ''ns'' tid1|), s(|NV ''ps'' tid1|), s(|AV ''S'' tid1|)
|}
( Hash {| LC ''serverKey'', LN ''nc'' tid1, s(|NV ''ns'' tid1|),
Hash {| LC ''TT4'', LC ''PRF'', LN ''pms'' tid1, LN ''nc'' tid1,
s(|NV ''ns'' tid1|)
|}
|}
) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_PRF_sec intro: event_predOrdI)
next
case (S_4_enc tid2) note facts = facts this[simplified]
have f1: "roleMap r tid1 = Some C" using facts by (auto intro: event_predOrdI)
have f2: "LN ''nc'' tid1 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts nc_first_send[OF f1 f2, simplified]
have f1: "roleMap r tid2 = Some S" using facts by (auto intro: event_predOrdI)
have f2: "LN ''ns'' tid2 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts ns_first_send[OF f1 f2, simplified]
thus ?thesis proof(sources! "
Enc {| LC ''TT0'', LN ''pms'' tid1 |}
( PK ( s(|AV ''S'' tid1|) ) ) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: C_pms_sec intro: event_predOrdI)
next
case C_3_enc note facts = facts this[simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in atomic_TLS_state) S_ni_synch:
assumes facts:
"roleMap r tid2 = Some S"
"s(|AV ''C'' tid2|) ~: Compromised"
"s(|AV ''S'' tid2|) ~: Compromised"
"(tid2, S_4) : steps t"
shows
"? tid1.
roleMap r tid1 = Some C &
s(|AV ''C'' tid2|) = s(|AV ''C'' tid1|) &
s(|AV ''S'' tid2|) = s(|AV ''S'' tid1|) &
s(|NV ''nc'' tid2|) = LN ''nc'' tid1 &
s(|NV ''ns'' tid1|) = LN ''ns'' tid2 &
s(|NV ''pc'' tid2|) = LN ''pc'' tid1 &
s(|NV ''pms'' tid2|) = LN ''pms'' tid1 &
s(|NV ''ps'' tid1|) = LN ''ps'' tid2 &
s(|NV ''sid'' tid2|) = LN ''sid'' tid1 &
predOrd t (St(tid1, C_1)) (St(tid2, S_1)) &
predOrd t (St(tid1, C_3)) (St(tid2, S_3)) &
predOrd t (St(tid1, C_2)) (St(tid1, C_3)) &
predOrd t (St(tid2, S_2)) (St(tid1, C_2)) &
predOrd t (St(tid2, S_1)) (St(tid2, S_2)) &
predOrd t (St(tid2, S_3)) (St(tid2, S_4))"
proof -
note_prefix_closed facts = facts note facts = this
thus ?thesis proof(sources! "
Enc {| LC ''TT3'', s(|NV ''sid'' tid2|),
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid2|),
s(|NV ''nc'' tid2|), LN ''ns'' tid2
|},
s(|NV ''nc'' tid2|), s(|NV ''pc'' tid2|), s(|AV ''C'' tid2|),
LN ''ns'' tid2, LN ''ps'' tid2, s(|AV ''S'' tid2|)
|}
( Hash {| LC ''clientKey'', s(|NV ''nc'' tid2|), LN ''ns'' tid2,
Hash {| LC ''TT4'', LC ''PRF'', s(|NV ''pms'' tid2|),
s(|NV ''nc'' tid2|), LN ''ns'' tid2
|}
|}
) ")
case fake note facts = facts this[simplified]
thus ?thesis by (fastsimp dest: S_PRF_sec intro: event_predOrdI)
next
case (C_3_enc tid3) note facts = facts this[simplified]
have f1: "roleMap r tid3 = Some C" using facts by (auto intro: event_predOrdI)
have f2: "LN ''nc'' tid3 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts nc_first_send[OF f1 f2, simplified]
have f1: "roleMap r tid2 = Some S" using facts by (auto intro: event_predOrdI)
have f2: "LN ''ns'' tid2 : knows t" using facts by (auto intro: event_predOrdI)
note facts = facts ns_first_send[OF f1 f2, simplified]
thus ?thesis by force
qed (insert facts, ((clarsimp, order?))+)?
qed
lemma (in TLS_state) weak_atomicity:
"complete (t,r,s) atomicAnn"
proof (cases rule: complete_atomicAnnI[completeness_cases_rule])
case (C_2_ns t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (C_2_ps t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (S_1_nc t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (S_1_pc t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (S_1_sid t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
by (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps)
next
case (S_3_pms t r s tid0 \<alpha>) note facts = this
then interpret state: atomic_state TLS t r s
by unfold_locales assumption+
let ?s' = "extendS s \<alpha>"
show ?case using facts
proof(sources! "
Enc {| LC ''TT0'', ?s'(|NV ''pms'' tid0|) |}
( PK ( ?s'(|AV ''S'' tid0|) ) ) ")
qed (insert facts, ((clarsimp, order?) | (fastsimp simp: atomicAnn_def dest: state.extract_knows_hyps))+)?
qed
end
|
State Before: V : Type u
inst✝ : Quiver V
a✝¹ b✝¹ c✝ d a b c x✝ : V
p : Path a b
q : Path b c
b✝ : V
r : Path c b✝
a✝ : b✝ ⟶ x✝
⊢ comp (comp p q) (cons r a✝) = comp p (comp q (cons r a✝)) State After: no goals Tactic: rw [comp_cons, comp_cons, comp_cons, comp_assoc p q r]
|
<a href="https://www.bigdatauniversity.com"></a>
<h1><center>Non Linear Regression Analysis</center></h1>
If the data shows a curvy trend, then linear regression will not produce very accurate results when compared to a non-linear regression because, as the name implies, linear regression presumes that the data is linear.
Let's learn about non-linear regressions and work through an example in Python. In this notebook, we fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014.
<h2 id="importing_libraries">Importing required libraries</h2>
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Though linear regression is very good at solving many problems, it cannot be used for all datasets. First, recall how linear regression models a dataset: it models a linear relation between a dependent variable y and an independent variable x. It has a simple equation of degree 1, for example y = $2x$ + 3.
```python
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 2*(x) + 3
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
#plt.figure(figsize=(8,6))
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
Non-linear regressions model a relationship between independent variables $x$ and a dependent variable $y$ through a non-linear function. Essentially, any relationship that is not linear can be termed non-linear, and it is usually represented by a polynomial of degree $k$ (the maximum power of $x$).
$$ \ y = a x^3 + b x^2 + c x + d \ $$
Non-linear functions can have elements like exponentials, logarithms, fractions, and others. For example: $$ y = \log(x)$$
Or even, more complicated such as :
$$ y = \log(a x^3 + b x^2 + c x + d)$$
Let's take a look at a cubic function's graph.
```python
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
As you can see, this function has $x^3$ and $x^2$ as independent variables. Also, the graphic of this function is not a straight line over the 2D plane. So this is a non-linear function.
Some other types of non-linear functions are:
### Quadratic
$$ Y = X^2 $$
```python
x = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
y = np.power(x,2)
y_noise = 2 * np.random.normal(size=x.size)
ydata = y + y_noise
plt.plot(x, ydata, 'bo')
plt.plot(x,y, 'r')
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
### Exponential
An exponential function with base $c$ is defined by $$ Y = a + b c^X$$ where $b \ne 0$, $c > 0$, $c \ne 1$, and $X$ is any real number. The base, $c$, is constant and the exponent, $X$, is a variable.
```python
X = np.arange(-5.0, 5.0, 0.1)
##You can adjust the slope and intercept to verify the changes in the graph
Y= np.exp(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
### Logarithmic
The response $y$ is a result of applying a logarithmic map from the input $x$'s to the output variable $y$. It is one of the simplest forms of __log()__: i.e. $$ y = \log(x)$$
Please consider that instead of $x$, we can use $X$, which can be a polynomial representation of the $x$'s. In general form it would be written as
\begin{equation}
y = \log(X)
\end{equation}
```python
X = np.arange(0.1, 5.0, 0.1)  # log is undefined for x <= 0, so start above zero
Y = np.log(X)
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
### Sigmoidal/Logistic
$$ Y = a + \frac{b}{1+ c^{(X-d)}}$$
```python
X = np.arange(-5.0, 5.0, 0.1)
Y = 1-4/(1+np.power(3, X-2))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
<a id="ref2"></a>
# Non-Linear Regression example
For an example, we're going to try to fit a non-linear model to the datapoints corresponding to China's GDP from 1960 to 2014. We download a dataset with two columns: the first, a year between 1960 and 2014; the second, China's corresponding annual gross domestic product in US dollars for that year.
```python
import numpy as np
import pandas as pd
#downloading dataset
!wget -nv -O china_gdp.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/china_gdp.csv
df = pd.read_csv("china_gdp.csv")
df.head(10)
```
__Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
### Plotting the Dataset ###
This is what the datapoints look like. It kind of looks like either a logistic or an exponential function. The growth starts off slow, then from 2005 onward it becomes very significant, and finally it decelerates slightly in the 2010s.
```python
plt.figure(figsize=(8,5))
x_data, y_data = (df["Year"].values, df["Value"].values)
plt.plot(x_data, y_data, 'ro')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
```
### Choosing a model ###
From an initial look at the plot, we determine that the logistic function could be a good approximation, since it has the property of starting with slow growth, increasing growth in the middle, and decreasing again at the end, as illustrated below:
```python
X = np.arange(-5.0, 5.0, 0.1)
Y = 1.0 / (1.0 + np.exp(-X))
plt.plot(X,Y)
plt.ylabel('Dependent Variable')
plt.xlabel('Independent Variable')
plt.show()
```
The formula for the logistic function is the following:
$$ \hat{Y} = \frac{1}{1+e^{-\beta_1(X-\beta_2)}}$$
$\beta_1$: Controls the curve's steepness,
$\beta_2$: Slides the curve on the x-axis.
### Building The Model ###
Now, let's build our regression model and initialize its parameters.
```python
def sigmoid(x, Beta_1, Beta_2):
y = 1 / (1 + np.exp(-Beta_1*(x-Beta_2)))
return y
```
Let's look at a sample sigmoid curve that might fit the data:
```python
beta_1 = 0.10
beta_2 = 1990.0
#logistic function
Y_pred = sigmoid(x_data, beta_1 , beta_2)
#plot initial prediction against datapoints
plt.plot(x_data, Y_pred*15000000000000.)
plt.plot(x_data, y_data, 'ro')
```
Our task here is to find the best parameters for our model. Let's first normalize our x and y:
```python
# Lets normalize our data
xdata =x_data/max(x_data)
ydata =y_data/max(y_data)
```
#### How do we find the best parameters for our fit line?
We can use __curve_fit__, which uses non-linear least squares to fit our sigmoid function to the data: it finds optimal values for the parameters so that the sum of the squared residuals of sigmoid(xdata, *popt) - ydata is minimized.
popt contains our optimized parameters.
```python
from scipy.optimize import curve_fit
popt, pcov = curve_fit(sigmoid, xdata, ydata)
#print the final parameters
print(" beta_1 = %f, beta_2 = %f" % (popt[0], popt[1]))
```
Now we plot our resulting regression model.
```python
x = np.linspace(1960, 2015, 55)
x = x/max(x)
plt.figure(figsize=(8,5))
y = sigmoid(x, *popt)
plt.plot(xdata, ydata, 'ro', label='data')
plt.plot(x,y, linewidth=3.0, label='fit')
plt.legend(loc='best')
plt.ylabel('GDP')
plt.xlabel('Year')
plt.show()
```
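Note that the fitted curve above is plotted in normalized units. Here is a minimal sketch (assuming the same max-normalization of `x_data` and `y_data` applied earlier) of how a prediction can be mapped back to the original GDP scale:
```python
# Predict GDP for a given year with the fitted parameters (popt),
# undoing the max-normalization that was applied before fitting.
year = 2015
x_norm = year / max(x_data)                     # same scaling as xdata
gdp_pred = sigmoid(x_norm, *popt) * max(y_data) # back to original units
print("Predicted GDP for %d: %.3e USD" % (year, gdp_pred))
```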
## Practice
Can you calculate the accuracy of our model?
```python
# write your code here
```
Double-click __here__ for the solution.
<!-- Your answer is below:
# split data into train/test
msk = np.random.rand(len(df)) < 0.8
train_x = xdata[msk]
test_x = xdata[~msk]
train_y = ydata[msk]
test_y = ydata[~msk]
# build the model using train set
popt, pcov = curve_fit(sigmoid, train_x, train_y)
# predict using test set
y_hat = sigmoid(test_x, *popt)
# evaluation
print("Mean absolute error: %.2f" % np.mean(np.absolute(y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((y_hat - test_y) ** 2))
from sklearn.metrics import r2_score
print("R2-score: %.2f" % r2_score(y_hat , test_y) )
-->
<h2>Want to learn more?</h2>
IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
<h3>Thanks for completing this lesson!</h3>
<h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a></h4>
<p><a href="https://ca.linkedin.com/in/saeedaghabozorgi">Saeed Aghabozorgi</a>, PhD, is a Data Scientist in IBM with a track record of developing enterprise-level applications that substantially increase clients’ ability to turn data into actionable knowledge. He is a researcher in the data mining field and an expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
<hr>
<p>Copyright © 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
|
//
// Created by Simon Schreiberhuber on 01.04.15.
// Copyright (c) 2015 Simon Schreiberhuber. All rights reserved.
//
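// Example invocation (hypothetical binary name and input path; the options
// are registered in main() below):
//   ./renderFromMesh -i model.ply -o /tmp/rendered_pointclouds -s 1 -r 3.0 -v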
#include <stdio.h>
#include <boost/filesystem.hpp>
#include <iostream>
#include <limits>
#include <opencv2/opencv.hpp>
#include <sstream>
#include <string>
#include <assimp/postprocess.h>
#include <assimp/scene.h>
#include <assimp/Importer.hpp>
#include <v4r/io/filesystem.h>
#include <v4r/rendering/depthmapRenderer.h>
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <glog/logging.h>
#include <boost/algorithm/string.hpp>
#include <boost/format.hpp>
#include <boost/program_options.hpp>
namespace po = boost::program_options;
namespace bf = boost::filesystem;
int main(int argc, const char *argv[]) {
bf::path input;
bf::path out_dir = bf::path("tmp") / bf::path("rendered_pointclouds");
bool visualize = false;
bool upperHemisphere = false;
bool autoscale = false;
bool createNormals = false;
size_t subdivisions = 0, width = 640, height = 480;
float radius = 3.f, fx = 535.4f, fy = 539.2f, cx = 320.1f, cy = 247.6f;
google::InitGoogleLogging(argv[0]);
po::options_description desc(
"Depth-map and point cloud Rendering from mesh file\n======================================\n**Allowed options");
desc.add_options()("help,h", "produce help message");
desc.add_options()("input,i", po::value<bf::path>(&input)->required(), "input mesh file");
desc.add_options()("output,o", po::value<bf::path>(&out_dir)->default_value("/tmp/rendered_pointclouds/"),
"output directory to store the point cloud (.pcd) file");
desc.add_options()("subdivisions,s", po::value<size_t>(&subdivisions)->default_value(subdivisions),
"defines the number of subdivsions used for rendering");
desc.add_options()("autoscale", po::bool_switch(&autoscale),
"scales the model into the unit sphere and translates it to the origin");
desc.add_options()("northHemisphere,n", po::bool_switch(&upperHemisphere),
"only renders the objects from views of the upper hemisphere");
desc.add_options()("radius,r",
po::value<float>(&radius)->default_value(radius, boost::str(boost::format("%.2e") % radius)),
"defines the radius used for rendering");
desc.add_options()("width", po::value<size_t>(&width)->default_value(width), "defines the image width");
desc.add_options()("normals", po::bool_switch(&createNormals), "Creates a pointcloud that also contains the normals");
desc.add_options()("width", po::value<size_t>(&width)->default_value(width), "defines the image width")(
"height", po::value<size_t>(&height)->default_value(height), "defines the image height");
desc.add_options()("fx", po::value<float>(&fx)->default_value(fx, boost::str(boost::format("%.2e") % fx)),
"defines the focal length in x direction used for rendering");
desc.add_options()("fy", po::value<float>(&fy)->default_value(fy, boost::str(boost::format("%.2e") % fy)),
"defines the focal length in y direction used for rendering");
desc.add_options()("cx", po::value<float>(&cx)->default_value(cx, boost::str(boost::format("%.2e") % cx)),
"defines the central point of projection in x direction used for rendering");
desc.add_options()("cy", po::value<float>(&cy)->default_value(cy, boost::str(boost::format("%.2e") % cy)),
"defines the central point of projection in y direction used for rendering");
desc.add_options()("visualize,v", po::bool_switch(&visualize), "visualize the rendered depth and color map");
po::positional_options_description p;
p.add("input", 1);
po::variables_map vm;
po::store(po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
if (vm.count("help")) {
std::cout << desc << std::endl;
return 0;
}
try {
po::notify(vm);
} catch (std::exception &e) {
std::cerr << "Error: " << e.what() << std::endl << std::endl << desc << std::endl;
return 1;
}
CHECK((cx < width) && (cy < height) && (cx > 0) && (cy > 0)) << "Parameters not valid!";
v4r::DepthmapRenderer renderer(width, height);
renderer.setIntrinsics(fx, fy, cx, cy);
v4r::DepthmapRendererModel model(input.string(), "", autoscale);
if (model.hasColor() || model.hasTexture())
LOG(INFO) << "Model file has color.";
else
LOG(INFO) << "Model file has no color.";
renderer.setModel(&model);
std::vector<Eigen::Vector3f> sphere = renderer.createSphere(radius, subdivisions);
if (upperHemisphere) {
std::vector<Eigen::Vector3f> upper;
for (size_t i = 0; i < sphere.size(); i++) {
if (sphere[i][2] > 0) {
upper.push_back(sphere[i]);
}
}
sphere = upper;
}
LOG(INFO) << "Rendering file " << input;
if (!sphere.empty())
v4r::io::createDirIfNotExist(out_dir);
for (size_t i = 0; i < sphere.size(); i++) {
// get point from list
const Eigen::Vector3f &point = sphere[i];
// get a camera pose looking at the center:
const Eigen::Matrix4f &orientation = renderer.getPoseLookingToCenterFrom(point);
renderer.setCamPose(orientation);
float visible;
cv::Mat color;
cv::Mat normal;
cv::Mat depthmap = renderer.renderDepthmap(visible, color, normal);
// create and save the according pcd files
std::stringstream ss;
ss << "cloud_" << i << ".pcd";
bf::path output_fn = out_dir / ss.str();
if (model.hasColor() || model.hasTexture()) {
if (createNormals) {
const pcl::PointCloud<pcl::PointXYZRGBNormal> cloud = renderer.renderPointcloudColorNormal(visible);
pcl::io::savePCDFileBinaryCompressed(output_fn.string(), cloud);
} else {
const pcl::PointCloud<pcl::PointXYZRGB> cloud = renderer.renderPointcloudColor(visible);
pcl::io::savePCDFileBinaryCompressed(output_fn.string(), cloud);
}
if (visualize)
cv::imshow("color", color);
} else {
const pcl::PointCloud<pcl::PointXYZ> cloud = renderer.renderPointcloud(visible);
pcl::io::savePCDFileBinaryCompressed(output_fn.string(), cloud);
}
LOG(INFO) << "Saved data points to " << output_fn.string() << ".";
if (visualize) {
LOG(INFO) << visible << "% visible.";
cv::imshow("depthmap", depthmap * 0.25);
cv::imshow("normal", normal * 0.5 + 0.5);
cv::waitKey();
}
}
return 0;
}
|
State Before: x : PGame
⊢ -x < 0 ↔ 0 < x State After: no goals Tactic: rw [neg_lt_iff, neg_zero]
|
From compcert Require Export Clightdefs.
Require Export VST.veric.base.
Require Export VST.veric.SeparationLogic.
Require Export VST.msl.Extensionality.
Require Export compcert.lib.Coqlib.
Require Export VST.msl.Coqlib2 VST.veric.coqlib4 VST.floyd.coqlib3.
Require Export VST.floyd.functional_base.
Lemma is_int_dec i s v: {is_int i s v} + {~ is_int i s v}.
Proof. destruct v; simpl; try solve [right; intros N; trivial].
destruct i.
+ destruct s.
* destruct (zle Byte.min_signed (Int.signed i0)); [| right; omega].
destruct (zle (Int.signed i0) Byte.max_signed). left; omega. right; omega.
* destruct (zle (Int.unsigned i0) Byte.max_unsigned). left; omega. right; omega.
+ destruct s.
* destruct (zle (-32768) (Int.signed i0)); [| right; omega].
destruct (zle (Int.signed i0) 32767). left; omega. right; omega.
* destruct (zle (Int.unsigned i0) 65535). left; omega. right; omega.
+ left; trivial.
+ destruct (Int.eq_dec i0 Int.zero); subst. left; left; trivial.
destruct (Int.eq_dec i0 Int.one); subst. left; right; trivial.
right. intros N; destruct N; contradiction.
Defined.
Lemma tc_val_dec t v: {tc_val t v} + {~ tc_val t v}.
Proof. destruct t; simpl.
+ right; intros N; trivial.
+ apply is_int_dec.
+ apply is_long_dec.
+ destruct f. apply is_single_dec. apply is_float_dec.
+ destruct ((eqb_type t Tvoid &&
eqb_attr a
{| attr_volatile := false; attr_alignas := Some log2_sizeof_pointer |})%bool).
apply is_pointer_or_integer_dec.
apply is_pointer_or_null_dec.
+ apply is_pointer_or_null_dec.
+ apply is_pointer_or_null_dec.
+ apply isptr_dec.
+ apply isptr_dec.
Defined.
Lemma sem_add_pi_ptr:
forall {cs: compspecs} t p i si,
isptr p ->
match si with
| Signed => Int.min_signed <= i <= Int.max_signed
| Unsigned => 0 <= i <= Int.max_unsigned
end ->
Cop.sem_add_ptr_int cenv_cs t si p (Vint (Int.repr i)) = Some (offset_val (sizeof t * i) p).
Proof.
intros. destruct p; try contradiction.
unfold offset_val, Cop.sem_add_ptr_int.
unfold Cop.ptrofs_of_int, Ptrofs.of_ints, Ptrofs.of_intu, Ptrofs.of_int.
f_equal. f_equal. f_equal.
destruct si; rewrite <- ptrofs_mul_repr; f_equal.
rewrite Int.signed_repr by omega; auto.
rewrite Int.unsigned_repr by omega; auto.
Qed.
Hint Rewrite @sem_add_pi_ptr using (solve [auto with norm]) : norm.
Lemma sem_cast_i2i_correct_range: forall sz s v,
is_int sz s v -> sem_cast_i2i sz s v = Some v.
Proof.
intros.
destruct sz, s, v; try solve [inversion H]; simpl;
f_equal; f_equal; try apply sign_ext_inrange; try apply zero_ext_inrange; eauto.
+ simpl in H; destruct H; subst; reflexivity.
+ simpl in H; destruct H; subst; reflexivity.
Qed.
Hint Rewrite sem_cast_i2i_correct_range using (solve [auto with norm]) : norm.
Lemma sem_cast_neutral_ptr:
forall p, isptr p -> sem_cast_pointer p = Some p.
Proof. intros. destruct p; try contradiction; reflexivity. Qed.
Hint Rewrite sem_cast_neutral_ptr using (solve [auto with norm]): norm.
Lemma sem_cast_neutral_Vint: forall v,
sem_cast_pointer (Vint v) = Some (Vint v).
Proof.
intros. reflexivity.
Qed.
Hint Rewrite sem_cast_neutral_Vint : norm.
Definition isVint v := match v with Vint _ => True | _ => False end.
Lemma is_int_is_Vint: forall i s v, is_int i s v -> isVint v.
Proof. intros.
destruct i,s,v; simpl; intros; auto.
Qed.
Lemma is_int_I32_Vint: forall s v, is_int I32 s (Vint v).
Proof.
intros.
hnf. auto.
Qed.
Hint Resolve is_int_I32_Vint.
Lemma sem_cast_neutral_int: forall v,
isVint v ->
sem_cast_pointer v = Some v.
Proof.
destruct v; simpl; intros; try contradiction; auto.
Qed.
Hint Rewrite sem_cast_neutral_int using
(auto;
match goal with H: is_int ?i ?s ?v |- isVint ?v => apply (is_int_is_Vint i s v H) end) : norm.
Lemma sizeof_tuchar: forall {cs: compspecs}, sizeof tuchar = 1%Z.
Proof. reflexivity. Qed.
Hint Rewrite @sizeof_tuchar: norm.
Hint Rewrite Z.mul_1_l Z.mul_1_r Z.add_0_l Z.add_0_r Z.sub_0_r : norm.
Hint Rewrite eval_id_same : norm.
Hint Rewrite eval_id_other using solve [clear; intro Hx; inversion Hx] : norm.
Hint Rewrite Int.sub_idem Int.sub_zero_l Int.add_neg_zero : norm.
Hint Rewrite Ptrofs.sub_idem Ptrofs.sub_zero_l Ptrofs.add_neg_zero : norm.
Lemma eval_expr_Etempvar:
forall {cs: compspecs} i t, eval_expr (Etempvar i t) = eval_id i.
Proof. reflexivity.
Qed.
Hint Rewrite @eval_expr_Etempvar : eval.
Lemma eval_expr_binop: forall {cs: compspecs} op a1 a2 t, eval_expr (Ebinop op a1 a2 t) =
`(eval_binop op (typeof a1) (typeof a2)) (eval_expr a1) (eval_expr a2).
Proof. reflexivity. Qed.
Hint Rewrite @eval_expr_binop : eval.
Lemma eval_expr_unop: forall {cs: compspecs} op a1 t, eval_expr (Eunop op a1 t) =
lift1 (eval_unop op (typeof a1)) (eval_expr a1).
Proof. reflexivity. Qed.
Hint Rewrite @eval_expr_unop : eval.
Hint Resolve eval_expr_Etempvar.
Lemma eval_expr_Etempvar' : forall {cs: compspecs} i t, eval_id i = eval_expr (Etempvar i t).
Proof. intros. symmetry; auto.
Qed.
Hint Resolve @eval_expr_Etempvar'.
Hint Rewrite Int.add_zero Int.add_zero_l Int.sub_zero_l : norm.
Hint Rewrite Ptrofs.add_zero Ptrofs.add_zero_l Ptrofs.sub_zero_l : norm.
Lemma eval_var_env_set:
forall i t j v (rho: environ), eval_var i t (env_set rho j v) = eval_var i t rho.
Proof. reflexivity. Qed.
Hint Rewrite eval_var_env_set : norm.
Lemma eval_expropt_Some: forall {cs: compspecs} e, eval_expropt (Some e) = `Some (eval_expr e).
Proof. reflexivity. Qed.
Lemma eval_expropt_None: forall {cs: compspecs} , eval_expropt None = `None.
Proof. reflexivity. Qed.
Hint Rewrite @eval_expropt_Some @eval_expropt_None : eval.
Lemma deref_noload_tarray:
forall ty n, deref_noload (tarray ty n) = (fun v => v).
Proof.
intros. extensionality v. reflexivity.
Qed.
Hint Rewrite deref_noload_tarray : norm.
Lemma deref_noload_Tarray:
forall ty n a, deref_noload (Tarray ty n a) = (fun v => v).
Proof.
intros. extensionality v. reflexivity.
Qed.
Hint Rewrite deref_noload_Tarray : norm.
Lemma flip_lifted_eq:
forall (v1: environ -> val) (v2: val),
`eq v1 `(v2) = `(eq v2) v1.
Proof.
intros. unfold_lift. extensionality rho. apply prop_ext; split; intro; auto.
Qed.
Hint Rewrite flip_lifted_eq : norm.
Lemma isptr_is_pointer_or_null:
forall v, isptr v -> is_pointer_or_null v.
Proof. intros. destruct v; inv H; simpl; auto.
Qed.
Hint Resolve isptr_is_pointer_or_null.
Definition add_ptr_int {cs: compspecs} (ty: type) (v: val) (i: Z) : val :=
eval_binop Cop.Oadd (tptr ty) tint v (Vint (Int.repr i)).
Lemma add_ptr_int_offset:
forall {cs: compspecs} t v n,
repable_signed (sizeof t) ->
repable_signed n ->
add_ptr_int t v n = offset_val (sizeof t * n) v.
Abort. (* broken in CompCert 2.7 *)
Lemma typed_false_cmp:
forall op i j ,
typed_false tint (force_val (sem_cmp op tint tint (Vint i) (Vint j))) ->
Int.cmp (negate_comparison op) i j = true.
Proof.
intros.
unfold sem_cmp in H.
unfold Cop.classify_cmp in H. simpl in H.
rewrite Int.negate_cmp.
unfold both_int, force_val, typed_false, strict_bool_val, sem_cast, classify_cast, tint in H.
destruct Archi.ptr64 eqn:Hp; simpl in H.
destruct (Int.cmp op i j); inv H; auto.
destruct (Int.cmp op i j); inv H; auto.
Qed.
Lemma typed_true_cmp:
forall op i j,
typed_true tint (force_val (sem_cmp op tint tint (Vint i) (Vint j))) ->
Int.cmp op i j = true.
Proof.
intros.
unfold sem_cmp in H.
unfold Cop.classify_cmp in H. simpl in H.
unfold both_int, force_val, typed_false, strict_bool_val, sem_cast, classify_cast, tint in H.
destruct Archi.ptr64 eqn:Hp; simpl in H.
destruct (Int.cmp op i j); inv H; auto.
destruct (Int.cmp op i j); inv H; auto.
Qed.
Definition Zcmp (op: comparison) : Z -> Z -> Prop :=
match op with
| Ceq => eq
| Cne => (fun i j => i<>j)
| Clt => Z.lt
| Cle => Z.le
| Cgt => Z.gt
| Cge => Z.ge
end.
Lemma int_cmp_repr:
forall op i j, repable_signed i -> repable_signed j ->
Int.cmp op (Int.repr i) (Int.repr j) = true ->
Zcmp op i j.
Proof.
intros.
unfold Int.cmp, Int.eq, Int.lt in H1.
replace (if zeq (Int.unsigned (Int.repr i)) (Int.unsigned (Int.repr j))
then true else false)
with (if zeq i j then true else false) in H1.
2:{
destruct (zeq i j); destruct (zeq (Int.unsigned (Int.repr i)) (Int.unsigned (Int.repr j)));
auto.
subst. contradiction n; auto.
clear - H H0 e n.
apply Int.signed_repr in H. rewrite Int.signed_repr_eq in H.
apply Int.signed_repr in H0; rewrite Int.signed_repr_eq in H0.
contradiction n; clear n.
repeat rewrite Int.unsigned_repr_eq in e.
match type of H with
| context [if ?a then _ else _] => destruct a
end;
match type of H0 with
| context [if ?a then _ else _] => destruct a
end; omega.
}
unfold Zcmp.
rewrite (Int.signed_repr _ H) in H1; rewrite (Int.signed_repr _ H0) in H1.
repeat match type of H1 with
| context [if ?a then _ else _] => destruct a
end; try omegaContradiction;
destruct op; auto; simpl in *; try discriminate; omega.
Qed.
Lemma typed_false_cmp_repr:
forall op i j,
repable_signed i -> repable_signed j ->
typed_false tint (force_val (sem_cmp op tint tint
(Vint (Int.repr i))
(Vint (Int.repr j)) )) ->
Zcmp (negate_comparison op) i j.
Proof.
intros.
apply typed_false_cmp in H1.
apply int_cmp_repr; auto.
Qed.
Lemma typed_true_cmp_repr:
forall op i j,
repable_signed i -> repable_signed j ->
typed_true tint (force_val (sem_cmp op tint tint
(Vint (Int.repr i))
(Vint (Int.repr j)) )) ->
Zcmp op i j.
Proof.
intros.
apply typed_true_cmp in H1.
apply int_cmp_repr; auto.
Qed.
Ltac intcompare H :=
(apply typed_false_cmp_repr in H || apply typed_true_cmp_repr in H);
[ simpl in H | auto; unfold repable_signed, Int.min_signed, Int.max_signed in *; omega .. ].
Lemma isptr_deref_noload:
forall t p, access_mode t = By_reference -> isptr (deref_noload t p) = isptr p.
Proof.
intros.
unfold deref_noload. rewrite H. reflexivity.
Qed.
Hint Rewrite isptr_deref_noload using reflexivity : norm.
Definition headptr (v: val): Prop :=
exists b, v = Vptr b Ptrofs.zero.
Lemma headptr_isptr: forall v,
headptr v -> isptr v.
Proof.
intros.
destruct H as [b ?].
subst.
hnf; auto.
Qed.
Hint Resolve headptr_isptr.
Lemma headptr_offset_zero: forall v,
headptr (offset_val 0 v) <->
headptr v.
Proof.
split; intros.
+ destruct H as [b ?]; subst.
destruct v; try solve [inv H].
simpl in H.
remember (Ptrofs.add i (Ptrofs.repr 0)).
inversion H; subst.
rewrite Ptrofs.add_zero in H2; subst.
hnf; eauto.
+ destruct H as [b ?]; subst.
exists b.
reflexivity.
Qed.
(* Equality proofs for all constants from the Compcert Int, Int64, Ptrofs modules: *)
Lemma typed_false_ptr:
forall {t a v}, typed_false (Tpointer t a) v -> v=nullval.
Proof.
unfold typed_false, strict_bool_val, nullval; simpl; intros.
destruct Archi.ptr64 eqn:Hp;
destruct v; try discriminate; f_equal.
first [pose proof (Int64.eq_spec i Int64.zero);
destruct (Int64.eq i Int64.zero)
| pose proof (Int.eq_spec i Int.zero);
destruct (Int.eq i Int.zero)];
subst; auto; discriminate.
Qed.
Lemma typed_true_ptr:
forall {t a v}, typed_true (Tpointer t a) v -> isptr v.
Proof.
unfold typed_true, strict_bool_val; simpl; intros.
destruct v; try discriminate; simpl; auto;
destruct Archi.ptr64; try discriminate;
revert H; simple_if_tac; intros; discriminate.
Qed.
Lemma int_cmp_repr':
forall op i j, repable_signed i -> repable_signed j ->
Int.cmp op (Int.repr i) (Int.repr j) = false ->
Zcmp (negate_comparison op) i j.
Proof.
intros.
apply int_cmp_repr; auto.
rewrite Int.negate_cmp.
rewrite H1; reflexivity.
Qed.
Lemma typed_false_of_bool:
forall x, typed_false tint (Val.of_bool x) -> (x=false).
Proof.
unfold typed_false; simpl.
unfold strict_bool_val, Val.of_bool; simpl.
destruct x; simpl; intros; [inversion H | auto].
Qed.
Lemma typed_true_of_bool:
forall x, typed_true tint (Val.of_bool x) -> (x=true).
Proof.
unfold typed_true; simpl.
unfold strict_bool_val, Val.of_bool; simpl.
destruct x; simpl; intros; [auto | inversion H].
Qed.
Lemma typed_false_tint:
Archi.ptr64=false ->
forall v, typed_false tint v -> v=nullval.
Proof.
intros.
hnf in H0. destruct v; inv H0.
destruct (Int.eq i Int.zero) eqn:?; inv H2.
apply int_eq_e in Heqb. subst.
inv H; reflexivity.
Qed.
Lemma typed_false_tlong:
Archi.ptr64=true ->
forall v, typed_false tlong v -> v=nullval.
Proof.
intros. unfold nullval. rewrite H.
hnf in H0. destruct v; inv H0.
pose proof (Int64.eq_spec i Int64.zero).
destruct (Int64.eq i Int64.zero); inv H2.
reflexivity.
Qed.
Lemma typed_true_e:
forall t v, typed_true t v -> v<>nullval.
Proof.
intros.
intro Hx. subst.
hnf in H. unfold nullval, strict_bool_val in H.
destruct Archi.ptr64, t; discriminate.
Qed.
Lemma typed_false_tint_Vint:
forall v, typed_false tint (Vint v) -> v = Int.zero.
Proof.
intros.
unfold typed_false, strict_bool_val in H. simpl in H.
pose proof (Int.eq_spec v Int.zero).
destruct (Int.eq v Int.zero); auto. inv H.
Qed.
Lemma typed_true_tint_Vint:
forall v, typed_true tint (Vint v) -> v <> Int.zero.
Proof.
intros.
unfold typed_true, strict_bool_val in H. simpl in H.
pose proof (Int.eq_spec v Int.zero).
destruct (Int.eq v Int.zero); auto. inv H.
Qed.
Lemma typed_true_tlong_Vlong:
forall v, typed_true tlong (Vlong v) -> v <> Int64.zero.
Proof.
intros.
unfold typed_true, strict_bool_val in H. simpl in H.
pose proof (Int64.eq_spec v Int64.zero).
destruct (Int64.eq v Int64.zero); auto. inv H.
Qed.
Ltac intro_redundant_prop :=
(* do it in this complicated way because the proof will come out smaller *)
match goal with |- ?P -> _ =>
((assert P by immediate; fail 1) || fail 1) || intros _
end.
Ltac fancy_intro aggressive :=
match goal with
| |- ?P -> _ => match type of P with Prop => idtac end
| |- ~ _ => idtac
end;
let H := fresh in
intro H;
try simple apply ptr_eq_e in H;
try simple apply Vint_inj in H;
try match type of H with
| tc_val _ _ => unfold tc_val in H; try change (eqb_type _ _) with false in H; cbv iota in H
end;
match type of H with
| ?P => clear H;
match goal with H': P |- _ => idtac end (* work around bug number 6998 in Coq *)
+ (((assert (H:P) by (clear; immediate); fail 1) || fail 1) || idtac)
(* do it in this complicated way because the proof will come out smaller *)
| ?x = ?y => constr_eq aggressive true;
first [subst x | subst y
| is_var x; rewrite H
| is_var y; rewrite <- H
| idtac]
| headptr (_ ?x) => let Hx1 := fresh "HP" x in
let Hx2 := fresh "P" x in
rename H into Hx1;
pose proof headptr_isptr _ Hx1 as Hx2
| headptr ?x => let Hx1 := fresh "HP" x in
let Hx2 := fresh "P" x in
rename H into Hx1;
pose proof headptr_isptr _ Hx1 as Hx2
| isptr ?x => let Hx := fresh "P" x in rename H into Hx
| is_pointer_or_null ?x => let Hx := fresh "PN" x in rename H into Hx
| typed_false _ _ =>
first [simple apply typed_false_of_bool in H
| apply typed_false_tint_Vint in H
| apply (typed_false_tint (eq_refl _)) in H
| apply (typed_false_tlong (eq_refl _)) in H
| apply typed_false_ptr in H
| idtac ]
| typed_true _ _ =>
first [simple apply typed_true_of_bool in H
| apply typed_true_tint_Vint in H
| apply typed_true_tlong_Vlong in H
(* This one is not portable 32/64 bits
| apply (typed_true_e tint) in H
*)
| apply typed_true_ptr in H
| idtac ]
(* | locald_denote _ _ => hnf in H *)
| _ => try solve [discriminate H]
end.
Ltac fancy_intros aggressive :=
repeat match goal with
| |- (_ <= _ < _) -> _ => fancy_intro aggressive
| |- (_ < _ <= _) -> _ => fancy_intro aggressive
| |- (_ <= _ <= _) -> _ => fancy_intro aggressive
| |- (_ < _ < _) -> _ => fancy_intro aggressive
| |- (?A /\ ?B) -> ?C => apply (@and_ind A B C) (* For some reason "apply and_ind" doesn't work the same *)
| |- _ -> _ => fancy_intro aggressive
end.
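(* Example (sketch): on a goal [typed_true tint (Val.of_bool b) -> P],
   [fancy_intros true] introduces the hypothesis and rewrites it with
   [typed_true_of_bool] into [b = true] before attacking [P]. *)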
Ltac fold_types :=
fold noattr tuint tint tschar tuchar;
repeat match goal with
| |- context [Tpointer ?t noattr] =>
change (Tpointer t noattr) with (tptr t)
| |- context [Tarray ?t ?n noattr] =>
change (Tarray t n noattr) with (tarray t n)
end.
Ltac fold_types1 :=
match goal with |- _ -> ?A =>
let a := fresh "H" in set (a:=A); fold_types; subst a
end.
Lemma is_int_Vbyte: forall c, is_int I8 Signed (Vbyte c).
Proof.
intros. simpl. normalize. rewrite Int.signed_repr by rep_omega. rep_omega.
Qed.
Hint Resolve is_int_Vbyte.
|
Anti-Roma legislation, as well as specific legal acts directed against the non-sedentary Roma population, justifying and legalizing the previously customary aversion towards the people and their persecution, started to be drawn up and enacted in Western Europe in the early Modern era. Adam Bartosz in his book, Nie bój się Cygana. Na dara Romestar [Do Not Fear the Gypsy], lists only a few of these. According to the scholar and expert on Roma history, the first anti-Gypsy and anti-Egyptian – as they were known at the time – laws were issued in Lucerne in 1471 and barred the Roma from the territory of Switzerland. At the close of the 15th century, the Roma were forbidden to follow their itinerant lifestyle on pain of corporal punishment and enslavement. In turn, the early 16th century was marked by the anti-Roma legislation of the Habsburg Holy Roman Emperor, Maximilian I, under which Roma captured on imperial territory were subject to torture and extermination. In 1530 they were legally banished from England, and ten years later from Scotland. Expulsion acts kept being issued throughout the 16th century in, among other countries, Denmark, Finland and France.
It is exactly this chronicle record – together with reports, recoverable from the sources as late as the mid-19th century, of aristocratic collections of severed limbs, noses and ears of Roma captured during such battues, subsequently exhibited in cabinets of curiosities or in palace rooms alongside hunting trophies – that has inspired the work of Krzysztof Gil.
The Polish-Romani artist has produced a kind of installation, composed of a cubical room of his own design and making, and of sound. In its form, dimensions and execution technique, the cubic shed, put together from accidental pieces of wood and fabric, alludes to the traditional, humble, temporary houses erected by the legally and socially marginalised Roma. At the same time, by repeating the paradigmatic form of the gallery white cube, it constructs a box-within-a-box situation in which it positions the audience: upon entrance, the cubical installation turns out to be a self-contained exhibition space, whose darkened interior appears to be a dialectical antithesis of the space of a gallery.
Inside the one-chamber room, Krzysztof Gil installed a panoramic painting. The cylindrical panorama consists of a majestic and monumental white-crayon drawing on a black background, representing a scene of game display. The image, which follows the representational convention for hunting trophies, is composed of likenesses of genteel women and men clothed in the apparel of modern hunters and their female companions. All images used by the artist have been appropriated from paintings of the old masters. In the surrounding darkness, the light rhythmically reveals characters whose prototypes become identifiable as, for example, members of the surgeons’ guild depicted by Rembrandt, or as St. Irene attending to the body of St. Sebastian from Georges de la Tour’s painting. At the centre of the composition, amidst the hunted game, lies the de-faced torso – painted in profil perdu – of an anonymous, hunted-down Roma.
Krzysztof Gil’s use of the convention of appropriation, which historicises his piece, is at once a sarcastic comment – in the spirit of Reger from Thomas Bernhard’s Old Masters – on European art as a history painted in bright colours, and an accusation levelled against it. At the same time, thanks to the sound accompanying the visible, that remote history is juxtaposed with the contemporary and with the artist’s private, family history.
The soundtrack accompanying our viewing of the panorama consists of fragments of a recorded conversation Krzysztof Gil conducted with his grandmother. She tells the story of her father – a musician and a bricklayer – murdered after WWII in the Podhale region because he had had the audacity to remark on poorly executed work to his Polish co-workers. Upon arrival at the scene of the crime, the militiamen and doctors found the event to have been a misadventure. The perpetrators went unpunished.
The artist’s family history, combined here with an event described in a 17th-century chronicle, demonstrates that the ritual Heidenjachten are not really a closed chapter of history, and that anti-Roma violence remains a notorious occurrence. This is confirmed also by the exhibition title, which we have borrowed from a contemporary photograph found on the Internet, showing an inscription on a wall of a Polish city. The paint appeared to still be wet.
|
!##############################################################################
!# ****************************************************************************
!# <name> assemblytemplates </name>
!# ****************************************************************************
!#
!# <purpose>
!# Contains general template structures that are used to collect information
!# necessary for the assembly of matrices and vectors.
!#
!# The following routines can be found here:
!#
!# 1.) astmpl_createSpaceAsmHier
!# -> Creates a template assembly hierarchy
!#
!# 2.) astmpl_releaseSpaceAsmHier
!# -> Releases a template assembly hierarchy
!#
!# </purpose>
!##############################################################################
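! Typical usage (a sketch; the variable names below are illustrative):
!
!   type(t_staticSpaceAsmHierarchy) :: rhierarchy
!   call astmpl_createSpaceAsmHier (rhierarchy,nlevels)
!   ! ... fill rhierarchy%p_RasmTemplList(ilevel) on every level ...
!   call astmpl_releaseSpaceAsmHier (rhierarchy)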
module assemblytemplates
use fsystem
use storage
use boundary
use triangulation
use linearsystemscalar
use spatialdiscretisation
use timediscretisation
use timescalehierarchy
use meshhierarchy
use fespacehierarchybase
use fespacehierarchy
use spacetimehierarchy
implicit none
private
!<types>
!<typeblock>
! A type block specifying all "static" information in space which depends
! on a discretisation and a triangulation. Such static information can be
! precalculated and is valid until the mesh or the FE spaces change.
type t_staticSpaceAsmTemplates
! An object for saving the triangulation on the domain
type(t_triangulation), pointer :: p_rtriangulation => null()
! A scalar discretisation structure for the velocity space.
type(t_spatialDiscretisation), pointer :: p_rdiscr => null()
! A scalar discretisation structure for the pressure space.
type(t_spatialDiscretisation), pointer :: p_rdiscrPressure => null()
! A scalar discretisation structure that specifies how to generate
! the mass matrix in the velocity FEM space.
! May use a different cubature rule than the velocity discretisation
type(t_spatialDiscretisation), pointer :: p_rdiscrMass => null()
! A scalar discretisation structure that specifies how to generate
! the mass matrix in the pressure FEM space.
type(t_spatialDiscretisation), pointer :: p_rdiscrMassPressure => null()
! Cubature information structure for Mass-type matrices in the velocity space.
type(t_scalarCubatureInfo) :: rcubatureInfoMass
! Cubature information structure for Laplace-type matrices.
type(t_scalarCubatureInfo) :: rcubatureInfo
! Cubature information structure for pressure/divergence matrices.
type(t_scalarCubatureInfo) :: rcubatureInfoDiv
! Cubature information structure for Mass-type matrices in the pressure space.
type(t_scalarCubatureInfo) :: rcubatureInfoMassPressure
! Cubature information structure for RHS vectors, momentum equation
type(t_scalarCubatureInfo) :: rcubatureInfoRHS
! Cubature information structure for RHS vectors, continuity equation
type(t_scalarCubatureInfo) :: rcubatureInfoRHScontinuity
! A template FEM matrix that defines the structure of Laplace/Stokes/...
! matrices. The matrix contains only a structure, no content.
type(t_matrixScalar) :: rmatrixTemplateFEM
! A template FEM matrix that defines the structure of the offdiagonal
! matrices. Needed e.g. by Newton and may have a different structure
! than the diagonal velocity matrices in case EOJ is activated.
! The matrix contains only a structure, no content.
type(t_matrixScalar) :: rmatrixTemplateFEMOffdiag
! A template FEM matrix that defines the structure of the pressure
! matrices. The matrix contains only a structure, no content.
type(t_matrixScalar) :: rmatrixTemplateFEMPressure
! A template FEM matrix that defines the structure of gradient
! matrices (B1/B2). The matrix contains only a structure, no content.
type(t_matrixScalar) :: rmatrixTemplateGradient
! A template FEM matrix that defines the structure of divergence
! matrices (D1/D2). The matrix contains only a structure, no content.
type(t_matrixScalar) :: rmatrixTemplateDivergence
! Precalculated Laplace matrix for that specific level
type(t_matrixScalar) :: rmatrixLaplace
! Precalculated B1-matrix for that specific level.
type(t_matrixScalar) :: rmatrixB1
! Precalculated B2-matrix for that specific level.
type(t_matrixScalar) :: rmatrixB2
! Precalculated D1-matrix for that specific level.
type(t_matrixScalar) :: rmatrixD1
! Precalculated D2-matrix for that specific level.
type(t_matrixScalar) :: rmatrixD2
! Precalculated D1^T-matrix for that specific level.
type(t_matrixScalar) :: rmatrixD1T
! Precalculated D2^T-matrix for that specific level.
type(t_matrixScalar) :: rmatrixD2T
! Precalculated mass matrix
type(t_matrixScalar) :: rmatrixMassPrimal
! Precalculated lumped mass matrix.
type(t_matrixScalar) :: rmatrixMassLumpInt
! Precalculated mass matrix for the pressure space.
type(t_matrixScalar) :: rmatrixMassPressure
! Precalculated mass; control space
type(t_matrixScalar) :: rmatrixMassDistC
! Precalculated lumped mass; control space
type(t_matrixScalar) :: rmatrixMassDistCLumped
! Precalculated mass matrix for the pressure space.
! Extended structure applicable for UMFPACK on pure Dirichlet problems.
type(t_matrixScalar) :: rmatrixMassPressureExtStruc
! Precalculated lumped mass matrix for the pressure space.
type(t_matrixScalar) :: rmatrixMassPressureLumpInt
! Precalculated EOJ matrix in the primal velocity space.
! Calculated with nu=1.
type(t_matrixScalar) :: rmatrixEOJPrimal
! Precalculated EOJ matrix in the dual velocity space.
! Calculated with nu=1. May share the content with rmatrixEOJPrimal.
type(t_matrixScalar) :: rmatrixEOJDual
end type
!</typeblock>
public :: t_staticSpaceAsmTemplates
!<typeblock>
! A hierarchy of t_staticSpaceAsmTemplates structures.
type t_staticSpaceAsmHierarchy
! Number of levels in the hierarchy.
integer :: nlevels = 0
! The level info structures on all levels.
type(t_staticSpaceAsmTemplates), dimension(:), pointer :: p_RasmTemplList => null()
end type
!</typeblock>
public :: t_staticSpaceAsmHierarchy
!</types>
! Allocates memory for a level hierarchy consisting of nlevels levels
public :: astmpl_createSpaceAsmHier
! Releases a level info hierarchy.
public :: astmpl_releaseSpaceAsmHier
contains
! ***************************************************************************
!<subroutine>
subroutine astmpl_createSpaceAsmHier (rhierarchy,nlevels)
!<description>
! Allocates memory for a level hierarchy consisting of nlevels levels
!</description>
!<input>
! Number of levels.
integer, intent(in) :: nlevels
!</input>
!<output>
! A t_staticSpaceAsmHierarchy to initialise.
type(t_staticSpaceAsmHierarchy), intent(out) :: rhierarchy
!</output>
!</subroutine>
! Allocate memory.
rhierarchy%nlevels = nlevels
allocate(rhierarchy%p_RasmTemplList(nlevels))
end subroutine
! ***************************************************************************
!<subroutine>
subroutine astmpl_releaseSpaceAsmHier (rhierarchy)
!<description>
! Releases a level info hierarchy.
!
! WARNING: Attached matrices are NOT automatically released!
!</description>
!<inputoutput>
! A t_staticSpaceAsmHierarchy to clean up.
type(t_staticSpaceAsmHierarchy), intent(inout) :: rhierarchy
!</inputoutput>
!</subroutine>
! Release memory.
deallocate(rhierarchy%p_RasmTemplList)
rhierarchy%nlevels = 0
end subroutine
end module
|
# uniform distribution - uniformly distributed random values in an interval [a,b]
# the unif() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation), respectively; a short illustration follows below
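# illustration of the four prefixes on the uniform family (values are easy to verify by hand):
dunif(0.5, min = 0, max = 2)  # density at 0.5 -> 1/2
punif(0.5, min = 0, max = 2)  # P(X <= 0.5) -> 0.25
qunif(0.25, min = 0, max = 2) # 25% quantile -> 0.5
runif(3, min = 0, max = 2)    # three random draws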
# 100 uniformly distributed random values in the interval [0,2]
x = runif(n = 100, min = 0, max = 2)
hist(x, probability=TRUE, col=gray(.9), main="uniform on [0,2]") # histogram of the distribution
curve(dunif(x,0,2),add=T) # theoretical density -> dunif(x,0,2) gives the density with which each value occurs
# drawing a sample -> from the numbers x (from 1 to x, if x is a number rather than a vector), size numbers are taken at random
sample(x = 1:10, size = 10)
sample(x = 10, size = 10, replace = TRUE) # repetitions are possible
# geometric distribution - a special case of the negative binomial, when we want x failures before the first success, with success probability prob
# the geom() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation), respectively
Y=0:10
plot(Y, dgeom(x = Y, prob = 0.6), type="h", ylim=c(0,1), main="Geometric distribution for p=0.6", ylab="P(Y=y)", xlab="Y=Number of failures before first success")
# exponential distribution - a continuous random variable
# the exp() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation), respectively
mu = 2500
x = rexp(n = 100, rate = 1/mu) # 100 exponentially distributed random values with rate 1/mu
hist(x,probability=TRUE,col=gray(.9),main="exponential mean=2500") # histogram
curve(dexp(x = x, rate = 1/2500),add=T) # theoretical density
# normal distribution - mean (expectation) mu and standard deviation sigma
# the norm() function with prefixes d,p,q,r for density, distribution, quantile and random generation (simulation), respectively
# the function takes a mean (mu) and a standard deviation (sigma)
x=c(1:200)
plot(dnorm(x, 100,10), ylim=c(0,0.1))
lines(dnorm(x, 100,5), col="red")
lines(dnorm(x, 130,10), col="blue")
# standard normal distribution - a normal distribution with mean 0 and variance 1
# the default in norm(x, mean = 0, sd = 1)
# standardisation uses the formula y = (x - mu)/sigma
# where x is a normally distributed random variable with mean mu and variance sigma^2,
# and y is a standard normally distributed random variable
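# quick sanity check of the standardisation formula (values here are illustrative):
# z should have mean close to 0 and standard deviation close to 1
z = (rnorm(10000, mean = 100, sd = 10) - 100)/10
mean(z); sd(z)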
#-----------------------------------------------------
#01
# geometric distribution with success probability 1/2, 1/3, 2/3
y=0:10 # sample values (for visualisation)
plot(x = y, dgeom(x = y, prob = 1/2), ylim=c(0,0.8), ylab = "Probability", xlab = "Number of fails before the first success")
lines(x = y, dgeom(x = y, prob = 1/3), col="red")
lines(x = y, dgeom(x = y, prob = 2/3), col="blue")
#02
# exponential distribution with mu = 5
k.30 = dexp(x = 5, rate = 1/5)
plot(x = 1:30, y = dexp(x = 1:30, rate = 1/5), main = "Exponential with mu = 5", ylab = "P(X = x)")
#03
x = rnorm(n = 50, mean = 5, sd = 5) # N(5,5) means variance 5 = sigma^2 => standard deviation sigma = sqrt(5), but it does not matter here :)
x.norm = (x - 5)/5
hist(x.norm, probability = TRUE)
x.standart = rnorm(50)
hist(x.standart, probability = TRUE)
#04
# z is a standard normally distributed random variable
pnorm(q = 0.92 , lower.tail = FALSE) #P(Z > 0.92)
pnorm(q = -0.5, lower.tail = FALSE) #P(Z > -0.5) = P(Z < 0.5)
# P(-0.64 < Z < 0.43) = P(Z < 0.43) - P(Z < -0.64)
pnorm(0.43) - pnorm(-0.64)
#05
# x normally distributed with mu = 3 and sigma^2 = 4
pnorm(q = 6.2, mean = 3, sd = 2)
#06
pnorm(q = 3100, mean = 3500, sd = 500)
pnorm(-4/5) # standardised z = (3100 - 3500)/500 = -4/5 is standard normal N(0,1)
#07
# X = N(80, 10^2), Y = N(78, 13^2) => X-Y = N(80-78, 10^2 + 13^2) = N(2,269)
pnorm(0,2,sqrt(269),lower.tail = FALSE) # we want X > Y => X - Y > 0
|
/-
Copyright (c) 2021 Johan Commelin. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johan Commelin
-/
import group_theory.free_abelian_group
import group_theory.is_free_group
import data.finsupp.basic
import data.equiv.module
import linear_algebra.dimension
/-!
# Isomorphism between `free_abelian_group X` and `X →₀ ℤ`
In this file we construct the canonical isomorphism between `free_abelian_group X` and `X →₀ ℤ`.
We use this to transport the notion of `support` from `finsupp` to `free_abelian_group`.
## Main declarations
- `free_abelian_group.equiv_finsupp`: group isomorphism between `free_abelian_group X` and `X →₀ ℤ`
- `free_abelian_group.coeff`: the multiplicity of `x : X` in `a : free_abelian_group X`
- `free_abelian_group.support`: the finset of `x : X` that occur in `a : free_abelian_group X`
-/
noncomputable theory
open_locale big_operators
variables {X : Type*}
/-- The group homomorphism `free_abelian_group X →+ (X →₀ ℤ)`. -/
def free_abelian_group.to_finsupp : free_abelian_group X →+ (X →₀ ℤ) :=
free_abelian_group.lift $ λ x, finsupp.single x (1 : ℤ)
/-- The group homomorphism `(X →₀ ℤ) →+ free_abelian_group X`. -/
def finsupp.to_free_abelian_group : (X →₀ ℤ) →+ free_abelian_group X :=
finsupp.lift_add_hom $ λ x, (smul_add_hom ℤ (free_abelian_group X)).flip (free_abelian_group.of x)
open finsupp free_abelian_group
@[simp] lemma finsupp.to_free_abelian_group_comp_single_add_hom (x : X) :
finsupp.to_free_abelian_group.comp (finsupp.single_add_hom x) =
(smul_add_hom ℤ (free_abelian_group X)).flip (of x) :=
begin
ext,
simp only [add_monoid_hom.coe_comp, finsupp.single_add_hom_apply, function.comp_app,
one_smul, to_free_abelian_group, finsupp.lift_add_hom_apply_single]
end
@[simp] lemma free_abelian_group.to_finsupp_comp_to_free_abelian_group :
to_finsupp.comp to_free_abelian_group = add_monoid_hom.id (X →₀ ℤ) :=
begin
ext x y, simp only [add_monoid_hom.id_comp],
rw [add_monoid_hom.comp_assoc, finsupp.to_free_abelian_group_comp_single_add_hom],
simp only [to_finsupp, add_monoid_hom.coe_comp, finsupp.single_add_hom_apply,
function.comp_app, one_smul, lift.of, add_monoid_hom.flip_apply,
smul_add_hom_apply, add_monoid_hom.id_apply],
end
@[simp] lemma finsupp.to_free_abelian_group_comp_to_finsupp :
to_free_abelian_group.comp to_finsupp = add_monoid_hom.id (free_abelian_group X) :=
begin
ext,
rw [to_free_abelian_group, to_finsupp, add_monoid_hom.comp_apply, lift.of,
lift_add_hom_apply_single, add_monoid_hom.flip_apply, smul_add_hom_apply, one_smul,
add_monoid_hom.id_apply],
end
@[simp] lemma finsupp.to_free_abelian_group_to_finsupp {X} (x : free_abelian_group X) :
x.to_finsupp.to_free_abelian_group = x :=
by rw [← add_monoid_hom.comp_apply, finsupp.to_free_abelian_group_comp_to_finsupp,
add_monoid_hom.id_apply]
namespace free_abelian_group
open finsupp
variable {X}
@[simp] lemma to_finsupp_of (x : X) :
to_finsupp (of x) = finsupp.single x 1 :=
by simp only [to_finsupp, lift.of]
@[simp] lemma to_finsupp_to_free_abelian_group (f : X →₀ ℤ) :
f.to_free_abelian_group.to_finsupp = f :=
by rw [← add_monoid_hom.comp_apply, to_finsupp_comp_to_free_abelian_group, add_monoid_hom.id_apply]
variable (X)
/-- The additive equivalence between `free_abelian_group X` and `(X →₀ ℤ)`. -/
@[simps]
def equiv_finsupp : free_abelian_group X ≃+ (X →₀ ℤ) :=
{ to_fun := to_finsupp,
inv_fun := to_free_abelian_group,
left_inv := to_free_abelian_group_to_finsupp,
right_inv := to_finsupp_to_free_abelian_group,
map_add' := to_finsupp.map_add }
/-- `α` is a basis of the ℤ-module `free_abelian_group α`. -/
noncomputable def basis (α : Type*) :
basis α ℤ (free_abelian_group α) :=
⟨(free_abelian_group.equiv_finsupp α).to_int_linear_equiv⟩
/-- Isomorphic free abelian groups (as modules) have equivalent bases. -/
def equiv.of_free_abelian_group_linear_equiv {α β : Type*}
(e : free_abelian_group α ≃ₗ[ℤ] free_abelian_group β) :
α ≃ β :=
let t : _root_.basis α ℤ (free_abelian_group β) := (free_abelian_group.basis α).map e
in t.index_equiv $ free_abelian_group.basis _
/-- Isomorphic free abelian groups (as additive groups) have equivalent bases. -/
def equiv.of_free_abelian_group_equiv {α β : Type*}
(e : free_abelian_group α ≃+ free_abelian_group β) :
α ≃ β :=
equiv.of_free_abelian_group_linear_equiv e.to_int_linear_equiv
/-- Isomorphic free groups have equivalent bases. -/
def equiv.of_free_group_equiv {α β : Type*}
(e : free_group α ≃* free_group β) :
α ≃ β :=
equiv.of_free_abelian_group_equiv e.abelianization_congr.to_additive
open is_free_group
/-- Isomorphic free groups have equivalent bases (`is_free_group` variant). -/
def equiv.of_is_free_group_equiv {G H : Type*}
[group G] [group H] [is_free_group G] [is_free_group H]
(e : G ≃* H) :
generators G ≃ generators H :=
equiv.of_free_group_equiv $
mul_equiv.trans ((to_free_group G).symm) $
mul_equiv.trans e $
to_free_group H
variable {X}
/-- `coeff x` is the additive group homomorphism `free_abelian_group X →+ ℤ`
that sends `a` to the multiplicity of `x : X` in `a`. -/
def coeff (x : X) : free_abelian_group X →+ ℤ :=
(finsupp.apply_add_hom x).comp to_finsupp
/-- `support a` for `a : free_abelian_group X` is the finite set of `x : X`
that occur in the formal sum `a`. -/
def support (a : free_abelian_group X) : finset X :=
a.to_finsupp.support
lemma mem_support_iff (x : X) (a : free_abelian_group X) :
x ∈ a.support ↔ coeff x a ≠ 0 :=
by { rw [support, finsupp.mem_support_iff], exact iff.rfl }
lemma not_mem_support_iff (x : X) (a : free_abelian_group X) :
x ∉ a.support ↔ coeff x a = 0 :=
by { rw [support, finsupp.not_mem_support_iff], exact iff.rfl }
@[simp] lemma support_zero : support (0 : free_abelian_group X) = ∅ :=
by simp only [support, finsupp.support_zero, add_monoid_hom.map_zero]
@[simp] lemma support_of (x : X) : support (of x) = {x} :=
by simp only [support, to_finsupp_of, finsupp.support_single_ne_zero (one_ne_zero)]
@[simp] lemma support_neg (a : free_abelian_group X) : support (-a) = support a :=
by simp only [support, add_monoid_hom.map_neg, finsupp.support_neg]
@[simp] lemma support_zsmul (k : ℤ) (h : k ≠ 0) (a : free_abelian_group X) :
support (k • a) = support a :=
begin
ext x,
simp only [mem_support_iff, add_monoid_hom.map_zsmul],
simp only [h, zsmul_int_int, false_or, ne.def, mul_eq_zero]
end
@[simp] lemma support_nsmul (k : ℕ) (h : k ≠ 0) (a : free_abelian_group X) :
support (k • a) = support a :=
by { apply support_zsmul k _ a, exact_mod_cast h }
open_locale classical
lemma support_add (a b : free_abelian_group X) : (support (a + b)) ⊆ a.support ∪ b.support :=
begin
simp only [support, add_monoid_hom.map_add],
apply finsupp.support_add
end
end free_abelian_group
|
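# Load RecordArraysBenchmarks; if a plain `using` fails, put the benchmark
# project on LOAD_PATH first and try again.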
try
using RecordArraysBenchmarks
true
catch
false
end || begin
let path = joinpath(@__DIR__, "../benchmark/RecordArraysBenchmarks/Project.toml")
path in LOAD_PATH || push!(LOAD_PATH, path)
end
using RecordArraysBenchmarks
end
|
context("proxy")
has_localhost = has_connectivity("localhost")
test_that("control flow between proxy and master", {
skip_if_not(has_localhost)
skip_on_os("windows")
# prerequisites
context = rzmq::init.context()
socket = rzmq::init.socket(context, "ZMQ_REP")
port = bind_avail(socket, 50000:55000)
common_data = list(id="DO_SETUP", fun = function(x) x*2,
const=list(), export=list(), seed=1)
p = parallel::mcparallel(ssh_proxy(port, port, 'multicore'))
on.exit(tools::pskill(p$pid, tools::SIGKILL))
# startup
msg = recv(p, socket)
expect_equal(msg$id, "PROXY_UP")
send(socket, common_data)
msg = recv(p, socket)
expect_equal(msg$id, "PROXY_READY")
expect_true("data_url" %in% names(msg))
expect_true("token" %in% names(msg))
proxy = msg$data_url
token = msg$token
# command execution
cmd = quote(Sys.getpid())
send(socket, list(id="PROXY_CMD", exec=cmd))
msg = recv(p, socket)
expect_equal(msg$id, "PROXY_CMD")
expect_equal(msg$reply, p$pid)
# common data
worker = rzmq::init.socket(context, "ZMQ_REQ")
rzmq::connect.socket(worker, proxy)
send(worker, list(id="WORKER_READY"))
msg = recv(p, worker)
testthat::expect_equal(msg$id, "DO_SETUP")
testthat::expect_equal(msg$token, token)
testthat::expect_equal(msg[names(common_data)], common_data)
# shutdown
msg = list(id = "PROXY_STOP")
send(socket, msg)
collect = suppressWarnings(parallel::mccollect(p))
expect_equal(as.integer(names(collect)), p$pid)
on.exit(NULL)
})
test_that("full SSH connection", {
skip_on_cran()
skip_on_os("windows")
skip_if_not(has_localhost)
skip_if_not(has_ssh_cmq("localhost"))
skip_if_not(identical(Sys.getenv("TRAVIS"), "true"),
message="this test runs on travis only")
# 'LOCAL' mode (default) will not set up required sockets
# 'SSH' mode would lead to circular connections
# schedulers may have long delay (they start in fresh session, so no path)
sched = getOption("clustermq.scheduler", qsys_default)
skip_if(is.null(sched) || toupper(sched) != "MULTICORE",
message="options(clustermq.scheduler') must be 'MULTICORE'")
options(clustermq.template = "SSH")
w = workers(n_jobs=1, qsys_id="ssh", reuse=FALSE,
ssh_host="localhost", node="localhost")
result = Q(identity, 42, n_jobs=1, timeout=10L, workers=w)
expect_equal(result, list(42))
})
|
[STATEMENT]
lemma SoundCall[rule_format]:
"\<lbrakk>\<forall>n. \<Turnstile>\<^sub>n ({A} \<union> G) \<longrightarrow> \<Turnstile>\<^sub>n body : A\<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A \<Longrightarrow> \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A
[PROOF STEP]
(*<*)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A \<Longrightarrow> \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A
[PROOF STEP]
apply (induct_tac n)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A \<Longrightarrow> \<Turnstile>\<^sub>0 G \<longrightarrow> \<Turnstile>\<^sub>0 Call : A
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n G \<longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (simp add: VDM_validn_def, clarsimp)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a b aa ba. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> (\<forall>m\<le>n. \<forall>a b aa ba. (a, b) , body \<rightarrow>\<^sub>m (aa, ba) \<longrightarrow> A (a, b) (aa, ba)); \<Turnstile>\<^sub>0 G ; (a, b) , Call \<rightarrow>\<^sub>0 (aa, ba) \<rbrakk> \<Longrightarrow> A (a, b) (aa, ba)
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n G \<longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (drule Sem_no_zero_height_derivs)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>a b aa ba. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> (\<forall>m\<le>n. \<forall>a b aa ba. (a, b) , body \<rightarrow>\<^sub>m (aa, ba) \<longrightarrow> A (a, b) (aa, ba)); \<Turnstile>\<^sub>0 G ; False\<rbrakk> \<Longrightarrow> A (a, b) (aa, ba)
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n G \<longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n {A} \<union> G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n G \<longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply clarsimp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>Suc n G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (drule Ctxt_lowerm)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> ?m20 n < Suc n
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>?m20 n G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (subgoal_tac "n < Suc n", assumption)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> n < Suc n
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>n G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n G \<longrightarrow> \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>n G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply clarsimp
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>n G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (drule ctxt_consn)
[PROOF STATE]
proof (prove)
goal (2 subgoals):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n Call : A \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>n Call : ?A24 n
2. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>n {?A24 n} \<union> G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply assumption
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> \<Turnstile>\<^sub>n body : A ; \<Turnstile>\<^sub>n Call : A ; \<Turnstile>\<^sub>n {A} \<union> G \<rbrakk> \<Longrightarrow> \<Turnstile>\<^sub>Suc n Call : A
[PROOF STEP]
apply (simp add: VDM_validn_def, clarsimp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>n m a b aa ba. \<lbrakk>\<forall>n. \<Turnstile>\<^sub>n insert A G \<longrightarrow> (\<forall>m\<le>n. \<forall>a b aa ba. (a, b) , body \<rightarrow>\<^sub>m (aa, ba) \<longrightarrow> A (a, b) (aa, ba)); \<forall>m\<le>n. \<forall>a b aa ba. (a, b) , Call \<rightarrow>\<^sub>m (aa, ba) \<longrightarrow> A (a, b) (aa, ba); \<Turnstile>\<^sub>n insert A G ; m \<le> Suc n; (a, b) , Call \<rightarrow>\<^sub>m (aa, ba) \<rbrakk> \<Longrightarrow> A (a, b) (aa, ba)
[PROOF STEP]
apply (erule Sem_eval_cases, clarsimp)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
Formal statement is: corollary Sura_Bura_clopen_subset: fixes S :: "'a::euclidean_space set" assumes S: "locally compact S" and C: "C \<in> components S" and "compact C" and U: "open U" "C \<subseteq> U" obtains K where "openin (top_of_set S) K" "compact K" "C \<subseteq> K" "K \<subseteq> U" Informal statement is: If $C$ is a compact component of a locally compact set $S$ and $U$ is an open set containing $C$, then there exists a set $K$, compact and open in $S$, such that $C \subseteq K \subseteq U$.
|
Formal statement is: lemma higher_deriv_power: shows "(deriv ^^ j) (\<lambda>w. (w - z) ^ n) w = pochhammer (of_nat (Suc n - j)) j * (w - z) ^ (n - j)" Informal statement is: The $j$th derivative of $(w - z)^n$ is $\frac{n!}{(n - j)!} (w - z)^{n - j}$ for $j \le n$, and $0$ for $j > n$.
|
Hand of Hope’s short-term medical and dental outreaches began with one trip to Cambodia in 2007. What started out small has now led to several yearly trips to countries around the world. Recently, our team was able to complete the 100th short-term medical and dental outreach in Zambia. Here are some of the lives you were able to impact in the span of just one day!
Oscar came to our medical tent reporting that he was randomly falling down every month. Just the night before, he had fallen and sustained a traumatic injury to his right arm. The doctor said that it was the type of fracture that could very easily have severed the main artery in his arm. After Oscar’s examination, it was determined that he had a seizure disorder. Unfortunately, he had no way of getting to the hospital for treatment, so we paid for his transport. By diagnosing his disorder, our team hoped he could avoid future injuries to his body.
Sweet Grace came to our outreach hunched over and suffering from the aches that most 89-year-olds experience. However, she did not have the simple medicines available to help. Our team was able to give her aspirin, blood pressure medicine and pills for her muscles – such simple solutions to the pain she was feeling. Our team was also happy to pray with Grace before she left: “Now after prayer, I feel that my body has become normal. It doesn’t feel as badly as before.” She left standing much straighter!
67-year-old Alex had been having trouble with his eyes for at least two years. He was not able to see clearly, which caused him difficulty when reading. After meeting with one of our volunteers, he received a pair of glasses and was so ecstatic to be able to read again. What was the first book he picked up? One of Joyce’s from the prayer tent!
We’ve been able to treat 297,923 patients and see 106,056 salvations over the course of 100 trips, but there are so many more lives to reach! You can find out more about our upcoming trips at joycemeyer.org/MedicalMissions. So, whether you’ve been a volunteer in the past, or you’ve been financially supporting Joyce Meyer Ministries, you’re a part of this…thank you!
|
Crown Bioscience is a global drug discovery and development solutions company providing translational platforms to advance oncology, inflammation, cardiovascular and metabolic disease research. With an extensive portfolio of relevant models and predictive tools, CrownBio enables clients to deliver superior clinical candidates and accelerate drug development programs. CrownBio also provides preclinical immunotherapy research platforms supporting the successful transition of immunotherapeutics from the lab into the clinic.
|
module Control.Permutation.Types
import Data.List
import Data.Vect
import Data.Fin
import Data.Group
%default total
infixr 7 :*
||| This is something like `Vect n a`, except we restrict ourselves to only 1,...,n for `Permutation n`.
public export
data Permutation : Nat -> Type where
Nil : Permutation Z
(:*) : Fin (S n) -> Permutation n -> Permutation (S n)
implementation Eq (Permutation n) where
(==) Nil Nil = True
(==) (x :* xs) (y :* ys) = x == y && xs == ys
private
id : {n : Nat} -> Permutation n
id {n=Z} = []
id {n=S _} = FZ :* id
export
debug : Permutation n -> String
debug Nil = "Nil"
debug (p:*ps) = show (finToNat p) ++ " :* " ++ (debug ps)
||| This is essentially a group action. Given a permutation, we apply it to a vector.
sigma : {n : Nat} -> Permutation n -> Vect n a -> Vect n a
sigma _ [] = []
sigma {n=S _} (p:*ps) (x::xs) = insert (sigma ps xs) p
where
insert : Vect n a -> Fin (S n) -> Vect (S n) a
insert l FZ = x::l
insert Data.Vect.Nil _ = [x]
insert (e::es) (FS k) = e :: insert es k
export
toVector : {n : Nat} -> Permutation n -> Vect n (Fin n)
toVector {n} p = sigma p (sequential n)
where
sequential : (n : Nat) -> Vect n (Fin n)
sequential Z = []
sequential (S k) = FZ :: map FS (sequential k)
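-- A minimal usage sketch (`swap01` is illustrative, not part of the
-- original API): the permutation exchanging the first two of three
-- elements; `toVector swap01` evaluates to [FS FZ, FZ, FS (FS FZ)].
private
swap01 : Permutation 3
swap01 = FS FZ :* FZ :* FZ :* Nil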
private
indices : {n : Nat} -> Permutation n -> Vect n (Fin n)
indices [] = Nil
indices (p :* ps) = p :: map (thin p) (indices ps)
where
thin : {n : Nat} -> Fin (S n) -> Fin n -> Fin (S n)
thin FZ i = FS i
thin _ FZ = FZ
thin (FS i) (FS j) = FS (thin i j)
private
delete : {n : Nat} -> Fin (S n) -> Permutation (S n) -> Permutation n
delete FZ (j :* p) = p
delete {n=Z} (FS _) _ = Nil
delete {n=S _} (FS i) (j :* p) = fromMaybe (lifter j) (strengthen j) :* delete i p
where
lifter : {n : Nat} -> Fin (S n) -> Fin n
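-- The n=Z case (Fin 1 -> Fin 0) admits no total implementation, hence
-- the hole below; delete never reaches it, since there j : Fin (S (S k)).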
lifter {n=Z} _ = ?hole
lifter {n=S _} (FS k) = k
lifter {n=S _} FZ = FZ
private
compose : {n : Nat} -> Permutation n -> Permutation n -> Permutation n
compose Nil p = p
compose (i :* p) p' = index i (indices p') :* compose p (delete i p')
export
invert : {n : Nat} -> Permutation n -> Permutation n
invert Nil = Nil
invert p@(i :* is) = index (i' p) (indices p) :* delete (i' p) p
where
i' : Permutation n -> Fin n
i' p = index i (indices p)
export
implementation {n : Nat} -> Semigroup (Permutation n) where
(<+>) = compose
export
implementation {n : Nat} -> Monoid (Permutation n) where
neutral = id
export
implementation {n : Nat} -> Group (Permutation n) where
inverse = invert
|
rm(list = ls())
#load data
meth_level <- read.csv("meth_level.csv", header = T, stringsAsFactors = F)
meth_level <- as.numeric(meth_level$X0)
idx <- which(!is.na(meth_level))
meth_level <- meth_level[idx]
meth_prob <- read.csv("meth_prob.csv", header = T, stringsAsFactors = F)
meth_prob <- meth_prob$pred_prob
meth_prob <- meth_prob[idx]
#correlation analysis
cor(meth_level, meth_prob, method = "spearman")
#0.3461575
#boxplot
grade <- ceiling(meth_level*10)
df <- data.frame(score = meth_prob, level = grade)
df <- df[df$level != 0,]
pdf('Correlation.pdf')
boxplot(score~level, data=df, notch=TRUE,
col=(c("gold","darkgreen")),
main="Caenorhabditis elegans",
xlab="Methylation Level", ylab = "Predicted Score")
dev.off()
|
{-# OPTIONS --without-K #-}
module PiLevel1 where
open import Data.Unit using (⊤; tt)
open import Relation.Binary.Core using (IsEquivalence)
open import Relation.Binary.PropositionalEquality using (_≡_; refl; subst; sym; [_])
open import PiU using (U; ZERO; ONE; PLUS; TIMES)
open import PiLevel0
-- hiding triv≡ certainly; we are replacing it with _⇔_
using (_⟷_; !;
unite₊l; uniti₊l; unite₊r; uniti₊r; swap₊; assocl₊; assocr₊;
unite⋆l; uniti⋆l; unite⋆r; uniti⋆r; swap⋆; assocl⋆; assocr⋆;
absorbr; absorbl; factorzr; factorzl;
dist; factor; distl; factorl;
id⟷; _◎_; _⊕_; _⊗_)
------------------------------------------------------------------------------
-- Level 1: instead of using triv≡ to reason about equivalence of
-- combinators, we use the following 2-combinators
infix 30 _⇔_
data _⇔_ : {t₁ t₂ : U} → (t₁ ⟷ t₂) → (t₁ ⟷ t₂) → Set where
assoc◎l : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₂ ⟷ t₃} {c₃ : t₃ ⟷ t₄} →
(c₁ ◎ (c₂ ◎ c₃)) ⇔ ((c₁ ◎ c₂) ◎ c₃)
assoc◎r : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₂ ⟷ t₃} {c₃ : t₃ ⟷ t₄} →
((c₁ ◎ c₂) ◎ c₃) ⇔ (c₁ ◎ (c₂ ◎ c₃))
assocl⊕l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
((c₁ ⊕ (c₂ ⊕ c₃)) ◎ assocl₊) ⇔ (assocl₊ ◎ ((c₁ ⊕ c₂) ⊕ c₃))
assocl⊕r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(assocl₊ ◎ ((c₁ ⊕ c₂) ⊕ c₃)) ⇔ ((c₁ ⊕ (c₂ ⊕ c₃)) ◎ assocl₊)
assocl⊗l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
((c₁ ⊗ (c₂ ⊗ c₃)) ◎ assocl⋆) ⇔ (assocl⋆ ◎ ((c₁ ⊗ c₂) ⊗ c₃))
assocl⊗r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(assocl⋆ ◎ ((c₁ ⊗ c₂) ⊗ c₃)) ⇔ ((c₁ ⊗ (c₂ ⊗ c₃)) ◎ assocl⋆)
assocr⊕r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(((c₁ ⊕ c₂) ⊕ c₃) ◎ assocr₊) ⇔ (assocr₊ ◎ (c₁ ⊕ (c₂ ⊕ c₃)))
assocr⊗l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(assocr⋆ ◎ (c₁ ⊗ (c₂ ⊗ c₃))) ⇔ (((c₁ ⊗ c₂) ⊗ c₃) ◎ assocr⋆)
assocr⊗r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(((c₁ ⊗ c₂) ⊗ c₃) ◎ assocr⋆) ⇔ (assocr⋆ ◎ (c₁ ⊗ (c₂ ⊗ c₃)))
assocr⊕l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₅ ⟷ t₆} →
(assocr₊ ◎ (c₁ ⊕ (c₂ ⊕ c₃))) ⇔ (((c₁ ⊕ c₂) ⊕ c₃) ◎ assocr₊)
dist⇔l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
((a ⊕ b) ⊗ c) ◎ dist ⇔ dist ◎ ((a ⊗ c) ⊕ (b ⊗ c))
dist⇔r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
dist ◎ ((a ⊗ c) ⊕ (b ⊗ c)) ⇔ ((a ⊕ b) ⊗ c) ◎ dist
distl⇔l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
(a ⊗ (b ⊕ c)) ◎ distl ⇔ distl ◎ ((a ⊗ b) ⊕ (a ⊗ c))
distl⇔r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
distl ◎ ((a ⊗ b) ⊕ (a ⊗ c)) ⇔ (a ⊗ (b ⊕ c)) ◎ distl
factor⇔l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
((a ⊗ c) ⊕ (b ⊗ c)) ◎ factor ⇔ factor ◎ ((a ⊕ b) ⊗ c)
factor⇔r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
factor ◎ ((a ⊕ b) ⊗ c) ⇔ ((a ⊗ c) ⊕ (b ⊗ c)) ◎ factor
factorl⇔l : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
((a ⊗ b) ⊕ (a ⊗ c)) ◎ factorl ⇔ factorl ◎ (a ⊗ (b ⊕ c))
factorl⇔r : {t₁ t₂ t₃ t₄ t₅ t₆ : U}
{a : t₁ ⟷ t₂} {b : t₃ ⟷ t₄} {c : t₅ ⟷ t₆} →
factorl ◎ (a ⊗ (b ⊕ c)) ⇔ ((a ⊗ b) ⊕ (a ⊗ c)) ◎ factorl
idl◎l : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → (id⟷ ◎ c) ⇔ c
idl◎r : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → c ⇔ id⟷ ◎ c
idr◎l : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → (c ◎ id⟷) ⇔ c
idr◎r : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → c ⇔ (c ◎ id⟷)
linv◎l : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → (c ◎ ! c) ⇔ id⟷
linv◎r : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → id⟷ ⇔ (c ◎ ! c)
rinv◎l : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → (! c ◎ c) ⇔ id⟷
rinv◎r : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → id⟷ ⇔ (! c ◎ c)
unite₊l⇔l : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(unite₊l ◎ c₂) ⇔ ((c₁ ⊕ c₂) ◎ unite₊l)
unite₊l⇔r : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
((c₁ ⊕ c₂) ◎ unite₊l) ⇔ (unite₊l ◎ c₂)
uniti₊l⇔l : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(uniti₊l ◎ (c₁ ⊕ c₂)) ⇔ (c₂ ◎ uniti₊l)
uniti₊l⇔r : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(c₂ ◎ uniti₊l) ⇔ (uniti₊l ◎ (c₁ ⊕ c₂))
unite₊r⇔l : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(unite₊r ◎ c₂) ⇔ ((c₂ ⊕ c₁) ◎ unite₊r)
unite₊r⇔r : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
((c₂ ⊕ c₁) ◎ unite₊r) ⇔ (unite₊r ◎ c₂)
uniti₊r⇔l : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(uniti₊r ◎ (c₂ ⊕ c₁)) ⇔ (c₂ ◎ uniti₊r)
uniti₊r⇔r : {t₁ t₂ : U} {c₁ : ZERO ⟷ ZERO} {c₂ : t₁ ⟷ t₂} →
(c₂ ◎ uniti₊r) ⇔ (uniti₊r ◎ (c₂ ⊕ c₁))
swapl₊⇔ : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} →
(swap₊ ◎ (c₁ ⊕ c₂)) ⇔ ((c₂ ⊕ c₁) ◎ swap₊)
swapr₊⇔ : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} →
((c₂ ⊕ c₁) ◎ swap₊) ⇔ (swap₊ ◎ (c₁ ⊕ c₂))
unitel⋆⇔l : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(unite⋆l ◎ c₂) ⇔ ((c₁ ⊗ c₂) ◎ unite⋆l)
uniter⋆⇔l : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
((c₁ ⊗ c₂) ◎ unite⋆l) ⇔ (unite⋆l ◎ c₂)
unitil⋆⇔l : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(uniti⋆l ◎ (c₁ ⊗ c₂)) ⇔ (c₂ ◎ uniti⋆l)
unitir⋆⇔l : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(c₂ ◎ uniti⋆l) ⇔ (uniti⋆l ◎ (c₁ ⊗ c₂))
unitel⋆⇔r : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(unite⋆r ◎ c₂) ⇔ ((c₂ ⊗ c₁) ◎ unite⋆r)
uniter⋆⇔r : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
((c₂ ⊗ c₁) ◎ unite⋆r) ⇔ (unite⋆r ◎ c₂)
unitil⋆⇔r : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(uniti⋆r ◎ (c₂ ⊗ c₁)) ⇔ (c₂ ◎ uniti⋆r)
unitir⋆⇔r : {t₁ t₂ : U} {c₁ : ONE ⟷ ONE} {c₂ : t₁ ⟷ t₂} →
(c₂ ◎ uniti⋆r) ⇔ (uniti⋆r ◎ (c₂ ⊗ c₁))
swapl⋆⇔ : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} →
(swap⋆ ◎ (c₁ ⊗ c₂)) ⇔ ((c₂ ⊗ c₁) ◎ swap⋆)
swapr⋆⇔ : {t₁ t₂ t₃ t₄ : U} {c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} →
((c₂ ⊗ c₁) ◎ swap⋆) ⇔ (swap⋆ ◎ (c₁ ⊗ c₂))
id⇔ : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → c ⇔ c
trans⇔ : {t₁ t₂ : U} {c₁ c₂ c₃ : t₁ ⟷ t₂} →
(c₁ ⇔ c₂) → (c₂ ⇔ c₃) → (c₁ ⇔ c₃)
_⊡_ : {t₁ t₂ t₃ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₂ ⟷ t₃} {c₃ : t₁ ⟷ t₂} {c₄ : t₂ ⟷ t₃} →
(c₁ ⇔ c₃) → (c₂ ⇔ c₄) → (c₁ ◎ c₂) ⇔ (c₃ ◎ c₄)
resp⊕⇔ : {t₁ t₂ t₃ t₄ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₁ ⟷ t₂} {c₄ : t₃ ⟷ t₄} →
(c₁ ⇔ c₃) → (c₂ ⇔ c₄) → (c₁ ⊕ c₂) ⇔ (c₃ ⊕ c₄)
resp⊗⇔ : {t₁ t₂ t₃ t₄ : U}
{c₁ : t₁ ⟷ t₂} {c₂ : t₃ ⟷ t₄} {c₃ : t₁ ⟷ t₂} {c₄ : t₃ ⟷ t₄} →
(c₁ ⇔ c₃) → (c₂ ⇔ c₄) → (c₁ ⊗ c₂) ⇔ (c₃ ⊗ c₄)
-- below are the combinators added for the RigCategory structure
id⟷⊕id⟷⇔ : {t₁ t₂ : U} → (id⟷ {t₁} ⊕ id⟷ {t₂}) ⇔ id⟷
split⊕-id⟷ : {t₁ t₂ : U} → (id⟷ {PLUS t₁ t₂}) ⇔ (id⟷ ⊕ id⟷)
hom⊕◎⇔ : {t₁ t₂ t₃ t₄ t₅ t₆ : U} {c₁ : t₅ ⟷ t₁} {c₂ : t₆ ⟷ t₂}
{c₃ : t₁ ⟷ t₃} {c₄ : t₂ ⟷ t₄} →
((c₁ ◎ c₃) ⊕ (c₂ ◎ c₄)) ⇔ ((c₁ ⊕ c₂) ◎ (c₃ ⊕ c₄))
hom◎⊕⇔ : {t₁ t₂ t₃ t₄ t₅ t₆ : U} {c₁ : t₅ ⟷ t₁} {c₂ : t₆ ⟷ t₂}
{c₃ : t₁ ⟷ t₃} {c₄ : t₂ ⟷ t₄} →
((c₁ ⊕ c₂) ◎ (c₃ ⊕ c₄)) ⇔ ((c₁ ◎ c₃) ⊕ (c₂ ◎ c₄))
id⟷⊗id⟷⇔ : {t₁ t₂ : U} → (id⟷ {t₁} ⊗ id⟷ {t₂}) ⇔ id⟷
split⊗-id⟷ : {t₁ t₂ : U} → (id⟷ {TIMES t₁ t₂}) ⇔ (id⟷ ⊗ id⟷)
hom⊗◎⇔ : {t₁ t₂ t₃ t₄ t₅ t₆ : U} {c₁ : t₅ ⟷ t₁} {c₂ : t₆ ⟷ t₂}
{c₃ : t₁ ⟷ t₃} {c₄ : t₂ ⟷ t₄} →
((c₁ ◎ c₃) ⊗ (c₂ ◎ c₄)) ⇔ ((c₁ ⊗ c₂) ◎ (c₃ ⊗ c₄))
hom◎⊗⇔ : {t₁ t₂ t₃ t₄ t₅ t₆ : U} {c₁ : t₅ ⟷ t₁} {c₂ : t₆ ⟷ t₂}
{c₃ : t₁ ⟷ t₃} {c₄ : t₂ ⟷ t₄} →
((c₁ ⊗ c₂) ◎ (c₃ ⊗ c₄)) ⇔ ((c₁ ◎ c₃) ⊗ (c₂ ◎ c₄))
-- associativity triangle
triangle⊕l : {t₁ t₂ : U} →
(unite₊r {t₁} ⊕ id⟷ {t₂}) ⇔ assocr₊ ◎ (id⟷ ⊕ unite₊l)
triangle⊕r : {t₁ t₂ : U} →
assocr₊ ◎ (id⟷ {t₁} ⊕ unite₊l {t₂}) ⇔ (unite₊r ⊕ id⟷)
triangle⊗l : {t₁ t₂ : U} →
((unite⋆r {t₁}) ⊗ id⟷ {t₂}) ⇔ assocr⋆ ◎ (id⟷ ⊗ unite⋆l)
triangle⊗r : {t₁ t₂ : U} →
(assocr⋆ ◎ (id⟷ {t₁} ⊗ unite⋆l {t₂})) ⇔ (unite⋆r ⊗ id⟷)
pentagon⊕l : {t₁ t₂ t₃ t₄ : U} →
assocr₊ ◎ (assocr₊ {t₁} {t₂} {PLUS t₃ t₄}) ⇔
((assocr₊ ⊕ id⟷) ◎ assocr₊) ◎ (id⟷ ⊕ assocr₊)
pentagon⊕r : {t₁ t₂ t₃ t₄ : U} →
((assocr₊ {t₁} {t₂} {t₃} ⊕ id⟷ {t₄}) ◎ assocr₊) ◎ (id⟷ ⊕ assocr₊) ⇔
assocr₊ ◎ assocr₊
pentagon⊗l : {t₁ t₂ t₃ t₄ : U} →
assocr⋆ ◎ (assocr⋆ {t₁} {t₂} {TIMES t₃ t₄}) ⇔
((assocr⋆ ⊗ id⟷) ◎ assocr⋆) ◎ (id⟷ ⊗ assocr⋆)
pentagon⊗r : {t₁ t₂ t₃ t₄ : U} →
((assocr⋆ {t₁} {t₂} {t₃} ⊗ id⟷ {t₄}) ◎ assocr⋆) ◎ (id⟷ ⊗ assocr⋆) ⇔
assocr⋆ ◎ assocr⋆
-- from the braiding
-- unit coherence
unite₊l-coh-l : {t₁ : U} → unite₊l {t₁} ⇔ swap₊ ◎ unite₊r
unite₊l-coh-r : {t₁ : U} → swap₊ ◎ unite₊r ⇔ unite₊l {t₁}
unite⋆l-coh-l : {t₁ : U} → unite⋆l {t₁} ⇔ swap⋆ ◎ unite⋆r
unite⋆l-coh-r : {t₁ : U} → swap⋆ ◎ unite⋆r ⇔ unite⋆l {t₁}
hexagonr⊕l : {t₁ t₂ t₃ : U} →
(assocr₊ ◎ swap₊) ◎ assocr₊ {t₁} {t₂} {t₃} ⇔
((swap₊ ⊕ id⟷) ◎ assocr₊) ◎ (id⟷ ⊕ swap₊)
hexagonr⊕r : {t₁ t₂ t₃ : U} →
((swap₊ ⊕ id⟷) ◎ assocr₊) ◎ (id⟷ ⊕ swap₊) ⇔
(assocr₊ ◎ swap₊) ◎ assocr₊ {t₁} {t₂} {t₃}
hexagonl⊕l : {t₁ t₂ t₃ : U} →
(assocl₊ ◎ swap₊) ◎ assocl₊ {t₁} {t₂} {t₃} ⇔
((id⟷ ⊕ swap₊) ◎ assocl₊) ◎ (swap₊ ⊕ id⟷)
hexagonl⊕r : {t₁ t₂ t₃ : U} →
((id⟷ ⊕ swap₊) ◎ assocl₊) ◎ (swap₊ ⊕ id⟷) ⇔
(assocl₊ ◎ swap₊) ◎ assocl₊ {t₁} {t₂} {t₃}
hexagonr⊗l : {t₁ t₂ t₃ : U} →
(assocr⋆ ◎ swap⋆) ◎ assocr⋆ {t₁} {t₂} {t₃} ⇔
((swap⋆ ⊗ id⟷) ◎ assocr⋆) ◎ (id⟷ ⊗ swap⋆)
hexagonr⊗r : {t₁ t₂ t₃ : U} →
((swap⋆ ⊗ id⟷) ◎ assocr⋆) ◎ (id⟷ ⊗ swap⋆) ⇔
(assocr⋆ ◎ swap⋆) ◎ assocr⋆ {t₁} {t₂} {t₃}
hexagonl⊗l : {t₁ t₂ t₃ : U} →
(assocl⋆ ◎ swap⋆) ◎ assocl⋆ {t₁} {t₂} {t₃} ⇔
((id⟷ ⊗ swap⋆) ◎ assocl⋆) ◎ (swap⋆ ⊗ id⟷)
hexagonl⊗r : {t₁ t₂ t₃ : U} →
((id⟷ ⊗ swap⋆) ◎ assocl⋆) ◎ (swap⋆ ⊗ id⟷) ⇔
(assocl⋆ ◎ swap⋆) ◎ assocl⋆ {t₁} {t₂} {t₃}
absorbl⇔l : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
(c₁ ⊗ id⟷ {ZERO}) ◎ absorbl ⇔ absorbl ◎ id⟷ {ZERO}
absorbl⇔r : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
absorbl ◎ id⟷ {ZERO} ⇔ (c₁ ⊗ id⟷ {ZERO}) ◎ absorbl
absorbr⇔l : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
(id⟷ {ZERO} ⊗ c₁) ◎ absorbr ⇔ absorbr ◎ id⟷ {ZERO}
absorbr⇔r : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
absorbr ◎ id⟷ {ZERO} ⇔ (id⟷ {ZERO} ⊗ c₁) ◎ absorbr
factorzl⇔l : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
id⟷ ◎ factorzl ⇔ factorzl ◎ (id⟷ ⊗ c₁)
factorzl⇔r : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
factorzl ◎ (id⟷ {ZERO} ⊗ c₁) ⇔ id⟷ {ZERO} ◎ factorzl
factorzr⇔l : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
id⟷ ◎ factorzr ⇔ factorzr ◎ (c₁ ⊗ id⟷)
factorzr⇔r : {t₁ t₂ : U} {c₁ : t₁ ⟷ t₂} →
factorzr ◎ (c₁ ⊗ id⟷) ⇔ id⟷ ◎ factorzr
-- from the coherence conditions of RigCategory
swap₊distl⇔l : {t₁ t₂ t₃ : U} →
(id⟷ {t₁} ⊗ swap₊ {t₂} {t₃}) ◎ distl ⇔ distl ◎ swap₊
swap₊distl⇔r : {t₁ t₂ t₃ : U} →
distl ◎ swap₊ ⇔ (id⟷ {t₁} ⊗ swap₊ {t₂} {t₃}) ◎ distl
dist-swap⋆⇔l : {t₁ t₂ t₃ : U} →
dist {t₁} {t₂} {t₃} ◎ (swap⋆ ⊕ swap⋆) ⇔ swap⋆ ◎ distl
dist-swap⋆⇔r : {t₁ t₂ t₃ : U} →
swap⋆ ◎ distl {t₁} {t₂} {t₃} ⇔ dist ◎ (swap⋆ ⊕ swap⋆)
assocl₊-dist-dist⇔l : {t₁ t₂ t₃ t₄ : U} →
((assocl₊ {t₁} {t₂} {t₃} ⊗ id⟷ {t₄}) ◎ dist) ◎ (dist ⊕ id⟷) ⇔
(dist ◎ (id⟷ ⊕ dist)) ◎ assocl₊
assocl₊-dist-dist⇔r : {t₁ t₂ t₃ t₄ : U} →
(dist {t₁} ◎ (id⟷ ⊕ dist {t₂} {t₃} {t₄})) ◎ assocl₊ ⇔
((assocl₊ ⊗ id⟷) ◎ dist) ◎ (dist ⊕ id⟷)
assocl⋆-distl⇔l : {t₁ t₂ t₃ t₄ : U} →
assocl⋆ {t₁} {t₂} ◎ distl {TIMES t₁ t₂} {t₃} {t₄} ⇔
((id⟷ ⊗ distl) ◎ distl) ◎ (assocl⋆ ⊕ assocl⋆)
assocl⋆-distl⇔r : {t₁ t₂ t₃ t₄ : U} →
((id⟷ ⊗ distl) ◎ distl) ◎ (assocl⋆ ⊕ assocl⋆) ⇔
assocl⋆ {t₁} {t₂} ◎ distl {TIMES t₁ t₂} {t₃} {t₄}
absorbr0-absorbl0⇔ : absorbr {ZERO} ⇔ absorbl {ZERO}
absorbl0-absorbr0⇔ : absorbl {ZERO} ⇔ absorbr {ZERO}
absorbr⇔distl-absorb-unite : {t₁ t₂ : U} →
absorbr ⇔ (distl {t₂ = t₁} {t₂} ◎ (absorbr ⊕ absorbr)) ◎ unite₊l
distl-absorb-unite⇔absorbr : {t₁ t₂ : U} →
(distl {t₂ = t₁} {t₂} ◎ (absorbr ⊕ absorbr)) ◎ unite₊l ⇔ absorbr
unite⋆r0-absorbr1⇔ : unite⋆r ⇔ absorbr
absorbr1-unite⋆r-⇔ : absorbr ⇔ unite⋆r
absorbl≡swap⋆◎absorbr : {t₁ : U} → absorbl {t₁} ⇔ swap⋆ ◎ absorbr
swap⋆◎absorbr≡absorbl : {t₁ : U} → swap⋆ ◎ absorbr ⇔ absorbl {t₁}
absorbr⇔[assocl⋆◎[absorbr⊗id⟷]]◎absorbr : {t₁ t₂ : U} →
absorbr ⇔ (assocl⋆ {ZERO} {t₁} {t₂} ◎ (absorbr ⊗ id⟷)) ◎ absorbr
[assocl⋆◎[absorbr⊗id⟷]]◎absorbr⇔absorbr : {t₁ t₂ : U} →
(assocl⋆ {ZERO} {t₁} {t₂} ◎ (absorbr ⊗ id⟷)) ◎ absorbr ⇔ absorbr
[id⟷⊗absorbr]◎absorbl⇔assocl⋆◎[absorbl⊗id⟷]◎absorbr : {t₁ t₂ : U} →
(id⟷ ⊗ absorbr {t₂}) ◎ absorbl {t₁} ⇔
(assocl⋆ ◎ (absorbl ⊗ id⟷)) ◎ absorbr
assocl⋆◎[absorbl⊗id⟷]◎absorbr⇔[id⟷⊗absorbr]◎absorbl : {t₁ t₂ : U} →
(assocl⋆ ◎ (absorbl ⊗ id⟷)) ◎ absorbr ⇔
(id⟷ ⊗ absorbr {t₂}) ◎ absorbl {t₁}
elim⊥-A[0⊕B]⇔l : {t₁ t₂ : U} →
(id⟷ {t₁} ⊗ unite₊l {t₂}) ⇔
(distl ◎ (absorbl ⊕ id⟷)) ◎ unite₊l
elim⊥-A[0⊕B]⇔r : {t₁ t₂ : U} →
(distl ◎ (absorbl ⊕ id⟷)) ◎ unite₊l ⇔ (id⟷ {t₁} ⊗ unite₊l {t₂})
elim⊥-1[A⊕B]⇔l : {t₁ t₂ : U} →
unite⋆l ⇔
distl ◎ (unite⋆l {t₁} ⊕ unite⋆l {t₂})
elim⊥-1[A⊕B]⇔r : {t₁ t₂ : U} →
distl ◎ (unite⋆l {t₁} ⊕ unite⋆l {t₂}) ⇔ unite⋆l
fully-distribute⇔l : {t₁ t₂ t₃ t₄ : U} →
(distl ◎ (dist {t₁} {t₂} {t₃} ⊕ dist {t₁} {t₂} {t₄})) ◎ assocl₊ ⇔
((((dist ◎ (distl ⊕ distl)) ◎ assocl₊) ◎ (assocr₊ ⊕ id⟷)) ◎
((id⟷ ⊕ swap₊) ⊕ id⟷)) ◎ (assocl₊ ⊕ id⟷)
fully-distribute⇔r : {t₁ t₂ t₃ t₄ : U} →
((((dist ◎ (distl ⊕ distl)) ◎ assocl₊) ◎ (assocr₊ ⊕ id⟷)) ◎
((id⟷ ⊕ swap₊) ⊕ id⟷)) ◎ (assocl₊ ⊕ id⟷) ⇔
(distl ◎ (dist {t₁} {t₂} {t₃} ⊕ dist {t₁} {t₂} {t₄})) ◎ assocl₊
-- At the next level we have a trivial equivalence that equates all
-- 2-morphisms of the same type.
triv≡ : {t₁ t₂ : U} {f g : t₁ ⟷ t₂} → (α β : f ⇔ g) → Set
triv≡ _ _ = ⊤
triv≡Equiv : {t₁ t₂ : U} {f₁ f₂ : t₁ ⟷ t₂} →
IsEquivalence (triv≡ {t₁} {t₂} {f₁} {f₂})
triv≡Equiv = record
{ refl = tt
; sym = λ _ → tt
; trans = λ _ _ → tt
}
------------------------------------------------------------------------------
-- Inverses for 2paths
2! : {t₁ t₂ : U} {c₁ c₂ : t₁ ⟷ t₂} → (c₁ ⇔ c₂) → (c₂ ⇔ c₁)
2! assoc◎l = assoc◎r
2! assoc◎r = assoc◎l
2! assocl⊕l = assocl⊕r
2! assocl⊕r = assocl⊕l
2! assocl⊗l = assocl⊗r
2! assocl⊗r = assocl⊗l
2! assocr⊕r = assocr⊕l
2! assocr⊕l = assocr⊕r
2! assocr⊗r = assocr⊗l
2! assocr⊗l = assocr⊗r
2! dist⇔l = dist⇔r
2! dist⇔r = dist⇔l
2! distl⇔l = distl⇔r
2! distl⇔r = distl⇔l
2! factor⇔l = factor⇔r
2! factor⇔r = factor⇔l
2! factorl⇔l = factorl⇔r
2! factorl⇔r = factorl⇔l
2! idl◎l = idl◎r
2! idl◎r = idl◎l
2! idr◎l = idr◎r
2! idr◎r = idr◎l
2! linv◎l = linv◎r
2! linv◎r = linv◎l
2! rinv◎l = rinv◎r
2! rinv◎r = rinv◎l
2! unite₊l⇔l = unite₊l⇔r
2! unite₊l⇔r = unite₊l⇔l
2! uniti₊l⇔l = uniti₊l⇔r
2! uniti₊l⇔r = uniti₊l⇔l
2! unite₊r⇔l = unite₊r⇔r
2! unite₊r⇔r = unite₊r⇔l
2! uniti₊r⇔l = uniti₊r⇔r
2! uniti₊r⇔r = uniti₊r⇔l
2! swapl₊⇔ = swapr₊⇔
2! swapr₊⇔ = swapl₊⇔
2! unitel⋆⇔l = uniter⋆⇔l
2! uniter⋆⇔l = unitel⋆⇔l
2! unitil⋆⇔l = unitir⋆⇔l
2! unitir⋆⇔l = unitil⋆⇔l
2! unitel⋆⇔r = uniter⋆⇔r
2! uniter⋆⇔r = unitel⋆⇔r
2! unitil⋆⇔r = unitir⋆⇔r
2! unitir⋆⇔r = unitil⋆⇔r
2! swapl⋆⇔ = swapr⋆⇔
2! swapr⋆⇔ = swapl⋆⇔
2! id⇔ = id⇔
2! (α ⊡ β) = (2! α) ⊡ (2! β)
2! (trans⇔ α β) = trans⇔ (2! β) (2! α)
2! (resp⊕⇔ α β) = resp⊕⇔ (2! α) (2! β)
2! (resp⊗⇔ α β) = resp⊗⇔ (2! α) (2! β)
2! id⟷⊕id⟷⇔ = split⊕-id⟷
2! split⊕-id⟷ = id⟷⊕id⟷⇔
2! hom⊕◎⇔ = hom◎⊕⇔
2! hom◎⊕⇔ = hom⊕◎⇔
2! id⟷⊗id⟷⇔ = split⊗-id⟷
2! split⊗-id⟷ = id⟷⊗id⟷⇔
2! hom⊗◎⇔ = hom◎⊗⇔
2! hom◎⊗⇔ = hom⊗◎⇔
2! triangle⊕l = triangle⊕r
2! triangle⊕r = triangle⊕l
2! triangle⊗l = triangle⊗r
2! triangle⊗r = triangle⊗l
2! pentagon⊕l = pentagon⊕r
2! pentagon⊕r = pentagon⊕l
2! pentagon⊗l = pentagon⊗r
2! pentagon⊗r = pentagon⊗l
2! unite₊l-coh-l = unite₊l-coh-r
2! unite₊l-coh-r = unite₊l-coh-l
2! unite⋆l-coh-l = unite⋆l-coh-r
2! unite⋆l-coh-r = unite⋆l-coh-l
2! hexagonr⊕l = hexagonr⊕r
2! hexagonr⊕r = hexagonr⊕l
2! hexagonl⊕l = hexagonl⊕r
2! hexagonl⊕r = hexagonl⊕l
2! hexagonr⊗l = hexagonr⊗r
2! hexagonr⊗r = hexagonr⊗l
2! hexagonl⊗l = hexagonl⊗r
2! hexagonl⊗r = hexagonl⊗l
2! absorbl⇔l = absorbl⇔r
2! absorbl⇔r = absorbl⇔l
2! absorbr⇔l = absorbr⇔r
2! absorbr⇔r = absorbr⇔l
2! factorzl⇔l = factorzl⇔r
2! factorzl⇔r = factorzl⇔l
2! factorzr⇔l = factorzr⇔r
2! factorzr⇔r = factorzr⇔l
2! swap₊distl⇔l = swap₊distl⇔r
2! swap₊distl⇔r = swap₊distl⇔l
2! dist-swap⋆⇔l = dist-swap⋆⇔r
2! dist-swap⋆⇔r = dist-swap⋆⇔l
2! assocl₊-dist-dist⇔l = assocl₊-dist-dist⇔r
2! assocl₊-dist-dist⇔r = assocl₊-dist-dist⇔l
2! assocl⋆-distl⇔l = assocl⋆-distl⇔r
2! assocl⋆-distl⇔r = assocl⋆-distl⇔l
2! absorbr0-absorbl0⇔ = absorbl0-absorbr0⇔
2! absorbl0-absorbr0⇔ = absorbr0-absorbl0⇔
2! absorbr⇔distl-absorb-unite = distl-absorb-unite⇔absorbr
2! distl-absorb-unite⇔absorbr = absorbr⇔distl-absorb-unite
2! unite⋆r0-absorbr1⇔ = absorbr1-unite⋆r-⇔
2! absorbr1-unite⋆r-⇔ = unite⋆r0-absorbr1⇔
2! absorbl≡swap⋆◎absorbr = swap⋆◎absorbr≡absorbl
2! swap⋆◎absorbr≡absorbl = absorbl≡swap⋆◎absorbr
2! absorbr⇔[assocl⋆◎[absorbr⊗id⟷]]◎absorbr =
[assocl⋆◎[absorbr⊗id⟷]]◎absorbr⇔absorbr
2! [assocl⋆◎[absorbr⊗id⟷]]◎absorbr⇔absorbr =
absorbr⇔[assocl⋆◎[absorbr⊗id⟷]]◎absorbr
2! [id⟷⊗absorbr]◎absorbl⇔assocl⋆◎[absorbl⊗id⟷]◎absorbr =
assocl⋆◎[absorbl⊗id⟷]◎absorbr⇔[id⟷⊗absorbr]◎absorbl
2! assocl⋆◎[absorbl⊗id⟷]◎absorbr⇔[id⟷⊗absorbr]◎absorbl =
[id⟷⊗absorbr]◎absorbl⇔assocl⋆◎[absorbl⊗id⟷]◎absorbr
2! elim⊥-A[0⊕B]⇔l = elim⊥-A[0⊕B]⇔r
2! elim⊥-A[0⊕B]⇔r = elim⊥-A[0⊕B]⇔l
2! elim⊥-1[A⊕B]⇔l = elim⊥-1[A⊕B]⇔r
2! elim⊥-1[A⊕B]⇔r = elim⊥-1[A⊕B]⇔l
2! fully-distribute⇔l = fully-distribute⇔r
2! fully-distribute⇔r = fully-distribute⇔l
2!! : {t₁ t₂ : U} {f g : t₁ ⟷ t₂} {α : f ⇔ g} → triv≡ (2! (2! α)) α
2!! = tt
-- This makes _⇔_ an equivalence relation...
⇔Equiv : {t₁ t₂ : U} → IsEquivalence (_⇔_ {t₁} {t₂})
⇔Equiv = record
{ refl = id⇔
; sym = 2!
; trans = trans⇔
}
------------------------------------------------------------------------------
-- Unit coherence has two versions, but one is derivable
-- from the other. As it turns out, one of our examples
-- needs the 'flipped' version.
unite₊r-coh-r : {t₁ : U} → swap₊ ◎ unite₊l ⇔ unite₊r {t₁}
unite₊r-coh-r =
trans⇔ (id⇔ ⊡ unite₊l-coh-l) (
trans⇔ assoc◎l ((
trans⇔ (linv◎l ⊡ id⇔) idl◎l ) ) )
------------------------------------------------------------------------------
-- It is often useful to have that reversing c twice is ⇔ c rather than ≡
-- Unfortunately, it needs a 'proof', which is quite dull, though
-- it does have 3 non-trivial cases.
!!⇔id : {t₁ t₂ : U} {c : t₁ ⟷ t₂} → (! (! c)) ⇔ c
!!⇔id {c = unite₊l} = id⇔
!!⇔id {c = uniti₊l} = id⇔
!!⇔id {c = unite₊r} = id⇔
!!⇔id {c = uniti₊r} = id⇔
!!⇔id {c = swap₊} = id⇔
!!⇔id {c = assocl₊} = id⇔
!!⇔id {c = assocr₊} = id⇔
!!⇔id {c = unite⋆l} = id⇔
!!⇔id {c = uniti⋆l} = id⇔
!!⇔id {c = unite⋆r} = id⇔
!!⇔id {c = uniti⋆r} = id⇔
!!⇔id {c = swap⋆} = id⇔
!!⇔id {c = assocl⋆} = id⇔
!!⇔id {c = assocr⋆} = id⇔
!!⇔id {c = absorbr} = id⇔
!!⇔id {c = absorbl} = id⇔
!!⇔id {c = factorzr} = id⇔
!!⇔id {c = factorzl} = id⇔
!!⇔id {c = dist} = id⇔
!!⇔id {c = factor} = id⇔
!!⇔id {c = distl} = id⇔
!!⇔id {c = factorl} = id⇔
!!⇔id {c = id⟷} = id⇔
!!⇔id {c = c ◎ c₁} = !!⇔id ⊡ !!⇔id
!!⇔id {c = c ⊕ c₁} = resp⊕⇔ !!⇔id !!⇔id
!!⇔id {c = c ⊗ c₁} = resp⊗⇔ !!⇔id !!⇔id
-------------
mutual
eval₁ : {t₁ t₂ : U} {c₁ c₂ : t₁ ⟷ t₂} (ce : c₁ ⇔ c₂) → (t₁ ⟷ t₂)
eval₁ (assoc◎l {c₁ = c₁} {c₂} {c₃}) = (c₁ ◎ c₂) ◎ c₃
eval₁ (assoc◎r {c₁ = c₁} {c₂} {c₃}) = c₁ ◎ (c₂ ◎ c₃)
eval₁ (assocl⊕l {c₁ = c₁} {c₂} {c₃}) = assocl₊ ◎ ((c₁ ⊕ c₂) ⊕ c₃)
eval₁ (assocl⊕r {c₁ = c₁} {c₂} {c₃}) = (c₁ ⊕ (c₂ ⊕ c₃)) ◎ assocl₊
eval₁ (assocl⊗l {c₁ = c₁} {c₂} {c₃}) = assocl⋆ ◎ ((c₁ ⊗ c₂) ⊗ c₃)
eval₁ (assocl⊗r {c₁ = c₁} {c₂} {c₃}) = (c₁ ⊗ (c₂ ⊗ c₃)) ◎ assocl⋆
eval₁ (assocr⊕r {c₁ = c₁} {c₂} {c₃}) = assocr₊ ◎ (c₁ ⊕ (c₂ ⊕ c₃))
eval₁ (assocr⊗l {c₁ = c₁} {c₂} {c₃}) = ((c₁ ⊗ c₂) ⊗ c₃) ◎ assocr⋆
eval₁ (assocr⊗r {c₁ = c₁} {c₂} {c₃}) = assocr⋆ ◎ (c₁ ⊗ (c₂ ⊗ c₃))
eval₁ (assocr⊕l {c₁ = c₁} {c₂} {c₃}) = ((c₁ ⊕ c₂) ⊕ c₃) ◎ assocr₊
eval₁ (dist⇔l {a = c₁} {c₂} {c₃}) = dist ◎ ((c₁ ⊗ c₃) ⊕ (c₂ ⊗ c₃))
eval₁ (dist⇔r {a = c₁} {c₂} {c₃}) = ((c₁ ⊕ c₂) ⊗ c₃) ◎ dist
eval₁ (distl⇔l {a = c₁} {c₂} {c₃}) = distl ◎ ((c₁ ⊗ c₂) ⊕ (c₁ ⊗ c₃))
eval₁ (distl⇔r {a = c₁} {c₂} {c₃}) = (c₁ ⊗ (c₂ ⊕ c₃)) ◎ distl
eval₁ (factor⇔l {a = c₁} {c₂} {c₃}) = factor ◎ ((c₁ ⊕ c₂) ⊗ c₃)
eval₁ (factor⇔r {a = c₁} {c₂} {c₃}) = ((c₁ ⊗ c₃) ⊕ (c₂ ⊗ c₃)) ◎ factor
eval₁ (factorl⇔l {a = c₁} {c₂} {c₃}) = factorl ◎ (c₁ ⊗ (c₂ ⊕ c₃))
eval₁ (factorl⇔r {a = c₁} {c₂} {c₃}) = ((c₁ ⊗ c₂) ⊕ (c₁ ⊗ c₃)) ◎ factorl
eval₁ (idl◎l {c = c}) = c
eval₁ (idl◎r {c = c}) = id⟷ ◎ c
eval₁ (idr◎l {c = c}) = c
eval₁ (idr◎r {c = c}) = c ◎ id⟷
eval₁ (linv◎l {c = c}) = id⟷
eval₁ (linv◎r {c = c}) = c ◎ ! c
eval₁ (rinv◎l {c = c}) = id⟷
eval₁ (rinv◎r {c = c}) = ! c ◎ c
eval₁ (unite₊l⇔l {c₁ = c₁} {c₂}) = (c₁ ⊕ c₂) ◎ unite₊l
eval₁ (unite₊l⇔r {c₁ = c₁} {c₂}) = unite₊l ◎ c₂
eval₁ (uniti₊l⇔l {c₁ = c₁} {c₂}) = c₂ ◎ uniti₊l
eval₁ (uniti₊l⇔r {c₁ = c₁} {c₂}) = uniti₊l ◎ (c₁ ⊕ c₂)
eval₁ (unite₊r⇔l {c₁ = c₁} {c₂}) = (c₂ ⊕ c₁) ◎ unite₊r
eval₁ (unite₊r⇔r {c₁ = c₁} {c₂}) = unite₊r ◎ c₂
eval₁ (uniti₊r⇔l {c₁ = c₁} {c₂}) = c₂ ◎ uniti₊r
eval₁ (uniti₊r⇔r {c₁ = c₁} {c₂}) = uniti₊r ◎ (c₂ ⊕ c₁)
eval₁ (swapl₊⇔ {c₁ = c₁} {c₂}) = (c₂ ⊕ c₁) ◎ swap₊
eval₁ (swapr₊⇔ {c₁ = c₁} {c₂}) = swap₊ ◎ (c₁ ⊕ c₂)
eval₁ (unitel⋆⇔l {c₁ = c₁} {c₂}) = (c₁ ⊗ c₂) ◎ unite⋆l
eval₁ (uniter⋆⇔l {c₁ = c₁} {c₂}) = unite⋆l ◎ c₂
eval₁ (unitil⋆⇔l {c₁ = c₁} {c₂}) = c₂ ◎ uniti⋆l
eval₁ (unitir⋆⇔l {c₁ = c₁} {c₂}) = uniti⋆l ◎ (c₁ ⊗ c₂)
eval₁ (unitel⋆⇔r {c₁ = c₁} {c₂}) = (c₂ ⊗ c₁) ◎ unite⋆r
eval₁ (uniter⋆⇔r {c₁ = c₁} {c₂}) = unite⋆r ◎ c₂
eval₁ (unitil⋆⇔r {c₁ = c₁} {c₂}) = c₂ ◎ uniti⋆r
eval₁ (unitir⋆⇔r {c₁ = c₁} {c₂}) = uniti⋆r ◎ (c₂ ⊗ c₁)
eval₁ (swapl⋆⇔ {c₁ = c₁} {c₂}) = (c₂ ⊗ c₁) ◎ swap⋆
eval₁ (swapr⋆⇔ {c₁ = c₁} {c₂}) = swap⋆ ◎ (c₁ ⊗ c₂)
eval₁ (id⇔ {c = c}) = c
eval₁ (trans⇔ {t₁} {t₂} {c₁} {c₂} {c₃} ce ce₁) with eval₁ ce | exact ce
... | cc | refl = eval₁ {c₁ = cc} {c₃} ce₁
eval₁ (_⊡_ {c₁ = c₁} {c₂} {c₃} {c₄} ce₀ ce₁) =
let r₀ = eval₁ ce₀ in
let r₁ = eval₁ ce₁ in
r₀ ◎ r₁
eval₁ (resp⊕⇔ ce₀ ce₁) =
let r₀ = eval₁ ce₀ in
let r₁ = eval₁ ce₁ in
r₀ ⊕ r₁
eval₁ (resp⊗⇔ ce₀ ce₁) =
let r₀ = eval₁ ce₀ in
let r₁ = eval₁ ce₁ in
r₀ ⊗ r₁
eval₁ id⟷⊕id⟷⇔ = id⟷
eval₁ split⊕-id⟷ = id⟷ ⊕ id⟷
eval₁ (hom⊕◎⇔ {c₁ = c₁} {c₂} {c₃} {c₄}) = (c₁ ⊕ c₂) ◎ (c₃ ⊕ c₄)
eval₁ (hom◎⊕⇔ {c₁ = c₁} {c₂} {c₃} {c₄}) = (c₁ ◎ c₃) ⊕ (c₂ ◎ c₄)
eval₁ id⟷⊗id⟷⇔ = id⟷
eval₁ split⊗-id⟷ = id⟷ ⊗ id⟷
eval₁ (hom⊗◎⇔ {c₁ = c₁} {c₂} {c₃} {c₄}) = (c₁ ⊗ c₂) ◎ (c₃ ⊗ c₄)
eval₁ (hom◎⊗⇔ {c₁ = c₁} {c₂} {c₃} {c₄}) = (c₁ ◎ c₃) ⊗ (c₂ ◎ c₄)
eval₁ triangle⊕l = assocr₊ ◎ (id⟷ ⊕ unite₊l)
eval₁ triangle⊕r = unite₊r ⊕ id⟷
eval₁ triangle⊗l = assocr⋆ ◎ (id⟷ ⊗ unite⋆l)
eval₁ triangle⊗r = unite⋆r ⊗ id⟷
eval₁ pentagon⊕l = ((assocr₊ ⊕ id⟷) ◎ assocr₊) ◎ (id⟷ ⊕ assocr₊)
eval₁ pentagon⊕r = assocr₊ ◎ assocr₊
eval₁ pentagon⊗l = ((assocr⋆ ⊗ id⟷) ◎ assocr⋆) ◎ (id⟷ ⊗ assocr⋆)
eval₁ pentagon⊗r = assocr⋆ ◎ assocr⋆
eval₁ unite₊l-coh-l = swap₊ ◎ unite₊r
eval₁ unite₊l-coh-r = unite₊l
eval₁ unite⋆l-coh-l = swap⋆ ◎ unite⋆r
eval₁ unite⋆l-coh-r = unite⋆l
eval₁ hexagonr⊕l = ((swap₊ ⊕ id⟷) ◎ assocr₊) ◎ (id⟷ ⊕ swap₊)
eval₁ hexagonr⊕r = (assocr₊ ◎ swap₊) ◎ assocr₊
eval₁ hexagonl⊕l = ((id⟷ ⊕ swap₊) ◎ assocl₊) ◎ (swap₊ ⊕ id⟷)
eval₁ hexagonl⊕r = (assocl₊ ◎ swap₊) ◎ assocl₊
eval₁ hexagonr⊗l = ((swap⋆ ⊗ id⟷) ◎ assocr⋆) ◎ (id⟷ ⊗ swap⋆)
eval₁ hexagonr⊗r = (assocr⋆ ◎ swap⋆) ◎ assocr⋆
eval₁ hexagonl⊗l = ((id⟷ ⊗ swap⋆) ◎ assocl⋆) ◎ (swap⋆ ⊗ id⟷)
eval₁ hexagonl⊗r = (assocl⋆ ◎ swap⋆) ◎ assocl⋆
eval₁ absorbl⇔l = absorbl ◎ id⟷
eval₁ (absorbl⇔r {c₁ = c₁}) = (c₁ ⊗ id⟷) ◎ absorbl
eval₁ absorbr⇔l = absorbr ◎ id⟷
eval₁ (absorbr⇔r {c₁ = c₁}) = (id⟷ ⊗ c₁) ◎ absorbr
eval₁ (factorzl⇔l {c₁ = c₁}) = factorzl ◎ (id⟷ ⊗ c₁)
eval₁ (factorzl⇔r {c₁ = c₁}) = id⟷ ◎ factorzl
eval₁ (factorzr⇔l {c₁ = c₁}) = factorzr ◎ (c₁ ⊗ id⟷)
eval₁ (factorzr⇔r {c₁ = c₁}) = id⟷ ◎ factorzr
eval₁ swap₊distl⇔l = distl ◎ swap₊
eval₁ swap₊distl⇔r = (id⟷ ⊗ swap₊) ◎ distl
eval₁ dist-swap⋆⇔l = swap⋆ ◎ distl
eval₁ dist-swap⋆⇔r = dist ◎ (swap⋆ ⊕ swap⋆)
eval₁ assocl₊-dist-dist⇔l = (dist ◎ (id⟷ ⊕ dist)) ◎ assocl₊
eval₁ assocl₊-dist-dist⇔r = ((assocl₊ ⊗ id⟷) ◎ dist) ◎ (dist ⊕ id⟷)
eval₁ assocl⋆-distl⇔l = ((id⟷ ⊗ distl) ◎ distl) ◎ (assocl⋆ ⊕ assocl⋆)
eval₁ assocl⋆-distl⇔r = assocl⋆ ◎ distl
eval₁ absorbr0-absorbl0⇔ = absorbl
eval₁ absorbl0-absorbr0⇔ = absorbr
eval₁ absorbr⇔distl-absorb-unite = (distl ◎ (absorbr ⊕ absorbr)) ◎ unite₊l
eval₁ distl-absorb-unite⇔absorbr = absorbr
eval₁ unite⋆r0-absorbr1⇔ = absorbr
eval₁ absorbr1-unite⋆r-⇔ = unite⋆r
eval₁ absorbl≡swap⋆◎absorbr = swap⋆ ◎ absorbr
eval₁ swap⋆◎absorbr≡absorbl = absorbl
eval₁ absorbr⇔[assocl⋆◎[absorbr⊗id⟷]]◎absorbr = (assocl⋆ ◎ (absorbr ⊗ id⟷)) ◎ absorbr
eval₁ [assocl⋆◎[absorbr⊗id⟷]]◎absorbr⇔absorbr = absorbr
eval₁ [id⟷⊗absorbr]◎absorbl⇔assocl⋆◎[absorbl⊗id⟷]◎absorbr = (assocl⋆ ◎ (absorbl ⊗ id⟷)) ◎ absorbr
eval₁ assocl⋆◎[absorbl⊗id⟷]◎absorbr⇔[id⟷⊗absorbr]◎absorbl = (id⟷ ⊗ absorbr) ◎ absorbl
eval₁ elim⊥-A[0⊕B]⇔l = (distl ◎ (absorbl ⊕ id⟷)) ◎ unite₊l
eval₁ elim⊥-A[0⊕B]⇔r = id⟷ ⊗ unite₊l
eval₁ elim⊥-1[A⊕B]⇔l = distl ◎ (unite⋆l ⊕ unite⋆l)
eval₁ elim⊥-1[A⊕B]⇔r = unite⋆l
eval₁ fully-distribute⇔l = ((((dist ◎ (distl ⊕ distl)) ◎ assocl₊) ◎ (assocr₊ ⊕ id⟷)) ◎
((id⟷ ⊕ swap₊) ⊕ id⟷))
◎ (assocl₊ ⊕ id⟷)
eval₁ fully-distribute⇔r = (distl ◎ (dist ⊕ dist)) ◎ assocl₊
exact : {t₁ t₂ : U} {c₁ c₂ : t₁ ⟷ t₂} (ce : c₁ ⇔ c₂) → eval₁ ce ≡ c₂
exact assoc◎l = refl
exact assoc◎r = refl
exact assocl⊕l = refl
exact assocl⊕r = refl
exact assocl⊗l = refl
exact assocl⊗r = refl
exact assocr⊕r = refl
exact assocr⊗l = refl
exact assocr⊗r = refl
exact assocr⊕l = refl
exact dist⇔l = refl
exact dist⇔r = refl
exact distl⇔l = refl
exact distl⇔r = refl
exact factor⇔l = refl
exact factor⇔r = refl
exact factorl⇔l = refl
exact factorl⇔r = refl
exact idl◎l = refl
exact idl◎r = refl
exact idr◎l = refl
exact idr◎r = refl
exact linv◎l = refl
exact linv◎r = refl
exact rinv◎l = refl
exact rinv◎r = refl
exact unite₊l⇔l = refl
exact unite₊l⇔r = refl
exact uniti₊l⇔l = refl
exact uniti₊l⇔r = refl
exact unite₊r⇔l = refl
exact unite₊r⇔r = refl
exact uniti₊r⇔l = refl
exact uniti₊r⇔r = refl
exact swapl₊⇔ = refl
exact swapr₊⇔ = refl
exact unitel⋆⇔l = refl
exact uniter⋆⇔l = refl
exact unitil⋆⇔l = refl
exact unitir⋆⇔l = refl
exact unitel⋆⇔r = refl
exact uniter⋆⇔r = refl
exact unitil⋆⇔r = refl
exact unitir⋆⇔r = refl
exact swapl⋆⇔ = refl
exact swapr⋆⇔ = refl
exact id⇔ = refl
exact (trans⇔ ce ce₁) rewrite exact ce | exact ce₁ = refl
exact (ce ⊡ ce₁) rewrite exact ce | exact ce₁ = refl
exact (resp⊕⇔ ce ce₁) rewrite exact ce | exact ce₁ = refl
exact (resp⊗⇔ ce ce₁) rewrite exact ce | exact ce₁ = refl
exact id⟷⊕id⟷⇔ = refl
exact split⊕-id⟷ = refl
exact hom⊕◎⇔ = refl
exact hom◎⊕⇔ = refl
exact id⟷⊗id⟷⇔ = refl
exact split⊗-id⟷ = refl
exact hom⊗◎⇔ = refl
exact hom◎⊗⇔ = refl
exact triangle⊕l = refl
exact triangle⊕r = refl
exact triangle⊗l = refl
exact triangle⊗r = refl
exact pentagon⊕l = refl
exact pentagon⊕r = refl
exact pentagon⊗l = refl
exact pentagon⊗r = refl
exact unite₊l-coh-l = refl
exact unite₊l-coh-r = refl
exact unite⋆l-coh-l = refl
exact unite⋆l-coh-r = refl
exact hexagonr⊕l = refl
exact hexagonr⊕r = refl
exact hexagonl⊕l = refl
exact hexagonl⊕r = refl
exact hexagonr⊗l = refl
exact hexagonr⊗r = refl
exact hexagonl⊗l = refl
exact hexagonl⊗r = refl
exact absorbl⇔l = refl
exact absorbl⇔r = refl
exact absorbr⇔l = refl
exact absorbr⇔r = refl
exact factorzl⇔l = refl
exact factorzl⇔r = refl
exact factorzr⇔l = refl
exact factorzr⇔r = refl
exact swap₊distl⇔l = refl
exact swap₊distl⇔r = refl
exact dist-swap⋆⇔l = refl
exact dist-swap⋆⇔r = refl
exact assocl₊-dist-dist⇔l = refl
exact assocl₊-dist-dist⇔r = refl
exact assocl⋆-distl⇔l = refl
exact assocl⋆-distl⇔r = refl
exact absorbr0-absorbl0⇔ = refl
exact absorbl0-absorbr0⇔ = refl
exact absorbr⇔distl-absorb-unite = refl
exact distl-absorb-unite⇔absorbr = refl
exact unite⋆r0-absorbr1⇔ = refl
exact absorbr1-unite⋆r-⇔ = refl
exact absorbl≡swap⋆◎absorbr = refl
exact swap⋆◎absorbr≡absorbl = refl
exact absorbr⇔[assocl⋆◎[absorbr⊗id⟷]]◎absorbr = refl
exact [assocl⋆◎[absorbr⊗id⟷]]◎absorbr⇔absorbr = refl
exact [id⟷⊗absorbr]◎absorbl⇔assocl⋆◎[absorbl⊗id⟷]◎absorbr = refl
exact assocl⋆◎[absorbl⊗id⟷]◎absorbr⇔[id⟷⊗absorbr]◎absorbl = refl
exact elim⊥-A[0⊕B]⇔l = refl
exact elim⊥-A[0⊕B]⇔r = refl
exact elim⊥-1[A⊕B]⇔l = refl
exact elim⊥-1[A⊕B]⇔r = refl
exact fully-distribute⇔l = refl
exact fully-distribute⇔r = refl
|
### Prelude
```python
import numpy as np
import scipy.integrate as integrate
import math
import matplotlib.pyplot as plt
%matplotlib inline

def gaussian(x, mu, sigma):
    # density of the normal distribution N(mu, sigma^2) at x
    return 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x - mu)**2 / (2 * sigma**2))
```
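As a quick sanity check (not part of the original notebook), the density should integrate to 1:
```python
integrate.quad(lambda t: gaussian(t, 0, 1), -math.inf, math.inf)[0]  # ~ 1.0
```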
### Input
```python
hours_estimated = 6
plus_minus = 3
```
### Computation
```python
x = np.linspace(hours_estimated - 4*plus_minus, hours_estimated + 4*plus_minus,100)
y = np.array([gaussian(v,hours_estimated, plus_minus) for v in x],dtype='float')
plt.grid(True)
plt.title('Expected distribution (normal)')
plt.xlabel('effective time (hours)')
plt.ylabel('probability distribution')
plt.plot(x,y,color='gray')
plt.fill_between(x,y,0,color='#c0f0c0')
plt.show()
```
```python
x = np.linspace(hours_estimated - 4*plus_minus, hours_estimated + 4*plus_minus,100)
y = np.array([gaussian(v,hours_estimated, plus_minus) for v in x],dtype='float')
plt.grid(True)
plt.title('The impossible cases')
plt.xlabel('effective time (hours)')
plt.ylabel('probability distribution')
plt.plot(x,y,color='gray')
plt.fill_between(x,y,where=x>0,color='#c0f0c0')
plt.fill_between(x,y,where=x<=0, color='#ff0000')
plt.show()
```
### Summing up the error
```python
# probability mass of the impossible region (effective time <= 0)
red_surface = integrate.quad(lambda t: gaussian(t, hours_estimated, plus_minus), -math.inf, 0)
red_surface[0]
```
0.022750131948179212
```python
corrected_mean = 0
# Walk the grid until the probability mass between the mean and x_val
# exceeds the impossible (red) mass; that grid point is the corrected mean.
for x_val in x:
    integral_so_far = integrate.quad(lambda t: gaussian(t, hours_estimated, plus_minus),
                                     hours_estimated, x_val)
    if integral_so_far[0] > red_surface[0]:
        corrected_mean = x_val
        break

# Predicates for the four regions shaded in the plot below (kept for
# reference; the plot itself uses the equivalent vectorized masks).
def belowzero(x):
    return x <= 0

def zerotomean(x):
    return 0 < x <= hours_estimated

def meantocorrectedmean(x):
    return hours_estimated < x <= corrected_mean

def abovecorrected(x):
    return x > corrected_mean
```
```python
x = np.linspace(hours_estimated - 4*plus_minus, hours_estimated + 4*plus_minus,100)
y = np.array([gaussian(v,hours_estimated, plus_minus) for v in x],dtype='float')
plt.grid(True)
plt.title('Correction on mean')
plt.xlabel('effective time (hours)')
plt.ylabel('probability distribution')
plt.plot(x,y,color='gray')
plt.fill_between(x,y,where=(x<=0), color='#ff0000')
plt.fill_between(x,y,where=((x>0) & (x<=hours_estimated)), color='#c0f0c0')
plt.fill_between(x,y,where=((x>hours_estimated) & (x<=corrected_mean)), color='#ff0000')
plt.fill_between(x,y,where=(x>corrected_mean), color='#c0f0c0')
plt.show()
```
### Error (%)
```python
err = (corrected_mean - hours_estimated) / hours_estimated * 100
print("A task estimated at %.2f hours (+/- %.2f hours) is likely to turn out %.2f percent above estimation" % (hours_estimated, plus_minus, err))
```
A task estimated at 6.00 hours (+/- 3.00 hours) is likely to turn out 6.06 percent above estimation
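For comparison, the same correction can be computed in closed form from the normal CDF. This is a sketch using `scipy.stats.norm` (not imported in the notebook above): the red mass is Φ(-μ/σ), and the corrected mean is the point m with Φ((m-μ)/σ) - 1/2 equal to that mass.
```python
from scipy.stats import norm

mu, sigma = hours_estimated, plus_minus
red = norm.cdf(0, loc=mu, scale=sigma)        # mass of the impossible region
m = norm.ppf(0.5 + red, loc=mu, scale=sigma)  # point m with P(mu < X <= m) == red
print("exact corrected mean: %.3f hours (%.2f%% above estimation)" % (m, (m - mu) / mu * 100))
```
This gives about 6.17 hours (roughly 2.85% above estimation), somewhat below the 6.06 percent reported above: the loop rounds the corrected mean up to the next point of the 100-point plotting grid.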
|
Set Implicit Arguments.
(* CoqIDE users: Run open.sh (in ./ln) to start coqide, then open this file. *)
Require Import LibLN.
(* ###################################################################### *)
(* ###################################################################### *)
(** * Definitions *)
(* ###################################################################### *)
(** ** Syntax *)
Parameter typ_label: Set.
Parameter fld_label: Set.
Parameter mtd_label: Set.
Inductive label: Set :=
| label_typ: typ_label -> label
| label_fld: fld_label -> label
| label_mtd: mtd_label -> label.
Module labels.
Parameter L: typ_label.
Parameter l: fld_label.
Parameter m: mtd_label.
Parameter apply: mtd_label.
End labels.
Inductive avar: Set :=
| avar_b: nat -> avar (* bound var (de Bruijn index) *)
| avar_f: var -> avar. (* free var ("name"), refers to tenv or venv *)
Inductive pth: Set :=
| pth_var: avar -> pth.
Inductive typ: Set :=
| typ_top : typ
| typ_bot : typ
(*| typ_bind: decs -> typ { z => decs } *)
| typ_bind: decs -> typ (* not a BIND typ, just { decs } *)
| typ_sel: pth -> typ_label -> typ (* p.L *)
with dec: Set :=
| dec_typ : typ_label -> typ -> typ -> dec
| dec_fld : fld_label -> typ -> dec
| dec_mtd : mtd_label -> typ -> typ -> dec
with decs: Set :=
| decs_nil: decs
| decs_cons: dec -> decs -> decs.
(* decs which could possibly be the expansion of bottom *)
Inductive bdecs: Set :=
| bdecs_bot: bdecs
| bdecs_decs: decs -> bdecs.
Inductive trm: Set :=
| trm_var : avar -> trm
(*| trm_new : defs -> trm BIND nameless self ref *)
| trm_new : defs -> trm
| trm_sel : trm -> fld_label -> trm
| trm_call: trm -> mtd_label -> trm -> trm
with def: Set :=
| def_typ: typ_label -> typ -> typ -> def (* same as dec_typ *)
| def_fld: fld_label -> avar -> def (* Cannot have term here, assign it first to a var.
The type of that var can then be looked up in the env -> no need to give type here. *)
| def_mtd: mtd_label -> typ -> typ -> trm -> def (* one nameless argument *)
with defs: Set :=
| defs_nil: defs
| defs_cons: def -> defs -> defs.
(** *** Typing environment ("Gamma") *)
Definition ctx := env typ.
(** *** Value environment ("store") *)
Definition sto := env defs.
(** *** Syntactic sugar *)
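(* Functions are encoded as objects with a single method labeled "apply":
   an arrow type is a record type with one method declaration, application
   is a call of "apply", and "let" is application of such a function. *)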
Definition trm_fun(T U: typ)(body: trm) :=
trm_new (defs_cons (def_mtd labels.apply T U body) defs_nil).
Definition trm_app(func arg: trm) := trm_call func labels.apply arg.
Definition trm_let(T U: typ)(rhs body: trm) := trm_app (trm_fun T U body) rhs.
Definition typ_arrow(T1 T2: typ) :=
typ_bind (decs_cons (dec_mtd labels.apply T1 T2) decs_nil).
(* ###################################################################### *)
(** ** Definition list membership *)
Definition label_of_def(d: def): label := match d with
| def_typ L _ _ => label_typ L
| def_fld l _ => label_fld l
| def_mtd m _ _ _ => label_mtd m
end.
Definition label_of_dec(D: dec): label := match D with
| dec_typ L _ _ => label_typ L
| dec_fld l _ => label_fld l
| dec_mtd m _ _ => label_mtd m
end.
Inductive defs_hasnt: defs -> label -> Prop :=
| defs_hasnt_nil: forall l,
defs_hasnt defs_nil l
| defs_hasnt_cons: forall d ds l,
defs_hasnt ds l ->
label_of_def d <> l ->
defs_hasnt (defs_cons d ds) l.
Inductive defs_has: defs -> def -> Prop :=
| defs_has_hit: forall d ds,
defs_hasnt ds (label_of_def d) ->
defs_has (defs_cons d ds) d
| defs_has_skip: forall d1 d2 ds,
defs_has ds d1 ->
label_of_def d2 <> label_of_def d1 ->
defs_has (defs_cons d2 ds) d1.
Inductive decs_hasnt: decs -> label -> Prop :=
| decs_hasnt_nil: forall l,
decs_hasnt decs_nil l
| decs_hasnt_cons: forall D Ds l,
decs_hasnt Ds l ->
label_of_dec D <> l ->
decs_hasnt (decs_cons D Ds) l.
Inductive decs_has: decs -> dec -> Prop :=
| decs_has_hit: forall D Ds,
decs_hasnt Ds (label_of_dec D) ->
decs_has (decs_cons D Ds) D
| decs_has_skip: forall D1 D2 Ds,
decs_has Ds D1 ->
label_of_dec D2 <> label_of_dec D1 ->
decs_has (decs_cons D2 Ds) D1.
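(* bdecs_bot behaves as if it contained every declaration, with extreme bounds: *)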
Inductive bdecs_has: bdecs -> dec -> Prop :=
| bdecs_has_decs: forall Ds D,
decs_has Ds D ->
bdecs_has (bdecs_decs Ds) D
| bdecs_has_typ: forall L,
bdecs_has bdecs_bot (dec_typ L typ_top typ_bot)
| bdecs_has_fld: forall l,
bdecs_has bdecs_bot (dec_fld l typ_bot)
| bdecs_has_mtd: forall m,
bdecs_has bdecs_bot (dec_mtd m typ_top typ_bot).
(* ###################################################################### *)
(** ** Opening *)
(** Opening replaces, in a piece of syntax, a bound variable with dangling
    de Bruijn index k by a free variable u. *)
Definition open_rec_avar (k: nat) (u: var) (a: avar): avar :=
match a with
| avar_b i => If k = i then avar_f u else avar_b i
| avar_f x => avar_f x
end.
Definition open_rec_pth (k: nat) (u: var) (p: pth): pth :=
match p with
| pth_var a => pth_var (open_rec_avar k u a)
end.
Fixpoint open_rec_typ (k: nat) (u: var) (T: typ) { struct T }: typ :=
match T with
| typ_top => typ_top
| typ_bot => typ_bot
(*| typ_bind Ds => typ_bind (open_rec_decs (S k) u Ds) BIND *)
| typ_bind Ds => typ_bind (open_rec_decs k u Ds)
| typ_sel p L => typ_sel (open_rec_pth k u p) L
end
with open_rec_dec (k: nat) (u: var) (D: dec) { struct D }: dec :=
match D with
| dec_typ L T U => dec_typ L (open_rec_typ k u T) (open_rec_typ k u U)
| dec_fld l T => dec_fld l (open_rec_typ k u T)
| dec_mtd m T U => dec_mtd m (open_rec_typ k u T) (open_rec_typ k u U)
end
with open_rec_decs (k: nat) (u: var) (Ds: decs) { struct Ds }: decs :=
match Ds with
| decs_nil => decs_nil
| decs_cons D Ds' => decs_cons (open_rec_dec k u D) (open_rec_decs k u Ds')
end.
Fixpoint open_rec_trm (k: nat) (u: var) (t: trm) { struct t }: trm :=
match t with
| trm_var a => trm_var (open_rec_avar k u a)
(*| trm_new ds => trm_new (open_rec_defs (S k) u ds) BIND *)
| trm_new ds => trm_new (open_rec_defs k u ds)
| trm_sel e n => trm_sel (open_rec_trm k u e) n
| trm_call o m a => trm_call (open_rec_trm k u o) m (open_rec_trm k u a)
end
with open_rec_def (k: nat) (u: var) (d: def) { struct d }: def :=
match d with
| def_typ L Lo Hi => def_typ L (open_rec_typ k u Lo) (open_rec_typ k u Hi)
| def_fld f a => def_fld f (open_rec_avar k u a)
| def_mtd m T1 T2 e => def_mtd m (open_rec_typ k u T1) (open_rec_typ k u T2)
(open_rec_trm (S k) u e)
end
with open_rec_defs (k: nat) (u: var) (ds: defs) { struct ds }: defs :=
match ds with
| defs_nil => defs_nil
| defs_cons d tl => defs_cons (open_rec_def k u d) (open_rec_defs k u tl)
end.
Definition open_avar u a := open_rec_avar 0 u a.
Definition open_pth u p := open_rec_pth 0 u p.
Definition open_typ u t := open_rec_typ 0 u t.
Definition open_dec u d := open_rec_dec 0 u d.
Definition open_decs u l := open_rec_decs 0 u l.
Definition open_trm u e := open_rec_trm 0 u e.
Definition open_def u d := open_rec_def 0 u d.
Definition open_defs u l := open_rec_defs 0 u l.
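(* For example, opening a method body with x turns its de Bruijn argument
   into a free variable: open_avar x (avar_b 0) = avar_f x. *)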
(* ###################################################################### *)
(** ** Free variables *)
Definition fv_avar (a: avar): vars :=
match a with
| avar_b i => \{}
| avar_f x => \{x}
end.
Definition fv_pth (p: pth): vars :=
match p with
| pth_var a => fv_avar a
end.
Fixpoint fv_typ (T: typ) { struct T }: vars :=
match T with
| typ_top => \{}
| typ_bot => \{}
| typ_bind Ds => fv_decs Ds
| typ_sel p L => fv_pth p
end
with fv_dec (D: dec) { struct D }: vars :=
match D with
| dec_typ _ T U => (fv_typ T) \u (fv_typ U)
| dec_fld _ T => (fv_typ T)
| dec_mtd _ T U => (fv_typ T) \u (fv_typ U)
end
with fv_decs (Ds: decs) { struct Ds }: vars :=
match Ds with
| decs_nil => \{}
| decs_cons D Ds' => (fv_dec D) \u (fv_decs Ds')
end.
Definition fv_bdecs (Ds: bdecs): vars := match Ds with
| bdecs_decs Ds0 => fv_decs Ds0
| bdecs_bot => \{}
end.
(* Since we define defs ourselves instead of using [list def], we don't have any
termination proof problems: *)
Fixpoint fv_trm (t: trm): vars :=
match t with
| trm_var x => (fv_avar x)
| trm_new ds => (fv_defs ds)
| trm_sel t l => (fv_trm t)
| trm_call t1 m t2 => (fv_trm t1) \u (fv_trm t2)
end
with fv_def (d: def): vars :=
match d with
| def_typ _ T U => (fv_typ T) \u (fv_typ U)
| def_fld _ x => (fv_avar x)
| def_mtd _ T U u => (fv_typ T) \u (fv_typ U) \u (fv_trm u)
end
with fv_defs(ds: defs): vars :=
match ds with
| defs_nil => \{}
| defs_cons d tl => (fv_defs tl) \u (fv_def d)
end.
Definition fv_ctx_types(G: ctx): vars := (fv_in_values (fun T => fv_typ T) G).
(* ###################################################################### *)
(** ** Operational Semantics *)
(** Note: Terms given by user are closed, so they only contain avar_b, no avar_f.
Whenever we introduce a new avar_f (only happens in red_new), we choose one
which is not in the store, so we never have name clashes. *)
Inductive red: trm -> sto -> trm -> sto -> Prop :=
(* computation rules *)
| red_call: forall s x y m T U ds body,
binds x ds s ->
(* defs_has (open_defs x ds) (def_mtd m T U body) -> BIND *)
defs_has ds (def_mtd m T U body) ->
red (trm_call (trm_var (avar_f x)) m (trm_var (avar_f y))) s
(open_trm y body) s
| red_sel: forall s x y l ds,
binds x ds s ->
(* defs_has (open_defs x ds) (def_fld l y) -> BIND *)
defs_has ds (def_fld l y) ->
red (trm_sel (trm_var (avar_f x)) l) s
(trm_var y) s
| red_new: forall s ds x,
x # s ->
red (trm_new ds) s
(trm_var (avar_f x)) (s & x ~ ds)
(* congruence rules *)
| red_call1: forall s o m a s' o',
red o s o' s' ->
red (trm_call o m a) s
(trm_call o' m a) s'
| red_call2: forall s x m a s' a',
red a s a' s' ->
red (trm_call (trm_var (avar_f x)) m a ) s
(trm_call (trm_var (avar_f x)) m a') s'
| red_sel1: forall s o l s' o',
red o s o' s' ->
red (trm_sel o l) s
(trm_sel o' l) s'.
(* ###################################################################### *)
(** ** Typing *)
(* pmode: must the "has" judgments used in subtyping be precise (pr),
   or may they be imprecise (ip), i.e. use subsumption? *)
Inductive pmode: Type := pr | ip.
Inductive cbounds_typ: typ -> Prop :=
| cbounds_top:
cbounds_typ typ_top
| cbounds_bot:
cbounds_typ typ_bot
| cbounds_bind: forall Ds,
cbounds_decs Ds ->
cbounds_typ (typ_bind Ds)
| cbounds_sel: forall p L,
cbounds_typ (typ_sel p L) (* we don't enter the bounds *)
with cbounds_dec: dec -> Prop :=
| cbounds_dec_typ_1: forall L T,
cbounds_typ T ->
cbounds_dec (dec_typ L T T) (* <-- that's the whole point *)
| cbounds_dec_typ_2: forall L T,
cbounds_typ T ->
cbounds_dec (dec_typ L typ_bot T) (* <-- also allowed, for recursive types *)
| cbounds_dec_fld: forall l T,
cbounds_typ T ->
cbounds_dec (dec_fld l T)
| cbounds_dec_mtd: forall m T U,
cbounds_typ T ->
cbounds_typ U ->
cbounds_dec (dec_mtd m T U)
with cbounds_decs: decs -> Prop :=
| cbounds_nil:
cbounds_decs decs_nil
| cbounds_cons: forall D Ds,
cbounds_dec D ->
cbounds_decs Ds ->
cbounds_decs (decs_cons D Ds).
Definition ctx_size(G: ctx) := LibList.length G.
Inductive dmode: Set := deep | shallow.
(* deep enters also computational types (typ_bind),
shallow only enters non-expansive types (bounds of path types, and/or-types) *)
Inductive wf_typ: pmode -> dmode -> ctx -> typ -> Prop :=
| wf_top: forall m1 m2 G,
wf_typ m1 m2 G typ_top
| wf_bot: forall m1 m2 G,
wf_typ m1 m2 G typ_bot
| wf_bind_deep: forall m G Ds,
(* BIND (forall z, z \notin L -> wf_decs (G & z ~ typ_bind Ds) Ds) ->*)
wf_decs m G Ds ->
wf_typ m deep G (typ_bind Ds)
| wf_bind_shallow: forall m G Ds,
wf_typ m shallow G (typ_bind Ds)
| wf_sel1: forall m1 m2 G x L Lo Hi,
pth_has m1 G (pth_var (avar_f x)) (dec_typ L Lo Hi) ->
wf_typ m1 m2 G Lo ->
wf_typ m1 m2 G Hi ->
wf_typ m1 m2 G (typ_sel (pth_var (avar_f x)) L)
| wf_sel2: forall m1 G x L U,
pth_has m1 G (pth_var (avar_f x)) (dec_typ L typ_bot U) ->
(* deep wf-ness of U was already checked at the definition site of x.L (wf_tmem),
so it's sufficient to do a shallow check --> allows x.L to appear recursively
in U, but only behind a computational type --> following upper bound terminates *)
wf_typ m1 shallow G U ->
wf_typ m1 deep G (typ_sel (pth_var (avar_f x)) L)
(* wf_dec and wf_decs need no mode, because it's always deep *)
with wf_dec: pmode -> ctx -> dec -> Prop :=
| wf_tmem: forall m G L Lo Hi,
wf_typ m deep G Lo ->
wf_typ m deep G Hi ->
wf_dec m G (dec_typ L Lo Hi)
| wf_fld: forall m G l T,
wf_typ m deep G T ->
wf_dec m G (dec_fld l T)
| wf_mtd: forall m1 G m A R,
wf_typ m1 deep G A ->
wf_typ m1 deep G R ->
wf_dec m1 G (dec_mtd m A R)
with wf_decs: pmode -> ctx -> decs -> Prop :=
| wf_nil: forall m G,
wf_decs m G decs_nil
| wf_cons: forall m G D Ds,
wf_dec m G D ->
wf_decs m G Ds ->
decs_hasnt Ds (label_of_dec D) ->
wf_decs m G (decs_cons D Ds)
(* expansion returns a set of decs without opening them *)
with exp: pmode -> ctx -> typ -> bdecs -> Prop :=
| exp_top: forall m G,
exp m G typ_top (bdecs_decs decs_nil)
| exp_bot: forall m G,
exp m G typ_bot bdecs_bot
| exp_bind: forall m G Ds,
(* wf_decs m G Ds -> *)
exp m G (typ_bind Ds) (bdecs_decs Ds)
| exp_sel: forall m G x L Lo Hi Ds,
pth_has m G (pth_var (avar_f x)) (dec_typ L Lo Hi) ->
exp m G Hi Ds ->
exp m G (typ_sel (pth_var (avar_f x)) L) Ds
(* "path-has": path membership
Note: Contrary to trm_has, pth_has is not regular wrt wf-ness, because it's used
to define wf-ness, so if it was, we'd need infinite derivations to prove wf-ness. *)
with pth_has: pmode -> ctx -> pth -> dec -> Prop :=
| pth_has_rule: forall m G p D T Ds,
pth_ty m G p T ->
exp m G T Ds ->
bdecs_has Ds D ->
pth_has m G p D (* BIND: open_dec *)
with pth_ty: pmode -> ctx -> pth -> typ -> Prop :=
| pth_ty_var: forall m x T G,
binds x T G ->
pth_ty m G (pth_var (avar_f x)) T
| pth_ty_sbsm: forall p G T1 T2 n,
pth_ty ip G p T1 ->
subtyp ip G T1 T2 n ->
pth_ty ip G p T2
with trm_has: ctx -> trm -> dec -> Prop :=
| has_trm: forall G t T Ds D,
ty_trm G t T ->
exp ip G T Ds ->
bdecs_has Ds D ->
(forall z, (open_dec z D) = D) ->
wf_dec ip G D ->
trm_has G t D
| has_var: forall G v T Ds D,
ty_trm G (trm_var (avar_f v)) T ->
exp ip G T Ds ->
bdecs_has Ds D ->
wf_dec ip G D ->
trm_has G (trm_var (avar_f v)) D (* BIND (open_dec v D) *)
with subtyp: pmode -> ctx -> typ -> typ -> nat -> Prop :=
| subtyp_refl: forall m G T n,
wf_typ m deep G T -> (* use wf_sel2 to break cycles *)
subtyp m G T T n
| subtyp_top: forall m G T n,
wf_typ m deep G T -> (* use wf_sel2 to break cycles *)
subtyp m G T typ_top n
| subtyp_bot: forall m G T n,
wf_typ m deep G T -> (* use wf_sel2 to break cycles *)
subtyp m G typ_bot T n
| subtyp_bind: forall m G Ds1 Ds2 n,
subdecs m G Ds1 Ds2 n ->
subtyp m G (typ_bind Ds1) (typ_bind Ds2) n
(* BIND
| subtyp_bind: forall L m G Ds1 Ds2 n,
(forall z, z \notin L ->
subdecs m (G & z ~ (typ_bind Ds1))
(open_decs z Ds1)
(open_decs z Ds2) n) ->
subtyp m notrans G (typ_bind Ds1) (typ_bind Ds2) (S n)
*)
| subtyp_sel_l: forall m G x L Lo Hi n,
pth_has m G (pth_var (avar_f x)) (dec_typ L Lo Hi) ->
(* for symmetry with subtyp_sel_r, and to ensure wf-ness of Lo and Hi *)
subtyp m G Lo Hi n ->
subtyp m G (typ_sel (pth_var (avar_f x)) L) Hi n
| subtyp_sel_r: forall m G x L Lo Hi n,
pth_has m G (pth_var (avar_f x)) (dec_typ L Lo Hi) ->
(* makes proofs a lot easier, and also ensures wf-ness of Lo and Hi *)
subtyp m G Lo Hi n ->
subtyp m G Lo (typ_sel (pth_var (avar_f x)) L) n
| subtyp_trans: forall m G T1 T2 T3 n,
subtyp m G T1 T2 n ->
subtyp m G T2 T3 n ->
subtyp m G T1 T3 n
with subdec: pmode -> ctx -> dec -> dec -> nat -> Prop :=
| subdec_typ: forall m G L Lo1 Hi1 Lo2 Hi2 n,
(* Lo2 <: Lo1 and Hi1 <: Hi2 *)
subtyp m G Lo2 Lo1 n ->
(* subtyp m oktrans G Lo1 Hi1 n <- not needed *)
subtyp m G Hi1 Hi2 n ->
subdec m G (dec_typ L Lo1 Hi1) (dec_typ L Lo2 Hi2) n
| subdec_fld: forall m l G T1 T2 n,
subtyp m G T1 T2 n ->
subdec m G (dec_fld l T1) (dec_fld l T2) n
| subdec_mtd: forall m0 m G S1 T1 S2 T2 n,
subtyp m0 G S2 S1 n ->
subtyp m0 G T1 T2 n ->
subdec m0 G (dec_mtd m S1 T1) (dec_mtd m S2 T2) n
with subdecs: pmode -> ctx -> decs -> decs -> nat -> Prop :=
| subdecs_empty: forall m G Ds n,
wf_decs m G Ds ->
subdecs m G Ds decs_nil n
| subdecs_push: forall m G Ds1 Ds2 D1 D2 n1,
decs_has Ds1 D1 ->
subdec m G D1 D2 n1 ->
subdecs m G Ds1 Ds2 n1 ->
decs_hasnt Ds2 (label_of_dec D2) ->
subdecs m G Ds1 (decs_cons D2 Ds2) n1
(*
| subdecs_refl: forall m G Ds n,
wf_decs m G Ds ->
subdecs m G Ds Ds n
*)
with ty_trm: ctx -> trm -> typ -> Prop :=
| ty_var: forall G x T,
binds x T G ->
wf_typ ip deep G T ->
ty_trm G (trm_var (avar_f x)) T
| ty_sel: forall G t l T,
trm_has G t (dec_fld l T) ->
ty_trm G (trm_sel t l) T
| ty_call: forall G t m U V u,
trm_has G t (dec_mtd m U V) ->
ty_trm G u U ->
ty_trm G (trm_call t m u) V
| ty_new: forall G ds Ds,
(* BIND
(forall x, x \notin L ->
ty_defs (G & x ~ typ_bind Ds) (open_defs x ds) (open_decs x Ds)) ->
*)
ty_defs G ds Ds ->
cbounds_decs Ds ->
(* not needed because it follows from ty_defs:
wf_decs ip G Ds -> *)
ty_trm G (trm_new ds) (typ_bind Ds)
| ty_sbsm: forall G t T U n,
ty_trm G t T ->
subtyp ip G T U n ->
ty_trm G t U
with ty_def: ctx -> def -> dec -> Prop :=
| ty_typ: forall G L S U,
wf_typ ip deep G S ->
wf_typ ip deep G U ->
ty_def G (def_typ L S U) (dec_typ L S U)
| ty_fld: forall G l v T,
ty_trm G (trm_var v) T ->
ty_def G (def_fld l v) (dec_fld l T)
| ty_mtd: forall L G m S T t,
(* needed because we'll put S into the ctx *)
wf_typ ip deep G S ->
(* ensures that x does not occur in T -> no dependent method types *)
wf_typ ip deep G T ->
(forall x, x \notin L -> ty_trm (G & x ~ S) (open_trm x t) T) ->
ty_def G (def_mtd m S T t) (dec_mtd m S T)
with ty_defs: ctx -> defs -> decs -> Prop :=
| ty_dsnil: forall G,
ty_defs G defs_nil decs_nil
| ty_dscons: forall G ds d Ds D,
ty_defs G ds Ds ->
ty_def G d D ->
decs_hasnt Ds (label_of_dec D) ->
ty_defs G (defs_cons d ds) (decs_cons D Ds).
Inductive wf_bdecs: pmode -> ctx -> bdecs -> Prop :=
| wf_bdecs_bot: forall m G,
wf_bdecs m G bdecs_bot
| wf_bdecs_decs: forall m G Ds,
wf_decs m G Ds ->
wf_bdecs m G (bdecs_decs Ds).
Inductive subbdecs: pmode -> ctx -> bdecs -> bdecs -> Prop :=
| subbdecs_bot: forall m G Ds,
subbdecs m G bdecs_bot Ds
| subbdecs_refl: forall m G Ds,
subbdecs m G (bdecs_decs Ds) (bdecs_decs Ds)
| subbdecs_decs: forall m G Ds1 Ds2 n,
subdecs m G Ds1 Ds2 n ->
subbdecs m G (bdecs_decs Ds1) (bdecs_decs Ds2).
Inductive wf_ctx: pmode -> ctx -> Prop :=
| wf_ctx_empty: forall m,
wf_ctx m empty
| wf_ctx_push: forall m G x T,
wf_ctx m G ->
wf_typ m deep G T ->
x # G ->
wf_ctx m (G & x ~ T).
(** *** Well-formed store *)
Inductive wf_sto: sto -> ctx -> Prop :=
| wf_sto_empty: wf_sto empty empty
| wf_sto_push: forall s G x ds Ds,
wf_sto s G ->
x # s ->
x # G ->
(* What's below is the same as the ty_new rule, but we don't use ty_trm,
   because ty_trm could conclude via subsumption *)
(*ty_defs (G & x ~ typ_bind Ds) (open_defs x ds) (open_decs x Ds) -> *)
ty_defs G ds Ds ->
cbounds_decs Ds ->
wf_decs pr G Ds -> (* alternatively, one could make sure that this follows from ty_defs *)
wf_sto (s & x ~ ds) (G & x ~ typ_bind Ds).
(* ###################################################################### *)
(** ** Statements we want to prove *)
Definition progress := forall s G e T,
wf_sto s G ->
ty_trm G e T ->
(
(* can step *)
(exists e' s', red e s e' s') \/
(* or is a value *)
(exists x o, e = (trm_var (avar_f x)) /\ binds x o s)
).
Definition preservation := forall s G e T e' s',
wf_sto s G -> ty_trm G e T -> red e s e' s' ->
(exists G', wf_sto s' G' /\ ty_trm G' e' T).
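(* Together, progress and preservation give the usual syntactic type
   soundness: well-typed terms never get stuck. *)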
(* ###################################################################### *)
(* ###################################################################### *)
(** * Infrastructure *)
(* ###################################################################### *)
(** ** Induction principles *)
Scheme trm_mut := Induction for trm Sort Prop
with def_mut := Induction for def Sort Prop
with defs_mut := Induction for defs Sort Prop.
Combined Scheme trm_mutind from trm_mut, def_mut, defs_mut.
Scheme typ_mut := Induction for typ Sort Prop
with dec_mut := Induction for dec Sort Prop
with decs_mut := Induction for decs Sort Prop.
Combined Scheme typ_mutind from typ_mut, dec_mut, decs_mut.
Scheme exp_mut := Induction for exp Sort Prop
with pth_has_mut := Induction for pth_has Sort Prop
with subtyp_mut := Induction for subtyp Sort Prop
with subdec_mut := Induction for subdec Sort Prop
with subdecs_mut := Induction for subdecs Sort Prop
with ty_trm_mut := Induction for ty_trm Sort Prop
with ty_def_mut := Induction for ty_def Sort Prop
with ty_defs_mut := Induction for ty_defs Sort Prop.
Combined Scheme ty_mutind from exp_mut, pth_has_mut,
subtyp_mut, subdec_mut, subdecs_mut,
ty_trm_mut, ty_def_mut, ty_defs_mut.
Scheme subtyp_mutst := Induction for subtyp Sort Prop
with subdec_mutst := Induction for subdec Sort Prop
with subdecs_mutst := Induction for subdecs Sort Prop
with ty_trm_mutst := Induction for ty_trm Sort Prop
with ty_def_mutst := Induction for ty_def Sort Prop
with ty_defs_mutst := Induction for ty_defs Sort Prop.
Combined Scheme subtyp_ty_mutind from subtyp_mutst, subdec_mutst, subdecs_mutst,
ty_trm_mutst, ty_def_mutst, ty_defs_mutst.
Scheme wf_typ_mutw := Induction for wf_typ Sort Prop
with wf_dec_mutw := Induction for wf_dec Sort Prop
with wf_decs_mutw := Induction for wf_decs Sort Prop
with exp_mutw := Induction for exp Sort Prop
with pth_has_mutw := Induction for pth_has Sort Prop
with pth_ty_mutw := Induction for pth_ty Sort Prop
with trm_has_mutw := Induction for trm_has Sort Prop
with subtyp_mutw := Induction for subtyp Sort Prop
with subdec_mutw := Induction for subdec Sort Prop
with subdecs_mutw := Induction for subdecs Sort Prop
with ty_trm_mutw := Induction for ty_trm Sort Prop
with ty_def_mutw := Induction for ty_def Sort Prop
with ty_defs_mutw := Induction for ty_defs Sort Prop.
Combined Scheme all_mutind from
wf_typ_mutw, wf_dec_mutw, wf_decs_mutw,
exp_mutw, pth_has_mutw, pth_ty_mutw, trm_has_mutw,
subtyp_mutw, subdec_mutw, subdecs_mutw,
ty_trm_mutw, ty_def_mutw, ty_defs_mutw.
Scheme wf_typ_mutwf := Induction for wf_typ Sort Prop
with wf_dec_mutwf := Induction for wf_dec Sort Prop
with wf_decs_mutwf := Induction for wf_decs Sort Prop.
Combined Scheme wf_mutind from wf_typ_mutwf, wf_dec_mutwf, wf_decs_mutwf.
Scheme wf_typ_mut2 := Induction for wf_typ Sort Prop
with wf_dec_mut2 := Induction for wf_dec Sort Prop
with wf_decs_mut2 := Induction for wf_decs Sort Prop
with wf_pth_has_mut2 := Induction for pth_has Sort Prop
with wf_ty_mut2 := Induction for pth_ty Sort Prop.
Combined Scheme wf_has_ty_mutind from wf_typ_mut2, wf_dec_mut2, wf_decs_mut2,
wf_pth_has_mut2, wf_ty_mut2.
Scheme s2001 := Induction for pth_has Sort Prop
with s2002 := Induction for pth_ty Sort Prop.
Combined Scheme pth_has_ty_mutind from s2001, s2002.
Scheme s3001 := Induction for trm_has Sort Prop
with s3002 := Induction for ty_trm Sort Prop.
Combined Scheme trm_has_ty_mutind from s3001, s3002.
Scheme exp_mut20 := Induction for exp Sort Prop
with has_mut20 := Induction for pth_has Sort Prop.
Combined Scheme exp_has_mutind from exp_mut20, has_mut20.
(*
Scheme exp_mut4 := Induction for exp Sort Prop
with has_mut4 := Induction for has Sort Prop
with subtyp_mut4 := Induction for subtyp Sort Prop
with ty_trm_mut4 := Induction for ty_trm Sort Prop.
Combined Scheme exp_has_subtyp_ty_mutind from exp_mut4, has_mut4, subtyp_mut4, ty_trm_mut4.
*)
Scheme subtyp_mut3 := Induction for subtyp Sort Prop
with subdec_mut3 := Induction for subdec Sort Prop
with subdecs_mut3 := Induction for subdecs Sort Prop.
Combined Scheme mutind3 from subtyp_mut3, subdec_mut3, subdecs_mut3.
(*
Scheme exp_mut5 := Induction for exp Sort Prop
with has_mut5 := Induction for has Sort Prop
with subtyp_mut5 := Induction for subtyp Sort Prop
with subdec_mut5 := Induction for subdec Sort Prop
with subdecs_mut5 := Induction for subdecs Sort Prop.
Combined Scheme mutind5 from exp_mut5, has_mut5,
subtyp_mut5, subdec_mut5, subdecs_mut5.
Scheme exp_mut6 := Induction for exp Sort Prop
with has_mut6 := Induction for has Sort Prop
with subtyp_mut6 := Induction for subtyp Sort Prop
with subdec_mut6 := Induction for subdec Sort Prop
with subdecs_mut6 := Induction for subdecs Sort Prop
with ty_trm_mut6 := Induction for ty_trm Sort Prop.
Combined Scheme mutind6 from exp_mut6, has_mut6,
subtyp_mut6, subdec_mut6, subdecs_mut6,
ty_trm_mut6.
*)
Scheme wf_typ_mut9 := Induction for wf_typ Sort Prop
with wf_dec_mut9 := Induction for wf_dec Sort Prop
with wf_decs_mut9 := Induction for wf_decs Sort Prop
with exp_mut9 := Induction for exp Sort Prop
with pth_has_mut9 := Induction for pth_has Sort Prop
with pth_ty_mut9 := Induction for pth_ty Sort Prop
with subtyp_mut9 := Induction for subtyp Sort Prop
with subdec_mut9 := Induction for subdec Sort Prop
with subdecs_mut9 := Induction for subdecs Sort Prop.
Combined Scheme mutind9 from
wf_typ_mut9, wf_dec_mut9, wf_decs_mut9,
exp_mut9, pth_has_mut9, pth_ty_mut9,
subtyp_mut9, subdec_mut9, subdecs_mut9.
(*
Scheme exp_mut8 := Induction for exp Sort Prop
with has_mut8 := Induction for has Sort Prop
with wf_typ_mut8 := Induction for wf_typ Sort Prop
with wf_dec_mut8 := Induction for wf_dec Sort Prop
with wf_decs_mut8 := Induction for wf_decs Sort Prop
with subtyp_mut8 := Induction for subtyp Sort Prop
with subdec_mut8 := Induction for subdec Sort Prop
with subdecs_mut8 := Induction for subdecs Sort Prop.
Combined Scheme mutind8 from exp_mut8, has_mut8,
wf_typ_mut8, wf_dec_mut8, wf_decs_mut8,
subtyp_mut8, subdec_mut8, subdecs_mut8.
*)
Scheme cbounds_typ_mut := Induction for cbounds_typ Sort Prop
with cbounds_dec_mut := Induction for cbounds_dec Sort Prop
with cbounds_decs_mut := Induction for cbounds_decs Sort Prop.
Combined Scheme cbounds_mutind from cbounds_typ_mut, cbounds_dec_mut, cbounds_decs_mut.
Scheme s1001 := Induction for trm_has Sort Prop
with s1002 := Induction for subtyp Sort Prop
with s1003 := Induction for subdec Sort Prop
with s1004 := Induction for subdecs Sort Prop
with s1005 := Induction for ty_trm Sort Prop
with s1006 := Induction for ty_def Sort Prop
with s1007 := Induction for ty_defs Sort Prop.
Combined Scheme has_sub_ty_mutind from
s1001, s1002, s1003, s1004, s1005, s1006, s1007.
(*
Scheme wf_typ_mut := Induction for wf_typ Sort Prop
with wf_dec_mut := Induction for wf_dec Sort Prop
with wf_decs_mut := Induction for wf_decs Sort Prop.
Combined Scheme wf_mutind from wf_typ_mut, wf_dec_mut, wf_decs_mut.
*)
(* ###################################################################### *)
(** ** Tactics *)
Ltac auto_specialize :=
repeat match goal with
| Impl: ?Cond -> _ |- _ => let HC := fresh in
assert (HC: Cond) by auto; specialize (Impl HC); clear HC
| Impl: forall (_: ?Cond), _ |- _ => match goal with
| p: Cond |- _ => specialize (Impl p)
end
end.
Ltac gather_vars :=
let A := gather_vars_with (fun x: vars => x ) in
let B := gather_vars_with (fun x: var => \{ x } ) in
let C := gather_vars_with (fun x: ctx => (dom x) \u (fv_ctx_types x)) in
let D := gather_vars_with (fun x: sto => dom x ) in
let E := gather_vars_with (fun x: avar => fv_avar x) in
let F := gather_vars_with (fun x: trm => fv_trm x) in
let G := gather_vars_with (fun x: def => fv_def x) in
let H := gather_vars_with (fun x: defs => fv_defs x) in
let I := gather_vars_with (fun x: typ => fv_typ x) in
let J := gather_vars_with (fun x: dec => fv_dec x) in
let K := gather_vars_with (fun x: decs => fv_decs x) in
constr:(A \u B \u C \u D \u E \u F \u G \u H \u I \u J \u K).
Ltac pick_fresh x :=
let L := gather_vars in (pick_fresh_gen L x).
Tactic Notation "apply_fresh" constr(T) "as" ident(x) :=
apply_fresh_base T gather_vars x.
Hint Constructors
wf_typ wf_dec wf_decs
exp pth_has pth_ty trm_has
subtyp subdec subdecs
ty_trm ty_def ty_defs.
Hint Constructors wf_bdecs subbdecs wf_ctx wf_sto.
Hint Constructors defs_hasnt defs_has decs_hasnt decs_has bdecs_has.
(* ###################################################################### *)
(** ** Library extensions *)
Lemma fresh_push_eq_inv: forall A x a (E: env A),
x # (E & x ~ a) -> False.
Proof.
intros. rewrite dom_push in H. false H. rewrite in_union.
left. rewrite in_singleton. reflexivity.
Qed.
Definition vars_empty: vars := \{}. (* because tactic [exists] cannot infer type var *)
(* ###################################################################### *)
(** ** Definition of var-by-var substitution *)
(** Note that substitution is not part of the definitions, because for the
definitions, opening is sufficient. For the proofs, however, we also
need substitution, but only var-by-var substitution, not var-by-term
substitution. That's why we don't need a judgment asserting that a term
is locally closed. *)
Definition subst_avar (z: var) (u: var) (a: avar): avar :=
match a with
| avar_b i => avar_b i
| avar_f x => If x = z then (avar_f u) else (avar_f x)
end.
Definition subst_pth (z: var) (u: var) (p: pth): pth :=
match p with
| pth_var a => pth_var (subst_avar z u a)
end.
Fixpoint subst_typ (z: var) (u: var) (T: typ) { struct T }: typ :=
match T with
| typ_top => typ_top
| typ_bot => typ_bot
| typ_bind Ds => typ_bind (subst_decs z u Ds)
| typ_sel p L => typ_sel (subst_pth z u p) L
end
with subst_dec (z: var) (u: var) (D: dec) { struct D }: dec :=
match D with
| dec_typ L T U => dec_typ L (subst_typ z u T) (subst_typ z u U)
| dec_fld l T => dec_fld l (subst_typ z u T)
| dec_mtd m T U => dec_mtd m (subst_typ z u T) (subst_typ z u U)
end
with subst_decs (z: var) (u: var) (Ds: decs) { struct Ds }: decs :=
match Ds with
| decs_nil => decs_nil
| decs_cons D Ds' => decs_cons (subst_dec z u D) (subst_decs z u Ds')
end.
Definition subst_bdecs (z: var) (u: var) (Ds: bdecs) := match Ds with
| bdecs_decs Ds0 => bdecs_decs (subst_decs z u Ds0)
| bdecs_bot => bdecs_bot
end.
Fixpoint subst_trm (z: var) (u: var) (t: trm): trm :=
match t with
| trm_var x => trm_var (subst_avar z u x)
| trm_new ds => trm_new (subst_defs z u ds)
| trm_sel t l => trm_sel (subst_trm z u t) l
| trm_call t1 m t2 => trm_call (subst_trm z u t1) m (subst_trm z u t2)
end
with subst_def (z: var) (u: var) (d: def): def :=
match d with
| def_typ L T1 T2 => def_typ L (subst_typ z u T1) (subst_typ z u T2)
| def_fld l x => def_fld l (subst_avar z u x)
| def_mtd m T1 T2 b => def_mtd m (subst_typ z u T1) (subst_typ z u T2) (subst_trm z u b)
end
with subst_defs (z: var) (u: var) (ds: defs): defs :=
match ds with
| defs_nil => defs_nil
| defs_cons d rest => defs_cons (subst_def z u d) (subst_defs z u rest)
end.
Definition subst_ctx (z: var) (u: var) (G: ctx): ctx := map (subst_typ z u) G.
(* ###################################################################### *)
(** ** Lemmas for var-by-var substitution *)
Lemma subst_fresh_avar: forall x y,
(forall a: avar, x \notin fv_avar a -> subst_avar x y a = a).
Proof.
intros. destruct* a. simpl. case_var*. simpls. notin_false.
Qed.
Lemma subst_fresh_pth: forall x y,
(forall p: pth, x \notin fv_pth p -> subst_pth x y p = p).
Proof.
intros. destruct p. simpl. f_equal. apply* subst_fresh_avar.
Qed.
Lemma subst_fresh_typ_dec_decs: forall x y,
(forall T: typ , x \notin fv_typ T -> subst_typ x y T = T ) /\
(forall d: dec , x \notin fv_dec d -> subst_dec x y d = d ) /\
(forall ds: decs, x \notin fv_decs ds -> subst_decs x y ds = ds).
Proof.
intros x y. apply typ_mutind; intros; simpls; f_equal*. apply* subst_fresh_pth.
Qed.
Definition subst_fresh_typ(x y: var) := proj1 (subst_fresh_typ_dec_decs x y).
Lemma subst_fresh_bdecs: forall x y Ds, x \notin fv_bdecs Ds -> subst_bdecs x y Ds = Ds.
Proof.
intros. destruct Ds. reflexivity. simpl. f_equal. apply* subst_fresh_typ_dec_decs.
Qed.
Lemma subst_fresh_trm_def_defs: forall x y,
(forall t: trm , x \notin fv_trm t -> subst_trm x y t = t ) /\
(forall d: def , x \notin fv_def d -> subst_def x y d = d ) /\
(forall ds: defs, x \notin fv_defs ds -> subst_defs x y ds = ds).
Proof.
intros x y. apply trm_mutind; intros; simpls; f_equal*;
(apply* subst_fresh_avar || apply* subst_fresh_typ_dec_decs).
Qed.
Lemma invert_fv_ctx_types_push: forall x z T G,
x \notin fv_ctx_types (G & z ~ T) -> x \notin fv_typ T /\ x \notin (fv_ctx_types G).
Proof.
introv N.
unfold fv_ctx_types in *.
unfold fv_in_values in *.
rewrite <- cons_to_push in *.
rewrite values_def in *.
unfold LibList.map in *.
do 2 rewrite LibList.fold_right_cons in *.
simpl in *.
apply notin_union in N. exact N.
Qed.
Lemma subst_fresh_ctx: forall x y G,
x \notin fv_ctx_types G -> subst_ctx x y G = G.
Proof.
intros x y.
apply (env_ind (fun G => x \notin fv_ctx_types G -> subst_ctx x y G = G)).
+ intro N. unfold subst_ctx. apply map_empty.
+ intros G z T IH N.
apply invert_fv_ctx_types_push in N. destruct N as [N1 N2].
unfold subst_ctx in *. rewrite map_push.
rewrite (IH N2).
rewrite (subst_fresh_typ _ _ N1).
reflexivity.
Qed.
Definition subst_fvar(x y z: var): var := If x = z then y else z.
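(* subst_fvar x y z is "z with x replaced by y" on a bare variable; it lets
   the commutation lemmas below handle the opened variable u uniformly. *)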
Lemma subst_open_commute_avar: forall x y u,
(forall a: avar, forall n: nat,
subst_avar x y (open_rec_avar n u a)
= open_rec_avar n (subst_fvar x y u) (subst_avar x y a)).
Proof.
intros. unfold subst_fvar, subst_avar, open_avar, open_rec_avar. destruct a.
+ repeat case_if; auto.
+ case_var*.
Qed.
Lemma subst_open_commute_pth: forall x y u,
(forall p: pth, forall n: nat,
subst_pth x y (open_rec_pth n u p)
= open_rec_pth n (subst_fvar x y u) (subst_pth x y p)).
Proof.
intros. unfold subst_pth, open_pth, open_rec_pth. destruct p.
f_equal. apply subst_open_commute_avar.
Qed.
(* "open and then substitute" = "substitute and then open" *)
Lemma subst_open_commute_typ_dec_decs: forall x y u,
(forall t: typ, forall n: nat,
subst_typ x y (open_rec_typ n u t)
= open_rec_typ n (subst_fvar x y u) (subst_typ x y t)) /\
(forall d: dec , forall n: nat,
subst_dec x y (open_rec_dec n u d)
= open_rec_dec n (subst_fvar x y u) (subst_dec x y d)) /\
(forall ds: decs, forall n: nat,
subst_decs x y (open_rec_decs n u ds)
= open_rec_decs n (subst_fvar x y u) (subst_decs x y ds)).
Proof.
intros. apply typ_mutind; intros; simpl; f_equal*. apply subst_open_commute_pth.
Qed.
(* "open and then substitute" = "substitute and then open" *)
Lemma subst_open_commute_trm_def_defs: forall x y u,
(forall t: trm, forall n: nat,
subst_trm x y (open_rec_trm n u t)
= open_rec_trm n (subst_fvar x y u) (subst_trm x y t)) /\
(forall d: def , forall n: nat,
subst_def x y (open_rec_def n u d)
= open_rec_def n (subst_fvar x y u) (subst_def x y d)) /\
(forall ds: defs, forall n: nat,
subst_defs x y (open_rec_defs n u ds)
= open_rec_defs n (subst_fvar x y u) (subst_defs x y ds)).
Proof.
intros. apply trm_mutind; intros; simpl; f_equal*;
(apply* subst_open_commute_avar || apply* subst_open_commute_typ_dec_decs).
Qed.
Lemma subst_open_commute_trm: forall x y u t,
subst_trm x y (open_trm u t) = open_trm (subst_fvar x y u) (subst_trm x y t).
Proof.
intros. apply* subst_open_commute_trm_def_defs.
Qed.
Lemma subst_open_commute_defs: forall x y u ds,
subst_defs x y (open_defs u ds) = open_defs (subst_fvar x y u) (subst_defs x y ds).
Proof.
intros. apply* subst_open_commute_trm_def_defs.
Qed.
Lemma subst_open_commute_typ: forall x y u T,
subst_typ x y (open_typ u T) = open_typ (subst_fvar x y u) (subst_typ x y T).
Proof.
intros. apply* subst_open_commute_typ_dec_decs.
Qed.
Lemma subst_open_commute_dec: forall x y u D,
subst_dec x y (open_dec u D) = open_dec (subst_fvar x y u) (subst_dec x y D).
Proof.
intros. apply* subst_open_commute_typ_dec_decs.
Qed.
Lemma subst_open_commute_decs: forall x y u Ds,
subst_decs x y (open_decs u Ds) = open_decs (subst_fvar x y u) (subst_decs x y Ds).
Proof.
intros. apply* subst_open_commute_typ_dec_decs.
Qed.
(* "Introduce a substitution after open": Opening a term t with a var u is the
same as opening t with x and then replacing x by u. *)
Lemma subst_intro_trm: forall x u t, x \notin (fv_trm t) ->
open_trm u t = subst_trm x u (open_trm x t).
Proof.
introv Fr. unfold open_trm. rewrite* subst_open_commute_trm.
destruct (@subst_fresh_trm_def_defs x u) as [Q _]. rewrite* (Q t).
unfold subst_fvar. case_var*.
Qed.
Lemma subst_intro_defs: forall x u ds, x \notin (fv_defs ds) ->
open_defs u ds = subst_defs x u (open_defs x ds).
Proof.
introv Fr. unfold open_trm. rewrite* subst_open_commute_defs.
destruct (@subst_fresh_trm_def_defs x u) as [_ [_ Q]]. rewrite* (Q ds).
unfold subst_fvar. case_var*.
Qed.
Lemma subst_intro_typ: forall x u T, x \notin (fv_typ T) ->
open_typ u T = subst_typ x u (open_typ x T).
Proof.
introv Fr. unfold open_typ. rewrite* subst_open_commute_typ.
destruct (@subst_fresh_typ_dec_decs x u) as [Q _]. rewrite* (Q T).
unfold subst_fvar. case_var*.
Qed.
Lemma subst_intro_dec: forall x u D, x \notin (fv_dec D) ->
open_dec u D = subst_dec x u (open_dec x D).
Proof.
introv Fr. unfold open_trm. rewrite* subst_open_commute_dec.
destruct (@subst_fresh_typ_dec_decs x u) as [_ [Q _]]. rewrite* (Q D).
unfold subst_fvar. case_var*.
Qed.
Lemma subst_intro_decs: forall x u Ds, x \notin (fv_decs Ds) ->
open_decs u Ds = subst_decs x u (open_decs x Ds).
Proof.
introv Fr. unfold open_trm. rewrite* subst_open_commute_decs.
destruct (@subst_fresh_typ_dec_decs x u) as [_ [_ Q]]. rewrite* (Q Ds).
unfold subst_fvar. case_var*.
Qed.
Lemma subst_undo_avar: forall x y,
(forall a, y \notin fv_avar a -> (subst_avar y x (subst_avar x y a)) = a).
Proof.
intros. unfold subst_avar, subst_fvar, open_avar, open_rec_avar; destruct a.
+ reflexivity.
+ unfold fv_avar in H. assert (y <> v) by auto. repeat case_if; reflexivity.
Qed.
Lemma subst_undo_pth: forall x y,
(forall p, y \notin fv_pth p -> (subst_pth y x (subst_pth x y p)) = p).
Proof.
intros. destruct p. unfold subst_pth. f_equal.
unfold fv_pth in H.
apply* subst_undo_avar.
Qed.
Lemma subst_undo_typ_dec_decs: forall x y,
(forall T , y \notin fv_typ T -> (subst_typ y x (subst_typ x y T )) = T )
/\ (forall D , y \notin fv_dec D -> (subst_dec y x (subst_dec x y D )) = D )
/\ (forall Ds, y \notin fv_decs Ds -> (subst_decs y x (subst_decs x y Ds)) = Ds).
Proof.
intros.
apply typ_mutind; intros; simpl; unfold fv_typ, fv_dec, fv_decs in *; f_equal*.
apply* subst_undo_pth.
Qed.
Lemma subst_undo_trm_def_defs: forall x y,
(forall t , y \notin fv_trm t -> (subst_trm y x (subst_trm x y t )) = t )
/\ (forall d , y \notin fv_def d -> (subst_def y x (subst_def x y d )) = d )
/\ (forall ds, y \notin fv_defs ds -> (subst_defs y x (subst_defs x y ds)) = ds).
Proof.
intros.
apply trm_mutind; intros; simpl; unfold fv_trm, fv_def, fv_defs in *; f_equal*;
(apply* subst_undo_avar || apply* subst_undo_typ_dec_decs).
Qed.
Lemma subst_typ_undo: forall x y T,
y \notin fv_typ T -> (subst_typ y x (subst_typ x y T)) = T.
Proof.
apply* subst_undo_typ_dec_decs.
Qed.
Lemma subst_trm_undo: forall x y t,
y \notin fv_trm t -> (subst_trm y x (subst_trm x y t)) = t.
Proof.
apply* subst_undo_trm_def_defs.
Qed.
(* ###################################################################### *)
(** ** Regularity of Typing *)
Lemma extract_wf_dec_from_wf_decs: forall m G Ds D,
wf_decs m G Ds ->
decs_has Ds D ->
wf_dec m G D.
Proof.
intros m G Ds. induction Ds; introv Wf H.
- inversions H.
- inversions H.
* inversions Wf. assumption.
* inversions Wf. apply* IHDs.
Qed.
Lemma extract_wf_dec_from_wf_bdecs: forall m G Ds D,
wf_ctx m G ->
wf_bdecs m G Ds ->
bdecs_has Ds D ->
wf_dec m G D.
Proof.
intros. destruct Ds.
- inversions* H1.
- inversions H1. inversions H0. apply (extract_wf_dec_from_wf_decs H5 H3).
Qed.
Lemma wf_deep_to_any: forall m1 m2 G T,
wf_typ m1 deep G T ->
wf_typ m1 m2 G T.
Proof.
introv WfT. gen_eq m20: deep. induction WfT; intro Eq; subst; destruct m2; eauto.
discriminate.
Qed.
Hint Resolve extract_wf_dec_from_wf_decs extract_wf_dec_from_wf_bdecs wf_deep_to_any.
(* If a type is involved in a subtyping judgment, it is (deeply) well-formed.
(Note that there's still wf_sel2 which can break cycles.)
Lemma subtyping_regular:
(forall m1 m2 G T1 T2 n, subtyp m1 m2 G T1 T2 n ->
wf_typ m1 deep G T1 /\ wf_typ m1 deep G T2)
/\ (forall m G D1 D2 n, subdec m G D1 D2 n ->
wf_dec m G D1 /\ wf_dec m G D2)
/\ (forall m G Ds1 Ds2 n, subdecs m G Ds1 Ds2 n ->
wf_decs m G Ds1 /\ wf_decs m G Ds2).
Proof.
apply mutind3; intros; repeat split; subst;
repeat match goal with
| H: _ /\ _ |- _ => destruct H
| H: wf_dec _ _ (dec_typ _ _ _) |- _ => inversions H
| H: wf_dec _ _ (dec_fld _ _ ) |- _ => inversions H
| H: wf_dec _ _ (dec_mtd _ _ _) |- _ => inversions H
| H: wf_bdecs _ _ _ |- _ => inversions H
| H: bdecs_has _ _ |- _ => inversions H
end;
eauto.
Qed.
Definition subtyp_regular := proj1 subtyping_regular.
Definition subdec_regular := proj1 (proj2 subtyping_regular).
Definition subdecs_regular := proj2 (proj2 subtyping_regular).
*)
Lemma typing_regular:
(forall G t D, trm_has G t D ->
wf_dec ip G D)
/\ (forall m G T1 T2 n, subtyp m G T1 T2 n ->
wf_typ m deep G T1 /\ wf_typ m deep G T2)
/\ (forall m G D1 D2 n, subdec m G D1 D2 n ->
wf_dec m G D1 /\ wf_dec m G D2)
/\ (forall m G Ds1 Ds2 n, subdecs m G Ds1 Ds2 n ->
wf_decs m G Ds1 /\ wf_decs m G Ds2)
/\ (forall G t T, ty_trm G t T ->
wf_typ ip deep G T)
/\ (forall G d D, ty_def G d D ->
wf_dec ip G D)
/\ (forall G ds Ds, ty_defs G ds Ds ->
wf_decs ip G Ds).
Proof.
apply has_sub_ty_mutind; intros; repeat split; subst;
repeat match goal with
| H: _ /\ _ |- _ => destruct H
| H: wf_dec _ _ (dec_typ _ _ _) |- _ => inversions H
| H: wf_dec _ _ (dec_fld _ _ ) |- _ => inversions H
| H: wf_dec _ _ (dec_mtd _ _ _) |- _ => inversions H
| H: wf_bdecs _ _ _ |- _ => inversions H
| H: bdecs_has _ _ |- _ => inversions H
end;
eauto.
Qed.
Definition trm_has_regular := proj1 typing_regular .
Definition subtyp_regular := proj1 (proj2 typing_regular) .
Definition subdec_regular := proj1 (proj2 (proj2 typing_regular)) .
Definition subdecs_regular := proj1 (proj2 (proj2 (proj2 typing_regular))) .
Definition ty_trm_regular := proj1 (proj2 (proj2 (proj2 (proj2 typing_regular)))) .
Definition ty_def_regular := proj1 (proj2 (proj2 (proj2 (proj2 (proj2 typing_regular))))).
Definition ty_defs_regular := proj2 (proj2 (proj2 (proj2 (proj2 (proj2 typing_regular))))).
(* ###################################################################### *)
(** ** Context size lemmas *)
Lemma inc_max_ctx:
(forall m G T1 T2 n1, subtyp m G T1 T2 n1 ->
forall n2, n1 <= n2 -> subtyp m G T1 T2 n2 )
/\ (forall m G D1 D2 n1, subdec m G D1 D2 n1 ->
forall n2, n1 <= n2 -> subdec m G D1 D2 n2 )
/\ (forall m G Ds1 Ds2 n1, subdecs m G Ds1 Ds2 n1 ->
forall n2, n1 <= n2 -> subdecs m G Ds1 Ds2 n2 ).
Proof.
apply mutind3; try solve [intros; [constructor*
|| apply* subtyp_sel_l
|| apply* subtyp_sel_r]].
(*
+ (* case subtyp_bind *)
introv Sds IHSds Hle. rename n into n1. destruct n2 as [|n2]; [omega | idtac].
assert (n1 <= n2) by omega. apply* subtyp_bind.
*)
+ (* case subtyp_trans *)
intros. apply subtyp_trans with T2; auto.
+ (* case subdecs_push *)
intros. apply* subdecs_push.
Qed.
Definition subtyp_max_ctx := proj1 inc_max_ctx.
Definition subdec_max_ctx := proj1 (proj2 inc_max_ctx).
Definition subdecs_max_ctx := proj2 (proj2 inc_max_ctx).
Lemma ctx_size_push: forall G z T, ctx_size (G & z ~ T) = S (ctx_size G).
Proof.
intros. unfold ctx_size. rewrite <- cons_to_push.
rewrite LibList.length_cons. omega.
Qed.
Lemma ctx_size_swap_middle: forall G1 x T y U G2,
ctx_size (G1 & x ~ T & G2) = ctx_size (G1 & y ~ U & G2).
Proof.
intros. rewrite concat_def. rewrite single_def. unfold ctx_size.
repeat progress rewrite LibList.length_app. auto.
Qed.
(* ###################################################################### *)
(** ** Trivial inversion lemmas *)
Lemma invert_subdec_typ_sync_left: forall m G D L Lo2 Hi2 n,
subdec m G D (dec_typ L Lo2 Hi2) n ->
exists Lo1 Hi1, D = (dec_typ L Lo1 Hi1) /\
subtyp m G Lo2 Lo1 n /\
(* subtyp m oktrans G Lo1 Hi1 n /\ *)
subtyp m G Hi1 Hi2 n.
Proof.
introv Sd. inversions Sd. exists Lo1 Hi1. auto.
Qed.
Lemma invert_subdec_fld_sync_left: forall m G D l T2 n,
subdec m G D (dec_fld l T2) n ->
exists T1, D = (dec_fld l T1) /\
subtyp m G T1 T2 n.
Proof.
introv Sd. inversions Sd. exists T1. auto.
Qed.
Lemma invert_subdec_mtd_sync_left: forall m0 G D m T2 U2 n,
subdec m0 G D (dec_mtd m T2 U2) n ->
exists T1 U1, D = (dec_mtd m T1 U1) /\
subtyp m0 G T2 T1 n /\
subtyp m0 G U1 U2 n.
Proof.
introv Sd. inversions Sd. exists S1 T1. auto.
Qed.
Lemma invert_subdec_typ: forall m G L Lo1 Hi1 Lo2 Hi2 n,
subdec m G (dec_typ L Lo1 Hi1) (dec_typ L Lo2 Hi2) n ->
subtyp m G Lo2 Lo1 n /\ subtyp m G Hi1 Hi2 n.
Proof.
introv Sd. inversions Sd. auto.
Qed.
Lemma decs_has_preserves_wf: forall m G Ds D,
decs_has Ds D ->
wf_decs m G Ds ->
wf_dec m G D.
Proof.
intros m G Ds. induction Ds; introv Has Wf.
- inversions Has.
- inversions Wf. rename d into D0. inversions Has.
* assumption.
* apply* IHDs.
Qed.
Lemma bdecs_has_preserves_wf: forall m G Ds D,
bdecs_has Ds D ->
wf_bdecs m G Ds ->
wf_dec m G D.
Proof.
introv DsHas Wf. destruct Ds as [|Ds].
- inversions DsHas; auto.
- inversions DsHas. inversions Wf. apply* decs_has_preserves_wf.
Qed.
Lemma subdec_refl: forall m G D n,
wf_dec m G D ->
subdec m G D D n.
Proof.
introv Wf. inversions Wf; auto.
Qed.
Hint Resolve subdec_refl.
Lemma invert_subdecs: forall m G Ds1 Ds2 n,
subdecs m G Ds1 Ds2 n ->
forall D2, decs_has Ds2 D2 ->
(exists D1, decs_has Ds1 D1 /\ subdec m G D1 D2 n).
Proof.
introv Sds. induction Ds2; introv Has.
+ inversion Has.
+ inversions Sds.
* inversions Has.
- exists D1. repeat split; assumption.
- apply IHDs2; assumption.
(*
* exists D2. apply (conj Has).
lets Wf: (decs_has_preserves_wf Has H). apply (subdec_refl _ Wf).
*)
Qed.
Lemma wf_sto_to_ok_s: forall s G,
wf_sto s G -> ok s.
Proof. intros. induction H; jauto. Qed.
Lemma wf_sto_to_ok_G: forall s G,
wf_sto s G -> ok G.
Proof. intros. induction H; jauto. Qed.
Lemma wf_ctx_to_ok: forall m G,
wf_ctx m G -> ok G.
Proof. intros. induction H; jauto. Qed.
Lemma wf_sto_to_wf_ctx: forall s G,
wf_sto s G -> wf_ctx pr G.
Proof. intros. induction H; jauto. Qed.
Hint Resolve wf_sto_to_ok_s wf_sto_to_ok_G wf_ctx_to_ok wf_sto_to_wf_ctx.
Lemma ctx_binds_to_sto_binds: forall s G x T,
wf_sto s G ->
binds x T G ->
exists o, binds x o s.
Proof.
introv Wf Bi. gen x T Bi. induction Wf; intros.
+ false* binds_empty_inv.
+ unfolds binds. rewrite get_push in *. case_if.
- eauto.
- eauto.
Qed.
Lemma sto_binds_to_ctx_binds: forall s G x ds,
wf_sto s G ->
binds x ds s ->
exists Ds, binds x (typ_bind Ds) G.
Proof.
introv Wf Bi. gen x Bi. induction Wf; intros.
+ false* binds_empty_inv.
+ unfolds binds. rewrite get_push in *. case_if.
- inversions Bi. eauto.
- auto.
Qed.
Lemma sto_unbound_to_ctx_unbound: forall s G x,
wf_sto s G ->
x # s ->
x # G.
Proof.
introv Wf Ub_s.
induction Wf.
+ auto.
+ destruct (classicT (x0 = x)) as [Eq | Ne].
- subst. false (fresh_push_eq_inv Ub_s).
- auto.
Qed.
Lemma ctx_unbound_to_sto_unbound: forall s G x,
wf_sto s G ->
x # G ->
x # s.
Proof.
introv Wf Ub.
induction Wf.
+ auto.
+ destruct (classicT (x0 = x)) as [Eq | Ne].
- subst. false (fresh_push_eq_inv Ub).
- auto.
Qed.
Lemma invert_wf_sto: forall s G,
wf_sto s G ->
forall x ds T,
binds x ds s ->
binds x T G ->
exists Ds,
T = (typ_bind Ds) /\ exists G1 G2,
G = G1 & x ~ typ_bind Ds & G2 /\
(* ty_defs (G1 & x ~ typ_bind Ds) (open_defs x ds) (open_decs x Ds) /\ *)
ty_defs G1 ds Ds /\
cbounds_decs Ds.
Proof.
intros s G Wf. induction Wf; intros.
+ false* binds_empty_inv.
+ unfold binds in *. rewrite get_push in *.
case_if.
- inversions H4. inversions H5. exists Ds. split. reflexivity.
exists G (@empty typ). rewrite concat_empty_r. auto.
- specialize (IHWf x0 ds0 T H4 H5).
destruct IHWf as [Ds0 [Eq [G1 [G2 [EqG [Ty F]]]]]]. subst.
exists Ds0. apply (conj eq_refl).
exists G1 (G2 & x ~ typ_bind Ds).
rewrite concat_assoc.
apply (conj eq_refl). auto.
Qed.
Lemma invert_subdecs_push: forall m G Ds1 Ds2 D2 n1,
subdecs m G Ds1 (decs_cons D2 Ds2) n1 ->
exists D1, decs_has Ds1 D1
/\ subdec m G D1 D2 n1
/\ subdecs m G Ds1 Ds2 n1
/\ decs_hasnt Ds2 (label_of_dec D2).
Proof.
intros. inversions H.
- eauto 10.
(* subtyp_refl
- exists D2. split; [idtac | split].
* unfold decs_has, get_dec. case_if. reflexivity.
* admit. (* TODO subdec_refl doesn't hold if bad bounds!! *)
* admit. (* TODO holds *)
*)
Qed.
Lemma ty_def_to_label_eq: forall G d D,
ty_def G d D ->
label_of_def d = label_of_dec D.
Proof.
intros. inversions H; auto.
Qed.
Lemma extract_ty_def_from_ty_defs: forall G d ds D Ds,
ty_defs G ds Ds ->
defs_has ds d ->
decs_has Ds D ->
label_of_def d = label_of_dec D ->
ty_def G d D.
Proof.
introv HdsDs. induction HdsDs.
+ intros. inversion H.
+ introv dsHas DsHas Eq. inversions dsHas; inversions DsHas.
- assumption.
- apply ty_def_to_label_eq in H. rewrite H in Eq. rewrite Eq in H6. false* H6.
- apply ty_def_to_label_eq in H. rewrite <- H in Eq. rewrite Eq in H5. false* H5.
- apply* IHHdsDs.
Qed.
Lemma invert_ty_mtd_inside_ty_defs: forall G ds Ds m S T S' T' body,
ty_defs G ds Ds ->
defs_has ds (def_mtd m S T body) ->
decs_has Ds (dec_mtd m S' T') ->
(* conclusion is the premise needed to construct a ty_mtd: *)
exists L, forall x, x \notin L -> ty_trm (G & x ~ S) (open_trm x body) T
/\ S' = S /\ T' = T.
Proof.
introv HdsDs dsHas DsHas.
lets H: (extract_ty_def_from_ty_defs HdsDs dsHas DsHas).
simpl in H. specialize (H eq_refl). inversions* H.
Qed.
Lemma invert_ty_fld_inside_ty_defs: forall G ds Ds l v T,
ty_defs G ds Ds ->
defs_has ds (def_fld l v) ->
decs_has Ds (dec_fld l T) ->
ty_trm G (trm_var v) T.
Proof.
introv HdsDs dsHas DsHas.
lets H: (extract_ty_def_from_ty_defs HdsDs dsHas DsHas).
simpl in H. specialize (H eq_refl). inversions* H.
Qed.
Lemma decs_hasnt_to_defs_hasnt: forall G ds Ds l,
ty_defs G ds Ds ->
decs_hasnt Ds l ->
defs_hasnt ds l.
Proof.
introv Ty. induction Ty; intro Hn.
- apply defs_hasnt_nil.
- inversions Hn. lets Eq: (ty_def_to_label_eq H).
rewrite <- Eq in H5. apply defs_hasnt_cons.
* apply* IHTy.
* assumption.
Qed.
Lemma decs_has_to_defs_has: forall G ds Ds D,
ty_defs G ds Ds ->
decs_has Ds D ->
exists d, defs_has ds d /\ label_of_def d = label_of_dec D.
Proof.
introv Ty DsHas. induction Ty.
+ inversions DsHas.
+ inversions DsHas.
- exists d. refine (conj _ (ty_def_to_label_eq H)). apply defs_has_hit.
rewrite (ty_def_to_label_eq H). apply (decs_hasnt_to_defs_hasnt Ty H4).
- specialize (IHTy H3). destruct IHTy as [d0 [dsHas Eq]].
exists d0. refine (conj _ Eq). apply (defs_has_skip _ dsHas).
rewrite (ty_def_to_label_eq H). rewrite Eq. exact H5.
Qed.
Print Assumptions decs_has_to_defs_has.
(*
Lemma defs_has_to_decs_has: forall G l ds Ds d,
ty_defs G ds Ds ->
defs_has ds l d ->
exists D, decs_has Ds l D.
Proof.
introv Ty dsHas. induction Ty; unfolds defs_has, get_def.
+ discriminate.
+ unfold decs_has. folds get_def. rewrite get_dec_cons. case_if.
- exists D. reflexivity.
- rewrite -> (ty_def_to_label_for_eq n H) in dsHas. case_if. apply (IHTy dsHas).
Qed.
Print Assumptions defs_has_to_decs_has.
Lemma label_for_dec_open: forall z D n,
label_for_dec n (open_dec z D) = label_for_dec n D.
Proof.
intros. destruct D; reflexivity.
Qed.
(* The converse does not hold because
[(open_dec z D1) = (open_dec z D2)] does not imply [D1 = D2]. *)
Lemma decs_has_open: forall Ds l D z,
decs_has Ds l D -> decs_has (open_decs z Ds) l (open_dec z D).
Proof.
introv Has. induction Ds.
+ inversion Has.
+ unfold open_decs, open_rec_decs. fold open_rec_decs. fold open_rec_dec.
unfold decs_has, get_dec. case_if.
- unfold decs_has, get_dec in Has. rewrite label_for_dec_open in Has. case_if.
inversions Has. reflexivity.
- fold get_dec. apply IHDs. unfold decs_has, get_dec in Has.
rewrite label_for_dec_open in H. case_if. apply Has.
Qed.
TODO does not hold because
[(open_dec z D1) = (open_dec z D2)] does not imply [D1 = D2].
Axiom decs_has_close_admitted: forall Ds l D z,
decs_has (open_decs z Ds) l (open_dec z D) -> decs_has Ds l D. *)
(* ###################################################################### *)
(** ** Uniqueness *)
Lemma not_defs_has_and_hasnt: forall ds d,
defs_has ds d -> defs_hasnt ds (label_of_def d) -> False.
Proof.
intro ds. induction ds.
- introv nilHas. inversions nilHas. (* contradiction *)
- introv dsHas dsHasnt. inversions dsHas; inversions dsHasnt; auto_star.
Qed.
Lemma not_decs_has_and_hasnt: forall Ds D,
decs_has Ds D -> decs_hasnt Ds (label_of_dec D) -> False.
Proof.
intro Ds. induction Ds.
- introv nilHas. inversions nilHas. (* contradiction *)
- introv DsHas DsHasnt. inversions DsHas; inversions DsHasnt; auto_star.
Qed.
(* doesn't hold if a label appears several times
Lemma decs_has_decidable: forall Ds l,
decs_hasnt Ds l \/ exists D, decs_has Ds D /\ label_of_dec D = l. *)
Lemma decs_has_unique: forall Ds D1 D2,
decs_has Ds D1 ->
decs_has Ds D2 ->
label_of_dec D1 = label_of_dec D2 ->
D1 = D2.
Proof.
introv H1. induction H1.
- introv H2 Eq. inversions H2.
* reflexivity.
* rewrite Eq in H5. false H5. reflexivity.
- introv H2 Eq. inversions H2.
* rewrite Eq in H. false H. reflexivity.
* apply (IHdecs_has H4 Eq).
Qed.
Lemma bdecs_has_unique: forall Ds D1 D2,
bdecs_has Ds D1 ->
bdecs_has Ds D2 ->
label_of_dec D1 = label_of_dec D2 ->
D1 = D2.
Proof.
introv H1 H2 Eq; inversions H1; inversions H2; inversions Eq;
try (rewrite H in H1; inversions H1); try reflexivity.
apply (decs_has_unique H H1 H2).
Qed.
Lemma exp_has_unique:
(forall m G T Ds1, exp m G T Ds1 -> m = pr ->
forall Ds2, exp pr G T Ds2 -> Ds1 = Ds2) /\
(forall m G v D1, pth_has m G v D1 -> m = pr ->
forall D2, label_of_dec D1 = label_of_dec D2 -> pth_has pr G v D2 -> D1 = D2).
Proof.
apply exp_has_mutind; intros.
+ inversions H0. reflexivity.
+ inversions H0. reflexivity.
+ inversions H0. reflexivity.
+ inversions H2.
specialize (H eq_refl (dec_typ L Lo0 Hi0)). simpl in H.
specialize (H eq_refl H7). inversions H. apply* (H0 eq_refl).
+ inversions H1. inversions H2. inversions H0. inversions p0.
lets Eq: (binds_func H2 H7). subst.
specialize (H eq_refl Ds0 H1). subst Ds0.
rewrite (bdecs_has_unique b H3 H4). reflexivity.
Qed.
Lemma exp_unique: forall G T Ds1 Ds2,
exp pr G T Ds1 -> exp pr G T Ds2 -> Ds1 = Ds2.
Proof. intros. apply* exp_has_unique. Qed.
Lemma has_unique: forall G v D1 D2,
pth_has pr G v D1 -> pth_has pr G v D2 -> label_of_dec D1 = label_of_dec D2 -> D1 = D2.
Proof. intros. apply* exp_has_unique. Qed.
(* ###################################################################### *)
(** ** Expansion total *)
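(* Expansion is total on well-formed types: the wf judgment for [typ_sel]
   (via wf_sel1/wf_sel2) already records a well-formed bound to expand, so the
   proof is a direct induction on the wf derivation. *)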
Lemma exp_total: forall m1 m2 G T, wf_typ m1 m2 G T -> exists Ds, exp m1 G T Ds.
Proof.
introv Wf. induction Wf.
+ (* case wf_top *)
exists (bdecs_decs decs_nil). apply exp_top.
+ (* case wf_bot *)
exists bdecs_bot. apply exp_bot.
+ (* case wf_bind_deep *)
exists (bdecs_decs Ds). apply exp_bind.
+ (* case wf_bind_shallow *)
exists (bdecs_decs Ds). apply exp_bind.
+ (* case wf_sel1 *)
destruct IHWf2 as [DsHi ExpHi].
exists DsHi. apply (exp_sel H ExpHi).
+ (* case wf_sel2 *)
destruct IHWf as [DsU ExpU].
exists DsU. apply (exp_sel H ExpU).
Qed.
Print Assumptions exp_total.
(* ###################################################################### *)
(** ** Weakening *)
(*
Lemma align_env_eq: forall T (E1 E2 F1 F2: env T), E1 & E2 = F1 & F2 ->
(exists G1 G2 G3, E1 = G1 & G2 /\ E2 = G3 /\ F1 = G1 /\ F2 = G2 & G3)
\/ (exists G1 G2 G3, F1 = G1 & G2 /\ F2 = G3 /\ E1 = G1 /\ E2 = G2 & G3).
Admitted.
Lemma ctx_size_cons: forall G1 G2,
ctx_size (G1 & G2) = (ctx_size G1) + (ctx_size G2).
Admitted.
*)
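(* Weakening for all judgments at once, by mutual induction ([all_mutind]).
   The statement is phrased with [G = G1 & G3] so that the induction
   hypotheses remain applicable when the context grows, as in the ty_mtd
   case, where the context is extended with the fresh method parameter. *)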
Lemma weakening:
(forall m1 m2 G T, wf_typ m1 m2 G T -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
wf_typ m1 m2 (G1 & G2 & G3) T)
/\ (forall m G D, wf_dec m G D -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
wf_dec m (G1 & G2 & G3) D)
/\ (forall m G Ds, wf_decs m G Ds -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
wf_decs m (G1 & G2 & G3) Ds)
/\ (forall m G T Ds, exp m G T Ds -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
exp m (G1 & G2 & G3) T Ds)
/\ (forall m G t d, pth_has m G t d -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
pth_has m (G1 & G2 & G3) t d)
/\ (forall m G p T, pth_ty m G p T -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
pth_ty m (G1 & G2 & G3) p T)
/\ (forall G t d, trm_has G t d -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
trm_has (G1 & G2 & G3) t d)
/\ (forall m G T1 T2 n, subtyp m G T1 T2 n -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
subtyp m (G1 & G2 & G3) T1 T2 n)
/\ (forall m G D1 D2 n, subdec m G D1 D2 n -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
subdec m (G1 & G2 & G3) D1 D2 n)
/\ (forall m G Ds1 Ds2 n, subdecs m G Ds1 Ds2 n -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
subdecs m (G1 & G2 & G3) Ds1 Ds2 n)
/\ (forall G t T, ty_trm G t T -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
ty_trm (G1 & G2 & G3) t T)
/\ (forall G d D, ty_def G d D -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
ty_def (G1 & G2 & G3) d D)
/\ (forall G ds Ds, ty_defs G ds Ds -> forall G1 G2 G3,
G = G1 & G3 ->
ok (G1 & G2 & G3) ->
ty_defs (G1 & G2 & G3) ds Ds).
Proof.
apply all_mutind.
+ (* case wf_top *) eauto.
+ (* case wf_bot *) eauto.
+ (* case wf_bind_deep *) eauto.
+ (* case wf_bind_shallow *) eauto.
+ (* case wf_sel1 *) eauto.
+ (* case wf_sel2 *) eauto.
+ (* case wf_tmem *) eauto.
+ (* case wf_fld *) eauto.
+ (* case wf_mtd *) eauto.
+ (* case wf_nil *) eauto.
+ (* case wf_cons *) eauto.
+ (* case exp_top *) eauto.
+ (* case exp_bot *) eauto.
+ (* case exp_bind *) eauto.
+ (* case exp_sel *) eauto.
+ (* case pth_has_rule *) eauto.
+ (* case pth_ty_var *)
intros. subst. apply pth_ty_var.
apply* binds_weaken.
+ (* case pth_ty_sbsm *)
intros. apply pth_ty_sbsm with T1 n.
- apply* H.
- apply* H0.
+ (* case has_trm *) eauto.
+ (* case has_var *) eauto.
+ (* case subtyp_refl *) eauto.
+ (* case subtyp_top *) eauto.
+ (* case subtyp_bot *) eauto.
+ (* case subtyp_bind *) eauto.
+ (* case subtyp_sel_l *) eauto.
+ (* case subtyp_sel_r *) eauto.
+ (* case subtyp_trans *) eauto.
+ (* case subdec_typ *) eauto.
+ (* case subdec_fld *) eauto.
+ (* case subdec_mtd *) eauto.
+ (* case subdecs_empty *) eauto.
+ (* case subdecs_push *)
introv Has Sd IHSd Sds IHSds Hasnt Eq1 Ok.
refine (subdecs_push Has _ _ Hasnt).
- apply (IHSd _ _ _ Eq1 Ok).
- apply (IHSds _ _ _ Eq1 Ok).
+ (* case ty_var *)
intros. subst. apply ty_var.
- apply* binds_weaken.
- apply* H.
+ (* case ty_sel *) eauto.
+ (* case ty_call *) eauto.
+ (* case ty_new *) eauto.
+ (* case ty_sbsm *) eauto.
+ (* case ty_typ *) eauto.
+ (* case ty_fld *) eauto.
+ (* case ty_mtd *)
intros. subst.
apply_fresh ty_mtd as x.
* eauto.
* eauto.
* rewrite <- concat_assoc.
refine (H1 x _ G1 G2 (G3 & x ~ S) _ _).
- auto.
- symmetry. apply concat_assoc.
- rewrite concat_assoc. auto.
+ (* case ty_dsnil *) eauto.
+ (* case ty_dscons *) eauto.
Qed.
(*
+ (* case subtyp_bind *)
introv Sds IHSds Eq1 Ok. subst.
apply_fresh subtyp_bind as z.
rewrite <- concat_assoc.
assert (zL: z \notin L) by auto.
specialize (Sds z zL).
refine (IHSds z zL G1 G2 (G3 & z ~ typ_bind Ds1) _ _).
* rewrite <- concat_assoc. reflexivity.
* rewrite concat_assoc. auto.
*)
Print Assumptions weakening.
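(* The [*_end] corollaries below instantiate weakening with [G3 = empty];
   the [concat_empty_r] rewrites adjust the context shapes accordingly. *)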
Lemma weaken_exp_middle: forall m G1 G2 G3 T Ds,
ok (G1 & G2 & G3) -> exp m (G1 & G3) T Ds -> exp m (G1 & G2 & G3) T Ds.
Proof.
intros. apply* weakening.
Qed.
Lemma weaken_exp_end: forall m G1 G2 T Ds,
ok (G1 & G2) -> exp m G1 T Ds -> exp m (G1 & G2) T Ds.
Proof.
introv Ok Exp.
assert (Eq1: G1 = G1 & empty) by (rewrite concat_empty_r; reflexivity).
assert (Eq2: G1 & G2 = G1 & G2 & empty) by (rewrite concat_empty_r; reflexivity).
rewrite Eq1 in Exp. rewrite Eq2 in Ok. rewrite Eq2.
apply (weaken_exp_middle Ok Exp).
Qed.
Lemma weaken_wf_typ_end: forall m1 m2 G1 G2 T,
ok (G1 & G2) -> wf_typ m1 m2 G1 T -> wf_typ m1 m2 (G1 & G2) T.
Proof.
introv Ok Wf. destruct weakening as [W _].
specialize (W m1 m2 G1 T Wf G1 G2 empty).
repeat rewrite concat_empty_r in W. apply* W.
Qed.
Lemma weaken_wf_decs_end: forall m G1 G2 Ds,
ok (G1 & G2) -> wf_decs m G1 Ds -> wf_decs m (G1 & G2) Ds.
Proof.
introv Ok Wf. destruct weakening as [_ [_ [W _]]].
specialize (W m G1 Ds Wf G1 G2 empty).
repeat rewrite concat_empty_r in W. apply* W.
Qed.
Lemma weaken_subtyp_middle: forall m G1 G2 G3 S U n,
ok (G1 & G2 & G3) ->
subtyp m (G1 & G3) S U n ->
subtyp m (G1 & G2 & G3) S U n.
Proof.
destruct weakening as [_ [_ [_ [_ [_ [_ [_ [W _]]]]]]]].
introv Hok123 Hst.
specialize (W m (G1 & G3) S U n Hst).
specialize (W G1 G2 G3 eq_refl Hok123).
apply W.
Qed.
Lemma weaken_subtyp_end: forall m G1 G2 T1 T2 n,
ok (G1 & G2) -> subtyp m G1 T1 T2 n -> subtyp m (G1 & G2) T1 T2 n.
Proof.
introv Ok St.
assert (Eq1: G1 = G1 & empty) by (rewrite concat_empty_r; reflexivity).
assert (Eq2: G1 & G2 = G1 & G2 & empty) by (rewrite concat_empty_r; reflexivity).
rewrite Eq1 in St. rewrite Eq2 in Ok. rewrite Eq2.
apply (weaken_subtyp_middle Ok St).
Qed.
Lemma weaken_pth_has_end: forall m G1 G2 t d,
ok (G1 & G2) -> pth_has m G1 t d -> pth_has m (G1 & G2) t d.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [W _]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W m (G1 & empty)); rewrite* concat_empty_r.
Qed.
Lemma weaken_trm_has_end: forall G1 G2 t d,
ok (G1 & G2) -> trm_has G1 t d -> trm_has (G1 & G2) t d.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [_ [_ [W _]]]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W (G1 & empty)); rewrite* concat_empty_r.
Qed.
Lemma weaken_pth_ty_end: forall m G1 G2 p T,
ok (G1 & G2) -> pth_ty m G1 p T -> pth_ty m (G1 & G2) p T.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [_ [W _]]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W m (G1 & empty)); rewrite* concat_empty_r.
Qed.
(*
Lemma weaken_subdec_middle: forall m G1 G2 G3 S U,
ok (G1 & G2 & G3) ->
subdec m (G1 & G3) S U ->
subdec m (G1 & G2 & G3) S U.
Proof.
destruct weakening as [_ [_ [_ [W _]]]].
introv Hok123 Hst.
specialize (W m (G1 & G3) S U Hst).
specialize (W G1 G2 G3 eq_refl Hok123).
apply W.
Qed.
*)
Lemma weaken_subdec_end: forall m G1 G2 D1 D2 n,
ok (G1 & G2) ->
subdec m G1 D1 D2 n ->
subdec m (G1 & G2) D1 D2 n.
Proof.
introv Ok Sd.
destruct weakening as [_ [_ [_ [_ [_ [_ [_ [_ [W _]]]]]]]]].
rewrite <- (concat_empty_r G1) in Sd.
specialize (W m (G1 & empty) D1 D2 _ Sd G1 G2 empty).
repeat progress rewrite concat_empty_r in *.
apply (W eq_refl Ok).
Qed.
Lemma weaken_ty_trm_end: forall G1 G2 e T,
ok (G1 & G2) -> ty_trm G1 e T -> ty_trm (G1 & G2) e T.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [W _]]]]]]]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W (G1 & empty)); rewrite* concat_empty_r.
Qed.
Lemma weaken_ty_def_end: forall G1 G2 i d,
ok (G1 & G2) -> ty_def G1 i d -> ty_def (G1 & G2) i d.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [W _]]]]]]]]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W (G1 & empty)); rewrite* concat_empty_r.
Qed.
Lemma weaken_ty_defs_end: forall G1 G2 is Ds,
ok (G1 & G2) -> ty_defs G1 is Ds -> ty_defs (G1 & G2) is Ds.
Proof.
intros.
destruct weakening as [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ W]]]]]]]]]]]].
rewrite <- (concat_empty_r (G1 & G2)).
apply (W (G1 & empty)); rewrite* concat_empty_r.
Qed.
Lemma weaken_ty_trm_middle: forall G1 G2 G3 t T,
ok (G1 & G2 & G3) -> ty_trm (G1 & G3) t T -> ty_trm (G1 & G2 & G3) t T.
Proof.
intros. apply* weakening.
Qed.
Lemma weaken_ty_def_middle: forall G1 G2 G3 d D,
ty_def (G1 & G3) d D -> ok (G1 & G2 & G3) -> ty_def (G1 & G2 & G3) d D.
Proof.
intros. apply* weakening.
Qed.
Lemma weaken_ty_defs_middle: forall G1 G2 G3 ds Ds,
ty_defs (G1 & G3) ds Ds -> ok (G1 & G2 & G3) -> ty_defs (G1 & G2 & G3) ds Ds.
Proof.
intros. apply* weakening.
Qed.
(* ###################################################################### *)
(** ** Inversion lemmas needed by substitution *)
Lemma invert_wf_ctx: forall m G x T,
wf_ctx m G ->
binds x T G ->
wf_typ m deep G T.
Proof.
introv Wf. gen x T. induction Wf.
- introv Bi. false (binds_empty_inv Bi).
- introv Bi. apply binds_push_inv in Bi. destruct Bi as [[Eq1 Eq2]|[Ne Bi]].
* subst. apply* weaken_wf_typ_end.
* apply* weaken_wf_typ_end.
Qed.
Lemma invert_ty_var: forall G x T,
wf_ctx ip G ->
ty_trm G (trm_var (avar_f x)) T ->
exists T' n, subtyp ip G T' T n /\ binds x T' G.
Proof.
introv Wf Ty. gen Wf. gen_eq t: (trm_var (avar_f x)). gen x.
induction Ty; intros x' Eq Wf; try (solve [ discriminate ]).
+ inversions Eq. exists T 0.
refine (conj _ H).
refine (subtyp_refl _ _). apply (invert_wf_ctx Wf H).
+ subst. specialize (IHTy _ eq_refl Wf). destruct IHTy as [T' [n2 [St Bi]]].
exists T' (max n n2). split.
- apply subtyp_trans with T.
* apply (subtyp_max_ctx St). apply Max.le_max_r.
* apply (subtyp_max_ctx H). apply Max.le_max_l.
- exact Bi.
Qed.
Lemma invert_pth_ty: forall m G x T,
wf_ctx m G ->
pth_ty m G (pth_var (avar_f x)) T ->
exists T' n, subtyp m G T' T n /\ binds x T' G.
Proof.
introv Wf Ty. gen Wf. gen_eq p: (pth_var (avar_f x)). gen x.
induction Ty; intros x' Eq Wf.
+ inversions Eq. exists T 0.
refine (conj _ H).
refine (subtyp_refl _ _). apply (invert_wf_ctx Wf H).
+ subst. specialize (IHTy _ eq_refl Wf). destruct IHTy as [T' [n2 [St Bi]]].
exists T' (max n n2). split.
- apply subtyp_trans with T1.
* apply (subtyp_max_ctx St). apply Max.le_max_r.
* apply (subtyp_max_ctx H). apply Max.le_max_l.
- exact Bi.
Qed.
Lemma invert_concat_wf_ctx: forall m G1 G2,
wf_ctx m (G1 & G2) -> wf_ctx m G1.
Proof.
intros m G1 G2. gen G2. apply (env_ind (fun G2 => wf_ctx m (G1 & G2) -> wf_ctx m G1)).
- introv Wf. rewrite concat_empty_r in Wf. exact Wf.
- intros G2 x T IH Wf. rewrite concat_assoc in Wf. inversions Wf.
* false (empty_push_inv H1).
* apply eq_push_inv in H. destruct H as [Eq1 [Eq2 Eq3]]. subst. apply (IH H0).
Qed.
(* ###################################################################### *)
(** ** The substitution principle *)
(*
without dependent types:

   G, x: S |- e: T        G |- u: S
   --------------------------------
          G |- [u/x]e: T

with dependent types:

   G1, x: S, G2 |- t: T        G1 |- y: S
   ---------------------------------------
       G1, [y/x]G2 |- [y/x]t: [y/x]T

Note that in general, u is a term, but for our purposes it suffices to consider
the special case where u is a variable.
*)
Lemma subst_decs_hasnt: forall x y Ds l,
decs_hasnt Ds l ->
decs_hasnt (subst_decs x y Ds) l.
Proof.
introv H. induction H.
- unfold subst_decs. apply decs_hasnt_nil.
- unfold subst_decs. fold subst_decs. fold subst_dec.
apply decs_hasnt_cons.
* assumption.
* unfold subst_dec. destruct D; simpl in *; assumption.
Qed.
Lemma subst_label_of_dec: forall x y D,
label_of_dec D = label_of_dec (subst_dec x y D).
Proof.
intros. destruct D; simpl; reflexivity.
Qed.
Lemma subst_decs_hasnt_label_of_dec: forall x y Ds D,
decs_hasnt Ds (label_of_dec D) ->
decs_hasnt (subst_decs x y Ds) (label_of_dec (subst_dec x y D)).
Proof.
intros. rewrite <- (subst_label_of_dec x y). apply* subst_decs_hasnt.
Qed.
Hint Resolve subst_decs_hasnt_label_of_dec.
Lemma subst_decs_has: forall x y Ds D,
decs_has Ds D ->
decs_has (subst_decs x y Ds) (subst_dec x y D).
Proof.
introv Has. induction Has.
+ unfold subst_decs. fold subst_decs. fold subst_dec.
apply decs_has_hit.
apply (@subst_decs_hasnt x y) in H.
unfold subst_dec. destruct D; simpl in *; assumption.
+ unfold subst_decs. fold subst_decs. fold subst_dec.
apply (decs_has_skip _ IHHas).
unfold subst_dec. destruct D2; destruct D1; simpl in *; assumption.
Qed.
Lemma subst_bdecs_has: forall x y Ds D,
bdecs_has Ds D ->
bdecs_has (subst_bdecs x y Ds) (subst_dec x y D).
Proof.
intros. inversions H; simpl; try constructor. apply* subst_decs_has.
Qed.
Lemma subst_binds: forall x y v T G,
binds v T G ->
binds v (subst_typ x y T) (subst_ctx x y G).
Proof.
introv Bi. unfold subst_ctx. apply binds_map. exact Bi.
Qed.
(*
Inductive tyvar: pmode -> ctx -> var -> typ -> Prop :=
| tyvar_pr: forall G x T,
binds x T G ->
tyvar pr G x T
| tyvar_ip: forall G x T,
ty_trm G (trm_var (avar_f x)) T ->
tyvar ip G x T.
*)
Lemma has_ty_empty:
(forall m G p D, pth_has m G p D ->
G = empty -> forall x, p = pth_var (avar_f x) -> False) /\
(forall m G p T, pth_ty m G p T ->
G = empty -> forall x, p = pth_var (avar_f x) -> False).
Proof.
apply pth_has_ty_mutind; intros; subst; try discriminate;
try match goal with
| H: binds _ _ empty |- _ => false (binds_empty_inv H)
end;
eauto.
Qed.
Lemma has_empty: forall m x D, pth_has m empty (pth_var (avar_f x)) D -> False.
Proof.
intros. destruct has_ty_empty as [P _]. apply* P.
Qed.
Lemma ty_var_empty: forall m x T, pth_ty m empty (pth_var (avar_f x)) T -> False.
Proof.
intros. destruct has_ty_empty as [_ P]. apply* P.
Qed.
Lemma wf_in_empty_has_no_fv:
(forall m1 m2 G T , wf_typ m1 m2 G T -> G = empty -> m2 = deep -> fv_typ T = \{})
/\ (forall m G D , wf_dec m G D -> G = empty -> fv_dec D = \{})
/\ (forall m G Ds, wf_decs m G Ds -> G = empty -> fv_decs Ds = \{}).
Proof.
apply wf_mutind; intros; subst; try discriminate; simpl;
try match goal with
| H: pth_has _ empty _ _ |- _ => false (has_empty H)
| H: ty_trm empty (trm_var (avar_f _)) _ |- _ => false (ty_var_empty H)
end;
repeat match goal with
| H: ?x = ?x -> _ |- _ => specialize (H eq_refl)
end;
repeat match goal with
| H: _ = \{} |- _ => rewrite H
end;
try rewrite union_same; reflexivity.
Qed.
Lemma fv_subset_dom_ctx:
(forall m1 m2 G T , wf_typ m1 m2 G T -> m2 = deep -> (fv_typ T ) \c (dom G))
/\ (forall m G D , wf_dec m G D -> (fv_dec D ) \c (dom G))
/\ (forall m G Ds, wf_decs m G Ds -> (fv_decs Ds) \c (dom G))
/\ (forall m G t D, pth_has m G t D -> forall x, t = (pth_var (avar_f x)) -> x \in (dom G))
/\ (forall m G t T, pth_ty m G t T -> forall x, t = (pth_var (avar_f x)) -> x \in (dom G)).
Proof.
apply wf_has_ty_mutind; intros; subst; try discriminate; simpl;
try (solve [
try apply subset_empty_l;
try (unfold subset in *; intros);
repeat match goal with
| H: _ \in \{ _ } |- _ => rewrite in_singleton in H; subst
| H: _ \in _ \u _ |- _ => rewrite in_union in H
end;
auto_star]).
- auto.
- inversions H. unfold binds in b. apply (get_some_inv b).
- auto.
Qed.
Lemma notin_G_to_notin_fv_typ: forall m1 G T x,
wf_typ m1 deep G T ->
x # G ->
x \notin fv_typ T.
Proof.
introv WfT xG. subst.
destruct fv_subset_dom_ctx as [P _]. specialize (P m1 deep G T WfT eq_refl).
unfold subset in P. specialize (P x).
unfold notin. intro xT. specialize (P xT).
auto.
Qed.
Lemma fv_ctx_types_empty: fv_ctx_types empty = \{}.
Proof.
unfold fv_ctx_types. unfold fv_in_values.
rewrite values_def. rewrite empty_def.
rewrite LibList.map_nil. rewrite LibList.fold_right_nil.
reflexivity.
Qed.
Lemma fv_ctx_types_push: forall G x T,
fv_ctx_types (G & x ~ T) = (fv_ctx_types G) \u (fv_typ T).
Proof.
intros.
unfold fv_ctx_types. unfold fv_in_values.
rewrite values_def. rewrite <- cons_to_push.
rewrite LibList.map_cons. rewrite LibList.fold_right_cons.
simpl. rewrite union_comm. reflexivity.
Qed.
Lemma subset_union_l: forall (T: Type) (A B C: fset T),
A \c C -> A \c (B \u C).
Proof.
intros. unfold subset in *. intros. specialize (H x H0). rewrite in_union. auto.
Qed.
Lemma subset_union_r: forall (T: Type) (A B C: fset T),
A \c B -> A \c (B \u C).
Proof.
intros. unfold subset in *. intros. specialize (H x H0). rewrite in_union. auto.
Qed.
Lemma union_subset: forall (T: Type) (A B C: fset T),
A \c C /\ B \c C -> (A \u B) \c C.
Proof.
intros. unfold subset in *. intros. destruct H as [H1 H2].
rewrite in_union in H0. auto_star.
Qed.
Lemma fv_ctx_types_subset_dom_ctx: forall m1 G,
wf_ctx m1 G ->
(fv_ctx_types G) \c (dom G).
Proof.
introv Wf. gen_eq m2: deep. induction Wf; intro Eq; subst.
- unfold subset. intros. rewrite fv_ctx_types_empty in H.
exfalso. apply (in_empty_elim H).
- rewrite fv_ctx_types_push. rewrite dom_push.
apply subset_union_l. apply union_subset. split.
* apply (IHWf eq_refl).
* destruct fv_subset_dom_ctx as [P _]. apply* P.
Qed.
Lemma notin_G_to_notin_fv_ctx_types: forall m1 G x,
wf_ctx m1 G ->
x # G ->
x \notin fv_ctx_types G.
Proof.
introv Wf xG. lets P: (fv_ctx_types_subset_dom_ctx Wf). unfold subset in P.
unfold notin. intro H. specialize (P x H). auto.
Qed.
Lemma middle_notin: forall m1 G1 x S G2,
wf_ctx m1 (G1 & x ~ S & G2) ->
x # G1 /\
x \notin fv_ctx_types G1 /\
x \notin fv_typ S /\
x \notin dom G2.
Proof.
introv Wf. gen_eq G: (G1 & x ~ S & G2). gen_eq m2: deep. gen G1 x S G2.
induction Wf; introv Eqm Eq; subst m2.
- false (empty_middle_inv Eq).
- rename x0 into y. lets E: (env_case G2). destruct E as [Eq2 | [z [U [G3 Eq3]]]].
* subst. rewrite concat_empty_r in Eq.
apply eq_push_inv in Eq. destruct Eq as [Eq1 [Eq2 Eq3]]. subst y S G1.
clear IHWf.
repeat split; auto.
+ apply (notin_G_to_notin_fv_ctx_types Wf H0).
+ apply (notin_G_to_notin_fv_typ H H0).
* subst. rewrite concat_assoc in Eq. apply eq_push_inv in Eq.
destruct Eq as [Eq1 [Eq2 Eq3]]. subst z U G.
specialize (IHWf _ _ _ _ eq_refl eq_refl). auto_star.
Qed.
Print Assumptions middle_notin.
Definition not_binds_and_notin := binds_fresh_inv.
Lemma ok_concat_binds_left_to_notin_right: forall (A: Type) y (S: A) G1 G2,
ok (G1 & G2) -> binds y S G1 -> y # G2.
Proof.
intros A y S G1. apply (env_ind (fun G2 => ok (G1 & G2) -> binds y S G1 -> y # G2)).
- introv Ok Bi. auto.
- introv IH Ok Bi. rename E into G2. rewrite concat_assoc in Ok.
apply ok_push_inv in Ok. destruct Ok as [Ok N].
rewrite dom_push.
assert (xG2: x # G2) by auto.
assert (y <> x). {
intro Eq. subst.
assert (xG1: x # G1) by auto.
apply (not_binds_and_notin Bi xG1).
}
auto.
Qed.
Lemma concat_subst_ctx: forall x y G1 G2,
subst_ctx x y G1 & subst_ctx x y G2 = subst_ctx x y (G1 & G2).
Proof.
intros. unfold subst_ctx. rewrite map_concat. reflexivity.
Qed.
Lemma subst_avar_preserves_clo: forall x y,
forall p, (forall z, open_avar z p = p) ->
(forall z, open_avar z (subst_avar x y p) = (subst_avar x y p)).
Proof.
intros. specialize (H z). destruct p.
- simpl in *. case_if. reflexivity.
- simpl. unfold open_avar, open_rec_avar. case_if; reflexivity.
Qed.
Lemma subst_pth_preserves_clo: forall x y,
forall p, (forall z, open_pth z p = p) ->
(forall z, open_pth z (subst_pth x y p) = (subst_pth x y p)).
Proof.
intros. destruct p. simpl. f_equal.
apply subst_avar_preserves_clo.
intros. specialize (H z0). unfold open_pth, open_rec_pth in H. inversion H.
rewrite H1. apply H1.
Qed.
Lemma subst_clo: forall x y,
(forall T , (forall z, open_typ z T = T) ->
(forall z, open_typ z (subst_typ x y T ) = (subst_typ x y T )))
/\ (forall D , (forall z, open_dec z D = D) ->
(forall z, open_dec z (subst_dec x y D ) = (subst_dec x y D )))
/\ (forall Ds, (forall z, open_decs z Ds = Ds) ->
(forall z, open_decs z (subst_decs x y Ds) = (subst_decs x y Ds))).
Proof.
intros.
apply typ_mutind; intros; simpl; try reflexivity;
((unfold open_typ, open_rec_typ; fold open_rec_decs) ||
(unfold open_dec, open_rec_dec; fold open_rec_typ) ||
(unfold open_decs, open_rec_decs; fold open_rec_decs; fold open_rec_dec));
f_equal;
try (lets P: (@subst_pth_preserves_clo x y p));
match goal with
| H: _ |- _ => apply H
end;
intros z0;
match goal with
| H: forall _, _ = _ |- _ => specialize (H z0)
end;
match goal with
| E: _ = _ |- _ => inversion E
end;
match goal with
| E: _ = _ |- _ => solve [rewrite E; assumption]
end.
Qed.
Definition subst_dec_preserves_clo(x y: var) := proj1 (proj2 (subst_clo x y)).
Lemma subst_ctx_preserves_notin: forall x y z G,
z # G -> z # (subst_ctx x y G).
Proof.
introv N. unfold subst_ctx. rewrite dom_map. assumption.
Qed.
Lemma subst_cbounds: forall x y,
(forall T , cbounds_typ T -> cbounds_typ (subst_typ x y T ))
/\ (forall D , cbounds_dec D -> cbounds_dec (subst_dec x y D ))
/\ (forall Ds, cbounds_decs Ds -> cbounds_decs (subst_decs x y Ds)).
Proof.
intros.
apply typ_mutind; intros; simpl; try match goal with
| H: cbounds_typ (typ_bind _) |- _ => inversions H
| H: cbounds_dec (dec_typ _ _ _) |- _ => inversions H
| H: cbounds_dec (dec_fld _ _ ) |- _ => inversions H
| H: cbounds_dec (dec_mtd _ _ _) |- _ => inversions H
| H: cbounds_decs (decs_cons _ _) |- _ => inversions H
end;
constructor*.
Qed.
Definition subst_decs_preserves_cbounds(x y: var) := proj2 (proj2 (subst_cbounds x y)).
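(* Admitted axiom: every context is ok. It is used to discharge [ok G] side
   conditions; the [Print Assumptions] commands in this file make any uses
   visible. *)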
Axiom okadmit: forall G: ctx, ok G.
(*
Lemma align_envs_2: forall T (E1 E2 F1 F2: env T) x S,
E1 & E2 = F1 & x ~ S & F2 ->
(exists G, F1 = E1 & G /\ E2 = G & x ~ S & F2)
\/ (exists G, F2 = G & E2 /\ E1 = F1 & x ~ S & G).
Admitted.
*)
Lemma subst_ctx_push: forall G x y z T,
subst_ctx x y (G & z ~ T) = (subst_ctx x y G) & (z ~ subst_typ x y T).
Proof.
intros. unfold subst_ctx. rewrite map_push. reflexivity.
Qed.
(* Why (n+1)? What counts is not the growth of the context, but the maximum
   size it reaches; we only bound the growth because it is easier to measure.
   Accordingly, the subtyp/subdec/subdecs conclusions below use index (n+1). *)
Lemma subst_principles: forall y S,
(forall m1 m2 G T, wf_typ m1 m2 G T -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m1 G1 (pth_var (avar_f y)) S ->
wf_ctx m1 (G1 & x ~ S & G2) ->
wf_typ m1 m2 (G1 & (subst_ctx x y G2)) (subst_typ x y T))
/\ (forall m G D, wf_dec m G D -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
wf_dec m (G1 & (subst_ctx x y G2)) (subst_dec x y D))
/\ (forall m G Ds, wf_decs m G Ds -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
wf_decs m (G1 & (subst_ctx x y G2)) (subst_decs x y Ds))
/\ (forall m G T Ds, exp m G T Ds -> forall G1 G2 x,
G = G1 & x ~ S & G2 ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
exp m (G1 & (subst_ctx x y G2)) (subst_typ x y T) (subst_bdecs x y Ds))
/\ (forall m G t D, pth_has m G t D -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
pth_has m (G1 & (subst_ctx x y G2)) (subst_pth x y t) (subst_dec x y D))
/\ (forall m G p T, pth_ty m G p T -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
pth_ty m (G1 & (subst_ctx x y G2)) (subst_pth x y p) (subst_typ x y T))
/\ (forall G t D, trm_has G t D -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty ip G1 (pth_var (avar_f y)) S ->
wf_ctx ip (G1 & x ~ S & G2) ->
trm_has (G1 & (subst_ctx x y G2)) (subst_trm x y t) (subst_dec x y D))
/\ (forall m G T U n, subtyp m G T U n -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
subtyp m (G1 & (subst_ctx x y G2)) (subst_typ x y T) (subst_typ x y U) (n+1))
/\ (forall m G D1 D2 n, subdec m G D1 D2 n -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
subdec m (G1 & (subst_ctx x y G2)) (subst_dec x y D1) (subst_dec x y D2) (n+1))
/\ (forall m G Ds1 Ds2 n, subdecs m G Ds1 Ds2 n -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty m G1 (pth_var (avar_f y)) S ->
wf_ctx m (G1 & x ~ S & G2) ->
subdecs m (G1 & (subst_ctx x y G2)) (subst_decs x y Ds1) (subst_decs x y Ds2) (n+1))
/\ (forall G t T, ty_trm G t T -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty ip G1 (pth_var (avar_f y)) S ->
wf_ctx ip (G1 & x ~ S & G2) ->
ty_trm (G1 & (subst_ctx x y G2)) (subst_trm x y t) (subst_typ x y T))
/\ (forall G d D, ty_def G d D -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty ip G1 (pth_var (avar_f y)) S ->
wf_ctx ip (G1 & x ~ S & G2) ->
ty_def (G1 & (subst_ctx x y G2)) (subst_def x y d) (subst_dec x y D))
/\ (forall G ds Ds, ty_defs G ds Ds -> forall G1 G2 x,
G = (G1 & (x ~ S) & G2) ->
pth_ty ip G1 (pth_var (avar_f y)) S ->
wf_ctx ip (G1 & x ~ S & G2) ->
ty_defs (G1 & (subst_ctx x y G2)) (subst_defs x y ds) (subst_decs x y Ds)).
Proof.
intros y S. apply all_mutind.
+ (* case wf_top *)
intros. subst. apply* wf_top.
+ (* case wf_bot *)
intros. subst. apply* wf_bot.
+ (* case wf_bind_deep *)
intros. subst. apply* wf_bind_deep.
+ (* case wf_bind_shallow *)
intros. subst. apply* wf_bind_shallow.
+ (* case wf_sel1 *)
introv Has IHHas WfLo IHWfLo WfHi IHWfHi Eq Tyy Ok.
rename x into z, x0 into x. subst. simpl.
specialize (IHHas G1 G2 x eq_refl Tyy Ok).
specialize (IHWfLo G1 G2 x eq_refl Tyy Ok).
specialize (IHWfHi G1 G2 x eq_refl Tyy Ok).
simpl in IHHas.
case_if.
- (* case z = x *)
apply wf_sel1 with (subst_typ x y Lo) (subst_typ x y Hi); auto.
- (* case z <> x *)
apply wf_sel1 with (subst_typ x y Lo) (subst_typ x y Hi); auto.
+ (* case wf_sel2 *)
introv Has IHHas WfU IHWfU Eq Tyy Ok.
rename x into z, x0 into x. subst. simpl.
specialize (IHHas G1 G2 x eq_refl Tyy Ok).
specialize (IHWfU G1 G2 x eq_refl Tyy Ok).
simpl in IHHas.
case_if.
- (* case z = x *)
apply wf_sel2 with (subst_typ x y U); auto.
- (* case z <> x *)
apply wf_sel2 with (subst_typ x y U); auto.
+ (* case wf_tmem *)
intros. subst. apply* wf_tmem.
+ (* case wf_fld *)
intros. subst. apply* wf_fld.
+ (* case wf_mtd *)
intros. subst. apply* wf_mtd.
+ (* case wf_nil *)
intros. subst. apply* wf_nil.
+ (* case wf_cons *)
intros. subst. apply* wf_cons. (* uses subst_decs_hasnt_label_of_dec *)
+ (* case exp_top *)
intros. simpl. apply exp_top.
+ (* case exp_bot *)
intros. simpl. apply exp_bot.
+ (* case exp_bind *)
intros. simpl. apply* exp_bind.
+ (* case exp_sel *)
intros m G v L Lo Hi Ds Has IHHas Exp IHExp G1 G2 x EqG Tyy Ok. subst.
specialize (IHHas _ _ _ eq_refl Tyy Ok).
specialize (IHExp _ _ _ eq_refl Tyy Ok).
unfold subst_typ. unfold subst_pth. unfold subst_avar. case_if.
- simpl in IHHas. case_if.
apply (exp_sel IHHas IHExp).
- simpl in IHHas. case_if.
apply (exp_sel IHHas IHExp).
+ (* case pth_has_rule *)
introv Ty IHTy Exp IHExp DsHas Eq Tyy Wf. subst.
specialize (IHTy _ _ _ eq_refl Tyy Wf).
specialize (IHExp _ _ _ eq_refl Tyy Wf).
apply* pth_has_rule.
apply (subst_bdecs_has x y DsHas).
+ (* case pth_ty_var *)
introv Bi Eq Tyy WfG. subst. rename x into z, x0 into x.
lets Ok: (wf_ctx_to_ok WfG).
destruct (middle_notin WfG) as [xG1 [N2 [N3 N4]]]. clear xG1 N4.
unfold subst_pth, subst_avar. case_var.
- (* case z = x *)
assert (EqST: T = S) by apply (binds_middle_eq_inv Bi Ok). subst.
lets WfG1: (invert_concat_wf_ctx WfG). apply invert_concat_wf_ctx in WfG1.
lets P: (invert_pth_ty WfG1 Tyy).
destruct P as [S' [n [St Biy']]].
lets yG2: (ok_concat_binds_left_to_notin_right (ok_remove Ok) Biy').
apply (@subst_ctx_preserves_notin x y y G2) in yG2.
rewrite (@subst_fresh_typ x y S N3).
apply weaken_pth_ty_end; [unfold subst_ctx; auto | idtac].
destruct m.
* (* case precise *)
inversions Tyy. apply pth_ty_var. assumption.
* (* case imprecise *)
refine (pth_ty_sbsm _ St). apply pth_ty_var. assumption.
- (* case z <> x *)
apply pth_ty_var.
* rewrite <- (subst_fresh_ctx y G1 N2).
rewrite -> (concat_subst_ctx _ _).
lets Bi': (binds_subst Bi C).
apply (subst_binds _ _ Bi').
+ (* case pth_ty_sbsm *)
introv Ty IHTy St IHSt Eq Tyy Wf. subst.
apply pth_ty_sbsm with (subst_typ x y T1) (n+1).
- apply* IHTy.
- apply* IHSt.
+ (* case has_trm *)
introv Ty IHTy Exp IHExp DsHas Clo WfD IHWf Eq Tyy. introv Wf. subst.
specialize (IHTy _ _ _ eq_refl Tyy Wf).
apply has_trm with (subst_typ x y T) (subst_bdecs x y Ds).
- exact IHTy.
- apply* IHExp.
- apply* subst_bdecs_has.
- apply (subst_dec_preserves_clo _ _ Clo).
- apply* IHWf.
+ (* case has_var *)
introv Ty IHTy Exp IHExp DsHas WfD IHWf Eq Tyy. introv Wf. subst.
specialize (IHTy _ _ _ eq_refl Tyy Wf). simpl in *. case_if.
- (* case z = x *)
(* rewrite (subst_open_commute_dec x y x D). unfold subst_fvar. case_if. BIND *)
apply has_var with (subst_typ x y T) (subst_bdecs x y Ds).
* exact IHTy.
* apply* IHExp.
* apply (subst_bdecs_has x y DsHas).
* apply* IHWf.
- (* case z <> x *)
(* rewrite (subst_open_commute_dec x y z D). unfold subst_fvar. case_if. BIND *)
apply has_var with (subst_typ x y T) (subst_bdecs x y Ds).
* exact IHTy.
* apply* IHExp.
* apply (subst_bdecs_has x y DsHas).
* apply* IHWf.
+ (* case subtyp_refl *)
intros. subst. apply* subtyp_refl.
+ (* case subtyp_top *)
intros. simpl. apply* subtyp_top.
+ (* case subtyp_bot *)
intros. simpl. apply* subtyp_bot.
+ (* case subtyp_bind *)
intros m G Ds1 Ds2 n Sds IH G1 G2 x EqG Bi Ok. subst.
apply subtyp_bind. fold subst_decs.
specialize (IH G1 G2 x).
specialize (IH eq_refl Bi).
apply (IH Ok).
(*
+ (* case subtyp_bind *)
intros L m G Ds1 Ds2 n Sds IH G1 G2 x EqG Bi Ok. subst.
apply_fresh subtyp_bind as z. fold subst_decs.
assert (zL: z \notin L) by auto.
specialize (IH z zL G1 (G2 & z ~ typ_bind Ds1) x).
rewrite concat_assoc in IH.
specialize (IH eq_refl Bi).
unfold subst_ctx in IH. rewrite map_push in IH. simpl in IH.
rewrite concat_assoc in IH.
rewrite (subst_open_commute_decs x y z Ds1) in IH.
rewrite (subst_open_commute_decs x y z Ds2) in IH.
unfold subst_fvar in IH.
assert (x <> z) by auto. case_if.
unfold subst_ctx. apply IH. apply okadmit.
*)
+ (* case subtyp_sel_l *)
intros m G v L Lo Hi n Has IHHas St1 IHSt1 G1 G2 x EqG Tyy Wf. subst.
specialize (IHSt1 _ _ _ eq_refl Tyy Wf).
specialize (IHHas _ _ _ eq_refl Tyy Wf).
simpl in *.
case_if; apply (subtyp_sel_l IHHas IHSt1).
+ (* case subtyp_sel_r *)
intros m G v L Lo Hi n Has IHHas St1 IHSt1 G1 G2 x EqG Bi Ok. subst.
specialize (IHSt1 _ _ _ eq_refl Bi Ok).
specialize (IHHas _ _ _ eq_refl Bi Ok).
simpl in *.
case_if; apply (subtyp_sel_r IHHas IHSt1).
+ (* case subtyp_trans *)
intros m G T1 T2 T3 St12 IH12 St23 IH23 G1 G2 x Eqm EqG Bi Ok. subst.
apply* subtyp_trans.
+ (* case subdec_typ *)
intros. apply* subdec_typ.
+ (* case subdec_fld *)
intros. apply* subdec_fld.
+ (* case subdec_mtd *)
intros. apply* subdec_mtd.
+ (* case subdecs_empty *)
intros. apply* subdecs_empty.
+ (* case subdecs_push *)
introv Ds1Has Sd IHSd Sds IHSds Ds2Hasnt Eq2 Tyy Ok. subst.
specialize (IHSd _ _ _ eq_refl Tyy Ok).
specialize (IHSds _ _ _ eq_refl Tyy Ok).
apply (subst_decs_has x y) in Ds1Has.
apply subdecs_push with (subst_dec x y D1); fold subst_dec; fold subst_decs; auto.
(*
+ (* case subdecs_refl *)
intros. apply* subdecs_refl.
*)
+ (* case ty_var *)
introv Bi WfT IHWf Eq Tyy WfG. subst. rename x into z, x0 into x.
lets Ok: (wf_ctx_to_ok WfG).
destruct (middle_notin WfG) as [xG1 [N2 [N3 N4]]]. clear xG1 N4.
unfold subst_trm, subst_avar. case_var.
- (* case z = x *)
assert (EqST: T = S) by apply (binds_middle_eq_inv Bi Ok). subst.
lets WfG1: (invert_concat_wf_ctx WfG). apply invert_concat_wf_ctx in WfG1.
lets Ty: (invert_pth_ty WfG1 Tyy).
destruct Ty as [S' [n [St Biy]]].
lets yG2: (ok_concat_binds_left_to_notin_right (ok_remove Ok) Biy).
apply (@subst_ctx_preserves_notin x y y G2) in yG2.
apply weaken_ty_trm_end.
* unfold subst_ctx. auto.
* rewrite (@subst_fresh_typ x y S N3).
refine (ty_sbsm _ St). apply (ty_var Biy). apply (invert_wf_ctx WfG1 Biy).
- (* case z <> x *)
apply ty_var.
* rewrite <- (subst_fresh_ctx y G1 N2).
rewrite -> (concat_subst_ctx _ _).
lets Bi': (binds_subst Bi C).
apply (subst_binds _ _ Bi').
* apply* IHWf.
+ (* case ty_sel *)
intros G t l T Has IH G1 G2 x Eq Bi Ok. apply* ty_sel.
+ (* case ty_call *)
intros G t m U V u Has IHt Tyu IHu G1 G2 x Eq Bi Ok. apply* ty_call.
+ (* case ty_new *)
intros G ds Ds Tyds IHTyds Cb G1 G2 x Eq Bi Ok. subst G.
apply ty_new.
- fold subst_defs.
specialize (IHTyds G1 G2 x).
specialize (IHTyds eq_refl Bi).
unfold subst_ctx in IHTyds. unfold subst_ctx.
apply IHTyds. auto.
- apply (subst_decs_preserves_cbounds _ _ Cb).
(*+ case ty_new
intros L G ds Ds Tyds IHTyds Cb G1 G2 x Eq Bi Ok. subst G.
apply_fresh ty_new as z.
- fold subst_defs.
lets C: (@subst_open_commute_defs x y z ds).
unfolds open_defs. unfold subst_fvar in C. case_var.
rewrite <- C.
lets D: (@subst_open_commute_decs x y z Ds).
unfolds open_defs. unfold subst_fvar in D. case_var.
rewrite <- D.
rewrite <- concat_assoc.
assert (zL: z \notin L) by auto.
specialize (IHTyds z zL G1 (G2 & z ~ typ_bind Ds) x). rewrite concat_assoc in IHTyds.
specialize (IHTyds eq_refl Bi).
unfold subst_ctx in IHTyds. rewrite map_push in IHTyds. unfold subst_ctx.
apply IHTyds. auto.
- apply (subst_decs_preserves_cbounds _ _ Cb). *)
+ (* case ty_sbsm *)
intros G t T U n Ty IHTy St IHSt G1 G2 x Eq Bi Ok. subst.
apply ty_sbsm with (subst_typ x y T) (n+1).
- apply* IHTy.
- apply* IHSt.
+ (* case ty_typ *)
intros. simpl. apply* ty_typ.
+ (* case ty_fld *)
intros. apply* ty_fld.
+ (* case ty_mtd *)
introv WfU IHWfU WfT IHWfT Ty IH Eq Tyy WfG. subst. rename S0 into U.
apply_fresh ty_mtd as z.
- apply* IHWfU.
- apply* IHWfT.
- fold subst_trm. fold subst_typ.
lets C: (@subst_open_commute_trm x y z t).
unfolds open_trm. unfold subst_fvar in C. case_var.
rewrite <- C.
rewrite <- concat_assoc.
assert (zL: z \notin L) by auto.
specialize (IH z zL G1 (G2 & z ~ U) x). rewrite concat_assoc in IH.
specialize (IH eq_refl Tyy).
unfold subst_ctx in IH. rewrite map_push in IH. unfold subst_ctx.
apply IH. apply wf_ctx_push; auto.
+ (* case ty_dsnil *)
intros. apply ty_dsnil.
+ (* case ty_dscons *)
intros.
apply* ty_dscons.
Qed.
Print Assumptions subst_principles.
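(* The corollaries below specialize [subst_principles] to [G2 = empty],
   i.e. substituting away the last binding of the context. *)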
Lemma trm_subst_principle: forall G x y t S T,
wf_ctx ip (G & x ~ S) ->
ty_trm (G & x ~ S) t T ->
pth_ty ip G (pth_var (avar_f y)) S ->
ty_trm G (subst_trm x y t) (subst_typ x y T).
Proof.
introv Wf tTy yTy.
destruct (subst_principles y S) as [_ [_ [_ [_ [_ [_ [_ [_ [_ [_ [P _]]]]]]]]]]].
specialize (P _ t T tTy G empty x).
unfold subst_ctx in P. rewrite map_empty in P.
repeat (progress (rewrite concat_empty_r in P)).
apply* P.
Qed.
Lemma pr_subdec_subst_principle: forall G x y S D1 D2 n,
wf_ctx pr (G & x ~ S) ->
subdec pr (G & x ~ S) D1 D2 n ->
binds y S G ->
subdec pr G (subst_dec x y D1) (subst_dec x y D2) (n+1).
Proof.
introv Wf Sd yTy.
destruct (subst_principles y S) as [_ [_ [_ [_ [_ [_ [_ [_ [P _]]]]]]]]].
specialize (P _ _ D1 D2 _ Sd G empty x).
unfold subst_ctx in P. rewrite map_empty in P.
repeat (progress (rewrite concat_empty_r in P)).
apply (@pth_ty_var pr) in yTy.
apply* P.
Qed.
Lemma pr_subdecs_subst_principle: forall G x y S Ds1 Ds2 n,
wf_ctx pr (G & x ~ S) ->
subdecs pr (G & x ~ S) Ds1 Ds2 n ->
binds y S G ->
subdecs pr G (subst_decs x y Ds1) (subst_decs x y Ds2) (n+1).
Proof.
introv Hok Sds yTy.
destruct (subst_principles y S) as [_ [_ [_ [_ [_ [_ [_ [_ [_ [P _]]]]]]]]]].
specialize (P _ _ Ds1 Ds2 _ Sds G empty x).
unfold subst_ctx in P. rewrite map_empty in P.
repeat (progress (rewrite concat_empty_r in P)).
apply (@pth_ty_var pr) in yTy.
apply* P.
Qed.
Lemma subdecs_subst_principle: forall G x y S Ds1 Ds2 n,
wf_ctx ip (G & x ~ S) ->
subdecs ip (G & x ~ S) Ds1 Ds2 n ->
pth_ty ip G (pth_var (avar_f y)) S ->
subdecs ip G (subst_decs x y Ds1) (subst_decs x y Ds2) (n+1).
Proof.
introv Hok Sds yTy.
destruct (subst_principles y S) as [_ [_ [_ [_ [_ [_ [_ [_ [_ [P _]]]]]]]]]].
specialize (P _ _ Ds1 Ds2 _ Sds G empty x).
unfold subst_ctx in P. rewrite map_empty in P.
repeat (progress (rewrite concat_empty_r in P)).
apply* P.
Qed.
(* ###################################################################### *)
(** ** More inversion lemmas *)
Lemma invert_var_has_dec: forall G x D,
trm_has G (trm_var (avar_f x)) D ->
exists T Ds D', ty_trm G (trm_var (avar_f x)) T /\
exp ip G T Ds /\
bdecs_has Ds D' /\
(* BIND open_dec x D'*) D' = D.
Proof.
introv Has. inversions Has.
(* case has_trm *)
+ subst. exists T Ds D. auto.
(* case has_var *)
+ exists T Ds D. auto.
Qed.
Lemma invert_has: forall G t D,
trm_has G t D ->
(exists T Ds, ty_trm G t T /\
exp ip G T Ds /\
bdecs_has Ds D /\
(forall z: var, open_dec z D = D))
\/ (exists x T Ds D', t = (trm_var (avar_f x)) /\
ty_trm G (trm_var (avar_f x)) T /\
exp ip G T Ds /\
bdecs_has Ds D' /\
(* BIND open_dec x D'*) D' = D).
Proof.
introv Has. inversions Has.
(* case has_trm *)
+ subst. left. exists T Ds. auto.
(* case has_var *)
+ right. exists v T Ds D. auto.
Qed.
Lemma invert_var_has_dec_typ: forall G x l S U,
trm_has G (trm_var (avar_f x)) (dec_typ l S U) ->
exists X Ds S' U', ty_trm G (trm_var (avar_f x)) X /\
exp ip G X Ds /\
bdecs_has Ds (dec_typ l S' U') /\
(* BIND open_typ x S' *) S' = S /\
(* BIND open_typ x U' *) U' = U.
Proof.
introv Has. apply invert_var_has_dec in Has.
destruct Has as [X [Ds [D [Tyx [Exp [Has Eq]]]]]].
destruct D as [ Lo Hi | T' | S' U' ]; try solve [ inversion Eq ].
unfold open_dec, open_rec_dec in Eq. fold open_rec_typ in Eq.
inversions Eq.
exists X Ds S U. auto.
Qed.
Lemma invert_var_has_dec_fld: forall G x l T,
trm_has G (trm_var (avar_f x)) (dec_fld l T) ->
exists X Ds T', ty_trm G (trm_var (avar_f x)) X /\
exp ip G X Ds /\
bdecs_has Ds (dec_fld l T') /\
(* BIND open_typ x T'*) T' = T.
Proof.
introv Has. apply invert_var_has_dec in Has.
destruct Has as [X [Ds [D [Tyx [Exp [Has Eq]]]]]].
destruct D as [ Lo Hi | T' | T1 T2 ]; try solve [ inversion Eq ].
unfold open_dec, open_rec_dec in Eq. fold open_rec_typ in Eq.
inversions Eq.
exists X Ds T. auto.
Qed.
Lemma invert_var_has_dec_mtd: forall G x l S U,
trm_has G (trm_var (avar_f x)) (dec_mtd l S U) ->
exists X Ds S' U', ty_trm G (trm_var (avar_f x)) X /\
exp ip G X Ds /\
bdecs_has Ds (dec_mtd l S' U') /\
(* BIND open_typ x S'*) S' = S /\
(* BIND open_typ x U'*) U' = U.
Proof.
introv Has. apply invert_var_has_dec in Has.
destruct Has as [X [Ds [D [Tyx [Exp [Has Eq]]]]]].
destruct D as [ Lo Hi | T' | S' U' ]; try solve [ inversion Eq ].
unfold open_dec, open_rec_dec in Eq. fold open_rec_typ in Eq.
inversions Eq.
exists X Ds S U. auto.
Qed.
Lemma invert_exp_sel: forall m G v L Ds,
exp m G (typ_sel (pth_var (avar_f v)) L) Ds ->
exists Lo Hi, pth_has m G (pth_var (avar_f v)) (dec_typ L Lo Hi) /\
exp m G Hi Ds.
Proof.
introv Exp. inversions Exp. exists Lo Hi. auto.
Qed.
Lemma invert_ty_sel: forall G t l T,
ty_trm G (trm_sel t l) T ->
exists T' n, subtyp ip G T' T n /\ trm_has G t (dec_fld l T').
Proof.
introv Ty. gen_eq t0: (trm_sel t l). gen t l.
induction Ty; intros t' l' Eq; try (solve [ discriminate ]).
+ inversions Eq. exists T 0. refine (conj _ H).
apply subtyp_refl. apply trm_has_regular in H. inversions H. assumption.
+ subst. rename t' into t, l' into l. specialize (IHTy _ _ eq_refl).
destruct IHTy as [T' [n' [St Has]]]. exists T' (max n n'). split.
- lets Hle1: (Max.le_max_l n n'). lets Hle2: (Max.le_max_r n n').
apply (subtyp_trans (subtyp_max_ctx St Hle2) (subtyp_max_ctx H Hle1)).
- exact Has.
Qed.
Lemma invert_ty_call: forall G t m V2 u,
ty_trm G (trm_call t m u) V2 ->
exists U V1 n, trm_has G t (dec_mtd m U V1)
/\ (subtyp ip G V1 V2 n)
/\ ty_trm G u U.
Proof.
introv Ty. gen_eq e: (trm_call t m u). gen t m u.
induction Ty; intros t0 m0 u0 Eq; try solve [ discriminate ]; symmetry in Eq.
+ (* case ty_call *)
inversions Eq. exists U V 0. apply (conj H). split.
- apply subtyp_refl. apply trm_has_regular in H. inversions H. assumption.
- auto.
+ (* case ty_sbsm *)
subst t. specialize (IHTy _ _ _ eq_refl).
rename t0 into t, m0 into m, u0 into u, U into V3, T into V2.
destruct IHTy as [U [V1 [n' [Has [St12 Tyu]]]]].
exists U V1.
exists (max n n'). refine (conj Has (conj _ Tyu)).
apply (subtyp_trans (subtyp_max_ctx St12 (Max.le_max_r n n'))
(subtyp_max_ctx H (Max.le_max_l n n'))).
Qed.
Lemma invert_ty_new: forall G ds T2,
ty_trm G (trm_new ds) T2 ->
exists n Ds, subtyp ip G (typ_bind Ds) T2 n /\
ty_defs G ds Ds /\
cbounds_decs Ds /\
wf_decs ip G Ds.
(*
exists L, (forall x, x \notin L ->
ty_defs (G & x ~ typ_bind Ds) (open_defs x ds) (open_decs x Ds)) /\
cbounds_decs Ds.
*)
Proof.
introv Ty. gen_eq t0: (trm_new ds). gen ds.
induction Ty; intros ds' Eq; try (solve [ discriminate ]); symmetry in Eq.
+ (* case ty_new *)
inversions Eq. exists 0 Ds.
lets Wf: (ty_defs_regular H). auto.
+ (* case ty_sbsm *)
subst. rename ds' into ds. specialize (IHTy _ eq_refl).
destruct IHTy as [n0 [Ds [St IHTy]]]. exists (max n n0) Ds.
refine (conj _ IHTy).
apply (subtyp_trans (subtyp_max_ctx St (Max.le_max_r n n0))
(subtyp_max_ctx H (Max.le_max_l n n0))).
Qed.
(*
Lemma invert_wf_sto_with_weakening: forall s G,
wf_sto s G ->
forall x ds Ds T,
binds x (object Ds ds) s ->
binds x T G
-> T = (typ_bind Ds)
/\ ty_defs G (open_defs x ds) (open_decs x Ds)
/\ cbounds_decs Ds.
Proof.
introv Wf Bs BG.
lets P: (invert_wf_sto Wf).
specialize (P x ds Ds T Bs BG).
destruct P as [EqT [G1 [G2 [EqG [Ty F]]]]]. subst.
apply (conj eq_refl).
lets Ok: (wf_sto_to_ok_G Wf).
split.
+ apply (weaken_ty_defs_end Ok Ty).
+ exact F.
Qed.
Lemma invert_wf_sto_with_sbsm: forall s G,
wf_sto s G ->
forall x ds Ds T,
binds x (object Ds ds) s ->
ty_trm G (trm_var (avar_f x)) T (* <- instead of binds *)
-> exists n, subtyp ip oktrans G (typ_bind Ds) T n
/\ ty_defs G (open_defs x ds) (open_decs x Ds)
/\ cbounds_decs Ds.
Proof.
introv Wf Bis Tyx.
apply invert_ty_var in Tyx. destruct Tyx as [T'' [St BiG]].
destruct (invert_wf_sto_with_weakening Wf Bis BiG) as [EqT [Tyds F]].
subst T''.
lets Ok: (wf_sto_to_ok_G Wf).
apply (conj St).
auto.
Qed.
*)
(* ------------------------------------------------------------------------- *)
(* ------------------------------------------------------------------------- *)
(* ------------------------------------------------------------------------- *)
(* subdecs_refl does not hold in general, because subdecs requires that for
   each dec in the rhs (including hidden ones) there is an unhidden one in the
   lhs, or alternatively that the rhs contains no hidden decs. So reflexivity
   was simply added as a rule. *)
Lemma decs_has_preserves_sub: forall m G Ds1 Ds2 D2 n,
decs_has Ds2 D2 ->
subdecs m G Ds1 Ds2 n ->
exists D1, decs_has Ds1 D1 /\ subdec m G D1 D2 n.
Proof.
introv Has Sds. induction Has.
+ inversions Sds. eauto.
(*
- eauto.
- exists D. repeat split.
* apply (decs_has_hit _ H).
* apply subdec_refl. inversions H0. assumption.
*)
+ inversion Sds; subst. eauto.
(*
- eauto.
- exists D1. repeat split.
* apply (decs_has_skip _ Has H).
* apply subdec_refl. admit. (* TODO wf-ness *)
*)
Qed.
Print Assumptions decs_has_preserves_sub.
Lemma bdecs_has_preserves_sub: forall m G Ds1 Ds2 D2,
wf_bdecs m G Ds2 ->
bdecs_has Ds2 D2 ->
subbdecs m G Ds1 Ds2 ->
exists n D1, bdecs_has Ds1 D1 /\ subdec m G D1 D2 n.
Proof.
introv Wf Ds2Has Sds. lets WfD: (bdecs_has_preserves_wf Ds2Has Wf).
inversions Sds.
- destruct D2 as [L Lo2 Hi2 | l T2 | mm U2 V2].
* exists 0 (dec_typ L typ_top typ_bot). inversions WfD. repeat split; eauto 10.
* exists 0 (dec_fld l typ_bot). inversions WfD. repeat split; eauto 10.
* exists 0 (dec_mtd mm typ_top typ_bot). inversions WfD. repeat split; eauto 10.
- exists 0 D2. auto.
- inversions Wf. inversions Ds2Has.
lets P: (decs_has_preserves_sub H1 H). destruct P as [D1 [DsHas Sd]].
exists n D1. auto.
Qed.
Lemma subdec_trans: forall m G D1 D2 D3 n,
subdec m G D1 D2 n -> subdec m G D2 D3 n -> subdec m G D1 D3 n.
Proof.
introv H12 H23. inversions H12; inversions H23; constructor;
solve [ assumption | (eapply subtyp_trans; eassumption)].
Qed.
Lemma subdecs_trans: forall m G Ds1 Ds2 Ds3 n,
subdecs m G Ds1 Ds2 n ->
subdecs m G Ds2 Ds3 n ->
subdecs m G Ds1 Ds3 n.
Proof.
introv Sds12 Sds23.
destruct (subdecs_regular Sds12) as [Wf1 Wf2].
destruct (subdecs_regular Sds23) as [_ Wf3].
induction Ds3.
+ inversions Sds23. apply (subdecs_empty _ Wf1).
+ rename d into D3.
apply invert_subdecs_push in Sds23.
destruct Sds23 as [D2 [Ds2Has [Sd23 [Sds23 Ds3Hasnt]]]].
lets Sds12': (invert_subdecs Sds12).
specialize (Sds12' _ Ds2Has).
destruct Sds12' as [D1 [Ds1Has Sd12]].
apply subdecs_push with D1.
- assumption.
- apply subdec_trans with D2; assumption.
- apply (IHDs3 Sds23). inversions Wf3. assumption.
- assumption.
Qed.
Lemma open_decs_nil: forall z, (open_decs z decs_nil) = decs_nil.
Proof.
intro z. reflexivity.
Qed.
(* a variation of exp_preserves_sub where the first expansion is not a
hypothesis but a conclusion (this doesn't work: T1 might have no expansion)
Lemma swap_sub_and_exp: forall m2 s G T1 T2 Ds2,
wf_sto s G ->
subtyp pr m2 G T1 T2 ->
exp pr G T2 Ds2 ->
exists L Ds1,
exp pr G T1 Ds1 /\
forall z, z \notin L ->
subdecs pr (G & z ~ typ_bind Ds1) (open_decs z Ds1) (open_decs z Ds2)
*)
(*
(* does not hold because T1 could be a permutation of T2 *)
Axiom subsub2eq: forall m1 m2 G T1 T2,
subtyp m1 m2 G T1 T2 ->
subtyp m1 m2 G T2 T1 ->
T1 = T2.
*)
Lemma subbdecs_trans: forall m G Ds1 Ds2 Ds3,
subbdecs m G Ds1 Ds2 -> subbdecs m G Ds2 Ds3 -> subbdecs m G Ds1 Ds3.
Proof.
introv Sds12 Sds23. inversions Sds12; inversions Sds23; eauto.
apply subbdecs_decs with (max n n0).
apply subdecs_trans with Ds4.
- apply (subdecs_max_ctx H). apply Max.le_max_l.
- apply (subdecs_max_ctx H3). apply Max.le_max_r.
Qed.
Inductive gbounds_typ: ctx -> typ -> Prop :=
| gbounds_top: forall G, gbounds_typ G typ_top
| gbounds_bot: forall G, gbounds_typ G typ_bot
| gbounds_bind: forall G Ds,
gbounds_decs G Ds ->
gbounds_typ G (typ_bind Ds)
| gbounds_sel: forall G x L,
gbounds_typ G (typ_sel (pth_var x) L) (* don't enter path types *)
with gbounds_dec: ctx -> dec -> Prop :=
| gbounds_tmem: forall G L Lo Hi n,
subtyp pr G Lo Hi n ->
gbounds_typ G Lo ->
gbounds_typ G Hi ->
gbounds_dec G (dec_typ L Lo Hi)
| gbounds_fld: forall G l T,
gbounds_typ G T ->
gbounds_dec G (dec_fld l T)
| gbounds_mtd: forall G m U V,
gbounds_typ G U ->
gbounds_typ G V ->
gbounds_dec G (dec_mtd m U V)
with gbounds_decs: ctx -> decs -> Prop :=
| gbounds_nil: forall G,
gbounds_decs G decs_nil
| gbounds_cons: forall G D Ds,
gbounds_dec G D ->
gbounds_decs G Ds ->
gbounds_decs G (decs_cons D Ds).
Hint Constructors gbounds_typ gbounds_dec gbounds_decs.
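(* [gbounds] (presumably "good bounds") is the context-dependent counterpart
   of the syntactic [cbounds]: a type member only needs a precise subtyping
   derivation [Lo <: Hi] in G. [cbounds_to_gbounds] below shows that
   well-formed [cbounds] types satisfy it. *)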
Lemma cbounds_to_gbounds:
(forall T , cbounds_typ T -> forall G, wf_typ pr deep G T -> gbounds_typ G T )
/\ (forall D , cbounds_dec D -> forall G, wf_dec pr G D -> gbounds_dec G D )
/\ (forall Ds, cbounds_decs Ds -> forall G, wf_decs pr G Ds -> gbounds_decs G Ds).
Proof.
apply cbounds_mutind; intros; auto;
try match goal with
| H: _ |- _ => solve [inversions H; eauto 10]
end.
Grab Existential Variables. apply 0. apply 0.
Qed.
Inductive simple_ctx: ctx -> Prop :=
| simple_ctx_empty: simple_ctx empty
| simple_ctx_push: forall G x Ds,
x # G ->
simple_ctx G ->
gbounds_decs G Ds ->
wf_decs pr G Ds ->
simple_ctx (G & x ~ (typ_bind Ds)).
Hint Constructors simple_ctx.
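(* A simple context binds only [typ_bind Ds] types whose decs have good
   bounds; as [wf_sto_to_simple_ctx] shows, every store typing is simple. *)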
Lemma wf_sto_to_simple_ctx: forall s G,
wf_sto s G -> simple_ctx G.
Proof.
intros. induction H.
- auto.
- apply simple_ctx_push; auto. apply* cbounds_to_gbounds.
Qed.
Scheme gbounds_typ_mut := Induction for gbounds_typ Sort Prop
with gbounds_dec_mut := Induction for gbounds_dec Sort Prop
with gbounds_decs_mut := Induction for gbounds_decs Sort Prop.
Combined Scheme gbounds_mutind from gbounds_typ_mut, gbounds_dec_mut, gbounds_decs_mut.
Lemma weaken_gbounds:
(forall G T , gbounds_typ G T -> forall x U, ok (G & x ~ U) ->
gbounds_typ (G & x ~ U) T )
/\ (forall G D , gbounds_dec G D -> forall x U, ok (G & x ~ U) ->
gbounds_dec (G & x ~ U) D )
/\ (forall G Ds, gbounds_decs G Ds -> forall x U, ok (G & x ~ U) ->
gbounds_decs (G & x ~ U) Ds).
Proof.
apply gbounds_mutind; intros; eauto.
apply (weaken_subtyp_end H1) in s. eauto.
Qed.
Lemma invert_simple_ctx: forall G,
simple_ctx G ->
ok G /\
forall x T, binds x T G -> exists Ds,
T = typ_bind Ds /\
gbounds_decs G Ds /\
wf_decs pr G Ds.
Proof.
introv Sc. induction Sc.
- split.
* auto.
* introv Bi. false (binds_empty_inv Bi).
- assert (Ok': ok (G & x ~ typ_bind Ds)) by auto_star.
apply (conj Ok').
introv Bi. apply binds_push_inv in Bi. destruct Bi as [[Eq1 Eq2] | [Ne Bi]].
* subst. exists Ds. apply (conj eq_refl).
split.
+ apply* weaken_gbounds.
+ apply* weaken_wf_decs_end.
* destruct IHSc as [Ok F].
specialize (F x0 T Bi). destruct F as [Ds0 [Eq Gb]]. subst.
exists Ds0. apply (conj eq_refl). split.
+ apply* weaken_gbounds.
+ apply* weaken_wf_decs_end.
Qed.
Lemma invert_gbounds_decs: forall G Ds D,
gbounds_decs G Ds ->
decs_has Ds D ->
gbounds_dec G D.
Proof.
intros G Ds. induction Ds; introv Gb DsHas; inversions Gb; inversions DsHas; eauto.
Qed.
Lemma has_good_bounds: forall G x L Lo Hi,
simple_ctx G ->
pth_has pr G (pth_var (avar_f x)) (dec_typ L Lo Hi) ->
exists n, subtyp pr G Lo Hi n.
Proof.
introv Sc Has.
destruct (invert_simple_ctx Sc) as [Ok F].
inversion Has as [A1 A2 A3 A4 T Ds' Bi Exp DsHas]. subst.
inversions Bi. rename H2 into Bi.
specialize (F _ _ Bi). destruct F as [Ds [Eq [Gb _]]]. subst.
inversions Exp.
inversions DsHas.
lets P: (invert_gbounds_decs Gb H0). inversions P. eauto.
Qed.
Lemma exp_preserves_wf: forall G T Ds,
exp pr G T Ds ->
wf_typ pr deep G T ->
simple_ctx G ->
wf_bdecs pr G Ds.
Proof.
introv Exp. gen_eq m: pr. induction Exp; introv Eq Wf Sc; subst.
- auto.
- auto.
- inversions Wf. auto.
- inversions Wf.
+ (* case wf_sel1: simple, because everything we need is packed in Wf *)
lets Eq: (has_unique H H2). simpl in Eq. specialize (Eq eq_refl). inversions Eq.
auto.
+ (* case wf_sel2: Wf only contains shallow wf-ness of U, but we need deep,
so we have to get it out of simple_ctx G. *)
lets Eq: (has_unique H H3). simpl in Eq. specialize (Eq eq_refl). inversions Eq.
apply* IHExp.
lets P: (has_good_bounds Sc H). destruct P as [n St].
destruct (subtyp_regular St) as [_ WfU]. exact WfU.
Qed.
Lemma exp_preserves_sub_pr: forall G T1 T2 Ds1 Ds2 n1,
subtyp pr G T1 T2 n1 ->
simple_ctx G ->
exp pr G T1 Ds1 ->
exp pr G T2 Ds2 ->
subbdecs pr G Ds1 Ds2.
Proof.
introv St. gen_eq m: pr. gen Ds1 Ds2. gen m G T1 T2 n1 St.
apply (subtyp_ind (fun m G T1 T2 n => forall Ds1 Ds2,
m = pr ->
simple_ctx G ->
exp m G T1 Ds1 ->
exp m G T2 Ds2 ->
subbdecs m G Ds1 Ds2)).
+ (* case subtyp_refl *)
introv n Wf Eq Sc Exp1 Exp2. subst.
lets Eq: (exp_unique Exp1 Exp2). subst. destruct Ds2.
- apply subbdecs_bot.
- apply subbdecs_refl.
+ (* case subtyp_top *)
introv n Wf Eq Sc Exp1 Exp2. subst. inversions Exp2. destruct Ds1 as [|Ds1].
- apply subbdecs_bot.
- refine (subbdecs_decs (subdecs_empty _ _)).
lets P: (exp_preserves_wf Exp1 Wf Sc). inversions P. assumption.
+ (* case subtyp_bot *)
introv n Wf Eq Sc Exp1 Exp2. subst. inversions Exp1. apply subbdecs_bot.
+ (* case subtyp_bind *)
introv Sds Eq1 Sc Exp1 Exp2. subst. inversions Exp1. inversions Exp2.
apply (subbdecs_decs Sds).
+ (* case subtyp_sel_l *)
introv Has2 St1 IHSt1 Eq1 Sc Exp1 Exp2. subst.
apply invert_exp_sel in Exp1. destruct Exp1 as [Lo1 [Hi1 [Has1 Exp1]]].
lets Eq: (has_unique Has2 Has1).
simpl in Eq. specialize (Eq eq_refl). inversions Eq.
lets Eq: (exp_unique Exp1 Exp2). subst.
destruct Ds2.
- apply subbdecs_bot.
- apply subbdecs_refl.
+ (* case subtyp_sel_r *)
introv Has St1 IHSt1 Eq1 Sc Exp1 Exp2. subst.
(* note: here it's crucial that subtyp_sel_r has Lo<:Hi as a premise *)
apply invert_exp_sel in Exp2. destruct Exp2 as [Lo' [Hi' [Has' Exp2]]].
lets Eq: (has_unique Has' Has).
simpl in Eq. specialize (Eq eq_refl). inversions Eq. clear Has'.
destruct (subtyp_regular St1) as [WfLo _].
lets ExpLo: (exp_total WfLo).
destruct ExpLo as [DsLo ExpLo].
specialize (IHSt1 DsLo Ds2 eq_refl Sc ExpLo Exp2).
lets Eq: (exp_unique ExpLo Exp1). subst. exact IHSt1.
+ (* case subtyp_trans *)
introv St12 IH12 St23 IH23 Eq1 Sc Exp1 Exp3. subst. rename Ds2 into Ds3.
destruct (subtyp_regular St23) as [Wf2 _].
lets Exp2: (exp_total Wf2).
destruct Exp2 as [Ds2 Exp2].
apply subbdecs_trans with Ds2; auto.
(* no idea where this var comes from, but here's a trick: *)
Grab Existential Variables. apply 7.
Qed.
Print Assumptions exp_preserves_sub_pr.
(*
Lemma exp_preserves_sub_pr: forall G T1 T2 Ds1 Ds2 n1,
subtyp pr oktrans G T1 T2 n1 ->
exp pr G T1 Ds1 ->
exp pr G T2 Ds2 ->
(* BIND *)
exists L n2, forall z, z \notin L ->
subdecs pr (G & z ~ typ_bind Ds1) (open_decs z Ds1) (open_decs z Ds2) n2.
Proof.
introv St Exp1 Exp2.
destruct (putting_it_together (ctx_size G) n1) as [E _].
unfold egr_exp_preserves_sub_pr in E.
specialize (E oktrans _ _ _ _ _ _ St eq_refl eq_refl Exp1 Exp2).
destruct E as [L Sds]. exists L (pred n1).
exact Sds.
Qed.
*)
(*
Axiom wf_typ_admit: forall m1 m2 G T, wf_typ m1 m2 G T.
Axiom wf_dec_admit: forall m1 m2 G D, wf_dec m1 m2 G D.
Axiom wf_decs_admit: forall m1 m2 G Ds, wf_decs m1 m2 G Ds.
*)
Lemma ip2pr:
(forall m1 m2 G T, wf_typ m1 m2 G T ->
m1 = ip ->
simple_ctx G ->
wf_typ pr m2 G T)
/\ (forall m G D, wf_dec m G D ->
m = ip ->
simple_ctx G ->
wf_dec pr G D)
/\ (forall m G Ds, wf_decs m G Ds ->
m = ip ->
simple_ctx G ->
wf_decs pr G Ds)
/\ (forall m G T Ds2, exp m G T Ds2 ->
m = ip ->
simple_ctx G ->
exists Ds1,
exp pr G T Ds1 /\ subbdecs pr G Ds1 Ds2)
/\ (forall m G t D2, pth_has m G t D2 -> forall v,
m = ip ->
simple_ctx G ->
t = (pth_var (avar_f v)) ->
exists D1 n, pth_has pr G (pth_var (avar_f v)) D1 /\
subdec pr G D1 D2 n)
/\ (forall m G p T, pth_ty m G p T ->
m = ip ->
simple_ctx G ->
exists T' n, pth_ty pr G p T' /\ subtyp pr G T' T n)
/\ (forall m G T1 T2 n1, subtyp m G T1 T2 n1 ->
m = ip ->
simple_ctx G ->
exists n2, subtyp pr G T1 T2 n2)
/\ (forall m G D1 D2 n1, subdec m G D1 D2 n1 ->
m = ip ->
simple_ctx G ->
exists n2, subdec pr G D1 D2 n2)
/\ (forall m G Ds1 Ds2 n1, subdecs m G Ds1 Ds2 n1 ->
m = ip ->
simple_ctx G ->
exists n2, subdecs pr G Ds1 Ds2 n2).
Proof.
apply mutind9.
+ (* case wf_top *) auto.
+ (* case wf_bot *) auto.
+ (* case wf_bind_deep *) auto.
+ (* case wf_bind_shallow *) auto.
+ (* case wf_sel1 *)
introv Has2 IHHas WfLo IHWfLo WfHi IHWfHi Eq Sc. subst.
auto_specialize.
rename Lo into Lo2, Hi into Hi2. destruct IHHas as [D1 [n [Has1 Sd]]].
apply invert_subdec_typ_sync_left in Sd.
destruct Sd as [Lo1 [Hi1 [Eq [StLo StHi]]]]. subst.
apply (wf_sel1 Has1).
- destruct (subtyp_regular StLo) as [_ P]. apply (wf_deep_to_any _ P).
- destruct (subtyp_regular StHi) as [P _]. apply (wf_deep_to_any _ P).
+ (* case wf_sel2 *)
introv Has2 IHHas WfU IHWfU Eq Sc. subst.
auto_specialize.
destruct IHHas as [D1 [n [Has1 Sd]]].
apply invert_subdec_typ_sync_left in Sd.
destruct Sd as [Lo1 [Hi1 [Eq [StLo StHi]]]]. subst.
apply (wf_sel1 Has1).
- destruct (subtyp_regular StLo) as [_ P]. apply (wf_deep_to_any _ P).
- destruct (subtyp_regular StHi) as [P _]. apply (wf_deep_to_any _ P).
+ (* case wf_tmem *) auto.
+ (* case wf_fld *) auto.
+ (* case wf_mtd *) auto.
+ (* case wf_nil *) auto.
+ (* case wf_cons *) auto.
+ (* case exp_top *)
intros. subst. exists (bdecs_decs decs_nil).
apply (conj (exp_top _ _)).
inversions H0; auto.
+ (* case exp_bot *)
intros. subst. eauto.
+ (* case exp_bind *)
introv Eq Wf. subst.
exists (bdecs_decs Ds).
apply (conj (exp_bind _ _ _)).
auto.
+ (* case exp_sel *)
introv Has IHHas Exp IHExp Eq Sc. subst. rename Hi into Hi2, Lo into Lo2.
auto_specialize.
destruct IHExp as [Ds1 [ExpHi2 Sds12]].
destruct IHHas as [D1 [n2 [Has1 Sd]]].
apply invert_subdec_typ_sync_left in Sd.
destruct Sd as [Lo1 [Hi1 [Eq [StLo StHi]]]]. subst.
destruct (subtyp_regular StHi) as [WfHi1 _].
lets E: (exp_total WfHi1).
destruct E as [Ds0 ExpHi1].
(*
destruct Ds0 as [|Ds0].
* (* case Ds0 = bdecs_bot *)
exists bdecs_bot. split.
- apply (exp_sel Has1 ExpHi1).
- apply subbdecs_bot.
* (* case Ds0 <> bdecs_bot *)
*)
lets Sds01: (exp_preserves_sub_pr StHi Sc ExpHi1 ExpHi2).
(********************)
(* destruct Sds01 as [L1 Sds01]. BIND *)
exists Ds0. split.
- apply (exp_sel Has1 ExpHi1).
- apply (subbdecs_trans Sds01 Sds12).
(* BIND: will need narrowing before we can apply subbdecs transitivity!
intros z Fr.
assert (zL1: z \notin L1) by auto. specialize (Sds01 z zL1).
assert (zL0: z \notin L0) by auto. specialize (Sds12 z zL0).
apply (subdecs_trans Sds01).
destruct pr_narrowing as [_ [_ [_ [_ N]]]].
specialize (N pr _ _ _ Sds12 G empty z Ds0 Ds1 eq_refl).
do 2 rewrite concat_empty_r in N.
refine (N _ eq_refl _ _ Sds01).
*)
+ (* case pth_has_rule *)
introv Ty IHTy Exp IHExp DsHas Eq Sc Eq2. subst.
auto_specialize.
destruct IHExp as [Dsm [Expm Sds2]].
destruct IHTy as [X1 [n [BiG St]]].
destruct (invert_simple_ctx Sc) as [_ E].
inversions BiG. rename H2 into BiG.
specialize (E _ _ BiG). destruct E as [Ds1 [Eq [Gb WfDs1]]]. subst X1.
lets Exp1: (exp_bind pr G Ds1).
lets Sds1: (exp_preserves_sub_pr St Sc Exp1 Expm).
(********************)
(* BIND: need to apply narrowing in Sds2 first! *)
lets Sds: (subbdecs_trans Sds1 Sds2).
inversions Sds.
- (* case subbdecs_refl *)
exists D 0. split.
* refine (pth_has_rule _ Exp1 DsHas). apply pth_ty_var. assumption.
* apply subdec_refl. inversions DsHas.
apply (decs_has_preserves_wf H0 WfDs1).
- (* case subbdecs_decs *)
inversions DsHas.
lets P: (decs_has_preserves_sub H0 H2).
destruct P as [D1 [Ds1Has Sd]].
exists D1 n0. refine (conj _ Sd).
refine (pth_has_rule _ Exp1 (bdecs_has_decs Ds1Has)).
apply pth_ty_var. assumption.
+ (* case pth_ty_var *)
introv Bi Eq Sc. subst.
exists T 0.
apply (conj (pth_ty_var _ Bi)).
refine (subtyp_refl _ _).
destruct (invert_simple_ctx Sc) as [_ E].
specialize (E _ _ Bi). destruct E as [Ds1 [Eq [Gb WfDs1]]]. subst T.
apply (wf_bind_deep WfDs1).
+ (* case pth_ty_sbsm *)
introv Ty IHTy St IHSt Eq Sc. subst.
auto_specialize.
destruct IHTy as [T0 [n1 [Bi St01]]].
destruct IHSt as [n2 IHSt].
exists T0 (max n1 n2). apply (conj Bi).
apply subtyp_trans with T1.
* apply (subtyp_max_ctx St01). apply Max.le_max_l.
* apply (subtyp_max_ctx IHSt). apply Max.le_max_r.
+ (* case subtyp_refl *)
introv n Wf IHWf Eq Sc. subst. auto_specialize. exists 0. auto.
+ (* case subtyp_top *)
intros. exists 0. auto.
+ (* case subtyp_bot *)
intros. exists 0. auto.
+ (* case subtyp_bind *)
introv Sds IH Eq Sd. subst. auto_specialize. destruct IH as [n2 Sds'].
exists n2. auto.
+ (* case subtyp_sel_l *)
introv Has2 IHHas St1 IHSt1 Eq Sc. subst. auto_specialize.
destruct IHHas as [D1 [n2 [Has1 Sd]]].
(* destruct IHSt1 as [n0 StLoHi]. *) clear IHSt1.
apply invert_subdec_typ_sync_left in Sd.
destruct Sd as [Lo1 [Hi1 [Eq [StLo StHi]]]]. subst D1.
(* Before, we got Lo1<:Hi1 out of Sd, but this premise had to be removed from
subdec_typ because otherwise the (L: Top..Bot) of decs_bot is not a subdec
of anything.
So now we use has_good_bounds, but this requires a good context. *)
lets P: (has_good_bounds Sc Has1). destruct P as [n1 StLoHi].
exists (max n1 n2).
apply subtyp_trans with Hi1.
* apply (subtyp_sel_l Has1).
apply (subtyp_max_ctx StLoHi). apply Max.le_max_l.
* apply (subtyp_max_ctx StHi). apply Max.le_max_r.
+ (* case subtyp_sel_r *)
introv Has2 IHHas St1 IHSt1 Eq Sc. subst. auto_specialize.
destruct IHHas as [D1 [n2 [Has1 Sd]]].
clear IHSt1.
apply invert_subdec_typ_sync_left in Sd.
destruct Sd as [Lo1 [Hi1 [Eq [StLo StHi]]]]. subst D1.
(* Before, we got Lo1<:Hi1 out of Sd, but this premise had to be removed from
subdec_typ because otherwise the (L: Top..Bot) of decs_bot is not a subdec
of anything.
So now we use has_good_bounds, but this requires a good context. *)
lets P: (has_good_bounds Sc Has1). destruct P as [n1 StLoHi].
exists (max n1 n2).
apply subtyp_trans with Lo1.
* apply (subtyp_max_ctx StLo). apply Max.le_max_r.
* apply (subtyp_sel_r Has1).
apply (subtyp_max_ctx StLoHi). apply Max.le_max_l.
+ (* case subtyp_trans *)
introv St12 IH12 St23 IH23 Eq Wf. subst.
auto_specialize. destruct IH12 as [n12 IH12]. destruct IH23 as [n23 IH23].
exists (max n12 n23). apply subtyp_trans with T2.
* apply (subtyp_max_ctx IH12). apply Max.le_max_l.
* apply (subtyp_max_ctx IH23). apply Max.le_max_r.
+ (* case subdec_typ *)
introv StLo IHLo StHi IHHi Eq Sc. subst.
auto_specialize. destruct IHLo as [nLo IHLo]. destruct IHHi as [nHi IHHi].
exists (max nLo nHi). apply subdec_typ.
* apply (subtyp_max_ctx IHLo). apply Max.le_max_l.
* apply (subtyp_max_ctx IHHi). apply Max.le_max_r.
+ (* case subdec_fld *)
intros. subst. auto_specialize. destruct H as [n0 H]. exists n0. apply* subdec_fld.
+ (* case subdec_mtd *)
introv StLo IHLo StHi IHHi Eq Sc. subst.
auto_specialize. destruct IHLo as [nLo IHLo]. destruct IHHi as [nHi IHHi].
exists (max nLo nHi). apply subdec_mtd.
* apply (subtyp_max_ctx IHLo). apply Max.le_max_l.
* apply (subtyp_max_ctx IHHi). apply Max.le_max_r.
+ (* case subdecs_empty *)
intros. subst. auto_specialize. exists 0. apply* subdecs_empty.
+ (* case subdecs_push *)
intros. subst. auto_specialize. destruct H as [n0 Sd]. destruct H0 as [n2 Sds].
exists (max n0 n2). apply subdecs_push with D1; auto.
* apply (subdec_max_ctx Sd). apply Max.le_max_l.
* apply (subdecs_max_ctx Sds). apply Max.le_max_r.
Qed.
Print Assumptions ip2pr.
Lemma ip2pr_pth_has: forall s G v D2,
wf_sto s G ->
pth_has ip G (pth_var (avar_f v)) D2 ->
exists D1 n, pth_has pr G (pth_var (avar_f v)) D1 /\ subdec pr G D1 D2 n.
Proof.
introv Wf Has.
destruct ip2pr as [_ [_ [_ [_ [P _]]]]]. lets Sc: (wf_sto_to_simple_ctx Wf). apply* P.
Qed.
Lemma pr2ip:
(forall m1 m2 G T, wf_typ m1 m2 G T -> wf_typ ip m2 G T)
/\ (forall m G D, wf_dec m G D -> wf_dec ip G D)
/\ (forall m G Ds, wf_decs m G Ds -> wf_decs ip G Ds)
/\ (forall m G T Ds, exp m G T Ds -> exp ip G T Ds)
/\ (forall m G t D, pth_has m G t D -> pth_has ip G t D)
/\ (forall m G p T, pth_ty m G p T -> pth_ty ip G p T)
/\ (forall m G T1 T2 n, subtyp m G T1 T2 n -> subtyp ip G T1 T2 n)
/\ (forall m G D1 D2 n, subdec m G D1 D2 n -> subdec ip G D1 D2 n)
/\ (forall m G Ds1 Ds2 n, subdecs m G Ds1 Ds2 n -> subdecs ip G Ds1 Ds2 n).
Proof.
apply mutind9; intros; eauto.
Qed.
Lemma pr2ip_ctx: forall m G, wf_ctx m G -> wf_ctx ip G.
Proof.
introv H. induction H.
- auto.
- apply wf_ctx_push; auto. apply* pr2ip.
Qed.
Lemma trm_has_ty_to_pth_has_ty:
(forall G t D, trm_has G t D ->
forall x, t = trm_var (avar_f x) -> pth_has ip G (pth_var (avar_f x)) D)
/\ (forall G t T, ty_trm G t T ->
forall x, t = trm_var (avar_f x) -> pth_ty ip G (pth_var (avar_f x)) T).
Proof.
apply trm_has_ty_mutind; try solve [intros; discriminate]; eauto.
introv Bi Wf Eq. inversions Eq. eauto.
Qed.
Lemma trm_has_to_pth_has: forall G x D,
trm_has G (trm_var (avar_f x)) D ->
pth_has ip G (pth_var (avar_f x)) D.
Proof.
introv Has. apply* trm_has_ty_to_pth_has_ty.
Qed.
(* ###################################################################### *)
(** ** Soundness helper lemmas *)
Lemma has_sound_pr: forall s G x ds D,
wf_sto s G ->
binds x ds s ->
pth_has pr G (pth_var (avar_f x)) D ->
(*ty_defs G (open_defs x ds) (open_decs x Ds) /\ decs_has (open_decs x Ds) l D*)
exists Ds, ty_defs G ds Ds /\ decs_has Ds D.
Proof.
introv Wf Bis Has.
inversion Has as [A1 A2 A3 A4 X' Ds' BiG' Exp' Ds2Has]. subst.
inversions BiG'. rename H2 into BiG'.
lets BiG: (sto_binds_to_ctx_binds Wf Bis). destruct BiG as [Ds BiG].
lets Eq: (binds_func BiG BiG'). subst. clear BiG'.
inversions Exp'.
inversions Ds2Has. rename H0 into Ds2Has.
lets P: (invert_wf_sto Wf Bis BiG). destruct P as [Ds' [Eq0 [G1 [G2 [Eq [Tyds Cb]]]]]].
symmetry in Eq0. inversions Eq0.
lets Ok: (wf_sto_to_ok_G Wf).
exists Ds. split.
- rewrite <- concat_assoc. rewrite <- concat_assoc in Ok.
apply (weaken_ty_defs_end Ok Tyds).
- apply Ds2Has.
Qed.
Lemma has_sound: forall s G x ds D2,
wf_sto s G ->
binds x ds s ->
trm_has G (trm_var (avar_f x)) D2 ->
exists Ds1 D1 n,
(* BIND
ty_defs G (open_defs x ds) (open_decs x Ds1) /\
decs_has (open_decs x Ds1) l D1 /\ *)
ty_defs G ds Ds1 /\
decs_has Ds1 D1 /\
subdec ip G D1 D2 n.
Proof.
introv Wf Bis Has.
apply trm_has_to_pth_has in Has. apply (ip2pr_pth_has Wf) in Has.
destruct Has as [D1 [n [Has Sd]]].
lets P: (has_sound_pr Wf Bis Has). destruct P as [Ds [Tyds DsHas]].
exists Ds D1 n. repeat split; auto. apply* pr2ip.
Qed.
Print Assumptions has_sound.
(* ###################################################################### *)
(** ** Progress *)
Theorem progress_result: progress.
Proof.
introv Wf Ty. gen G e T Ty s Wf.
set (progress_for := fun s e =>
(exists e' s', red e s e' s') \/
(exists x o, e = (trm_var (avar_f x)) /\ binds x o s)).
apply (trm_has_ty_mutind
(fun G e d Has => forall s, wf_sto s G -> progress_for s e)
(fun G e T Ty => forall s, wf_sto s G -> progress_for s e));
unfold progress_for; clear progress_for.
+ (* case has_trm *)
intros. auto.
+ (* case has_var *)
introv Ty IH Exp Has WfD Wf.
right.
lets WfG: (pr2ip_ctx (wf_sto_to_wf_ctx Wf)).
apply (invert_ty_var WfG) in Ty. destruct Ty as [T' [n [St BiG]]].
destruct (ctx_binds_to_sto_binds Wf BiG) as [o Bis].
exists v o. auto.
+ (* case ty_var *)
intros G x T BiG WfT s Wf.
right. destruct (ctx_binds_to_sto_binds Wf BiG) as [o Bis].
exists x o. auto.
+ (* case ty_sel *)
introv Has IH Wf.
left. specialize (IH s Wf). destruct IH as [IH | IH].
- (* receiver is an expression *)
destruct IH as [s' [e' IH]]. do 2 eexists. apply (red_sel1 l IH).
- (* receiver is a var *)
destruct IH as [x [ds [Eq Bis]]]. subst.
lets P: (has_sound Wf Bis Has).
(*********)
destruct P as [Ds1 [D1 [n [Tyds [Ds1Has Sd]]]]].
destruct (decs_has_to_defs_has Tyds Ds1Has) as [d [dsHas Eq]].
apply invert_subdec_fld_sync_left in Sd. destruct Sd as [T0 [Eq1 _]]. subst.
simpl in Eq. destruct d; simpl in Eq; inversions Eq.
exists (trm_var a) s.
apply (red_sel Bis dsHas).
+ (* case ty_call *)
intros G t m U V u Has IHrec Tyu IHarg s Wf. left.
specialize (IHrec s Wf). destruct IHrec as [IHrec | IHrec].
- (* case receiver is an expression *)
destruct IHrec as [s' [e' IHrec]]. do 2 eexists. apply (red_call1 m _ IHrec).
- (* case receiver is a var *)
destruct IHrec as [x [ds [Eq Bis]]]. subst.
specialize (IHarg s Wf). destruct IHarg as [IHarg | IHarg].
* (* arg is an expression *)
destruct IHarg as [s' [e' IHarg]]. do 2 eexists. apply (red_call2 x m IHarg).
* (* arg is a var *)
destruct IHarg as [y [o [Eq Bisy]]]. subst.
lets P: (has_sound Wf Bis Has).
(*********)
destruct P as [Ds1 [D1 [n [Tyds [Ds1Has Sd]]]]].
destruct (decs_has_to_defs_has Tyds Ds1Has) as [d [dsHas Eq]].
apply invert_subdec_mtd_sync_left in Sd. destruct Sd as [U0 [V0 [Eq1 _]]]. subst.
simpl in Eq. destruct d; simpl in Eq; inversions Eq.
exists (open_trm y t1) s.
apply (red_call y Bis dsHas).
+ (* case ty_new *)
intros G ds Ds Tyds Cb s Wf.
left. pick_fresh x.
exists (trm_var (avar_f x)) (s & x ~ ds).
apply* red_new.
+ (* case ty_sbsm *)
intros. auto_specialize. assumption.
Qed.
Print Assumptions progress_result.
(* ###################################################################### *)
(** ** Preservation *)
Theorem preservation_proof:
forall e s e' s' (Hred: red e s e' s') G T (Hwf: wf_sto s G) (Hty: ty_trm G e T),
exists H, wf_sto s' (G & H) /\ ty_trm (G & H) e' T.
Proof.
intros s e s' e' Red. induction Red.
+ (* red_call *)
intros G U3 Wf TyCall. rename H into Bis, H0 into dsHas, T into X1.
exists (@empty typ). rewrite concat_empty_r. apply (conj Wf).
apply invert_ty_call in TyCall.
destruct TyCall as [T2 [U2 [n [Has [StU23 Tyy]]]]].
lets P: (has_sound Wf Bis Has).
(*********)
destruct P as [Ds1 [D1 [n1 [Tyds [Ds1Has Sd]]]]].
apply invert_subdec_mtd_sync_left in Sd.
destruct Sd as [T1 [U1 [Eq [StT StU12]]]]. subst D1.
destruct (invert_ty_mtd_inside_ty_defs Tyds dsHas Ds1Has) as [L0 Tybody].
lets WfG: (pr2ip_ctx (wf_sto_to_wf_ctx Wf)).
apply (invert_ty_var WfG) in Tyy.
destruct Tyy as [T3 [n3 [StT3 Biy]]].
pick_fresh y'.
rewrite* (@subst_intro_trm y' y body).
assert (Fry': y' \notin fv_typ U3) by auto.
assert (Eqsubst: (subst_typ y' y U3) = U3)
by apply* subst_fresh_typ_dec_decs.
rewrite <- Eqsubst.
lets Ok: (wf_sto_to_ok_G Wf).
apply (@trm_subst_principle G y' y (open_trm y' body) T1 _).
(*******************)
- apply wf_ctx_push.
* apply (pr2ip_ctx (wf_sto_to_wf_ctx Wf)).
* destruct (subtyp_regular StT) as [_ WfT1]. exact WfT1.
* auto.
- assert (y'L0: y' \notin L0) by auto. specialize (Tybody y' y'L0).
destruct Tybody as [Tybody [Eq1 Eq2]]. subst X1 U.
apply (@ty_sbsm _ _ U1 U3 (max n n1) Tybody).
apply weaken_subtyp_end. auto.
lets Hle1: (Max.le_max_l n n1).
lets Hle2: (Max.le_max_r n n1).
apply (subtyp_trans (subtyp_max_ctx StU12 Hle2) (subtyp_max_ctx StU23 Hle1)).
- refine (pth_ty_sbsm _ StT). refine (pth_ty_sbsm _ StT3). apply (pth_ty_var _ Biy).
+ (* red_sel *)
intros G T3 Wf TySel. rename H into Bis, H0 into dsHas.
exists (@empty typ). rewrite concat_empty_r. apply (conj Wf).
apply invert_ty_sel in TySel.
destruct TySel as [T2 [n [StT23 Has]]].
lets P: (has_sound Wf Bis Has).
(*********)
destruct P as [Ds1 [D1 [n1 [Tyds [Ds1Has Sd]]]]].
apply invert_subdec_fld_sync_left in Sd.
destruct Sd as [T1 [Eq StT12]]. subst D1.
try refine (ty_sbsm _ StT23).
refine (ty_sbsm _ StT12).
apply (invert_ty_fld_inside_ty_defs Tyds dsHas Ds1Has).
+ (* red_new *)
introv Wf Ty.
apply invert_ty_new in Ty.
destruct Ty as [n [Ds1 [StT12 [Tyds [Cb WfDs]]]]].
exists (x ~ (typ_bind Ds1)).
assert (xG: x # G) by apply* sto_unbound_to_ctx_unbound.
split.
- apply (wf_sto_push Wf H xG Tyds Cb). apply* ip2pr.
apply (wf_sto_to_simple_ctx Wf).
- lets Ok: (wf_sto_to_ok_G Wf). assert (Okx: ok (G & x ~ (typ_bind Ds1))) by auto.
apply (weaken_subtyp_end Okx) in StT12.
refine (ty_sbsm _ StT12). apply ty_var.
* apply binds_push_eq.
* destruct (subtyp_regular StT12) as [W _]. exact W.
(*
+ (* red_new *)
rename T into Ds1. intros G T2 Wf Ty.
apply invert_ty_new in Ty.
destruct Ty as [StT12 [L [Tyds Cb]]].
exists (x ~ (typ_bind Ds1)).
pick_fresh x'. assert (Frx': x' \notin L) by auto.
specialize (Tyds x' Frx').
assert (xG: x # G) by apply* sto_unbound_to_ctx_unbound.
split.
- apply (wf_sto_push _ Wf H xG).
* apply* (@ty_open_defs_change_var x').
* exact Cb. (* was a "meh TODO" before cbounds :-) *)
- lets Ok: (wf_sto_to_ok_G Wf). assert (Okx: ok (G & x ~ (typ_bind Ds1))) by auto.
apply (weaken_subtyp_end Okx) in StT12.
refine (ty_sbsm _ StT12). apply ty_var. apply binds_push_eq.
*)
+ (* red_call1 *)
intros G Tr2 Wf TyCall.
apply invert_ty_call in TyCall.
destruct TyCall as [Ta [Tr1 [n [Has [St Tya]]]]].
apply invert_has in Has.
destruct Has as [Has | Has].
(* case has_trm *) {
destruct Has as [To [Ds [Tyo [Exp [DsHas Clo]]]]].
specialize (IHRed G To Wf Tyo). destruct IHRed as [H [Wf' Tyo']].
lets Ok: (wf_sto_to_ok_G Wf').
exists H. apply (conj Wf').
apply (weaken_subtyp_end Ok) in St.
refine (ty_sbsm _ St).
apply (@ty_call (G & H) o' m Ta Tr1 a).
- refine (has_trm Tyo' _ DsHas Clo _).
* apply (weaken_exp_end Ok Exp).
* lets WfTa: (ty_trm_regular Tya).
apply (weaken_wf_typ_end Ok) in WfTa.
destruct (subtyp_regular St) as [WfTr1 _].
apply (wf_mtd _ WfTa WfTr1).
- apply (weaken_ty_trm_end Ok Tya).
}
(* case has_var *) {
destruct Has as [x [Tx [Ds [D' [Eqx _]]]]]. subst.
inversion Red. (* contradiction: vars don't step *)
}
+ (* red_call2 *)
intros G Tr2 Wf TyCall.
apply invert_ty_call in TyCall.
destruct TyCall as [Ta [Tr1 [n [Has [St Tya]]]]].
specialize (IHRed G Ta Wf Tya).
destruct IHRed as [H [Wf' Tya']].
exists H. apply (conj Wf').
lets Ok: wf_sto_to_ok_G Wf'.
apply (weaken_subtyp_end Ok) in St.
refine (ty_sbsm _ St).
apply (@ty_call (G & H) _ m Ta Tr1 a').
- apply (weaken_trm_has_end Ok Has).
- assumption.
+ (* red_sel1 *)
intros G T2 Wf TySel.
apply invert_ty_sel in TySel.
destruct TySel as [T1 [n [St Has]]].
apply invert_has in Has.
destruct Has as [Has | Has].
- (* case has_trm *)
destruct Has as [To [Ds [Tyo [Exp [DsHas Clo]]]]].
specialize (IHRed G To Wf Tyo). destruct IHRed as [H [Wf' Tyo']].
lets Ok: (wf_sto_to_ok_G Wf').
exists H. apply (conj Wf').
apply (weaken_subtyp_end Ok) in St.
refine (ty_sbsm _ St). apply (@ty_sel (G & H) o' l T1).
refine (has_trm Tyo' _ DsHas Clo _).
* apply (weaken_exp_end Ok Exp).
* destruct (subtyp_regular St) as [W _]. apply (wf_fld _ W).
- (* case has_var *)
destruct Has as [x [Tx [Ds [D' [Eqx _]]]]]. subst.
inversion Red. (* contradiction: vars don't step *)
Qed.
Theorem preservation_result: preservation.
Proof.
introv Hwf Hty Hred.
destruct (preservation_proof Hred Hwf Hty) as [H [Hwf' Hty']].
exists (G & H). split; assumption.
Qed.
Check progress_result.
Print progress.
Print Assumptions progress_result.
Check preservation_result.
Print preservation.
Print Assumptions preservation_result.
|
\documentclass[12pt]{article}
\usepackage[utf8]{inputenc}
\usepackage{graphicx}
\usepackage[a4paper,width=150mm,top=25mm,bottom=25mm]{geometry}
\usepackage{hyperref}
\hypersetup{
colorlinks=true,
linkcolor=blue,
filecolor=magenta,
urlcolor=cyan,
pdftitle={Converting Handwriting into Text},
pdfpagemode=FullScreen,
}
\title{
{\includegraphics[width=3cm, height=2.5cm]{images/faculty_logo.png}}\\
{\large Faculty of Computers and Information}\\
{\textbf{Software Proposal Document for Project \\ Java Android App}}
}
\author{Abdulla Nasser El Metwally \\ \\
Supervised by: Eng. Nada El Madah}
\date{March 23, 2022}
\begin{document}
\maketitle
\begin{abstract}
The main goal of this project is to create an easy-to-use Android app that helps manage tasks through a to-do list.
\end{abstract}
\section{Introduction}
This is the \textbf{Keep} project, where I try to solve a problem that many people face: organizing their tasks so they can be more productive in their lives or careers.
\section{Project Tools}
\subsection{Technologies Used}
\begin{itemize}
\item \textbf{Java : } The main programming language used.
\item \textbf{Android Studio : } Provides the fastest tools for building apps on every type of Android device.
\item \textbf{NetBeans : } The main IDE used: a free, open-source integrated development environment for application development.
\item \textbf{Git : } The most widely used modern version control system in the world today.
\end{itemize}
\section{Project Description}
An Android app to manage tasks, offering ease of use and additional functionality.
\section{Project Management and Deliverable}
\subsection{Team Members}
\hfill
\begin{center}
\begin{tabular}{ |l|c|c| }
\hline
\textbf{Name} & \textbf{Section No.} & \textbf{Email} \\
\hline
\hline
Abdulla Nasser & 9 & \href{mailto:[email protected]}{[email protected]} \\
\hline
\end{tabular}
\end{center}
\end{document}
|
------------------------------------------------------------------------------
-- Discussion about the inductive approach
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
-- Andrés: From our discussion about the inductive approach, can I
-- conclude that it is possible to rewrite the proofs that pattern
-- match on _≡_ as proofs using only subst, because the types
-- associated with these proofs have no proof terms?
-- Peter: Yes, provided the RHS of the definition does not refer to the
-- function being defined, i.e., there is no recursion.
module FOT.FOTC.InductiveApproach.Recursion where
open import Common.FOL.Relation.Binary.EqReasoning
open import FOTC.Base
open import FOTC.Base.PropertiesI
open import FOTC.Data.Nat
open import FOTC.Data.Nat.PropertiesI
------------------------------------------------------------------------------
-- foo is recursive and we pattern match on _≡_.
foo : ∀ {m n} → N m → m ≡ n → N (m + n)
foo nzero refl = subst N (sym (+-leftIdentity zero)) nzero
foo (nsucc {m} Nm) refl = subst N helper (nsucc (nsucc (foo Nm refl)))
where
helper : succ₁ (succ₁ (m + m)) ≡ succ₁ m + succ₁ m
helper =
succ₁ (succ₁ (m + m)) ≡⟨ succCong (sym (+-Sx m m)) ⟩
succ₁ (succ₁ m + m) ≡⟨ succCong (+-comm (nsucc Nm) Nm) ⟩
succ₁ (m + succ₁ m) ≡⟨ sym (+-Sx m (succ₁ m)) ⟩
succ₁ m + succ₁ m ∎
-- foo' is recursive and we only use subst.
foo' : ∀ {m n} → N m → m ≡ n → N (m + n)
foo' {n = n} nzero h = subst N (sym (+-leftIdentity n)) (subst N h nzero)
foo' {n = n} (nsucc {m} Nm) h = subst N helper (nsucc (nsucc (foo' Nm refl)))
where
helper : succ₁ (succ₁ (m + m)) ≡ succ₁ m + n
helper =
succ₁ (succ₁ (m + m)) ≡⟨ succCong (sym (+-Sx m m)) ⟩
succ₁ (succ₁ m + m) ≡⟨ succCong (+-comm (nsucc Nm) Nm) ⟩
succ₁ (m + succ₁ m) ≡⟨ sym (+-Sx m (succ₁ m)) ⟩
succ₁ m + succ₁ m ≡⟨ +-rightCong h ⟩
succ₁ m + n ∎
|
section\<open>Pointermap\<close>
theory Pointer_Map
imports Main
begin
text\<open>
We need a data structure that supports the following two operations:
\begin{itemize}
\item Given an element, it can construct a pointer (i.e., a small representation) of that element. It will always construct the same pointer for equal elements.
\item Given a pointer, it can retrieve the corresponding element.
\end{itemize}
\<close>
record 'a pointermap =
entries :: "'a list"
getentry :: "'a \<Rightarrow> nat option"
definition "pointermap_sane m \<equiv> (distinct (entries m) \<and>
(\<forall>n \<in> {..<length (entries m)}. getentry m (entries m ! n) = Some n) \<and>
(\<forall>p i. getentry m p = Some i \<longrightarrow> entries m ! i = p \<and> i < length (entries m)))"
definition "empty_pointermap \<equiv> \<lparr>entries = [], getentry = \<lambda>p. None \<rparr>"
lemma pointermap_empty_sane[simp, intro!]: "pointermap_sane empty_pointermap" unfolding empty_pointermap_def pointermap_sane_def by simp
definition "pointermap_insert a m \<equiv> \<lparr>entries = (entries m)@[a], getentry = (getentry m)(a \<mapsto> length (entries m))\<rparr>"
definition "pm_pth m p \<equiv> entries m ! p"
definition "pointermap_p_valid p m \<equiv> p < length (entries m)"
definition "pointermap_getmk a m \<equiv> (case getentry m a of Some p \<Rightarrow> (p,m) | None \<Rightarrow> let u = pointermap_insert a m in (the (getentry u a), u))"
lemma pointermap_sane_appendD: "pointermap_sane s \<Longrightarrow> m \<notin> set (entries s) \<Longrightarrow> pointermap_sane (pointermap_insert m s)"
unfolding pointermap_sane_def pointermap_insert_def
proof(intro conjI[rotated],goal_cases)
case 3 thus ?case by simp
next
case 2
{
fix n
have " \<lbrakk>distinct (entries s) \<and> (\<forall>x. x \<in> {..<length (entries s)} \<longrightarrow> getentry s (entries s ! x) = Some x) \<and> (\<forall>p i. getentry s p = Some i \<longrightarrow> entries s ! i = p \<and> i < length (entries s)); m \<notin> set (entries s);
n \<in> {..<length (entries \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr>)}; n < length (entries s)\<rbrakk>
\<Longrightarrow> getentry \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr> (entries \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr> ! n) = Some n"
"\<lbrakk>distinct (entries s) \<and> (\<forall>x. x \<in> {..<length (entries s)} \<longrightarrow> getentry s (entries s ! x) = Some x) \<and> (\<forall>p i. getentry s p = Some i \<longrightarrow> entries s ! i = p \<and> i < length (entries s)); m \<notin> set (entries s);
n \<in> {..<length (entries \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr>)}; \<not> n < length (entries s)\<rbrakk>
\<Longrightarrow> getentry \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr> (entries \<lparr>entries = entries s @ [m], getentry = getentry s(m \<mapsto> length (entries s))\<rparr> ! n) = Some n"
proof(goal_cases)
case 1 note goal1 = 1
from goal1(4) have sa: "\<And>a. (entries s @ a) ! n = entries s ! n" by (simp add: nth_append)
from goal1(1,4) have ih: "getentry s (entries s ! n) = Some n" by simp
from goal1(2,4) have ne: "entries s ! n \<noteq> m" using nth_mem by fastforce
from sa ih ne show ?case by simp
next
case 2 note goal2 = 2
from goal2(3,4) have ln: "n = length (entries s)" by simp
hence sa: "\<And>a. (entries s @ [a]) ! n = a" by simp
from sa ln show ?case by simp
qed
} note h = this
with 2 show ?case by blast
(*apply(unfold Ball_def)
apply(rule)
apply(rule)
apply(rename_tac n)
apply(case_tac "n < length (entries s)")
by(fact h)+*)
next
case 1 thus ?case
by(clarsimp simp add: nth_append fun_upd_same Ball_def) force
qed
lemma luentries_noneD: "getentry s a = None \<Longrightarrow> pointermap_sane s \<Longrightarrow> a \<notin> set (entries s)"
unfolding pointermap_sane_def
proof(rule ccontr, goal_cases)
case 1
from 1(3) obtain n where "n < length (entries s)" "entries s ! n = a" unfolding in_set_conv_nth by blast
with 1(2,1) show False by force
qed
lemma pm_pth_append: "pointermap_p_valid p m \<Longrightarrow> pm_pth (pointermap_insert a m) p = pm_pth m p"
unfolding pointermap_p_valid_def pm_pth_def pointermap_insert_def
by(simp add: nth_append)
lemma pointermap_insert_in: "u = (pointermap_insert a m) \<Longrightarrow> pm_pth u (the (getentry u a)) = a"
unfolding pointermap_insert_def pm_pth_def
by(simp)
lemma pointermap_insert_p_validI: "pointermap_p_valid p m \<Longrightarrow> pointermap_p_valid p (pointermap_insert a m)"
unfolding pointermap_insert_def pointermap_p_valid_def
by simp
thm nth_eq_iff_index_eq
lemma pth_eq_iff_index_eq: "pointermap_sane m \<Longrightarrow> pointermap_p_valid p1 m \<Longrightarrow> pointermap_p_valid p2 m \<Longrightarrow> (pm_pth m p1 = pm_pth m p2) \<longleftrightarrow> (p1 = p2)"
unfolding pointermap_sane_def pointermap_p_valid_def pm_pth_def
using nth_eq_iff_index_eq by blast
lemma pointermap_p_valid_updateI: "pointermap_sane m \<Longrightarrow> getentry m a = None \<Longrightarrow> u = pointermap_insert a m \<Longrightarrow> p = the (getentry u a) \<Longrightarrow> pointermap_p_valid p u"
by(simp add: pointermap_sane_def pointermap_p_valid_def pointermap_insert_def)
lemma pointermap_get_validI: "pointermap_sane m \<Longrightarrow> getentry m a = Some p \<Longrightarrow> pointermap_p_valid p m"
by(simp add: pointermap_sane_def pointermap_p_valid_def)
lemma pointermap_sane_getmkD:
assumes sn: "pointermap_sane m"
assumes res: "pointermap_getmk a m = (p,u)"
shows "pointermap_sane u \<and> pointermap_p_valid p u"
using sn res[symmetric]
apply(cases "getentry m a")
apply(simp_all add: pointermap_getmk_def Let_def split: option.split)
apply(rule)
apply(rule pointermap_sane_appendD)
apply(clarify;fail)+
apply(rule luentries_noneD)
apply(clarify;fail)+
apply(rule pointermap_p_valid_updateI[OF _ _ refl refl])
apply(clarify;fail)+
apply(erule pointermap_get_validI)
by simp
lemma pointermap_update_pthI:
assumes sn: "pointermap_sane m"
assumes res: "pointermap_getmk a m = (p,u)"
shows "pm_pth u p = a"
using assms
apply(simp add: pointermap_getmk_def Let_def split: option.splits)
apply(meson pointermap_insert_in)
apply(clarsimp simp: pointermap_sane_def pm_pth_def)
done
lemma pointermap_p_valid_inv:
assumes "pointermap_p_valid p m"
assumes "pointermap_getmk a m = (x,u)"
shows "pointermap_p_valid p u"
using assms
by(simp add: pointermap_getmk_def Let_def split: option.splits) (meson pointermap_insert_p_validI)
lemma pointermap_p_pth_inv:
assumes pv: "pointermap_p_valid p m"
assumes u: "pointermap_getmk a m = (x,u)"
shows "pm_pth u p = pm_pth m p"
using pm_pth_append[OF pv] u
by(clarsimp simp: pointermap_getmk_def Let_def split: option.splits)
lemma pointermap_backward_valid:
assumes puv: "pointermap_p_valid p u"
assumes u: "pointermap_getmk a m = (x,u)"
assumes ne: "x \<noteq> p"
shows "pointermap_p_valid p m"
(*using u
unfolding pointermap_getmk_def
apply(simp add: Let_def split: option.splits)
prefer 2 using puv apply(simp)
apply(clarify)
apply(simp add: pointermap_insert_def)
using puv apply(clarify)
apply(simp add: pointermap_p_valid_def)
using ne by linarith
*)
using assms
by (auto simp: Let_def pointermap_getmk_def pointermap_p_valid_def pointermap_insert_def split: option.splits)
end
|
theory Ch2ProfTree0
imports Main
begin
(* arithmetic Example *)
datatype exp = Var | Const int | Add exp exp | Mult exp exp
fun eval :: "exp \<Rightarrow> int \<Rightarrow> int" where
"eval Var x = x" |
"eval (Const c) x = c" |
"eval (Add e1 e2) x = eval e1 x + eval e2 x" |
"eval (Mult e1 e2) x = eval e1 x * eval e2 x"
fun build_exp:: "int list \<Rightarrow> exp" where
"build_exp (x #xs) = Add (Const x) (Mult Var (build_exp xs))" |
"build_exp [] = Const 0"
fun evalp :: "int list \<Rightarrow> int \<Rightarrow> int" where
"evalp xs i = eval (build_exp xs) i"
fun array_add :: "int list \<Rightarrow> int list \<Rightarrow> int list " where
"array_add (x # xs ) (y # ys) = (x+ y) # (array_add xs ys)"|
"array_add xs [] = xs" |
"array_add [] ys = ys"
fun array_times :: "int list \<Rightarrow> int list \<Rightarrow> int list" where
"array_times (x # xs ) (y # ys) = x * y # (array_add (array_times xs (y#ys)) (array_times [x] ys))" |
"array_times _ _ = []"
value "array_times [1,1] [1,-1]"
fun coeffs :: "exp \<Rightarrow> int list" where
"coeffs Var = [0,1]" |
"coeffs (Const c ) = [c]" |
"coeffs (Add e1 e2) = array_add (coeffs e1) (coeffs e2)" |
"coeffs (Mult e1 e2) = array_times (coeffs e1) (coeffs e2)"
value "evalp [0, 1] (- 1)"
value "coeffs Var "
lemma "eval (build_exp (array_add (coeffs e1) (coeffs e2))) x = eval e1 x + eval e2 x"
oops
lemma "eval (build_exp (array_times (coeffs e1) (coeffs e2))) x = eval e1 x * eval e2 x"
oops
lemma "evalp (coeffs e) x = eval e x "
apply (induction rule:coeffs.induct)
apply auto
oops
end
|
||| Spec: https://webassembly.github.io/spec/core/syntax/modules.html#indices
module WebAssembly.Structure.Modules.Indices
import WebAssembly.Structure.Values
-- Definition
public export
TypeIdx : Type
TypeIdx = U32
public export
FuncIdx : Type
FuncIdx = U32
public export
TableIdx : Type
TableIdx = U32
public export
MemIdx : Type
MemIdx = U32
public export
GlobalIdx : Type
GlobalIdx = U32
public export
LocalIdx : Type
LocalIdx = U32
public export
LabelIdx : Type
LabelIdx = U32
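-- Usage sketch (hypothetical names; assumes U32 accepts numeric
-- literals, as provided by WebAssembly.Structure.Values):
--
--   startFunc : FuncIdx
--   startFunc = 0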
|
classdef SelectiveSearchSegmentation < handle
%SELECTIVESEARCHSEGMENTATION Selective search segmentation algorithm
%
% The class implements the algorithm described in [uijlings2013selective].
%
% ## References
% [uijlings2013selective]:
% > Jasper RR Uijlings, Koen EA van de Sande, Theo Gevers, and Arnold WM
% > Smeulders. "Selective search for object recognition". International
% > journal of computer vision, 104(2):154-171, 2013.
%
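% ## Example
% A minimal usage sketch (assuming `img` holds an image already loaded,
% e.g. with cv.imread; all methods used are documented below):
%
%     ss = cv.SelectiveSearchSegmentation();
%     ss.setBaseImage(img);
%     ss.switchToSelectiveSearchFast();
%     rects = ss.process();  % Nx4 [x,y,w,h] region proposals
%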
% See also: cv.SelectiveSearchSegmentation.process
%
properties (SetAccess = private)
% Object ID
id
end
%% Constructor/destructor
methods
function this = SelectiveSearchSegmentation()
%SELECTIVESEARCHSEGMENTATION Constructor
%
% obj = cv.SelectiveSearchSegmentation()
%
% See also: cv.SelectiveSearchSegmentation.process
%
this.id = SelectiveSearchSegmentation_(0, 'new');
end
function delete(this)
%DELETE Destructor
%
% obj.delete()
%
% See also: cv.SelectiveSearchSegmentation
%
if isempty(this.id), return; end
SelectiveSearchSegmentation_(this.id, 'delete');
end
end
%% Algorithm
methods
function clear(this)
%CLEAR Clears the algorithm state
%
% obj.clear()
%
% See also: cv.SelectiveSearchSegmentation.empty,
% cv.SelectiveSearchSegmentation.load
%
SelectiveSearchSegmentation_(this.id, 'clear');
end
function b = empty(this)
%EMPTY Checks if detector object is empty
%
% b = obj.empty()
%
% ## Output
% * __b__ Returns true if the detector object is empty (e.g in the
% very beginning or after unsuccessful read).
%
% See also: cv.SelectiveSearchSegmentation.clear,
% cv.SelectiveSearchSegmentation.load
%
b = SelectiveSearchSegmentation_(this.id, 'empty');
end
function save(this, filename)
%SAVE Saves the algorithm parameters to a file
%
% obj.save(filename)
%
% ## Input
% * __filename__ Name of the file to save to.
%
% This method stores the algorithm parameters in the specified
% XML or YAML file.
%
% See also: cv.SelectiveSearchSegmentation.load
%
SelectiveSearchSegmentation_(this.id, 'save', filename);
end
function load(this, fname_or_str, varargin)
%LOAD Loads algorithm from a file or a string
%
% obj.load(fname)
% obj.load(str, 'FromString',true)
% obj.load(..., 'OptionName',optionValue, ...)
%
% ## Input
% * __fname__ Name of the file to read.
% * __str__ String containing the serialized model you want to
% load.
%
% ## Options
% * __ObjName__ The optional name of the node to read (if empty,
% the first top-level node will be used). default empty
% * __FromString__ Logical flag to indicate whether the input is a
% filename or a string containing the serialized model.
% default false
%
% This method reads algorithm parameters from the specified XML or
% YAML file (either from disk or serialized string). The previous
% algorithm state is discarded.
%
% See also: cv.SelectiveSearchSegmentation.save
%
SelectiveSearchSegmentation_(this.id, 'load', fname_or_str, varargin{:});
end
function name = getDefaultName(this)
%GETDEFAULTNAME Returns the algorithm string identifier
%
% name = obj.getDefaultName()
%
% ## Output
% * __name__ This string is used as top level XML/YML node tag
% when the object is saved to a file or string.
%
% See also: cv.SelectiveSearchSegmentation.save,
% cv.SelectiveSearchSegmentation.load
%
name = SelectiveSearchSegmentation_(this.id, 'getDefaultName');
end
end
%% SelectiveSearchSegmentation
methods
function setBaseImage(this, img)
%SETBASEIMAGE Set an image used by switch functions to initialize the class
%
% obj.setBaseImage(img)
%
% ## Input
% * __img__ The image.
%
% See also: cv.SelectiveSearchSegmentation.process
%
SelectiveSearchSegmentation_(this.id, 'setBaseImage', img);
end
function switchToSingleStrategy(this, varargin)
%SWITCHTOSINGLESTRATEGY Initialize the class with the 'Single strategy' parameters
%
% obj.switchToSingleStrategy()
% obj.switchToSingleStrategy('OptionName',optionValue, ...)
%
% ## Options
% * __K__ The k parameter for the graph segmentation. default 200
% * __Sigma__ The sigma parameter for the graph segmentation.
% default 0.8
%
% As described in [uijlings2013selective].
%
% See also: cv.SelectiveSearchSegmentation.switchToSelectiveSearchFast
%
SelectiveSearchSegmentation_(this.id, 'switchToSingleStrategy', varargin{:});
end
function switchToSelectiveSearchFast(this, varargin)
%SWITCHTOSELECTIVESEARCHFAST Initialize the class with the 'Selective search fast' parameters
%
% obj.switchToSelectiveSearchFast()
% obj.switchToSelectiveSearchFast('OptionName',optionValue, ...)
%
% ## Options
% * __BaseK__ The k parameter for the first graph segmentation.
% default 150
% * __IncK__ The increment of the k parameter for all graph
% segmentations. default 150
% * __Sigma__ The sigma parameter for the graph segmentation.
% default 0.8
%
% As described in [uijlings2013selective].
%
% See also: cv.SelectiveSearchSegmentation.switchToSelectiveSearchQuality
%
SelectiveSearchSegmentation_(this.id, 'switchToSelectiveSearchFast', varargin{:});
end
function switchToSelectiveSearchQuality(this, varargin)
%SWITCHTOSELECTIVESEARCHQUALITY Initialize the class with the 'Selective search quality' parameters
%
% obj.switchToSelectiveSearchQuality()
% obj.switchToSelectiveSearchQuality('OptionName',optionValue, ...)
%
% ## Options
% * __BaseK__ The k parameter for the first graph segmentation.
% default 150
% * __IncK__ The increment of the k parameter for all graph
% segmentations. default 150
% * __Sigma__ The sigma parameter for the graph segmentation.
% default 0.8
%
% As described in [uijlings2013selective].
%
% See also: cv.SelectiveSearchSegmentation.switchToSingleStrategy
%
SelectiveSearchSegmentation_(this.id, 'switchToSelectiveSearchQuality', varargin{:});
end
function addImage(this, img)
%ADDIMAGE Add a new image to the list of images to process
%
% obj.addImage(img)
%
% ## Input
% * __img__ The image.
%
% See also: cv.SelectiveSearchSegmentation.clearImages
%
SelectiveSearchSegmentation_(this.id, 'addImage', img);
end
function clearImages(this)
%CLEARIMAGES Clear the list of images to process
%
% obj.clearImages()
%
% See also: cv.SelectiveSearchSegmentation.addImage
%
SelectiveSearchSegmentation_(this.id, 'clearImages');
end
function addGraphSegmentation(this, varargin)
%ADDGRAPHSEGMENTATION Add a new graph segmentation to the list of graph segmentations to process
%
% obj.addGraphSegmentation()
% obj.addGraphSegmentation('OptionName',optionValue, ...)
%
% ## Options
% * __Sigma__ The sigma parameter, used to smooth image.
% default 0.5
% * __K__ The k parameter of the algorithm. default 300
% * __MinSize__ The minimum size of segments. default 100
%
% See also: cv.SelectiveSearchSegmentation.clearGraphSegmentations,
% cv.GraphSegmentation
%
SelectiveSearchSegmentation_(this.id, 'addGraphSegmentation', varargin{:});
end
function clearGraphSegmentations(this)
%CLEARGRAPHSEGMENTATIONS Clear the list of graph segmentations to process
%
% obj.clearGraphSegmentations()
%
% See also: cv.SelectiveSearchSegmentation.addGraphSegmentation,
% cv.GraphSegmentation
%
SelectiveSearchSegmentation_(this.id, 'clearGraphSegmentations');
end
function addStrategy(this, stype, varargin)
%ADDSTRATEGY Add a new strategy to the list of strategies to process
%
% obj.addStrategy(stype)
% obj.addStrategy('Multiple', stype, stype, ...)
%
% ## Input
% * __stype__ The strategy type for the selective search
% segmentation algorithm, one of:
% * __Color__ Color-based strategy.
% * __Size__ Size-based strategy.
% * __Texture__ Texture-based strategy.
% * __Fill__ Fill-based strategy.
% * __Multiple__ Regroup multiple strategies, where all
% sub-strategies have equal weights.
%
% The classes are implemented from the algorithm described in
% [uijlings2013selective].
%
% See also: cv.SelectiveSearchSegmentation.clearStrategies
%
SelectiveSearchSegmentation_(this.id, 'addStrategy', stype, varargin{:});
end
function clearStrategies(this)
%CLEARSTRATEGIES Clear the list of strategies to process
%
% obj.clearStrategies()
%
% See also: cv.SelectiveSearchSegmentation.addStrategy
%
SelectiveSearchSegmentation_(this.id, 'clearStrategies');
end
function rects = process(this)
%PROCESS Based on all images, graph segmentations and strategies, computes all possible rects and returns them
%
% rects = obj.process()
%
% ## Output
% * __rects__ The list of rects as a Nx4 numeric matrix
%   `[x,y,w,h; ...]`. The first ones are more relevant than the
%   last ones.
%
% See also: cv.SelectiveSearchSegmentation.setBaseImage
%
rects = SelectiveSearchSegmentation_(this.id, 'process');
end
end
end
|
function estimation_results = REKF_SLAM(data, NumberOfSteps)
% R-EKF SLAM
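% Usage sketch (hypothetical; the fields follow the code below):
%   load('./data.mat');              % provides the struct `data`
%   results = REKF_SLAM(data, 100);  % estimate the first 100 steps
%   last = results{end};             % fields: orientation, position, cov, landmarks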
% load pre-given data: odometry and observations
if nargin < 1
load('./data.mat');
end
data_matrix = data.state;
odom_sigma = data.odom_sigma;
obsv_sigma = data.obsv_sigma;
% odoCov = data.odom_cov; % constant variable
% obsCov = data.obse_cov; % constant variable
%%%%%%%%%%%%%%%%%%%% Estimation_X is used to save the state in each step %%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%% In every step, all elements of Estimation_X will be changed %%%%%%%%%%%%
Estimation_X.orientation = data.poses.orientation(1:3,1:3);
Estimation_X.position = data.poses.position(:,1);
Estimation_X.cov = sparse(6,6);
Estimation_X.landmarks = []; % the landmarks observed until this step (included), 4*N format, the 4-th row is the index
Estimation_X0.IndexObservedNew=[];
Estimation_X0.IndexObservedAlreadyThis=[];
%%%%%%%%%%%%%%%%%%%% Estimation_X is used to save the state in each step %%%%%%%%%%%%%%%%%%%%
% Initialize
if nargin < 2
NumberOfSteps = max(data_matrix(:,4)); % step instead of pose, hence, it does not include pose 0
elseif NumberOfSteps > max(data_matrix(:,4))
NumberOfSteps = max(data_matrix(:,4));
end
estimation_results = cell(1, NumberOfSteps+1);
estimation_results{1} = Estimation_X;
row_idx = (data_matrix(:, end) <= NumberOfSteps+1);
data_matrix = data_matrix(row_idx, :);
for i = 0:NumberOfSteps
IndexOfCurrentStepInDataMatrix = find(data_matrix(:,4) == i);
m = size(IndexOfCurrentStepInDataMatrix, 1);
if ( mod(i, 50) == 0 )
disp(['Processing pose ', int2str(i)]);
end
if i ~= NumberOfSteps
OdometryFromThis2Next = data_matrix(IndexOfCurrentStepInDataMatrix(m-5):IndexOfCurrentStepInDataMatrix(m),1);
if m > 6
CameraMeasurementThis = [ data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(m-6) , 1 ),...
data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(m-6) , 3 ),...
data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(m-6) , 5 )];
[Estimation_X] = REKF_update(Estimation_X, CameraMeasurementThis, obsv_sigma );
end
estimation_results{i+1} = Estimation_X;
% propagation using odometry info
[Estimation_X] = REKF_propagate(Estimation_X, OdometryFromThis2Next, odom_sigma );
else
if m > 6
CameraMeasurementThis = [ data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(end) , 1 ) , ...
data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(end) , 3 ),...
data_matrix( IndexOfCurrentStepInDataMatrix(1): IndexOfCurrentStepInDataMatrix(end) , 5 )];
CameraMeasurementThis=CameraMeasurementThis(1:end-6,:);
[Estimation_X] = REKF_update(Estimation_X, CameraMeasurementThis, obsv_sigma );
end
estimation_results{i+1} = Estimation_X;
end
end
clearvars -except estimation_results
|
Formal statement is: lemma (in bounded_linear) isUCont: "isUCont f" Informal statement is: If $f$ is a bounded linear operator, then $f$ is uniformly continuous.
|
\documentclass[a4paper, 11pt]{article}
\usepackage{lipsum} %This package just generates Lorem Ipsum filler text.
\usepackage{fullpage} % changes the margin
\usepackage{mathpazo}
\usepackage{multicol}
\usepackage{graphicx, float}
\usepackage{enumerate}
\usepackage{pythonhighlight}
\usepackage{booktabs}
\usepackage{listings}
\usepackage[T1]{fontenc}
\usepackage[english]{babel}
\usepackage{amsmath,amsfonts,amsthm} % Math packages
\usepackage{array}
\begin{document}
%Header-Make sure you update this information!!!!
\noindent
\large\textbf{Chapter 3} \hfill \textbf{Siyuan Feng (516030910575)} \\
\normalsize {\bf CS 391 Computer Networking} \hfill ACM Class, Zhiyuan College, SJTU\\
Prof.~{\bf Yanmin Zhu} \hfill Due Date: October 25, 2018\\
TA.~{\bf Haobing Liu} \hfill Submit Date: \today
\section*{P3}
\paragraph{}
\begin{tabular}{lr}
&01010011\\
+&01100110\\
+&01110100\\
\hline
=&100101101
\end{tabular}
Wrap around the extra bit.
\begin{tabular}{lr}
&00101101\\
+&1\\
\hline
=&00101110
\end{tabular}
Then invert all the bits to get the checksum. Hence, the checksum is 11010001.
The receiver adds the four words (the three original words and the checksum); with no errors, the result is 11111111. If the sum contains a zero bit, the receiver knows there has been an error.
All one-bit errors will be detected, but a two-bit error may go undetected: e.g., if the three words become 01010010, 01100111 and 01110100 (the last bits of the first two words flipped in opposite directions), the sum, and hence the checksum, is unchanged.
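\paragraph{}
For concreteness, here is a small Python sketch (not part of the assignment; the helper name is mine) that reproduces the wrap-around addition and inversion above:
\begin{python}
def ones_complement_checksum(words, bits=8):
    """Sum the words, folding the carry back in, then invert."""
    mask = (1 << bits) - 1
    total = 0
    for w in words:
        total += w
        total = (total & mask) + (total >> bits)  # wrap-around carry
    return ~total & mask  # one's complement

# The three 8-bit words from the exercise:
print(format(ones_complement_checksum([0b01010011, 0b01100110, 0b01110100]), '08b'))
# prints 11010001
\end{python}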
\section*{P7}
\paragraph{}
To best answer this question, consider why we needed sequence numbers in the first place. We saw that the sender needs sequence numbers so that the receiver can tell if a data packet is a duplicate of an already received data packet. In the case of ACKs, the sender does not need this info (i.e., a sequence number on an ACK) to detect a duplicate ACK. A duplicate ACK is obvious to the rdt3.0 sender, since when it received the original ACK it transitioned to the next state. The duplicate ACK is not the ACK that the sender needs and hence is ignored by the rdt3.0 sender.
\section*{P10}
\paragraph{}
Knowing the maximum delay, we set a timeout in the \textit{Wait for ACK or NAK 0} and \textit{Wait for ACK or NAK 1} states. If the sender does not receive an $ACK$ or $NAK$ within that time, we assume that the packet has been lost and send the packet again.
\section*{P17}
\newpage
\section*{P33}
\paragraph{}
Suppose that the sender sends packet $P1$ and then, after a timeout, retransmits it as packet $P2$, reusing the timer started for $P1$. Furthermore, after $P2$ has been sent, the sender receives the acknowledgment for $P1$. The sender may mistake it for an acknowledgment of $P2$ and therefore calculate a wrong $\mathsf{SampleRTT}$.
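\paragraph{}
For reference (this is the standard TCP estimator, quoted here only for context), such a wrong $\mathsf{SampleRTT}$ then pollutes the running estimate
\[
\mathsf{EstimatedRTT} = (1-\alpha)\cdot\mathsf{EstimatedRTT} + \alpha\cdot\mathsf{SampleRTT},
\]
and with it the retransmission timeout derived from it.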
\section*{P40}
\begin{enumerate}[(a)]
\item TCP slow start is in operation during the intervals $[1, 6]$ and $[23, 26]$.
\item TCP congestion avoidance is in operation during the intervals $[6, 16]$ and $[17, 22]$.
\item After the $16^{th}$ round, packet loss is detected by a triple duplicate ACK. (Had it been a timeout, the window size would have dropped to 1.)
\item After the $22^{nd}$ round, segment loss is detected by a timeout, so the window size drops to 1.
\item The threshold is initially set to 32.
\item The threshold is set to half the value of the congestion window when loss is detected. When loss occurs at round 16, the congestion window size is 42, so the threshold becomes 21.
\item When loss occurs at round 22, the congestion window size is 29, so the threshold becomes 14.
\item As the table below shows, packet 70 is sent in the 7th round. \\
\begin{tabular}{c c}
packet & round \\
1 & 1 \\
2 - 3 & 2\\
4 - 7 & 3\\
8 - 15 & 4\\
16 - 31 & 5 \\
32 - 63 & 6 \\
64 - 95 & 7 \\
\end{tabular}
\item The new values of the threshold and the window will be 4 and 7.
\item The threshold is 21 and the window size is 1.
\item The total number of packets sent is 52, as computed below. \\
\begin{tabular}{c c}
round & packet number \\
17 & 1 \\
18 - 3 & 18\\
19 - 7 & 19\\
20 - 15 & 20\\
21 - 31 & 16 \\
22 - 63 & 21 \\
\end{tabular}
\end{enumerate}
\section*{P42}
\paragraph{}
TCP uses pipelining, that is, the sender is able to send multiple segments at once. The doubling of the timeout interval prevents a sender from retransmitting too many packets.
\section*{P50}
\begin{table*}[!htp]
\centering
\begin{tabular}{|c|p{3cm}<{\centering}|p{3cm}<{\centering}|p{3cm}<{\centering}|p{3cm}<{\centering}|}
\hline
Time(msec) & Window Size of C1 & Speed of C1 (wins / 0.05) & Window Size of C2 & Speed of C2 (wins / 0.1)\\
\hline
0 & 10 & 200 & 10 & 100 \\
\hline
50 & 5 & 100 & & 100 \\
\hline
100 & 2 & 40 & 5 & 50 \\
\hline
150 & 1 & 20 & & 50 \\
\hline
200 & 1 & 20 & 2 & 20 \\
\hline
250 & 1 & 20 & &20 \\
\hline
300 & 1 & 20 & 1 & 10 \\
\hline
350 & 2 & 40 & & 10 \\
\hline
400 & 1 & 20 & 1 & 10 \\
\hline
450 & 2 & 40 & & 10 \\
\hline
500 & 1 & 20 & 1 & 10 \\
\hline
550 & 2 & 40 & & 10 \\
\hline
... & ... & ... & ... & ... \\
\hline
\end{tabular}
\end{table*}
No; in the long run, the sending rate of $C1$ is $(40 + 20 + 40 + 20) = 120$ while the sending rate of $C2$ is $(10 + 10 + 10 + 10) = 40$.
\end{document}
|
Volunteer practitioners needed to treat an underserved and culturally diverse adult population in Bergen County.
Did you know that as many as 45,000 Bergen County residents have no health insurance and no access to healthcare? Some of these people end up in the ER for common ailments that can and should be treated by a primary care physician.
Bergen Volunteer Medical Initiative (BVMI), a healthcare facility in Hackensack, is trying to keep some of these people out of the ER by providing free, ongoing primary care to residents of Bergen County. Our volunteer practitioners treat an underserved and culturally diverse adult population. The healthcare center has seen a huge uptick in patient visits and is actively seeking new volunteers.
Volunteer nurse interactions with individual patients include taking vital signs, reviewing chief complaint, past medical history, medication review, education and discharge instructions. Charting on the electronic medical record is required. The nurse will be working one-on-one with an individual physician or nurse practitioner in care of the patient.
Volunteering here is a win-win-win situation - you will have a meaningful experience, the patients get quality care and all hospitals in Bergen County will see less traffic in the ER. Please note that all nurse volunteers will need to be credentialed and approved by our board - which is usually a seamless process.
|
[STATEMENT]
lemma order_0_monom [simp]: "c \<noteq> 0 \<Longrightarrow> order 0 (monom c n) = n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<noteq> (0::'a) \<Longrightarrow> order (0::'a) (monom c n) = n
[PROOF STEP]
using order_power_n_n[of 0 n]
[PROOF STATE]
proof (prove)
using this:
order (0::?'b1) ([:- (0::?'b1), 1::?'b1:] ^ n) = n
goal (1 subgoal):
1. c \<noteq> (0::'a) \<Longrightarrow> order (0::'a) (monom c n) = n
[PROOF STEP]
by (simp add: monom_altdef order_smult)
|
theory Framed_Links
imports Links
begin
(* Defining framed link relations *)
definition framed_linkrel::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel x y = ((framed_linkrel_uncross x y) ∨ (linkrel_pull x y) ∨ (linkrel_straighten x y)
∨(linkrel_swing x y)∨(linkrel_rotate x y) ∨ (linkrel_compress x y) ∨ (linkrel_slide x y)
∨ (framed_linkrel_uncross y x) ∨ (linkrel_pull y x) ∨ (linkrel_straighten y x)
∨(linkrel_swing y x)∨(linkrel_rotate y x) ∨ (linkrel_compress y x) ∨ (linkrel_slide y x))"
text{*The following lemma tells us that the framed link relation implies the link relation*}
lemma framed_linkrel_implies_linkrel: "(framed_linkrel x y) ⟹ (linkrel x y)"
using framed_uncross_implies_uncross framed_linkrel_def linkrel_def by auto
text{*The following lemma tells us that the framed link relation is symmetric*}
lemma framed_linkrel_symp: "symp framed_linkrel" unfolding framed_linkrel_def symp_def by auto
text{*The following definitions are used to construct an equivalence of links*}
definition framed_linkrel_diagram_left::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_left x y ≡ ∃A.∃B.∃C.((x = ((A::walls) ⊗ B))∧ (y = (C ⊗ B))
∧ (framed_linkrel A C))"
definition framed_linkrel_diagram_right::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_right x y ≡ ∃A.∃B.∃C.((x = (A ⊗ B))∧ (y = (A ⊗ C))
∧ (framed_linkrel B C))"
definition framed_linkrel_diagram_center::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_center x y ≡ ∃A.∃B1.∃B2.∃C.((x = (A ∘ (B1::walls) ⊗ C))
∧ (y = (A ∘ (B2::walls) ⊗ C))
∧ (framed_linkrel B1 B2))"
definition framed_linkrel_diagram_middle_center::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_middle_center x y ≡ ∃A.∃B.∃C1.∃C2.∃D.∃E.((x = (A ∘ (B::walls) ⊗ C1 ⊗ D ∘ E))
∧ (y = (A ∘ (B::walls) ⊗ C2 ⊗ D ∘ E))
∧ (framed_linkrel C1 C2))"
definition framed_linkrel_diagram_middle_left:: "walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_middle_left x y ≡ ∃A.∃B1.∃B2.∃C.∃D.((x = (A ∘ ((B1::walls)⊗C) ∘D))
∧ (y = (A ∘ ((B2::walls) ⊗ C) ∘ D))
∧ (framed_linkrel B1 B2))"
definition framed_linkrel_diagram_middle_right::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_middle_right x y ≡ ∃A.∃B.∃C1.∃C2.∃D.((x = (A ∘ (B::walls)⊗C1∘ D))
∧ (y = (A ∘ (B::walls) ⊗ C2 ∘ D))
∧ (framed_linkrel C1 C2))"
definition framed_linkrel_diagram_bottom_left::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_bottom_left x y ≡ ∃A1.∃A2.∃B.∃C.((x = (((A1::walls) ⊗ B) ∘ C))
∧((y = (((A2::walls) ⊗ B) ∘ C)))
∧(framed_linkrel A1 A2))"
definition framed_linkrel_diagram_bottom_right::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_bottom_right x y ≡ ∃A.∃B1.∃B2.∃C.((x = (((A::walls) ⊗ B1) ∘ C))
∧((y = (((A::walls) ⊗ B2) ∘ C)))
∧(framed_linkrel B1 B2))"
definition framed_linkrel_diagram_bottom_center::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_bottom_center x y ≡ ∃A.∃B1.∃B2.∃C.∃D.((x = (((A::walls) ⊗ B1 ⊗ C) ∘ D))
∧((y = (((A::walls) ⊗ B2 ⊗ C) ∘ D)))
∧(framed_linkrel B1 B2))"
definition framed_linkrel_diagram_top_left::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_top_left x y ≡ ∃A.∃B1.∃B2.∃C.((x = (A ∘ ((B1::walls) ⊗ C)))
∧(y = (A ∘ ((B2::walls) ⊗ C)))
∧(framed_linkrel B1 B2))"
definition framed_linkrel_diagram_top_right::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_top_right x y ≡ ∃A.∃B.∃C1.∃C2.((x = (A ∘((B::walls) ⊗ C1)))
∧(y = (A ∘ (B ⊗ C2)))
∧(framed_linkrel C1 C2))"
definition framed_linkrel_diagram_top_center::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_top_center x y ≡ ∃A.∃B.∃C1.∃C2.∃D.((x = (A ∘ ((B::walls) ⊗ C1 ⊗ D)))
∧(y = (A ∘ (B ⊗ C2 ⊗ D)))
∧(framed_linkrel C1 C2))"
(*Framed_linkrel_diagram is the generating relation between two given Links*)
definition framed_linkrel_diagram::"walls ⇒ walls ⇒ bool"
where
"framed_linkrel_diagram x y ≡ (
(framed_linkrel_diagram_left x y)∨(framed_linkrel_diagram_right x y) ∨ (framed_linkrel_diagram_center x y)
∨(framed_linkrel_diagram_middle_left x y)∨(framed_linkrel_diagram_middle_right x y)
∨(framed_linkrel_diagram_middle_center x y)
∨(framed_linkrel_diagram_bottom_left x y)∨(framed_linkrel_diagram_bottom_right x y)
∨(framed_linkrel_diagram_bottom_center x y)
∨(framed_linkrel_diagram_top_left x y)∨(framed_linkrel_diagram_top_right x y)∨(framed_linkrel_diagram_top_center x y)
)"
(*proving the symmetry of framed_linkrel_diagram*)
lemma framed_symm_left:"(framed_linkrel_diagram_left x y) ⟹ (framed_linkrel_diagram_left y x)"
using framed_linkrel_def framed_linkrel_diagram_left_def by auto
lemma framed_symm_right: "(framed_linkrel_diagram_right x y) ⟹ (framed_linkrel_diagram_right y x)"
using framed_linkrel_def framed_linkrel_diagram_right_def by auto
lemma framed_symm_center:"(framed_linkrel_diagram_center x y) ⟹ (framed_linkrel_diagram_center y x)"
using framed_linkrel_def framed_linkrel_diagram_center_def by auto
lemma framed_symm_middle_right:
"(framed_linkrel_diagram_middle_right x y) ⟹ (framed_linkrel_diagram_middle_right y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_middle_right_def
by (metis linkrel_symp sympD)
lemma framed_symm_middle_left:
"(framed_linkrel_diagram_middle_left x y) ⟹ (framed_linkrel_diagram_middle_left y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_middle_left_def
by (metis linkrel_symp sympD)
lemma framed_symm_middle_center:
"(framed_linkrel_diagram_middle_center x y) ⟹ (framed_linkrel_diagram_middle_center y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_middle_center_def
by (metis linkrel_symp sympD)
lemma framed_symm_bottom_left:
"(framed_linkrel_diagram_bottom_left x y) ⟹ (framed_linkrel_diagram_bottom_left y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_bottom_left_def
by (metis linkrel_symp sympD)
lemma framed_symm_bottom_right:
"(framed_linkrel_diagram_bottom_right x y) ⟹ (framed_linkrel_diagram_bottom_right y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_bottom_right_def
by (metis linkrel_symp sympD)
lemma framed_symm_bottom_center:
"(framed_linkrel_diagram_bottom_center x y) ⟹ (framed_linkrel_diagram_bottom_center y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_bottom_center_def
by (metis linkrel_symp sympD)
lemma framed_symm_top_left:
"(framed_linkrel_diagram_top_left x y) ⟹ (framed_linkrel_diagram_top_left y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_top_left_def
by (metis linkrel_symp sympD)
lemma framed_symm_top_right:
"(framed_linkrel_diagram_top_right x y) ⟹ (framed_linkrel_diagram_top_right y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_top_right_def
by (metis linkrel_symp sympD)
lemma framed_symm_top_center:
"(framed_linkrel_diagram_top_center x y) ⟹ (framed_linkrel_diagram_top_center y x)"
unfolding framed_linkrel_def framed_linkrel_diagram_top_center_def
by (metis linkrel_symp sympD)
lemma symm_framed_linkrel_diagram:
"(framed_linkrel_diagram x y)⟹ (framed_linkrel_diagram y x)"
unfolding framed_linkrel_diagram_def using framed_symm_bottom_center framed_symm_bottom_left
framed_symm_bottom_right framed_symm_center framed_symm_left framed_symm_middle_center
framed_symm_middle_left framed_symm_middle_right framed_symm_right
framed_symm_top_center framed_symm_top_left framed_symm_top_right
by metis
(*framed_linkrel_diagram_equiv is the equivalence relation on walls generated by framed_linkrel_diagram*)
definition framed_linkrel_diagram_equiv::"walls⇒ walls ⇒ bool"
where
"framed_linkrel_diagram_equiv = (framed_linkrel_diagram)^**"
(*Proof that the Framed_Linkrel_Diagram_Equiv is symmetric*)
lemma symm_framed_linkrel_diagram_equiv:
"framed_linkrel_diagram_equiv x y ⟹ (framed_linkrel_diagram_equiv y x)"
using framed_linkrel_diagram_equiv_def symm_framed_linkrel_diagram symmetry3 symp_def
by metis
lemma transitive_implication:
assumes " ∀x.∀y.((r x y) ⟶(q x y))"
shows "r^** x y ⟹ q^** x y"
proof(induction rule:rtranclp.induct)
fix a
let ?case = "q⇧*⇧* a a"
show ?case by simp
next
fix a b c
assume rtranclp : "r^** a b" "r b c" "q^** a b"
let ?case = "q^** a c"
have "(r b c)⟹ (q b c)" using assms by auto
from this have "q b c" using assms rtranclp by auto
from this have "q^** a c" using rtranclp(3) rtranclp.rtrancl_into_rtrancl by auto
thus ?case by simp
qed
definition framed_link_equiv::"diagram⇒ diagram ⇒ bool"
where
"framed_link_equiv x y = (framed_linkrel_diagram_equiv (Rep_diagram x) (Rep_diagram y))"
lemma ref: "(framed_link_equiv x x)"
unfolding framed_link_equiv_def framed_linkrel_diagram_equiv_def by auto
lemma sym:"(framed_link_equiv x y) ⟹ (framed_link_equiv y x)"
unfolding framed_link_equiv_def using symm_framed_linkrel_diagram_equiv by auto
lemma trans:"(framed_link_equiv x y)∧ (framed_link_equiv y z) ⟹ (framed_link_equiv x z)"
using framed_link_equiv_def framed_linkrel_diagram_equiv_def rtranclp_trans by (metis)
quotient_type Framed_Link = "diagram" / "framed_link_equiv"
morphisms Rep_framed_links Abs_framed_links
proof (rule equivpI)
show "reflp framed_link_equiv"
using reflp_def framed_link_equiv_def framed_linkrel_diagram_equiv_def rtranclp.rtrancl_refl
by (metis (full_types))
show "symp framed_link_equiv"
using framed_link_equiv_def symm_framed_linkrel_diagram_equiv symp_def
by (metis)
show "transp framed_link_equiv"
using trans unfolding framed_link_equiv_def transp_def by metis
qed
(*It remains to be proved that Framed_Link_Equiv implies Link_Equiv*)
(*Some junk code that needs to be revisited -
(* we need to rewrite the definitions of framed_link_relations and repeat the above for framed_link_relations
definition framed_linkrel::"walls =>walls⇒bool"
where
"framed_linkrel x y = ((framed_linkrel_uncross x y) ∨ (linkrel_pull x y) ∨ (linkrel_straighten x y)
∨(linkrel_swing x y)∨(linkrel_rotate x y) ∨ (linkrel_compress x y) ∨ (linkrel_slide x y)
∨ (framed_linkrel_uncross y x) ∨ (linkrel_pull y x) ∨ (linkrel_straighten y x)
∨(linkrel_swing y x)∨(linkrel_rotate y x) ∨ (linkrel_compress y x) ∨ (linkrel_slide y x))"
text{*Following lemmas asserts that if two framed linked diagrams are equivalent, then the unframed
links are equivalent*}
lemma framed_linkrel_implies_linkrel: "(framed_linkrel x y) ⟹ (linkrel x y)"
using framed_uncross_implies_uncross framed_linkrel_def linkrel_def by auto
text{* the link relations are symmetric*}
lemma linkrel_symp: "symp linkrel" unfolding linkrel_def symp_def by auto
lemma framed_linkrel_symp: "symp framed_linkrel" unfolding framed_linkrel_def symp_def by auto
definition framed_linkrel_equiv::"walls⇒walls⇒bool"
where
"(framed_linkrel_equiv) = (framed_linkrel)^**"
text{*Following lemmas assert that if two framed link diagrams are related by the linkrel_equiv, then
the corresponding link diagrams are equivalent*}
lemma transitive_implication:
assumes " ∀x.∀y.((r x y) ⟶(q x y))"
shows "r^** x y ⟹ q^** x y"
proof(induction rule:rtranclp.induct)
fix a
let ?case = "q⇧*⇧* a a"
show ?case by simp
next
fix a b c
assume rtranclp : "r^** a b" "r b c" "q^** a b"
let ?case = "q^** a c"
have "(r b c)⟹ (q b c)" using assms by auto
from this have "q b c" using assms rtranclp by auto
from this have "q^** a c" using rtranclp(3) rtranclp.rtrancl_into_rtrancl by auto
thus ?case by simp
qed
theorem framed_equiv_implies_linkequiv: "(framed_linkrel_equiv x y) ⟹ (linkrel_equiv x y)"
using framed_linkrel_equiv_def linkrel_equiv_def transitive_implication
framed_linkrel_implies_linkrel
by metis
text{*Linkrel_equiv and Framed_Linkrel_equiv are equivalence relations*}
lemma reflective: "linkrel_equiv x x" unfolding linkrel_equiv_def by simp
lemma framed_reflective: "framed_linkrel_equiv x x" unfolding framed_linkrel_equiv_def by simp
lemma link_symmetry:"symp linkrel_equiv" using linkrel_symp symmetry3
by (metis (full_types) linkrel_equiv_def)
lemma link_symmetry2:"(linkrel_equiv x y)⟹ (linkrel_equiv y x)" using link_symmetry sympD
by metis
lemma framed_link_symmetry:"symp framed_linkrel_equiv" using framed_linkrel_symp symmetry3
by (metis (full_types) framed_linkrel_equiv_def)
(*following lemma proves that linkrel_equiv is transitive in the usual sense of the term*)
lemma linkrel_trans: assumes "linkrel_equiv x y" and "linkrel_equiv y z"
shows "linkrel_equiv x z"
using rtranclp_trans linkrel_equiv_def by (metis (full_types) assms(1) assms(2))
*)*)
end
|
% MatrixUser, a multi-dimensional matrix analysis software package
% https://sourceforge.net/projects/matrixuser/
%
% The MatrixUser is a matrix analysis software package developed under Matlab
% Graphical User Interface Developing Environment (GUIDE). It features
% functions that are designed and optimized for working with multi-dimensional
% matrix under Matlab. These functions typically includes functions for
% multi-dimensional matrix display, matrix (image stack) analysis and matrix
% processing.
%
% Author:
% Fang Liu <[email protected]>
% University of Wisconsin-Madison
% Aug-30-2014
% display montage
function MU_dispMontage(Temp,Event,Disp_Matrix,handles)
% Montage slice selection checking
if str2num(get(handles.Sli_from,'String'))>str2num(get(handles.Sli_to,'String'))
errordlg('No slice is chosen for creating montage !');
return;
end
if str2num(get(handles.Sli_from,'String'))<1
set(handles.Sli_from,'String','1')
return;
end
if str2num(get(handles.Sli_to,'String'))>handles.V.Layer
set(handles.Sli_to,'String',num2str(handles.V.Layer))
return;
end
% Montage layout initialization
[Row,Column,Layer]=size(Disp_Matrix);
ind=0;
b_flag=0;
for i=1:str2num(get(handles.Mont_row,'String'))
for j=1:str2num(get(handles.Mont_col,'String'))
DMatrix((i-1)*Row+1:i*Row,(j-1)*Column+1:j*Column)=Disp_Matrix(:,:,str2num(get(handles.Sli_from,'String'))+ind);
ind=ind+1;
if ind>str2num(get(handles.Sli_to,'String'))-str2num(get(handles.Sli_from,'String'))
b_flag=1;
break;
end
end
if b_flag==1
break;
end
end
%-------------------------Windows Contrast Change
Color_map=handles.V.Color_map;
Contrast_Low=handles.V.C_lower;
Contrast_High=handles.V.C_upper;
Contrast_Change=0;
point=[0 0];
point2=[0 0];
Contrast_Interval=(Contrast_High-Contrast_Low)/100;
%-------------------------End
delete(handles.Cre_Mont);
figure ('KeyReleaseFcn',@WindowKeyRelease,'KeyPressFcn',@WindowKeyPress, 'WindowScrollWheelFcn',@MouseScrollWheel,...
'WindowButtonUpFcn',@MouseUp,'WindowButtonDownFcn', @MouseClick,'Name',['Display ' handles.V.Current_matrix ' Montage']);
imagesc(DMatrix,[Contrast_Low Contrast_High]);
colormap(Color_map);
colorbar;
function MouseScrollWheel (Temp, Event)
if Contrast_Change==-1
Contrast_Low=Contrast_Low + Event.VerticalScrollCount*Contrast_Interval;
end
if Contrast_Change==1
Contrast_High=Contrast_High + Event.VerticalScrollCount*Contrast_Interval;
end
imagesc (DMatrix,[Contrast_Low Contrast_High]);
colormap(Color_map);
colorbar;
end
function MouseClick(Temp,Event)
tpoint=get(gca,'currentpoint');
point=[round(tpoint(1)),round(tpoint(3))];
end
function MouseUp(Temp,Event)
tpoint2=get(gca,'currentpoint');
point2=[round(tpoint2(1)),round(tpoint2(3))];
if sum(abs(point2-point))~=0
rectangle('Position',[point(1),point(2),point2(1)-point(1),point2(2)-point(2)],'EdgeColor',[0 1 0]);
DispS=DMatrix(point(2):point2(2),point(1):point2(1));
DispS=DispS(:);
tmean=mean(double(DispS));
tstd=std(double(DispS));
xlabel(['ROI mean:' num2str(tmean) ' ROI std:' num2str(tstd) ' RSD(%):' num2str(abs(tstd./tmean)*100)],'FontSize',18);
end
end
function WindowKeyPress(Temp,Event)
%------------------------------Windows Contrast Change
if Event.Character=='l'
Contrast_Change=-1;
elseif Event.Character=='u'
Contrast_Change=1;
end
%------------------------------End
end
function WindowKeyRelease(Temp,Event)
Contrast_Change=0;
end
end
|
using AlgebraPDF
using Test
using Random
using Measurements
@testset "iminuit should be imported and have :Minuit" begin
@test isiminuitimported()
@test hasproperty(AlgebraPDF.iminuit, :Minuit)
end
@testset "Fit gauss with Minuit" begin
d0 = Normalized(FGauss((μ=1.1, σ=0.3)), (-3, 5))
Random.seed!(1234)
data = randn(1000)
fs0 = fit(d0, data, MigradAndHesse())
@test keys(fs0.parameters) == (:μ,:σ)
@test typeof(Tuple(fs0.measurements)) <: Tuple{Vararg{Measurement}}
d1 = fixpar(Normalized(FGauss(Ext(μ=1.1, σ=0.9)), (-3, 5)), :σ)
fs1 = fit(d1, data, MigradAndHesse())
@test keys(fs1.parameters) == (:μ,)
@test pars(fs1.best_model).σ == 0.9
end
@testset "Extended Likelihood fit" begin
d = Normalized(FGauss((μ=1.1,σ=0.1)), (-2, 3))
Random.seed!(4321)
data = filter(x->inrange(x,lims(d)), randn(1000) .+ 0.5)
fr = fit(d, data)
@show fr.parameters.μ
@show abs(fr.parameters.σ)
@test 0.3 < fr.parameters.μ < 0.7
@test 0.7 < abs(fr.parameters.σ) < 1.3
s = FSum([d],(α=2.2,))
@test integral(s) == 2.2
@test s(1.1) == 2.2*d(1.1)
@test s([1,2,3]) == 2.2*d([1,2,3])
enll = Extended(NegativeLogLikelihood(s, data))
fr2 = fit(enll)
@test 0.3 < fr2.parameters.μ < 0.7
@test 0.7 < abs(fr2.parameters.σ) < 1.3
@test 900 < fr2.parameters.α < 1100
end
#
# Optim often fails with `isfinite(phi_c) && isfinite(dphi_c)`
# removed from the tests
#
@testset "Fit gauss with Optim" begin
d0 = Normalized(FGauss((μ=0.01, σ=0.9)), (-3, 5))
Random.seed!(1234)
data = randn(1000)
fs0 = fit(d0, data, BFGSApproxHesse())
@test keys(fs0.parameters) == (:μ,:σ)
@test typeof(Tuple(fs0.measurements)) <: Tuple{Vararg{Measurement}}
d1 = fixpar(Normalized(FGauss(Ext(μ=0.01, σ=0.9)), (-3, 5)), :σ)
fs1 = fit(d1, data, BFGSApproxHesse())
@test keys(fs1.parameters) == (:μ,)
@test pars(fs1.best_model).σ == 0.9
end
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.init_weights import init_weights, normalized_columns_initializer
class Controller(nn.Module):
def __init__(self, args):
super(Controller, self).__init__()
# logging
self.logger = args.logger
# general params
self.use_cuda = args.use_cuda
self.dtype = args.dtype
# params
self.batch_size = args.batch_size
self.input_dim = args.input_dim
self.read_vec_dim = args.read_vec_dim
self.output_dim = args.output_dim
self.hidden_dim = args.hidden_dim
self.mem_hei = args.mem_hei
self.mem_wid = args.mem_wid
self.clip_value = args.clip_value
def _init_weights(self):
raise NotImplementedError("not implemented in base class")
def print_model(self):
self.logger.warning("<--------------------------------===> Controller:")
self.logger.warning(self)
def _reset_states(self):
# we reset controller's hidden state
self.lstm_hidden_vb = (Variable(self.lstm_hidden_ts[0]).type(self.dtype),
Variable(self.lstm_hidden_ts[1]).type(self.dtype))
def _reset(self): # NOTE: should be called at each child's __init__
self._init_weights()
self.type(self.dtype) # put on gpu if possible
self.print_model()
# reset internal states
self.lstm_hidden_ts = []
self.lstm_hidden_ts.append(torch.zeros(self.batch_size, self.hidden_dim))
self.lstm_hidden_ts.append(torch.zeros(self.batch_size, self.hidden_dim))
self._reset_states()
def forward(self, input_vb):
raise NotImplementedError("not implemented in base class")
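# A minimal sketch (not part of the original code) of how a concrete controller
# might subclass the Controller base class above. The class name and layer choice
# are hypothetical; only the base-class hooks (_init_weights, _reset, forward) and
# the attributes set in __init__ come from the code above.
#
# class LSTMController(Controller):
#     def __init__(self, args):
#         super(LSTMController, self).__init__(args)
#         # input is the external input concatenated with the memory read vectors
#         self.lstm = nn.LSTMCell(self.input_dim + self.read_vec_dim, self.hidden_dim)
#         self._reset()  # per the NOTE above: called at each child's __init__
#
#     def _init_weights(self):
#         self.apply(init_weights)  # assumes init_weights acts on a module
#
#     def forward(self, input_vb):
#         self.lstm_hidden_vb = self.lstm(input_vb, self.lstm_hidden_vb)
#         return self.lstm_hidden_vb[0]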
|
Formal statement is: lemma Lim_linear: fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space" and h :: "'b \<Rightarrow> 'c::real_normed_vector" assumes "(f \<longlongrightarrow> l) F" "linear h" shows "((\<lambda>x. h(f x)) \<longlongrightarrow> h l) F" Informal statement is: If $f$ converges to $l$ and $h$ is linear, then $h(f)$ converges to $h(l)$.
|
import numpy as np
import scipy.sparse as ss
import logging
import time
import warnings
from .feature_selection import get_significant_genes
from .feature_selection import calculate_minmax
warnings.simplefilter("ignore")
logging.basicConfig(format='%(process)d - %(levelname)s : %(asctime)s - %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def run_CDR_analysis(data, phenotype, capvar = 0.95, pernum = 2000, thres = 0.05):
"""Main CDR-g analysis function
The key step in CDR-g is an SVD-decomposition on gene co-expression matrices.
Depending on the sequencing platform, this SVD step can produce thousands of
factor loadings. By default, CDR-g selects the number of factor loadings that
captures 95% of the variance in the dataset.
Args:
data (anndata): anndata object of interest
phenotype (str): condition of interest
capvar (float, optional): specifies the number of factor loadings to examine. Defaults to 0.95.
pernum (int, optional): number of permutations to determine importance score. Defaults to 2000.
thres (float, optional): cut-off for permutation importance to select genes. Defaults to 0.05.
"""
start = time.time()
cell_num = data.X.shape[0]
gene_num = data.X.shape[1]
logger.info('processing dataset of %s genes X %s cells', gene_num, cell_num)
logger.info('target class label:: %s', phenotype)
logger.info("SVD and threshold selection")
res = pvalgenerator(data, phenotype, capvar)
logger.info("completed SVD and varimax")
logger.info("permutation testing for gene sets:: perms:: %s threshold :: %s", pernum, thres)
npheno= data.uns["n_pheno"]
#get_significant_genes_perms(data, npheno, permnum = pernum, thres = thres)
get_significant_genes(data, npheno, permnum = pernum, thres = thres)
logger.info("computed thresholds for gene selection")
end = time.time()
timediff = end - start
numfact = data.uns["selected_loading"]
logger.info('N factor loadings:: %s', numfact)
logger.info('wall clock time in seconds:: %s', timediff)
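# Example usage (a minimal sketch, not part of the original package): the AnnData
# object `adata` and the obs column name "condition" are hypothetical placeholders.
#
#   import anndata as ad
#   adata = ad.read_h5ad("my_dataset.h5ad")
#   run_CDR_analysis(adata, "condition", capvar=0.95, pernum=2000, thres=0.05)
#   # results are written back into adata.uns, e.g.:
#   n_loadings = adata.uns["selected_loading"]
#   rotated_loadings = adata.uns["Fs"]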
def dask_ver(matrixlist, capvar):
"""provides svd and concatenation with dask"""
import dask.array as da
from dask_ml.decomposition import TruncatedSVD
if ss.issparse(matrixlist[0]):
list_of_mats_as_dask_arrays = [da.from_array(np.array(d.todense())) for d in matrixlist]
else:
list_of_mats_as_dask_arrays = [da.from_array(d) for d in matrixlist]
list_of_corr_mats = [da.corrcoef(d) for d in list_of_mats_as_dask_arrays]
X = da.concatenate(list_of_corr_mats, axis=1)
X[da.isnan(X)] = 0.0
_, y, Ek, Ss = get_optimal_threshold(X, capvar)
#Ek = svd.components_
#Ss = svd.singular_values_
return Ek, Ss, X, y
def process_svd_to_factors(Ek, Ss, N_k):
"""function for rotation and flips"""
Ek = Ek.T
ind = np.argsort(Ss)[::-1]
Ss = Ss[ind]
Ek = Ek[:, ind]
Lk = Ss**2 # singular values to eigenvalues
Fk = (Lk[:N_k]**0.5)*Ek[:,:N_k] # factor loadings
# Varimax rotation of the factor loadings
ROT = classic_orthomax(Fk, gamma=1) # finding rotation (gamma=1 implies CLASSIC varimax)
Fs = np.dot(Fk,ROT) # rotated factor loadings
Ls = np.diag(ROT.T @ np.diag(Lk[:N_k]) @ ROT) # rotated eigenvalues
ind = np.argsort(Ls)[::-1]
Ls = Ls[ind]
Fs = Fs[:, ind]
Fs = flip_Ek(Fs)
return Fs, Ls, Fk, Lk
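# Illustration (a sketch with synthetic inputs, not part of the original package):
# factorize the SVD of a random symmetric matrix into N_k varimax-rotated loadings.
# Here `Ek` plays the role of svd.components_ and `Ss` of svd.singular_values_.
#
#   rng = np.random.default_rng(0)
#   M = rng.normal(size=(50, 50))
#   _, Ss, Ek = np.linalg.svd(M @ M.T)  # rows of Ek are the components
#   Fs, Ls, Fk, Lk = process_svd_to_factors(Ek, Ss, N_k=5)
#   assert Fs.shape == (50, 5)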
### aux functions for matrix extraction
def get_numbers_of_pheno(ad, pheno):
"""return list of nums"""
vals = ad.obs[pheno].value_counts().tolist()
return vals
def get_bools_of_pheno(ad, pheno):
"""return list of booleans"""
phenotypes = ad.obs[pheno].unique()
bool_list = [ad.obs[pheno] == i for i in phenotypes]
return bool_list
def extract_matrix_from_anndata(ad, pheno_column):
ind = get_bools_of_pheno(ad, pheno_column)
rands = [ad[i,:].X.T for i in ind]
return rands, len(rands)
#### functions for generating pvals and integrating whole varimax
def _full_Fs(ad, pheno, capvar):
matlist, numpheno = extract_matrix_from_anndata(ad, pheno)
Ee, Ss, _, N = dask_ver(matlist, capvar) # specify algorithm
Fs, Ls, Fk, Lk = process_svd_to_factors(Ee, Ss, N)
ad.uns["selected_loading"] = N
ad.uns["Fs"] = Fs
ad.uns["Ls"] = Ls
ad.uns["Fk"] = Fk
ad.uns["Lk"] = Lk
ad.uns["n_pheno"] = numpheno
Fs_diff = calculate_minmax(Fs, numpheno)
return Fs_diff
def pvalgenerator(ad, pheno, capvar):
Fs_diff = _full_Fs(ad, pheno, capvar)
ad.uns["Fs_diff"] = Fs_diff
return Fs_diff
# leos' aux functions
def classic_orthomax(Phi, gamma = 1, q = 20, tol = 1e-6):
"""Returns the orthomax rotation"""
from numpy import eye, asarray, dot, sum, diag
from numpy.linalg import svd
p,k = Phi.shape
R = eye(k)
d=0
for i in range(q):
d_old = d
Lambda = dot(Phi, R)
u,s,vh = svd(dot(Phi.T,asarray(Lambda)**3 - (gamma/p) * dot(Lambda, diag(diag(dot(Lambda.T,Lambda))))))
R = dot(u,vh)
d = sum(s)
if d_old!=0 and d/d_old < 1 + tol: break
return R
def flip_Ek(Ek):
"""That functions guaranties that the eigenvectors will "point up".
"""
n, m = Ek.shape
e_k_to_flip = abs(Ek.min(axis=0)) > Ek.max(axis=0)
flip = np.ones(m)
flip[e_k_to_flip] *= -1
Ek *= flip
return Ek
### aux functions for detecting factors.
def get_optimal_threshold(num, thres, ncomp = 2000):
"""
selects number of factors for truncated SVD
"""
from dask_ml.decomposition import TruncatedSVD
import dask.array as da
nrows = num.shape[0] # this shows num cells and is required for svd
numgenes = num.shape[1] # this is to make sure if less 2000
if numgenes < ncomp:
ncomp = numgenes - 1
logger.info('using %s components for truncated SVD', ncomp)
numm = num.rechunk((nrows, 10))
svd = TruncatedSVD(n_components=ncomp, n_iter=5, random_state=42)
svd.fit(numm)
x = np.cumsum(svd.explained_variance_ratio_)
y = np.argmax(x>thres)
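# np.argmax returns 0 when no entry exceeds thres; in that case keep all components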
if y == 0:
y = ncomp
X = svd.components_[0:y]
v = svd.singular_values_[0:y]
return x, y, X, v
|
//////////////////////////////////////////////////////////////////////////////
// BSD 3-Clause License
//
// Copyright (c) 2022, The Regents of the University of California
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "shape.h"
#include <boost/polygon/polygon.hpp>
#include "grid.h"
#include "grid_component.h"
#include "odb/db.h"
#include "techlayer.h"
#include "utl/Logger.h"
namespace pdn {
Shape::Shape(odb::dbTechLayer* layer,
odb::dbNet* net,
const odb::Rect& rect,
odb::dbWireShapeType type)
: layer_(layer),
net_(net),
rect_(rect),
type_(type),
shape_type_(SHAPE),
obs_(rect_),
grid_component_(nullptr)
{
}
Shape::Shape(odb::dbTechLayer* layer,
const odb::Rect& rect,
ShapeType shape_type)
: layer_(layer),
net_(nullptr),
rect_(rect),
type_(odb::dbWireShapeType::NONE),
shape_type_(shape_type),
obs_(rect_),
grid_component_(nullptr)
{
}
Shape::~Shape()
{
}
utl::Logger* Shape::getLogger() const
{
return grid_component_->getLogger();
}
Shape* Shape::copy() const
{
auto* shape = new Shape(layer_, net_, rect_, type_);
shape->shape_type_ = shape_type_;
shape->obs_ = obs_;
shape->iterm_connections_ = iterm_connections_;
shape->bterm_connections_ = bterm_connections_;
return shape;
}
void Shape::merge(Shape* shape)
{
rect_.merge(shape->rect_);
iterm_connections_.insert(shape->iterm_connections_.begin(),
shape->iterm_connections_.end());
bterm_connections_.insert(shape->bterm_connections_.begin(),
shape->bterm_connections_.end());
generateObstruction();
}
const Box Shape::rectToBox(const odb::Rect& rect)
{
return Box(Point(rect.xMin(), rect.yMin()),
Point(rect.xMax(), rect.yMax()));
}
const Box Shape::getRectBox() const
{
return rectToBox(rect_);
}
const Box Shape::getObstructionBox() const
{
return rectToBox(obs_);
}
int Shape::getNumberOfConnections() const
{
return vias_.size() + iterm_connections_.size() + bterm_connections_.size();
}
int Shape::getNumberOfConnectionsBelow() const
{
int connections = 0;
for (const auto& via : vias_) {
if (via->getUpperLayer() == layer_) {
connections++;
}
}
return connections;
}
int Shape::getNumberOfConnectionsAbove() const
{
int connections = 0;
for (const auto& via : vias_) {
if (via->getLowerLayer() == layer_) {
connections++;
}
}
return connections;
}
bool Shape::isValid() const
{
// check if shape has a valid area
if (layer_->hasArea()) {
if (rect_.area() < layer_->getArea()) {
return false;
}
}
return true;
}
bool Shape::isWrongWay() const
{
if (isHorizontal()
&& layer_->getDirection() == odb::dbTechLayerDir::HORIZONTAL) {
return true;
}
if (isVertical() && layer_->getDirection() == odb::dbTechLayerDir::VERTICAL) {
return true;
}
return false;
}
void Shape::updateIBTermConnections(std::set<odb::Rect>& terms)
{
std::set<odb::Rect> remove_terms;
for (const odb::Rect& term : terms) {
if (!rect_.overlaps(term)) {
remove_terms.insert(term);
}
}
for (const odb::Rect& term : remove_terms) {
terms.erase(term);
}
}
void Shape::updateTermConnections()
{
updateIBTermConnections(iterm_connections_);
updateIBTermConnections(bterm_connections_);
}
bool Shape::hasTermConnections() const
{
return !bterm_connections_.empty() || !iterm_connections_.empty();
}
const odb::Rect Shape::getMinimumRect() const
{
odb::Rect intersected_rect;
intersected_rect.mergeInit();
// merge all bterms
for (const auto& bterm : bterm_connections_) {
intersected_rect.merge(bterm);
}
// merge all iterms
for (const odb::Rect& iterm : iterm_connections_) {
intersected_rect.merge(iterm);
}
// merge all vias
for (auto& via : vias_) {
intersected_rect.merge(via->getArea());
}
return intersected_rect;
}
bool Shape::cut(const ShapeTree& obstructions,
std::vector<Shape*>& replacements) const
{
using namespace boost::polygon::operators;
using Rectangle = boost::polygon::rectangle_data<int>;
using Polygon90 = boost::polygon::polygon_90_with_holes_data<int>;
using Polygon90Set = boost::polygon::polygon_90_set_data<int>;
using Pt = Polygon90::point_type;
const bool is_horizontal = isHorizontal();
std::vector<Polygon90> shape_violations;
for (auto it
= obstructions.qbegin(bgi::intersects(getRectBox())
&& bgi::satisfies([&](const auto& other) {
const auto& other_shape = other.second;
return layer_ == other_shape->getLayer()
|| other_shape->getLayer() == nullptr;
}));
it != obstructions.qend();
it++) {
auto other_shape = it->second;
odb::Rect vio_rect = other_shape->getObstruction();
// ensure the violation overlaps the shape fully so the cut is made correctly
if (is_horizontal) {
vio_rect.set_ylo(std::min(rect_.yMin(), vio_rect.yMin()));
vio_rect.set_yhi(std::max(rect_.yMax(), vio_rect.yMax()));
} else {
vio_rect.set_xlo(std::min(rect_.xMin(), vio_rect.xMin()));
vio_rect.set_xhi(std::max(rect_.xMax(), vio_rect.xMax()));
}
std::array<Pt, 4> pts = {Pt(vio_rect.xMin(), vio_rect.yMin()),
Pt(vio_rect.xMax(), vio_rect.yMin()),
Pt(vio_rect.xMax(), vio_rect.yMax()),
Pt(vio_rect.xMin(), vio_rect.yMax())};
Polygon90 poly;
poly.set(pts.begin(), pts.end());
// save violating polygon
shape_violations.push_back(poly);
}
// if there are no violations, there are no new shapes to create
if (shape_violations.empty()) {
return false;
}
std::array<Pt, 4> pts = {Pt(rect_.xMin(), rect_.yMin()),
Pt(rect_.xMax(), rect_.yMin()),
Pt(rect_.xMax(), rect_.yMax()),
Pt(rect_.xMin(), rect_.yMax())};
Polygon90 poly;
poly.set(pts.begin(), pts.end());
std::array<Polygon90, 1> arr{poly};
Polygon90Set new_shape(boost::polygon::HORIZONTAL, arr.begin(), arr.end());
// remove all violations from the shape
for (const auto& violation : shape_violations) {
new_shape -= violation;
}
std::vector<Rectangle> rects;
new_shape.get_rectangles(rects);
for (auto& r : rects) {
const odb::Rect new_rect(xl(r), yl(r), xh(r), yh(r));
// check if new shape should be accepted,
// only shapes with the same width will be used
bool accept = false;
if (is_horizontal) {
accept = rect_.dy() == new_rect.dy();
} else {
accept = rect_.dx() == new_rect.dx();
}
if (accept) {
auto* new_shape = copy();
new_shape->setRect(new_rect);
new_shape->updateTermConnections();
replacements.push_back(new_shape);
}
}
return true;
}
void Shape::writeToDb(odb::dbSWire* swire,
bool add_pins,
bool make_rect_as_pin) const
{
debugPrint(getLogger(),
utl::PDN,
"Shape",
5,
"Adding shape {} with pins {} and rect as pin {}",
getReportText(),
add_pins,
make_rect_as_pin);
odb::dbSBox::create(swire,
layer_,
rect_.xMin(),
rect_.yMin(),
rect_.xMax(),
rect_.yMax(),
type_);
if (add_pins) {
if (make_rect_as_pin) {
addBPinToDb(rect_);
}
for (const auto& bterm : bterm_connections_) {
addBPinToDb(bterm);
}
}
}
void Shape::addBPinToDb(const odb::Rect& rect) const
{
// find existing bterm, else make it
odb::dbBTerm* bterm = nullptr;
if (net_->getBTermCount() == 0) {
bterm = odb::dbBTerm::create(net_, net_->getConstName());
bterm->setSigType(net_->getSigType());
bterm->setIoType(odb::dbIoType::INOUT);
bterm->setSpecial();
} else {
bterm = net_->get1stBTerm();
}
auto pins = bterm->getBPins();
for (auto* pin : pins) {
for (auto* box : pin->getBoxes()) {
if (box->getTechLayer() != layer_) {
continue;
}
odb::Rect box_rect;
box->getBox(box_rect);
if (box_rect == rect) {
// pin already exists
return;
}
}
}
odb::dbBPin* pin = nullptr;
if (pins.empty()) {
pin = odb::dbBPin::create(bterm);
pin->setPlacementStatus(odb::dbPlacementStatus::FIRM);
} else {
pin = *pins.begin();
}
odb::dbBox::create(
pin, layer_, rect.xMin(), rect.yMin(), rect.xMax(), rect.yMax());
}
void Shape::populateMapFromDb(odb::dbNet* net, ShapeTreeMap& map)
{
for (auto* swire : net->getSWires()) {
for (auto* box : swire->getWires()) {
auto* layer = box->getTechLayer();
if (layer == nullptr) {
continue;
}
odb::Rect rect;
box->getBox(rect);
ShapePtr shape
= std::make_shared<Shape>(layer, net, rect, box->getWireShapeType());
shape->setShapeType(Shape::FIXED);
if (box->getDirection() == odb::dbSBox::OCTILINEAR) {
// cannot connect to this safely so make it an obstruction
shape->setNet(nullptr);
shape->setShapeType(Shape::OBS);
}
shape->generateObstruction();
map[layer].insert({shape->getRectBox(), shape});
}
}
}
void Shape::generateObstruction()
{
const int width = getWidth();
const int length = getLength();
const TechLayer layer(layer_);
// first apply the spacing rules
const int dbspacing = layer.getSpacing(width, length);
odb::Rect spacingdb_rect;
rect_.bloat(dbspacing, spacingdb_rect);
// apply spacing table rules
odb::Rect spacing_table_rect = rect_;
const bool is_wrong_way = isWrongWay();
for (auto* spacing_rule : layer_->getTechLayerSpacingTablePrlRules()) {
if (spacing_rule->isWrongDirection() && !is_wrong_way) {
continue;
}
const int spacing = spacing_rule->getSpacing(width, length);
odb::Rect spacing_rule_rect = rect_;
rect_.bloat(spacing, spacing_rule_rect);
spacing_table_rect.merge(spacing_rule_rect);
}
// apply eol rules
const bool is_horizontal = isHorizontal();
odb::Rect eol_rect = rect_;
for (auto* eol_rule : layer_->getTechLayerSpacingEolRules()) {
if (width > eol_rule->getEolWidth()) {
continue;
}
const int spacing = eol_rule->getEolSpace();
odb::Rect eol_rule_rect = rect_;
if (is_horizontal) {
eol_rule_rect.set_xlo(eol_rule_rect.xMin() - spacing);
eol_rule_rect.set_xhi(eol_rule_rect.xMax() + spacing);
} else {
eol_rule_rect.set_ylo(eol_rule_rect.yMin() - spacing);
eol_rule_rect.set_yhi(eol_rule_rect.yMax() + spacing);
}
eol_rect.merge(eol_rule_rect);
}
// merge all to get most restrictive obstruction box
obs_.mergeInit();
obs_.merge(spacingdb_rect);
obs_.merge(spacing_table_rect);
obs_.merge(eol_rect);
}
const std::string Shape::getDisplayText() const
{
const std::string separator = ":";
std::string text;
text += net_->getName() + separator;
text += layer_->getName() + separator;
if (grid_component_ != nullptr) {
text += GridComponent::typeToString(grid_component_->type()) + separator;
text += grid_component_->getGrid()->getName();
} else {
text += "none";
}
return text;
}
bool Shape::isRemovable() const
{
if (!isModifiable()) {
return false;
}
if (getNumberOfConnections() < 2) {
// floating shape with one or zero connections
return true;
}
return false;
}
bool Shape::isModifiable() const
{
return shape_type_ == SHAPE;
}
const std::string Shape::getReportText() const
{
std::string text = fmt::format("{} on {}",
getRectText(rect_, layer_->getTech()->getLefUnits()),
layer_->getName());
if (net_ != nullptr) {
text = net_->getName() + " " + text;
}
return text;
}
const std::string Shape::getRectText(const odb::Rect& rect,
double dbu_to_micron)
{
return fmt::format("({:.4f}, {:.4f}) - ({:.4f}, {:.4f})",
rect.xMin() / dbu_to_micron,
rect.yMin() / dbu_to_micron,
rect.xMax() / dbu_to_micron,
rect.yMax() / dbu_to_micron);
}
Shape* Shape::extendTo(const odb::Rect& rect,
const ShapeTree& obstructions) const
{
std::unique_ptr<Shape> new_shape(copy());
if (isHorizontal()) {
new_shape->rect_.set_xlo(std::min(rect_.xMin(), rect.xMin()));
new_shape->rect_.set_xhi(std::max(rect_.xMax(), rect.xMax()));
} else if (isVertical()) {
new_shape->rect_.set_ylo(std::min(rect_.yMin(), rect.yMin()));
new_shape->rect_.set_yhi(std::max(rect_.yMax(), rect.yMax()));
} else {
return nullptr;
}
if (rect_ == new_shape->rect_) {
// shape did not change
return nullptr;
}
if (obstructions.qbegin(bgi::intersects(new_shape->getRectBox())
&& bgi::satisfies([this](const auto& other) {
// ignore violations that results from itself
return other.second.get() != this;
}))
!= obstructions.qend()) {
// extension not possible
return nullptr;
}
return new_shape.release();
}
/////////
FollowPinShape::FollowPinShape(odb::dbTechLayer* layer,
odb::dbNet* net,
const odb::Rect& rect)
: Shape(layer, net, rect, odb::dbWireShapeType::FOLLOWPIN)
{
}
Shape* FollowPinShape::copy() const
{
auto* shape = new FollowPinShape(getLayer(), getNet(), getRect());
shape->generateObstruction();
shape->rows_ = rows_;
return shape;
}
void FollowPinShape::merge(Shape* shape)
{
Shape::merge(shape);
FollowPinShape* other = dynamic_cast<FollowPinShape*>(shape);
if (other == nullptr) {
return;
}
rows_.insert(other->rows_.begin(), other->rows_.end());
}
void FollowPinShape::updateTermConnections()
{
Shape::updateTermConnections();
// remove rows that no longer overlap with shape
const odb::Rect& rect = getRect();
std::set<odb::dbRow*> remove_rows;
for (auto* row : rows_) {
odb::Rect row_rect;
row->getBBox(row_rect);
if (!rect.intersects(row_rect)) {
remove_rows.insert(row);
}
}
for (auto* row : remove_rows) {
rows_.erase(row);
}
}
const odb::Rect FollowPinShape::getMinimumRect() const
{
odb::Rect min_shape = Shape::getMinimumRect();
const odb::Rect& rect = getRect();
// copy width back
const bool is_horizontal = isHorizontal();
if (is_horizontal) {
min_shape.set_ylo(rect.yMin());
min_shape.set_yhi(rect.yMax());
} else {
min_shape.set_xlo(rect.xMin());
min_shape.set_xhi(rect.xMax());
}
// merge with rows to ensure proper overlap
for (auto* row : rows_) {
odb::Rect row_rect;
row->getBBox(row_rect);
if (is_horizontal) {
min_shape.set_xlo(std::min(min_shape.xMin(), row_rect.xMin()));
min_shape.set_xhi(std::max(min_shape.xMax(), row_rect.xMax()));
} else {
min_shape.set_ylo(std::min(min_shape.yMin(), row_rect.yMin()));
min_shape.set_yhi(std::max(min_shape.yMax(), row_rect.yMax()));
}
}
return min_shape;
}
bool FollowPinShape::cut(const ShapeTree& obstructions,
std::vector<Shape*>& replacements) const
{
ShapeTree filtered_obstructions;
for (const auto& [box, shape] : obstructions) {
if (shape->shapeType() == GRID_OBS) {
// followpins can ignore grid level obstructions
continue;
}
filtered_obstructions.insert({box, shape});
}
return Shape::cut(filtered_obstructions, replacements);
}
} // namespace pdn
|
class category (C : Type) :=
( hom : C → C → Type )
( id : (X : C) → hom X X )
( comp : {X Y Z : C} → hom X Y → hom Y Z → hom X Z )
( id_comp {X Y : C} (f : hom X Y) : comp (id X) f = f )
( comp_id {X Y : C} (f : hom X Y) : comp f (id Y) = f )
( assoc {W X Y Z : C} (f : hom W X) (g : hom X Y) (h : hom Y Z) :
comp (comp f g) h = comp f (comp g h) )
notation " 𝟙 " => category.id
infixr: 80 " ≫ " => category.comp
infixr: 10 " ⟶ " => category.hom
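-- A quick sanity check of the notation and class axioms above (our own example,
-- not part of the original development):
example {C : Type} [category C] {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) :
    (𝟙 X ≫ f) ≫ g = f ≫ g := by
  rw [category.id_comp]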
variable (C : Type) [category C]
inductive prod_coprod : Type
| of_cat' : C → prod_coprod
| prod : prod_coprod → prod_coprod → prod_coprod
| coprod : prod_coprod → prod_coprod → prod_coprod
variable {C}
namespace prod_coprod
@[simp] def size : prod_coprod C → Nat
| of_cat' _ => 1
| prod X Y => size X + size Y + 1
| coprod X Y => size X + size Y + 1
inductive syn : (X Y : prod_coprod C) → Type
| of_cat {X Y : C} : (X ⟶ Y) → syn (of_cat' X) (of_cat' Y)
| prod_mk {X Y Z : prod_coprod C} : syn X Y → syn X Z → syn X (Y.prod Z)
| fst {X Y : prod_coprod C} : syn (X.prod Y) X
| snd {X Y : prod_coprod C} : syn (X.prod Y) Y
| coprod_mk {X Y Z : prod_coprod C} : syn X Z → syn Y Z → syn (X.coprod Y) Z
| inl {X Y : prod_coprod C} : syn X (X.coprod Y)
| inr {X Y : prod_coprod C} : syn Y (X.coprod Y)
| id (X : prod_coprod C) : syn X X
| comp {X Y Z : prod_coprod C} : syn X Y → syn Y Z → syn X Z
namespace syn
inductive rel : {X Y : prod_coprod C} → syn X Y → syn X Y → Prop
| refl {X Y : prod_coprod C} (f : syn X Y) : rel f f
| symm {X Y : prod_coprod C} {f g : syn X Y} : rel f g → rel g f
| trans {X Y : prod_coprod C} {f g h : syn X Y} : rel f g → rel g h → rel f h
| comp_congr {X Y Z : prod_coprod C} {f₁ f₂ : syn X Y} {g₁ g₂ : syn Y Z} :
rel f₁ f₂ → rel g₁ g₂ → rel (f₁.comp g₁) (f₂.comp g₂)
| prod_mk_congr {X Y Z : prod_coprod C} {f₁ f₂ : syn X Y} {g₁ g₂ : syn X Z} :
rel f₁ f₂ → rel g₁ g₂ → rel (f₁.prod_mk g₁) (f₂.prod_mk g₂)
| coprod_mk_congr {X Y Z : prod_coprod C} {f₁ f₂ : syn X Z} {g₁ g₂ : syn Y Z} :
rel f₁ f₂ → rel g₁ g₂ → rel (f₁.coprod_mk g₁) (f₂.coprod_mk g₂)
| id_comp {X Y : prod_coprod C} (f : syn X Y) : rel ((syn.id X).comp f) f
| comp_id {X Y : prod_coprod C} (f : syn X Y) : rel (f.comp (syn.id Y)) f
| assoc {W X Y Z : prod_coprod C} (f : syn W X) (g : syn X Y) (h : syn Y Z) :
rel ((f.comp g).comp h) (f.comp (g.comp h))
| of_cat_id {X : C} : rel (syn.of_cat (𝟙 X)) (syn.id (of_cat' X))
| of_cat_comp {X Y Z : C} (f : X ⟶ Y) (g : Y ⟶ Z) :
rel (syn.of_cat (f ≫ g)) (syn.comp (syn.of_cat f) (syn.of_cat g))
| mk_fst_comp {X Y Z : prod_coprod C} (f : syn X Y) (g : syn X Z) :
rel (syn.comp (syn.prod_mk f g) syn.fst) f
| mk_snd_comp {X Y Z : prod_coprod C} (f : syn X Y) (g : syn X Z) :
rel (syn.comp (syn.prod_mk f g) syn.snd) g
| prod_eta {X Y Z : prod_coprod C} (f : syn X (Y.prod Z)) :
rel (syn.prod_mk (f.comp syn.fst) (f.comp syn.snd)) f
| inl_comp_mk {X Y Z : prod_coprod C} (f : syn X Z) (g : syn Y Z) :
rel (syn.comp syn.inl (syn.coprod_mk f g)) f
| inr_comp_mk {X Y Z : prod_coprod C} (f : syn X Z) (g : syn Y Z) :
rel (syn.comp syn.inr (syn.coprod_mk f g)) g
| coprod_eta {X Y Z : prod_coprod C} (f : syn (X.coprod Y) Z) :
rel (syn.coprod_mk (syn.inl.comp f) (syn.inr.comp f)) f
infixl:50 " ♥ " => rel
instance : Trans (@rel C _ X Y) (@rel C _ X Y) (@rel C _ X Y) where
trans := rel.trans
theorem rel_prod {X Y Z : prod_coprod C} {f g : syn X (Y.prod Z)}
(h₁ : rel (f.comp syn.fst) (g.comp syn.fst))
(h₂ : rel (f.comp syn.snd) (g.comp syn.snd)) :
rel f g :=
rel.trans (rel.symm (rel.prod_eta f)) (rel.trans (rel.prod_mk_congr h₁ h₂) (rel.prod_eta g))
theorem rel_coprod {X Y Z : prod_coprod C} {f g : syn (X.coprod Y) Z}
(h₁ : rel (syn.inl.comp f) (syn.inl.comp g))
(h₂ : rel (syn.inr.comp f) (syn.inr.comp g)) :
rel f g :=
rel.trans (rel.symm (rel.coprod_eta f)) (rel.trans (rel.coprod_mk_congr h₁ h₂) (rel.coprod_eta g))
end syn
inductive norm_hom : (X Y : prod_coprod C) → Type
| of_cat {X Y : C} (f : X ⟶ Y) : norm_hom (of_cat' X) (of_cat' Y)
| coprod_mk {X Y Z : prod_coprod C} (f : norm_hom X Z) (g : norm_hom Y Z) :
norm_hom (X.coprod Y) Z
| prod_mk {X Y Z : prod_coprod C} (f : norm_hom X Y) (g : norm_hom X Z) :
norm_hom X (prod Y Z)
| comp_inl {X Y Z : prod_coprod C} (f : norm_hom X Y) :
norm_hom X (coprod Y Z)
| comp_inr {X Y Z : prod_coprod C} (f : norm_hom X Z) :
norm_hom X (coprod Y Z)
| fst_comp {X Y Z : prod_coprod C} (f : norm_hom X Z) :
norm_hom (prod X Y) Z
| snd_comp {X Y Z : prod_coprod C} (f : norm_hom Y Z) :
norm_hom (prod X Y) Z
namespace norm_hom
inductive rel : {X Y : prod_coprod C} → norm_hom X Y → norm_hom X Y → Prop
| refl {X Y : prod_coprod C} (f : norm_hom X Y) : rel f f
| symm {X Y : prod_coprod C} {f g : norm_hom X Y} : rel g f → rel f g
| trans {X Y : prod_coprod C} {f g h : norm_hom X Y} : rel f g → rel g h → rel f h
| coprod_mk_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom X Z} {g₁ g₂ : norm_hom Y Z} :
rel f₁ f₂ → rel g₁ g₂ → rel (coprod_mk f₁ g₁) (coprod_mk f₂ g₂)
| prod_mk_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom X Y} {g₁ g₂ : norm_hom X Z} :
rel f₁ f₂ → rel g₁ g₂ → rel (prod_mk f₁ g₁) (prod_mk f₂ g₂)
| comp_inl_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom X Y} :
rel f₁ f₂ → rel (comp_inl f₁ : norm_hom X (coprod Y Z)) (comp_inl f₂)
| comp_inr_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom X Z} :
rel f₁ f₂ → rel (comp_inr f₁ : norm_hom X (coprod Y Z)) (comp_inr f₂)
| fst_comp_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom X Z} :
rel f₁ f₂ → rel (fst_comp f₁ : norm_hom (prod X Y) Z) (fst_comp f₂)
| snd_comp_congr {X Y Z : prod_coprod C} {f₁ f₂ : norm_hom Y Z} :
rel f₁ f₂ → rel (snd_comp f₁ : norm_hom (prod X Y) Z) (snd_comp f₂)
| fst_comp_prod_mk {W X Y Z : prod_coprod C} (f : norm_hom X Y) (g : norm_hom X Z) :
rel (fst_comp (prod_mk f g) : norm_hom (prod X W) (prod Y Z)) (prod_mk f.fst_comp g.fst_comp)
| snd_comp_prod_mk {W X Y Z : prod_coprod C} (f : norm_hom X Y) (g : norm_hom X Z) :
rel (snd_comp (prod_mk f g) : norm_hom (prod W X) (prod Y Z)) (prod_mk f.snd_comp g.snd_comp)
| comp_inl_coprod_mk {W X Y Z : prod_coprod C} (f : norm_hom W Y) (g : norm_hom X Y) :
rel (comp_inl (coprod_mk f g) : norm_hom (coprod W X) (coprod Y Z))
(coprod_mk f.comp_inl g.comp_inl)
| comp_inr_coprod_mk {W X Y Z : prod_coprod C} (f : norm_hom W Y) (g : norm_hom X Y) :
rel (comp_inr (coprod_mk f g) : norm_hom (coprod W X) (coprod Z Y))
(coprod_mk f.comp_inr g.comp_inr)
| fst_comp_comp_inl {W X Y Z : prod_coprod C} (f : norm_hom W Y) :
rel (f.fst_comp.comp_inl : norm_hom (prod W X) (coprod Y Z)) f.comp_inl.fst_comp
| snd_comp_comp_inl {W X Y Z : prod_coprod C} (f : norm_hom X Y) :
rel (f.snd_comp.comp_inl : norm_hom (prod W X) (coprod Y Z)) f.comp_inl.snd_comp
| fst_comp_comp_inr {W X Y Z : prod_coprod C} (f : norm_hom W Z) :
rel (f.fst_comp.comp_inr : norm_hom (prod W X) (coprod Y Z)) f.comp_inr.fst_comp
| snd_comp_comp_inr {W X Y Z : prod_coprod C} (f : norm_hom X Z) :
rel (f.snd_comp.comp_inr : norm_hom (prod W X) (coprod Y Z)) f.comp_inr.snd_comp
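-- `to_inj f` attempts to factor a map into a coproduct through `comp_inl` or
-- `comp_inr`; the lemmas `to_inj_eq_inl` and `to_inj_eq_inr` below show that a
-- successful factorization is correct up to `rel`.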
def to_inj : {X Y Z : prod_coprod C} → (f : norm_hom X (coprod Y Z)) →
Option ((norm_hom X Y) ⊕ (norm_hom X Z))
| _, _, _, comp_inl f => some (Sum.inl f)
| _, _, _, comp_inr f => some (Sum.inr f)
| _, _, _, fst_comp f =>
match to_inj f with
| none => none
| some (Sum.inl f) => some (Sum.inl (fst_comp f))
| some (Sum.inr f) => some (Sum.inr (fst_comp f))
| _, _, _, snd_comp f =>
match to_inj f with
| none => none
| some (Sum.inl f) => some (Sum.inl (snd_comp f))
| some (Sum.inr f) => some (Sum.inr (snd_comp f))
| _, _, _, coprod_mk f g =>
match to_inj f, to_inj g with
| some (Sum.inl f), some (Sum.inl g) => some (Sum.inl (coprod_mk f g))
| some (Sum.inr f), some (Sum.inr g) => some (Sum.inr (coprod_mk f g))
| _, _ => none
theorem to_inj_eq_inl : {X Y Z : prod_coprod C} → {f : norm_hom X (coprod Y Z)} →
{g : norm_hom X Y} → to_inj f = some (Sum.inl g) → rel f g.comp_inl
| _, _, _, comp_inl f, g, h => by
simp [to_inj] at h
simp [h]
exact rel.refl _
| _, _, _, comp_inr f, g, h => by
simp [to_inj] at h
| _, _, _, snd_comp f, g, h =>
have hi : ∃ i, to_inj f = some (Sum.inl i) := by
{ simp [to_inj] at h
revert h
match to_inj f with
| some (Sum.inl i) => intro h; exact ⟨i, rfl⟩
| some (Sum.inr _) => simp
| none => simp }
match hi with
| ⟨i, hi⟩ => by
simp [hi, to_inj] at h
rw [← h]
exact rel.trans (rel.snd_comp_congr (to_inj_eq_inl hi))
(rel.snd_comp_comp_inl i).symm
| _, _, _, fst_comp f, g, h =>
have hi : ∃ i, to_inj f = some (Sum.inl i) := by
{ simp [to_inj] at h
revert h
match to_inj f with
| some (Sum.inl i) => intro h; exact ⟨i, rfl⟩
| some (Sum.inr _) => simp
| none => simp }
match hi with
| ⟨i, hi⟩ => by
simp [hi, to_inj] at h
rw [← h]
exact rel.trans (rel.fst_comp_congr (to_inj_eq_inl hi))
(rel.fst_comp_comp_inl i).symm
| _, _, _, coprod_mk f g, i, h =>
have hi : ∃ f' g', to_inj f = some (Sum.inl f') ∧ to_inj g = some (Sum.inl g') := by
{ simp [to_inj] at h
revert h
match to_inj f, to_inj g with
| some (Sum.inl f'), some (Sum.inl g') => intro h; exact ⟨f', g', rfl, rfl⟩
| some (Sum.inr _), some (Sum.inr _) => simp
| none, _ => simp
| _, none => simp
| some (Sum.inl _), some (Sum.inr _) => simp
| some (Sum.inr _), some (Sum.inl _) => simp }
match hi with
| ⟨f', g', hf, hg⟩ => by
simp [hf, hg, to_inj] at h
rw [← h]
exact rel.trans (rel.coprod_mk_congr (to_inj_eq_inl hf) (to_inj_eq_inl hg))
(rel.comp_inl_coprod_mk _ _).symm
theorem to_inj_eq_inr : {X Y Z : prod_coprod C} → {f : norm_hom X (coprod Y Z)} →
{g : norm_hom X Z} → to_inj f = some (Sum.inr g) → rel f g.comp_inr
| _, _, _, comp_inr f, g, h => by
simp [to_inj] at h
simp [h]
exact rel.refl _
| _, _, _, comp_inl f, g, h => by
simp [to_inj] at h
| _, _, _, snd_comp f, g, h =>
have hi : ∃ i, to_inj f = some (Sum.inr i) := by
{ simp [to_inj] at h
revert h
match to_inj f with
| some (Sum.inr i) => intro h; exact ⟨i, rfl⟩
| some (Sum.inl _) => simp
| none => simp }
match hi with
| ⟨i, hi⟩ => by
simp [hi, to_inj] at h
rw [← h]
exact rel.trans (rel.snd_comp_congr (to_inj_eq_inr hi))
(rel.snd_comp_comp_inr i).symm
| _, _, _, fst_comp f, g, h =>
have hi : ∃ i, to_inj f = some (Sum.inr i) := by
{ simp [to_inj] at h
revert h
match to_inj f with
| some (Sum.inr i) => intro h; exact ⟨i, rfl⟩
| some (Sum.inl _) => simp
| none => simp }
match hi with
| ⟨i, hi⟩ => by
simp [hi, to_inj] at h
rw [← h]
exact rel.trans (rel.fst_comp_congr (to_inj_eq_inr hi))
(rel.fst_comp_comp_inr i).symm
| _, _, _, coprod_mk f g, i, h =>
have hi : ∃ f' g', to_inj f = some (Sum.inr f') ∧ to_inj g = some (Sum.inr g') := by
{ simp [to_inj] at h
revert h
match to_inj f, to_inj g with
| some (Sum.inr f'), some (Sum.inr g') => intro _; exact ⟨f', g', rfl, rfl⟩
| some (Sum.inl _), some (Sum.inl _) => simp
| none, _ => simp
| _, none => simp
| some (Sum.inr _), some (Sum.inl _) => simp
| some (Sum.inl _), some (Sum.inr _) => simp }
match hi with
| ⟨f', g', hf, hg⟩ => by
simp [hf, hg, to_inj] at h
rw [← h]
exact rel.trans (rel.coprod_mk_congr (to_inj_eq_inr hf) (to_inj_eq_inr hg))
(rel.comp_inr_coprod_mk _ _).symm
theorem to_inj_eq_none {X Y Z : prod_coprod C} {f : norm_hom X (coprod Y Z)}
(hf : to_inj f = none) {g : norm_hom X Z} : ¬rel f g.comp_inr := by
intro h
cases h
simp at hf
end norm_hom
|
<font face="Calibri">
<br>
<font size="7"> <b> GEOS 657 Microwave Remote Sensing <b> </font>
<font size="5"> <b> Lab 3: SAR Imaging Theory and Processing Methods<font color='rgba(200,0,0,0.2)'> -- [20 Points] </font> </b> </font>
<br>
<font size="4" color='rgba(200,0,0,0.2)'><b>Assignment Due Date: </b> March 05, 2019 </font>
<br> <br>
<font size="4"> <b> Paul A Rosen with modifications by Franz J Meyer</b>
<font size="3"> <br>
<font> <b>Date: </b> Feb 14, 2021 </font>
</font>
<div class="alert alert-danger">
<font face="Calibri" size="4"> <font color='rgba(200,0,0,0.2)'> <b>THIS NOTEBOOK INCLUDES A HOMEWORK ASSIGNMENT!</b></font>
<br>
<font size="3"> The homework assignments in this lab are indicated by markdown fields with <font color='rgba(200,0,0,0.2)'><b>red background</b></font>. Please complete these assignments in a separate Word / Latex / PDF document and submit your completed assignment via the GEOS 657 Blackboard page.
Contact me at [email protected] should you run into any problems.
</font>
</font>
</div>
```python
import url_widget as url_w
notebookUrl = url_w.URLWidget()
display(notebookUrl)
```
```python
from IPython.display import Markdown
from IPython.display import display
notebookUrl = notebookUrl.value
user = !echo $JUPYTERHUB_USER
env = !echo $CONDA_PREFIX
if env[0] == '':
env[0] = 'Python 3 (base)'
if env[0] != '/home/jovyan/.local/envs/rtc_analysis':
display(Markdown(f'<text style=color:red><strong>WARNING:</strong></text>'))
display(Markdown(f'<text style=color:red>This notebook should be run using the "rtc_analysis" conda environment.</text>'))
display(Markdown(f'<text style=color:red>It is currently using the "{env[0].split("/")[-1]}" environment.</text>'))
display(Markdown(f'<text style=color:red>Select the "rtc_analysis" from the "Change Kernel" submenu of the "Kernel" menu.</text>'))
display(Markdown(f'<text style=color:red>If the "rtc_analysis" environment is not present, use <a href="{notebookUrl.split("/user")[0]}/user/{user[0]}/notebooks/conda_environments/Create_OSL_Conda_Environments.ipynb"> Create_OSL_Conda_Environments.ipynb </a> to create it.</text>'))
display(Markdown(f'<text style=color:red>Note that you must restart your server after creating a new environment before it is usable by notebooks.</text>'))
```
# Prepare the Notebook
```python
import warnings
warnings.filterwarnings('ignore')
bShowInline = True # Set = False for document generation
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Latex, display
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
def makeplot( plt, figlabel, figcaption):
figname = figlabel+'.png'
plt.savefig(figname)
if bShowInline:
plt.show()
else:
plt.close()
strLatex="""
\\begin{figure}[b]
\centering
\includegraphics[totalheight=10.0cm]{%s}
\caption{%s}
\label{fig:%s}
\end{figure}"""%(figname, figcaption, figlabel)
return display(Latex(strLatex))
def sinc_interp(x, s, u):
# x is the vector to be interpolated
# s is a vector of sample points of x
# u is a vector of the output sample points for the interpolation
if len(x) != len(s):
raise ValueError('x and s must be the same length')
# Find the period
T = s[1] - s[0]
sincM = np.tile(u, (len(s), 1)) - np.tile(s[:, np.newaxis], (1, len(u)))
y = np.dot(x, np.sinc(sincM/T))
return y
%matplotlib widget
plt.rcParams.update({'font.size': 11})
```
# Overview
<font face="Calibri" size="3"> In this notebook, we will demonstrate the generation of a raw synthetic aperture radar data set for a collection of point scatterers on an otherwise dark background, and then demonstrate two methods of processing the data, simple back projection, and range-doppler processing.
1.0 [Background](#section-1)<br>
> 1.1 [SAR Geometry](#section-1.1) <br>
> 1.2 [Antenna Patterns](#section-1.2) <br>
> 1.3 [Beamwidth and Swath](#section-1.3) <br>
> 1.4 [Phase and Doppler Frequency in the synthetic aperture](#section-1.4) <br>
> 1.5 [Resolution of the synthetic aperture](#section-1.5) <br>
> 1.6 [The Radar Equation](#section-1.6) <br>
2.0 [Simulating SAR data with point targets](#section-2) <br>
> 2.1 [Simulating the transmitted pulse](#section-2.1) <br>
> 2.2 [Simulating the Received Echoes](#section-2.2) <br>
3.0 [Focusing SAR data - Range](#section-3) <br>
> 3.1 [Correlation to achieve fine range resolution - time domain](#section-3.1) <br>
> 3.2 [Correlation to achieve fine range resolution - frequency domain](#section-3.2)
4.0 [Focusing SAR data - Azimuth](#section-4) <br>
> 4.1 [Azimuth reference function](#section-4.2) <br>
> 4.2 [Correlation to achieve fine azimuth resolution - time domain](#section-4.2) <br>
> 4.3 [Correlation to achieve fine azimuth resolution - frequency domain](#section-4.3) <br>
> 4.4 [Backprojection](#section-4.4)
</font>
# 1.0 Background
## 1.1 SAR Geometry
<a id="section-1"></a>
<a id="section-1.1"></a>
<font face="Calibri" size="3">To simplify the problem, we assume a spacecraft flying at fixed altitude $h_{sc}$ and constant velocity $v_{sc}$, observing points on a flat earth. The geometry of the observation is depicted in Figure 1. The radar antenna is assumed to be a flat rectangular aperture with dimensions of length $L_a$ in the along-track dimension (also known as "azimuth" for historical reasons), and width "W_a" in the cross-track dimension (also known as the elevation dimension). The range $\rho$ is the distance from the spacecraft antenna to a point on the ground. The "range vector" or "look vector" is the vector pointing in this direction, with magnitude $\rho$. At this range, the look angle, defined as the angle from nadir to the range vector is $\theta$. At the antenna boresight, which is the direction where the antenna pattern has its peak gain, we define the boresight reference range $\rho_l$, and corresponding look angle $\theta_l$. Figure 2 illustrates the case where the antenna is pointed forward toward the velocity vector. In this configuration, we define squint angle $\theta_{sq}$ as the angle of rotation about the nadir vector in the ground plane.
Table 1 lists the assumed spacecraft, radar, and surface point targets characteristics.</font>
<table>
<thead>
<tr>
<th colspan="2"><big>SAR Geometry</big></th>
</tr>
</thead>
<tbody>
<tr>
<td> </td>
<td></td>
</tr>
<tr>
<td>Figure 1. Basic SAR Geometry </td>
<td>Figure 2. Squinted SAR Geometry</td>
</tr>
</tbody>
</table>
**Table 1. Radar and Spacecraft Parameters**
| Parameter | Symbol | Value | Comment |
| --- | --- | --- | --- |
| Wavelength | $\lambda$ | 0.24 m | (L-band) |
| Antenna Length | $L_a$ | 10 m | |
| Antenna Width | $W_a$ | 2 m | |
| Off-nadir boresight angle | $\theta_l$ | 30$^\circ$ | |
| Azimuth squint of boresight angle | $\theta_{sq}$ | 0$^\circ$ | |
| Spacecraft Velocity | $v_{sc}$ | 7,500 m/s | Assumed constant |
| Spacecraft Altitude | $h_{sc}$ | 750,000 m | Assumed constant |
| Radar Range Bandwidth | $B_r$ | 20 MHz | |
| Radar Pulse Duration | $\tau_r$ | 10 $\mu$s | Determines average power |
| Nominal Pulse Rate | $f_p$ | 1600 Hz | Determines average power and ambiguity levels |
| Peak Power on Transmit | $P_T$ | 4,000 W | Determines SNR |
| Radar Noise Temperature | $T_r$ | 300 K | Determines SNR |
| Corner Reflector Dimension | $L_{cr}$ | 2.4 m | Determines SNR |
```python
import numpy as np
Lambda = 0.24
L_a = 10.
W_a = 2.
theta_l = 30. * np.pi/180.
theta_sq = 0. * np.pi/180.
v_sc = 7500.
h_sc = 750000.
B_r = 20.e6
tau_r = 10.e-6
f_p = 1600.
P_T = 4000.
T_r = 300.
L_cr = 2.4
```
**Table 2. Other Constants**
| Parameter | Symbol | Value | Comment |
| --- | --- | --- | --- |
| Speed of light | $c$ | 299792458 m/s | |
| Boltzmann constant | $k$ | 1.38064852 $\times$ 10$^{-23}$ m$^2$ kg s$^{-2}$ K$^{-1}$ | -228.6 dB |
| Gravitational Constant | $G$ | 6.672 $\times$ 10$^{-11}$ m$^3$ kg$^{−1}$ s$^{−2}$ | |
| Earth's Mass | $M_E$ | 5.9742 $\times$ 10$^{24}$ kg | |
```python
c = 299792458
k = 1.38064852e-23
G = 6.672e-11
M_E = 5.9742e24
```
<br>
<div class="alert alert-danger">
<font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>ASSIGNMENT #1</u>: </font> Calculate Expected Range Resolution </b> </font>
<font face="Calibri" size="3">Based on the variables shown in Tables 1 and 2 you can calculate the expected range resolution <b>$\rho_r$</b> of this simulated SAR data set.
<u>Please answer the following questions</u>:
<ol>
<br>
<li><b>Question 1.1</b>: List the variables (with their values) from Tables 1 and 2 that are needed to calculate the range resolution <b>$\rho_r$</b>.</li>
<br>
<li><b>Question 1.2</b>: Provide the equation for calculating <b>$\rho_r$</b> and provide your calculated value in the units of [meters].</li>
</ol>
</font>
</div>
## 1.2 The Antenna and Its Radiation Pattern
<a id="section-1.2"></a>
<font face="Calibri" size="3">The radar antenna directs the radar signal toward a particular area on the ground. Generally, the larger the antenna, the more directed the energy is toward a particular direction. Most SAR radar antennas are rectangular planar antennas, though there are exceptions. A simple model for a planar antenna's power radiation pattern is a sin x / x function:
\begin{equation}
S(\theta_{az}, \theta_{el}; \theta_l, \theta_{sq}) =
\bigg [\frac{
\sin \pi\big (\frac{\theta_{az}-\theta_{sq}}{\theta_{L_a}}\big )
}
{
\pi \big (\frac{\theta_{az}-\theta_{sq}}{\theta_{L_a}}\big )
} \bigg ]^2
\bigg [\frac{
\sin \pi\big (\frac{\theta_{el}-\theta_{l}}{\theta_{W_a}}\big )
}
{
\pi\big (\frac{\theta_{el}-\theta_{l}}{\theta_{W_a}}\big )
} \bigg ]^2
\end{equation}
The "half-power beamwidth" of the antenna with respect to its Length (along velocity vector) and with respect to its Width (direction perpendicular to the velocity vector and to the off-nadir boresight)
\begin{equation}
\theta_{L_a} = 0.87 \frac{\lambda}{L_a}
\end{equation}
\begin{equation}
\theta_{W_a} = 0.87 \frac{\lambda}{W_a}
\end{equation}
At these angles, the power of the signal at this angular extent has been reduced half, or 3 dB. This is also called the 3dB beamwidth.
A SAR antenna points to one side of the flight track or the other, usually at an angle greater than 20$^\circ$ from nadir. This ensures a unique relationship between the time of a return and the distance from the spacecraft to the ground: if the energy from the radar illuminated both sides of the radar track, there would be a left-right ambiguity in range for a given time. In practice, the antenna sidelobes in the sin x / x pattern put some energy everywhere, but the farther off-nadir the antenna is pointed, the lower the energy from unwanted directions. Angles larger than 20$^\circ$ also help avoid excessive foreshortening of the observations. </font>
```python
theta_L_a = 0.886 * Lambda/L_a   # azimuth half-power beamwidth (rad)
theta_W_a = 0.886 * Lambda/W_a   # elevation half-power beamwidth (rad)
```
<font face="Calibri" size="3">We can see the values in degrees in these two dimensions:</flont>
```python
print(" Along track half-power beamwidth =","{:.2f}".format(theta_L_a * 180. / np.pi),"degrees")
print(" Elevation half-power beamwidth =","{:.2f}".format(theta_W_a * 180. / np.pi),"degrees")
```
```python
theta_az=np.linspace(-np.pi/32., np.pi/32., 400)
theta_el=np.linspace(-np.pi/8., np.pi/8., 400)+theta_l
Saz = (np.sinc((theta_az-theta_sq)/theta_L_a))**2
Sel = (np.sinc((theta_el-theta_l )/theta_W_a))**2
```
```python
def S_p(th_az, th_el):
    return (np.sinc((th_az-theta_sq)/theta_L_a))**2 * (np.sinc((th_el-theta_l)/theta_W_a))**2
Theta_az, Theta_el = np.meshgrid(theta_az,theta_el)
S = S_p(Theta_az, Theta_el)
```
```python
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(13, 5))
plt.subplot(1,2,1)
plt.plot(180.*theta_az/np.pi,Saz,label='az')
plt.plot(180.*theta_el/np.pi,Sel,label='el')
plt.legend(loc='best')
plt.title("Idealized 1-d El/Az \n Radiation Patterns of a Planar Antenna")
plt.xlabel("Beam Angles $(^\circ)$")
plt.ylabel("Power")
plt.subplot(1,2,2)
plt.contourf(180.*Theta_az/np.pi, 180.*Theta_el/np.pi, 10.*np.log10(S), 20, cmap='magma')
plt.colorbar(label='Power (dB)')
plt.title("Idealized 2-d \nRadiation Pattern of a Planar Antenna")
plt.xlabel("Along track Beam Angle $ (^\circ)$")
plt.ylabel("Elevation Beam Angle $ (^\circ)$")
#plt.subplots_adjust(left=-0.1,top=0.9)
```
## 1.3 Beam Extent and Swath
<a id="section-1.3"></a>
<font face="Calibri" size="3">From the elevation beamwidth and other geometric parameters described above, we can calculate the range $\rho$ and ground range $\rho_g$ where the boresight and the 3-dB beam edges intersect the flat Earth. Specifically, we define:
| Parameter | Symbol |
| --- | --- |
| Generic Range | $\rho $ |
| Generic Ground Range | $\rho_g$ |
| Range at Boresight | $\rho_l$ |
| Ground Range at Boresight | $\rho_{l,g}$ |
| Range at Near Beam Edge | $\rho_n$ |
| Ground Range at Near Beam Edge | $\rho_{n,g}$ |
| Range at Far Beam Edge | $\rho_f$ |
| Ground Range at Far Beam Edge | $\rho_{f,g}$ |
| Reference azimuth for calculations | $s_0$ |
| Reference range for calculations | $\rho_0$ |
</font>
```python
rho_l = h_sc / np.cos(theta_l)
rho_lg = h_sc * np.tan(theta_l)              # flat-earth ground range = h tan(theta)
rho_n = h_sc / np.cos(theta_l-theta_W_a/2)
rho_ng = h_sc * np.tan(theta_l-theta_W_a/2)
rho_f = h_sc / np.cos(theta_l+theta_W_a/2)
rho_fg = h_sc * np.tan(theta_l+theta_W_a/2)
rho_sw = rho_fg-rho_ng
Delta_rho = c / (2. * B_r)
Delta_rho_ng = Delta_rho / np.sin(theta_l-theta_W_a/2)
Delta_rho_fg = Delta_rho / np.sin(theta_l+theta_W_a/2)
n_rs=int(np.round(rho_sw/Delta_rho))
rho_v=np.linspace(rho_n,rho_f,n_rs)
s_0 = 0. # reference azimuth for defining calculations
rho_0 = rho_l
```
<font face="Calibri" size="3">From these ranges, we can calculate the swath extent in meters on the ground $\rho_{fg}-\rho_{ng}$.</font>
```python
print("Boresight range: ","{:.2f}".format(rho_l),"m")
print("Range swath: ","{:.2f}".format(rho_sw),"m")
```
<font face="Calibri" size="3">The along track beam extent on the ground in meters is given by $\rho \theta_{L_a}$, where $\rho$ varies across the swath. In the near range, the azimuth beam extent is</font>
```python
print("Near range azimuth beam extent: ","{:.2f}".format(rho_n * theta_L_a),"m")
```
<font face="Calibri" size="3">while in the far range, the azimuth beam extent is</font>
```python
print("Far range azimuth beam extent: ","{:.2f}".format(rho_f * theta_L_a),"m")
```
<font face="Calibri" size="3">We will use the far range azimuth beamwidth to define the simulation extent in azimuth. Let's specify an extent that is 3 beamwidths to get a number of full synthetic apertures.
| Parameter | Symbol |
| --- | --- |
| Along Track Position Half Beamwidth In Advance of $s_0$ | $s_{s,{\rm hb}} $ |
| Along Track Position Half Beamwidth After $s_0$ | $s_{e,{\rm hb}} $ |
| Along Track Position At Simulation Start | $s_{s,{\rm sim}}$ |
| Along Track Position at Simulation End | $s_{e,{\rm sim}}$ |
where
\begin{equation}
\begin{array}{lcl}
s_{s,{\rm hb}} & = & s_0 - \rho_f \theta_{L_a} / 2 \\
s_{e,{\rm hb}} & = & s_0 + \rho_f \theta_{L_a} / 2 \\
s_{s,{\rm sim}} & = & s_0 - 3 \rho_f \theta_{L_a} / 2 \\
s_{e,{\rm sim}} & = & s_0 + 3 \rho_f \theta_{L_a} / 2 \\
\end{array}
\end{equation}
</font>
```python
s_s_hb = s_0 - rho_f * theta_L_a / 2. # half beamwidth
s_e_hb = s_0 + rho_f * theta_L_a / 2. # half beamwidth
s_s_sim = s_0 - 3. * rho_f * theta_L_a / 2. # total of 3 beamwidths for simulation
s_e_sim = s_0 + 3. * rho_f * theta_L_a / 2. # total of 3 beamwidths for simulation
```
## 1.4 Phase and Doppler Frequency
<a id="section-1.4"></a>
<font face="Calibri" size="3">
Let's pick a bright point on the ground, say at $(\rho_0, s_0)$, or equivalently $(\rho_{0g}, s_0)$. As the spacecraft flies along track and observes the point, the distance from the spacecraft to the point is changing hyperbolically:
\begin{equation}
\rho(s;\rho_0,s_0) = \sqrt{(s-s_0)^2+\rho_0^2}
\end{equation}
</font>
<font face="Calibri" size="3">The phase of the wave that travels from the spacecraft to the ground point and back is $-\frac{4 \pi}{\lambda} \rho(s)$. Over the extent of time that this point is illuminated, the range at which the point will appear in the echo, and the phase of the point, will vary as plotted below. To make the plot we need to understand how to properly sample the function we are plotting. Since the functions are hyperbolic, the range and phase increase quasi-quadratically. The derivative of the phase is the frequency, and this then varies quasi-linearly. This implies that there is bandwidth associated with the received signal in azimuth, related to the fact that the radar is moving relative to the point on the ground, so there is a Doppler shift of the signal that varies as the azimuth aspect angle changes. We will see later that the Doppler bandwidth is given approximately by the velocity and the azimuth antenna length: $B_d = 2 v_{sc} / L_a$.</font>
```python
B_d = 2. * v_sc / L_a
```
```python
print("Doppler Bandwidth: ","{:.2f}".format(B_d),"Hz") # in Hz or cycles/second
```
<font face="Calibri" size="3">Therefore if we want to sample a signal properly, we need to sample at this frequency for complex signals, or twice for real signals according to the Nyquist criterion.
The azimuth aperture time for any target is related to the azimuth extent on the ground: $t_a = \rho * \theta_{L_a} / v_{sc}$.</font>
```python
t_af = rho_f * theta_L_a / v_sc
```
```python
print("Synthetic Aperture time in far range: ","{:.2f}".format(t_af),"sec")
```
<font face="Calibri" size="3">The time-bandwidth product gives the number of points needed to adequately represent the signal over this frequency range.</font>
```python
n_af = int(np.round(B_d * t_af))
```
<font face="Calibri" size="3">To examine the function, we need to pick a point for the point target. Let's assume $s_0=0$ and $\rho_0 = \rho_l$. We also remove the large offset phase $-4 \pi \rho_0 / \lambda$, since the absolute phase is difficult to measure and arbitrary.
\begin{equation}
\phi_{az}(s;\rho_0,s_0) = -\frac{4\pi}{\lambda} (\rho(s;\rho_0,s_0) - \rho_0) = -\frac{4\pi}{\lambda} (\sqrt{(s-s_0)^2+\rho_0^2} - \rho_0)
\end{equation}
Assuming $(s-s_0) \ll \rho_0$, we can expand the square root in a Taylor series to obtain
\begin{equation}
\phi_{az}(s;\rho_0,s_0) \approx -\frac{4\pi}{\lambda} \frac{1}{2}\frac{(s-s_0)^2}{\rho_0}
\end{equation}
which illustrates the quadratic nature of the phase to lowest order.
The spatial frequency in radians is then its derivative with respect to $s$:
\begin{equation}
\omega_{az}(s;\rho_0,s_0) = -\frac{4\pi}{\lambda} \frac{(s-s_0)}{\rho_0}
\end{equation}
or in cycles
\begin{equation}
f_{az}(s;\rho_0,s_0) = -\frac{2}{\lambda} \frac{(s-s_0)}{\rho_0}
\end{equation}
or in Hertz
\begin{equation}
f_{az,hz}(s;\rho_0,s_0) = -\frac{2 v_{sc}}{\lambda} \frac{(s-s_0)}{\rho_0}
\end{equation}
</font>
```python
s = np.linspace(s_s_hb, s_e_hb, n_af)
phi_az = - ( 4. * np.pi * (np.sqrt(np.square(s-s_0)+rho_0*rho_0) / Lambda) - 4. * np.pi * rho_0 / Lambda)
phi_az_approx = -4. * np.pi * np.square(s-s_0) /(2*Lambda*rho_0)
f_az_hz = - (2. * v_sc / Lambda) * (s-s_0) / rho_0
```
<font face="Calibri" size="3">In the plot on the left below, the exact and quadratic expressions (Eqs. 6 and 7) are plotted. At this scale, the exact and approximate curves are indistinquishable. The plot on the right plots the difference on a scale where the impact of the approximation can be seen. It is a small fraction of the wavelength over the synthetic aperture. </font>
```python
fig = plt.figure(figsize=(13, 6))
ax = fig.add_subplot(1,2,1)
ax.plot(s, phi_az, 'b', label="exact")
ax.plot(s, phi_az_approx, 'r', label="quadratic approximation")
ax.legend(loc='best')
ax.set_title("Along Track Phase History of an Illuminated Target")
ax.set_xlabel("Along track position, s (m)")
ax.set_ylabel("Phase (rad)")
ax = fig.add_subplot(1,2,2)
ax.plot(s, (phi_az-phi_az_approx)/(2.*np.pi))
ax.set_title("Along Track Phase History Error of an Illuminated Target")
ax.set_xlabel("Along track position, s (m)")
ax.set_ylabel("Phase Error (wavelengths)")
plt.tight_layout()
```
<font face="Calibri" size="3">The Doppler bandwidth would be the range of the frequency function over this azimuth extent.</font>
```python
fig = plt.figure(figsize=(13, 6))
plt.title("Doppler History of an Illuminated Target")
plt.xlabel("Along track position, s (m)")
plt.ylabel("Doppler Frequency (Hz)")
plt.plot(s, f_az_hz)
f_az_hz_bw = np.abs(f_az_hz[-1] - f_az_hz[0])
db_str = str(int(np.round(f_az_hz_bw)))
plt.text(-7500.,-500.,"Doppler Bandwidth = "+db_str,fontsize=16);
```
## 1.5 Azimuth Resolution of the Synthetic Aperture
<a id="section-1.5"></a>
<font face="Calibri" size="3">Given this bandwidth, what does this imply for resolution in azimuth? The time resolution is simply the reciprocal bandwidth: $ 1/f_{az,hz,bw}$, where $f_{az,hz,bw} = f_{az,hz}(s_{e,\rm hb}) - f_{az,hz}(s_{s,\rm hb})$. The spatial resolution would then be the velocity times this quantity: $ v_{sc}/f_{az,hz,bw}$.</font>
```python
print('Azimuth Resolution based on Doppler Bandwidth = ',np.round(100.*v_sc/f_az_hz_bw)/100.,' m')
```
<font face="Calibri" size="3">The theoretical resolution is typically quoted as $L_a/2$, half the antenna length in azimuth, independent of range and frequency. This can be seen by evaluating $f_{az,hz,bw}$ as follows:
\begin{eqnarray}
f_{az,bw} &=& | f_{az}(s_{e,\rm hb}) - f_{azz}(s_{s,\rm hb}) |\\
&=& \frac{2}{\lambda} \frac{(s_{e,\rm hb}-s_{s,\rm hb})}{\rho_0}\\
&=& \frac{2}{\lambda} \theta_{L_a} = -\frac{2}{\lambda} * 0.88 \frac{\lambda}{L_a}
&=& 0.88 \frac{2}{L_a}
\end{eqnarray}
</font>
```python
print('Azimuth Resolution from calculation = ',np.round(100.*L_a/1.77)/100.,' m') # 1.77 = 2 * 0.886
```
<font face="Calibri" size="3">The "L/2" azimuth resolution rule is an approximation, that will depend on the exact shape of the antenna pattern. But it is a good first approximation.</font>
## 1.6 The Radar Equation
<a id="section-1.5"></a>
<font face="Calibri" size="3">Before we proceed with simulating an image with point targets, let's take a small diversion to develop an intuition about imaging performance for a radar with particular characteristics. This is typically accomplished through the radar equation, which calculates the signal-to-noise ratio of a system for a given scatterer on the ground. In this exercise, we'll consider a corner reflector as a scatterer.
The Radar Equation can be expressed as follows:
\begin{equation}
P_R = P_T \cdot G_T \cdot \frac{1}{4 \pi \rho^2} \cdot \sigma \cdot \frac{1}{4 \pi \rho^2} \cdot A_a \cdot \epsilon
\end{equation}
where the terms are defined as follows:
| Parameter | Symbol |
| --- | --- |
| Received Power | $P_R$ |
| Transmitted Power | $P_T$ |
| Antenna Transmit Gain | $G_T$ |
| Range of Target | $\rho$ |
| Radar Cross Section of Target | $\sigma$ |
| Receive Antenna Area |$ A_a$ |
| System Losses Fudge Factor | $\epsilon$ |
Reading from left to right, this equation follows the transmitted signal through its echo path. The antenna radiates a total power of $P_T$ from the aperture. That power is directed into the antenna beam by virtue of the antenna's size, and therefore has directivity: a concentration, or gain $G_T$, in a particular direction. This power then propagates a distance $\rho$, spreading out over a spherically shaped surface within the beam. At the target, the power density then is
$P_T \cdot G_T \cdot \frac{1}{4 \pi \rho^2}$.
The target presents a reflecting surface which is characterized by its radar cross section. The radar cross section is the effective area that would lead to the observed total power reflected from a target hit with an incident power density. Thus, the reflected power at the target is
$P_T \cdot G_T \cdot \frac{1}{4 \pi \rho^2} \cdot \sigma$.
This power then propagates back to the radar as a spherical wave, such that the power density at the radar is
$P_T \cdot G_T \cdot \frac{1}{4 \pi \rho^2} \cdot \sigma \cdot \frac{1}{4 \pi \rho^2}. $
This power density hits the receive aperture, which collects the power over its area $A_a$. The system losses fudge factor accounts for losses of power in the receive chain before the signal is detected, and may include transmit chain losses as well, depending on the definition of $P_T$ (is it the radiated power, or the power generated by the amplifiers, which then needs to work its way through the antenna system to be radiated?). Typical system losses include circulator losses, radiation inefficiency of the antenna, and antenna feed losses. These losses can amount to several factors of 2 in the overall power generated by the radar power system. (There are also inefficiencies in getting the power from the spacecraft power system to the radar, but those are not included here.)
To calculate the received power, we need to define a target. In this tutorial, we are looking at corner reflectors. A corner reflector has a radar cross section
\begin{equation}
\sigma_{cr} = \frac{4 \pi L_{cr}^4}{3\lambda^2}.
\end{equation}
We also need an expression for the gain of the antenna and the receive aperture size, which will be dependent on the look direction. The gain in the boresight direction $G_{Tl}$ is characterized by the beamwidths of the antenna:
\begin{equation}
G_{Tl} = \frac{4 \pi}{\theta_{L_a}\theta_{W_a}}.
\end{equation}
Off boresight, this gain will be reduced by the shape of the beam pattern on both transmit and on receive $S(\theta_{az}, \theta_{el}; \theta_l, \theta_{sq})^2$ (as opposed to just $S$).
\begin{equation}
G_T(\theta_{az}, \theta_{el}; \theta_l, \theta_{sq}) = G_{Tl} S^2(\theta_{az}, \theta_{el}; \theta_l, \theta_{sq}).
\end{equation}
</font>
```python
G_Tl = 4 * np.pi /(theta_L_a * theta_W_a)
```
```python
print ("Transmit antenna gain = ","{:.2f}".format(10.*np.log10(G_Tl)),"dB") # since all quantities are powers already, this is the power gain in dB.
```
<font face="Calibri" size="3">For a corner reflector target located on ground at the boresight angle, we can calculate the receive power:</font>
```python
sigma_cr = 4. * np.pi *L_cr**4/(3.*Lambda**2)
A_a = L_a * W_a
epsilon = 10.**(-5./10.) # assume 5 dB overall losses
P_R = P_T * G_Tl * (1./(4.*np.pi*rho_l**2)) * sigma_cr * (1./(4.*np.pi*rho_l**2)) * A_a * epsilon
```
```python
print ("Received power of corner reflector = ","{:.2f}".format(10.*np.log10(P_R)),"dB") # in dB
```
<font face="Calibri" size="3">In order for the instrument to detect such a small amount of power, the noise level of the system must be commensurately small. The noise of an electronic system is given by
\begin{equation}
P_N = k T_r B_r
\end{equation}
where $k$ is the Boltzmann constant, $T_r$ is the noise temperature of the radar, and $B_r$ is the bandwidth of the radar (Skolnik, Merrill I., Radar Handbook (2nd Edition). McGraw-Hill, 1990. ISBN 978-0-07-057913-2). The noise temperature is not necessarily the physical temperature. It is a combination of noise introduced by electron motion in electronics above absolute zero temperature and other noise sources. A reasonable noise temperature would be around 300 K.</font>
```python
P_N = k * T_r * B_r
```
```python
print ("Noise power = ","{:.2f}".format(10.*np.log10(P_N)),"dB")
```
```python
SNR = P_R/P_N
```
```python
print("SNR of corner reflector in raw data = ","{:.2f}".format(10.*np.log10(SNR)),"dB")
```
<font face="Calibri" size="3">It looks like the SNR of a bright point target is well below the noise floor in this radar, and that is in general true in the raw data. The signals from individual scatterers or resolution cells in the raw data are quite dim. It is not until we focus the image that we concentrate the energy into a single point and build adequate SNR.</font>
# 2.0 Simulating SAR data with point targets
## 2.1 Simulating the transmitted pulse
<a id="section-2"></a>
<a id="section-2.1"></a>
<font face="Calibri" size="3">Our radar will transmit pulses of energy at a pulse rate $f_r$ sufficient to sample the Doppler spectrum, the bandwidth of which was computed above. For our purposes, the pulse rate is set slightly higher than the Doppler bandwidth, which lowers aliasing of the energy outside this area of the spectrum. For any given pulse, we transmit a pulse of duration $\tau_r$, sweeping the frequency linearly, to generate a signal with the required bandwidth $B_r$. The time-bandwidth product determine the number of samples required in this complex signal. Note: in reality we transmit and receive real-valued waveforms as currents excited or detected on the antenna. However, radar systems are coherent by nature, and the received signals are typically converted in hardware or on the ground to complex-valued waveforms. For this tutorial, we imagine that the transmit waveform is complex for simplicity. The frequency-swept, or "chirp," waveform can be expressed as
$C_r(t) = e^{i \phi_r(t)} {\rm rect}\big(\frac{t}{\tau_r}\big)$ where $\phi_r(t) = \pi \frac{B_r}{\tau_r} t^2 = \pi \frac{B_r}{\tau_r} (2\rho/c)^2 = 4 \pi \frac{B_r}{c^2\tau_r} \rho^2$, and the rect function is 1 on the interval 0 to 1, and 0 elsewhere. First, let's define these functions:</font>
```python
def rect(x):
    return np.abs(x) <= 0.5

def win(x):
    return 1.  # rectangular window
    # return 0.54 - 0.46 * np.cos(2.*np.pi*(x+0.5))  # Hamming window on [-0.5,0.5], applied to suppress sidelobes

def C_r_r(r):
    phi_r_r = 4. * np.pi * B_r / (c**2 * tau_r) * r**2
    return (np.cos(phi_r_r) + 1j * np.sin(phi_r_r)) * rect((r - c*tau_r/4.)/(c*tau_r/2.)) * win((r - c*tau_r/4.)/(c*tau_r/2.))
```
<font face="Calibri" size="3">Now let's evaluate the chirp over the pulse length and examine some of its properties. The required number of samples is again the time-bandwidth product of the chirp.</font>
```python
n_r = int(np.round(B_r * tau_r))
t_c = np.linspace(0.,tau_r,2*n_r) # create the arrays with twice the required points so real functions don't alias
rho_c = c * t_c / 2.
C_r = C_r_r(rho_c)
phi_r = 4. * np.pi * B_r / (c**2 * tau_r) * rho_c**2
```
```python
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1,3,1)
ax.plot(rho_c,phi_r)
ax.set_title("Phase of the \n Complex Chirp Signal")
ax.set_xlabel("Range Distance along pulse (m)")
ax.set_ylabel("Phase (Radians)")
ax = fig.add_subplot(1,3,2)
ax.plot(rho_c,C_r.real)
ax.set_title("Real Part of the \n Complex Chirp Signal")
ax.set_xlabel("Range Distance along pulse (m)")
ax.set_ylabel("Signal Magnitude")
ax = fig.add_subplot(1,3,3)
FC_r = np.fft.fft(C_r)
freq = np.fft.fftfreq(FC_r.shape[-1])
nplts = int(np.round(FC_r.shape[-1]/2)) # only need the positive frequencies since it is a complex signal.
ax.plot(freq[0:nplts]*2.*B_r/1.e6, np.absolute(FC_r[0:nplts]))
ax.set_title("Spectrum of \n Complex Chirp Signal")
ax.set_xlabel("Frequency (MHz)")
ax.set_ylabel("Magnitude")
plt.tight_layout()
plt.show();
```
<font face="Calibri" size="3">Feel free to play with the plotting limits of the array to explore the shape of the curve. Note that the complex chirp as created with twice the number of needed samples so that plotting the real or imaginary part, which is sinusoidal with increasing frequency, looks properly sampled in the plot. In reality, it is well sampled in the complex domain with half the point density.</font>
## 2.2 Simulating the Received Echoes
<a id="section-2.2"></a>
<font face="Calibri" size="3">
Now we have a chirp signal, and when it encounters our corner reflector target, it will reflect some energy back to the radar. The echo signature of a corner reflector will be a delayed version of the chirp, with amplitude adjusted based on the shape of the antenna pattern and the losses as calculated in the radar equation above, and a phase shift proportional to the round-trip distance $2 \rho$. Specifically, for a corner reflector at $(s_{cr},\rho_{cr})$, the received echo will be:
\begin{equation}
E_{cr}(s,\rho; s_{cr},\rho_{cr}) = \sqrt{G_T\big(\theta_{az,cr}(s;s_{cr},\rho_{cr}), \theta_{el,cr}(\rho_{cr}); \theta_{l}, \theta_{sq}\big )} e^{-i 4\pi(\rho_{\rm sc-cr}-\rho_{l})/\lambda}C_r\big(2(\rho-\rho_{\rm sc-cr})/c\big)
\end{equation}
where
$\rho_{\rm sc-cr}(s; s_{cr},\rho_{cr}) = \sqrt{(s-s_{cr})^2+\rho_{cr}^2}$ is the distance from the spacecraft to the corner reflector,
$\theta_{az,cr} = \sin^{-1} \frac{s-s_{cr}}{\rho_{\rm sc-cr}}$
and
$\theta_{el,cr} = \cos^{-1}\frac{h_{sc}}{\rho_{cr}}$
and we have arbitrarily removed a large phase offset $4 \pi \rho_l / \lambda$ to make the phase numbers more manageable. The rect function indicates that the chirp extent only covers the range defined by the pulse length.</font>
```python
def E_cr(sv, rhov, s_cr, rho_cr):
    rho_sc_cr = np.sqrt((sv-s_cr)**2 + rho_cr**2)
    th_el_cr = np.arccos(h_sc/rho_cr)
    th_az_cr = np.arcsin((sv-s_cr)/rho_sc_cr)
    return np.sqrt(P_R) * S_p(th_az_cr,th_el_cr) * np.exp(-1j * 4. * np.pi * (rho_sc_cr-rho_l)/Lambda) * C_r_r(rhov-rho_sc_cr)
```
<font face="Calibri" size="3">To simulate the image, we must specify the location of the corner reflectors. First, we define the array of locations: 3 reflectors, each at different ranges and along-track positions. The along track positions will be at $s_0$ and a half beamwidth before and after $s_0$. The range position will be at the boresight range $\rho_l$, and an eighth of the swath before and after $\rho_l$. For the purpose of speed and flexibility, the simulation allows using any or all of the three corner reflectors through an index vector.
$P_{\rm cr} = \big [\big (s_{s,{\rm hb}},\rho_l-\frac{\rho_f-\rho_n}{8}\big),(s_0,\rho_l),\big(s_{e,{\rm hb}},\rho_l+\frac{\rho_f-\rho_n}{8}\big) \big ]$
</font>
```python
S_cr = np.array([s_s_hb,s_0,s_e_hb],dtype='float64')
Rho_cr = np.array([rho_l-(rho_f-rho_n)/8.,rho_l,rho_l+(rho_f-rho_n)/8.],dtype='float64')
Ind_cr=[1]
```
<font face="Calibri" size="3">Now it is time to define the grid for computing the simulated data; this will be over a portion range that covers the corner reflectors (to save computation time) and the along track extend define $s_{s,{\rm sim}}$ and $s_{s,{\rm sim}}$, which is specified in terms of the number of along-track beamwidths. The along track sample spacing is nominally set by the PRF $f_p$ as $\Delta s = v_{sc}/f_p$. The range spacing is nominally set by the range bandwidth $\Delta\rho = c / 2B_R$. For both dimensions, we allow an oversampling factor so that we can easily examine the results without interpolating the results.</font>
```python
s_ov = 1.
rho_ov = 4.
Delta_s = v_sc/f_p
n_s_sim = int(np.round((s_e_sim-s_s_sim)*s_ov/Delta_s))
s_sim = np.linspace(s_s_sim, s_e_sim, n_s_sim)
rho_mean = (rho_f+rho_n)/2.
rho_s_sim = rho_mean - (rho_f-rho_n)/4. # central half of swath
rho_e_sim = rho_mean + (rho_f-rho_n)/4. # central half of swath
#this is the default for range extent. We can narrow further to just surrounding the CRs
Rho_cr_min = rho_f
Rho_cr_max = rho_n
for i in range(len(Ind_cr)):
    Rho_cr_min = np.minimum(Rho_cr[Ind_cr[i]], Rho_cr_min)
    Rho_cr_max = np.maximum(Rho_cr[Ind_cr[i]], Rho_cr_max)
rho_s_sim = Rho_cr_min - 4.* c* tau_r/2.
rho_e_sim = Rho_cr_max + 4.* c* tau_r/2.
n_rho_sim = int(np.round((rho_e_sim-rho_s_sim)*rho_ov/Delta_rho))
rho_sim = np.linspace(rho_s_sim, rho_e_sim, n_rho_sim)
S_sim, Rho_sim = np.meshgrid(s_sim,rho_sim)
```
<font face="Calibri" size="3">Let's look at the corner reflectors on the grid:</font>
```python
fig = plt.figure(figsize=(10, 7))
plt.scatter(S_cr, Rho_cr)
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Corner Reflector Locations")
plt.xlim(s_s_sim,s_e_sim)
plt.ylim(rho_s_sim,rho_e_sim);
plt.tight_layout()
```
<font face="Calibri" size="3">With the grid defined, we can simply evaluate $E_{cr}$ over the grid.</font>
```python
%%time
E_cr_sim = np.zeros(S_sim.shape,dtype=np.complex128)
print ("Field initialized")
for i in range(len(Ind_cr)):
    E_cr_sim += E_cr(S_sim, Rho_sim, S_cr[Ind_cr[i]], Rho_cr[Ind_cr[i]])
    print("Completed CR", i)
```
```python
fig = plt.figure(figsize=(12, 6))
#plt.contourf(S_sim, Rho_sim, 10.*np.log10(np.absolute(E_cr_sim)), 20, cmap='RdGy')
#plt.pcolormesh(S_sim, Rho_sim, 10.*np.log10(np.absolute(E_cr_sim)),cmap='RdGy')
extent = [s_s_sim, s_e_sim, rho_s_sim, rho_e_sim]
plt.imshow(20.*np.log10(np.abs(E_cr_sim)), cmap='magma', extent=extent, origin='lower', aspect='auto')  # 20*log10 of amplitude = power in dB
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Magnitude of Simulated Echoes");
plt.tight_layout()
```
<font face="Calibri" size="3">Now let's look at these echoes in the presence of the thermal noise signature, with a field with noise power $k T B_r$.
</font>
```python
np.random.seed(1)
E_cr_sim_w_noise = E_cr_sim + np.random.normal(loc=0.,scale=np.sqrt(P_N/2.),size=S_sim.shape) + 1j * np.random.normal(loc=0.,scale=np.sqrt(P_N/2.),size=S_sim.shape)
print ("Random field calculated")
```
```python
fig = plt.figure(figsize=(12, 6))
plt.imshow(20.*np.log10(np.abs(E_cr_sim_w_noise)), cmap='magma', extent=extent, origin='lower', aspect='auto')
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Magnitude of Simulated Echoes with Noise");
plt.tight_layout()
```
<font face="Calibri" size="3">As can be seen, these signals are buried in the noise. They don't pop out until we focus the image, taking advantage of the redundancy in observing any single target over a wide beam in azimuth, and a wide pulse in range. So it is time to focus the data.</font>
# 3.0 Focusing SAR Data - Range
<a id="section-3"></a>
<font face="Calibri" size="3">The straightforward approach to focusing data is to develop a means for any given target to compensate the phase variations in range and azimuth induced by the range chirp and the azimuth hyperbolic range variability and then sum up that energy. For the isolated corner reflector target responses shown above this is easy to visualize, in that we are plotting the magnitude. If the phase for each of these points was a constant value, it is easy to see that integrating the points in the non-zero areas would give a big integration gain. Of course if the phase were constant everywhere, one could not distinguish one corner reflector from any other, so there would be no integration advantage. Because the phase history in range and azimuth is unique for each scatterer on the ground, the process of compensating the phase for that specific point, then integrating the energy localizes the return from that specific point. This can be partitioned into the phase compensation and integration in range, called "range compression," or "range correlation," followed by azimuth processing.</font>
## 3.1 Range Correlation - time domain
<a id="section-3.1"></a>
<font face="Calibri" size="3">
The range signal that matches the received echo of a corner reflector is simply
\begin{equation}
C_r\big(2(\rho-\rho_{\rm sc-cr})/c\big) = e^{i 4 \pi \frac{B_r}{c^2\tau_r} (\rho-\rho_{\rm sc-cr} )^2} {\rm rect}\big(\frac{\rho-\rho_{\rm sc-cr}}{c \tau_r/2}\big)
\end{equation}
which is the same as the received echo $E_{cr}(s,\rho; s_{cr}, \rho_{cr})$ but without the propagation-related amplitude scale factor and phase components of the signal.
To recover the signal in range as a point, we would compute the conjugate function and integrate for each range point
\begin{equation}
E_{cr,rc}(s,\rho; s_{cr}, \rho_{cr}) = \displaystyle\int E_{cr}(s,\rho'; s_{cr}, \rho_{cr}) C^*_r(s,\rho'+\rho; s_{cr}, \rho_{cr}) d\rho'
\end{equation}
This is by definition the cross correlation of these two functions. For a corner reflector, this correlation integral can be evaluated as
$
\begin{array}{lcl}
E_{cr,rc} & = & \sqrt{G_T} e^{-i 4\pi(\rho_{\rm sc-cr}-\rho_{l})/\lambda} \displaystyle\int C_r(s,\rho'; s_{cr}, \rho_{cr}) C^*_r(s,\rho'+\rho; s_{cr}, \rho_{cr}) d\rho' \\
& = & \sqrt{G_T} e^{-i 4\pi(\rho_{\rm sc-cr}-\rho_{l})/\lambda} \displaystyle\int e^{i 4 \pi \frac{B_r}{c^2\tau_r} (\rho'-\rho_{\rm sc-cr} )^2} {\rm rect}\big(\frac{\rho'-\rho_{\rm sc-cr}}{c \tau_r/2}\big) e^{-i 4 \pi \frac{B_r}{c^2\tau_r} (\rho'+\rho-\rho_{\rm sc-cr} )^2} {\rm rect}\big(\frac{\rho'+\rho-\rho_{\rm sc-cr}}{c \tau_r/2}\big) d\rho' \\
& = & \sqrt{G_T} e^{-i 4\pi(\rho_{\rm sc-cr}-\rho_{l})/\lambda} e^{-i 4 \pi \frac{B_r}{c^2\tau_r} \rho^2} \displaystyle\int e^{-i 8 \pi \frac{B_r}{c^2\tau_r} \rho (\rho'-\rho_{\rm sc-cr})} {\rm rect}\big(\frac{\rho'-\rho_{\rm sc-cr}}{c \tau_r/2}\big) {\rm rect}\big(\frac{\rho'+\rho-\rho_{\rm sc-cr}}{c \tau_r/2}\big) d\rho'
\end{array}
$
</font>
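<font face="Calibri" size="3">Before evaluating the correlation numerically, it is useful to know what to expect. For a large time-bandwidth product, the remaining integral is well approximated by a sinc-shaped peak with first nulls at $\pm c/(2 B_r) = \pm\Delta\rho$ from the target range. The sketch below plots this idealized response; it assumes the standard linear-FM matched-filter approximation rather than evaluating the integral exactly.</font>
```python
# Sketch of the idealized compressed response of a linear-FM chirp
# (assumption: large time-bandwidth product, so the matched-filter output
# envelope is approximately sinc with first nulls at +/- c/(2 B_r)).
rho_fine = np.linspace(-5.*Delta_rho, 5.*Delta_rho, 1001)
ideal = np.abs(np.sinc(rho_fine/Delta_rho))   # np.sinc(x) = sin(pi x)/(pi x)
plt.figure(figsize=(8, 4))
plt.plot(rho_fine, 20.*np.log10(ideal + 1.e-12))
plt.xlabel("Range offset (m)")
plt.ylabel("Relative Power (dB)")
plt.title("Idealized Chirp Compression Response (sinc model)");
```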
```python
%%time
n_r = int(np.round(rho_ov * B_r * tau_r))
rho_c_ov = np.linspace(0.,c*tau_r/2.,n_r) # properly oversample to match simulation oversampling
C_r_ref = C_r_r(rho_c_ov) # compute the properly oversampled reference function
range_shape=np.correlate(E_cr_sim[:,0],C_r_ref,mode='valid').shape[0] # perform one correlation to determine the length of the output
E_cr_rc = np.zeros((range_shape,E_cr_sim.shape[1]),dtype=np.complex128) # initialize
for i in range(E_cr_sim.shape[1]): # correlate
    E_cr_rc[:,i] = np.correlate(E_cr_sim[:,i],C_r_ref,mode='valid')
```
<font face="Calibri" size="3">Here is the range-correlated signal over the simulation domain.</font>
```python
n_rho_rc = E_cr_rc.shape[0]
rho_s_rc = rho_s_sim
rho_e_rc = rho_sim[n_rho_rc-1]
extent = [s_s_sim, s_e_sim, rho_s_rc, rho_e_rc]
fig = plt.figure(figsize=(8, 6))
plt.imshow(20.*np.log10(np.abs(E_cr_rc)), cmap='magma', extent=extent, origin='lower', aspect='auto',vmax=-100.,vmin=-200.)
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Range Compressed Echoes - Using Time-domain Correlation")
plt.colorbar(label='Power (dB)');
```
<font face="Calibri" size="3">Let's look at the range correlated output centered on the along-track position of the first corner reflector in the simulation list (may be different from the first defined depending on the index array above).</font>
```python
ind_s_cr=int(np.round((S_cr[Ind_cr[0]]-s_s_sim)*s_ov/Delta_s))
ind_rho_cr=int(np.round((Rho_cr[Ind_cr[0]]-rho_s_sim)*rho_ov/Delta_rho))
E_cr_rc_1rl = E_cr_rc[:,ind_s_cr]
```
```python
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1,2,1)
ax.plot(rho_sim[0:E_cr_rc_1rl.shape[0]],20.*np.log10(np.abs(E_cr_rc_1rl)))
ax.set_title("Range Compressed Signal - One Range Line")
ax.set_xlabel("Range (m)")
ax.set_ylabel("Power")
ax = fig.add_subplot(1,2,2)
sr=ind_rho_cr-50
er=ind_rho_cr+50
ax.plot(rho_sim[sr:er],20.*np.log10(np.abs(E_cr_rc[sr:er,ind_s_cr]))-np.max(20.*np.log10(np.abs(E_cr_rc[sr:er,ind_s_cr]))))
ax.set_title("Range Compressed Signal - One Range Line - One CR")
ax.set_xlabel("Range (m)")
ax.set_ylabel("Signal Power Relative to Peak")
plt.show();
plt.tight_layout()
```
## 3.2 Range Correlation - frequency domain
<a id="section-3.2"></a>
<font face="Calibri" size="3">
This can also be accomplished with FFT-based circular convolution, and it runs considerably faster. To accomplish this most straightforwardly, we can create a version of the chirp that is the same length as the range vector; then when we take the FFT, both will be the same length. Subscript "rl" stands for "range line." Subscript "fd" stands for "frequency domain." First compute the reference function's spectrum:</font>
```python
C_r_ref_rl = np.zeros(n_rho_sim) + 1j *np.zeros(n_rho_sim)
C_r_ref_rl[0:n_r] = C_r_ref
C_r_REF_rl = np.conjugate(np.fft.fft(C_r_ref_rl))
freq = np.fft.fftfreq(C_r_REF_rl.shape[-1])
fig = plt.figure(figsize=(8, 5))
plt.plot(freq,np.absolute(C_r_REF_rl))
plt.xlabel("Frequency (inverse samples)")
plt.ylabel("Spectral Power")
plt.title("Power of Range Reference Function Spectrum");
```
<font face="Calibri" size="3">Now perform the correlation through circular convolution in the frequency domain.</font>
```python
%%time
E_cr_rc_fd = np.zeros((E_cr_sim.shape),dtype=np.complex128)
for i in range(E_cr_sim.shape[1]):
    E_cr_rc_fd[:,i] = np.fft.ifft(np.fft.fft(E_cr_sim[:,i])*C_r_REF_rl)
```
```python
plt.style.use('seaborn-whitegrid')
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
extent = [s_s_sim, s_e_sim, rho_s_sim, rho_e_sim]
plt.imshow(20.*np.log10(np.abs(E_cr_rc_fd)), cmap='magma', extent=extent, origin='lower', aspect='auto',vmax=-100.,vmin=-200.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Range Compressed Echoes \n Using FFT Correlation");
plt.subplot(1,2,2)
temp = 20.*np.log10(np.abs(E_cr_rc_fd[sr:er,int(np.round(E_cr_rc_fd.shape[1]/2))]))
plt.plot(rho_sim[sr:er],temp - np.max(temp))
plt.title("Range Compressed Signal \n One Range Line - One CR - Using FFT Correlation")
plt.xlabel("Range (m)")
plt.ylabel("Signal Power Relative to Peak");
plt.tight_layout()
#plt.show();
figname = "RangeCompressed_FFT.png"
plt.savefig(figname, dpi=300, transparent=False)
```
<br>
<div class="alert alert-danger">
<font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>ASSIGNMENT #2</u>: </font> Discuss Range Compression Result </b> </font>
<font face="Calibri" size="3"><u>Answer the following questions regarding the range-focused data set</u>:
<ol>
<br>
<li><b>Question 2.1</b>: Download and present this figure in your document. Provide a Figure caption describing what is shown in the two panels of this figure.</li>
<br>
<li><b>Question 2.2</b>: The range compressed echo in the left panel shows how the range to the target changes significantly as the sensor passes the target along its orbit (range cell migration). Please zoom into the figure and provide a rough estimate of the range cell migration within the main beam of the antenna (the main beam extends roughly from along-track position -15 km to along-track position +15 km). Please provide the range cell migration in units of [meters].</li>
<br>
<li><b>Question 2.3</b>: Measure the achieved range resolution by zooming into the panel on the right and measuring the width of the focused peak at the -3 dB power level. Please provide your estimate of the range resolution in units of [meters].</li>
</ol>
</font>
</div>
# 4.0 Focusing SAR Data - Azimuth
<a id="section-4"></a>
<font face="Calibri" size="3">
Now that we understand the range correlation, it is time to do the same thing in the along-track, or azimuth, direction. The complication in azimuth is that each target on the ground expresses its reflected energy at a different range in each pulse. This can be seen easily in the 2-d plots above, where the bright return in the range compressed data from a single corner reflector "migrates" through range as a function of azimuth position, an effect called range migration. This is typically addressed in one of two ways:
1. Correlate each point on the ground with its exact hyperbolic replica, calculable from the knowledge of the radar motion. This is called back-projection or time-domain processing, and can be quite computationally expensive.
2. Approximate the time-domain approach by working in the frequency domain to compensate for the migration, then perform a circular convolution.
We'll take these two approaches one at a time, building up the correlation-based processing step by step, starting from simple time-domain approximations, to illustrate the impacts of the approximations.
</font>
## 4.1 Azimuth reference function
<a id="section-4.1"></a>
<font face="Calibri" size="3">
First let's look at the signal in azimuth at the peak of the range response of the first corner reflector.</font>
```python
rho_ind_cr = int(np.round((Rho_cr[Ind_cr[0]]-rho_s_sim)*rho_ov/Delta_rho))
```
```python
plt.figure(figsize=(13, 6))
plt.title("Range Compressed Signal - One Azimuth Line - One CR")
plt.xlabel("Range (m)")
plt.ylabel("Signal Power")
plt.plot(s_sim,20.*np.log10(np.abs(E_cr_rc[rho_ind_cr,:])));
plt.tight_layout()
```
<font face="Calibri" size="3">The power of the received signal from a corner reflector in azimuth should follow the antenna pattern, but instead the power at a fixed range has a plateau around the corner reflector, then diminishes in a oscillatory fashion. These oscillations are a consequence of the fact that the range from the spacecraft to the corner reflector is changing with $s$. When the range change is such that the peak of the response migrates out of the range bin, we will begin to see the sidelobes of the range response. The observed null would occur at the $s$ location at which the range changes by the distance in range to the first null of the range sidelobes, which is $\Delta\rho$. Thus
$s_{null} = \sqrt{(\rho_{cr}+\Delta\rho)^2 - \rho^2_{cr}}$
</font>
```python
s_null = np.sqrt((Rho_cr[Ind_cr[0]]+Delta_rho)**2-(Rho_cr[Ind_cr[0]])**2)
print('Azimuth null position due to range migration =',np.round(100.*s_null)/100.,'m')
```
<font face="Calibri" size="3">which is about where it is observed in the figure above. Let's instead track the expected location of the peak in azimuth. This is accomplished by noting that
$ \rho(s) = \sqrt{(s-s_{cr})^2+\rho^2_{cr}}$
We can use this to look up the range for any given $s$ in the range compressed data. The left panel below shows the range hyperbola as a function of along-track position. The central panel then shows the signal power in the range compressed signal along this hyperbola, which now looks like the antenna pattern as one would expect, indicating we are tracking the range migration well. The phase along this curve should vary hyperbolically across azimuth. The right panel plots this phase; it looks like a wrapped hyperbolic function, so all is well. This phase will be the basis for the azimuth reference function.</font>
```python
fig = plt.figure(figsize=(13, 5))
ax = fig.add_subplot(1,3,1)
rho_of_s = np.sqrt((s_sim-S_cr[Ind_cr[0]])**2 + Rho_cr[Ind_cr[0]]**2)
ax.plot(s_sim,rho_of_s)
ax.set_title("Range as a function of azimuth for \n First CR")
ax.set_xlabel("Azimuth (m)")
ax.set_ylabel("Range (m)")
ax = fig.add_subplot(1,3,2)
ind_rho_of_s = np.round((rho_of_s-rho_s_sim)*rho_ov/Delta_rho).astype(int)
E_cr_rc_1az = np.zeros(n_s_sim, dtype=np.complex128)
for i_s in range(n_s_sim):
    E_cr_rc_1az[i_s] = E_cr_rc[ind_rho_of_s[i_s], i_s]
ax.plot(s_sim,20.*np.log10(np.abs(E_cr_rc_1az)))
ax.set_title("Range Compressed Signal \n One Azimuth Line Following range \n One CR")
ax.set_xlabel("Azimuth (m)")
ax.set_ylabel("Signal Power")
ax = fig.add_subplot(1,3,3)
ss = int(n_s_sim/2)-100
se = int(n_s_sim/2)+100
ax.set_title("Range Compressed Signal \n One Azimuth Line Following range \n One CR")
ax.set_xlabel("Azimuth (m)")
ax.set_ylabel("Phase (rad)")
ax.plot(s_sim[ss:se],np.arctan2(np.imag(E_cr_rc_1az[ss:se]),np.real(E_cr_rc_1az[ss:se])));
plt.tight_layout()
```
## 4.2 Azimuth Focusing - time domain
### 4.2.1 Coarsest Approximation: Straight Time-domain Correlation with a Constant Reference Function
<a id="section-4.2"></a>
<font face="Calibri" size="3">
The idea here is to assume that the azimuth reference function is as simple and constant as the range reference function. In this case, we can just perform a simple time domain correlation in azimuth. This ignores range migration, and the variation in range migration magnitude as a function of range. For small synthetic apertures and coarse range resolution, this may be adequate. Let's see how bad it can be. Let's use the mid-range as our reference and calculate the azimuth response there over the synthetic aperture. We'll then use that single function to correlate each of the range bins in the simulated range compressed pulse sequence and see what happens.
</font>
```python
%%time
s_s_sa = rho_l * np.tan(theta_sq-theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
s_e_sa = rho_l * np.tan(theta_sq+theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
n_s_sa = np.round((s_e_sa-s_s_sa)* s_ov/Delta_s).astype(int)
s_sa = np.linspace(s_s_sa,s_e_sa,n_s_sa)
rho_sa = np.sqrt(rho_l**2+(s_sa)**2)
C_az_ref = np.exp(-1j*4.*np.pi*rho_sa/Lambda) # replica of the observed phase history (np.correlate conjugates this internally)
az_shape = np.correlate(E_cr_rc[0,:], C_az_ref, mode='full').shape[0] # perform one correlation to determine the length of the output
E_cr_rcac = np.zeros((E_cr_rc.shape[0], az_shape), dtype=np.complex128) # initialize
for i in range(E_cr_rc.shape[0]): # correlate each range line against the single reference
    E_cr_rcac[i,:] = np.correlate(E_cr_rc[i,:], C_az_ref, mode='full')
```
```python
# starting s of correlation array will be extended by half the azimuth reference function length
ind_s_cr_ac=int(np.round((S_cr[Ind_cr[0]]-s_s_sim)*s_ov/Delta_s))+int(np.round(n_s_sa/2))
ind_rho_cr_ac=ind_rho_cr
```
```python
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
extent = [s_s_sim-Delta_s*n_s_sa/2./s_ov, s_e_sim+Delta_s*n_s_sa/2./s_ov, rho_s_rc, rho_e_rc]
plt.imshow(20.*np.log10(np.abs(E_cr_rcac)), cmap='magma', extent=extent, origin='lower', aspect='auto')
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Image - Using Correlation");
plt.subplot(1,2,2)
plt.title("Power of Image - Using Correlation - Zoom")
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.imshow(20.*np.log10(np.abs(E_cr_rcac[ind_rho_cr_ac-250:ind_rho_cr_ac+250,ind_s_cr_ac-500:ind_s_cr_ac+500])), cmap='magma', origin='lower', aspect='auto')
plt.colorbar(label='Power (dB)');
#plt.show();
plt.tight_layout()
```
<font face="Calibri" size="3">Not exactly a perfect corner reflector response, but energy was certainly concentrated. Note also the extra energy focused at 1/4 and 3/4 of the distance across the image. These are azimuth ambiguities, where the energy present outside the main lobe of the antenna is focused at an amplitude attenuated by the antenna pattern. These should occur at the azimuth location where the next stationary phase point is in the aliased signal, which is dependent on the exact sampling rate relative to the phase rate of the signal. Their appearance at ~ +/- 21 km is about right.</font>
### 4.2.2 Next Coarsest Approximation: Straight Time-domain Correlation with a Range-variable Reference Function
<font face="Calibri" size="3">
Here we emphasize the notion that the reference function varies with range. Even though we are not taking into account range migration, we have a better match to the phase at a given range. We calculate the range-variable reference function as a matrix and keep it handy for the circular convolution step to come. This is a little tricky, since the limits of the synthetic aperture change with range and we want the correlations to be consistently aligned across range. So we must first create a reference array wide enough for the longest synthetic aperture, then populate it consistently across range.
The synthetic aperture extents vary with range, and need to take into account any azimuth squint. For a point at $(s_{im},\rho_{im})$ observed with squint $\theta_{sq}$, the range of closest approach of the spacecraft when the boresight intersects this point is given by
$ \rho_{ca} = \rho_{im} \cos\theta_{sq}$
and the along-track position of this point relative to closest approach is
$s_{ca} = \rho_{ca} \tan \theta_{sq}$
The limits of the synthetic aperture then are given by the angle subtended around this squint angle:
$s_{s,sa} = \rho_{ca} \tan (\theta_{sq} - \theta_{L_a}/2) \qquad s_{e,sa} = \rho_{ca} \tan (\theta_{sq} + \theta_{L_a}/2)$
$\rho_{s,sa} = \rho_{ca} / \cos(\theta_{sq}-\theta_{L_a}/2) \qquad \rho_{e,sa} = \rho_{ca} / \cos(\theta_{sq}+\theta_{L_a}/2)$
With these limits, the task is now to fill an array with a full set of range-dependent reference functions. The tricky part is calculating the functions on a regular grid with limits that vary with range. Consequently, there is a lot of indexing and limit checking.
</font>
```python
# calculate closest approach range and azimuth position for squinted geometry
rho_rc = rho_sim[0:n_rho_rc]
rho_ca = rho_rc * np.cos(theta_sq)
s_ca = rho_ca * np.tan(theta_sq)
#define the synthetic aperture extents across range
s_s_sa_v_rho = rho_ca * np.tan(theta_sq-theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
s_e_sa_v_rho = rho_ca * np.tan(theta_sq+theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
n_s_sa_v_rho = np.round((s_e_sa_v_rho-s_s_sa_v_rho)* s_ov/Delta_s).astype(int)
rho_s_sa_v_rho = rho_ca / np.cos(theta_sq-theta_L_a/2)
rho_e_sa_v_rho = rho_ca / np.cos(theta_sq+theta_L_a/2)
n_rho_sa_v_rho = np.round((rho_e_sa_v_rho-rho_s_sa_v_rho)* s_ov/Delta_s).astype(int)
# calculate the indices along track that define the limits for each range.
ind_s_s_sa_v_rho = np.round((s_s_sa_v_rho-s_0)/(Delta_s/s_ov)).astype(int)
ind_s_e_sa_v_rho = np.round((s_e_sa_v_rho-s_0)/(Delta_s/s_ov)).astype(int)
```
```python
# to define the reference function array, find the maximum extent needed
n_s_sa_v_rho = (ind_s_e_sa_v_rho-ind_s_s_sa_v_rho)+1
n_s_sa_v_rho_max = np.max(n_s_sa_v_rho)
ind_s_s_sa_v_rho_min = np.min(ind_s_s_sa_v_rho)
ind_s_e_sa_v_rho_max = np.max(ind_s_e_sa_v_rho)
# now calculate the reference function placed consistently in the oversized array
s_s_sa = s_0 + ind_s_s_sa_v_rho*Delta_s/s_ov
s_e_sa = s_0 + ind_s_e_sa_v_rho*Delta_s/s_ov
# initialize the reference array
C_az_ref = np.zeros((n_rho_rc,n_s_sa_v_rho_max),dtype=np.complex128)
# populate the reference array
for i in range(n_rho_rc):
    s_sa = np.linspace(s_s_sa[i], s_e_sa[i], n_s_sa_v_rho[i])
    rho_sa = np.sqrt(rho_ca[i]**2 + (s_sa)**2)
    ssind = ind_s_s_sa_v_rho[i] - ind_s_s_sa_v_rho_min
    seind = ind_s_e_sa_v_rho[i] - ind_s_s_sa_v_rho_min
    C_az_ref[i, ssind:seind+1] = np.exp(-1j*4.*np.pi*rho_sa/Lambda) # replica of the observed phase history
```
```python
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
plt.imshow((np.abs(C_az_ref)), cmap='magma', origin='lower', aspect='auto')
plt.colorbar(label='Magnitude');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Magnitude of Azimuth Reference Function Image");
indc = int(np.round(C_az_ref.shape[1]/2.))
plt.subplot(1,2,2)
plt.imshow(np.arctan2(np.imag(C_az_ref[:,indc-200:indc+200]),np.real(C_az_ref[:,indc-200:indc+200])), origin='lower', aspect='auto')
plt.colorbar(label='Phase (rad)');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Phase of Azimuth Reference Function Image");
plt.tight_layout()
```
<font face="Calibri" size="3">The jittery appearance of this image is due to the fact that the phase reference is changing rapidly as a function of range, and given the sampling in range, the sampling is not regular relative to the phase wrapping rate. Therefore to see a smoother version of this image, one needs to unwrap the image. </font>
```python
%%time
# finally do the correlation
az_shape=np.correlate(E_cr_rc[0,:],C_az_ref[0,:],mode='full').shape[0] # perform one correlation to determine the length of the output
E_cr_rcac2 = np.zeros((n_rho_rc,az_shape),dtype=np.complex128) # initialize
for i in range(n_rho_rc): # correlate each range line with its own reference
    E_cr_rcac2[i,:] = np.correlate(E_cr_rc[i,:],C_az_ref[i,:],mode='full')
```
```python
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
extent = [s_s_sim-Delta_s*n_s_sa_v_rho_max/2./s_ov, s_e_sim+Delta_s*n_s_sa_v_rho_max/2./s_ov, rho_s_rc, rho_e_rc]
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Image - Using Correlation")
plt.imshow(20.*np.log10(np.abs(E_cr_rcac2)), cmap='magma', extent=extent, origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)')
plt.subplot(1,2,2)
plt.imshow(20.*np.log10(np.abs(E_cr_rcac2[ind_rho_cr_ac-250:ind_rho_cr_ac+250,ind_s_cr_ac-500:ind_s_cr_ac+500])), cmap='magma', origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)')
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Image - Using Correlation - Zoom")
plt.tight_layout()
```
## 4.3 Azimuth Focusing - frequency domain
### 4.3.1 Preparation for Range Migration Correction
<a id="section-4.3"></a>
<font face="Calibri" size="3">
Note we have three corner reflectors located at unique ranges and azimuth locations. It is a well known property of Fourier Transforms that a translation of position in one domain is equivalent to a phase ramp in the other domain.
$ \displaystyle\int f(t+\delta t) e^{-i \omega t} dt = \displaystyle\int f(t') e^{-i \omega (t'-\delta t)} dt' = F(\omega)e^{i \omega\delta t} $
Therefore, performing the Fourier transform of the range-compressed pulses in the azimuth direction will align the range-migration history *as a function of Doppler frequency* for all ground points at a given range. If we then express range as a function of Doppler frequency, we can map the energy along the range-migration curve to a constant range, nominally the closest approach range for broadside imaging (though it could be any constant range). This allows us to exploit the convolutional properties of Fourier transforms in the azimuth direction.</font>
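<font face="Calibri" size="3">A quick numerical check of the shift property on sampled (circular) data, separate from the processing chain:</font>
```python
# Numerical check of the Fourier shift property for sampled, circular data:
# the FFT of x advanced by d samples equals FFT(x) times exp(+i 2 pi f d).
x = np.random.randn(64)
d = 5
lhs = np.fft.fft(np.roll(x, -d))   # x(t + d), circularly shifted
rhs = np.fft.fft(x) * np.exp(1j * 2. * np.pi * np.fft.fftfreq(64) * d)
print("Shift property holds:", np.allclose(lhs, rhs))
```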
```python
E_cr_rc_azfd = np.zeros((E_cr_rc.shape),dtype=np.complex128)
for i in range(E_cr_rc.shape[0]): # transform each range line to the azimuth frequency domain
    E_cr_rc_azfd[i,:] = np.fft.fft(E_cr_rc[i,:])
```
```python
plt.figure(figsize=(13, 6))
plt.imshow(20.*np.log10(np.abs(E_cr_rc_azfd)), cmap='magma', origin='lower', aspect='auto',vmax=-60.,vmin=-100.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track Frequency (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Azimuth Spectrum ");
```
### 4.3.2 Next Coarsest Approximation: Frequency-domain Correlation with a Range-variable Reference Function; no Range Migration Correction
<font face="Calibri" size="3">
Now that we are in the frequency domain, we perform the equivalent of the time domain correlation by also transforming the reference function matrix, multiplying, then inverse transforming. In the correlation line using FFTs, we have combined several operations in one line:
* Shift the azimuth chirp by half its length to center it at zero delay; this centers the convolution properly (the roll function).
* Take its FFT to move it to the spectral domain (remember it changes with each range).
* Multiply by the spectrum of the data at that range.
* Inverse FFT to bring the result back to the time domain.
</font>
```python
%%time
# the correlation by FFT; no range migration correction
E_cr_rcac_fd = np.zeros((E_cr_rc.shape),dtype=np.complex128) # initialize
for i in range(n_rho_rc): # correlate
    C_az_REF_al = np.zeros(E_cr_rc.shape[1],dtype=np.complex128)
    C_az_REF_al[0:C_az_ref.shape[1]] = np.conjugate(C_az_ref[i,:])
    E_cr_rcac_fd[i,:] = np.fft.ifft(np.fft.fft(E_cr_rc[i,:]) *
                                    np.fft.fft(np.roll(C_az_REF_al, -int(C_az_ref.shape[1]/2))))
```
```python
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
extent = [s_s_sim, s_e_sim, rho_s_rc, rho_e_rc]
plt.imshow(20.*np.log10(np.abs(E_cr_rcac_fd)), cmap='magma', extent=extent, origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Image - Using FFT Correlation")
plt.subplot(1,2,2)
plt.imshow(20.*np.log10(np.abs(E_cr_rcac_fd[ind_rho_cr-250:ind_rho_cr+250,ind_s_cr-500:ind_s_cr+500])), cmap='magma', origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Image - Using FFt Correlation - Zoom");
plt.tight_layout()
```
### 4.3.3 Range Migration Correction
<font face="Calibri" size="3">
Now in the Fourier domain in azimuth above, we can see that there is a migration of the brightness to larger range as frequency increases (zero frequency is at the left and right edges of the above image). This will be the case independent of the number and azimuth locations of the corner reflectors. They all collapse onto a migration curve centered on zero frequency at the appropriate range. If there is squint, the migration curve will still be centered on zero frequency, but there will only be energy in the portion of the spectrum dictated by the antenna beam. We can use the range-Doppler relationship to shift energy at a given Doppler frequency to its appropriate range bins. As described in Eq. (6)
\begin{equation}
\phi_{az}(s;\rho_0,s_0) = -\frac{4\pi}{\lambda} \big(\rho(s;\rho_0,s_0) - \rho_0\big) = -\frac{4\pi}{\lambda} \bigg(\sqrt{(s-s_0)^2+\rho_0^2} - \rho_0\bigg)
\end{equation}
Noting that in our specialized geometry $s=v_{sc} t$, the time derivative of $\phi_{az}$ can be written
\begin{equation}
\omega_{az} = \frac{\partial}{\partial t} \phi_{az}(s;\rho_0,s_0) = -\frac{4\pi}{\lambda} \cdot \frac{1}{2}\big ((s-s_0)^2+\rho_0^2 \big)^{-1/2}\cdot 2(s-s_0) \cdot v_{sc}
\end{equation}
Consolidating terms, and noting that $(s-s_0)^2 = \rho^2-\rho_0^2$, the Doppler frequency in Hertz is:
\begin{equation}
f_{hz,az} = \frac{\omega_{az}}{2\pi} = -\frac{2}{\lambda} \cdot \frac{1}{\rho} \big (\rho^2-\rho_0^2\big )^{1/2} \cdot v_{sc}
\end{equation}
or
\begin{equation}
f^2_{hz,az} = \frac{4}{\lambda^2} \cdot \frac{1}{\rho^2} \big (\rho^2-\rho_0^2\big ) \cdot v^2_{sc}
\end{equation}
This equation can be rearranged to solve for range as a function of Doppler frequency:
\begin{equation}
\rho(f_{hz,az}) = \rho_0 \bigg (1-\frac{\lambda^2 f^2_{hz,az}}{4 v^2_{sc}}\bigg)^{-1/2}
\end{equation}
A first-order Taylor expansion reduces this to the more familiar expression:
\begin{equation}
\rho(f_{hz,az}) \approx \rho_0 \bigg (1+\frac{\lambda^2 f^2_{hz,az}}{8 v^2_{sc}}\bigg)
\end{equation}
</font>
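<font face="Calibri" size="3">Before moving on, a quick numerical check of this approximation can be helpful. The following is a minimal sketch with illustrative constants (the wavelength, velocity, and range values below are assumptions for demonstration, not the notebook's simulation parameters):</font>
```python
import numpy as np

# illustrative constants -- substitute the notebook's Lambda, v_sc, and a range of interest
lam_ex = 0.24    # wavelength (m), assumed
v_ex = 7500.     # platform velocity (m/s), assumed
rho_0 = 850e3    # closest-approach range (m), assumed

f_hz = np.linspace(-1500., 1500., 7)           # Doppler frequencies (Hz)
x = lam_ex**2 * f_hz**2 / (4. * v_ex**2)
rho_exact = rho_0 / np.sqrt(1. - x)            # exact range-Doppler relation
rho_taylor = rho_0 * (1. + x / 2.)             # first-order Taylor expansion
print(np.max(np.abs(rho_exact - rho_taylor)))  # negligible whenever x << 1
```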
<font face="Calibri" size="3">
Equation (26) can be used to apply the correction in the azimuth frequency domain. For each Doppler frequency bin, we differentially shift the range position of each point by a range-dependent amount that brings all points on a migration curve to the same range bin. This allows proper compression of the energy in azimuth with no range migration loss.
Because the Fourier Transform is on sampled data, the azimuth spectrum is circular, so if there is significant squint, we need to be careful to compute the range migration curve with respect to the beam edges in the Doppler domain. The reference range will be the range at beam center, $\rho_{dc}$, not the closest approach range. The Doppler centroid as a function of range is given by
\begin{equation}
f_{dc}(\rho_{dc}) = \frac{2 v_{sc}}{\lambda} \sin\theta \sin\theta_{sq} = \frac{2 v_{sc}}{\lambda} \sin\bigg(\cos^{-1}\bigg(\frac{h_{sc}}{\rho_{dc}}\bigg)\bigg) \sin\theta_{sq}
\end{equation}
and the limits in the azimuth spectrum between which it has significant energy are:
\begin{equation}
f_{dc,\pm}(\rho_{dc}) = f_{dc}(\rho_{dc}) \pm \frac{f_{az,bw}}{2}
\end{equation}
whereas the locations where the azimuth spectrum of the signal would have wrap points relative to the centroid are given by:
\begin{equation}
f_{dc,\pm,f_s}(\rho_{dc}) = f_{dc}(\rho_{dc}) \pm \frac{f_s}{2}
\end{equation}
Note that if there is a large squint and the azimuth spectrum is critically sampled, the indexing into the azimuth buffer is messy: the wrap points must be computed in the circular array modulo the buffer length, while the range migration curve must be computed in an absolute sense. Note also that the negative frequencies are in the upper half of the array, so once the index is calculated, it must be adjusted. If the azimuth spectrum is oversampled to begin with, one must still address the proper interpretation of positive and negative frequencies in the buffer, but wrapping of the spectrum is avoided. For the sake of simplicity in this tutorial, we will assume that we do not need to address the spectral wrapping, i.e. that we are either at zero squint or the spectrum is oversampled. If this is not the case, the range migration correction will not be correct.
</font>
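<font face="Calibri" size="3">As a minimal sketch of the circular bookkeeping described above (a hypothetical helper with assumed names, not the indexing used later in this notebook), an absolute Doppler frequency could be mapped to its bin in a length-`n` FFT buffer sampled at `f_s` as follows:</font>
```python
import numpy as np

def doppler_bin(f_abs, f_s, n):
    """Map an absolute Doppler frequency (Hz) to its index in a length-n
    FFT buffer sampled at f_s Hz, with negative frequencies stored in the
    upper half of the array (standard FFT ordering)."""
    return int(np.round(f_abs / f_s * n)) % n

print(doppler_bin(-250., 1000., 1024))  # -> 768, i.e. in the upper half of the buffer
```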
```python
# calculate centroid as a function of range
def f_dc(rho):
return 2. * v_sc * np.sin(np.arccos(h_sc/rho)) * np.sin(theta_sq) / Lambda
f_az_bw = v_sc * s_ov / Delta_s
# determine the range of frequencies in the spectrogram and define the meshgrid
# assumes that the spectrum array will be rotated to have -f_az_bw/2 at array index [0]
f_im = np.linspace(-f_az_bw/2.,f_az_bw/2.,E_cr_rc.shape[1])
F_im, Rho_rc = np.meshgrid(f_im,rho_rc)
# calculate the ambiguity of each of the frequencies in the spectrogram and add it to the frequency of each bin
F_abs = F_im + f_az_bw * np.round((f_dc(Rho_rc) - F_im) / f_az_bw)  # f_az_bw is the azimuth sampling rate defined above
```
```python
plt.figure(figsize=(13, 5))
plt.imshow(F_abs, cmap='magma', origin='lower', aspect='auto')
plt.colorbar(label='Frequency (Hz)')
plt.xlabel("Along track Spectrum bin (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Absolute Frequency at each Frequency in Spectrogram");
```
```python
# rotate the spectrum to work more easily in this domain
E_cr_rc_azfd_shift = np.zeros((E_cr_rc_azfd.shape),dtype=np.complex128)
for i in range(E_cr_rc.shape[0]): # correlate
E_cr_rc_azfd_shift[i,:] = np.fft.fftshift(E_cr_rc_azfd[i,:])
```
```python
plt.figure(figsize=(13, 6))
plt.imshow(20.*np.log10(np.abs(E_cr_rc_azfd_shift)), cmap='magma', origin='lower', aspect='auto',vmax=-60.,vmin=-100.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track Frequency (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Rotated Azimuth Spectrum ");
```
<font face="Calibri" size="3">Now that we have the absolute frequency calculated for each range, and the spectrum rotated to match it, we are in a position to perform the range migration correction for each frequency.
</font>
```python
Rho_rm = Rho_rc * (np.cos(theta_sq) / np.sqrt(1.- Lambda**2 * F_abs**2/ (4. * v_sc**2)))
Rho_rm_nn = np.round((Rho_rm-rho_rc[0])*rho_ov/Delta_rho).astype(int)
```
```python
plt.figure(figsize=(13, 6))
plt.imshow(Rho_rm_nn, cmap='magma', origin='lower', aspect='auto')
plt.colorbar(label='Range Migration (pixel)');
plt.xlabel("Along track Frequency (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Range Migration Shift");
```
<font face="Calibri" size="3">We are now ready to use this range migration map to resample the spectrum. The spectral wrap cut (which does not appear in the zero-squint geometry) presents a bookkeeping challenge. For this tutorial we simply ignore it and interpolate across it. This will introduce artifacts, but using a nearest-neighbor interpolator mitigates some of the edge effects. </font>
```python
%%time
# for each frequency, shift the data in range by the nearest-neighbor migration amount
E_cr_rc_azfd_rm = np.zeros((E_cr_rc_azfd.shape),dtype=np.complex128)
for i in range(E_cr_rc_azfd_rm.shape[0]):
for j in range(E_cr_rc_azfd_rm.shape[1]):
E_cr_rc_azfd_rm[i,j] = E_cr_rc_azfd_shift[min(Rho_rm_nn[i,j],E_cr_rc_azfd_rm.shape[0]-1),j]
```
```python
plt.figure(figsize=(13, 6))
plt.imshow(20.*np.log10(np.abs(E_cr_rc_azfd_rm)), cmap='magma', origin='lower', aspect='auto',vmax=-60.,vmin=-100.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track Frequency (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Range-Migrated Azimuth Spectrum ");
```
### 4.3.4 Reference Function Application and Inverse Azimuth Transform to the Image
<font face="Calibri" size="3">It is clear that the energy has been migrated to constant range, which should improve the correlation result. Let's see. </font>
```python
# rotate the spectrum back to original position
E_cr_rc_azfd_rm_shift = np.zeros((E_cr_rc_azfd.shape),dtype=np.complex128)
for i in range(E_cr_rc.shape[0]): # correlate
    E_cr_rc_azfd_rm_shift[i,:] = np.fft.ifftshift(E_cr_rc_azfd_rm[i,:])  # ifftshift undoes fftshift for odd lengths too
```
```python
%%time
# the correlation by FFT; with range migration
E_cr_rcac_fd_rm = np.zeros((E_cr_rc.shape),dtype=np.complex128) # initialize
C_az_REF_al = np.zeros(E_cr_rc.shape[1],dtype=np.complex128)
for i in range(n_rho_rc): # correlate
C_az_REF_al = np.zeros(E_cr_rc.shape[1],dtype=np.complex128)
C_az_REF_al[0:C_az_ref.shape[1]] = np.conjugate(C_az_ref[i,:])
E_cr_rcac_fd_rm[i,:] = np.fft.ifft(E_cr_rc_azfd_rm_shift[i,:]*
np.fft.fft(np.roll(C_az_REF_al,
-int(C_az_ref.shape[1]/2))))
```
```python
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
extent = [s_s_sim, s_e_sim, rho_s_rc, rho_e_rc]
plt.imshow(20.*np.log10(np.abs(E_cr_rcac_fd_rm)), cmap='magma', extent=extent, origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (m)")
plt.ylabel("Range Position (m)")
plt.title("Power of Image - Using FFT Correlation with RM");
plt.subplot(1,2,2)
plt.imshow(20.*np.log10(np.abs(E_cr_rcac_fd_rm[ind_rho_cr-250:ind_rho_cr+250,ind_s_cr-500:ind_s_cr+500])), cmap='magma', origin='lower', aspect='auto',vmax=-40.,vmin=-80.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Image - Using FFT Correlation with RM - Zoom")
plt.tight_layout()
```
```python
plt.figure(figsize=(10, 8))
plt.imshow(20.*np.log10(np.abs(E_cr_rcac_fd_rm[ind_rho_cr-100:ind_rho_cr+100,ind_s_cr-50:ind_s_cr+50])), cmap='magma', origin='lower', aspect='auto',vmax=-30.,vmin=-70.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Image - Using FFT Correlation with RM - Zoom");
figname = "FocusedinclRCM_FFT.png"
plt.savefig(figname, dpi=300, transparent='false')
```
### 4.3.5 Measuring Achieved Range and Azimuth Resolution
<font face="Calibri" size="3">In the following code cell we create plots of the focused corner reflector along azimuth and range. Based on these cuts, the achieved azimuth and range resolution can be measured. </font>
```python
azimuthcut = 20.*np.log10(np.abs(E_cr_rcac_fd_rm[ind_rho_cr,ind_s_cr-50:ind_s_cr+50]))
rangecut = 20.*np.log10(np.abs(E_cr_rcac_fd_rm[ind_rho_cr-50:ind_rho_cr+50,ind_s_cr]))
plt.figure(figsize=(13, 6))
plt.subplot(1,2,1)
plt.plot(S_sim[ind_rho_cr,ind_s_cr-50:ind_s_cr+50], azimuthcut-np.max(azimuthcut))
plt.title("Focused Image \n Cut through CR along Azimuth")
plt.xlabel("Azimuth (m)")
plt.ylabel("Signal Power (dB)");
plt.subplot(1,2,2)
plt.plot(Rho_sim[ind_rho_cr-50:ind_rho_cr+50,ind_s_cr], rangecut-np.max(rangecut))
plt.title("Focused Image \n Cut through CR along Range")
plt.xlabel("Range (m)")
plt.ylabel("Signal Power (dB)");
plt.tight_layout()
#plt.show();
#figname = "RangeCompressed_FFT.png"
#plt.savefig(figname, dpi=300, transparent='false')
```
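<font face="Calibri" size="3">The -3 dB widths can also be estimated numerically rather than read off the plots. A minimal sketch, assuming `azimuthcut` from the cell above and the azimuth pixel spacing `Delta_s/s_ov` from the simulation setup; the `width_3db` helper and its linear interpolation across the crossing points are illustrative simplifications, not part of the notebook:</font>
```python
import numpy as np

def width_3db(cut_db, spacing):
    """Estimate the -3 dB main-lobe width of a power cut given in dB.
    Assumes the lobe falls below -3 dB on both sides of the peak."""
    cut = cut_db - np.max(cut_db)  # normalize the peak to 0 dB
    k = np.argmax(cut)
    l = k                          # walk left to the -3 dB crossing
    while cut[l] > -3.:
        l -= 1
    r = k                          # walk right to the -3 dB crossing
    while cut[r] > -3.:
        r += 1
    # linear interpolation between the samples straddling each crossing
    xl = l + (-3. - cut[l]) / (cut[l + 1] - cut[l])
    xr = r - 1 + (-3. - cut[r - 1]) / (cut[r] - cut[r - 1])
    return (xr - xl) * spacing

# e.g. print(width_3db(azimuthcut, Delta_s / s_ov), "m in azimuth")
```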
<font face="Calibri" size="3">As can be seen, the energy is much better focused with range migration correction than without. The noise structure in the sidelobes is due to the crude nearest-neighbor interpolation used in the range migration correction. This would improve with a better interpolator, such as a sinc interpolator. The sinc interpolator included in this notebook runs very slowly, however.</font>
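<font face="Calibri" size="3">For reference, a truncated-sinc interpolator of the kind alluded to above might look like the following minimal sketch (a hypothetical helper, not the interpolator shipped with this notebook; no window is applied, for brevity):</font>
```python
import numpy as np

def sinc_interp(samples, x, half_len=8):
    """Interpolate (possibly complex) samples at fractional index x using
    a truncated sinc kernel of 2*half_len taps."""
    k0 = int(np.floor(x))
    k = np.arange(k0 - half_len + 1, k0 + half_len + 1)
    kc = np.clip(k, 0, len(samples) - 1)  # crude edge handling
    return np.sum(samples[kc] * np.sinc(x - k))
```
<font face="Calibri" size="3">In the range migration step, each spectrum column would then be interpolated at the fractional range index implied by `Rho_rm`, instead of rounding to the nearest sample.</font>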
<br>
<div class="alert alert-danger">
<font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>ASSIGNMENT #3</u>: </font> Discuss the Fully-Focused Image </b> </font>
<font face="Calibri" size="3"><u>Answer the following questions regarding the fully-focused data set</u>:
<ol>
<br>
<li><b>Question 3.1</b>: Download and present Figures 25 and 26 in your document. Provide figure captions describing what is shown in the respective figures.</li>
<br>
<li><b>Question 3.2</b>: Measure the achieved azimuth resolution by zooming into the left panel in the above figure and measuring the width of the focused peak at the -3 dB power position. Please provide your estimate of the azimuth resolution in units of meters. Additionally, please compare your measurement to the azimuth resolution numbers quoted in Section 1.5.</li>
<br>
<li><b>Question 3.3</b>: Measure the achieved range resolution by zooming into the panel on the right and measuring the width of the focused peak at the -3 dB power position. Please provide your estimate of the range resolution in units of meters. Additionally, please compare your measurement to the theoretical range resolution you calculated in Assignment 1.</li>
</ol>
</font>
</div>
## 4.4 Optional: Back Projection Time-Domain Processing in Azimuth
<div class="alert alert-warning">
<font face="Calibri" size="5"> <b> <font color='rgba(200,0,0,0.2)'> <u>The next Section is Optional</u>: </font> </b> </font>
<font face="Calibri" size="3"> The next section of the notebook performs SAR focusing using a backprojection processing approach. In this approach, two-dimensional reference functions for each resolution cell are calculated, followed by a focusing step using time-domain correlation. This is the most accurate method of SAR image focusing but also the most time-consuming.
<b>Note</b>: Running this next step will take approximately 1 hour.
</font>
</div>
<a id="section-4.4"></a>
<font face="Calibri" size="3">
The range compressed response of a corner reflector was described above as:
\begin{equation}
E_{cr,rc}(s,\rho; s_{cr}, \rho_{cr}) = K \sqrt{G_T} e^{-i 4\pi(\rho_{\rm sc-cr}-\rho_{l})/\lambda} {\rm sinc}\big(\frac{\rho-\rho_{\rm sc-cr}}{\Delta\rho}\big)
\end{equation}
where $\rho_{\rm sc-cr}$ would be the appropriate range for when the point was observed (including squint), and $s_{cr}$ the corresponding azimuth position. Let's focus on the complex hyperbolic phase that we explored above when discussing the phase history of a point over time.
\begin{equation}
E_{cr,rc}(s,\rho; s_{cr}, \rho_{cr}) = K \sqrt{G_T} e^{-i 4\pi\big(\sqrt{(s-s_{cr})^2+\rho_{cr}^2}-\rho_{l}\big)/\lambda} {\rm sinc}\big(\frac{\rho-\rho_{\rm sc-cr}}{\Delta\rho}\big)
\end{equation}
If we generalize $s_{cr}, \rho_{cr}$ to any image point $[s_i,\rho_i]$, this relationship of range to azimuth remains:
\begin{equation}
\rho (s) = \sqrt{(s-s_i)^2+\rho_i^2}
\end{equation}
In our idealized geometry, with the spacecraft flying a straight line above a flat earth, $\rho_i$ is not a function of $s_i$ (no topography, no variable distance from orbit to ground), so $\rho(s)$ is the same function for any $s_i$ at a given $\rho_i$, but varies with $\rho_i$.
Therefore, to gather the energy in azimuth to focus the image at point $(s_i,\rho_i)$, we simply look up the sample in the range compressed echoes corresponding to each $s$ and $\rho$ in the synthetic aperture, compensate the propagation phase delay at each point, then sum over all points in the synthetic aperture. Since the sample points are not necessarily perfectly aligned, we would ideally interpolate the echoes to get the exact values at $(s,\rho)$. However, for the purpose of this tutorial, we will just take the nearest neighbor echo sample. First we define the image grid to be sufficiently smaller than the simulation grid to avoid edge effects.
</font>
```python
# define the image grid - trim relative to simulation
# in range, trim by pulse duration
rho_s_im = rho_s_sim + c * tau_r/2.
rho_e_im = rho_e_sim - c * tau_r/2.
n_rho_im = int(np.round((rho_e_im-rho_s_im)* rho_ov/Delta_rho))
rho_im = np.linspace(rho_s_im,rho_e_im,n_rho_im)
# in azimuth, trim by half a beamwidth
s_s_im = s_s_sim + rho_f * theta_L_a / 2.
s_e_im = s_e_sim - rho_f * theta_L_a / 2.
n_s_im = int(np.round((s_e_im-s_s_im)* s_ov/Delta_s))
s_im = np.linspace(s_s_im,s_e_im,n_s_im)
S_im, Rho_im = np.meshgrid(s_im,rho_im)
```
<font face="Calibri" size="3">Next define the synthetic aperture extents. These vary with range, and need to take into account any azimuth squint. We did this above when defining the azimuth reference function for the correlation approach. Once again, for a point at $(s_{im},\rho_{im})$ observed with squint $\theta_{sq}$, the range of closest approach of the spacecraft when the boresight intersects this point is given by
$ \rho_{ca} = \rho_{im} \cos\theta_{sq}$
and the along-track position of this point relative to closest approach is
$s_{ca-rel} = \rho_{ca} \tan \theta_{sq}$
The limits of the synthetic aperture then are given by the angle subtended around this squint angle:
$s_{s,sa} = \rho_{ca} \tan (\theta_{sq} - \theta_{L_a}/2) \qquad s_{e,sa} = \rho_{ca} \tan (\theta_{sq} + \theta_{L_a}/2)$
$\rho_{s,sa} = \rho_{ca} / \cos(\theta_{sq}-\theta_{L_a}/2) \qquad \rho_{e,sa} = \rho_{ca} / \cos(\theta_{sq}+\theta_{L_a}/2)$
</font>
```python
# define the synthetic aperture extents across range
# first the closest approach range and azimuth for a given squinted slant range
rho_ca = rho_im * np.cos (theta_sq)
s_carel = rho_ca * np.tan(theta_sq)
# Next the start and end extents for each of these points relative to s_im
s_s_sa = rho_ca * np.tan(theta_sq-theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
s_e_sa = rho_ca * np.tan(theta_sq+theta_L_a/2.) # half beamwidth, w/ squint, relative to s_im
n_s_sa = np.round((s_e_sa-s_s_sa)* s_ov/Delta_s).astype(int)
rho_s_sa = rho_ca / np.cos(theta_sq-theta_L_a/2)
rho_e_sa = rho_ca / np.cos(theta_sq+theta_L_a/2)
n_rho_sa = np.round((rho_e_sa-rho_s_sa)* rho_ov/Delta_rho).astype(int) # range samples, so range spacing rather than azimuth spacing
```
<font face="Calibri" size="3">For a given image point $(s_{im},\rho_{im})$, the coordinates over which to integrate the echoes are $(s,\rho(s)) = (s,\sqrt{(s-s_{im})^2+\rho^2_{im}})$. Thus the time domain back projection processing will be
\begin{equation}
E_{cr,td}(s_{im},\rho_{im}) = \displaystyle \int_{s_{im}+s_{s,sa}}^{s_{im}+s_{e,sa}} E_{cr,rc}(s,\rho(s)) e^{i 4 \pi \rho(s) /\lambda} ds
\end{equation}
with $s_{s,sa}$ and $s_{e,sa}$ defined above as image point-relative extents of the synthetic aperture. Note we can get away with calculating the $\rho(s)$ function once per range bin because of the regular rectilinear motion with no topography. You will rapidly find if you execute the next block that it takes *forever* due to the triple loop and the python-interpreted indexing. You will need to interrupt the run to uncomment the limits specified around each of the corner reflectors to perform only the necessary calculations.</font>
```python
%%time
E_cr_im = np.zeros((n_rho_im,n_s_im),dtype=np.complex128) # initialize output grid
# Calculate computable limits +/- 200 m
bp_win = 200.
#
ns_s_im_cr = np.round((S_cr - bp_win - s_s_im)*s_ov/Delta_s).astype(int)
ne_s_im_cr = np.round((S_cr + bp_win - s_s_im)*s_ov/Delta_s).astype(int)
ns_rho_im_cr = np.round((Rho_cr - bp_win - rho_s_im)*rho_ov/Delta_rho).astype(int)
ne_rho_im_cr = np.round((Rho_cr + bp_win - rho_s_im)*rho_ov/Delta_rho).astype(int)
for ncr in range(len(Ind_cr)):
print ("Reflector ",Ind_cr[ncr],ns_s_im_cr[Ind_cr[ncr]],
ne_s_im_cr[Ind_cr[ncr]],ns_rho_im_cr[Ind_cr[ncr]],ne_rho_im_cr[Ind_cr[ncr]])
for rho_im_b in range(ns_rho_im_cr[Ind_cr[ncr]],ne_rho_im_cr[Ind_cr[ncr]]):
if(np.mod(rho_im_b,10)==0): print (rho_im_b)
s_sa = np.linspace(s_s_sa[rho_im_b],s_e_sa[rho_im_b],n_s_sa[rho_im_b])
rho_sa = np.sqrt(rho_im[rho_im_b]**2+(s_sa)**2)
azref = np.exp(1j*4.*np.pi*rho_sa/Lambda) # conjugate of observed phase history
for s_im_b in range(ns_s_im_cr[Ind_cr[ncr]],ne_s_im_cr[Ind_cr[ncr]]):
E_cr_rc_bp=np.zeros(n_s_sim,dtype=np.complex128)
azref_bp=np.zeros(n_s_sim,dtype=np.complex128)
for sb in range(len(s_sa)):
sb_rc = int(np.round((s_im[s_im_b]+s_sa[sb]-s_s_sim)*s_ov/Delta_s))
rhob_rc = int(np.round((rho_sa[sb]-rho_s_sim)*rho_ov/Delta_rho))
E_cr_rc_bp[sb_rc]=E_cr_rc[rhob_rc,sb_rc]
azref_bp[sb_rc]=azref[sb]
E_cr_im[rho_im_b,s_im_b] += np.dot(E_cr_rc_bp,azref_bp)
```
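<font face="Calibri" size="3">Most of the runtime of the cell above is the interpreted innermost loop over aperture samples. As a minimal sketch (assuming the same variables as the cell above; note that duplicate bins are summed rather than overwritten, a small departure from the loop version), that loop can be replaced with vectorized numpy indexing:</font>
```python
import numpy as np

def bp_pixel(E_rc, s_pix, s_sa, rho_sa, azref,
             s_s_sim, rho_s_sim, s_ov, Delta_s, rho_ov, Delta_rho):
    """Nearest-neighbor backprojection sum for one image pixel, vectorized
    over the synthetic aperture samples."""
    sb_rc = np.round((s_pix + s_sa - s_s_sim) * s_ov / Delta_s).astype(int)
    rhob_rc = np.round((rho_sa - rho_s_sim) * rho_ov / Delta_rho).astype(int)
    return np.sum(E_rc[rhob_rc, sb_rc] * azref)

# usage inside the pixel loops above:
# E_cr_im[rho_im_b, s_im_b] += bp_pixel(E_cr_rc, s_im[s_im_b], s_sa, rho_sa, azref,
#                                       s_s_sim, rho_s_sim, s_ov, Delta_s, rho_ov, Delta_rho)
```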
```python
plt.figure(figsize=(8, 5))
plt.imshow(20.*np.log10(np.abs(E_cr_im[ns_rho_im_cr[Ind_cr[ncr]]:ne_rho_im_cr[Ind_cr[ncr]],ns_s_im_cr[Ind_cr[ncr]]:ne_s_im_cr[Ind_cr[ncr]]])), cmap='magma', origin='lower', aspect='auto',vmax=-30.,vmin=-70.)
plt.colorbar(label='Power (dB)');
plt.xlabel("Along track position (pixel)")
plt.ylabel("Range Position (pixel)")
plt.title("Power of Image - Using Time Domain Back Projection");
```
# Summary
<font face="Calibri" size="3">
This tutorial covers the following topics:
* SAR Geometry
* Antenna Patterns
* The Radar Equation
* Doppler and Phase in the synthetic aperture
* Range reference function and correlation to achieve fine range resolution
- Range correlation in the time domain
- Range correlation using FFTs to perform circular correlation
* Azimuth reference function and correlation
- Azimuth correlation in the time domain
- Azimuth migration correction
- Azimuth correlation using FFTs to perform circular correlation
- Backprojection in the time domain
While the geometry is idealized, through this step-by-step approach with python code to simulate radar echoes from point targets and a variety of methods to process the data, the notebook illustrates the meaning of the synthetic aperture, the explicit signal properties of the return echoes, and how the varying range of a target from pulse to pulse necessitates some resampling to align the energy with a regular grid.
The notebook is designed to allow the student to adjust parameters to alter resolution, squint, geometry, radar elements such as antenna dimensions, and other factors. Some of these can be changed locally; others must be changed at the beginning of the notebook. Once the student is familiar with the cell dependencies, the distinction becomes clear: the resolution of the simulation and other geometric parameters must be set at the beginning, while plot dimensions and processing choices can be set locally once the simulation is established. A successful learning outcome would be confidence in understanding where parameters need to change to affect the tutorial in a particular way.
</font>
|
It’s 2078 and our college professor bot assigned us to look at our grandparents’ “digital footprint”. After some basic research, I’ve found that 99% of all Snapchat pictures sent were made public after the company’s crash in 2053. I’ve found this guy that uses the username littlegunman, and I’ve also learned that he wished he could change it, since he made it when he was little and got heat for it in his high school and college years. He seems to be a shy person; I can tell a lot from the types of pictures people send. For instance, people with outgoing personalities use a lot of filters and selfies. Artsy people send pictures of artsy and interesting things, whereas shy people send pictures of random things, like blurry pictures, blank screens and other pictures of meaningless things. I’ve also learned that depending on the person he’s talking to, his type of pictures changes. If it’s a disliked person he will send what I consider “careless pictures” that I’m assuming are just whatever was open when he hit reply. If he likes the person, he might send a picture of himself (although these pictures make up less than 1% of his entire file) or something where the picture is focused.
I think the conclusion they would draw about our community is that everyone essentially has the same online presence. What I mean by this is that everyone has the same accounts (Twitter, Instagram, Snapchat, etc.), and this creates a sense of uniformity, making it seem as if everyone does the same things, for instance posting vacation pictures on Instagram or sending streaks on Snapchat. In reality, if you look into these acts, you can see a lot about the way someone is. In their vacation pictures, are they in the picture? Are they facing the camera or away from it? And so on. The same can be done for any online act: when they send streaks, what did they send a picture of? We can analyze anyone’s online actions to learn more about them and their culture. For example, we could look back at Myspace and learn about the cultural change within our generation itself. We might not realize it, but we can learn a lot about someone’s culture just from what they post online.
|
The anime's title was inspired by the principal purpose of the Nameless: to suffer in battle for the goals of others. A subtitle attached to the project during development was "The Road to Kubinka", which referenced the Kubinka Tank Museum in Moscow. The game's main theme was how the characters regained their sense of self when stripped of their names and identities, along with general themes focused on war and its consequences. While making the anime, the production team were told by Sega to make it as realistic as possible, with the consequence that the team did extensive research into aspects such as what happened when vehicles like tanks were overturned or damaged. Due to it being along the same timeline as the original game and its television anime adaptation, the cast of Valkyria Chronicles could make appearances, which pleased the team. The opening theme, "Akari (Light) <unk>" (<unk>-<unk>), was sung by Japanese singer <unk>. The ending theme, "Someday the Flowers of Light Will Bloom" (<unk>, Itsuka Saku Hikari no Hana), was sung by Minami Kuribayashi. Both songs' lyrics were written by their respective artists.
|
#
# File: qcqo1.jl
#
# Purpose: Demonstrates how to solve a small quadratic and quadratically
# constrained optimization problem using the MOSEK Julia API.
##
using Mosek
using Printf
# Since the actual value of Infinity is ignored by MOSEK, Julia's built-in
# Inf is used below purely for symbolic purposes.
# Set up and input bounds and linear coefficients
bkc = [ MSK_BK_LO ]
blc = [ 1.0 ]
buc = [ Inf ]
bkx = [ MSK_BK_LO
MSK_BK_LO
MSK_BK_LO ]
blx = [ 0.0, 0.0, 0.0 ]
bux = [ Inf, Inf, Inf ]
c = [ 0.0, -1.0, 0.0 ]
asub = [ 1 ,2, 3 ]
aval = [ 1.0, 1.0, 1.0 ]
numvar = length(bkx)
numcon = length(bkc)
# Create a task
maketask() do task
# Append 'numcon' empty constraints.
# The constraints will initially have no bounds.
appendcons(task,numcon)
#Append 'numvar' variables.
# The variables will initially be fixed at zero (x=0).
appendvars(task,numvar)
#Optionally add a constant term to the objective.
putcfix(task,0.0)
# Set the linear term c_j in the objective.
putclist(task,[1:numvar;],c)
# Set the bounds on variable j
# blx[j] <= x_j <= bux[j]
putvarboundslice(task,1,numvar+1,bkx,blx,bux)
# Input row 1 of A
putarow(task,1,asub,aval)
putconbound(task,1,bkc[1],blc[1],buc[1])
# Set up and input quadratic objective
qsubi = [ 1, 2, 3, 3 ]
qsubj = [ 1, 2, 1, 3 ]
qval = [ 2.0, 0.2, -1.0, 2.0 ]
putqobj(task,qsubi,qsubj,qval)
# The lower triangular part of the Q^0
# matrix in the first constraint is specified.
# This corresponds to adding the term
# - x1^2 - x2^2 - 0.1 x3^2 + 0.2 x1 x3
qsubi = [ 1, 2, 3, 3 ]
qsubj = [ 1, 2, 3, 1 ]
qval = [ -2.0, -2.0, -0.2, 0.2 ]
# put Q^0 in the constraint with index 1 (indices are 1-based in the Julia API).
putqconk(task,1, qsubi,qsubj, qval)
# Input the objective sense (minimize/maximize)
putobjsense(task,MSK_OBJECTIVE_SENSE_MINIMIZE)
# Optimize the task
optimize(task,"mosek://solve.mosek.com:30080")
# Print a summary containing information
# about the solution for debugging purposes
solutionsummary(task,MSK_STREAM_MSG)
prosta = getprosta(task,MSK_SOL_ITR)
solsta = getsolsta(task,MSK_SOL_ITR)
if solsta == MSK_SOL_STA_OPTIMAL
# Output a solution
xx = getxx(task,MSK_SOL_ITR)
@printf("Optimal solution: %s\n", xx')
elseif solsta == MSK_SOL_STA_DUAL_INFEAS_CER
println("Primal or dual infeasibility.\n")
elseif solsta == MSK_SOL_STA_PRIM_INFEAS_CER
println("Primal or dual infeasibility.\n")
elseif solsta == MSK_SOL_STA_UNKNOWN
println("Unknown solution status")
else
println("Other solution status")
end
end
|
module GTFL where
open import Data.Nat hiding (_⊓_; erase; _≟_; _≤_)
open import Data.Bool hiding (_≟_)
open import Data.Fin using (Fin; zero; suc; toℕ)
open import Data.Vec
open import Relation.Binary.PropositionalEquality
open import Relation.Nullary
open import Data.Empty
open import Function using (_∘_)
-- | Types
infixr 30 _⇒_
data GType : Set where
nat : GType
bool : GType
_⇒_ : GType → GType → GType
✭ : GType
err : GType -- easier to model this as a type in Agda
-- | Untyped Expressions
data Expr : Set where
litNat : ℕ → Expr
litBool : Bool → Expr
dyn : Expr
err : Expr
var : ℕ → Expr
lam : GType → Expr → Expr
_∙_ : Expr → Expr → Expr
_⊕_ : Expr → Expr → Expr
if_thn_els_ : Expr → Expr → Expr → Expr
Ctx : ℕ → Set
Ctx = Vec GType
infixr 10 _~_
data _~_ {A B : Set} (x : A) (y : B) : Set where
cons : x ~ y
~dom : ∀ (t : GType) → GType
~dom (t ⇒ t₁) = t
~dom _ = err
~cod : ∀ (t : GType) → GType
~cod (t ⇒ t₁) = t₁
~cod _ = err
_⊓_ : ∀ (t₁ t₂ : GType) → GType
nat ⊓ nat = nat
bool ⊓ bool = bool
t₁ ⊓ ✭ = t₁
✭ ⊓ t₂ = t₂
(t₁ ⇒ t₂) ⊓ (t₃ ⇒ t₄) = (t₁ ⊓ t₃) ⇒ (t₂ ⊓ t₄)
_ ⊓ _ = err
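-- e.g. (nat ⇒ ✭) ⊓ (✭ ⇒ bool) reduces to (nat ⊓ ✭) ⇒ (✭ ⊓ bool), i.e. nat ⇒ bool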
-- | Typed Terms
data Term {n} (Γ : Ctx n) : GType → Set where
Tx : ∀ {t} (v : Fin n) → t ≡ lookup v Γ → Term Γ t
Tn : ℕ → Term Γ nat
Tb : Bool → Term Γ bool
Tdy : Term Γ ✭
_T∙_ : ∀ {t₁ t₂} → Term Γ t₁ → Term Γ t₂ → t₂ ~ (~dom t₁) → Term Γ (~cod t₁)
_T⊕_ : ∀ {t₁ t₂} → Term Γ t₁ → Term Γ t₂ → (t₁ ~ nat) → (t₂ ~ nat) → Term Γ (t₁ ⊓ t₂)
Tif : ∀ {t₁ t₂ t₃} → Term Γ t₁ → Term Γ t₂ → Term Γ t₃ → (t₁ ~ bool) → Term Γ (t₂ ⊓ t₃)
Tlam : ∀ t₁ {t₂} → Term (t₁ ∷ Γ) t₂ → Term Γ (t₁ ⇒ t₂)
erase : ∀ {n} {Γ : Ctx n} {t} → Term Γ t → Expr
erase (Tx v x) = var (toℕ v)
erase (Tn x) = litNat x
erase (Tb x) = litBool x
erase Tdy = dyn
erase ((term T∙ term₁) _) = (erase term) ∙ (erase term₁)
erase ((term T⊕ term₁) _ _) = (erase term) ⊕ (erase term₁)
erase (Tif b tt ff _) = if erase b thn erase tt els erase ff
erase (Tlam t₁ term) = lam t₁ (erase term)
data Fromℕ (n : ℕ) : ℕ → Set where
yes : (m : Fin n) → Fromℕ n (toℕ m)
no : (m : ℕ) → Fromℕ n (n + m)
fromℕ : ∀ n m → Fromℕ n m
fromℕ zero m = no m
fromℕ (suc n) zero = yes zero
fromℕ (suc n) (suc m) with fromℕ n m
fromℕ (suc n) (suc .(toℕ m)) | yes m = yes (suc m)
fromℕ (suc n) (suc .(n + m)) | no m = no m
data Check {n} (Γ : Ctx n) : Expr → Set where
yes : (τ : GType) (t : Term Γ τ) → Check Γ (erase t)
no : {e : Expr} → Check Γ e
staticCheck : ∀ {n} (Γ : Ctx n) (t : Expr) → Check Γ t
-- | primitives
staticCheck Γ (litNat x) = yes nat (Tn x)
staticCheck Γ (litBool x) = yes bool (Tb x)
staticCheck {n} Γ dyn = yes ✭ Tdy
staticCheck Γ err = no
-- | var lookup
staticCheck {n} Γ (var v) with fromℕ n v
staticCheck {n} Γ (var .(toℕ m)) | yes m = yes (lookup m Γ) (Tx m refl)
staticCheck {n} Γ (var .(n + m)) | no m = no
-- | lambda abstraction
staticCheck Γ (lam x t) with staticCheck (x ∷ Γ) t
staticCheck Γ (lam x .(erase t)) | yes τ t = yes (x ⇒ τ) (Tlam x t) -- double check this
staticCheck Γ (lam x t) | no = no
-- | application
staticCheck Γ (t₁ ∙ t₂) with staticCheck Γ t₁ | staticCheck Γ t₂
staticCheck Γ (.(erase t₁) ∙ .(erase t)) | yes (τ₁ ⇒ τ₂) t₁ | (yes τ t) = yes τ₂ ((t₁ T∙ t) cons)
staticCheck Γ (.(erase t₁) ∙ .(erase t)) | yes _ t₁ | (yes τ t) = no -- not sure about this
staticCheck Γ (t₁ ∙ t₂) | _ | _ = no
-- | addition
staticCheck Γ (t₁ ⊕ t₂) with staticCheck Γ t₁ | staticCheck Γ t₂
staticCheck Γ (.(erase t₁) ⊕ .(erase t)) | yes nat t₁ | (yes nat t) = yes nat ((t₁ T⊕ t) cons cons)
staticCheck Γ (.(erase t₁) ⊕ .(erase t)) | yes ✭ t₁ | (yes nat t) = yes (✭ ⊓ nat) ((t₁ T⊕ t) cons cons)
staticCheck Γ (.(erase t₁) ⊕ .(erase t)) | yes nat t₁ | (yes ✭ t) = yes (nat ⊓ ✭) ((t₁ T⊕ t) cons cons)
staticCheck Γ (.(erase t₁) ⊕ .(erase t)) | yes ✭ t₁ | (yes ✭ t) = yes ✭ ((t₁ T⊕ t) cons cons)
staticCheck Γ (t₁ ⊕ t₂) | _ | _ = no
-- | if ... then ... else
staticCheck Γ (if t thn t₁ els t₂) with staticCheck Γ t
staticCheck Γ (if .(erase t) thn t₁ els t₂) | yes bool t with staticCheck Γ t₁ | staticCheck Γ t₂
staticCheck Γ (if .(erase t₂) thn .(erase t₁) els .(erase t)) | yes bool t₂ | (yes τ₁ t₁) | (yes τ₂ t) = yes (τ₁ ⊓ τ₂) (Tif t₂ t₁ t cons)
staticCheck Γ (if .(erase t₁) thn .(erase t) els t₂) | yes bool t₁ | (yes τ t) | no = no
staticCheck Γ (if .(erase t₂) thn t₁ els .(erase t)) | yes bool t₂ | no | (yes τ t) = no
staticCheck Γ (if .(erase t) thn t₁ els t₂) | yes bool t | no | _ = no
staticCheck Γ (if .(erase t) thn t₁ els t₂) | yes ✭ t with staticCheck Γ t₁ | staticCheck Γ t₂
staticCheck Γ (if .(erase t₂) thn .(erase t₁) els .(erase t)) | yes ✭ t₂ | (yes τ₁ t₁) | (yes τ₂ t) = yes (τ₁ ⊓ τ₂) (Tif t₂ t₁ t cons)
staticCheck Γ (if .(erase t₁) thn .(erase t) els t₂) | yes ✭ t₁ | (yes τ t) | no = no
staticCheck Γ (if .(erase t₂) thn t₁ els .(erase t)) | yes ✭ t₂ | no | (yes τ t) = no
staticCheck Γ (if .(erase t) thn t₁ els t₂) | yes ✭ t | no | no = no
staticCheck Γ (if .(erase t) thn t₁ els t₂) | yes _ t = no
staticCheck Γ (if t thn t₁ els t₂) | no = no
extractType : ∀ {n} {Γ : Ctx n} {t : Expr} → Check Γ t → GType
extractType (yes τ t) = τ
extractType no = err
-- Type Precision
data _⊑_ : GType → GType → Set where
n⊑✭ : nat ⊑ ✭
b⊑✭ : bool ⊑ ✭
⇒⊑ : ∀ (t₁ t₂ : GType) → (t₁ ⇒ t₂) ⊑ ✭
n⊑n : nat ⊑ nat
b⊑b : bool ⊑ bool
✭⊑✭ : ✭ ⊑ ✭
app⊑ : ∀ (t₁ t₂ t₃ t₄ : GType) → t₁ ⊑ t₃ → t₂ ⊑ t₄ → (t₁ ⇒ t₂) ⊑ (t₃ ⇒ t₄)
-- Term Precision
data _≤_ : Expr → Expr → Set where
n≤n : ∀ {n} → litNat n ≤ litNat n
b≤b : ∀ {b} → litBool b ≤ litBool b
n≤✭ : ∀ {n} → litNat n ≤ dyn
b≤✭ : ∀ {b} → litBool b ≤ dyn
d≤d : dyn ≤ dyn
ssG : ∀ {n} {Γ : Ctx n} {e₁ e₂ : Expr} → e₁ ≤ e₂ → extractType (staticCheck Γ e₁) ⊑ extractType (staticCheck Γ e₂)
ssG n≤n = n⊑n
ssG b≤b = b⊑b
ssG n≤✭ = n⊑✭
ssG b≤✭ = b⊑✭
ssG d≤d = ✭⊑✭
|
Suppose $f$ is holomorphic on the ball $B(z_0, r)$. Then the radius of convergence of the power series expansion of $f$ at $z_0$ is at least $r$. Moreover, the power series expansion of $f$ at $z_0$ converges to $f$ on the ball $B(z_0, r)$.
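In standard notation (a LaTeX rendering of the same statement; the explicit coefficient formula below is the classical Taylor coefficient, added here for concreteness):
\begin{equation}
f(z) = \sum_{n=0}^{\infty} \frac{f^{(n)}(z_0)}{n!}\,(z - z_0)^n \quad \text{for all } z \in B(z_0, r),
\end{equation}
so the radius of convergence of this series is at least $r$.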
|
// Copyright 2018 Jeremy Mason
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! \file vector.c
//! Contains functions to manipulate vectors. Intended as a relatively less
//! painful wrapper around Level 1 CBLAS.
#include <cblas.h> // daxpy
#include <float.h> // DBL_EPSILON
#include <math.h> // exp
#include <stdbool.h> // bool
#include <stddef.h> // size_t
#include <stdio.h> // EOF
#include <stdlib.h> // abort
#include <string.h> // memcpy
#include "sb_matrix.h" // sb_mat_is_finite
#include "sb_structs.h" // sb_mat
#include "sb_utility.h" // SB_CHK_ERR
#include "sb_vector.h"
#include "safety.h"
/// Constructs a vector with the required capacity.
///
/// # Parameters
/// - `n_elem`: capacity of the vector
/// - `layout`: `c` for a column vector, `r` for a row vector
///
/// # Returns
/// A `sb_vec` pointer to the allocated vector, or `NULL` if the allocation fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_LAYOUT`: `layout` is `c` or `r`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// sb_vec * v = sb_vec_malloc(3, 'r');
///
/// // Fill the vector with some values and print
/// double a[] = {1., 4., 2.};
/// sb_vec_subcpy(v, 0, a, 3);
/// sb_vec_print(v, "v: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_malloc(size_t n_elem, char layout) {
#ifdef SAFE_LAYOUT
SB_CHK_ERR(layout != 'c' && layout != 'r', abort(),
"sb_vec_malloc: layout must be 'c' or 'r'");
#endif
sb_vec * out = malloc(sizeof(sb_vec));
SB_CHK_ERR(!out, return NULL, "sb_vec_malloc: failed to allocate vector");
double * data = malloc(n_elem * sizeof(double));
SB_CHK_ERR(!data, free(out); return NULL, "sb_vec_malloc: failed to allocate data");
out->n_elem = n_elem;
out->data = data;
out->layout = layout;
return out;
}
/// Constructs a vector with the required capacity and initializes all elements
/// to zero. Requires support for the IEC 60559 standard.
///
/// # Parameters
/// - `n_elem`: capacity of the vector
/// - `layout`: `c` for a column vector, `r` for a row vector
///
/// # Returns
/// A `sb_vec` pointer to the allocated vector, or `NULL` if the allocation fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_LAYOUT`: `layout` is `c` or `r`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// sb_vec * v = sb_vec_calloc(3, 'r');
///
/// // Initialized to zeros
/// sb_vec_print(v, "v before: ", "%g");
///
/// // Fill the vector with some values and print
/// double a[] = {1., 4., 2.};
/// sb_vec_subcpy(v, 0, a, 3);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_calloc(size_t n_elem, char layout) {
#ifdef SAFE_LAYOUT
SB_CHK_ERR(layout != 'c' && layout != 'r', abort(),
"sb_vec_calloc: layout must be 'c' or 'r'");
#endif
sb_vec * out = malloc(sizeof(sb_vec));
SB_CHK_ERR(!out, return NULL, "sb_vec_calloc: failed to allocate vector");
double * data = calloc(n_elem, sizeof(double));
SB_CHK_ERR(!data, free(out); return NULL, "sb_vec_calloc: failed to allocate data");
out->n_elem = n_elem;
out->data = data;
out->layout = layout;
return out;
}
/// Constructs a vector with the required capacity and initializes elements to
/// the first `n_elem` elements of array `a`. The array must contain at least
/// `n_elem` elements.
///
/// # Parameters
/// - `a`: array to be copied into the vector
/// - `n_elem`: capacity of the vector
/// - `layout`: `c` for a column vector, `r` for a row vector
///
/// # Returns
/// A `sb_vec` pointer to the allocated vector, or `NULL` if the allocation fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `a` is not `NULL`
/// - `SAFE_LAYOUT`: `layout` is `c` or `r`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
///
/// // Construct a vector from the array
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec_print(v, "v: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_of_arr(const double * a, size_t n_elem, char layout) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!a, abort(), "sb_vec_of_arr: a cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(layout != 'c' && layout != 'r', abort(),
"sb_vec_of_arr: layout must be 'c' or 'r'");
#endif
sb_vec * out = malloc(sizeof(sb_vec));
SB_CHK_ERR(!out, return NULL, "sb_vec_of_arr: failed to allocate vector");
double * data = malloc(n_elem * sizeof(double));
SB_CHK_ERR(!data, free(out); return NULL, "sb_vec_of_arr: failed to allocate data");
out->n_elem = n_elem;
out->data = memcpy(data, a, n_elem * sizeof(double));
out->layout = layout;
return out;
}
/// Constructs a vector as a deep copy of an existing vector. The state of the
/// existing vector must be valid.
///
/// # Parameters
/// - `v`: pointer to the vector to be copied
///
/// # Returns
/// A `sb_vec` pointer to the allocated vector, or `NULL` if the allocation fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // `v` and `w` contain the same elements
/// sb_vec * w = sb_vec_clone(v);
/// sb_vec_print(w, "w before: ", "%g");
///
/// // Fill `w` with some values and print
/// sb_vec_subcpy(w, 0, a + 3, 3);
/// sb_vec_print(w, "w after: ", "%g");
///
/// // `v` is unchanged
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_clone(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_clone: v cannot be NULL");
#endif
sb_vec * out = malloc(sizeof(sb_vec));
SB_CHK_ERR(!out, return NULL, "sb_vec_clone: failed to allocate vector");
size_t n_elem = v->n_elem;
double * data = malloc(n_elem * sizeof(double));
SB_CHK_ERR(!data, free(out); return NULL, "sb_vec_clone: failed to allocate data");
*out = *v;
out->data = memcpy(data, v->data, n_elem * sizeof(double));
return out;
}
/// Constructs a column vector containing `n_elem` elements in equal intervals
/// from `begin` to `end`. Must contain at least two elements.
///
/// # Parameters
/// - `begin`: beginning of the interval
/// - `end`: end of the interval
/// - `n_elem`: number of elements
///
/// # Returns
/// A `sb_vec` pointer to the allocated vector, or `NULL` if the allocation fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_LENGTH`: `n_elem` is at least two
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// sb_vec * v = sb_vec_linear(4., 0., 5);
/// sb_vec_print(v, "v: ", "%g");
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_linear(double begin, double end, size_t n_elem) {
#ifdef SAFE_LENGTH
SB_CHK_ERR(n_elem < 2, abort(), "sb_vec_linear: must contain at least two elements");
#endif
sb_vec * out = malloc(sizeof(sb_vec));
SB_CHK_ERR(!out, return NULL, "sb_vec_linear: failed to allocate vector");
double * data = malloc(n_elem * sizeof(double));
SB_CHK_ERR(!data, free(out); return NULL, "sb_vec_linear: failed to allocate data");
size_t n_elem_1 = n_elem - 1;
double step = (end - begin) / n_elem_1;
for (size_t a = 0; a < n_elem_1; ++a) {
data[a] = fma(a, step, begin);
}
data[n_elem_1] = end;
out->n_elem = n_elem;
out->data = data;
out->layout = 'c';
return out;
}
/// Deconstructs a vector.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// No return value
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
///
/// // Allocates a pointer to vec
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec_print(v, "v: ", "%g");
///
/// sb_vec_free(v);
/// // Pointer to `v` is now invalid
/// }
/// ```
void sb_vec_free(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_free: v cannot be NULL");
#endif
free(v->data);
free(v);
}
/// Sets all elements of `v` to zero. Requires support for the IEC 60559
/// standard.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Set all elements to zero
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_set_zero(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_set_zero(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_set_zero: v cannot be NULL");
#endif
  memset(v->data, 0, v->n_elem * sizeof(double));
  return v;
}
/// Sets all elements of `v` to `x`.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `x`: value for the elements
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Set all elements to 8.
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_set_all(v, 8.);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_set_all(sb_vec * v, double x) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_set_all: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = x;
}
return v;
}
/// Sets all elements of `v` to zero, except for the `i`th element which is set
/// to one. Requires support for the IEC 60559 standard.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `i`: index of the element with value one
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: `i` is a valid index
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Set `v` to second basis vector
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_set_basis(v, 1);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_set_basis(sb_vec * v, size_t i) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_set_basis: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(i >= v->n_elem, abort(), "sb_vec_set_basis: index out of bounds");
#endif
((double *) memset(v->data, 0, v->n_elem * sizeof(double)))[i] = 1.;
return v;
}
/// Copies contents of the `src` vector into the `dest` vector. `src` and `dest`
/// must have the same length, the same layout, and not overlap in memory.
///
/// # Parameters
/// - `dest`: pointer to destination vector
/// - `src`: const pointer to source vector
///
/// # Returns
/// A copy of `dest`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `src` and `dest` are not `NULL`
/// - `SAFE_LAYOUT`: `src` and `dest` have same layout
/// - `SAFE_LENGTH`: `src` and `dest` have same length
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Overwrite elements of `v` and print
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_print(w, "w before: ", "%g");
/// sb_vec_memcpy(v, w);
/// sb_vec_print(v, "v after: ", "%g");
/// sb_vec_print(w, "w after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_memcpy(sb_vec * restrict dest, const sb_vec * restrict src) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!dest, abort(), "sb_vec_memcpy: dest cannot be NULL");
SB_CHK_ERR(!src, abort(), "sb_vec_memcpy: src cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(dest->layout != src->layout, abort(),
"sb_vec_memcpy: dest and src must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(dest->n_elem != src->n_elem, abort(),
"sb_vec_memcpy: dest and src must have same length");
#endif
memcpy(dest->data, src->data, src->n_elem * sizeof(double));
return dest;
}
/// Copies `n` elements of array `a` into vector `v` starting at index `i`. `v`
/// must have enough capacity, `a` must contain at least `n` elements, and `v`
/// and `a` must not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to destination vector
/// - `i`: index of `v` where the copy will start
/// - `a`: pointer to elements that will be copied
/// - `n`: number of elements to copy
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `a` are not `NULL`
/// - `SAFE_LENGTH`: `v` has enough capacity
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Overwrite elements of `v` and print
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_subcpy(v, 1, a + 3, 2);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_subcpy(
sb_vec * restrict v,
size_t i,
const double * restrict a,
size_t n) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_subcpy: v cannot be NULL");
SB_CHK_ERR(!a, abort(), "sb_vec_subcpy: a cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem - i < n, abort(),
"sb_vec_subcpy: v does not have enough capacity");
#endif
memcpy(v->data + i, a, n * sizeof(double));
return v;
}
/// Swaps the contents of `v` and `w` by exchanging data pointers. Vectors must
/// have the same length and layout and not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// No return value
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Print vectors before and after
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_print(w, "w before: ", "%g");
///
/// sb_vec_swap(v, w);
///
/// sb_vec_print(v, "v after: ", "%g");
/// sb_vec_print(w, "w after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
void sb_vec_swap(sb_vec * restrict v, sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_swap: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_swap: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_swap: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_swap: v and w must have same length");
#endif
double * scratch;
SB_SWAP(v->data, w->data, scratch);
}
/// Swaps the `i`th and `j`th elements of a vector.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `i`: index of first element
/// - `j`: index of second element
///
/// # Returns
/// No return value
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: `i` and `j` are valid indices
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Print vector before and after
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_swap_elems(v, 0, 1);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
void sb_vec_swap_elems(sb_vec * v, size_t i, size_t j) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_swap_elems: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(i >= v->n_elem, abort(), "sb_vec_swap_elems: index out of bounds");
SB_CHK_ERR(j >= v->n_elem, abort(), "sb_vec_swap_elems: index out of bounds");
#endif
double * data = v->data;
double scratch;
SB_SWAP(data[i], data[j], scratch);
}
/// Writes the vector `v` to `stream` in a binary format. The data is written
/// in the native binary format of the architecture, and may not be portable.
///
/// # Parameters
/// - `stream`: an open I/O stream
/// - `v`: pointer to the vector
///
/// # Returns
/// `0` on success, or `1` if the write fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Write the vector to file
/// FILE * f = fopen("vector.bin", "wb");
/// sb_vec_fwrite(f, v);
/// fclose(f);
///
/// // Read the vector from file
/// FILE * g = fopen("vector.bin", "rb");
/// sb_vec * w = sb_vec_fread(g);
/// fclose(g);
///
/// // Vectors have the same contents
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
int sb_vec_fwrite(FILE * stream, const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_fwrite: v cannot be NULL");
#endif
size_t n_write;
size_t n_elem = v->n_elem;
n_write = fwrite(&n_elem, sizeof(size_t), 1, stream);
SB_CHK_ERR(n_write != 1, return 1, "sb_vec_fwrite: fwrite failed");
n_write = fwrite(&(v->layout), sizeof(char), 1, stream);
SB_CHK_ERR(n_write != 1, return 1, "sb_vec_fwrite: fwrite failed");
n_write = fwrite(v->data, sizeof(double), n_elem, stream);
SB_CHK_ERR(n_write != n_elem, return 1, "sb_vec_fwrite: fwrite failed");
return 0;
}
/// Writes the vector `v` to `stream`. The number of elements, layout, and
/// elements are written in a human readable format.
///
/// # Parameters
/// - `stream`: an open I/O stream
/// - `v`: pointer to the vector
/// - `format`: a format specifier for the elements
///
/// # Returns
/// `0` on success, or `1` if the write fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
///   double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Write the vector to file
/// FILE * f = fopen("vector.txt", "w");
/// sb_vec_fprintf(f, v, "%lg");
/// fclose(f);
///
///   // Read the vector from file
/// FILE * g = fopen("vector.txt", "r");
/// sb_vec * w = sb_vec_fscanf(g);
/// fclose(g);
///
/// // Vectors have the same contents
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
int sb_vec_fprintf(FILE * stream, const sb_vec * v, const char * format) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_fprintf: v cannot be NULL");
#endif
int status;
size_t n_elem = v->n_elem;
status = fprintf(stream, "%zu %c", n_elem, v->layout);
SB_CHK_ERR(status < 0, return 1, "sb_vec_fprintf: fprintf failed");
double * data = v->data;
for (size_t a = 0; a < n_elem; ++a) {
status = putc(' ', stream);
SB_CHK_ERR(status == EOF, return 1, "sb_vec_fprintf: putc failed");
status = fprintf(stream, format, data[a]);
SB_CHK_ERR(status < 0, return 1, "sb_vec_fprintf: fprintf failed");
}
status = putc('\n', stream);
SB_CHK_ERR(status == EOF, return 1, "sb_vec_fprintf: putc failed");
return 0;
}
/// Prints the vector `v` to stdout. Output is slightly easier to read than for
/// `sb_vec_fprintf()`. Mainly intended for debugging.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `str`: a string to describe the vector
/// - `format`: a format specifier for the elements
///
/// # Returns
/// `0` on success, or `1` if the print fails
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double array[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(array, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(array + 3, 3, 'c');
///
/// // Prints the contents of `v` and `w` to stdout
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
int sb_vec_print(const sb_vec * v, const char * str, const char * format) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_print: v cannot be NULL");
#endif
int status;
status = printf("%s\n", str);
SB_CHK_ERR(status < 0, return 1, "sb_vec_print: printf failed");
size_t n_elem = v->n_elem;
double * data = v->data;
char buffer[128];
char * dec;
bool dec_mark[n_elem];
bool any_mark = false;
unsigned char length;
unsigned char len_head[n_elem];
unsigned char max_head = 0;
unsigned char len_tail[n_elem];
unsigned char max_tail = 0;
for (size_t a = 0; a < n_elem; ++a) {
status = snprintf(buffer, 128, format, data[a]);
SB_CHK_ERR(status < 0, return 1, "sb_vec_print: snprintf failed");
dec = strchr(buffer, '.');
if (dec) {
dec_mark[a] = true;
any_mark = true;
length = (unsigned char)(dec - buffer);
len_head[a] = length;
if (length > max_head) { max_head = length; }
length = strlen(buffer) - length - 1;
if (length > max_tail) { max_tail = length; }
len_tail[a] = length;
} else {
dec_mark[a] = false;
length = strlen(buffer);
len_head[a] = length;
if (length > max_head) { max_head = length; }
len_tail[a] = 0;
}
}
for (size_t a = 0; a < n_elem; ++a) {
for (unsigned char s = 0; s < max_head - len_head[a]; ++s) {
status = putchar(' ');
SB_CHK_ERR(status == EOF, return 1, "sb_vec_print: putchar failed");
}
status = printf(format, data[a]);
SB_CHK_ERR(status < 0, return 1, "sb_vec_print: printf failed");
if (any_mark && !dec_mark[a]) {
status = putchar(' ');
SB_CHK_ERR(status == EOF, return 1, "sb_vec_print: putchar failed");
}
for (unsigned char s = 0; s < max_tail - len_tail[a]; ++s) {
status = putchar(' ');
SB_CHK_ERR(status == EOF, return 1, "sb_vec_print: putchar failed");
}
status = putchar(v->layout == 'r' ? ' ' : '\n');
SB_CHK_ERR(status == EOF, return 1, "sb_vec_print: putchar failed");
}
if (v->layout == 'r') {
status = putchar('\n');
SB_CHK_ERR(status == EOF, return 1, "sb_vec_print: putchar failed");
}
return 0;
}
/// Reads binary data from `stream` into the vector returned by the function.
/// The data must be written in the native binary format of the architecture,
/// preferably by `sb_vec_fwrite()`.
///
/// # Parameters
/// - `stream`: an open I/O stream
///
/// # Returns
/// A `sb_vec` pointer to the vector read from `stream`, or `NULL` if the read or
/// memory allocation fails
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Write the vector to file
/// FILE * f = fopen("vector.bin", "wb");
/// sb_vec_fwrite(f, v);
/// fclose(f);
///
/// // Read the vector from file
/// FILE * g = fopen("vector.bin", "rb");
/// sb_vec * w = sb_vec_fread(g);
/// fclose(g);
///
/// // Vectors have the same contents
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_fread(FILE * stream) {
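// Binary layout (as written by sb_vec_fwrite): n_elem, layout, then the elements.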
size_t n_read;
size_t n_elem;
n_read = fread(&n_elem, sizeof(size_t), 1, stream);
SB_CHK_ERR(n_read != 1, return NULL, "sb_vec_fread: fread failed");
char layout;
n_read = fread(&layout, sizeof(char), 1, stream);
SB_CHK_ERR(n_read != 1, return NULL, "sb_vec_fread: fread failed");
sb_vec * out = sb_vec_malloc(n_elem, layout);
SB_CHK_ERR(!out, return NULL, "sb_vec_fread: sb_vec_malloc failed");
n_read = fread(out->data, sizeof(double), n_elem, stream);
SB_CHK_ERR(n_read != n_elem, sb_vec_free(out); return NULL, "sb_vec_fread: fread failed");
return out;
}
/// Reads formatted data from `stream` into the vector returned by the function.
///
/// # Parameters
/// - `stream`: an open I/O stream
///
/// # Returns
/// A `sb_vec` pointer to the vector read from `stream`, or `NULL` if the scan or
/// memory allocation fails
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Write the vector to file
/// FILE * f = fopen("vector.txt", "w");
/// sb_vec_fprintf(f, v, "%lg");
/// fclose(f);
///
/// // Read the vector from file
/// FILE * g = fopen("vector.txt", "r");
/// sb_vec * w = sb_vec_fscanf(g);
/// fclose(g);
///
/// // Vectors have the same contents
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_fscanf(FILE * stream) {
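// Expected text format (as written by sb_vec_fprintf): n_elem and layout,
// followed by the elements.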
int n_scan;
size_t n_elem;
char layout;
n_scan = fscanf(stream, "%zu %c", &n_elem, &layout);
SB_CHK_ERR(n_scan != 2, return NULL, "sb_vec_fscanf: fscanf failed");
sb_vec * out = sb_vec_malloc(n_elem, layout);
SB_CHK_ERR(!out, return NULL, "sb_vec_fscanf: sb_vec_malloc failed");
double * data = out->data;
for (size_t a = 0; a < n_elem; ++a) {
n_scan = fscanf(stream, "%lg", data + a);
SB_CHK_ERR(n_scan != 1, sb_vec_free(out); return NULL, "sb_vec_fscanf: fscanf failed");
}
return out;
}
/// Takes the absolute value of every element of the vector.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {-1., 4., -2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Take absolute value
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_abs(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_abs(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_abs: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = fabs(data[a]);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_abs: element not finite");
#endif
return v;
}
/// Takes the exponential (base `e`) of every element of the vector.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <math.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {log(1.), log(4.), log(2.)};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Take the exponential (base e) of every element
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_exp(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_exp(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_exp: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = exp(data[a]);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_exp: element not finite");
#endif
return v;
}
/// Takes the logarithm base `e` of every element of the vector.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <math.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {exp(1.), exp(4.), exp(2.)};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Take logarithm base e
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_log(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_log(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_log: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = log(data[a]);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_log: element not finite");
#endif
return v;
}
/// Exponentiates every element of the vector `v` by `x`.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `x`: scalar exponent of the elements
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Exponentiate every element by -1.
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_pow(v, -1.);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_pow(sb_vec * v, double x) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_pow: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = pow(data[a], x);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_pow: element not finite");
#endif
return v;
}
/// Takes the square root of every element of the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 16., 4.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Take the square root of every element
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_sqrt(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_sqrt(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_sqrt: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] = sqrt(data[a]);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_sqrt: element not finite");
#endif
return v;
}
/// Scalar addition of `x` to every element of the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `x`: scalar added to the elements
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Add 2. to every element
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_sadd(v, 2.);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_sadd(sb_vec * v, double x) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_sadd: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
data[a] += x;
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_sadd: element not finite");
#endif
return v;
}
/// Scalar multiplication of `x` with every element of the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
/// - `x`: scalar multiplier for the elements
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Multiply every element by -1.
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_smul(v, -1.);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_smul(sb_vec * v, double x) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_smul: v cannot be NULL");
#endif
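// BLAS dscal scales in place with unit stride: data[a] *= x for every element.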
cblas_dscal(v->n_elem, x, v->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_smul: elements not finite");
#endif
return v;
}
/// Pointwise addition of elements of the vector `w` to elements of the vector
/// `v`. `v` and `w` must not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Add w to v
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_padd(v, w);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_padd(sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_padd: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_padd: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_padd: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_padd: v and w must have same length");
#endif
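// BLAS daxpy with alpha = 1.: v := v + w, elementwise with unit strides.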
cblas_daxpy(v->n_elem, 1., w->data, 1, v->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_padd: elements not finite");
#endif
return v;
}
/// Pointwise subtraction of elements of the vector `w` from elements of the
/// vector `v`. `v` and `w` must not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Subtract w from v
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_psub(v, w);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_psub(sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_psub: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_psub: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_psub: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_psub: v and w must have same length");
#endif
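// BLAS daxpy with alpha = -1.: v := v - w, elementwise with unit strides.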
cblas_daxpy(v->n_elem, -1., w->data, 1, v->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_psub: elements not finite");
#endif
return v;
}
/// Pointwise multiplication of elements of the vector `v` with elements of the
/// vector `w`. `v` and `w` must not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Multiply elements of v by elements of w
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_pmul(v, w);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_pmul(sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_pmul: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_pmul: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_pmul: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_pmul: v and w must have same length");
#endif
double * v_data = v->data;
double * w_data = w->data;
for (size_t a = 0; a < v->n_elem; ++a) {
v_data[a] *= w_data[a];
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_pmul: element not finite");
#endif
return v;
}
/// Pointwise division of elements of the vector `v` by elements of the vector
/// `w`. `v` and `w` must not overlap in memory.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Divide elements of v by elements of w
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_pdiv(v, w);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_vec * sb_vec_pdiv(sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_pdiv: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_pdiv: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_pdiv: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_pdiv: v and w must have same length");
#endif
double * v_data = v->data;
double * w_data = w->data;
for (size_t a = 0; a < v->n_elem; ++a) {
v_data[a] /= w_data[a];
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_pdiv: element not finite");
#endif
return v;
}
/// Performs the operation \f$\mathbf{r} x + y\f$ where `x` and `y` are scalars
/// and `r` is modified (result x add y).
///
/// # Parameters
/// - `r`: pointer to the vector
/// - `x`: scalar multiplier for the elements
/// - `y`: scalar summand for the elements
///
/// # Returns
/// A copy of `r`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `r` is not `NULL`
/// - `SAFE_FINITE`: elements of `r` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * r = sb_vec_of_arr(a, 3, 'r');
///
/// // Multiply by -1. and add 2.
/// sb_vec_print(r, "r before: ", "%g");
/// sb_vec_rxay(r, -1., 2.);
/// sb_vec_print(r, "r after: ", "%g");
///
/// SB_VEC_FREE_ALL(r);
/// }
/// ```
sb_vec * sb_vec_rxay(sb_vec * r, double x, double y) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!r, abort(), "sb_vec_rxay: r cannot be NULL");
#endif
double * data = r->data;
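// fma(d, x, y) evaluates d * x + y with a single rounding per element.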
for (size_t a = 0; a < r->n_elem; ++a) {
data[a] = fma(data[a], x, y);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(r), abort(), "sb_vec_rxay: elements not finite");
#endif
return r;
}
/// Performs the operation \f$\mathbf{r} + x \mathbf{v}\f$ where `x` is a
/// scalar, `v` is a vector and `r` is modified (result add x vector). `r` and
/// `v` must not overlap in memory.
///
/// # Parameters
/// - `r`: pointer to the first vector
/// - `x`: scalar multiplier for `v`
/// - `v`: pointer to the second vector
///
/// # Returns
/// A copy of `r`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `r` and `v` are not `NULL`
/// - `SAFE_LAYOUT`: `r` and `v` have the same layout
/// - `SAFE_LENGTH`: `r` and `v` have the same length
/// - `SAFE_FINITE`: elements of `r` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * r = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * v = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Multiply v by 2. and add to r
/// sb_vec_print(r, "r before: ", "%g");
/// sb_vec_raxv(r, 2., v);
/// sb_vec_print(r, "r after: ", "%g");
///
/// SB_VEC_FREE_ALL(r, v);
/// }
/// ```
sb_vec * sb_vec_raxv(sb_vec * restrict r, double x, const sb_vec * restrict v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!r, abort(), "sb_vec_raxv: r cannot be NULL");
SB_CHK_ERR(!v, abort(), "sb_vec_raxv: v cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(r->layout != v->layout, abort(),
"sb_vec_raxv: r and v must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(r->n_elem != v->n_elem, abort(),
"sb_vec_raxv: r and v must have same length");
#endif
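// BLAS daxpy: r := r + x * v, elementwise with unit strides.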
cblas_daxpy(r->n_elem, x, v->data, 1, r->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(r), abort(), "sb_vec_raxv: elements not finite");
#endif
return r;
}
/// Performs the operation \f$\mathbf{r} \otimes \mathbf{v} - \mathbf{w}\f$
/// where `v` and `w` are vectors, \f$\otimes\f$ is pointwise multiplication,
/// and `r` is modified (result v subtract w). `r`, `v` and `w` must not
/// overlap in memory.
///
/// # Parameters
/// - `r`: pointer to the first vector
/// - `v`: pointer to the second vector
/// - `w`: pointer to the third vector
///
/// # Returns
/// A copy of `r`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `r`, `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `r`, `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `r`, `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `r` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5.};
/// sb_vec * r = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * v = sb_vec_of_arr(a + 1, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 2, 3, 'r');
///
/// // Pointwise multiply r by v and pointwise subtract w
/// sb_vec_print(r, "r before: ", "%g");
/// sb_vec_rvsw(r, v, w);
/// sb_vec_print(r, "r after: ", "%g");
///
/// SB_VEC_FREE_ALL(r, v, w);
/// }
/// ```
sb_vec * sb_vec_rvsw(
sb_vec * restrict r,
const sb_vec * restrict v,
const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!r, abort(), "sb_vec_rvsw: r cannot be NULL");
SB_CHK_ERR(!v, abort(), "sb_vec_rvsw: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_rvsw: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(r->layout != v->layout, abort(),
"sb_vec_rvsw: r and v must have same layout");
SB_CHK_ERR(r->layout != w->layout, abort(),
"sb_vec_rvsw: r and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(r->n_elem != v->n_elem, abort(),
"sb_vec_rvsw: r and v must have same length");
SB_CHK_ERR(r->n_elem != w->n_elem, abort(),
"sb_vec_rvsw: r and w must have same length");
#endif
double * r_data = r->data;
double * v_data = v->data;
double * w_data = w->data;
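// fma computes r * v - w per element with a single rounding.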
for (size_t a = 0; a < r->n_elem; ++a) {
r_data[a] = fma(r_data[a], v_data[a], -w_data[a]);
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(r), abort(), "sb_vec_rvsw: elements not finite");
#endif
return r;
}
/// Finds the sum of the elements of the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// The sum of the elements of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: sum is finite
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Sum the elements of v
/// sb_vec_print(v, "v: ", "%g");
/// printf("sum of v: %g\n", sb_vec_sum(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
double sb_vec_sum(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_sum: v cannot be NULL");
#endif
double * data = v->data;
double out = 0.;
for (size_t a = 0; a < v->n_elem; ++a) {
out += data[a];
}
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(out), abort(), "sb_vec_sum: sum not finite");
#endif
return out;
}
/// Finds the Euclidean norm of the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// The Euclidean norm of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: norm is finite
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Norm of v
/// sb_vec_print(v, "v: ", "%g");
/// printf("norm of v: %g\n", sb_vec_norm(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
double sb_vec_norm(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_norm: v cannot be NULL");
#endif
double out = cblas_dnrm2(v->n_elem, v->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(out), abort(), "sb_vec_norm: norm not finite");
#endif
return out;
}
/// Finds the dot product of the vectors `v` and `w`. Layout of `v` and `w` is
/// ignored.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// The dot product of `v` and `w`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: dot product is finite
///
/// # Examples
/// ```
/// #include <stdio.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Find the dot product of v and w
/// sb_vec_print(v, "v: ", "%g");
/// sb_vec_print(w, "w: ", "%g");
/// printf("dot product: %g\n", sb_vec_dot(v, w));
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
double sb_vec_dot(const sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_dot: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_dot: w cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_dot: v and w must have same length");
#endif
double out = cblas_ddot(v->n_elem, v->data, 1, w->data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(out), abort(), "sb_vec_dot: dot product not finite");
#endif
return out;
}
/// Finds the outer product of the vectors `v` and `w` and adds the result to
/// the matrix `A`. `v` and `w` must be column and row vectors, respectively,
/// and the matrix `A` must have the same number of rows as `v` and the same
/// number of columns as `w`.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
/// - `A`: pointer to the matrix
///
/// # Returns
/// A copy of `A`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v`, `w` and `A` are not `NULL`
/// - `SAFE_LAYOUT`: `v` is a column vector and `w` is a row vector
/// - `SAFE_LENGTH`: `v` and `A` have the same number of rows, and `w` and
/// `A` have the same number of columns
/// - `SAFE_FINITE`: elements of `A` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_matrix.h"
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'c');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // Store the outer product in A
/// sb_mat * A = sb_mat_calloc(3, 3);
/// sb_mat_print(sb_vec_outer(A, v, w), "A: ", "%g");
///
/// sb_mat_free(A);
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
sb_mat * sb_vec_outer(
sb_mat * restrict A,
const sb_vec * restrict v,
const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_outer: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_outer: w cannot be NULL");
SB_CHK_ERR(!A, abort(), "sb_vec_outer: A cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != 'c', abort(), "sb_vec_outer: v must be a column vector");
SB_CHK_ERR(w->layout != 'r', abort(), "sb_vec_outer: w must be a row vector");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != A->n_rows, abort(),
"sb_vec_outer: v and A must have same number of rows");
SB_CHK_ERR(w->n_elem != A->n_cols, abort(),
"sb_vec_outer: w and A must have same number of cols");
#endif
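// BLAS dger rank-1 update: A += v * w^T, with A stored column-major and
// leading dimension equal to its number of rows.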
cblas_dger(CblasColMajor, v->n_elem, w->n_elem, 1., v->data, 1, w->data, 1,
A->data, A->n_rows);
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_mat_is_finite(A), abort(), "sb_vec_outer: A is not finite");
#endif
return A;
}
/// Reverses the order of elements in the vector `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Print vector and reverse
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_reverse(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_reverse(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_reverse: v cannot be NULL");
#endif
size_t n_elem = v->n_elem;
double * data = v->data;
size_t b;
double scratch;
for (size_t a = 0; a < n_elem / 2; ++a) {
b = n_elem - a - 1;
SB_SWAP(data[a], data[b], scratch);
}
return v;
}
// FOR USE ONLY WITH SB_VEC_SORT_INC
static int dcmp_inc(const void * pa, const void * pb) {
double a = *(const double *)pa;
double b = *(const double *)pb;
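// Compare with explicit branches: returning (int)(a - b) would truncate
// fractional differences to zero and misorder the elements.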
if (a < b) { return -1; }
if (b < a) { return 1; }
return 0;
}
/// Sorts the elements of the vector `v` in increasing order.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Sort vector and print
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_sort_inc(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_sort_inc(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_sort_inc: v cannot be NULL");
#endif
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_sort_inc: v is not finite");
#endif
qsort(v->data, v->n_elem, sizeof(double), dcmp_inc);
return v;
}
// FOR USE ONLY WITH SB_VEC_SORT_DEC
static int dcmp_dec(const void * pa, const void * pb) {
double a = *(const double *)pa;
double b = *(const double *)pb;
if (b < a) { return -1; }
if (a < b) { return 1; }
return 0;
}
/// Sorts the elements of the vector `v` in decreasing order.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Sort vector and print
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_sort_dec(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_sort_dec(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_sort_dec: v cannot be NULL");
#endif
#ifdef SAFE_FINITE
SB_CHK_ERR(!sb_vec_is_finite(v), abort(), "sb_vec_sort_dec: v is not finite");
#endif
qsort(v->data, v->n_elem, sizeof(double), dcmp_dec);
return v;
}
/// Transposes the vector `v`. NOTE: This is a no-op when `SAFE_LAYOUT` is not
/// defined, possibly leading to unexpected behavior with e.g. `sb_vec_print()`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// A copy of `v`
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LAYOUT`: `layout` is `c` or `r`
///
/// # Examples
/// ```
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // Print vector and transpose
/// sb_vec_print(v, "v before: ", "%g");
/// sb_vec_trans(v);
/// sb_vec_print(v, "v after: ", "%g");
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
sb_vec * sb_vec_trans(sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_trans: v cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != 'c' && v->layout != 'r', abort(),
"sb_vec_trans: v has invalid layout");
v->layout = (v->layout == 'c' ? 'r' : 'c');
#endif
return v;
}
/// Checks the vectors `v` and `w` for equality of elements. Vectors must have
/// the same length and layout.
///
/// # Parameters
/// - `v`: pointer to the first vector
/// - `w`: pointer to the second vector
///
/// # Returns
/// `1` if `v` and `w` are equal, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` and `w` are not `NULL`
/// - `SAFE_LAYOUT`: `v` and `w` have the same layout
/// - `SAFE_LENGTH`: `v` and `w` have the same length
/// - `SAFE_FINITE`: elements of `v` and `w` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2., 8., 5., 7.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
/// sb_vec * w = sb_vec_of_arr(a + 3, 3, 'r');
///
/// // v and w are equal after sb_vec_memcpy
/// assert(!sb_vec_is_equal(v, w));
/// sb_vec_memcpy(w, v);
/// assert( sb_vec_is_equal(v, w));
///
/// SB_VEC_FREE_ALL(v, w);
/// }
/// ```
int sb_vec_is_equal(const sb_vec * restrict v, const sb_vec * restrict w) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_equal: v cannot be NULL");
SB_CHK_ERR(!w, abort(), "sb_vec_is_equal: w cannot be NULL");
#endif
#ifdef SAFE_LAYOUT
SB_CHK_ERR(v->layout != w->layout, abort(),
"sb_vec_is_equal: v and w must have same layout");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem != w->n_elem, abort(),
"sb_vec_is_equal: v and w must have same length");
#endif
double * v_data = v->data;
double * w_data = w->data;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(v_data[a]), abort(), "sb_vec_is_equal: v is not finite");
SB_CHK_ERR(!isfinite(w_data[a]), abort(), "sb_vec_is_equal: w is not finite");
#endif
if (v_data[a] != w_data[a]) {
return 0;
}
}
return 1;
}
/// Checks if all elements of the vector `v` are zero.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// `1` if all elements of `v` are zero, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // v is zero after sb_vec_set_zero
/// assert(!sb_vec_is_zero(v));
/// sb_vec_set_zero(v);
/// assert( sb_vec_is_zero(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
int sb_vec_is_zero(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_zero: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
if (data[a] != 0.) {
return 0;
}
}
return 1;
}
/// Checks if all elements of the vector `v` are positive.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// `1` if all elements of `v` are positive, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {-1., -4., -2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // v is positive after sb_vec_abs
/// assert(!sb_vec_is_pos(v));
/// sb_vec_abs(v);
/// assert( sb_vec_is_pos(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
int sb_vec_is_pos(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_pos: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_is_pos: v is not finite");
#endif
if (data[a] <= 0.) {
return 0;
}
}
return 1;
}
/// Checks if all elements of the vector `v` are negative.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// `1` if all elements of `v` are negative, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., -4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // v is negative after negating its absolute value
/// assert(!sb_vec_is_neg(v));
/// sb_vec_smul(sb_vec_abs(v), -1.);
/// assert( sb_vec_is_neg(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
int sb_vec_is_neg(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_neg: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_is_neg: v is not finite");
#endif
if (data[a] >= 0.) {
return 0;
}
}
return 1;
}
/// Checks if all elements of the vector `v` are nonnegative.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// `1` if all elements of `v` are nonnegative, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {0., -1., 4.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // v is nonnegative after sb_vec_abs
/// assert(!sb_vec_is_nonneg(v));
/// sb_vec_abs(v);
/// assert( sb_vec_is_nonneg(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
int sb_vec_is_nonneg(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_nonneg: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_is_nonneg: v is not finite");
#endif
if (data[a] < 0.) {
return 0;
}
}
return 1;
}
/// Checks that no element of the vector `v` is infinite or `NaN`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// `1` if no element of `v` is infinite or `NaN`, and `0` otherwise
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include <math.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., INFINITY, 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// assert(!sb_vec_is_finite(v));
/// sb_vec_set(v, 1, NAN);
/// assert(!sb_vec_is_finite(v));
/// sb_vec_set(v, 1, 4.);
/// assert( sb_vec_is_finite(v));
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
int sb_vec_is_finite(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_is_finite: v cannot be NULL");
#endif
double * data = v->data;
for (size_t a = 0; a < v->n_elem; ++a) {
if (!isfinite(data[a])) {
return 0;
}
}
return 1;
}
/// Finds the value of the maximum element of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Value of the maximum element
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // maximum value of v is 4.
/// assert(sb_vec_max(v) == 4.);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
double sb_vec_max(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_max: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_max: n_elem must be nonzero");
#endif
double * data = v->data;
double max_val = -INFINITY;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_max: v is not finite");
#endif
if (data[a] > max_val) {
max_val = data[a];
}
}
return max_val;
}
/// Finds the value of the minimum element of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Value of the minimum element
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // minimum value of v is 1.
/// assert(sb_vec_min(v) == 1.);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
double sb_vec_min(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_min: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_min: n_elem must be nonzero");
#endif
double * data = v->data;
double min_val = INFINITY;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_min: v is not finite");
#endif
if (data[a] < min_val) {
min_val = data[a];
}
}
return min_val;
}
/// Finds the maximum absolute value of the elements of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Maximum absolute value of the elements
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., -4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // maximum absolute value of v is 4.
/// assert(sb_vec_abs_max(v) == 4.);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
double sb_vec_abs_max(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_abs_max: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_abs_max: n_elem must be nonzero");
#endif
double * data = v->data;
size_t index = cblas_idamax(v->n_elem, data, 1);
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[index]), abort(), "sb_vec_abs_max: v is not finite");
#endif
return fabs(data[index]);
}
/// Finds the index of the maximum element of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Index of the maximum element
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // index of max is 1
/// assert(sb_vec_max_index(v) == 1);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
size_t sb_vec_max_index(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_max_index: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_max_index: n_elem must be nonzero");
#endif
double * data = v->data;
double max_val = -INFINITY;
size_t max_ind = 0;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_max_index: v is not finite");
#endif
if (data[a] > max_val) {
max_val = data[a];
max_ind = a;
}
}
return max_ind;
}
/// Finds the index of the minimum element of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Index of the minimum element
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
/// - `SAFE_FINITE`: elements of `v` are finite
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., 4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // index of min is 0
/// assert(sb_vec_min_index(v) == 0);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
size_t sb_vec_min_index(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_min_index: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_min_index: n_elem must be nonzero");
#endif
double * data = v->data;
double min_val = INFINITY;
size_t min_ind = 0;
for (size_t a = 0; a < v->n_elem; ++a) {
#ifdef SAFE_FINITE
SB_CHK_ERR(!isfinite(data[a]), abort(), "sb_vec_min_index: v is not finite");
#endif
if (data[a] < min_val) {
min_val = data[a];
min_ind = a;
}
}
return min_ind;
}
/// Finds the index of the maximum absolute value of the elements of `v`.
///
/// # Parameters
/// - `v`: pointer to the vector
///
/// # Returns
/// Index of the maximum absolute value of the elements
///
/// # Performance
/// The following preprocessor definitions (usually in `safety.h`) enable
/// various safety checks:
/// - `SAFE_MEMORY`: `v` is not `NULL`
/// - `SAFE_LENGTH`: number of elements is nonzero
///
/// # Examples
/// ```
/// #include <assert.h>
/// #include "sb_structs.h"
/// #include "sb_vector.h"
///
/// int main(void) {
/// double a[] = {1., -4., 2.};
/// sb_vec * v = sb_vec_of_arr(a, 3, 'r');
///
/// // index of the maximum absolute value of v is 1
/// assert(sb_vec_abs_max_index(v) == 1);
///
/// SB_VEC_FREE_ALL(v);
/// }
/// ```
size_t sb_vec_abs_max_index(const sb_vec * v) {
#ifdef SAFE_MEMORY
SB_CHK_ERR(!v, abort(), "sb_vec_abs_min_index: v cannot be NULL");
#endif
#ifdef SAFE_LENGTH
SB_CHK_ERR(v->n_elem == 0, abort(), "sb_vec_abs_min_index: n_elem must be nonzero");
#endif
return cblas_idamax(v->n_elem, v->data, 1);
}
extern inline double sb_vec_get(const sb_vec * v, size_t i);
extern inline void sb_vec_set(sb_vec * v, size_t i, double x);
extern inline double * sb_vec_ptr(sb_vec * v, size_t i);
|
using RobustAdaptiveMetropolisSampler, Distributions, LinearAlgebra, VegaLite, DataFrames
chain, accrate, S = RAM_sample(
p -> logpdf(Normal(3., 2), p[1]), # log target function
[0.], # Initial values
0.5, # Scaling factor
100_000 # Number of runs
)
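# accrate is the realized acceptance rate (RAM adapts toward ~0.234 by default)
# and S is the adapted lower-triangular factor of the proposal covariance.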
df = DataFrame(p1 = chain[:,1])
df |> @vlplot(:bar, x={:p1, bin=true}, y="count()")
|
The reflection of the product of a list of polynomials is equal to the product of the reflected polynomials.
|
Gaviota Place is a residential double-ended cul-de-sac in East Davis.
Intersecting Streets
Oceano Way
|
Formal statement is: corollary holomorphic_iff_power_series: "f holomorphic_on ball z r \<longleftrightarrow> (\<forall>w \<in> ball z r. (\<lambda>n. (deriv ^^ n) f z / (fact n) * (w - z)^n) sums f w)" Informal statement is: A function is holomorphic on a ball if and only if its Taylor series converges to the function on the ball.
|
section \<open>Translating terms into Graphs\<close>
text \<open>We define the translation function and its properties.\<close>
theory RuleSemanticsConnection
imports LabeledGraphSemantics RulesAndChains
begin
text \<open>Definition 15.\<close>
fun translation :: "'c allegorical_term \<Rightarrow> ('c, nat) labeled_graph" where
"translation (A_Lbl l) = LG {(l,0,1)} {0,1}" |
"translation (A_Cnv e) = map_graph_fn (translation e) (\<lambda> x. if x<2 then (1-x) else x)" |
"translation (A_Cmp e\<^sub>1 e\<^sub>2)
= (let G\<^sub>1 = translation e\<^sub>1 ; G\<^sub>2 = translation e\<^sub>2
in graph_union (map_graph_fn G\<^sub>1 (\<lambda> x. if x=0 then 0 else x+card(vertices G\<^sub>2)-1))
(map_graph_fn G\<^sub>2 (\<lambda> x. if x=0 then card (vertices G\<^sub>2) else x)))" |
"translation (A_Int e\<^sub>1 e\<^sub>2)
= (let G\<^sub>1 = translation e\<^sub>1 ; G\<^sub>2 = translation e\<^sub>2
in graph_union G\<^sub>1 (map_graph_fn G\<^sub>2 (\<lambda> x. if x<2 then x else x+card(vertices G\<^sub>1)-2)))"
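text \<open>For instance, the converse of a single label swaps the two designated
vertices: translation (A_Cnv (A_Lbl l)) is the graph with edge set {(l,1,0)}
on vertices {0,1}.\<close>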
definition inv_translation where
"inv_translation r \<equiv> {0..<card r} = r \<and> {0,1}\<subseteq>r"
lemma inv_translationI4[intro]:
assumes "finite r" "\<And> x. x < card r \<Longrightarrow> x \<in> r"
shows "r={0..<card r}"
proof(insert assms,induct "card r" arbitrary:r)
case (Suc x r)
let ?r = "r - {x}"
from Suc have p:"x = card ?r" "finite ?r" by auto
have p2:"xa < card ?r \<Longrightarrow> xa \<in> ?r" for xa
using Suc.prems(2)[of xa] Suc.hyps(2) unfolding p(1)[symmetric] by auto
from Suc.hyps(1)[OF p p2] have "?r={0..<card ?r}".
with Suc.hyps(2) Suc.prems(1) show ?case
by (metis atLeast0_lessThan_Suc card_Diff_singleton_if insert_Diff n_not_Suc_n p(1))
qed auto
lemma inv_translationI[intro!]:
assumes "finite r" "\<And> x. x < card r \<Longrightarrow> x \<in> r" "0 \<in> r" "Suc 0 \<in> r"
shows "inv_translation r"
proof -
from inv_translationI4[OF assms(1,2),symmetric]
have c:" {0..<card r} = r " by auto
from assms(3,4) have "{0,1} \<subseteq> r" by auto
with c inv_translation_def show ?thesis by auto
qed
lemma verts_in_translation_finite[intro]:
"finite (vertices (translation X))"
"finite (edges (translation X))"
"0 \<in> vertices (translation X)"
"Suc 0 \<in> vertices (translation X)"
proof(atomize(full),induction X)
case (A_Int X1 X2)
then show ?case by (auto simp:Let_def)
next
case (A_Cmp X1 X2)
then show ?case by (auto simp:Let_def)
next
have [simp]:"{x::nat. x < 2} = {0,1}" by auto
case (A_Cnv X)
then show ?case by auto
qed auto
lemma inv_tr_card_min:
assumes "inv_translation r"
shows "card r \<ge> 2"
proof -
note [simp] = inv_translation_def
have "{0..<x} = r \<Longrightarrow> 2 \<le> x \<longleftrightarrow> 0 \<in> r \<and> 1 \<in> r" for x by auto
thus ge2:"card r\<ge>2" using assms by auto
qed
lemma verts_in_translation[intro]:
"inv_translation (vertices (translation X))"
proof(induct X)
{ fix r
assume assms:"inv_translation r"
note [simp] = inv_translation_def
from assms have a1:"finite r"
by (intro card_ge_0_finite) auto
have [simp]:"{0..<Suc x} = {0..<x} \<union> {x}" for x by auto
note ge2 = inv_tr_card_min[OF assms]
from ge2 assms have r0:"r \<inter> {0} = {0}" "r \<inter> {x. x < 2} = {0,1}" by auto
have [intro!]:"\<And>x. x \<in> r \<Longrightarrow> x < card r"
and g6:"\<And>x. x < card r \<longleftrightarrow> x \<in> r"
using assms[unfolded inv_translation_def] atLeastLessThan_iff by blast+
have g4:"r \<inter> {x. \<not> x < 2} = {2..<card r}"
"r \<inter> (Collect ((<) 0)) = {1..<card r}" using assms by fastforce+
have ins:"1 \<in> r" "0 \<in> r" using assms by auto
have d:"Suc (Suc (card r - 2)) = card r"
using ge2 One_nat_def Suc_diff_Suc Suc_pred
numeral_2_eq_2 by presburger
note ge2 ins g4 g6 r0 d
} note inv_translationD[simp] = this
{
fix a b c
assume assm:"b \<le> (a::nat)"
have "(\<lambda>x. x + a - b) ` {b..<c} = {a..<c+a-b}" (is "?lhs = ?rhs")
proof -
from assm have "?lhs = (\<lambda>x. x + (a - b)) ` {b..<c}" by auto
also have "\<dots> = ?rhs"
unfolding linordered_semidom_class.image_add_atLeastLessThan' using assm by auto
finally show ?thesis by auto
qed
} note e[simp] = this
{ fix r z
assume a1: "inv_translation z" and a2: "inv_translation r"
let ?z2 = "card z + card r - 2"
let ?z1 = "card z + card r - Suc 0"
from a1 a2
have le1:"Suc 0 \<le> card r"
by (metis Suc_leD inv_translationD(1) numerals(2))
hence le2: "card r \<le> ?z1"
by (metis Suc_leD a1 inv_translationD(1) numerals(2) ordered_cancel_comm_monoid_diff_class.le_add_diff)
with le1 have b:"{card r ..< ?z1} \<union> {Suc 0 ..< card r} = {Suc 0 ..< ?z1}"
by auto
have a:"(insert (card r) {0..<card z + card r - Suc 0}) = {0..<card z + card r - Suc 0}"
using le1 le2 a1 a2
by (metis Suc_leD add_Suc_right atLeastLessThan_iff diff_Suc_Suc insert_absorb inv_translationD(1) linorder_not_less not_less_eq_eq numerals(2) ordered_cancel_comm_monoid_diff_class.le_add_diff)
from a1 a2
have "card z + card r - 2 \<ge> card (r::nat set)"
by (simp add: ordered_cancel_comm_monoid_diff_class.le_add_diff)
with a2
have c:"card (r \<union> {card r..<?z2}) = ?z2"
by (metis atLeast0LessThan card_atLeastLessThan diff_zero inv_translation_def ivl_disj_un_one(2))+
note a b c
} note [simp] = this
have [simp]:"a < x \<Longrightarrow> insert a {Suc a..<x} = {a..<x}" for a x by auto
{ case (A_Int X1 X2)
let ?v1 = "vertices (translation X1)"
from A_Int have [simp]:"(insert 0 (insert (Suc 0) (?v1 \<union> x))) = ?v1 \<union> x"
for x unfolding inv_translation_def by auto
from A_Int show ?case by (auto simp:Let_def linorder_not_le)
next
case (A_Cmp X1 X2)
hence "2\<le>card (vertices (translation X1))" "2\<le>card (vertices (translation X2))" by auto
hence "1 \<le>card (vertices (translation X1))" "1\<le>card (vertices (translation X2))"
"1 < card (vertices (translation X1)) + card (vertices (translation X2)) - 1"
by auto
from this A_Cmp
show ?case by (auto simp:Let_def)
next
case (A_Cnv X)
thus ?case by (auto simp:Let_def)
}
qed auto
lemma translation_graph[intro]:
"graph (translation X)"
by (induct X, auto simp:Let_def)
lemma graph_rule_translation[intro]: (* remark at the end of Def 15 *)
"graph_rule (translation X, translation (A_Int X Y))"
using verts_in_translation_finite[of X] verts_in_translation_finite[of "A_Int X Y"]
translation_graph[of X] translation_graph[of "A_Int X Y"]
by (auto simp:Let_def subgraph_def2)
lemma graph_hom_translation[intro]:
"graph_homomorphism (LG {} {0,1}) (translation X) (Id_on {0,1})"
using verts_in_translation[of X]
unfolding inv_translation_def graph_homomorphism_def2 by auto
lemma translation_right_to_left:
assumes f:"graph_homomorphism (translation e) G f" "(0, x) \<in> f" "(1, y) \<in> f"
shows "(x, y) \<in> :G:\<lbrakk>e\<rbrakk>"
using f
proof(induct e arbitrary:f x y)
case (A_Int e\<^sub>1 e\<^sub>2 f x y)
let ?f\<^sub>1 = "id"
let ?f\<^sub>2 = "(\<lambda> x. if x < 2 then x else x + card (vertices (translation e\<^sub>1)) - 2)"
let ?G\<^sub>1 = "translation e\<^sub>1"
let ?G\<^sub>2 = "translation e\<^sub>2"
have f1:"(0, x) \<in> on_graph ?G\<^sub>1 ?f\<^sub>1 O f" "(1, y) \<in> on_graph ?G\<^sub>1 ?f\<^sub>1 O f"
and f2:"(0, x) \<in> on_graph ?G\<^sub>2 ?f\<^sub>2 O f" "(1, y) \<in> on_graph ?G\<^sub>2 ?f\<^sub>2 O f"
using A_Int.prems(2,3) by (auto simp:BNF_Def.Gr_def relcomp_def)
from A_Int.prems(1)
have uni:"graph_homomorphism (graph_union ?G\<^sub>1 (map_graph_fn ?G\<^sub>2 ?f\<^sub>2)) G f"
by (auto simp:Let_def)
from graph_homo_union_id(1)[OF uni translation_graph]
have h1:"graph_homomorphism ?G\<^sub>1 (translation (A_Int e\<^sub>1 e\<^sub>2)) (on_graph ?G\<^sub>1 id)"
by (auto simp:Let_def graph_homomorphism_def)
have "graph (map_graph_fn ?G\<^sub>2 ?f\<^sub>2)" by auto
from graph_homo_union_id(2)[OF uni this]
have h2:"graph_homomorphism ?G\<^sub>2 (translation (A_Int e\<^sub>1 e\<^sub>2)) (on_graph ?G\<^sub>2 ?f\<^sub>2)"
by (auto simp:Let_def graph_homomorphism_def)
from A_Int.hyps(1)[OF graph_homomorphism_composes[OF h1 A_Int.prems(1)] f1]
A_Int.hyps(2)[OF graph_homomorphism_composes[OF h2 A_Int.prems(1)] f2]
show ?case by auto
next
case (A_Cmp e\<^sub>1 e\<^sub>2 f x y)
let ?f\<^sub>1 = "(\<lambda> x. if x=0 then 0 else x+card(vertices (translation e\<^sub>2))-1)"
let ?f\<^sub>2 = "(\<lambda> x. if x=0 then card (vertices (translation e\<^sub>2)) else x)"
let ?G\<^sub>1 = "translation e\<^sub>1"
let ?G\<^sub>2 = "translation e\<^sub>2"
let ?v = "card (vertices (translation e\<^sub>2))"
from A_Cmp.prems(1) have "?v \<in> Domain f" by (auto simp:Let_def graph_homomorphism_def)
then obtain v where v:"(?v,v) \<in> f" by auto
have f1:"(0, x) \<in> on_graph ?G\<^sub>1 ?f\<^sub>1 O f" "(1, v) \<in> on_graph ?G\<^sub>1 ?f\<^sub>1 O f"
and f2:"(0, v) \<in> on_graph ?G\<^sub>2 ?f\<^sub>2 O f" "(1, y) \<in> on_graph ?G\<^sub>2 ?f\<^sub>2 O f"
using A_Cmp.prems(2,3) v by auto
from A_Cmp.prems(1)
have uni:"graph_homomorphism (graph_union (map_graph_fn ?G\<^sub>1 ?f\<^sub>1) (map_graph_fn ?G\<^sub>2 ?f\<^sub>2)) G f"
by (auto simp:Let_def)
have "graph (map_graph_fn ?G\<^sub>1 ?f\<^sub>1)" by auto
from graph_homo_union_id(1)[OF uni this]
have h1:"graph_homomorphism ?G\<^sub>1 (translation (A_Cmp e\<^sub>1 e\<^sub>2)) (on_graph ?G\<^sub>1 ?f\<^sub>1)"
by (auto simp:Let_def graph_homomorphism_def2)
have "graph (map_graph_fn ?G\<^sub>2 ?f\<^sub>2)" by auto
from graph_homo_union_id(2)[OF uni this]
have h2:"graph_homomorphism ?G\<^sub>2 (translation (A_Cmp e\<^sub>1 e\<^sub>2)) (on_graph ?G\<^sub>2 ?f\<^sub>2)"
by (auto simp:Let_def graph_homomorphism_def2)
from A_Cmp.hyps(1)[OF graph_homomorphism_composes[OF h1 A_Cmp.prems(1)] f1]
A_Cmp.hyps(2)[OF graph_homomorphism_composes[OF h2 A_Cmp.prems(1)] f2]
show ?case by auto
next
case (A_Cnv e f x y)
let ?f = "(\<lambda> x. if x < 2 then 1 - x else x)"
let ?G = "translation e"
have i:"graph_homomorphism ?G (map_graph_fn ?G ?f) (on_graph ?G ?f)" using A_Cnv by auto
have "(0, y) \<in> on_graph ?G ?f O f" "(1, x) \<in> on_graph ?G ?f O f"
using A_Cnv.prems(3,2) by (auto simp:BNF_Def.Gr_def relcomp_def)
from A_Cnv.hyps(1)[OF graph_homomorphism_composes[OF i] this] A_Cnv.prems(1)
show ?case by auto
next
case (A_Lbl l f x y)
hence "edge_preserving f {(l,0,1)} (edges G)" unfolding graph_homomorphism_def by auto
with A_Lbl(2,3) show ?case by (auto simp:getRel_def edge_preserving_def)
qed
lemma translation_homomorphism:
assumes "graph_homomorphism (translation e) G f"
shows "f `` {0} \<times> f `` {1} \<subseteq> :G:\<lbrakk>e\<rbrakk>" ":G:\<lbrakk>e\<rbrakk> \<noteq> {}"
using translation_right_to_left[OF assms] assms[unfolded graph_homomorphism_def2]
verts_in_translation_finite[of e] by auto
text \<open>Lemma 5.\<close>
lemma translation:
assumes "graph G"
shows "(x, y) \<in> :G:\<lbrakk>e\<rbrakk> \<longleftrightarrow> (\<exists> f. graph_homomorphism (translation e) G f \<and> (0,x) \<in> f \<and> (1,y) \<in> f)"
(is "?lhs = ?rhs")
proof
have [dest]:"y + card (vertices (translation (e::'a allegorical_term))) - 2 < 2 \<Longrightarrow> (y::nat) < 2"
for y e using inv_tr_card_min[OF verts_in_translation,of e] by linarith
{ fix y fix e::"'a allegorical_term"
assume "y + card (vertices (translation e)) - 2 \<in> vertices (translation e)"
hence "y + card (vertices (translation e)) - 2 < card (vertices (translation e))"
using verts_in_translation[of e,unfolded inv_translation_def] by auto
hence "y < 2" using inv_tr_card_min[OF verts_in_translation,of e] by auto
} note [dest!] = this
{ fix y fix e::"'a allegorical_term"
assume "y + card (vertices (translation e)) - Suc 0 \<in> vertices (translation e)"
hence "y + card (vertices (translation e)) - Suc 0 \<in> {0..<card (vertices (translation e))}"
using verts_in_translation[of e,unfolded inv_translation_def] by simp
hence "y = 0" using inv_tr_card_min[OF verts_in_translation,of e] by auto
} note [dest!] = this
{ fix y fix e::"'a allegorical_term"
assume "card (vertices (translation e)) \<in> vertices (translation e)"
hence "card (vertices (translation e)) \<in> {0..<card (vertices (translation e))}"
using verts_in_translation[of e,unfolded inv_translation_def] by auto
hence "False" by auto
} note [dest!] = this
{ fix y fix e::"'a allegorical_term"
assume "y + card (vertices (translation e)) \<le> Suc 0"
hence " card (vertices (translation e)) \<le> Suc 0" by auto
hence "False" using inv_tr_card_min[OF verts_in_translation[of e]] by auto
} note [dest!] = this
assume ?lhs
then show ?rhs
proof(induct e arbitrary:x y)
case (A_Int e\<^sub>1 e\<^sub>2)
from A_Int have assm:"(x, y) \<in> :G:\<lbrakk>e\<^sub>1\<rbrakk>" "(x, y) \<in> :G:\<lbrakk>e\<^sub>2\<rbrakk>" by auto
from A_Int(1)[OF assm(1)] obtain f\<^sub>1 where
f\<^sub>1:"graph_homomorphism (translation e\<^sub>1) G f\<^sub>1" "(0, x) \<in> f\<^sub>1" "(1, y) \<in> f\<^sub>1" by auto
from A_Int(2)[OF assm(2)] obtain f\<^sub>2 where
f\<^sub>2:"graph_homomorphism (translation e\<^sub>2) G f\<^sub>2" "(0, x) \<in> f\<^sub>2" "(1, y) \<in> f\<^sub>2" by auto
from f\<^sub>1 f\<^sub>2 have v:"Domain f\<^sub>1 = vertices (translation e\<^sub>1)" "Domain f\<^sub>2 = vertices (translation e\<^sub>2)"
unfolding graph_homomorphism_def by auto
let ?f\<^sub>2 = "(\<lambda> x. if x < 2 then x else x + card (vertices (translation e\<^sub>1)) - 2)"
let ?tr\<^sub>2 = "on_graph (translation e\<^sub>2) ?f\<^sub>2"
have inj2:"inj_on ?f\<^sub>2 (vertices (translation e\<^sub>2))" unfolding inj_on_def by auto
have "(0,0) \<in> ?tr\<^sub>2\<inverse>" "(1,1) \<in> ?tr\<^sub>2\<inverse>" by auto
from this[THEN relcompI] f\<^sub>2(2,3)
have zero_one:"(0,x) \<in> ?tr\<^sub>2\<inverse> O f\<^sub>2"
"(1,y) \<in> ?tr\<^sub>2\<inverse> O f\<^sub>2" by auto
{ fix yb zb
assume "(yb + card (vertices (translation e\<^sub>1)) - 2, zb) \<in> f\<^sub>1"
hence "yb + card (vertices (translation e\<^sub>1)) - 2 \<in> vertices (translation e\<^sub>1)" using v by auto
} note in_f[dest!] = this
have d_a:"Domain f\<^sub>1 \<inter> Domain (?tr\<^sub>2\<inverse> O f\<^sub>2) = {0,1}"
using zero_one by (auto simp:v)
have d_b:"Domain (f\<^sub>1 \<inter> ?tr\<^sub>2\<inverse> O f\<^sub>2) = {0,1}"
using zero_one f\<^sub>1(2,3) by auto
note cmp2 = graph_homomorphism_composes[OF graph_homo_inv[OF translation_graph inj2] f\<^sub>2(1)]
have "graph_homomorphism (translation (A_Int e\<^sub>1 e\<^sub>2)) G (f\<^sub>1 \<union> ?tr\<^sub>2\<inverse> O f\<^sub>2)"
using graph_homo_union[OF f\<^sub>1(1) cmp2 d_a[folded d_b]]
by (auto simp:Let_def)
thus ?case using zero_one[THEN UnI2[of _ _ "f\<^sub>1"]] by blast
next
case (A_Cmp e\<^sub>1 e\<^sub>2)
from A_Cmp obtain z where assm:"(x, z) \<in> :G:\<lbrakk>e\<^sub>1\<rbrakk>" "(z, y) \<in> :G:\<lbrakk>e\<^sub>2\<rbrakk>" by auto
from A_Cmp(1)[OF assm(1)] obtain f\<^sub>1 where
f\<^sub>1:"graph_homomorphism (translation e\<^sub>1) G f\<^sub>1" "(0, x) \<in> f\<^sub>1" "(1, z) \<in> f\<^sub>1" by auto
from A_Cmp(2)[OF assm(2)] obtain f\<^sub>2 where
f\<^sub>2:"graph_homomorphism (translation e\<^sub>2) G f\<^sub>2" "(0, z) \<in> f\<^sub>2" "(1, y) \<in> f\<^sub>2" by auto
from f\<^sub>1 f\<^sub>2 have v:"Domain f\<^sub>1 = vertices (translation e\<^sub>1)" "Domain f\<^sub>2 = vertices (translation e\<^sub>2)"
unfolding graph_homomorphism_def by auto
let ?f\<^sub>1 = "(\<lambda> x. if x=0 then 0 else x+card(vertices (translation e\<^sub>2))-1)"
let ?f\<^sub>2 = "(\<lambda> x. if x=0 then card (vertices (translation e\<^sub>2)) else x)"
let ?tr\<^sub>1 = "on_graph (translation e\<^sub>1) ?f\<^sub>1"
let ?tr\<^sub>2 = "on_graph (translation e\<^sub>2) ?f\<^sub>2"
have inj1:"inj_on ?f\<^sub>1 (vertices (translation e\<^sub>1))" unfolding inj_on_def by auto
have inj2:"inj_on ?f\<^sub>2 (vertices (translation e\<^sub>2))" unfolding inj_on_def by auto
have "(card (vertices (translation e\<^sub>2)),0) \<in> ?tr\<^sub>2\<inverse>" "(1,1) \<in> ?tr\<^sub>2\<inverse>"
"(0,0) \<in> ?tr\<^sub>1\<inverse>" "(card (vertices (translation e\<^sub>2)),1) \<in> ?tr\<^sub>1\<inverse>" by auto
from this[THEN relcompI] f\<^sub>2(2,3) f\<^sub>1(2,3)
have zero_one:"(card (vertices (translation e\<^sub>2)),z) \<in> ?tr\<^sub>1\<inverse> O f\<^sub>1"
"(0,x) \<in> ?tr\<^sub>1\<inverse> O f\<^sub>1"
"(card (vertices (translation e\<^sub>2)),z) \<in> ?tr\<^sub>2\<inverse> O f\<^sub>2"
"(1,y) \<in> ?tr\<^sub>2\<inverse> O f\<^sub>2" by auto
have [simp]:
"ye \<in> vertices (translation e\<^sub>2) \<Longrightarrow>
(if ye = 0 then card (vertices (translation e\<^sub>2)) else ye) =
(if yd = 0 then 0 else yd + card (vertices (translation e\<^sub>2)) - 1) \<longleftrightarrow> ye = 0 \<and> yd = 1"
for ye yd using v inv_tr_card_min[OF verts_in_translation,of "e\<^sub>2"]
by(cases "ye=0";cases "yd=0";auto)
have d_a:"Domain (?tr\<^sub>1\<inverse> O f\<^sub>1) \<inter> Domain (?tr\<^sub>2\<inverse> O f\<^sub>2) = {card (vertices (translation e\<^sub>2))}"
using zero_one by (auto simp:v)
have d_b:"Domain (?tr\<^sub>1\<inverse> O f\<^sub>1 \<inter> ?tr\<^sub>2\<inverse> O f\<^sub>2) = {card (vertices (translation e\<^sub>2))}"
using zero_one f\<^sub>1(2,3) by auto
note cmp1 = graph_homomorphism_composes[OF graph_homo_inv[OF translation_graph inj1] f\<^sub>1(1)]
note cmp2 = graph_homomorphism_composes[OF graph_homo_inv[OF translation_graph inj2] f\<^sub>2(1)]
have "graph_homomorphism (translation (A_Cmp e\<^sub>1 e\<^sub>2)) G (?tr\<^sub>1\<inverse> O f\<^sub>1 \<union> ?tr\<^sub>2\<inverse> O f\<^sub>2)"
unfolding Let_def translation.simps
by (rule graph_homo_union[OF cmp1 cmp2 d_a[folded d_b]])
thus ?case using zero_one by blast
next
case (A_Cnv e)
let ?G = "translation (A_Cnv e)"
from A_Cnv obtain f where
f:"graph_homomorphism (translation e) G f" "(0, y) \<in> f" "(1, x) \<in> f" by auto
hence v:"Domain f = vertices (translation e)"
unfolding graph_homomorphism_def by auto
define n where "n \<equiv> card (vertices (translation e))"
from verts_in_translation f inv_tr_card_min[OF verts_in_translation] v(1)
have n:"vertices (translation e) = {0..<n}" "{0..<n} \<inter> {x. x < 2} = {1,0}"
"Domain f = {0..<n}" "{0..<n} \<inter> {x. \<not> x < 2} = {2..<n}"
and n2: "n \<ge> 2"
by (auto simp:n_def inv_translation_def)
then have [simp]:"insert (Suc 0) {2..<n} = {1..<n}"
"insert 0 {Suc 0..<n} = {0..<n}" by auto
let ?f = "on_graph ?G (\<lambda> x. if x < 2 then 1 - x else x)"
have h:"graph_homomorphism ?G G (?f O f)"
proof(rule graph_homomorphism_composes[OF _ f(1)],rule graph_homomorphismI)
show "vertices ?G = Domain ?f"
by (auto simp:Domain_int_univ)
show "?f `` vertices ?G \<subseteq> vertices (translation e)" using n2 by auto
show "univalent ?f" by auto
show "edge_preserving ?f (edges (translation (A_Cnv e))) (edges (translation e))"
by (rule edge_preserving_on_graphI,auto simp: BNF_Def.Gr_def)
qed (auto intro:assms)
have xy:"(0, x) \<in> ?f O f" "(1, y) \<in> ?f O f" using n2 f(2,3) n(1,2) by auto
with h show ?case by auto
next
case (A_Lbl l)
let ?f = "{(0,x),(1,y)}"
have xy:"x \<in> vertices G" "y \<in> vertices G" using assms A_Lbl by (auto simp:getRel_def)
have "graph_homomorphism (translation (A_Lbl l)) G ?f \<and> (0, x) \<in> ?f \<and> (1, y) \<in> ?f"
using assms A_Lbl xy unfolding graph_homomorphism_def2
by (auto simp:univalent_def getRel_def on_triple_def Image_def graph_union_def insert_absorb)
then show ?case by auto
qed
qed (insert translation_right_to_left,auto)
abbreviation transl_rule ::
"'a sentence \<Rightarrow> ('a, nat) labeled_graph \<times> ('a, nat) labeled_graph" where
"transl_rule R \<equiv> (translation (fst R),translation (snd R))"
text \<open>Lemma 6.\<close>
lemma maintained_holds_iff:
assumes "graph G"
shows "maintained (translation e\<^sub>L,translation (A_Int e\<^sub>L e\<^sub>R)) G \<longleftrightarrow> G \<Turnstile> e\<^sub>L \<sqsubseteq> e\<^sub>R" (is "?rhs = ?lhs")
proof
assume lhs:?lhs
show ?rhs unfolding maintained_def
proof(clarify)
fix f
assume f:"graph_homomorphism (fst (translation e\<^sub>L, translation (A_Int e\<^sub>L e\<^sub>R))) G f"
then obtain x y where f2:"(0,x) \<in> f" "(1,y) \<in> f" unfolding graph_homomorphism_def
by (metis DomainE One_nat_def prod.sel(1) verts_in_translation_finite(3,4))
with f have "(x,y) \<in> :G:\<lbrakk>fst (e\<^sub>L \<sqsubseteq> e\<^sub>R)\<rbrakk>" unfolding translation[OF assms] by auto
with lhs have "(x,y) \<in> :G:\<lbrakk>snd (e\<^sub>L \<sqsubseteq> e\<^sub>R)\<rbrakk>" by auto
then obtain g where g: "graph_homomorphism (translation (A_Int e\<^sub>L e\<^sub>R)) G g"
and g2: "(0, x) \<in> g" "(1, y) \<in> g" unfolding translation[OF assms] by auto
have v:"vertices (translation (A_Int e\<^sub>L e\<^sub>R)) = Domain g"
"vertices (translation e\<^sub>L) = Domain f" using f g
unfolding graph_homomorphism_def by auto
from subgraph_subset[of "translation e\<^sub>L" "translation (A_Int e\<^sub>L e\<^sub>R)"]
graph_rule_translation[of e\<^sub>L e\<^sub>R]
have dom_sub: "Domain f \<subseteq> Domain g"
using v unfolding prod.sel by argo
hence dom_le:"card (Domain f) \<le> card (Domain g)"
by (metis card.infinite card_mono inv_tr_card_min not_less rel_simps(51) v(1) verts_in_translation)
have c_f:"card (Domain f) \<ge> 2" using inv_tr_card_min[OF verts_in_translation] v by metis
from f[unfolded graph_homomorphism_def]
have ep_f:"edge_preserving f (edges (translation e\<^sub>L)) (edges G)"
and uni_f:"univalent f" by auto
let ?f = "(\<lambda>x. if x < 2 then x else x + card (vertices (translation e\<^sub>L)) - 2)"
define GR where "GR = map_graph_fn (translation e\<^sub>R) ?f"
from g[unfolded graph_homomorphism_def]
have "edge_preserving g (edges (translation (A_Int e\<^sub>L e\<^sub>R))) (edges G)"
and uni_g:"univalent g" by auto
from edge_preserving_subset[OF subset_refl _ this(1)]
have ep_g:"edge_preserving g (edges GR) (edges G)" by (auto simp:Let_def GR_def)
{ fix a assume a:"a \<in> vertices (translation e\<^sub>R)"
hence "?f a \<in> vertices (translation (A_Int e\<^sub>L e\<^sub>R))" by (auto simp:Let_def)
from this[unfolded v] verts_in_translation[of "A_Int e\<^sub>L e\<^sub>R",unfolded inv_translation_def v]
have "\<not> a < 2 \<Longrightarrow> a + card (Domain f) - 2 < card (Domain g)" by auto
} note[intro!] = this
have [intro!]: " \<not> aa < 2 \<Longrightarrow> card (Domain f) \<le> aa + card (Domain f) - 2" for aa by simp
from v(2) restrictD[OF translation_graph[of e\<^sub>L]]
have df[dest]:"xa \<notin> Domain f \<Longrightarrow> (l,xa,xb) \<in> edges (translation e\<^sub>L) \<Longrightarrow> False"
"xa \<notin> Domain f \<Longrightarrow> (l,xb,xa) \<in> edges (translation e\<^sub>L) \<Longrightarrow> False"
for xa l xb unfolding edge_preserving by auto
{ fix l xa xb ya
assume assm: "(l,xa,xb) \<in> edges GR"
with c_f dom_le
have "xa \<in> {0,1} \<union> {card (Domain f)..<card (Domain g)}"
"xb \<in> {0,1} \<union> {card (Domain f)..<card (Domain g)}"
unfolding GR_def v by auto
hence minb:"xa \<in> {0,1} \<or> xa \<ge> card (Domain f)" "xb \<in> {0,1} \<or> xb \<ge> card (Domain f)"
by auto
{ fix z xa assume minb:"xa \<in> {0,1} \<or> xa \<ge> card (Domain f)" and z:"(xa,z) \<in> f"
from z verts_in_translation[of e\<^sub>L,unfolded inv_translation_def v]
have "xa < card(Domain f)" by auto
with minb verts_in_translation[of "A_Int e\<^sub>L e\<^sub>R",unfolded inv_translation_def v]
have x:"xa \<in> {0,1} \<and> xa \<in> Domain g" by auto
then obtain v where g:"(xa,v) \<in> g" by auto
consider "xa = 0 \<and> z = x" | "xa = 1 \<and> z = y"
using x f2[THEN univalentD[OF uni_f]] z by auto
hence "v = z" using g g2[THEN univalentD[OF uni_g]] by metis
hence "(xa,z) \<in> g" using g by auto
}
note minb[THEN this]
}
with f2 g2[THEN univalentD[OF uni_g]]
have dg:"(l,xa,xb) \<in> edges GR \<Longrightarrow> (xa,ya) \<in> f \<Longrightarrow> (xa,ya) \<in> g"
"(l,xb,xa) \<in> edges GR \<Longrightarrow> (xa,ya) \<in> f \<Longrightarrow> (xa,ya) \<in> g"
for xa l xb ya
unfolding edge_preserving by (auto)
have "vertices (translation e\<^sub>L) \<subseteq> vertices (translation (A_Int e\<^sub>L e\<^sub>R))"
by(rule subgraph_subset,insert graph_rule_translation,auto)
hence subdom:"Domain f \<subseteq> Domain g" unfolding v.
let ?g = "f \<union> (Id_on (UNIV - Domain f) O g)"
have [simp]:"Domain ?g = Domain g" using subdom unfolding Domain_Un_eq by auto
have ih:"graph_homomorphism (translation (A_Int e\<^sub>L e\<^sub>R)) G ?g"
proof(rule graph_homomorphismI)
show "?g `` vertices (translation (A_Int e\<^sub>L e\<^sub>R)) \<subseteq> vertices G"
using g[unfolded graph_homomorphism_def] f[unfolded graph_homomorphism_def]
by (auto simp: v simp del:translation.simps)
show "edge_preserving ?g (edges (translation (A_Int e\<^sub>L e\<^sub>R))) (edges G)"
unfolding Let_def translation.simps graph_union_edges proof
show "edge_preserving ?g (edges (translation e\<^sub>L)) (edges G)"
using edge_preserving_atomic[OF ep_f] unfolding edge_preserving by auto
have "edge_preserving ?g (edges GR) (edges G)"
using edge_preserving_atomic[OF ep_g] dg unfolding edge_preserving by (auto;blast)
thus "edge_preserving ?g (edges (map_graph_fn (translation e\<^sub>R) ?f)) (edges G)"
by (auto simp:GR_def)
qed
qed (insert f[unfolded graph_homomorphism_def] g[unfolded graph_homomorphism_def],auto simp:Let_def)
have ie:"agree_on (translation e\<^sub>L) f ?g" unfolding agree_on_def by (auto simp:v)
from ie ih show "extensible (translation e\<^sub>L, translation (A_Int e\<^sub>L e\<^sub>R)) G f"
unfolding extensible_def prod.sel by auto
qed
next
assume rhs:?rhs
{ fix x y assume "(x,y) \<in> :G:\<lbrakk>e\<^sub>L\<rbrakk>"
with translation[OF assms] obtain f
where f:"graph_homomorphism (fst (translation e\<^sub>L, translation (A_Int e\<^sub>L e\<^sub>R))) G f"
"(0, x) \<in> f" "(1, y) \<in> f" by auto
with rhs[unfolded maintained_def,rule_format,OF f(1),unfolded extensible_def]
obtain g where g:"graph_homomorphism (translation (A_Int e\<^sub>L e\<^sub>R)) G g"
"agree_on (translation e\<^sub>L) f g" by auto
hence "(x,y) \<in> :G:\<lbrakk>A_Int e\<^sub>L e\<^sub>R\<rbrakk>" using f unfolding agree_on_def translation[OF assms] by auto
}
thus ?lhs by auto
qed
lemma translation_self[intro]:
"(0, 1) \<in> :translation e:\<lbrakk>e\<rbrakk>"
proof(induct e)
case (A_Int e1 e2)
let ?f = "(\<lambda>x. if x < 2 then x else x + card (vertices (translation e1)) - 2)"
have f: "(?f 0,?f 1) \<in>:map_graph_fn (translation e2) ?f:\<lbrakk>e2\<rbrakk>"
using map_graph_in[OF translation_graph A_Int(2),of ?f] by auto
let ?G = "graph_union (translation e1) (map_graph_fn (translation e2) ?f)"
have "{(0,1)} \<subseteq> :(translation e1):\<lbrakk>e1\<rbrakk>" using A_Int by auto
moreover have "{(0,1)} \<subseteq> :map_graph_fn (translation e2) ?f:\<lbrakk>e2\<rbrakk>" using f by auto
moreover have ":map_graph_fn (translation e2) ?f:\<lbrakk>e2\<rbrakk> \<subseteq> :?G:\<lbrakk>e2\<rbrakk>" ":translation e1:\<lbrakk>e1\<rbrakk> \<subseteq> :?G:\<lbrakk>e1\<rbrakk>"
using graph_union_semantics by blast+
ultimately show ?case by (auto simp:Let_def)
next
case (A_Cmp e1 e2)
let ?f1 = "\<lambda>x. if x = 0 then 0 else x + card (vertices (translation e2)) - 1"
have f1: "(?f1 0,?f1 1) \<in>:map_graph_fn (translation e1) ?f1:\<lbrakk>e1\<rbrakk>"
using map_graph_in[OF translation_graph A_Cmp(1),of ?f1] by auto
let ?f2 = "\<lambda>x. if x = 0 then card (vertices (translation e2)) else x"
have f2: "(?f2 0,?f2 1) \<in>:map_graph_fn (translation e2) ?f2:\<lbrakk>e2\<rbrakk>"
using map_graph_in[OF translation_graph A_Cmp(2),of ?f2] by auto
let ?G = "graph_union (map_graph_fn (translation e1) ?f1) (map_graph_fn (translation e2) ?f2)"
have "{(0,1)} = {(0,card (vertices (translation e2)))} O {(card (vertices (translation e2)),1)}"
by auto
also have "{(0,card (vertices (translation e2)))} \<subseteq> :map_graph_fn (translation e1) ?f1:\<lbrakk>e1\<rbrakk>"
using f1 by auto
also have ":map_graph_fn (translation e1) ?f1:\<lbrakk>e1\<rbrakk> \<subseteq> :?G:\<lbrakk>e1\<rbrakk>"
using graph_union_semantics by auto
also have "{(card (vertices (translation e2)),1)} \<subseteq> :map_graph_fn (translation e2) ?f2:\<lbrakk>e2\<rbrakk>"
using f2 by auto
also have ":map_graph_fn (translation e2) ?f2:\<lbrakk>e2\<rbrakk> \<subseteq> :?G:\<lbrakk>e2\<rbrakk>"
using graph_union_semantics by blast
also have "(:?G:\<lbrakk>e1\<rbrakk>) O (:?G:\<lbrakk>e2\<rbrakk>) = :translation (A_Cmp e1 e2):\<lbrakk>A_Cmp e1 e2\<rbrakk>"
by (auto simp:Let_def)
finally show ?case by auto
next
case (A_Cnv e)
from map_graph_in[OF translation_graph this,of "(\<lambda>x. if x < (2::nat) then 1 - x else x)"]
show ?case using map_graph_in[OF translation_graph] by auto
qed (simp add:getRel_def)
text \<open>Lemma 6 is only used on rules of the form @{term "e\<^sub>L \<sqsubseteq> e\<^sub>R"}.
The requirement that G be a graph can be dropped for one direction.\<close>
lemma maintained_holds[intro]:
assumes ":G:\<lbrakk>e\<^sub>L\<rbrakk> \<subseteq> :G:\<lbrakk>e\<^sub>R\<rbrakk>"
shows "maintained (transl_rule (e\<^sub>L \<sqsubseteq> e\<^sub>R)) G"
proof (cases "graph G")
case True
thus ?thesis using assms sentence_iff maintained_holds_iff prod.sel by metis
next
case False
thus ?thesis by (auto simp:maintained_def graph_homomorphism_def)
qed
lemma maintained_holds_subset_iff[simp]:
assumes "graph G"
shows "maintained (transl_rule (e\<^sub>L \<sqsubseteq> e\<^sub>R)) G \<longleftrightarrow> (:G:\<lbrakk>e\<^sub>L\<rbrakk> \<subseteq> :G:\<lbrakk>e\<^sub>R\<rbrakk>)"
using assms maintained_holds_iff sentence_iff prod.sel by metis
end
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f : (i j : ι) → X C i ⟶ X D j
i i' : ι
w : ComplexShape.Rel c i i'
⊢ ↑(dNext i) f = d C i i' ≫ f i' i
[PROOFSTEP]
obtain rfl := c.next_eq' w
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f : (i j : ι) → X C i ⟶ X D j
i : ι
w : ComplexShape.Rel c i (ComplexShape.next c i)
⊢ ↑(dNext i) f = d C i (ComplexShape.next c i) ≫ f (ComplexShape.next c i) i
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : (i j : ι) → X C i ⟶ X D j
j j' : ι
w : ComplexShape.Rel c j' j
⊢ ↑(prevD j) f = f j j' ≫ d D j' j
[PROOFSTEP]
obtain rfl := c.prev_eq' w
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : (i j : ι) → X C i ⟶ X D j
j : ι
w : ComplexShape.Rel c (ComplexShape.prev c j) j
⊢ ↑(prevD j) f = f j (ComplexShape.prev c j) ≫ d D (ComplexShape.prev c j) j
[PROOFSTEP]
rfl
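
The two goal/proof pairs above are the computation rules for dNext and prevD; the names dNext_eq and prevD_eq are confirmed by a rewrite step later in this trace. A sketch of the statements and proofs as they can be read off the goals, with the surrounding variable context (ι, V, c, C, D) assumed:

theorem dNext_eq (f : ∀ i j, X C i ⟶ X D j) {i i' : ι} (w : ComplexShape.Rel c i i') :
    dNext i f = d C i i' ≫ f i' i := by
  -- c.next_eq' w : ComplexShape.next c i = i'; substituting makes both
  -- sides definitionally equal
  obtain rfl := c.next_eq' w
  rfl

theorem prevD_eq (f : ∀ i j, X C i ⟶ X D j) {j j' : ι} (w : ComplexShape.Rel c j' j) :
    prevD j f = f j j' ≫ d D j' j := by
  -- symmetric argument through ComplexShape.prev
  obtain rfl := c.prev_eq' w
  rfl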
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h k : D ⟶ E
i : ι
f : (i j : ι) → X C i ⟶ X D j
g : D ⟶ E
j : ι
⊢ (↑(prevD j) fun i j => f i j ≫ Hom.f g j) = ↑(prevD j) f ≫ Hom.f g j
[PROOFSTEP]
dsimp [prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h k : D ⟶ E
i : ι
f : (i j : ι) → X C i ⟶ X D j
g : D ⟶ E
j : ι
⊢ (f j (ComplexShape.prev c j) ≫ Hom.f g (ComplexShape.prev c j)) ≫ d E (ComplexShape.prev c j) j =
(f j (ComplexShape.prev c j) ≫ d D (ComplexShape.prev c j) j) ≫ Hom.f g j
[PROOFSTEP]
simp only [Category.assoc, g.comm]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i✝ : ι
C D : ChainComplex V ℕ
i : ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ ↑(dNext i) f = d C i (i - 1) ≫ f (i - 1) i
[PROOFSTEP]
dsimp [dNext]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i✝ : ι
C D : ChainComplex V ℕ
i : ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ d C i (ComplexShape.next (ComplexShape.down ℕ) i) ≫ f (ComplexShape.next (ComplexShape.down ℕ) i) i =
d C i (i - 1) ≫ f (i - 1) i
[PROOFSTEP]
cases i
[GOAL]
case zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : ChainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ d C Nat.zero (ComplexShape.next (ComplexShape.down ℕ) Nat.zero) ≫
f (ComplexShape.next (ComplexShape.down ℕ) Nat.zero) Nat.zero =
d C Nat.zero (Nat.zero - 1) ≫ f (Nat.zero - 1) Nat.zero
[PROOFSTEP]
simp only [shape, ChainComplex.next_nat_zero, ComplexShape.down_Rel, Nat.one_ne_zero, not_false_iff, zero_comp]
[GOAL]
case succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : ChainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ d C (Nat.succ n✝) (ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝)) ≫
f (ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝)) (Nat.succ n✝) =
d C (Nat.succ n✝) (Nat.succ n✝ - 1) ≫ f (Nat.succ n✝ - 1) (Nat.succ n✝)
[PROOFSTEP]
congr
[GOAL]
case succ.h.e_4.h.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : ChainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
[GOAL]
case succ.h.e_6.e_8.h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : ChainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
[GOAL]
case succ.h.e_7.e_1
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : ChainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i✝ : ι
C D : CochainComplex V ℕ
i : ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ ↑(prevD i) f = f i (i - 1) ≫ d D (i - 1) i
[PROOFSTEP]
dsimp [prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i✝ : ι
C D : CochainComplex V ℕ
i : ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ f i (ComplexShape.prev (ComplexShape.up ℕ) i) ≫ d D (ComplexShape.prev (ComplexShape.up ℕ) i) i =
f i (i - 1) ≫ d D (i - 1) i
[PROOFSTEP]
cases i
[GOAL]
case zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : CochainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
⊢ f Nat.zero (ComplexShape.prev (ComplexShape.up ℕ) Nat.zero) ≫
d D (ComplexShape.prev (ComplexShape.up ℕ) Nat.zero) Nat.zero =
f Nat.zero (Nat.zero - 1) ≫ d D (Nat.zero - 1) Nat.zero
[PROOFSTEP]
simp only [shape, CochainComplex.prev_nat_zero, ComplexShape.up_Rel, Nat.one_ne_zero, not_false_iff, comp_zero]
[GOAL]
case succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : CochainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ f (Nat.succ n✝) (ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝)) ≫
d D (ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝)) (Nat.succ n✝) =
f (Nat.succ n✝) (Nat.succ n✝ - 1) ≫ d D (Nat.succ n✝ - 1) (Nat.succ n✝)
[PROOFSTEP]
congr
[GOAL]
case succ.h.e_4.h.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : CochainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
[GOAL]
case succ.h.e_6.e_2
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : CochainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
[GOAL]
case succ.h.e_7.e_7.h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E : HomologicalComplex V c
f✝ g : C✝ ⟶ D✝
h k : D✝ ⟶ E
i : ι
C D : CochainComplex V ℕ
f : (i j : ℕ) → X C i ⟶ X D j
n✝ : ℕ
⊢ ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) = Nat.succ n✝ - 1
[PROOFSTEP]
simp
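
The two case splits above specialize these rules to ℕ-indexed complexes, where next and prev are truncated subtraction; informally (the lemma names are not shown in the trace, the statements are read off the goals):

-- C D : ChainComplex V ℕ:   ↑(dNext i) f = d C i (i - 1) ≫ f (i - 1) i
-- C D : CochainComplex V ℕ: ↑(prevD i) f = f i (i - 1) ≫ d D (i - 1) i
-- In the zero case the shape relation fails (Nat.one_ne_zero), so the
-- offending differential vanishes by shape; in the succ case, congr reduces
-- everything to ComplexShape.next/prev computations that simp closes.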
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : Homotopy f g
i : ι
⊢ Hom.f (f - g) i = ((↑(dNext i) fun i j => hom h i j) + ↑(prevD i) fun i j => hom h i j) + Hom.f 0 i
[PROOFSTEP]
simp [h.comm]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : Homotopy (f - g) 0
i : ι
⊢ Hom.f f i = ((↑(dNext i) fun i j => hom h i j) + ↑(prevD i) fun i j => hom h i j) + Hom.f g i
[PROOFSTEP]
simpa [sub_eq_iff_eq_add] using h.comm i
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
⊢ Function.LeftInverse (fun h => mk fun i j => hom h i j) fun h => mk fun i j => hom h i j
[PROOFSTEP]
aesop_cat
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
⊢ Function.RightInverse (fun h => mk fun i j => hom h i j) fun h => mk fun i j => hom h i j
[PROOFSTEP]
aesop_cat
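
The four obligations above (two commutation conditions, then two inverse laws closed by aesop_cat) package into an equivalence between homotopies f ∼ g and null-homotopies of f - g; a hedged summary (the name equivSubZero is an assumption, not shown in the trace):

-- equivSubZero : Homotopy f g ≃ Homotopy (f - g) 0
-- Both directions transport the same underlying family fun i j => hom h i j;
-- simp [h.comm] discharges the forward commutation condition, and
-- simpa [sub_eq_iff_eq_add] using h.comm i discharges the backward one.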
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
f g : C ⟶ D
h : Homotopy f g
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ (-h.hom) i j = 0
[PROOFSTEP]
rw [Pi.neg_apply, Pi.neg_apply, h.zero i j w, neg_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
f g : C ⟶ D
h : Homotopy f g
i : ι
⊢ Hom.f g i = ↑(dNext i) (-h.hom) + ↑(prevD i) (-h.hom) + Hom.f f i
[PROOFSTEP]
rw [AddMonoidHom.map_neg, AddMonoidHom.map_neg, h.comm, ← neg_add, ← add_assoc, neg_add_self, zero_add]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k✝ : D ⟶ E
i✝ : ι
e f g : C ⟶ D
h : Homotopy e f
k : Homotopy f g
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ (h.hom + k.hom) i j = 0
[PROOFSTEP]
rw [Pi.add_apply, Pi.add_apply, h.zero i j w, k.zero i j w, zero_add]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k✝ : D ⟶ E
i✝ : ι
e f g : C ⟶ D
h : Homotopy e f
k : Homotopy f g
i : ι
⊢ Hom.f e i = ↑(dNext i) (h.hom + k.hom) + ↑(prevD i) (h.hom + k.hom) + Hom.f g i
[PROOFSTEP]
rw [AddMonoidHom.map_add, AddMonoidHom.map_add, h.comm, k.comm]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k✝ : D ⟶ E
i✝ : ι
e f g : C ⟶ D
h : Homotopy e f
k : Homotopy f g
i : ι
⊢ ↑(dNext i) h.hom + ↑(prevD i) h.hom + (↑(dNext i) k.hom + ↑(prevD i) k.hom + Hom.f g i) =
↑(dNext i) h.hom + ↑(dNext i) k.hom + (↑(prevD i) h.hom + ↑(prevD i) k.hom) + Hom.f g i
[PROOFSTEP]
abel
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f₁ g₁ f₂ g₂ : C ⟶ D
h₁ : Homotopy f₁ g₁
h₂ : Homotopy f₂ g₂
i j : ι
hij : ¬ComplexShape.Rel c j i
⊢ (h₁.hom + h₂.hom) i j = 0
[PROOFSTEP]
rw [Pi.add_apply, Pi.add_apply, h₁.zero i j hij, h₂.zero i j hij, add_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f₁ g₁ f₂ g₂ : C ⟶ D
h₁ : Homotopy f₁ g₁
h₂ : Homotopy f₂ g₂
i : ι
⊢ Hom.f (f₁ + f₂) i = ↑(dNext i) (h₁.hom + h₂.hom) + ↑(prevD i) (h₁.hom + h₂.hom) + Hom.f (g₁ + g₂) i
[PROOFSTEP]
simp only [HomologicalComplex.add_f_apply, h₁.comm, h₂.comm, AddMonoidHom.map_add]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f₁ g₁ f₂ g₂ : C ⟶ D
h₁ : Homotopy f₁ g₁
h₂ : Homotopy f₂ g₂
i : ι
⊢ ↑(dNext i) h₁.hom + ↑(prevD i) h₁.hom + Hom.f g₁ i + (↑(dNext i) h₂.hom + ↑(prevD i) h₂.hom + Hom.f g₂ i) =
↑(dNext i) h₁.hom + ↑(dNext i) h₂.hom + (↑(prevD i) h₁.hom + ↑(prevD i) h₂.hom) + (Hom.f g₁ i + Hom.f g₂ i)
[PROOFSTEP]
abel
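
The three preceding groups equip Homotopy with its basic algebra; reading the hypotheses and goals, the underlying families are (the constructor names symm/trans/add are assumed from the shape of the statements):

-- Homotopy g f                  from h : Homotopy f g,                     family -h.hom
-- Homotopy e g                  from h : Homotopy e f and k : Homotopy f g, family h.hom + k.hom
-- Homotopy (f₁ + f₂) (g₁ + g₂)  from h₁ : Homotopy f₁ g₁ and h₂ : Homotopy f₂ g₂, family h₁.hom + h₂.hom
-- Each zero field is pointwise (Pi.neg_apply / Pi.add_apply plus the
-- components' zero fields); each comm field is an AddMonoidHom.map_neg /
-- map_add rewrite followed by additive rearrangement (abel).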
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
e f : C ⟶ D
h : Homotopy e f
g : D ⟶ E
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ (fun i j => hom h i j ≫ Hom.f g j) i j = 0
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
e f : C ⟶ D
h : Homotopy e f
g : D ⟶ E
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ hom h i j ≫ Hom.f g j = 0
[PROOFSTEP]
rw [h.zero i j w, zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
e f : C ⟶ D
h : Homotopy e f
g : D ⟶ E
i : ι
⊢ Hom.f (e ≫ g) i =
((↑(dNext i) fun i j => hom h i j ≫ Hom.f g j) + ↑(prevD i) fun i j => hom h i j ≫ Hom.f g j) + Hom.f (f ≫ g) i
[PROOFSTEP]
rw [comp_f, h.comm i, dNext_comp_right, prevD_comp_right, Preadditive.add_comp, comp_f, Preadditive.add_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
f g : D ⟶ E
h : Homotopy f g
e : C ⟶ D
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ (fun i j => Hom.f e i ≫ hom h i j) i j = 0
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
f g : D ⟶ E
h : Homotopy f g
e : C ⟶ D
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ Hom.f e i ≫ hom h i j = 0
[PROOFSTEP]
rw [h.zero i j w, comp_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
f g : D ⟶ E
h : Homotopy f g
e : C ⟶ D
i : ι
⊢ Hom.f (e ≫ f) i =
((↑(dNext i) fun i j => Hom.f e i ≫ hom h i j) + ↑(prevD i) fun i j => Hom.f e i ≫ hom h i j) + Hom.f (e ≫ g) i
[PROOFSTEP]
rw [comp_f, h.comm i, dNext_comp_left, prevD_comp_left, comp_f, Preadditive.comp_add, Preadditive.comp_add]
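
The two groups above compose a homotopy with a chain map on the right and on the left; a hedged summary (the names compRight/compLeft are assumptions):

-- Homotopy (e ≫ g) (f ≫ g) from h : Homotopy e f and g : D ⟶ E,
--   family fun i j => hom h i j ≫ Hom.f g j
-- Homotopy (e ≫ f) (e ≫ g) from h : Homotopy f g and e : C ⟶ D,
--   family fun i j => Hom.f e i ≫ hom h i j
-- The zero fields follow by zero_comp / comp_zero; the comm fields use
-- dNext_comp_right/prevD_comp_right and dNext_comp_left/prevD_comp_left,
-- whose names appear in the rw steps above.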
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
⊢ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) i ≫ d D i j = d C i j ≫ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) j
[PROOFSTEP]
have eq1 : prevD i hom ≫ D.d i j = 0 := by
simp only [prevD, AddMonoidHom.mk'_apply, Category.assoc, d_comp_d, comp_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
⊢ ↑(prevD i) hom ≫ d D i j = 0
[PROOFSTEP]
simp only [prevD, AddMonoidHom.mk'_apply, Category.assoc, d_comp_d, comp_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
eq1 : ↑(prevD i) hom ≫ d D i j = 0
⊢ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) i ≫ d D i j = d C i j ≫ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) j
[PROOFSTEP]
have eq2 : C.d i j ≫ dNext j hom = 0 := by simp only [dNext, AddMonoidHom.mk'_apply, d_comp_d_assoc, zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
eq1 : ↑(prevD i) hom ≫ d D i j = 0
⊢ d C i j ≫ ↑(dNext j) hom = 0
[PROOFSTEP]
simp only [dNext, AddMonoidHom.mk'_apply, d_comp_d_assoc, zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
eq1 : ↑(prevD i) hom ≫ d D i j = 0
eq2 : d C i j ≫ ↑(dNext j) hom = 0
⊢ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) i ≫ d D i j = d C i j ≫ (fun i => ↑(dNext i) hom + ↑(prevD i) hom) j
[PROOFSTEP]
dsimp only
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
hij : ComplexShape.Rel c i j
eq1 : ↑(prevD i) hom ≫ d D i j = 0
eq2 : d C i j ≫ ↑(dNext j) hom = 0
⊢ (↑(dNext i) hom + ↑(prevD i) hom) ≫ d D i j = d C i j ≫ (↑(dNext j) hom + ↑(prevD j) hom)
[PROOFSTEP]
rw [dNext_eq hom hij, prevD_eq hom hij, Preadditive.comp_add, Preadditive.add_comp, eq1, eq2, add_zero, zero_add,
Category.assoc]
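
The square just verified is exactly the comm' obligation for the chain map assembled from an arbitrary family of morphisms; a reconstruction of the two definitions, with the proof body taken verbatim from the steps above (the reduction of the primed version to a dite family is displayed by the later simp only [nullHomotopicMap'] steps):

def nullHomotopicMap (hom : ∀ i j, X C i ⟶ X D j) : C ⟶ D where
  f i := dNext i hom + prevD i hom
  comm' i j hij := by
    -- the prevD summand dies against d D, the dNext summand against d C
    have eq1 : prevD i hom ≫ d D i j = 0 := by
      simp only [prevD, AddMonoidHom.mk'_apply, Category.assoc, d_comp_d, comp_zero]
    have eq2 : d C i j ≫ dNext j hom = 0 := by
      simp only [dNext, AddMonoidHom.mk'_apply, d_comp_d_assoc, zero_comp]
    dsimp only
    rw [dNext_eq hom hij, prevD_eq hom hij, Preadditive.comp_add, Preadditive.add_comp,
      eq1, eq2, add_zero, zero_add, Category.assoc]

def nullHomotopicMap' (h : ∀ i j, ComplexShape.Rel c j i → (X C i ⟶ X D j)) : C ⟶ D :=
  nullHomotopicMap fun i j => dite (ComplexShape.Rel c j i) (h i j) fun _ => 0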
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → X C i ⟶ X D j
g : D ⟶ E
⊢ nullHomotopicMap hom ≫ g = nullHomotopicMap fun i j => hom i j ≫ Hom.f g j
[PROOFSTEP]
ext n
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → X C i ⟶ X D j
g : D ⟶ E
n : ι
⊢ Hom.f (nullHomotopicMap hom ≫ g) n = Hom.f (nullHomotopicMap fun i j => hom i j ≫ Hom.f g j) n
[PROOFSTEP]
dsimp [nullHomotopicMap, fromNext, toPrev, AddMonoidHom.mk'_apply]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → X C i ⟶ X D j
g : D ⟶ E
n : ι
⊢ (dFrom C n ≫ hom (ComplexShape.next c n) n + hom n (ComplexShape.prev c n) ≫ dTo D n) ≫ Hom.f g n =
dFrom C n ≫ hom (ComplexShape.next c n) n ≫ Hom.f g n +
(hom n (ComplexShape.prev c n) ≫ Hom.f g (ComplexShape.prev c n)) ≫ dTo E n
[PROOFSTEP]
simp only [Preadditive.add_comp, Category.assoc, g.comm]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
⊢ nullHomotopicMap' hom ≫ g = nullHomotopicMap' fun i j hij => hom i j hij ≫ Hom.f g j
[PROOFSTEP]
ext n
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n : ι
⊢ Hom.f (nullHomotopicMap' hom ≫ g) n = Hom.f (nullHomotopicMap' fun i j hij => hom i j hij ≫ Hom.f g j) n
[PROOFSTEP]
erw [nullHomotopicMap_comp]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n : ι
⊢ Hom.f (nullHomotopicMap fun i j => (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) ≫ Hom.f g j) n =
Hom.f (nullHomotopicMap' fun i j hij => hom i j hij ≫ Hom.f g j) n
[PROOFSTEP]
congr
[GOAL]
case h.e_self.e_hom
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n : ι
⊢ (fun i j => (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) ≫ Hom.f g j) = fun i j =>
dite (ComplexShape.Rel c j i) ((fun i j hij => hom i j hij ≫ Hom.f g j) i j) fun x => 0
[PROOFSTEP]
ext i j
[GOAL]
case h.e_self.e_hom.h.h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n i j : ι
⊢ (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) ≫ Hom.f g j =
dite (ComplexShape.Rel c j i) ((fun i j hij => hom i j hij ≫ Hom.f g j) i j) fun x => 0
[PROOFSTEP]
split_ifs
[GOAL]
case pos
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n i j : ι
h✝ : ComplexShape.Rel c j i
⊢ hom i j h✝ ≫ Hom.f g j = (fun i j hij => hom i j hij ≫ Hom.f g j) i j h✝
[PROOFSTEP]
rfl
[GOAL]
case neg
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g✝ : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
g : D ⟶ E
n i j : ι
h✝ : ¬ComplexShape.Rel c j i
⊢ 0 ≫ Hom.f g j = 0
[PROOFSTEP]
rw [zero_comp]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → X D i ⟶ X E j
⊢ f ≫ nullHomotopicMap hom = nullHomotopicMap fun i j => Hom.f f i ≫ hom i j
[PROOFSTEP]
ext n
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → X D i ⟶ X E j
n : ι
⊢ Hom.f (f ≫ nullHomotopicMap hom) n = Hom.f (nullHomotopicMap fun i j => Hom.f f i ≫ hom i j) n
[PROOFSTEP]
dsimp [nullHomotopicMap, fromNext, toPrev, AddMonoidHom.mk'_apply]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → X D i ⟶ X E j
n : ι
⊢ Hom.f f n ≫ (dFrom D n ≫ hom (ComplexShape.next c n) n + hom n (ComplexShape.prev c n) ≫ dTo E n) =
dFrom C n ≫ Hom.f f (ComplexShape.next c n) ≫ hom (ComplexShape.next c n) n +
(Hom.f f n ≫ hom n (ComplexShape.prev c n)) ≫ dTo E n
[PROOFSTEP]
simp only [Preadditive.comp_add, Category.assoc, f.comm_assoc]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
⊢ f ≫ nullHomotopicMap' hom = nullHomotopicMap' fun i j hij => Hom.f f i ≫ hom i j hij
[PROOFSTEP]
ext n
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n : ι
⊢ Hom.f (f ≫ nullHomotopicMap' hom) n = Hom.f (nullHomotopicMap' fun i j hij => Hom.f f i ≫ hom i j hij) n
[PROOFSTEP]
erw [comp_nullHomotopicMap]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n : ι
⊢ Hom.f (nullHomotopicMap fun i j => Hom.f f i ≫ dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) n =
Hom.f (nullHomotopicMap' fun i j hij => Hom.f f i ≫ hom i j hij) n
[PROOFSTEP]
congr
[GOAL]
case h.e_self.e_hom
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n : ι
⊢ (fun i j => Hom.f f i ≫ dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) = fun i j =>
dite (ComplexShape.Rel c j i) ((fun i j hij => Hom.f f i ≫ hom i j hij) i j) fun x => 0
[PROOFSTEP]
ext i j
[GOAL]
case h.e_self.e_hom.h.h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n i j : ι
⊢ (Hom.f f i ≫ dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) =
dite (ComplexShape.Rel c j i) ((fun i j hij => Hom.f f i ≫ hom i j hij) i j) fun x => 0
[PROOFSTEP]
split_ifs
[GOAL]
case pos
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n i j : ι
h✝ : ComplexShape.Rel c j i
⊢ Hom.f f i ≫ hom i j h✝ = (fun i j hij => Hom.f f i ≫ hom i j hij) i j h✝
[PROOFSTEP]
rfl
[GOAL]
case neg
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
f : C ⟶ D
hom : (i j : ι) → ComplexShape.Rel c j i → (X D i ⟶ X E j)
n i j : ι
h✝ : ¬ComplexShape.Rel c j i
⊢ Hom.f f i ≫ 0 = 0
[PROOFSTEP]
rw [comp_zero]
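
Null-homotopic maps absorb composition on either side; summarizing the four blocks above (the unprimed names are confirmed by the erw steps, the primed ones are the evident analogues):

-- nullHomotopicMap_comp : nullHomotopicMap hom ≫ g = nullHomotopicMap fun i j => hom i j ≫ Hom.f g j
-- comp_nullHomotopicMap : f ≫ nullHomotopicMap hom = nullHomotopicMap fun i j => Hom.f f i ≫ hom i j
-- The primed variants push the composition under the dite; split_ifs leaves a
-- trivial rfl branch and a zero branch closed by zero_comp / comp_zero.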
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.264735, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → X C i ⟶ X D j
i j : ι
⊢ X ((Functor.mapHomologicalComplex G c).obj C) i ⟶ X ((Functor.mapHomologicalComplex G c).obj D) j
[PROOFSTEP]
exact G.map (hom i j)
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → X C i ⟶ X D j
⊢ (Functor.mapHomologicalComplex G c).map (nullHomotopicMap hom) = nullHomotopicMap fun i j => G.map (hom i j)
[PROOFSTEP]
ext i
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → X C i ⟶ X D j
i : ι
⊢ Hom.f ((Functor.mapHomologicalComplex G c).map (nullHomotopicMap hom)) i =
Hom.f (nullHomotopicMap fun i j => G.map (hom i j)) i
[PROOFSTEP]
dsimp [nullHomotopicMap, dNext, prevD]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → X C i ⟶ X D j
i : ι
⊢ G.map
(d C i (ComplexShape.next c i) ≫ hom (ComplexShape.next c i) i +
hom i (ComplexShape.prev c i) ≫ d D (ComplexShape.prev c i) i) =
G.map (d C i (ComplexShape.next c i)) ≫ G.map (hom (ComplexShape.next c i) i) +
G.map (hom i (ComplexShape.prev c i)) ≫ G.map (d D (ComplexShape.prev c i) i)
[PROOFSTEP]
simp only [G.map_comp, Functor.map_add]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.270178, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
i j : ι
hij : ComplexShape.Rel c j i
⊢ X ((Functor.mapHomologicalComplex G c).obj C) i ⟶ X ((Functor.mapHomologicalComplex G c).obj D) j
[PROOFSTEP]
exact G.map (hom i j hij)
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ (Functor.mapHomologicalComplex G c).map (nullHomotopicMap' hom) = nullHomotopicMap' fun i j hij => G.map (hom i j hij)
[PROOFSTEP]
ext n
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n : ι
⊢ Hom.f ((Functor.mapHomologicalComplex G c).map (nullHomotopicMap' hom)) n =
Hom.f (nullHomotopicMap' fun i j hij => G.map (hom i j hij)) n
[PROOFSTEP]
erw [map_nullHomotopicMap]
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n : ι
⊢ Hom.f (nullHomotopicMap fun i j => G.map (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0)) n =
Hom.f (nullHomotopicMap' fun i j hij => G.map (hom i j hij)) n
[PROOFSTEP]
congr
[GOAL]
case h.e_self.e_hom
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n : ι
⊢ (fun i j => G.map (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0)) = fun i j =>
dite (ComplexShape.Rel c j i) ((fun i j hij => G.map (hom i j hij)) i j) fun x => 0
[PROOFSTEP]
ext i j
[GOAL]
case h.e_self.e_hom.h.h
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n i j : ι
⊢ G.map (dite (ComplexShape.Rel c j i) (hom i j) fun x => 0) =
dite (ComplexShape.Rel c j i) ((fun i j hij => G.map (hom i j hij)) i j) fun x => 0
[PROOFSTEP]
split_ifs
[GOAL]
case pos
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n i j : ι
h✝ : ComplexShape.Rel c j i
⊢ G.map (hom i j h✝) = (fun i j hij => G.map (hom i j hij)) i j h✝
[PROOFSTEP]
rfl
[GOAL]
case neg
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{u_3, u_2} W
inst✝¹ : Preadditive W
G : V ⥤ W
inst✝ : Functor.Additive G
hom : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
n i j : ι
h✝ : ¬ComplexShape.Rel c j i
⊢ G.map 0 = 0
[PROOFSTEP]
rw [G.map_zero]
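
An additive functor transports null-homotopic maps; summarizing the two blocks above (map_nullHomotopicMap is confirmed by the erw step, the primed name is assumed):

-- map_nullHomotopicMap : (Functor.mapHomologicalComplex G c).map (nullHomotopicMap hom)
--     = nullHomotopicMap fun i j => G.map (hom i j)
-- The primed variant commutes G.map with the dite; the negative branch is
-- exactly additivity, closed by G.map_zero.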
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
hom : (i j : ι) → X C i ⟶ X D j
zero : ∀ (i j : ι), ¬ComplexShape.Rel c j i → hom i j = 0
⊢ ∀ (i : ι), Hom.f (nullHomotopicMap hom) i = ↑(dNext i) hom + ↑(prevD i) hom + Hom.f 0 i
[PROOFSTEP]
intro i
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
zero : ∀ (i j : ι), ¬ComplexShape.Rel c j i → hom i j = 0
i : ι
⊢ Hom.f (nullHomotopicMap hom) i = ↑(dNext i) hom + ↑(prevD i) hom + Hom.f 0 i
[PROOFSTEP]
rw [HomologicalComplex.zero_f_apply, add_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
hom : (i j : ι) → X C i ⟶ X D j
zero : ∀ (i j : ι), ¬ComplexShape.Rel c j i → hom i j = 0
i : ι
⊢ Hom.f (nullHomotopicMap hom) i = ↑(dNext i) hom + ↑(prevD i) hom
[PROOFSTEP]
rfl
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Homotopy (nullHomotopicMap' h) 0
[PROOFSTEP]
apply nullHomotopy fun i j => dite (c.Rel j i) (h i j) fun _ => 0
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ ∀ (i j : ι), ¬ComplexShape.Rel c j i → (dite (ComplexShape.Rel c j i) (h i j) fun x => 0) = 0
[PROOFSTEP]
intro i j hij
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
i j : ι
hij : ¬ComplexShape.Rel c j i
⊢ (dite (ComplexShape.Rel c j i) (h i j) fun x => 0) = 0
[PROOFSTEP]
rw [dite_eq_right_iff]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
i j : ι
hij : ¬ComplexShape.Rel c j i
⊢ ∀ (h_1 : ComplexShape.Rel c j i), h i j h_1 = 0
[PROOFSTEP]
intro hij'
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
i j : ι
hij : ¬ComplexShape.Rel c j i
hij' : ComplexShape.Rel c j i
⊢ h i j hij' = 0
[PROOFSTEP]
exfalso
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
i j : ι
hij : ¬ComplexShape.Rel c j i
hij' : ComplexShape.Rel c j i
⊢ False
[PROOFSTEP]
exact hij hij'
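
The two constructions above exhibit nullHomotopicMap hom and nullHomotopicMap' h as null-homotopic in the technical sense; a sketch (nullHomotopy is confirmed by the apply step, the primed name and exact packaging are assumed):

def nullHomotopy (hom : ∀ i j, X C i ⟶ X D j)
    (zero : ∀ i j, ¬ ComplexShape.Rel c j i → hom i j = 0) :
    Homotopy (nullHomotopicMap hom) 0 :=
  { hom := hom, zero := zero,
    -- after rewriting Hom.f 0 i away, both sides agree definitionally
    comm := fun i => by rw [HomologicalComplex.zero_f_apply, add_zero]; rfl }

-- The primed variant applies nullHomotopy to the dite-extended family; the
-- remaining side condition is closed with dite_eq_right_iff, since the
-- positive branch contradicts the hypothesis ¬ ComplexShape.Rel c j i.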
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ Hom.f (nullHomotopicMap hom) k₁ = d C k₁ k₀ ≫ hom k₀ k₁ + hom k₁ k₂ ≫ d D k₂ k₁
[PROOFSTEP]
dsimp only [nullHomotopicMap]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ ↑(dNext k₁) hom + ↑(prevD k₁) hom = d C k₁ k₀ ≫ hom k₀ k₁ + hom k₁ k₂ ≫ d D k₂ k₁
[PROOFSTEP]
rw [dNext_eq hom r₁₀, prevD_eq hom r₂₁]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap' h) k₁ = d C k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ d D k₂ k₁
[PROOFSTEP]
simp only [nullHomotopicMap']
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap fun i j => dite (ComplexShape.Rel c j i) (h i j) fun x => 0) k₁ =
d C k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ d D k₂ k₁
[PROOFSTEP]
rw [nullHomotopicMap_f r₂₁ r₁₀]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ (d C k₁ k₀ ≫ dite (ComplexShape.Rel c k₁ k₀) (h k₀ k₁) fun x => 0) +
(dite (ComplexShape.Rel c k₂ k₁) (h k₁ k₂) fun x => 0) ≫ d D k₂ k₁ =
d C k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ d D k₂ k₁
[PROOFSTEP]
split_ifs
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₂ k₁ k₀ : ι
r₂₁ : ComplexShape.Rel c k₂ k₁
r₁₀ : ComplexShape.Rel c k₁ k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ d C k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ d D k₂ k₁ = d C k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ d D k₂ k₁
[PROOFSTEP]
rfl
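The primed analogue just proved, as a sketch over the same ambient variables (the name `nullHomotopicMap'_f` is an assumption following mathlib's naming conventions):

theorem nullHomotopicMap'_f {k₂ k₁ k₀ : ι} (r₂₁ : c.Rel k₂ k₁) (r₁₀ : c.Rel k₁ k₀)
    (h : ∀ i j, c.Rel j i → (C.X i ⟶ D.X j)) :
    (nullHomotopicMap' h).f k₁ = C.d k₁ k₀ ≫ h k₀ k₁ r₁₀ + h k₁ k₂ r₂₁ ≫ D.d k₂ k₁ := by
  -- reduce to the unprimed lemma, then discharge both `dite`s with the relations
  simp only [nullHomotopicMap']
  rw [nullHomotopicMap_f r₂₁ r₁₀]
  split_ifs
  rfl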
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hom : (i j : ι) → X C i ⟶ X D j
⊢ Hom.f (nullHomotopicMap hom) k₀ = hom k₀ k₁ ≫ d D k₁ k₀
[PROOFSTEP]
dsimp only [nullHomotopicMap]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hom : (i j : ι) → X C i ⟶ X D j
⊢ ↑(dNext k₀) hom + ↑(prevD k₀) hom = hom k₀ k₁ ≫ d D k₁ k₀
[PROOFSTEP]
rw [prevD_eq hom r₁₀, dNext, AddMonoidHom.mk'_apply, C.shape, zero_comp, zero_add]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hom : (i j : ι) → X C i ⟶ X D j
⊢ ¬ComplexShape.Rel c k₀ (ComplexShape.next c k₀)
[PROOFSTEP]
exact hk₀ _
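As a standalone statement, the boundary case just handled (degree `k₀` with no outgoing relation) reads as follows; the name is confirmed by the later step `rw [nullHomotopicMap_f_of_not_rel_left r₁₀ hk₀]`:

theorem nullHomotopicMap_f_of_not_rel_left {k₁ k₀ : ι} (r₁₀ : c.Rel k₁ k₀)
    (hk₀ : ∀ l, ¬c.Rel k₀ l) (hom : ∀ i j, C.X i ⟶ D.X j) :
    (nullHomotopicMap hom).f k₀ = hom k₀ k₁ ≫ D.d k₁ k₀ := by
  -- the `dNext` summand dies because `C.d k₀ _ = 0` by the shape hypothesis
  dsimp only [nullHomotopicMap]
  rw [prevD_eq hom r₁₀, dNext, AddMonoidHom.mk'_apply, C.shape, zero_comp, zero_add]
  exact hk₀ _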
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap' h) k₀ = h k₀ k₁ r₁₀ ≫ d D k₁ k₀
[PROOFSTEP]
simp only [nullHomotopicMap']
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap fun i j => dite (ComplexShape.Rel c j i) (h i j) fun x => 0) k₀ = h k₀ k₁ r₁₀ ≫ d D k₁ k₀
[PROOFSTEP]
rw [nullHomotopicMap_f_of_not_rel_left r₁₀ hk₀]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ (dite (ComplexShape.Rel c k₁ k₀) (h k₀ k₁) fun x => 0) ≫ d D k₁ k₀ = h k₀ k₁ r₁₀ ≫ d D k₁ k₀
[PROOFSTEP]
split_ifs
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ h k₀ k₁ r₁₀ ≫ d D k₁ k₀ = h k₀ k₁ r₁₀ ≫ d D k₁ k₀
[PROOFSTEP]
rfl
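Its primed analogue, sketched over the same ambient variables (the name is an assumption following mathlib's conventions):

theorem nullHomotopicMap'_f_of_not_rel_left {k₁ k₀ : ι} (r₁₀ : c.Rel k₁ k₀)
    (hk₀ : ∀ l, ¬c.Rel k₀ l) (h : ∀ i j, c.Rel j i → (C.X i ⟶ D.X j)) :
    (nullHomotopicMap' h).f k₀ = h k₀ k₁ r₁₀ ≫ D.d k₁ k₀ := by
  simp only [nullHomotopicMap']
  rw [nullHomotopicMap_f_of_not_rel_left r₁₀ hk₀]
  split_ifs
  rfl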
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
hom : (i j : ι) → X C i ⟶ X D j
⊢ Hom.f (nullHomotopicMap hom) k₁ = d C k₁ k₀ ≫ hom k₀ k₁
[PROOFSTEP]
dsimp only [nullHomotopicMap]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
hom : (i j : ι) → X C i ⟶ X D j
⊢ ↑(dNext k₁) hom + ↑(prevD k₁) hom = d C k₁ k₀ ≫ hom k₀ k₁
[PROOFSTEP]
rw [dNext_eq hom r₁₀, prevD, AddMonoidHom.mk'_apply, D.shape, comp_zero, add_zero]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
hom : (i j : ι) → X C i ⟶ X D j
⊢ ¬ComplexShape.Rel c (ComplexShape.prev c k₁) k₁
[PROOFSTEP]
exact hk₁ _
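The mirror boundary case (degree `k₁` with no incoming relation), as a sketch; the name is confirmed by the later step `rw [nullHomotopicMap_f_of_not_rel_right r₁₀ hk₁]`:

theorem nullHomotopicMap_f_of_not_rel_right {k₁ k₀ : ι} (r₁₀ : c.Rel k₁ k₀)
    (hk₁ : ∀ l, ¬c.Rel l k₁) (hom : ∀ i j, C.X i ⟶ D.X j) :
    (nullHomotopicMap hom).f k₁ = C.d k₁ k₀ ≫ hom k₀ k₁ := by
  -- this time the `prevD` summand dies because `D.d _ k₁ = 0` by the shape hypothesis
  dsimp only [nullHomotopicMap]
  rw [dNext_eq hom r₁₀, prevD, AddMonoidHom.mk'_apply, D.shape, comp_zero, add_zero]
  exact hk₁ _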
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap' h) k₁ = d C k₁ k₀ ≫ h k₀ k₁ r₁₀
[PROOFSTEP]
simp only [nullHomotopicMap']
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap fun i j => dite (ComplexShape.Rel c j i) (h i j) fun x => 0) k₁ = d C k₁ k₀ ≫ h k₀ k₁ r₁₀
[PROOFSTEP]
rw [nullHomotopicMap_f_of_not_rel_right r₁₀ hk₁]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ (d C k₁ k₀ ≫ dite (ComplexShape.Rel c k₁ k₀) (h k₀ k₁) fun x => 0) = d C k₁ k₀ ≫ h k₀ k₁ r₁₀
[PROOFSTEP]
split_ifs
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₁ k₀ : ι
r₁₀ : ComplexShape.Rel c k₁ k₀
hk₁ : ∀ (l : ι), ¬ComplexShape.Rel c l k₁
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ d C k₁ k₀ ≫ h k₀ k₁ r₁₀ = d C k₁ k₀ ≫ h k₀ k₁ r₁₀
[PROOFSTEP]
rfl
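And its primed analogue, again with the name assumed from mathlib's conventions:

theorem nullHomotopicMap'_f_of_not_rel_right {k₁ k₀ : ι} (r₁₀ : c.Rel k₁ k₀)
    (hk₁ : ∀ l, ¬c.Rel l k₁) (h : ∀ i j, c.Rel j i → (C.X i ⟶ D.X j)) :
    (nullHomotopicMap' h).f k₁ = C.d k₁ k₀ ≫ h k₀ k₁ r₁₀ := by
  simp only [nullHomotopicMap']
  rw [nullHomotopicMap_f_of_not_rel_right r₁₀ hk₁]
  split_ifs
  rfl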
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ Hom.f (nullHomotopicMap hom) k₀ = 0
[PROOFSTEP]
dsimp [nullHomotopicMap, dNext, prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ d C k₀ (ComplexShape.next c k₀) ≫ hom (ComplexShape.next c k₀) k₀ +
hom k₀ (ComplexShape.prev c k₀) ≫ d D (ComplexShape.prev c k₀) k₀ =
0
[PROOFSTEP]
rw [C.shape, D.shape, zero_comp, comp_zero, add_zero]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ ¬ComplexShape.Rel c (ComplexShape.prev c k₀) k₀
[PROOFSTEP]
apply_assumption
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
hom : (i j : ι) → X C i ⟶ X D j
⊢ ¬ComplexShape.Rel c k₀ (ComplexShape.next c k₀)
[PROOFSTEP]
apply_assumption
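When `k₀` is isolated in both directions, both summands vanish. Assembled from the steps above (the name `nullHomotopicMap_f_eq_zero` is confirmed by the `apply` step in the next proof):

theorem nullHomotopicMap_f_eq_zero {k₀ : ι} (hk₀ : ∀ l, ¬c.Rel k₀ l)
    (hk₀' : ∀ l, ¬c.Rel l k₀) (hom : ∀ i j, C.X i ⟶ D.X j) :
    (nullHomotopicMap hom).f k₀ = 0 := by
  dsimp [nullHomotopicMap, dNext, prevD]
  -- both shape side goals are closed by the isolation hypotheses
  rw [C.shape, D.shape, zero_comp, comp_zero, add_zero]
  apply_assumption
  apply_assumption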
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap' h) k₀ = 0
[PROOFSTEP]
simp only [nullHomotopicMap']
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i k₀ : ι
hk₀ : ∀ (l : ι), ¬ComplexShape.Rel c k₀ l
hk₀' : ∀ (l : ι), ¬ComplexShape.Rel c l k₀
h : (i j : ι) → ComplexShape.Rel c j i → (X C i ⟶ X D j)
⊢ Hom.f (nullHomotopicMap fun i j => dite (ComplexShape.Rel c j i) (h i j) fun x => 0) k₀ = 0
[PROOFSTEP]
apply nullHomotopicMap_f_eq_zero hk₀ hk₀'
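The primed version reduces immediately to the lemma above (name assumed from mathlib's conventions):

theorem nullHomotopicMap'_f_eq_zero {k₀ : ι} (hk₀ : ∀ l, ¬c.Rel k₀ l)
    (hk₀' : ∀ l, ¬c.Rel l k₀) (h : ∀ i j, c.Rel j i → (C.X i ⟶ D.X j)) :
    (nullHomotopicMap' h).f k₀ = 0 := by
  simp only [nullHomotopicMap']
  apply nullHomotopicMap_f_eq_zero hk₀ hk₀'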
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
⊢ ↑(prevD j) f = f j (j + 1) ≫ d Q (j + 1) j
[PROOFSTEP]
dsimp [prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
⊢ f j (ComplexShape.prev (ComplexShape.down ℕ) j) ≫ d Q (ComplexShape.prev (ComplexShape.down ℕ) j) j =
f j (j + 1) ≫ d Q (j + 1) j
[PROOFSTEP]
have : (ComplexShape.down ℕ).prev j = j + 1 := ChainComplex.prev ℕ j
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
this : ComplexShape.prev (ComplexShape.down ℕ) j = j + 1
⊢ f j (ComplexShape.prev (ComplexShape.down ℕ) j) ≫ d Q (ComplexShape.prev (ComplexShape.down ℕ) j) j =
f j (j + 1) ≫ d Q (j + 1) j
[PROOFSTEP]
congr 2
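On ℕ-indexed chain complexes `prevD j` collapses to a single composite, since `(ComplexShape.down ℕ).prev j = j + 1` always holds. A sketch of the lemma just proved, over `P Q : ChainComplex V ℕ` (the name `prevD_chainComplex` is assumed from mathlib's conventions):

theorem prevD_chainComplex (f : ∀ i j, P.X i ⟶ Q.X j) (j : ℕ) :
    prevD j f = f j (j + 1) ≫ Q.d (j + 1) j := by
  dsimp [prevD]
  have : (ComplexShape.down ℕ).prev j = j + 1 := ChainComplex.prev ℕ j
  congr 2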
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
⊢ ↑(dNext (i + 1)) f = d P (i + 1) i ≫ f i (i + 1)
[PROOFSTEP]
dsimp [dNext]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
⊢ d P (i + 1) (ComplexShape.next (ComplexShape.down ℕ) (i + 1)) ≫
f (ComplexShape.next (ComplexShape.down ℕ) (i + 1)) (i + 1) =
d P (i + 1) i ≫ f i (i + 1)
[PROOFSTEP]
have : (ComplexShape.down ℕ).next (i + 1) = i := ChainComplex.next_nat_succ _
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
this : ComplexShape.next (ComplexShape.down ℕ) (i + 1) = i
⊢ d P (i + 1) (ComplexShape.next (ComplexShape.down ℕ) (i + 1)) ≫
f (ComplexShape.next (ComplexShape.down ℕ) (i + 1)) (i + 1) =
d P (i + 1) i ≫ f i (i + 1)
[PROOFSTEP]
congr 2
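Dually for `dNext` at a successor degree, where `(ComplexShape.down ℕ).next (i + 1) = i` (name assumed):

theorem dNext_succ_chainComplex (f : ∀ i j, P.X i ⟶ Q.X j) (i : ℕ) :
    dNext (i + 1) f = P.d (i + 1) i ≫ f i (i + 1) := by
  dsimp [dNext]
  have : (ComplexShape.down ℕ).next (i + 1) = i := ChainComplex.next_nat_succ _
  congr 2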
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ↑(dNext 0) f = 0
[PROOFSTEP]
dsimp [dNext]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ d P 0 (ComplexShape.next (ComplexShape.down ℕ) 0) ≫ f (ComplexShape.next (ComplexShape.down ℕ) 0) 0 = 0
[PROOFSTEP]
rw [P.shape, zero_comp]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬ComplexShape.Rel (ComplexShape.down ℕ) 0 (ComplexShape.next (ComplexShape.down ℕ) 0)
[PROOFSTEP]
rw [ChainComplex.next_nat_zero]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬ComplexShape.Rel (ComplexShape.down ℕ) 0 0
[PROOFSTEP]
dsimp
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬0 + 1 = 0
[PROOFSTEP]
decide
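At degree 0 a chain complex has no outgoing differential, so `dNext 0` vanishes identically. Sketch of the lemma just proved (name assumed):

theorem dNext_zero_chainComplex (f : ∀ i j, P.X i ⟶ Q.X j) :
    dNext 0 f = 0 := by
  dsimp [dNext]
  rw [P.shape, zero_comp]
  -- side goal: `¬(ComplexShape.down ℕ).Rel 0 (next 0)`, i.e. `¬0 + 1 = 0`
  rw [ChainComplex.next_nat_zero]
  dsimp
  decide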
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ Hom.f e 0 = dFrom P 0 ≫ 0 + (zero ≫ (xPrevIso Q (_ : 0 + 1 = 0 + 1)).inv) ≫ dTo Q 0
[PROOFSTEP]
simpa using comm_zero
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n : ℕ
I : (f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1) :=
mkInductiveAux₁ e zero one comm_one succ n
⊢ Hom.f e (n + 1) =
dFrom P (n + 1) ≫ (xNextIso P (_ : n + 1 = n + 1)).hom ≫ I.fst +
(I.snd.fst ≫ (xPrevIso Q (_ : n + 1 + 1 = n + 1 + 1)).inv) ≫ dTo Q (n + 1)
[PROOFSTEP]
simpa using I.2.2
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i j : ℕ
h : i + 1 = j
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom =
(xNextIso P h).inv ≫ (mkInductiveAux₂ e zero comm_zero one comm_one succ j).fst
[PROOFSTEP]
subst j
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q (_ : i + 1 = i + 1)).hom =
(xNextIso P (_ : i + 1 = i + 1)).inv ≫ (mkInductiveAux₂ e zero comm_zero one comm_one succ (i + 1)).fst
[PROOFSTEP]
rcases i with (_ | _ | i)
[GOAL]
case zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ Nat.zero).snd.fst ≫
(xPrevIso Q (_ : Nat.zero + 1 = Nat.zero + 1)).hom =
(xNextIso P (_ : Nat.zero + 1 = Nat.zero + 1)).inv ≫
(mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.zero + 1)).fst
[PROOFSTEP]
simp [mkInductiveAux₂]
[GOAL]
case succ.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ Nat.zero)).snd.fst ≫
(xPrevIso Q (_ : Nat.succ Nat.zero + 1 = Nat.succ Nat.zero + 1)).hom =
(xNextIso P (_ : Nat.succ Nat.zero + 1 = Nat.succ Nat.zero + 1)).inv ≫
(mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ Nat.zero + 1)).fst
[PROOFSTEP]
simp [mkInductiveAux₂]
[GOAL]
case succ.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ (Nat.succ i))).snd.fst ≫
(xPrevIso Q (_ : Nat.succ (Nat.succ i) + 1 = Nat.succ (Nat.succ i) + 1)).hom =
(xNextIso P (_ : Nat.succ (Nat.succ i) + 1 = Nat.succ (Nat.succ i) + 1)).inv ≫
(mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ (Nat.succ i) + 1)).fst
[PROOFSTEP]
simp [mkInductiveAux₂]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
⊢ (fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0)
i j =
0
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
⊢ (if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom else 0) = 0
[PROOFSTEP]
rw [dif_neg]
[GOAL]
case hnc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
⊢ ¬i + 1 = j
[PROOFSTEP]
exact w
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ Hom.f e i =
((↑(dNext i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) +
↑(prevD i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) +
Hom.f 0 i
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ Hom.f e i =
(dFrom P i ≫
↑(fromNext i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) +
(↑(toPrev i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) ≫
dTo Q i +
0
[PROOFSTEP]
simp only [add_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ Hom.f e i =
(dFrom P i ≫
↑(fromNext i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) +
(↑(toPrev i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) ≫
dTo Q i
[PROOFSTEP]
refine' (mkInductiveAux₂ e zero comm_zero one comm_one succ i).2.2.trans _
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ dFrom P i ≫ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).fst +
(mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ dTo Q i =
(dFrom P i ≫
↑(fromNext i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) +
(↑(toPrev i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0) ≫
dTo Q i
[PROOFSTEP]
congr
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).fst =
↑(fromNext i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom else 0
[PROOFSTEP]
cases i
[GOAL]
case e_a.e_a.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ Nat.zero).fst =
↑(fromNext Nat.zero) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom else 0
[PROOFSTEP]
dsimp [fromNext, mkInductiveAux₂]
[GOAL]
case e_a.e_a.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ 0 =
if h : ComplexShape.next (ComplexShape.down ℕ) 0 + 1 = 0 then
(match ComplexShape.next (ComplexShape.down ℕ) 0 with
| 0 =>
{ fst := 0,
snd :=
{ fst := zero ≫ (xPrevIso Q mkInductiveAux₂.proof_1).inv,
snd := (_ : Hom.f e 0 = dFrom P 0 ≫ 0 + (zero ≫ (xPrevIso Q (_ : 0 + 1 = 0 + 1)).inv) ≫ dTo Q 0) } }
| Nat.succ n =>
{ fst := (xNextIso P (_ : n + 1 = n + 1)).hom ≫ (mkInductiveAux₁ e zero one comm_one succ n).fst,
snd :=
{
fst :=
(mkInductiveAux₁ e zero one comm_one succ n).snd.fst ≫
(xPrevIso Q (_ : n + 1 + 1 = n + 1 + 1)).inv,
snd :=
(_ :
Hom.f e (n + 1) =
dFrom P (n + 1) ≫
(xNextIso P (_ : n + 1 = n + 1)).hom ≫ (mkInductiveAux₁ e zero one comm_one succ n).fst +
((mkInductiveAux₁ e zero one comm_one succ n).snd.fst ≫
(xPrevIso Q (_ : n + 1 + 1 = n + 1 + 1)).inv) ≫
dTo Q (n + 1)) } }).snd.fst ≫
(xPrevIso Q h).hom
else 0
[PROOFSTEP]
rw [dif_neg]
[GOAL]
case e_a.e_a.zero.hnc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
⊢ ¬ComplexShape.next (ComplexShape.down ℕ) 0 + 1 = 0
[PROOFSTEP]
simp only
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n✝ : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ n✝)).fst =
↑(fromNext (Nat.succ n✝)) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom else 0
[PROOFSTEP]
dsimp [fromNext]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n✝ : ℕ
⊢ (xNextIso P (_ : n✝ + 1 = n✝ + 1)).hom ≫ (mkInductiveAux₁ e zero one comm_one succ n✝).fst =
if h : ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝ then
(mkInductiveAux₂ e zero comm_zero one comm_one succ
(ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝))).snd.fst ≫
(xPrevIso Q h).hom
else 0
[PROOFSTEP]
simp only [ChainComplex.next_nat_succ, dite_true]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n✝ : ℕ
⊢ (xNextIso P (_ : n✝ + 1 = n✝ + 1)).hom ≫ (mkInductiveAux₁ e zero one comm_one succ n✝).fst =
(mkInductiveAux₂ e zero comm_zero one comm_one succ
(ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝))).snd.fst ≫
(xPrevIso Q (_ : ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝)).hom
[PROOFSTEP]
rw [mkInductiveAux₃ e zero comm_zero one comm_one succ]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n✝ : ℕ
⊢ (xNextIso P (_ : n✝ + 1 = n✝ + 1)).hom ≫ (mkInductiveAux₁ e zero one comm_one succ n✝).fst =
(xNextIso P (_ : ComplexShape.next (ComplexShape.down ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝)).inv ≫
(mkInductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ n✝)).fst
[PROOFSTEP]
dsimp [xNextIso]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
n✝ : ℕ
⊢ eqToHom (_ : xNext P (n✝ + 1) = X P n✝) ≫ (mkInductiveAux₁ e zero one comm_one succ n✝).fst =
𝟙 (xNext P (Nat.succ n✝)) ≫
eqToHom (_ : xNext P (n✝ + 1) = X P n✝) ≫ (mkInductiveAux₁ e zero one comm_one succ n✝).fst
[PROOFSTEP]
rw [Category.id_comp]
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
↑(toPrev i) fun i j =>
if h : i + 1 = j then (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom else 0
[PROOFSTEP]
dsimp [toPrev]
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ (mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
if h : i + 1 = ComplexShape.prev (ComplexShape.down ℕ) i then
(mkInductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst ≫ (xPrevIso Q h).hom
else 0
[PROOFSTEP]
erw [dif_pos, Category.comp_id]
[GOAL]
case e_a.e_a.hc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ i + 1 = ComplexShape.prev (ComplexShape.down ℕ) i
case e_a.e_a.hc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : ChainComplex V ℕ
e : P ⟶ Q
zero : X P 0 ⟶ X Q 1
comm_zero : Hom.f e 0 = zero ≫ d Q 1 0
one : X P 1 ⟶ X Q 2
comm_one : Hom.f e 1 = d P 1 0 ≫ zero + one ≫ d Q 2 1
succ :
(n : ℕ) →
(p :
(f : X P n ⟶ X Q (n + 1)) ×'
(f' : X P (n + 1) ⟶ X Q (n + 2)) ×' Hom.f e (n + 1) = d P (n + 1) n ≫ f + f' ≫ d Q (n + 2) (n + 1)) →
(f'' : X P (n + 2) ⟶ X Q (n + 3)) ×' Hom.f e (n + 2) = d P (n + 2) (n + 1) ≫ p.snd.fst + f'' ≫ d Q (n + 3) (n + 2)
i : ℕ
⊢ i + 1 = ComplexShape.prev (ComplexShape.down ℕ) i
[PROOFSTEP]
simp only [ChainComplex.prev]
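Taken together, the steps from the `comm_zero` check through this point verify the homotopy condition underlying `Homotopy.mkInductive`, which assembles `zero`, `comm_zero`, `one`, `comm_one`, and `succ` into a null-homotopy of `e` on ℕ-indexed chain complexes. A hedged usage sketch over the ambient variables (the result type `Homotopy e 0` is read off from the goal `Hom.f e i = (dNext i + prevD i) … + Hom.f 0 i` above; the exact argument order is an assumption):

example : Homotopy e 0 :=
  Homotopy.mkInductive e zero comm_zero one comm_one succ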
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
⊢ ↑(dNext j) f = d P j (j + 1) ≫ f (j + 1) j
[PROOFSTEP]
dsimp [dNext]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
⊢ d P j (ComplexShape.next (ComplexShape.up ℕ) j) ≫ f (ComplexShape.next (ComplexShape.up ℕ) j) j =
d P j (j + 1) ≫ f (j + 1) j
[PROOFSTEP]
have : (ComplexShape.up ℕ).next j = j + 1 := CochainComplex.next ℕ j
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
j : ℕ
this : ComplexShape.next (ComplexShape.up ℕ) j = j + 1
⊢ d P j (ComplexShape.next (ComplexShape.up ℕ) j) ≫ f (ComplexShape.next (ComplexShape.up ℕ) j) j =
d P j (j + 1) ≫ f (j + 1) j
[PROOFSTEP]
congr 2
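The cochain analogues follow the same pattern with `ComplexShape.up ℕ`. First, `dNext` collapses at every degree, since `(ComplexShape.up ℕ).next j = j + 1` (name assumed, over `P Q : CochainComplex V ℕ`):

theorem dNext_cochainComplex (f : ∀ i j, P.X i ⟶ Q.X j) (j : ℕ) :
    dNext j f = P.d j (j + 1) ≫ f (j + 1) j := by
  dsimp [dNext]
  have : (ComplexShape.up ℕ).next j = j + 1 := CochainComplex.next ℕ j
  congr 2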
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
⊢ ↑(prevD (i + 1)) f = f (i + 1) i ≫ d Q i (i + 1)
[PROOFSTEP]
dsimp [prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
⊢ f (i + 1) (ComplexShape.prev (ComplexShape.up ℕ) (i + 1)) ≫
d Q (ComplexShape.prev (ComplexShape.up ℕ) (i + 1)) (i + 1) =
f (i + 1) i ≫ d Q i (i + 1)
[PROOFSTEP]
have : (ComplexShape.up ℕ).prev (i + 1) = i := CochainComplex.prev_nat_succ i
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
i : ℕ
this : ComplexShape.prev (ComplexShape.up ℕ) (i + 1) = i
⊢ f (i + 1) (ComplexShape.prev (ComplexShape.up ℕ) (i + 1)) ≫
d Q (ComplexShape.prev (ComplexShape.up ℕ) (i + 1)) (i + 1) =
f (i + 1) i ≫ d Q i (i + 1)
[PROOFSTEP]
congr 2
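Second, `prevD` collapses at successor degrees, where `(ComplexShape.up ℕ).prev (i + 1) = i` (name assumed):

theorem prevD_succ_cochainComplex (f : ∀ i j, P.X i ⟶ Q.X j) (i : ℕ) :
    prevD (i + 1) f = f (i + 1) i ≫ Q.d i (i + 1) := by
  dsimp [prevD]
  have : (ComplexShape.up ℕ).prev (i + 1) = i := CochainComplex.prev_nat_succ i
  congr 2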
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ↑(prevD 0) f = 0
[PROOFSTEP]
dsimp [prevD]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ f 0 (ComplexShape.prev (ComplexShape.up ℕ) 0) ≫ d Q (ComplexShape.prev (ComplexShape.up ℕ) 0) 0 = 0
[PROOFSTEP]
rw [Q.shape, comp_zero]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬ComplexShape.Rel (ComplexShape.up ℕ) (ComplexShape.prev (ComplexShape.up ℕ) 0) 0
[PROOFSTEP]
rw [CochainComplex.prev_nat_zero]
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬ComplexShape.Rel (ComplexShape.up ℕ) 0 0
[PROOFSTEP]
dsimp
[GOAL]
case a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
f : (i j : ℕ) → X P i ⟶ X Q j
⊢ ¬0 + 1 = 0
[PROOFSTEP]
decide
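And at degree 0 a cochain complex has no incoming differential, so `prevD 0` vanishes, mirroring `dNext 0` on chain complexes (name assumed):

theorem prevD_zero_cochainComplex (f : ∀ i j, P.X i ⟶ Q.X j) :
    prevD 0 f = 0 := by
  dsimp [prevD]
  rw [Q.shape, comp_zero]
  -- side goal: `¬(ComplexShape.up ℕ).Rel (prev 0) 0`, i.e. `¬0 + 1 = 0`
  rw [CochainComplex.prev_nat_zero]
  dsimp
  decide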
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ Hom.f e 0 = 0 ≫ dTo Q 0 + dFrom P 0 ≫ (xNextIso P (_ : 0 + 1 = 0 + 1)).hom ≫ zero
[PROOFSTEP]
simpa using comm_zero
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n : ℕ
I : (f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f' :=
mkCoinductiveAux₁ e zero one comm_one succ n
⊢ Hom.f e (n + 1) =
(I.fst ≫ (xPrevIso Q (_ : n + 1 = n + 1)).inv) ≫ dTo Q (n + 1) +
dFrom P (n + 1) ≫ (xNextIso P (_ : n + 1 + 1 = n + 1 + 1)).hom ≫ I.snd.fst
[PROOFSTEP]
simpa using I.2.2
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i j : ℕ
h : i + 1 = j
⊢ (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).fst ≫ (xPrevIso Q h).hom
[PROOFSTEP]
subst j
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (xNextIso P (_ : i + 1 = i + 1)).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (i + 1)).fst ≫ (xPrevIso Q (_ : i + 1 = i + 1)).hom
[PROOFSTEP]
rcases i with (_ | _ | i)
[GOAL]
case zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ (xNextIso P (_ : Nat.zero + 1 = Nat.zero + 1)).inv ≫
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ Nat.zero).snd.fst =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.zero + 1)).fst ≫
(xPrevIso Q (_ : Nat.zero + 1 = Nat.zero + 1)).hom
[PROOFSTEP]
simp [mkCoinductiveAux₂]
[GOAL]
case succ.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ (xNextIso P (_ : Nat.succ Nat.zero + 1 = Nat.succ Nat.zero + 1)).inv ≫
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ Nat.zero)).snd.fst =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ Nat.zero + 1)).fst ≫
(xPrevIso Q (_ : Nat.succ Nat.zero + 1 = Nat.succ Nat.zero + 1)).hom
[PROOFSTEP]
simp [mkCoinductiveAux₂]
[GOAL]
case succ.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (xNextIso P (_ : Nat.succ (Nat.succ i) + 1 = Nat.succ (Nat.succ i) + 1)).inv ≫
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ (Nat.succ i))).snd.fst =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ (Nat.succ i) + 1)).fst ≫
(xPrevIso Q (_ : Nat.succ (Nat.succ i) + 1 = Nat.succ (Nat.succ i) + 1)).hom
[PROOFSTEP]
simp [mkCoinductiveAux₂]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.up ℕ) j i
⊢ (fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0)
i j =
0
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.up ℕ) j i
⊢ (if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst else 0) =
0
[PROOFSTEP]
rw [dif_neg]
[GOAL]
case hnc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i j : ℕ
w : ¬ComplexShape.Rel (ComplexShape.up ℕ) j i
⊢ ¬j + 1 = i
[PROOFSTEP]
exact w
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ Hom.f e i =
((↑(dNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) +
↑(prevD i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) +
Hom.f 0 i
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ Hom.f e i =
(dFrom P i ≫
↑(fromNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) +
(↑(toPrev i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) ≫
dTo Q i +
0
[PROOFSTEP]
simp only [add_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ Hom.f e i =
(dFrom P i ≫
↑(fromNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) +
(↑(toPrev i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) ≫
dTo Q i
[PROOFSTEP]
rw [add_comm]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ Hom.f e i =
(↑(toPrev i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) ≫
dTo Q i +
dFrom P i ≫
↑(fromNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0
[PROOFSTEP]
refine' (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).2.2.trans _
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).fst ≫ dTo Q i +
dFrom P i ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
(↑(toPrev i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0) ≫
dTo Q i +
dFrom P i ≫
↑(fromNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst
else 0
[PROOFSTEP]
congr
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).fst =
↑(toPrev i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst else 0
[PROOFSTEP]
cases i
[GOAL]
case e_a.e_a.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ Nat.zero).fst =
↑(toPrev Nat.zero) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst else 0
[PROOFSTEP]
dsimp [toPrev, mkCoinductiveAux₂]
[GOAL]
case e_a.e_a.zero
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ 0 =
if h : ComplexShape.prev (ComplexShape.up ℕ) 0 + 1 = 0 then
(xNextIso P h).inv ≫
(match ComplexShape.prev (ComplexShape.up ℕ) 0 with
| 0 =>
{ fst := 0,
snd :=
{ fst := (xNextIso P mkCoinductiveAux₂.proof_1).hom ≫ zero,
snd := (_ : Hom.f e 0 = 0 ≫ dTo Q 0 + dFrom P 0 ≫ (xNextIso P (_ : 0 + 1 = 0 + 1)).hom ≫ zero) } }
| Nat.succ n =>
{ fst := (mkCoinductiveAux₁ e zero one comm_one succ n).fst ≫ (xPrevIso Q (_ : n + 1 = n + 1)).inv,
snd :=
{
fst :=
(xNextIso P (_ : n + 1 + 1 = n + 1 + 1)).hom ≫
(mkCoinductiveAux₁ e zero one comm_one succ n).snd.fst,
snd :=
(_ :
Hom.f e (n + 1) =
((mkCoinductiveAux₁ e zero one comm_one succ n).fst ≫ (xPrevIso Q (_ : n + 1 = n + 1)).inv) ≫
dTo Q (n + 1) +
dFrom P (n + 1) ≫
(xNextIso P (_ : n + 1 + 1 = n + 1 + 1)).hom ≫
(mkCoinductiveAux₁ e zero one comm_one succ n).snd.fst) } }).snd.fst
else 0
[PROOFSTEP]
rw [dif_neg]
[GOAL]
case e_a.e_a.zero.hnc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
⊢ ¬ComplexShape.prev (ComplexShape.up ℕ) 0 + 1 = 0
[PROOFSTEP]
simp only
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n✝ : ℕ
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ n✝)).fst =
↑(toPrev (Nat.succ n✝)) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst else 0
[PROOFSTEP]
dsimp [toPrev]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n✝ : ℕ
⊢ (mkCoinductiveAux₁ e zero one comm_one succ n✝).fst ≫ (xPrevIso Q (_ : n✝ + 1 = n✝ + 1)).inv =
if h : ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝ then
(xNextIso P h).inv ≫
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ
(ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝))).snd.fst
else 0
[PROOFSTEP]
simp only [CochainComplex.prev_nat_succ, dite_true]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n✝ : ℕ
⊢ (mkCoinductiveAux₁ e zero one comm_one succ n✝).fst ≫ (xPrevIso Q (_ : n✝ + 1 = n✝ + 1)).inv =
(xNextIso P (_ : ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝)).inv ≫
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ
(ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝))).snd.fst
[PROOFSTEP]
rw [mkCoinductiveAux₃ e zero comm_zero one comm_one succ]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n✝ : ℕ
⊢ (mkCoinductiveAux₁ e zero one comm_one succ n✝).fst ≫ (xPrevIso Q (_ : n✝ + 1 = n✝ + 1)).inv =
(mkCoinductiveAux₂ e zero comm_zero one comm_one succ (Nat.succ n✝)).fst ≫
(xPrevIso Q (_ : ComplexShape.prev (ComplexShape.up ℕ) (Nat.succ n✝) + 1 = Nat.succ n✝)).hom
[PROOFSTEP]
dsimp [xPrevIso]
[GOAL]
case e_a.e_a.succ
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
n✝ : ℕ
⊢ (mkCoinductiveAux₁ e zero one comm_one succ n✝).fst ≫ eqToHom (_ : X Q n✝ = xPrev Q (n✝ + 1)) =
((mkCoinductiveAux₁ e zero one comm_one succ n✝).fst ≫ eqToHom (_ : X Q n✝ = xPrev Q (n✝ + 1))) ≫
𝟙 (xPrev Q (Nat.succ n✝))
[PROOFSTEP]
rw [Category.comp_id]
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
↑(fromNext i) fun i j =>
if h : j + 1 = i then (xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ j).snd.fst else 0
[PROOFSTEP]
dsimp [fromNext]
[GOAL]
case e_a.e_a
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst =
if h : i + 1 = ComplexShape.next (ComplexShape.up ℕ) i then
(xNextIso P h).inv ≫ (mkCoinductiveAux₂ e zero comm_zero one comm_one succ i).snd.fst
else 0
[PROOFSTEP]
erw [dif_pos, Category.id_comp]
[GOAL]
case e_a.e_a.hc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ i + 1 = ComplexShape.next (ComplexShape.up ℕ) i
case e_a.e_a.hc
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h k : D ⟶ E
i✝ : ι
P Q : CochainComplex V ℕ
e : P ⟶ Q
zero : X P 1 ⟶ X Q 0
comm_zero : Hom.f e 0 = d P 0 1 ≫ zero
one : X P 2 ⟶ X Q 1
comm_one : Hom.f e 1 = zero ≫ d Q 0 1 + d P 1 2 ≫ one
succ :
(n : ℕ) →
(p :
(f : X P (n + 1) ⟶ X Q n) ×'
(f' : X P (n + 2) ⟶ X Q (n + 1)) ×' Hom.f e (n + 1) = f ≫ d Q n (n + 1) + d P (n + 1) (n + 2) ≫ f') →
(f'' : X P (n + 3) ⟶ X Q (n + 2)) ×' Hom.f e (n + 2) = p.snd.fst ≫ d Q (n + 1) (n + 2) + d P (n + 2) (n + 3) ≫ f''
i : ℕ
⊢ i + 1 = ComplexShape.next (ComplexShape.up ℕ) i
[PROOFSTEP]
simp only [CochainComplex.next]
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D E : HomologicalComplex V c
f g : C✝ ⟶ D
h k : D ⟶ E
i : ι
C : HomologicalComplex V c
⊢ 𝟙 C ≫ 𝟙 C = 𝟙 C
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D E : HomologicalComplex V c
f g : C✝ ⟶ D
h k : D ⟶ E
i : ι
C : HomologicalComplex V c
⊢ 𝟙 C ≫ 𝟙 C = 𝟙 C
[PROOFSTEP]
simp
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E✝ : HomologicalComplex V c
f✝ g✝ : C✝ ⟶ D✝
h k : D✝ ⟶ E✝
i : ι
C D E : HomologicalComplex V c
f : HomotopyEquiv C D
g : HomotopyEquiv D E
⊢ Homotopy ((f.hom ≫ g.hom) ≫ g.inv ≫ f.inv) (𝟙 C)
[PROOFSTEP]
simpa using ((g.homotopyHomInvId.compRightId f.inv).compLeft f.hom).trans f.homotopyHomInvId
[GOAL]
ι : Type u_1
V : Type u
inst✝¹ : Category.{v, u} V
inst✝ : Preadditive V
c : ComplexShape ι
C✝ D✝ E✝ : HomologicalComplex V c
f✝ g✝ : C✝ ⟶ D✝
h k : D✝ ⟶ E✝
i : ι
C D E : HomologicalComplex V c
f : HomotopyEquiv C D
g : HomotopyEquiv D E
⊢ Homotopy ((g.inv ≫ f.inv) ≫ f.hom ≫ g.hom) (𝟙 E)
[PROOFSTEP]
simpa using ((f.homotopyInvHomId.compRightId g.hom).compLeft g.inv).trans g.homotopyInvHomId
[GOAL]
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ (homologyFunctor V c i).map f = (homologyFunctor V c i).map g
[PROOFSTEP]
dsimp [homologyFunctor]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo f i) (Hom.sqFrom f i)
(_ : (Hom.sqTo f i).right = (Hom.sqTo f i).right) =
homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo g i) (Hom.sqFrom g i)
(_ : (Hom.sqTo g i).right = (Hom.sqTo g i).right)
[PROOFSTEP]
apply eq_of_sub_eq_zero
[GOAL]
case h
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo f i) (Hom.sqFrom f i)
(_ : (Hom.sqTo f i).right = (Hom.sqTo f i).right) -
homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo g i) (Hom.sqFrom g i)
(_ : (Hom.sqTo g i).right = (Hom.sqTo g i).right) =
0
[PROOFSTEP]
ext
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ homology.π (dTo C i) (dFrom C i) (_ : dTo C i ≫ dFrom C i = 0) ≫
(homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo f i) (Hom.sqFrom f i)
(_ : (Hom.sqTo f i).right = (Hom.sqTo f i).right) -
homology.map (_ : dTo C i ≫ dFrom C i = 0) (_ : dTo D i ≫ dFrom D i = 0) (Hom.sqTo g i) (Hom.sqFrom g i)
(_ : (Hom.sqTo g i).right = (Hom.sqTo g i).right)) =
homology.π (dTo C i) (dFrom C i) (_ : dTo C i ≫ dFrom C i = 0) ≫ 0
[PROOFSTEP]
simp only [homology.π_map, comp_zero, Preadditive.comp_sub]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ kernelSubobjectMap (Hom.sqFrom f i) ≫ homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) -
kernelSubobjectMap (Hom.sqFrom g i) ≫ homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
dsimp [kernelSubobjectMap]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f f i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (Hom.sqFrom f i).left)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) -
Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (Hom.sqFrom g i).left)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
simp_rw [h.comm i]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.factorThru (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (↑(dNext i) h.hom + ↑(prevD i) h.hom + Hom.f g i))
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (↑(dNext i) h.hom + ↑(prevD i) h.hom + Hom.f g i))) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) -
Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (Hom.sqFrom g i).left)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
simp only [zero_add, zero_comp, dNext_eq_dFrom_fromNext, kernelSubobject_arrow_comp_assoc, Preadditive.comp_add]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.factorThru (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom +
Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom +
Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) -
Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (Hom.sqFrom g i).left)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
rw [← Preadditive.sub_comp]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ (Subobject.factorThru (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom +
Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom +
Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)) -
Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ Hom.f g i)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ (Hom.sqFrom g i).left))) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
simp only [CategoryTheory.Subobject.factorThru_add_sub_factorThru_right]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.factorThru (kernelSubobject (dFrom D i)) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom)
(_ :
Subobject.Factors (kernelSubobject (dFrom D i))
(Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
erw [Subobject.factorThru_ofLE (D.boundaries_le_cycles i)]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ (Subobject.factorThru (boundaries D i) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom) ?h.p ≫
Subobject.ofLE (boundaries D i) (cycles D i) (_ : boundaries D i ≤ cycles D i)) ≫
homology.π (dTo D i) (dFrom D i) (_ : dTo D i ≫ dFrom D i = 0) =
0
[PROOFSTEP]
simp
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.Factors (boundaries D i) (Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(prevD i) h.hom)
[PROOFSTEP]
rw [prevD_eq_toPrev_dTo, ← Category.assoc]
[GOAL]
case h.p
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
h : Homotopy f g
i : ι
⊢ Subobject.Factors (boundaries D i) ((Subobject.arrow (kernelSubobject (dFrom C i)) ≫ ↑(toPrev i) h.hom) ≫ dTo D i)
[PROOFSTEP]
apply imageSubobject_factors_comp_self
[GOAL]
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
f : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor V c i).map f.hom ≫ (homologyFunctor V c i).map f.inv = 𝟙 ((homologyFunctor V c i).obj C)
[PROOFSTEP]
rw [← Functor.map_comp, homology_map_eq_of_homotopy f.homotopyHomInvId, CategoryTheory.Functor.map_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁵ : Category.{v, u} V
inst✝⁴ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g : C ⟶ D
h k : D ⟶ E
i✝ : ι
inst✝³ : HasEqualizers V
inst✝² : HasCokernels V
inst✝¹ : HasImages V
inst✝ : HasImageMaps V
f : HomotopyEquiv C D
i : ι
⊢ (homologyFunctor V c i).map f.inv ≫ (homologyFunctor V c i).map f.hom = 𝟙 ((homologyFunctor V c i).obj D)
[PROOFSTEP]
rw [← Functor.map_comp, homology_map_eq_of_homotopy f.homotopyInvHomId, CategoryTheory.Functor.map_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.479401, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
f g : C ⟶ D
h : Homotopy f g
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ (fun i j => F.map (Homotopy.hom h i j)) i j = 0
[PROOFSTEP]
dsimp
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.479401, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
f g : C ⟶ D
h : Homotopy f g
i j : ι
w : ¬ComplexShape.Rel c j i
⊢ F.map (Homotopy.hom h i j) = 0
[PROOFSTEP]
rw [h.zero i j w, F.map_zero]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.479401, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
f g : C ⟶ D
h : Homotopy f g
i : ι
⊢ Hom.f ((mapHomologicalComplex F c).map f) i =
((↑(dNext i) fun i j => F.map (Homotopy.hom h i j)) + ↑(prevD i) fun i j => F.map (Homotopy.hom h i j)) +
Hom.f ((mapHomologicalComplex F c).map g) i
[PROOFSTEP]
have H := h.comm i
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.479401, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
f g : C ⟶ D
h : Homotopy f g
i : ι
H : Hom.f f i = ↑(dNext i) h.hom + ↑(prevD i) h.hom + Hom.f g i
⊢ Hom.f ((mapHomologicalComplex F c).map f) i =
((↑(dNext i) fun i j => F.map (Homotopy.hom h i j)) + ↑(prevD i) fun i j => F.map (Homotopy.hom h i j)) +
Hom.f ((mapHomologicalComplex F c).map g) i
[PROOFSTEP]
dsimp [dNext, prevD] at H ⊢
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f✝ g✝ : C ⟶ D
h✝ k : D ⟶ E
i✝ : ι
W : Type u_2
inst✝² : Category.{?u.479401, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
f g : C ⟶ D
h : Homotopy f g
i : ι
H :
Hom.f f i =
d C i (ComplexShape.next c i) ≫ Homotopy.hom h (ComplexShape.next c i) i +
Homotopy.hom h i (ComplexShape.prev c i) ≫ d D (ComplexShape.prev c i) i +
Hom.f g i
⊢ F.map (Hom.f f i) =
F.map (d C i (ComplexShape.next c i)) ≫ F.map (Homotopy.hom h (ComplexShape.next c i) i) +
F.map (Homotopy.hom h i (ComplexShape.prev c i)) ≫ F.map (d D (ComplexShape.prev c i) i) +
F.map (Hom.f g i)
[PROOFSTEP]
simp [H]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{?u.486182, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
h : HomotopyEquiv C D
⊢ Homotopy ((mapHomologicalComplex F c).map h.hom ≫ (mapHomologicalComplex F c).map h.inv)
(𝟙 ((mapHomologicalComplex F c).obj C))
[PROOFSTEP]
rw [← (F.mapHomologicalComplex c).map_comp, ← (F.mapHomologicalComplex c).map_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{?u.486182, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
h : HomotopyEquiv C D
⊢ Homotopy ((mapHomologicalComplex F c).map (h.hom ≫ h.inv)) ((mapHomologicalComplex F c).map (𝟙 C))
[PROOFSTEP]
exact F.mapHomotopy h.homotopyHomInvId
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{?u.486182, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
h : HomotopyEquiv C D
⊢ Homotopy ((mapHomologicalComplex F c).map h.inv ≫ (mapHomologicalComplex F c).map h.hom)
(𝟙 ((mapHomologicalComplex F c).obj D))
[PROOFSTEP]
rw [← (F.mapHomologicalComplex c).map_comp, ← (F.mapHomologicalComplex c).map_id]
[GOAL]
ι : Type u_1
V : Type u
inst✝⁴ : Category.{v, u} V
inst✝³ : Preadditive V
c : ComplexShape ι
C D E : HomologicalComplex V c
f g : C ⟶ D
h✝ k : D ⟶ E
i : ι
W : Type u_2
inst✝² : Category.{?u.486182, u_2} W
inst✝¹ : Preadditive W
F : V ⥤ W
inst✝ : Additive F
h : HomotopyEquiv C D
⊢ Homotopy ((mapHomologicalComplex F c).map (h.inv ≫ h.hom)) ((mapHomologicalComplex F c).map (𝟙 D))
[PROOFSTEP]
exact F.mapHomotopy h.homotopyInvHomId
|
# install.packages("devtools")
# library(devtools)
# install_github("js229/Vennerable")
library(Vennerable)
library(data.table)   # fread() and data.table syntax are used below
source("code/gaf_tools.r")
source("code/obo_tools.r")
source("code/gen_utils.r")
source("code/get_robust.r")
source("code/get_nr_dataset.r")
datasets = fread("datasets.txt")
#obo="obo/go.obo"
all_data = apply(datasets,1,function(x){
infile = paste("nr_data/",x["file"],sep="")
print(infile)
read_gaf(infile)
})
all_gaf = do.call(rbind,all_data)
all_gaf = merge(all_gaf, datasets[,1:2,with=F],by.x = "assigned_by",by.y="dataset")
all_gaf
uniq_genes = all_gaf[,list(genes=list(unique(db_object_id))),by=Type]
tmp_out = as.list(uniq_genes$genes)
names(tmp_out) = uniq_genes$Type
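# tmp_out is now a named list of gene-ID vectors, one per annotation Type —
# the shape Venn() expects for its set input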
#vignette("Venn")
gene_gamer_venn <- Venn(tmp_out)
png("plots/gene_gamer_venn.png",width = 5,height = 5,units = "in",res = 600)
c3 = compute.Venn(gene_gamer_venn,type="circles",doEuler = F,doWeights = F)
gp = VennThemes(c3,colourAlgorithm = "sequential")
gp$Set$Set1$col = "#E41A1C"
gp$Set$Set2$col = "#E41A1C"
gp$Set$Set3$col = "#E41A1C"
gp$SetText$Set1$col = "#E41A1C"
gp$SetText$Set2$col = "#E41A1C"
gp$SetText$Set3$col = "#E41A1C"
plot(c3,gpList =gp, show = list(FaceText = "weight", SetLabels = T, Faces = F))
#plot(gene_gamer_venn,doWeights=F,show=list(DarkMatter = F,Faces=F))
dev.off()
comb_datasets = fread("comb_datasets.txt")
exist_datasets = fread("exist_datasets.txt")
maize_refset = fread("maize_v3.refset.txt",header = F)$V1
# counts genes absent from the maize v3 refset; run only after disc_data_dt is built below
# length(unique(disc_data_dt[!disc_data_dt$db_object_id %in% maize_refset]$db_object_id))
all_datasets = rbind(comb_datasets,exist_datasets)
disc_data = apply(all_datasets,1,function(x){
infile = paste("disc_data/",x["file"],sep="")
tmp_in = read_gaf(infile)
tmp_in$assigned_by = x["dataset"]
tmp_in
})
disc_data_dt = do.call(rbind,disc_data)
disc_data_dt = disc_data_dt[db_object_id %in% maize_refset]
disc_gaf = merge(disc_data_dt, all_datasets[,1:2,with=F],by.x = "assigned_by",by.y="dataset")
disc_gaf[assigned_by=="Aggregate"]$assigned_by = "maize-GAMER"
disc_uniq_genes = disc_gaf[,list(genes=list(unique(db_object_id))),by=assigned_by]
disc_uniq_list = as.list(disc_uniq_genes$genes)
names(disc_uniq_list) = disc_uniq_genes$assigned_by
disc_uniq_list[["Gramene"]][!disc_uniq_list[["Gramene"]] %in% disc_uniq_list[["maize-GAMER"]]]
length(unique(disc_gaf[assigned_by=="maize-GAMER"]$db_object_id))
length(unique(disc_gaf[assigned_by=="Gramene"]$db_object_id))
length(unique(disc_gaf[assigned_by=="Phytozome"]$db_object_id))
gene_disc_venn = Venn(disc_uniq_list)
png("plots/gene_disc_venn.png",width = 5,height = 5,units = "in",res = 600)
#plot(gene_disc_venn,doWeights=F,show=list(DarkMatter = F,Faces=F,colourAlgorithm="signature"))
c3 = compute.Venn(gene_disc_venn,type="circles",doEuler = F,doWeights = F)
gp = VennThemes(c3,colourAlgorithm = "sequential")
plot(c3,gpList =gp, show = list(FaceText = "weight", SetLabels = T, Faces = F))
dev.off()
|
function [obj, objGrad] = pathObjective(u)
% [obj, objGrad] = pathObjective(u)
%
% Computes the objective function (and gradients) for the simple pendulum
%
obj = u.^2;
if nargout == 2 % Analytic gradients
nTime = length(u);
objGrad = zeros(4,nTime); %4 = [time + angle + rate + torque];
objGrad(4,:) = 2*u; %gradient obj wrt u
end
end
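% Example (a minimal sketch; assumes u is a row vector of torques):
%   u = linspace(-1, 1, 11);
%   [obj, objGrad] = pathObjective(u);  % obj = u.^2; objGrad is 4 x 11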
|
# changes from r_complete_data2.r = using log price instead of price
library(ggplot2)
library(gridExtra)
library(corrplot)
library(randomForest)
library(pROC)
library(caret)
library(Metrics)
library(forecast)
library(rpart)
library(rpart.plot)
library(rattle)
library(glmnet)
library(xgboost)
library(caretEnsemble)
library(gbm)
#set the working directory
setwd("C:/Users/evanf/Documents/Smurfit/Capstone/August 2018")
# my data is saved as datax
datax <- read.csv("houseprice_complete_data_numeric_county4.csv")
colnames(datax)[1] <- "link"
names(datax)
# Just use the complete columns for now
datax <- subset(datax, select = - c(link, address, agent, facilities, features, description, overview, date, views, first_date, first_price))
datax <- subset(datax, select = - c(newAgent, agent1, agent2, town1, town2, town3, town))
# check that it changed
names(datax)
# check for any missing values in the data
apply(is.na(datax), 2, sum)
# check for any duplicate rows
sum(duplicated(datax))
# attach the data
attach(datax)
# detach(datax)
####DATA EXPLORATION####
# table1 <- table(datax$county)
# table1
table(datax$dwelling_type)
# table(datax$town)
# table(datax$bedrooms)
# table(datax$bathrooms)
# table(datax$price)
# table(datax$north_south)
table3 <- table(datax$newTown)
table4 <- table(datax$ber)
table3
# table5 <- table(datax$county, datax$price)
# table5
table1 <- tapply(datax$price, datax$newCounty, mean)
table2 <- tapply(datax$price_per_m2, datax$newCounty, mean)
# write.csv(table1, file = "avg_price.csv")
# write.csv(table2, file = "avg_price_m2.csv")
write.csv(table3, file = "town_frequency.csv")
# write.csv(table4, file = "characters4.csv")
table1
# Change columns to Numeric
datax$price = as.numeric(as.character(datax$price))
datax$price_per_m2 = as.numeric(as.character(datax$price_per_m2))
datax$longitude = as.numeric(as.character(datax$longitude))
datax$latitude = as.numeric(as.character(datax$latitude))
datax$bedrooms = as.numeric(as.character(datax$bedrooms))
datax$bathrooms = as.numeric(as.character(datax$bathrooms))
datax$area = as.numeric(as.character(datax$area))
datax$nearst_luas = as.numeric(as.character(datax$nearst_luas))
datax$Dist_to_coast = as.numeric(as.character(datax$Dist_to_coast))
datax$Dist_to_city = as.numeric(as.character(datax$Dist_to_city))
datax$Dist_to_dart = as.numeric(as.character(datax$Dist_to_dart))
datax$Dist_to_busstop = as.numeric(as.character(datax$Dist_to_busstop))
# datax$text1 = as.numeric(as.character(datax$text1))
# datax$text2 = as.numeric(as.character(datax$text2))
# datax$text3 = as.numeric(as.character(datax$text3))
# datax$text4 = as.numeric(as.character(datax$text4))
# datax$text5 = as.numeric(as.character(datax$text5))
# datax$text6 = as.numeric(as.character(datax$text6))
datax$text7 = as.numeric(as.character(datax$text7))
datax$text8 = as.numeric(as.character(datax$text8))
datax$text9 = as.numeric(as.character(datax$text9))
datax$text10 = as.numeric(as.character(datax$text10))
datax$text11 = as.numeric(as.character(datax$text11))
datax$text12 = as.numeric(as.character(datax$text12))
datax$text13 = as.numeric(as.character(datax$text13))
datax$text14 = as.numeric(as.character(datax$text14))
# datax$text15 = as.numeric(as.character(datax$text15))
# datax$text16 = as.numeric(as.character(datax$text16))
datax$text17 = as.numeric(as.character(datax$text17))
# datax$text18 = as.numeric(as.character(datax$text18))
datax$text19 = as.numeric(as.character(datax$text19))
datax$text20 = as.numeric(as.character(datax$text20))
# datax$text21 = as.numeric(as.character(datax$text21))
datax$text22 = as.numeric(as.character(datax$text22))
datax$text23 = as.numeric(as.character(datax$text23))
# Change columns to factor
datax$dwelling_type <- as.factor(datax$dwelling_type)
datax$county <- as.factor(datax$county)
# datax$town <- as.factor(datax$town)
# datax$town1 <- as.factor(datax$town1)
# datax$town2 <- as.factor(datax$town2)
# datax$town3 <- as.factor(datax$town3)
datax$newTown <- as.factor(datax$newTown)
datax$ber <- as.factor(datax$ber)
datax$ber_new <- as.factor(datax$ber_new)
datax$newCounty <- as.factor(datax$newCounty)
datax$Luas_Factors <- as.factor(datax$Luas_Factors)
datax$Dart_Factors <- as.factor(datax$Dart_Factors)
datax$Bus_Factors <- as.factor(datax$Bus_Factors)
# Create new columns for logs of price and price per metre squared
datax$log_price_m2 <- log(datax$price_per_m2)
datax$log_price <- log(datax$price)
datax$bath_sq <- (datax$bathrooms_centre)^2
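# log transforms compress the right-skewed price distributions (see histograms below);
# bath_sq adds a quadratic term so the bathroom effect can level off at higher counts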
# round the distances for analysis
datax$coast_round <- round(datax$Dist_to_coast, digits = 0)
datax$city_round <- round(datax$Dist_to_city, digits = 0)
datax$dart_round <- round(datax$Dist_to_dart, digits = 0)
datax$luas_round <- round(datax$nearst_luas, digits = 0)
datax$bus_round <- round(datax$Dist_to_busstop, digits = 1)
datax$coast_round
#### BIT OF ANALYSIS ON ASKING PRICE v LOG PRICE etc ####
# Draw a histogram to show the distribution of Asking Price
options(scipen=10000)
graph1 <- ggplot(datax, aes(x = price, fill = ..count..)) +
geom_histogram(binwidth = 20000) +
ggtitle("Histogram of Asking Price") +
ylab("Count of Houses") +
xlab("Asking Price") +
theme(plot.title = element_text(hjust = 0.5))
# Draw a histogram to show the distribution of Log Asking Price
options(scipen=10000)
graph2 <- ggplot(datax, aes(x = log_price, fill = ..count..)) +
geom_histogram(binwidth = 0.1) +
ggtitle("Histogram of Log Asking Price") +
ylab("Count of Houses") +
xlab("Log Asking Price") +
theme(plot.title = element_text(hjust = 0.5))
# Draw a histogram to show the distribution of Price Per Metre Squared
options(scipen=10000)
graph3 <- ggplot(datax, aes(x = price_per_m2, fill = ..count..)) +
geom_histogram(binwidth = 100) +
ggtitle("Histogram of Price Per Metre Squared") +
ylab("Count of Houses") +
xlab("Price Per Metre Squared") +
theme(plot.title = element_text(hjust = 0.5))
# Draw a histogram to show the distribution of Log Price Per Metre Squared
options(scipen=10000)
graph4 <- ggplot(datax, aes(x = log_price_m2, fill = ..count..)) +
geom_histogram(binwidth = 0.05) +
ggtitle("Histogram of Log Price Per Metre Squared") +
ylab("Count of Houses") +
xlab("Log Price Per Metre Squared") +
theme(plot.title = element_text(hjust = 0.5))
graph1
graph2
graph3
graph4
grid1 <- grid.arrange(graph1, graph2, ncol = 2)
grid2 <- grid.arrange(graph3, graph4, ncol = 2)
##################################################
datad <- datax[datax$price <= 1500000,]
# histogram of housing price by Dwelling Type
ggplot(datad, aes(price)) +
geom_histogram(aes(fill = dwelling_type), position = position_stack(reverse = TRUE), binwidth = 20000) +
  coord_flip() + ggtitle("Histogram of Asking Price (< €1.5m) for Dwelling Types") +
ylab("Count") +
xlab("Housing Price") +
theme(plot.title = element_text(hjust = 0.5),legend.position=c(0.8,0.75), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
# histogram of housing price by Number of Bedrooms
ggplot(datad, aes(x = price,fill = as.factor(bedrooms))) +
geom_histogram(position = "stack", binwidth = 20000) +
ggtitle("Histogram of Asking Price by Number of Bedrooms") +
ylab("Count") +
xlab("Asking Price") +
scale_fill_discrete(name="Bedrooms")+
theme(plot.title = element_text(hjust = 0.5), legend.position=c(0.9,0.8), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
# histogram of housing price by North_South
ggplot(datad, aes(x = price,fill = as.factor(north_south))) +
geom_histogram(position = "stack", binwidth = 20000) +
ggtitle("Histogram of Asking Price by North or South Dublin") +
ylab("Count") +
xlab("Asking Price") +
scale_fill_discrete(name="Bedrooms")+
theme(plot.title = element_text(hjust = 0.5), legend.position=c(0.9,0.9), legend.background = element_rect(fill="grey90",
size=0.5, linetype="solid",
colour ="black"))
########################################################
#### Check which dependent variable is best to use #####
########################################################
model2 <- lm(datax$price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber)
summary(model2)
#
model3 <- lm(datax$log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber)
summary(model3)
#
model4 <- lm(datax$price_per_m2 ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber)
summary(model4)
#
model5 <- lm(datax$log_price_m2 ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber)
summary(model5)
# price_hat_model1 <- exp(predict(model1,newdata=datax))
price_hat_model2 <- predict(model2,newdata=datax)
price_hat_model3 <- exp(predict(model3,newdata=datax))
price_hat_model4 <- predict(model4,newdata=datax)*area
price_hat_model5 <- exp(predict(model5,newdata=datax))*area
# price_hat_model6 <- exp(predict(model6,newdata=datax))
#
#output <- (cbind("ID"=datax$longitude,"Orginal Price"=datax$price,"Model1 Predict"=price_hat_model1))
output <- (cbind("ID"=datax$longitude,"Orginal Price"=datax$price,"Model2 Predict"=price_hat_model2,
"Model3 Predict"=price_hat_model3, "Model4 Predict"=price_hat_model4,
"Model5 Predict"=price_hat_model5))
write.csv(output, file = "models_price_v_predicted_y.csv", row.names=FALSE)
#####################################################################
#### Check if new changes to county have made a positive change #####
model21 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + county + newTown + ber)
summary(model21)
# change to newCounty
model22 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + newTown + ber)
summary(model22)
model23 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + county)
summary(model23)
# change to newCounty
model24 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty)
summary(model24)
# price_hat_model1 <- exp(predict(model1,newdata=datax))
price_hat_model21 <- exp(predict(model21,newdata=datax))
price_hat_model22 <- exp(predict(model22,newdata=datax))
price_hat_model23 <- exp(predict(model23,newdata=datax))
price_hat_model24 <- exp(predict(model24,newdata=datax))
output <- (cbind("ID"=datax$longitude,"Orginal Price"=datax$price,"Model21 Predict"=price_hat_model21,
"Model22 Predict"=price_hat_model22, "Model23 Predict"=price_hat_model23, "Model24 Predict"=price_hat_model24))
write.csv(output, file = "models_price_v_predicted_county.csv", row.names=FALSE)
prediction21 <- predict(model21, datax, type="response")
rmse(datax$log_price, prediction21)
prediction22 <- predict(model22, datax, type="response")
rmse(datax$log_price, prediction22)
prediction23 <- predict(model23, datax, type="response")
rmse(datax$log_price, prediction23)
prediction24 <- predict(model24, datax, type="response")
rmse(datax$log_price, prediction24)
####################################################################################
#### Check if new changes to lat & lon coordinates have made a positive change #####
# without any coordinate parameters
model51 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty)
summary(model51)
# add in normal lat & lon
model52 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + latitude + longitude)
summary(model52)
# use the 50 separate lat & lon values instead
model53 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty
+ Lat.Co..Dublin.North + Lat.Co..Dublin.South + Lat.Co..Dublin.West + Lat.Dublin.1 + Lat.Dublin.10 + Lat.Dublin.11
+ Lat.Dublin.12 + Lat.Dublin.13 + Lat.Dublin.14 + Lat.Dublin.15 + Lat.Dublin.16 + Lat.Dublin.17 + Lat.Dublin.18
+ Lat.Dublin.2 + Lat.Dublin.20 + Lat.Dublin.22 + Lat.Dublin.24 + Lat.Dublin.3 + Lat.Dublin.4 + Lat.Dublin.5
+ Lat.Dublin.6 + Lat.Dublin.6W + Lat.Dublin.7 + Lat.Dublin.8 + Lat.Dublin.9
+ Long.Co..Dublin.North + Long.Co..Dublin.South + Long.Co..Dublin.West + Long.Dublin.1 + Long.Dublin.10 + Long.Dublin.11
+ Long.Dublin.12 + Long.Dublin.13 + Long.Dublin.14 + Long.Dublin.15 + Long.Dublin.16 + Long.Dublin.17 + Long.Dublin.18
+ Long.Dublin.2 + Long.Dublin.20 + Long.Dublin.22 + Long.Dublin.24 + Long.Dublin.3 + Long.Dublin.4 + Long.Dublin.5
+ Long.Dublin.6 + Long.Dublin.6W + Long.Dublin.7 + Long.Dublin.8 + Long.Dublin.9)
summary(model53)
# add town to model 51
model54 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + newTown)
summary(model54)
# add town to model 52
model55 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + newTown + latitude + longitude)
summary(model55)
# add town to model 53
model56 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + newTown
+ Lat.Co..Dublin.North + Lat.Co..Dublin.South + Lat.Co..Dublin.West + Lat.Dublin.1 + Lat.Dublin.10 + Lat.Dublin.11
+ Lat.Dublin.12 + Lat.Dublin.13 + Lat.Dublin.14 + Lat.Dublin.15 + Lat.Dublin.16 + Lat.Dublin.17 + Lat.Dublin.18
+ Lat.Dublin.2 + Lat.Dublin.20 + Lat.Dublin.22 + Lat.Dublin.24 + Lat.Dublin.3 + Lat.Dublin.4 + Lat.Dublin.5
+ Lat.Dublin.6 + Lat.Dublin.6W + Lat.Dublin.7 + Lat.Dublin.8 + Lat.Dublin.9
+ Long.Co..Dublin.North + Long.Co..Dublin.South + Long.Co..Dublin.West + Long.Dublin.1 + Long.Dublin.10 + Long.Dublin.11
+ Long.Dublin.12 + Long.Dublin.13 + Long.Dublin.14 + Long.Dublin.15 + Long.Dublin.16 + Long.Dublin.17 + Long.Dublin.18
+ Long.Dublin.2 + Long.Dublin.20 + Long.Dublin.22 + Long.Dublin.24 + Long.Dublin.3 + Long.Dublin.4 + Long.Dublin.5
+ Long.Dublin.6 + Long.Dublin.6W + Long.Dublin.7 + Long.Dublin.8 + Long.Dublin.9)
summary(model56)
# Check RMSE values for above models
prediction51 <- predict(model51, datax, type="response")
rmse(datax$log_price, prediction51)
# 0.236312
prediction52 <- predict(model52, datax, type="response")
rmse(datax$log_price, prediction52)
# 0.2218311
prediction53 <- predict(model53, datax, type="response")
rmse(datax$log_price, prediction53)
# 0.1984393
prediction54 <- predict(model54, datax, type="response")
rmse(datax$log_price, prediction54)
# 0.1777698
prediction55 <- predict(model55, datax, type="response")
rmse(datax$log_price, prediction55)
# 0.1770397
prediction56 <- predict(model56, datax, type="response")
rmse(datax$log_price, prediction56)
# 0.1703569
#####################################################################
#### Check if new changes to distances have made a positive change #####
# without any distance parameters
model41 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty)
summary(model41)
# add in distance to coast and city
model42 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + Dist_to_coast + Dist_to_city)
summary(model42)
# add in distance to transport by numeric values
model43 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + Dist_to_coast + Dist_to_city
+ nearst_luas + Dist_to_dart + Dist_to_busstop)
summary(model43)
# add in distance to transport by factors
model44 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty + Dist_to_coast + Dist_to_city
+ Luas_Factors + Dart_Factors + Bus_Factors)
summary(model44)
# remove coast and city for below 2 models
model45 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty
+ nearst_luas + Dist_to_dart + Dist_to_busstop)
summary(model45)
# add in distance to transport by factors
model46 <- lm(datax$log_price ~ bedrooms + bathrooms + area + dwelling_type + newCounty
+ Luas_Factors + Dart_Factors + Bus_Factors)
summary(model46)
# Check RMSE values for above models
prediction41 <- predict(model41, datax, type="response")
rmse(datax$log_price, prediction41)
# 0.236312
prediction42 <- predict(model42, datax, type="response")
rmse(datax$log_price, prediction42)
# 0.2193726
prediction43 <- predict(model43, datax, type="response")
rmse(datax$log_price, prediction43)
# 0.2192287
prediction44 <- predict(model44, datax, type="response")
rmse(datax$log_price, prediction44)
# 0.2175744
prediction45 <- predict(model45, datax, type="response")
rmse(datax$log_price, prediction45)
# 0.2221335
prediction46 <- predict(model46, datax, type="response")
rmse(datax$log_price, prediction46)
# 0.233845
# price_hat_model1 <- exp(predict(model1,newdata=datax))
price_hat_model41 <- exp(predict(model41,newdata=datax))
price_hat_model42 <- exp(predict(model42,newdata=datax))
output <- (cbind("ID"=datax$longitude,"Orginal Price"=datax$price,"Model41 Predict"=price_hat_model41,
"Model42 Predict"=price_hat_model42))
write.csv(output, file = "models_price_v_predicted_distances.csv", row.names=FALSE)
########################################################
####### Asking Price v Sales Price Analysis ############
datasales <- read.csv("ppr_analysis.csv")
names(datasales)
colnames(datasales)[1] <- "ID"
model31 <- lm(datasales$Sale_Price ~ datasales$Asking_Price)
summary(model31)
## Intercept = 2.515e+04 (25,150); Asking Price coefficient = 9.651e-01 (0.9651)
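## e.g. for a €400,000 asking price the fitted line implies a sale price of about
## 25,150 + 0.9651 * 400,000 ≈ €411,190 (an illustrative reading, not a new result)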
#########################################################
# ### plot the asking prices vs predicted price ###
# datay <- read.csv("models_price_v_predicted.csv")
#
# # column 8 is for model 6 which we use here as the predicted price
# names(datay)
# colnames(datay)[1] <- "id"
# colnames(datay)[2] <- "askingprice"
# colnames(datay)[8] <- "predictedprice"
#
# datay <- subset(datay, select = c(id, askingprice, predictedprice))
# datay <- datay[datay$askingprice < 2500000,]
# # datay1 <- subset(datay, select = c(id, askingprice))
# # datay2 <- subset(datay, select = c(id, predictedprice))
#
# library(ggplot2)
# ggplot(datay, aes(id, askingprice)) + geom_jitter() +ggtitle("AskingPrice") + theme_light() + theme(plot.title = element_text(hjust = 0.5))
# ggplot(datay, aes(id, predictedprice), color = red) + geom_jitter() +ggtitle("PredictedPrice") + theme_light() + theme(plot.title = element_text(hjust = 0.5))
#
# ggplot(datay, aes(askingprice, predictedprice), color = 'black') + geom_jitter() + ggtitle("Asking v Predicted Price (Properties < €2.5m)") + theme_light() + theme(plot.title = element_text(hjust = 0.5)) + geom_abline(slope = 1, intercept = 0, color = 'red') + geom_abline(slope = 1.05, intercept = 0, color = 'blue') + geom_abline(slope = 0.95, intercept = 0, color = 'blue')
#################################################
names(datax)
datay <- datax[datax$price <= 1200000,]
datay <- subset(datay, select = - c(bedsFact, bathsFact, bedrooms, bathrooms, price_per_m2, log_price_m2, north_south, county, latitude,
longitude, Luas_Factors, Dart_Factors, Bus_Factors, bedsFact, bathsFact))
datam <- datay
head(datam)
# datam <- datam[sample(nrow(datam)), ]  # shuffle rows
names(datam)
##### Dashboard #####
#### Properties < €1.2m #####
datam <- datam[c(1:3, 5:27, 269:270)]
names(datam)
datam <- datam[c(1:7, 13:28)]
mlx <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown
+ ber + text7 + text8 + text9 + text10 + text11 + text12 + text13 + text14 + text16 + text17
+ text19 + text20 + text22 + text23, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mlx)
prediction_excel1 <- predict(mlx, datam, type="response")
rmse(datam$log_price, prediction_excel1)
#[1] 0.1528069
# Predictions
prediction_model1_excel <- exp(prediction_excel1)
output_excel1 <- (cbind("Orginal Price"=exp(datam$log_price), "Model1"=prediction_model1_excel))
write.csv(output_excel1, file = "models_price_v_predicted_excel1.csv", row.names=FALSE)
##### Dashboard Inputs #####
##### All properties #####
names(datax)
datay <- datax
datay <- subset(datay, select = - c(bedsFact, bathsFact, bedrooms, bathrooms, price_per_m2, log_price_m2, north_south, county, latitude,
longitude, Luas_Factors, Dart_Factors, Bus_Factors, bedsFact, bathsFact))
datam <- datay
names(datam)
datam <- datam[c(2:3, 5:21, 23:27, 219:221)]
names(datam)
datam <- datam[c(1:6, 12:27)]
mly <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown
+ ber_new + text7 + text8 + text9 + text10 + text11 + text12 + text13 + text14 + text17
+ text19 + text20 + text22 + text23, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mly)
prediction_excel2 <- predict(mly, datam, type="response")
rmse(datam$log_price, prediction_excel2)
#[1] 0.1723541
confint(mly)
# Predictions
prediction_model2_excel <- exp(prediction_excel2)
output_excel <- (cbind("Orginal Price"=exp(datam$log_price), "Model2"=prediction_model2_excel))
write.csv(output_excel, file = "models_price_v_predicted_excel2.csv", row.names=FALSE)
# lincomb = caret::findLinearCombos(datam)
# lapply(lincomb$linearCombos, function(x) colnames(df)[x])
###### End of analysis for Excel ########
##### Effect of Postcode #####
#All properties
datay <- datax
datay <- subset(datay, select = - c(bedsFact, bathsFact, bedrooms, bathrooms, price_per_m2, log_price_m2, north_south, county, latitude,
longitude, Luas_Factors, Dart_Factors, Bus_Factors, bedsFact, bathsFact))
datam <- datay
datam <- datam[c(1:3, 5:26, 200, 209:210)]
names(datam)
datam <- datam[c(1:7, 13:28)]
datam <- datam[c(2:24)]
mlp <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mlp)
confint(mlp)
##### Effect of Town #####
# All properties
mlt <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newTown, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mlt)
##### Effect of BER #####
# All properties
mlb <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + ber, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mlb)
##### Effect of BER New #####
# All properties
mlb2 <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber_new, data = datam)
getOption("max.print")
options(max.print = 2000)
summary(mlb2)
confint(mlb2)
##### Quadratic Effect for Bathrooms#####
mlr4 <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + dwelling_type + newCounty + newTown + ber, data = datam)
summary(mlr4)
mlr5 <-lm(formula = log_price ~ area + bedrooms_centre + bathrooms_centre + bath_sq + dwelling_type + newCounty + newTown + ber, data = datam)
summary(mlr5)
confint(mlr5)
ci1 <- predict(mlr4, datam, interval = "confidence")
write.csv(ci1, file = "confidence_intervals.csv", row.names=FALSE)
##############################################################
##############################################################
##############################################################
##############################################################
#####Spatial Variables Analysis####
###plot price vs dist to coast
plot(Dist_to_coast, datax$log_price, main = "Dist to Coast vs. Log Asking Price", xlab = "Dist to Coast", ylab = "Log Asking Price")
abline(lm(log_price ~ Dist_to_coast, data = datax), col = "red", lwd = 3)
###plot price vs dist to coast (rounded)
line1 <- ggplot(datax, aes(x=coast_round, y=log_price)) + stat_summary(fun.y="mean", geom="line", color = "red") + xlab("Distance to Coast") + ggtitle("Log Price by Distance to Coast") + theme(plot.title = element_text(hjust = 0.5))
line11 <- ggplot(datax, aes(x=coast_round, y=log_price_m2)) + stat_summary(fun.y="mean", geom="line", color = "red") + xlab("Distance to Coast") + ggtitle("Log Price per Metre Squared by Distance to Coast") + theme(plot.title = element_text(hjust = 0.5))
###plot price vs dist to city
plot(Dist_to_city, datax$log_price, main = "Dist to City vs. Log Asking Price", xlab = "Dist to City", ylab = "Log Asking Price")
abline(lm(log_price ~ Dist_to_city, data = datax), col = "red", lwd = 3)
###plot price vs dist to city (rounded)
line2 <- ggplot(datax, aes(x=city_round, y=log_price)) + stat_summary(fun.y="mean", geom="line", color = "blue") + xlab("Distance to City") + ggtitle("Log Price by Distance to City") + theme(plot.title = element_text(hjust = 0.5))
line12 <- ggplot(datax, aes(x=city_round, y=log_price_m2)) + stat_summary(fun.y="mean", geom="line", color = "blue") + xlab("Distance to City") + ggtitle("Log Price per Metre Squared by Distance to City") + theme(plot.title = element_text(hjust = 0.5))
###plot price vs dist to Luas
plot(nearst_luas, datax$log_price, main = "Dist to Luas vs. Log Asking Price", xlab = "Dist to Luas", ylab = "Log Asking Price")
abline(lm(log_price ~ nearst_luas, data = datax), col = "red", lwd = 3)
###plot price vs dist to Luas (rounded)
line3 <- ggplot(datax, aes(x=luas_round, y=log_price)) + stat_summary(fun.y="mean", geom="line", color = "green") + xlab("Distance to Luas") + ggtitle("Log Price by Distance to Luas") + theme(plot.title = element_text(hjust = 0.5))
line13 <- ggplot(datax, aes(x=luas_round, y=log_price_m2)) + stat_summary(fun.y="mean", geom="line", color = "green") + xlab("Distance to Luas") + ggtitle("Log Price Per Metre Squared by Distance to Luas") + theme(plot.title = element_text(hjust = 0.5))
###plot price vs dist to DART
plot(Dist_to_dart, datax$log_price, main = "Dist to DART vs. Log Asking Price", xlab = "Dist to DART", ylab = "Log Asking Price")
abline(lm(log_price ~ Dist_to_dart, data = datax), col = "red", lwd = 3)
###plot price vs dist to DART (rounded)
line4 <- ggplot(datax, aes(x=dart_round, y=log_price)) + stat_summary(fun.y="mean", geom="line", color = "orange") + xlab("Distance to DART") + ggtitle("Log Price by Distance to DART") + theme(plot.title = element_text(hjust = 0.5))
line14<- ggplot(datax, aes(x=dart_round, y=log_price_m2)) + stat_summary(fun.y="mean", geom="line", color = "orange") + xlab("Distance to DART") + ggtitle("Log Price Per Metre Squared by Distance to DART") + theme(plot.title = element_text(hjust = 0.5))
###plot price vs dist to bus
plot(Dist_to_busstop, datax$log_price, main = "Dist to bus vs. Log Asking Price", xlab = "Dist to Bus", ylab = "Log Asking Price")
abline(lm(log_price ~ Dist_to_busstop, data = datax), col = "red", lwd = 3)
###plot price vs dist to Bus (rounded)
line5 <- ggplot(datax, aes(x=bus_round, y=log_price)) + stat_summary(fun.y="mean", geom="line", color = "black") + xlab("Distance to Bus") + ggtitle("Log Price by Distance to Bus") + theme(plot.title = element_text(hjust = 0.5))
line15 <- ggplot(datax, aes(x=bus_round, y=log_price_m2)) + stat_summary(fun.y="mean", geom="line", color = "black") + xlab("Distance to Bus") + ggtitle("Log Price per Metre Squared by Distance to Bus") + theme(plot.title = element_text(hjust = 0.5))
library(grid)
library(gridExtra)
grid10 <- grid.arrange(line1, line2, line3, line4, line5, ncol = 2, top = textGrob("Distribution of Log Price By Spatial Variables",gp=gpar(fontsize=20,font=3)))
grid11 <- grid.arrange(line11, line12, line13, line14, line15, ncol = 2, top = textGrob("Distribution of Log Price Per Metre Squared By Spatial Variables",gp=gpar(fontsize=20,font=3)))
##############################################################
##############################################################
##############################################################
##############################################################
datay <- datax
datay <- subset(datay, select = - c(bedsFact, bathsFact, bedrooms, bathrooms, price_per_m2, log_price, north_south, county,
Luas_Factors, Dart_Factors, Bus_Factors, bedsFact, bathsFact))
datam <- datay
datam <- subset(datam, select = - c(Side...North, Side...South))
names(datam)
# Partition the data into a training set (=70%) and a test set (=30%)
split_data = function(datax, train, test) {
dataPartition = sample(2, nrow(datax), replace = TRUE, prob = c(train, test))
new_train <<- datax[dataPartition==1, ]
validation <<- datax[dataPartition==2, ]
}
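# sample() is random, so fixing a seed first makes the 70/30 split reproducible, e.g.:
# set.seed(123)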
split_data(datam, 0.7,0.3)
new_train
validation
names(new_train)
#### NON SPATIAL ####
#### USE THIS FOR REGRESSION ####
new_train1 <- new_train[c(2:3, 5:8, 16:28, 200, 210, 216)]
validation1 <- validation[c(2:3, 5:8, 16:28, 200, 210, 216)]
names(new_train1)
#### USE THIS FOR MACHINE LEARNING & NEURAL NETWORK ####
new_train2 <- subset(new_train, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, longitude,
latitude, nearst_luas, Dist_to_coast, Dist_to_city, Dist_to_dart, Dist_to_busstop, ber_new,
coast_round, city_round, dart_round, luas_round, bus_round, log_price_m2))
validation2 <- subset(validation, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, longitude,
latitude, nearst_luas, Dist_to_coast, Dist_to_city, Dist_to_dart, Dist_to_busstop, ber_new,
coast_round, city_round, dart_round, luas_round, bus_round, log_price_m2))
names(new_train2)
##(a).MLR
#First include all predictor variables to see what will happen
mlr <-lm(formula = log_price ~ bedrooms_centre + bathrooms_centre + bath_sq + area + dwelling_type + newCounty + ber_new , data = new_train1)
getOption("max.print")
options(max.print = 1000)
summary(mlr)
prediction1 <- predict(mlr,validation1, type="response")
rmse(validation1$log_price, prediction1)
#[1]
# calc.relimp(mlr, type = c("lmg"), rela = TRUE, rank = TRUE)
mlx <- lm(formula = log_price ~ ., data = new_train1) # "." already excludes the response
getOption("max.print")
options(max.print = 1000)
summary(mlx)
prediction2 <- predict(mlx,validation1, type="response")
rmse(validation1$log_price, prediction2)
#[1]
varImp(mlx)
## (3) Stepwise Linear Regression
Fitstart <- lm(formula = log_price ~ 1, data = new_train1)
FitAll <- lm(log_price ~ . ,data = new_train1)
formula(FitAll)
step(Fitstart, direction = "both",scope = formula(FitAll))
mlr <- lm(formula = log_price ~ area + newTown + dwelling_type +
            bedrooms_centre + newCounty + text9 + ber_new + bath_sq +
            bathrooms_centre + text10 + text22 + text14 + text20 + text13 +
            text19, data = new_train1)
getOption("max.print")
options(max.print = 1000)
summary(mlr)
prediction3 <- predict(mlr,validation1, type="response")
rmse(validation1$log_price, prediction3)
#[1]
# Variable Importance
install.packages("relaimpo")
library(relaimpo)
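# varImp() below comes from caret; relaimpo would be needed for the
# (commented-out) calc.relimp() importance decomposition.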
varImp(mlr)
## (4) Decision Trees
myformula <- log_price ~ .
modfit <- rpart(myformula, method="anova" , data = new_train1)
prediction4 <- predict(modfit,newdata = validation1)
rmse(validation1$log_price,prediction4)
# [1]
## (5) Random Forest
rf <- randomForest(log_price ~ ., data = new_train2, ntree=1000, proximity=TRUE)
varImpPlot(rf)
prediction5 <- predict(rf, newdata = validation2)
rmse(validation2$log_price, prediction5)
#[1]
## (6) Regularized Regression(Lasso)
names(new_train2)
all_predictors <- subset(new_train2,select = -c(log_price))
var_classes <- sapply(all_predictors,function(x)class(x))
num_classes <- var_classes[var_classes!="character"]
num_vars <- subset(new_train2,select=names(num_classes))
#corrplot(cor(num_vars),method="number")
#corrplot(cor(num_vars),method="circle")
#Building model
set.seed(325)
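# cv.glmnet cross-validates over a path of lambda values; predicting at
# s = "lambda.min" uses the penalty that minimised cross-validated error.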
lasso <-cv.glmnet(as.matrix(new_train2[, -197]), new_train2[, 197])
prediction6 <- predict(lasso, newx = as.matrix(validation2[, - 197]), s = "lambda.min")
rmse(validation2$log_price,prediction6)
#[1]
## (7) Gradient Boosting model(GBM)
set.seed(315)
cv.ctrl_gbm <- trainControl(method="repeatedcv",number=5,repeats = 5)
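# Repeated 5-fold CV (5 repeats); the single-row tuneGrid fixes the GBM
# hyperparameters rather than searching over them.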
gbm<- train(log_price ~ ., method = "gbm", metric = "RMSE", maximize = FALSE,
trControl =cv.ctrl_gbm, tuneGrid = expand.grid(n.trees = 700,
interaction.depth = 5, shrinkage = 0.05,
n.minobsinnode = 10), data = new_train2,verbose = FALSE)
varImp(gbm)
prediction7 <- predict(gbm,newdata = validation2)
rmse(validation2$log_price,prediction7)
#[1]
## (8) XGBOOST(Extreme Gradient Boosting)
# preparing matrix
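# as.matrix() must yield an all-numeric matrix for xgb.DMatrix, which is why
# the factor columns were dropped when building new_train2/validation2.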
dtrain <- xgb.DMatrix(data = as.matrix(new_train2[,-197]),label = as.matrix(new_train2$log_price))
dtest <- xgb.DMatrix(data = as.matrix(validation2[,-197]),label=as.matrix(validation2$log_price))
#Building model
set.seed(311)
xgb <- xgboost(booster="gbtree",data = dtrain, nfold = 5,nrounds = 2500, verbose = FALSE,
objective = "reg:linear", eval_metric = "rmse", nthread = 8, eta = 0.01,
gamma = 0.0468, max_depth = 6, min_child_weight = 1.41, subsample = 0.769, colsample_bytree =0.283)
mat <- xgb.importance (feature_names = colnames(dtrain),model = xgb)
xgb.plot.importance (importance_matrix = mat[1:20])
prediction8 <- predict(xgb,newdata = dtest)
rmse(validation2$log_price,prediction8)
#[1]
## (x) Simple Average RMSE of Random Forest + GBM + XGBoost (Top3 performing models)
rmse(validation2$log_price, (prediction5 + prediction7 + prediction8)/3)
#[1]
## (x) Weighted Average RMSE of Lasso+GBM+XGBoost
rmse(validation2$log_price, (0.1 *prediction5 + 0.3 *prediction7 + 0.6 *prediction8))
#[1]
## (9) Ensemble method
my_control <- trainControl(method="boot",number=5,savePredictions="final")
set.seed(321)
model_list <- caretList(
log_price ~ ., data=new_train2,
trControl=my_control,
metric="RMSE",
methodList=c("knn","glmnet"),
tuneList=list(
gbm=caretModelSpec(method="gbm", tuneGrid=expand.grid(n.trees = 700, interaction.depth = 5,
shrinkage = 0.05,n.minobsinnode = 10)),
xgbTree=caretModelSpec(method="xgbTree", tuneGrid=expand.grid(nrounds = 2500,max_depth = 6,min_child_weight=1.41,
eta =0.01,gamma = 0.0468,subsample=0.769,
colsample_bytree =0.283))
)
)
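# modelCor reports the correlation between base-learner resample predictions;
# weakly correlated learners are the ones worth blending.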
modelCor(resamples(model_list))
##Simple Blending
set.seed(333455)
greedy_ensemble <- caretEnsemble(model_list, metric="RMSE",trControl=trainControl(number=25))
greedy_ensemble
varImp(greedy_ensemble)
summary(greedy_ensemble)
prediction9 <- predict(greedy_ensemble,newdata = validation2)
rmse(validation2$log_price,prediction9)
# [1]
# (10) Using a "meta-model"
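# caretStack fits a random forest on the base learners' saved predictions
# instead of combining them with a linear blend.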
set.seed(317)
rf_ensemble <- caretStack(model_list,method="rf",metric="RMSE",
trControl=trainControl(method="boot",number=5,savePredictions="final"))
prediction10 <- predict(rf_ensemble,newdata = validation2)
rmse(validation2$log_price,prediction10)
# [1]
## (11) Simple Average RMSE of XGBoost+Ensemble+Meta-Model(Top3 performance models)
prediction11 <- (prediction8 + prediction9 + prediction10)/3
rmse(validation2$log_price, prediction11)
#[1]
## (12) Neural Network Non Spatial
datam <- subset(datam, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, longitude,
latitude, nearst_luas, Dist_to_coast, Dist_to_city, Dist_to_dart, Dist_to_busstop, ber_new))
UDF <- function(x) {
(x -min(x))/ (max(x)- min(x))
}
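# Min-max scaling to [0, 1]; neuralnet trains poorly on unscaled inputs, and
# the same min/max are reused below to map predictions back to the original scale.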
train_o <- datam
train <- as.data.frame(apply(train_o, 2, UDF))
index <- sample(nrow (train), round(0.70 * nrow(train)))
train.wp <- train[index,]
test.wp <- train[-index,]
##
#procValues <- preProcess(datam, method = c("center", "scale"))
#scaledTraindata <- predict(procValues, datatrain )
#scaledTestdata <- predict(procValues, datatest)
library(neuralnet)
allVars <- colnames(train)
predictorVars <- allVars[!allVars%in%"log_price"]
predictorVars <- paste(predictorVars, collapse = "+")
form = as.formula(paste("log_price~", predictorVars, collapse = "+"))
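# neuralnet needs the predictors spelled out explicitly; it does not expand
# the "." formula shorthand.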
# Prediction Model
nn_model <- neuralnet(formula = form, train.wp, hidden = c(2,1), linear.output = TRUE)
test.wp_1 <- subset(test.wp, select = -c(log_price))
prediction1 <- compute(nn_model, test.wp_1)
str(prediction1)
# UDF: Convert the scaled values to original
UDF_2 <- function(prediction) {
prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
}
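# UDF_2 is defined for reference only; the inverse transform is applied
# inline below.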
Prediction <- prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Prediction <- exp(Prediction)
Actual <- test.wp$log_price * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Actual <- exp(Actual)
table(Actual, Prediction)
submit.df <- data.frame(Real = Actual, PredictPrice = Prediction)
write.csv(submit.df, file = "Prediction_neural.csv", row.names = FALSE)
# RMSE results
rmse(validation2$log_price, prediction3) # [1] 0.179344382
rmse(validation2$log_price, prediction4) # [1] 0.2773309582
rmse(validation2$log_price, prediction5) # [1] 0.2060180447
rmse(validation2$log_price, prediction6) # [1] 0.1777638014
rmse(validation2$log_price, prediction7) # [1] 0.1771044259
rmse(validation2$log_price, prediction8) # [1] 0.1672389116
rmse(validation2$log_price, prediction9) # [1] 0.1660828578
rmse(validation2$log_price, prediction10) # [1] 0.1732218506
rmse(validation2$log_price, prediction11) # [1] 0.1650871085
# Predictions
prediction_model1 <- exp(prediction3)
prediction_model2 <- exp(prediction4)
prediction_model3 <- exp(prediction5)
prediction_model4 <- exp(prediction6)
prediction_model5 <- exp(prediction7)
prediction_model6 <- exp(prediction8)
prediction_model7 <- exp(prediction9)
prediction_model8 <- exp(prediction10)
prediction_model9 <- exp(prediction11)
output <- (cbind("ID"=validation$longitude,"Orginal Price"=exp(validation$log_price),"Model1"=prediction_model1,
"Model2"=prediction_model2, "Model3"=prediction_model3, "Model4"=prediction_model4, "Model5"=prediction_model5,
"Model6"=prediction_model6, "Model7"=prediction_model7, "Model8"=prediction_model8,
"Model9"=prediction_model9))
write.csv(output, file = "models_price_v_predicted_logp_normal.csv", row.names=FALSE)
#########################################
########## SPATIAL - Log Price ##############
#### USE THIS FOR REGRESSION ####
names(new_train)
new_train$log_price <- log(new_train$price)
validation$log_price <- log(validation$price)
train11 <- new_train[c(2:3, 5:28, 200, 210, 216)]
test11 <- validation[c(2:3, 5:28, 200, 210, 216)]
names(train11)
#### USE THIS FOR MACHINE LEARNING & NEURAL NETWORK ####
# datam <- subset(datay, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new))
train12 <- subset(new_train, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, log_price_m2, coast_round, city_round, dart_round, luas_round, bus_round))
test12 <- subset(validation, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, log_price_m2, coast_round, city_round, dart_round, luas_round, bus_round))
names(train12)
##(a).MLR
#First include all predictor variables to see what will happen
mlx <-lm(formula = log_price ~ . - newCounty, data = train11)
getOption("max.print")
options(max.print = 1000)
summary(mlx)
prediction2 <- predict(mlx,test11, type="response")
rmse(test11$log_price, prediction2)
#[1]
# calc.relimp(mlx, type = c("lmg"), rela = TRUE, rank = TRUE)
## (3) Stepwise Linear Regression
Fitstart <- lm(formula = log_price ~ 1, data = train11)
FitAll <- lm(log_price ~ . ,data = train11)
formula(FitAll)
step(Fitstart, direction = "both",scope = formula(FitAll))
mlr <- lm(formula = log_price ~ area + newTown + dwelling_type +
            bedrooms_centre + newCounty + text9 + ber_new + bath_sq +
            bathrooms_centre + text10 + text22 + text14 + text20 + Dist_to_city +
            text13 + latitude + text19, data = train11)
getOption("max.print")
options(max.print = 1000)
summary(mlr)
prediction13 <- predict(mlr,test11, type="response")
rmse(test11$log_price, prediction13)
#[1]
## (4) Decision Trees
myformula <- log_price ~ .
modfit <- rpart(myformula, method="anova" , data = train11)
prediction14 <- predict(modfit,newdata = test11)
rmse(test11$log_price,prediction14)
# [1]
## (5) Random Forest
rf <- randomForest(log_price ~ ., data = train12, ntree=1000, proximity=TRUE)
varImpPlot(rf)
prediction15 <- predict(rf, newdata = test12)
rmse(test12$log_price, prediction15)
#[1]
## (6) Regularized Regression(Lasso)
names(train12)
all_predictors <- subset(train12,select = -c(log_price))
var_classes <- sapply(all_predictors,function(x)class(x))
num_classes <- var_classes[var_classes!="character"]
num_vars <- subset(train12,select=names(num_classes))
#corrplot(cor(num_vars),method="number")
#corrplot(cor(num_vars),method="circle")
#Building model
set.seed(425)
lasso <-cv.glmnet(as.matrix(train12[, -204]), train12[, 204])
prediction16 <- predict(lasso, newx = as.matrix(test12[, - 204]), s = "lambda.min")
rmse(test12$log_price,prediction16)
#[1]
## (7) Gradient Boosting model(GBM)
set.seed(415)
cv.ctrl_gbm <- trainControl(method="repeatedcv",number=5,repeats = 5)
gbm<- train(log_price ~ ., method = "gbm", metric = "RMSE", maximize = FALSE,
trControl =cv.ctrl_gbm, tuneGrid = expand.grid(n.trees = 700,
interaction.depth = 5, shrinkage = 0.05,
n.minobsinnode = 10), data = train12,verbose = FALSE)
varImp(gbm)
prediction17 <- predict(gbm,newdata = test12)
rmse(test12$log_price,prediction17)
#[1]
## (8) XGBOOST(Extreme Gradient Boosting)
# preparing matrix
dtrain <- xgb.DMatrix(data = as.matrix(train12[,-204]),label = as.matrix(train12$log_price))
dtest <- xgb.DMatrix(data = as.matrix(test12[,-204]),label=as.matrix(test12$log_price))
#Building model
set.seed(411)
xgb <- xgboost(booster="gbtree",data = dtrain, nfold = 5,nrounds = 2500, verbose = FALSE,
objective = "reg:linear", eval_metric = "rmse", nthread = 8, eta = 0.01,
gamma = 0.0468, max_depth = 6, min_child_weight = 1.41, subsample = 0.769, colsample_bytree =0.283)
mat <- xgb.importance (feature_names = colnames(dtrain),model = xgb)
xgb.plot.importance (importance_matrix = mat[1:20])
prediction18 <- predict(xgb,newdata = dtest)
rmse(test12$log_price,prediction18)
#[1]
## (x) Simple Average RMSE of Random Forest + GBM + XGBoost (Top3 performing models)
rmse(test12$log_price, (prediction15 + prediction17 + prediction18)/3)
#[1]
## (x) Weighted Average RMSE of Lasso+GBM+XGBoost
rmse(test12$log_price, (0.1 *prediction15 + 0.3 *prediction17 + 0.6 *prediction18))
#[1]
## (9) Ensemble method
my_control <- trainControl(method="boot",number=5,savePredictions="final")
set.seed(421)
model_list <- caretList(
log_price ~ ., data=train12,
trControl=my_control,
metric="RMSE",
methodList=c("knn","glmnet"),
tuneList=list(
gbm=caretModelSpec(method="gbm", tuneGrid=expand.grid(n.trees = 700, interaction.depth = 5,
shrinkage = 0.05,n.minobsinnode = 10)),
xgbTree=caretModelSpec(method="xgbTree", tuneGrid=expand.grid(nrounds = 2500,max_depth = 6,min_child_weight=1.41,
eta =0.01,gamma = 0.0468,subsample=0.769,
colsample_bytree =0.283))
)
)
modelCor(resamples(model_list))
##Simple Blending
set.seed(433455)
greedy_ensemble <- caretEnsemble(model_list, metric="RMSE",trControl=trainControl(number=25))
greedy_ensemble
varImp(greedy_ensemble)
summary(greedy_ensemble)
prediction19 <- predict(greedy_ensemble,newdata = test12)
rmse(test12$log_price,prediction19)
# [1]
# (10) Using a "meta-model"
set.seed(417)
rf_ensemble <- caretStack(model_list,method="rf",metric="RMSE",
trControl=trainControl(method="boot",number=5,savePredictions="final"))
prediction20 <- predict(rf_ensemble,newdata = test12)
rmse(test12$log_price,prediction20)
# [1]
## (11) Simple Average RMSE of XGBoost+Ensemble+Meta-Model(Top3 performance models)
prediction21 <- (prediction18 + prediction19 + prediction20)/3
rmse(test12$log_price, prediction21)
#[1]
## (12) Neural Network
UDF <- function(x) {
(x -min(x))/ (max(x)- min(x))
}
train_o <- datam
train <- as.data.frame(apply(train_o, 2, UDF))
index <- sample(nrow (train), round(0.70 * nrow(train)))
train.wp <- train[index,]
test.wp <- train[-index,]
##
#procValues <- preProcess(datam, method = c("center", "scale"))
#scaledTraindata <- predict(procValues, datatrain )
#scaledTestdata <- predict(procValues, datatest)
library(neuralnet)
allVars <- colnames(train)
predictorVars <- allVars[!allVars%in%"log_price"]
predictorVars <- paste(predictorVars, collapse = "+")
form = as.formula(paste("log_price~", predictorVars, collapse = "+"))
# Prediction Model
nn_model <- neuralnet(formula = form, train.wp, hidden = c(2,1), linear.output = TRUE)
test.wp_1 <- subset(test.wp, select = -c(log_price))
prediction1 <- compute(nn_model, test.wp_1)
str(prediction1)
# UDF: Convert the scaled values to original
UDF_2 <- function(prediction) {
prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
}
Prediction <- prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Prediction <- exp(Prediction)
Actual <- test.wp$log_price * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Actual <- exp(Actual)
table(Actual, Prediction)
submit.df <- data.frame(Real = Actual, PredictPrice = Prediction)
write.csv(submit.df, file = "Prediction_neural_spatial.csv", row.names = FALSE)
# RMSE results
rmse(test12$log_price, prediction13) # [1] 0.178790619
rmse(test12$log_price, prediction14) # [1] 0.2807840408
rmse(test12$log_price, prediction15) # [1] 0.1736463843
rmse(test12$log_price, prediction16) # [1] 0.178019933
rmse(test12$log_price, prediction17) # [1] 0.1605449246
rmse(test12$log_price, prediction18) # [1] 0.1532523828
rmse(test12$log_price, prediction19) # [1] 0.1557409292
rmse(test12$log_price, prediction20) # [1] 0.1640881857
rmse(test12$log_price, prediction21) # [1] 0.1538401409
# rmse(test12$log_price, Prediction22) # [1]
# Predictions
prediction_model11 <- exp(prediction13)
prediction_model12 <- exp(prediction14)
prediction_model13 <- exp(prediction15)
prediction_model14 <- exp(prediction16)
prediction_model15 <- exp(prediction17)
prediction_model16 <- exp(prediction18)
prediction_model17 <- exp(prediction19)
prediction_model18 <- exp(prediction20)
prediction_model19 <- exp(prediction21)
# Prediction22 <- exp(Prediction22)
output <- (cbind("ID"=validation$longitude,"Orginal Price"=exp(validation$log_price),"Model1"=prediction_model11,
"Model2"=prediction_model12, "Model3"=prediction_model13, "Model4"=prediction_model14, "Model5"=prediction_model15,
"Model6"=prediction_model16, "Model7"=prediction_model17, "Model8"=prediction_model18,
"Model9"=prediction_model19))
write.csv(output, file = "models_price_v_predicted_logp_spatial.csv", row.names=FALSE)
#########################################
########## SPATIAL ##############
#### USING LOG PRICE PER METRE SQUARED ####
names(new_train)
train1 <- new_train[c(2:3, 5:28, 200, 209:210)]
test11 <- validation[c(2:3, 5:28, 200, 209:210)]
names(train1)
#### USE THIS FOR MACHINE LEARNING & NEURAL NETWORK ####
datam <- subset(datay, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, coast_round, city_round, dart_round, luas_round, bus_round))
train2 <- subset(new_train, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, coast_round, city_round, dart_round, luas_round, bus_round))
test2 <- subset(validation, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, coast_round, city_round, dart_round, luas_round, bus_round))
names(train2)
##(a).MLR
#First include all predictor variables to see what will happen
mlx <- lm(formula = log_price_m2 ~ ., data = train1) # "." already excludes the response
getOption("max.print")
options(max.print = 1000)
summary(mlx)
prediction2 <- predict(mlx,test11, type="response")
rmse(test11$log_price_m2, prediction2)
#[1]
## (3) Stepwise Linear Regression
Fitstart <- lm(formula = log_price_m2 ~ 1, data = train1)
FitAll <- lm(log_price_m2 ~ . ,data = train1)
formula(FitAll)
step(Fitstart, direction = "both",scope = formula(FitAll))
mlr <- lm(formula = log_price_m2 ~ newTown + dwelling_type +
            bedrooms_centre + Dist_to_dart + newCounty + text9 + text19 +
            text13 + bathrooms_centre + bath_sq + text22 + text10 + area +
            text17 + text23 + text7 + Dist_to_city + latitude + text14,
          data = train1)
getOption("max.print")
options(max.print = 1000)
summary(mlr)
prediction33 <- predict(mlr,test11, type="response")
rmse(test11$log_price_m2, prediction33)
#[1]
## (4) Decision Trees
myformula <- log_price_m2 ~ .
modfit <- rpart(myformula, method="anova" , data = train1)
prediction34 <- predict(modfit,newdata = test11)
rmse(test11$log_price_m2,prediction34)
# [1]
## (5) Random Forest
rf <- randomForest(log_price_m2 ~ ., data = train2, ntree=1000, proximity=TRUE)
varImpPlot(rf)
prediction35 <- predict(rf, newdata = test2)
rmse(test2$log_price_m2, prediction35)
#[1]
## (6) Regularized Regression(Lasso)
names(train2)
all_predictors <- subset(train2,select = -c(log_price_m2))
var_classes <- sapply(all_predictors,function(x)class(x))
num_classes <- var_classes[var_classes!="character"]
num_vars <- subset(train2,select=names(num_classes))
#corrplot(cor(num_vars),method="number")
#corrplot(cor(num_vars),method="circle")
#Building model
set.seed(525)
lasso <-cv.glmnet(as.matrix(train2[, -203]), train2[, 203])
prediction36 <- predict(lasso, newx = as.matrix(test2[, - 203]), s = "lambda.min")
rmse(test2$log_price_m2,prediction36)
#[1]
## (7) Gradient Boosting model(GBM)
set.seed(515)
cv.ctrl_gbm <- trainControl(method="repeatedcv",number=5,repeats = 5)
gbm<- train(log_price_m2 ~ ., method = "gbm", metric = "RMSE", maximize = FALSE,
trControl =cv.ctrl_gbm, tuneGrid = expand.grid(n.trees = 700,
interaction.depth = 5, shrinkage = 0.05,
n.minobsinnode = 10), data = train2,verbose = FALSE)
varImp(gbm)
prediction37 <- predict(gbm,newdata = test2)
rmse(test2$log_price_m2,prediction37)
#[1]
## (8) XGBOOST(Extreme Gradient Boosting)
# preparing matrix
dtrain <- xgb.DMatrix(data = as.matrix(train2[,-203]),label = as.matrix(train2$log_price_m2))
dtest <- xgb.DMatrix(data = as.matrix(test2[,-203]),label=as.matrix(test2$log_price_m2))
#Building model
set.seed(511)
xgb <- xgboost(booster="gbtree",data = dtrain, nfold = 5,nrounds = 2500, verbose = FALSE,
objective = "reg:linear", eval_metric = "rmse", nthread = 8, eta = 0.01,
gamma = 0.0468, max_depth = 6, min_child_weight = 1.41, subsample = 0.769, colsample_bytree =0.283)
mat <- xgb.importance (feature_names = colnames(dtrain),model = xgb)
xgb.plot.importance (importance_matrix = mat[1:20])
prediction38 <- predict(xgb,newdata = dtest)
rmse(test2$log_price_m2,prediction38)
#[1]
## (x) Simple Average RMSE of Random Forest + GBM + XGBoost (Top3 performing models)
rmse(test2$log_price_m2, (prediction35 + prediction37 + prediction38)/3)
#[1]
## (x) Weighted Average RMSE of Lasso+GBM+XGBoost
rmse(test2$log_price_m2, (0.1 *prediction35 + 0.3 *prediction37 + 0.6 *prediction38))
#[1]
## (9) Ensemble method
my_control <- trainControl(method="boot",number=5,savePredictions="final")
set.seed(521)
model_list <- caretList(
log_price_m2 ~ ., data=train2,
trControl=my_control,
metric="RMSE",
methodList=c("knn","glmnet"),
tuneList=list(
gbm=caretModelSpec(method="gbm", tuneGrid=expand.grid(n.trees = 700, interaction.depth = 5,
shrinkage = 0.05,n.minobsinnode = 10)),
xgbTree=caretModelSpec(method="xgbTree", tuneGrid=expand.grid(nrounds = 2500,max_depth = 6,min_child_weight=1.41,
eta =0.01,gamma = 0.0468,subsample=0.769,
colsample_bytree =0.283))
)
)
modelCor(resamples(model_list))
##Simple Blending
set.seed(533455)
greedy_ensemble <- caretEnsemble(model_list, metric="RMSE",trControl=trainControl(number=25))
greedy_ensemble
varImp(greedy_ensemble)
summary(greedy_ensemble)
prediction39 <- predict(greedy_ensemble,newdata = test2)
rmse(test2$log_price_m2,prediction39)
# [1]
# (10) Using a "meta-model"
set.seed(517)
rf_ensemble <- caretStack(model_list,method="rf",metric="RMSE",
trControl=trainControl(method="boot",number=5,savePredictions="final"))
prediction40 <- predict(rf_ensemble,newdata = test2)
rmse(test2$log_price_m2,prediction40)
# [1]
## (11) Simple Average RMSE of XGBoost+Ensemble+Meta-Model(Top3 performance models)
prediction41 <- (prediction38 + prediction39 + prediction40)/3
rmse(test2$log_price_m2, prediction41)
#[1]
## (12) Neural Network
UDF <- function(x) {
(x -min(x))/ (max(x)- min(x))
}
train_o <- datam
train <- as.data.frame(apply(train_o, 2, UDF))
index <- sample(nrow (train), round(0.70 * nrow(train)))
train.wp <- train[index,]
test.wp <- train[-index,]
##
#procValues <- preProcess(datam, method = c("center", "scale"))
#scaledTraindata <- predict(procValues, datatrain )
#scaledTestdata <- predict(procValues, datatest)
library(neuralnet)
allVars <- colnames(train)
predictorVars <- allVars[!allVars%in%"log_price_m2"]
predictorVars <- paste(predictorVars, collapse = "+")
form = as.formula(paste("log_price_m2~", predictorVars, collapse = "+"))
# Prediction Model
nn_model <- neuralnet(formula = form, train.wp, hidden = c(2,1), linear.output = TRUE)
test.wp_1 <- subset(test.wp, select = -c(log_price_m2))
prediction1 <- compute(nn_model, test.wp_1)
str(prediction1)
# UDF: Convert the scaled values to original
UDF_2 <- function(prediction) {
prediction1$net.result * (max(train_o$log_price_m2)-min(train_o$log_price_m2)) + min(train_o$log_price_m2)
}
Prediction <- prediction1$net.result * (max(train_o$log_price_m2)-min(train_o$log_price_m2)) + min(train_o$log_price_m2)
# area was min-max scaled with the rest of the frame, so map it back before multiplying
area_orig <- test.wp$area * (max(train_o$area)-min(train_o$area)) + min(train_o$area)
Prediction <- exp(Prediction)*area_orig
Actual <- test.wp$log_price_m2 * (max(train_o$log_price_m2)-min(train_o$log_price_m2)) + min(train_o$log_price_m2)
Actual <- exp(Actual)*area_orig
table(Actual, Prediction)
submit.df <- data.frame(Real = Actual, PredictPrice = Prediction)
write.csv(submit.df, file = "Prediction_neural_spatial_m2.csv", row.names = FALSE)
# RMSE results
rmse(test2$log_price_m2, prediction33) # [1] 0.1797238
rmse(test2$log_price_m2, prediction34) # [1] 0.204188
rmse(test2$log_price_m2, prediction35) # [1] 0.1649854
rmse(test2$log_price_m2, prediction36) # [1] 0.1790057
rmse(test2$log_price_m2, prediction37) # [1] 0.1581145
rmse(test2$log_price_m2, prediction38) # [1] 0.1534901
rmse(test2$log_price_m2, prediction39) # [1] 0.1513939
rmse(test2$log_price_m2, prediction40) # [1] 0.1586336
rmse(test2$log_price_m2, prediction41) # [1] 0.1524193
# Predictions
prediction_model31 <- exp(prediction33)*validation$area
prediction_model32 <- exp(prediction34)*validation$area
prediction_model33 <- exp(prediction35)*validation$area
prediction_model34 <- exp(prediction36)*validation$area
prediction_model35 <- exp(prediction37)*validation$area
prediction_model36 <- exp(prediction38)*validation$area
prediction_model37 <- exp(prediction39)*validation$area
prediction_model38 <- exp(prediction40)*validation$area
prediction_model39 <- exp(prediction41)*validation$area
# Prediction22 <- exp(Prediction22)
output <- (cbind("ID"=validation$longitude,"Orginal Price"=exp(validation$log_price_m2)*validation$area,"Model1"=prediction_model31,
"Model2"=prediction_model32, "Model3"=prediction_model33, "Model4"=prediction_model34, "Model5"=prediction_model35,
"Model6"=prediction_model36, "Model7"=prediction_model37, "Model8"=prediction_model38,
"Model9"=prediction_model39))
write.csv(output, file = "models_price_v_predicted_logpm2_spatial.csv", row.names=FALSE)
#########################################
########## SPATIAL - Log Price ##############
###### Properties <= €1.2m ##########
#### USE THIS FOR REGRESSION ####
names(new_train)
new_train$log_price <- log(new_train$price)
validation$log_price <- log(validation$price)
new_train3 <- new_train[new_train$price <= 1200000,]
validation3 <- validation[validation$price <= 1200000,]
train31 <- new_train3[c(2:3, 5:28, 200, 210, 216)]
test31 <- validation3[c(2:3, 5:28, 200, 210, 216)]
names(train31)
#### USE THIS FOR MACHINE LEARNING & NEURAL NETWORK ####
# datam <- subset(datay, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new))
train32 <- subset(new_train3, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, log_price_m2, coast_round, city_round, dart_round, luas_round, bus_round))
test32 <- subset(validation3, select = - c(price, newTown, ber, dwelling_type, newCounty, Side...North, Side...South, ber_new, log_price_m2, coast_round, city_round, dart_round, luas_round, bus_round))
names(train32)
##(a).MLR
#First include all predictor variables to see what will happen
mlx <-lm(formula = log_price ~ . - newCounty, data = train31)
getOption("max.print")
options(max.print = 1000)
summary(mlx)
prediction2 <- predict(mlx,test31, type="response")
rmse(test31$log_price, prediction2)
#[1]
# calc.relimp(mlx, type = c("lmg"), rela = TRUE, rank = TRUE)
## (3) Stepwise Linear Regression
Fitstart <- lm(formula = log_price ~ 1, data = train31)
FitAll <- lm(log_price ~ . ,data = train31)
formula(FitAll)
step(Fitstart, direction = "both",scope = formula(FitAll))
mlr <- lm(formula = log_price ~ area + newTown + dwelling_type +
            bedrooms_centre + newCounty + bath_sq + ber_new + text9 +
            text20 + text14 + bathrooms_centre + text19 + text22 + Dist_to_city +
            latitude + text17 + nearst_luas + text13 + text10 + text11,
          data = train31)
getOption("max.print")
options(max.print = 1000)
summary(mlr)
prediction43 <- predict(mlr,test31, type="response")
rmse(test31$log_price, prediction43)
#[1]
## (4) Decision Trees
myformula <- log_price ~ .
modfit <- rpart(myformula, method="anova" , data = train31)
prediction44 <- predict(modfit,newdata = test31)
rmse(test31$log_price,prediction44)
# [1]
## (5) Random Forest
rf <- randomForest(log_price ~ ., data = train32, ntree=1000, proximity=TRUE)
varImpPlot(rf)
prediction45 <- predict(rf, newdata = test32)
rmse(test32$log_price, prediction45)
#[1]
## (6) Regularized Regression(Lasso)
names(train32)
all_predictors <- subset(train32,select = -c(log_price))
var_classes <- sapply(all_predictors,function(x)class(x))
num_classes <- var_classes[var_classes!="character"]
num_vars <- subset(train32,select=names(num_classes))
#corrplot(cor(num_vars),method="number")
#corrplot(cor(num_vars),method="circle")
#Building model
set.seed(625)
lasso <-cv.glmnet(as.matrix(train32[, -204]), train32[, 204])
prediction46 <- predict(lasso, newx = as.matrix(test32[, - 204]), s = "lambda.min")
rmse(test32$log_price,prediction46)
#[1]
## (7) Gradient Boosting model(GBM)
set.seed(615)
cv.ctrl_gbm <- trainControl(method="repeatedcv",number=5,repeats = 5)
gbm<- train(log_price ~ ., method = "gbm", metric = "RMSE", maximize = FALSE,
trControl =cv.ctrl_gbm, tuneGrid = expand.grid(n.trees = 700,
interaction.depth = 5, shrinkage = 0.05,
n.minobsinnode = 10), data = train32,verbose = FALSE)
varImp(gbm)
prediction47 <- predict(gbm,newdata = test32)
rmse(test32$log_price,prediction47)
#[1]
## (8) XGBOOST(Extreme Gradient Boosting)
# preparing matrix
dtrain <- xgb.DMatrix(data = as.matrix(train32[,-204]),label = as.matrix(train32$log_price))
dtest <- xgb.DMatrix(data = as.matrix(test32[,-204]),label=as.matrix(test32$log_price))
#Building model
set.seed(611)
xgb <- xgboost(booster="gbtree",data = dtrain, nfold = 5,nrounds = 2500, verbose = FALSE,
objective = "reg:linear", eval_metric = "rmse", nthread = 8, eta = 0.01,
gamma = 0.0468, max_depth = 6, min_child_weight = 1.41, subsample = 0.769, colsample_bytree =0.283)
mat <- xgb.importance (feature_names = colnames(dtrain),model = xgb)
xgb.plot.importance (importance_matrix = mat[1:20])
prediction48 <- predict(xgb,newdata = dtest)
rmse(test32$log_price,prediction48)
#[1]
## (x) Simple Average RMSE of Random Forest + GBM + XGBoost (Top3 performing models)
rmse(test32$log_price, (prediction45 + prediction47 + prediction48)/3)
#[1]
## (x) Weighted Average RMSE of Lasso+GBM+XGBoost
rmse(test32$log_price, (0.1 *prediction45 + 0.3 *prediction47 + 0.6 *prediction48))
#[1]
## (9) Ensemble method
my_control <- trainControl(method="boot",number=5,savePredictions="final")
set.seed(621)
model_list <- caretList(
log_price ~ ., data=train32,
trControl=my_control,
metric="RMSE",
methodList=c("knn","glmnet"),
tuneList=list(
gbm=caretModelSpec(method="gbm", tuneGrid=expand.grid(n.trees = 700, interaction.depth = 5,
shrinkage = 0.05,n.minobsinnode = 10)),
xgbTree=caretModelSpec(method="xgbTree", tuneGrid=expand.grid(nrounds = 2500,max_depth = 6,min_child_weight=1.41,
eta =0.01,gamma = 0.0468,subsample=0.769,
colsample_bytree =0.283))
)
)
modelCor(resamples(model_list))
##Simple Blending
set.seed(633455)
greedy_ensemble <- caretEnsemble(model_list, metric="RMSE",trControl=trainControl(number=25))
greedy_ensemble
varImp(greedy_ensemble)
summary(greedy_ensemble)
prediction49 <- predict(greedy_ensemble,newdata = test32)
rmse(test32$log_price,prediction49)
# [1]
# (10) Using a "meta-model"
set.seed(617)
rf_ensemble <- caretStack(model_list,method="rf",metric="RMSE",
trControl=trainControl(method="boot",number=5,savePredictions="final"))
prediction50 <- predict(rf_ensemble,newdata = test32)
rmse(test32$log_price,prediction50)
# [1]
## (11) Simple Average RMSE of XGBoost+Ensemble+Meta-Model(Top3 performance models)
prediction51 <- (prediction48 + prediction49 + prediction50)/3
rmse(test32$log_price, prediction51)
#[1]
## (12) Neural Network
UDF <- function(x) {
(x -min(x))/ (max(x)- min(x))
}
train_o <- datam
train <- as.data.frame(apply(train_o, 2, UDF))
index <- sample(nrow (train), round(0.70 * nrow(train)))
train.wp <- train[index,]
test.wp <- train[-index,]
##
#procValues <- preProcess(datam, method = c("center", "scale"))
#scaledTraindata <- predict(procValues, datatrain )
#scaledTestdata <- predict(procValues, datatest)
library(neuralnet)
allVars <- colnames(train)
predictorVars <- allVars[!allVars%in%"log_price"]
predictorVars <- paste(predictorVars, collapse = "+")
form = as.formula(paste("log_price~", predictorVars, collapse = "+"))
# Prediction Model
nn_model <- neuralnet(formula = form, train.wp, hidden = c(2,1), linear.output = TRUE)
test.wp_1 <- subset(test.wp, select = -c(log_price))
prediction1 <- compute(nn_model, test.wp_1)
str(prediction1)
# UDF: Convert the scaled values to original
UDF_2 <- function(prediction) {
prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
}
Prediction <- prediction1$net.result * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Prediction <- exp(Prediction)
Actual <- test.wp$log_price * (max(train_o$log_price)-min(train_o$log_price)) + min(train_o$log_price)
Actual <- exp(Actual)
table(Actual, Prediction)
submit.df <- data.frame(Real = Actual, PredictPrice = Prediction)
write.csv(submit.df, file = "Prediction_neural_spatial.csv", row.names = FALSE)
# RMSE results
rmse(test32$log_price, prediction43) # [1] 0.1675403159
rmse(test32$log_price, prediction44) # [1] 0.2296230449
rmse(test32$log_price, prediction45) # [1] 0.1580566661
rmse(test32$log_price, prediction46) # [1] 0.1673148988
rmse(test32$log_price, prediction47) # [1] 0.1437361712
rmse(test32$log_price, prediction48) # [1] 0.1402385366
rmse(test32$log_price, prediction49) # [1] 0.1388763678
rmse(test32$log_price, prediction50) # [1] 0.144111393
rmse(test32$log_price, prediction51) # [1] 0.1384158531
# rmse(test32$log_price, Prediction22) # [1]
# Predictions
prediction_model41 <- exp(prediction43)
prediction_model42 <- exp(prediction44)
prediction_model43 <- exp(prediction45)
prediction_model44 <- exp(prediction46)
prediction_model45 <- exp(prediction47)
prediction_model46 <- exp(prediction48)
prediction_model47 <- exp(prediction49)
prediction_model48 <- exp(prediction50)
prediction_model49 <- exp(prediction51)
# Prediction22 <- exp(Prediction22)
output <- (cbind("ID"=validation3$longitude,"Orginal Price"=exp(validation3$log_price),"Model1"=prediction_model41,
"Model2"=prediction_model42, "Model3"=prediction_model43, "Model4"=prediction_model44, "Model5"=prediction_model45,
"Model6"=prediction_model46, "Model7"=prediction_model47, "Model8"=prediction_model48,
"Model9"=prediction_model49))
write.csv(output, file = "models_price_v_predicted_logp_spatial_reduced.csv", row.names=FALSE)
plot(prediction_model46, exp(validation3$log_price), main = "Predicted vs. Asking Price - Reduced Dataset - XGB Model", xlab = "Predicted Price", ylab = "Asking Price")
abline(0, 1, col = "green", lwd = 2)
abline(0, 0.95, col = "blue", lwd = 2, lty = 2)
abline(0, 1.05, col = "blue", lwd = 2, lty = 2)
abline(0, 0.9, col = "red", lwd = 2, lty = 2)
abline(0, 1.1, col = "red", lwd = 2, lty = 2)
legend(100000, 1200000, legend = c("Predicted = Asking", "5% prediction error", "10% prediction error"), col = c("green", "blue", "red"), lty = c(1,2,2), lwd = c(2,2,2))
plot(prediction_model16, exp(validation$log_price), main = "Predicted vs. Asking Price - XGB Spatial Model", xlab = "Predicted Price", ylab = "Asking Price")
abline(0, 1, col = "green", lwd = 2)
abline(0, 0.95, col = "blue", lwd = 2, lty = 2)
abline(0, 1.05, col = "blue", lwd = 2, lty = 2)
abline(0, 0.9, col = "red", lwd = 2, lty = 2)
abline(0, 1.1, col = "red", lwd = 2, lty = 2)
legend(100000, 7000000, legend = c("Predicted = Asking", "5% prediction error", "10% prediction error"), col = c("green", "blue", "red"), lty = c(1,2,2), lwd = c(2,2,2))
# plot(exp(datam$log_price), datam$area, main = "Log Predicted vs. Log Asking Price", xlab = "Log Predicted Price", ylab = "Log Asking Price")
# # plot(rfpredict, datay$log_price, main = "Log Predicted vs. Asking Price")
# abline(0,1, col = "red", lwd = 3)
#
#
# datay <- subset(datam, select = c(log_price, area))
# datay$price <- exp(datay$log_price)
# graph5 <- ggplot(datay, aes(x = price, y = area)) + geom_point() +
# ggtitle("Price vs Area") +
# ylab("Area") +
# xlab("Price")
# graph5
#
# datay <- datay[datay$price < 2000000,]
# graph6 <- ggplot(datay, aes(x = price, y = area)) + geom_point() +
# ggtitle("Price (Less than ???2m) vs Area") +
# ylab("Area") +
# xlab("Price")
# graph6
#
# grid1 <- grid.arrange(graph5, graph6, ncol = 2)
|
function gamma_nk = calc_gamma_E_step(N_nk, w_k)
%CALC_GAMMA Calculate gamma in E step while scaling both the denominator
% and numerator by the largest quantity in N.
% Input:
% N_nk - logarithm of Gaussian evaluations (N by K)
% w_k - prior probabilities (1 by K)
% Output:
% gamma_nk - soft membership in E-step (N by K)
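%
% For reference: gamma(n,k) = w_k*exp(N_nk(n,k)) / sum_j w_j*exp(N_nk(n,j)).
% Dividing numerator and denominator by exp(max_k N_nk(n,:)) leaves this
% ratio unchanged (the log-sum-exp trick) while preventing exp() underflow.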
K1 = length(w_k);
N = size(N_nk, 1);
max_N_nk = max(N_nk, [], 2);
N_nk1 = exp(N_nk - repmat(max_N_nk, [1,K1])); % subtract row max so exp() does not underflow on large negative logs
gamma_nk = (ones(N,1)*w_k) .* N_nk1;
gamma_nk = gamma_nk ./ repmat(sum(gamma_nk,2), 1, K1);
end
|
import topology.nhds_set
variables {α : Type*} [topological_space α] {s t s₁ s₂ t₁ t₂ : set α} {x : α}
open filter
open_locale filter topology
lemma is_open.nhds_set_eq_principal {s : set α} (h : is_open s) : 𝓝ˢ s = 𝓟 s :=
begin
apply le_antisymm _ principal_le_nhds_set,
rw [filter.le_principal_iff, h.mem_nhds_set]
end
lemma is_open.forall_near_mem_of_subset {s t : set α} (h : is_open s) (ht : t ⊆ s) : ∀ᶠ x in 𝓝ˢ t, x ∈ s :=
begin
apply eventually.filter_mono (nhds_set_mono ht),
rw [h.nhds_set_eq_principal, eventually_principal],
exact λ x, id
end
/-
In the next lemma, the inequality cannot be improved to an equality. For instance,
if α has two elements, carries the coarse (indiscrete) topology, and s and t are distinct
singletons, then 𝓝ˢ (s ∩ t) = ⊥ while 𝓝ˢ s ⊓ 𝓝ˢ t = ⊤, and those are different.
-/
lemma nhds_set_inter_le (s t : set α) : 𝓝ˢ (s ∩ t) ≤ 𝓝ˢ s ⊓ 𝓝ˢ t :=
(@monotone_nhds_set α _).map_inf_le s t
lemma sup_Sup {α : Type*} [complete_lattice α] {s : set α} {a : α} : a ⊔ (Sup s) = Sup (s ∪ {a}) :=
by simp only [set.union_singleton, Sup_insert]
lemma Sup_sup {α : Type*} [complete_lattice α] {s : set α} {a : α} : (Sup s) ⊔ a = Sup (s ∪ {a}) :=
by simp only [sup_Sup, sup_comm]
lemma is_closed.nhds_set_le_sup {t : set α} (h : is_closed t) (s : set α) :
𝓝ˢ s ≤ 𝓝ˢ (s ∩ t) ⊔ 𝓟 tᶜ :=
calc 𝓝ˢ s = 𝓝ˢ ((s ∩ t) ∪ (s ∩ tᶜ)) : by rw set.inter_union_compl s t
... = 𝓝ˢ (s ∩ t) ⊔ 𝓝ˢ (s ∩ tᶜ) : by rw nhds_set_union
... ≤ 𝓝ˢ (s ∩ t) ⊔ 𝓝ˢ tᶜ : sup_le_sup_left (monotone_nhds_set (s.inter_subset_right tᶜ)) _
... = 𝓝ˢ (s ∩ t) ⊔ 𝓟 tᶜ : by rw (is_open_compl_iff.mpr h).nhds_set_eq_principal
lemma is_closed.nhds_set_le_sup' {t : set α} (h : is_closed t) (s : set α) :
𝓝ˢ s ≤ 𝓝ˢ (t ∩ s) ⊔ 𝓟 tᶜ :=
by {rw set.inter_comm, exact h.nhds_set_le_sup s }
lemma eventually_nhds_set_iff {p : α → Prop} : (∀ᶠ x in 𝓝ˢ s, p x) ↔ (∀ x ∈ s, ∀ᶠ y in 𝓝 x, p y) :=
by rw [nhds_set, eventually_Sup, set.ball_image_iff]
lemma filter.eventually.eventually_nhds_set {p : α → Prop} (h : ∀ᶠ y in 𝓝ˢ s, p y) :
∀ᶠ y in 𝓝ˢ s, ∀ᶠ x in 𝓝 y, p x :=
eventually_nhds_set_iff.mpr (λ x x_in, (eventually_nhds_set_iff.mp h x x_in).eventually_nhds)
lemma filter.eventually.on_set {p : α → Prop} (h : ∀ᶠ y in 𝓝ˢ s, p y) : ∀ x ∈ s, p x :=
eventually_principal.mp $ eventually.filter_mono principal_le_nhds_set h
lemma filter.eventually_nhds_set_union {p : α → Prop} :
(∀ᶠ x in 𝓝ˢ (s ∪ t), p x) ↔ (∀ᶠ x in 𝓝ˢ s, p x) ∧ ∀ᶠ x in 𝓝ˢ t, p x :=
begin
rw [nhds_set_union, eventually_sup]
end
lemma filter.eventually.union {p : α → Prop} (hs : ∀ᶠ x in 𝓝ˢ s, p x) (ht : ∀ᶠ x in 𝓝ˢ t, p x) :
∀ᶠ x in 𝓝ˢ (s ∪ t), p x :=
filter.eventually_nhds_set_union.mpr ⟨hs, ht⟩
lemma eventually_nhds_set_Union₂ {α ι : Type*} [topological_space α] {p : ι → Prop} {s : ι → set α}
{P : α → Prop} : (∀ᶠ x in 𝓝ˢ (⋃ i (hi : p i), s i), P x) ↔ ∀ i, p i → ∀ᶠ x in 𝓝ˢ (s i), P x :=
begin
simp_rw [eventually_nhds_set_iff, set.mem_Union₂],
split,
exact λ h i hi x hx, h x ⟨i,hi, hx⟩,
rintros h x ⟨i, hi, hx⟩,
exact h i hi x hx
end
lemma eventually_nhds_set_Union {α ι : Type*} [topological_space α] {s : ι → set α}
{P : α → Prop} : (∀ᶠ x in 𝓝ˢ (⋃ i, s i), P x) ↔ ∀ i, ∀ᶠ x in 𝓝ˢ (s i), P x :=
by simpa using @eventually_nhds_set_Union₂ _ _ _ (λ i, true) s P
-- This lemma goes to filter.basic, after filter.eventually_principal
lemma filter.eventually.forall_mem {α : Type*} {f : filter α} {s : set α} {P : α → Prop}
(hP : ∀ᶠ x in f, P x) (hf : 𝓟 s ≤ f) : ∀ x ∈ s, P x :=
filter.eventually_principal.mp (hP.filter_mono hf)
lemma filter.eventually.nhds_set_forall_mem {α : Type*} [topological_space α]
{s : set α} {P : α → Prop}
(hP : ∀ᶠ x in nhds_set s, P x) : ∀ x ∈ s, P x :=
hP.forall_mem principal_le_nhds_set
lemma subset_of_mem_nhds_set {α : Type*} [topological_space α] {s t : set α} (h : t ∈ 𝓝ˢ s) : s ⊆ t :=
λ x hx, mem_of_mem_nhds $ mem_nhds_set_iff_forall.mp h x hx
|
#define BOOST_TEST_MODULE spherical harmonic coefficients
#define BOOST_TEST_DYN_LINK
#include <cmath>
#include <random>
#include <vector>
#include <chrono>
#include <boost/test/unit_test.hpp>
#include <blackpearl/core/sph_hrm_coeffs.hpp>
template<typename real_scalar_type>
void test_shp_hrm_coeffs(){
using namespace blackpearl::core;
size_t l_max(6143);
size_t m_max(l_max);
size_t num_fields(6);
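    // Smoke test: constructing coefficients for l_max = m_max = 6143 and
    // 6 fields exercises allocation; the test passes if no exception is thrown.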
sph_hrm_coeffs<real_scalar_type> sh(l_max,m_max,num_fields);
}
BOOST_AUTO_TEST_CASE(shp_hrm_coeffs){
test_shp_hrm_coeffs<float>();
test_shp_hrm_coeffs<double>();
}
|
function f=ftorque(tt)
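%FTORQUE Penalty for joint torque-limit violations.
% Sums over all time steps the amounts by which |tt(1,:)| and |tt(2,:)|
% exceed the limits t1max = 34 and t2max = 12; returns 0 when both joints
% stay within their limits throughout.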
t1max=34;t2max=12;
t1=tt(1,:);t2=tt(2,:);
f=0;
for i=1:size(tt,2)
if abs(t1(i)) < t1max
tc1=0;
else
tc1=abs(t1(i))-t1max;
end
if abs(t2(i)) < t2max
tc2=0;
else
tc2=abs(t2(i))-t2max;
end
f=f+tc1+tc2;
end
|
import torch #only used in cast_numpy
import numpy as np
from radbm.search.base import BaseSDS
from radbm.utils.stats.generators import greatest_k_multi_bernoulli_outcomes_generator
def cast_numpy(data):
if isinstance(data, torch.Tensor):
return data.detach().cpu().numpy()
return data
class HashingMultiBernoulliSDS(BaseSDS):
"""
    This algorithm builds N hash tables from documents given in the form
    of Multi-Bernoulli distributions. For each document (i.e. each
    distribution) we find the i-th most probable outcome (which is
    a binary vector) and use it as the key into the corresponding
    i-th hash table.
    For retrieval, given a query (i.e. a Multi-Bernoulli distribution)
    we use its M most probable outcomes (which are binary vectors)
    to do M lookups in the hash tables. In this context, a lookup
    means looking in each table. In total NxM hash table calls
    will be made.
Parameters
----------
ntables : int
The number of tables to build
nlookups : int, optional (default=1)
The number of lookups to do when searching
Notes
-----
    Multi-Bernoulli distributions are always parametrized with the
    vector of log probabilities for the bits to be one.
"""
def __init__(self, ntables, nlookups=1):
self.ntables = ntables
self.nlookups = nlookups
self.reset()
def reset(self):
"""
Empty the hash tables
Returns
-------
self
"""
self.tables = [dict() for _ in range(self.ntables)]
return self
def get_buckets_avg_size(self):
"""
Returns
-------
buckets_avg_size : list of float
The average number of documents per buckets for each
hash tables
"""
return [np.mean(list(map(len, t.values()))) for t in self.tables]
def get_buckets_max_size(self):
"""
Returns
-------
buckets_max_size : list of float
The maximum number of documents per buckets for each
hash tables
"""
        return [np.max(list(map(len, t.values()))) if t else float('nan') for t in self.tables]
def __repr__(self):
r = 'Search: {}\nTables size: {}\nBuckets avg size: {}\nBuckets max size: {}'
return r.format(
self.__class__.__name__,
', '.join(map(str, map(len, self.tables))),
', '.join(['{:.2f}'.format(s) for s in self.get_buckets_avg_size()]),
', '.join(map(str, self.get_buckets_max_size())),
)
def _get_generator(self, log_probs, k):
"""
Parameters
----------
log_probs : numpy.ndarray (ndim == 1 or 2)
If ndim==1: The Multi-Bernoulli distribution parametrized in log probabilities
If ndim==2: len(log_probs) should be 2. The first element should be log
probabilities that bits are zero and the second element should be the log
probabilities that bits are one. This is for numerical stability
(i.e. when probability are too close to 1)
k : int
            The number of outcomes to generate
"""
log_probs = cast_numpy(log_probs)
if log_probs.ndim==1:
return greatest_k_multi_bernoulli_outcomes_generator(log_probs, k=k)
elif log_probs.ndim==2:
log_probs0, log_probs1 = log_probs
return greatest_k_multi_bernoulli_outcomes_generator(log_probs0, log_probs1, k=k)
else:
msg = 'log_probs.ndim should be 1 or 2, got {}'
raise ValueError(msg.format(log_probs.ndim))
def insert(self, log_probs, i):
"""
        Insert a unique document's index into each table. The document
        must be a Multi-Bernoulli distribution parametrized in log
        probabilities.
Parameters
----------
log_probs : numpy.ndarray (ndim == 1 or 2)
If ndim==1: The Multi-Bernoulli distribution parametrized in log probabilities
If ndim==2: len(log_probs) should be 2. The first element should be log
probabilities that bits are zero and the second element should be the log
probabilities that bits are one. This is for numerical stability
(i.e. when probability are too close to 1)
i : hashable (e.g. int or tuple)
The index of the document
"""
gen = self._get_generator(log_probs, self.ntables)
for n, bits in enumerate(gen):
table = self.tables[n]
if bits in table: table[bits].add(i)
else: table[bits] = {i}
def search(self, log_probs, nlookups=None):
"""
Search in the tables with a query in the form of a Multi-Bernoulli
distribution parametrized in log probabilities. This will search in
        each table with each of the top (nlookups) outcomes.
Parameters
----------
log_probs : numpy.ndarray (ndim == 1 or 2)
If ndim==1: The Multi-Bernoulli distribution parametrized in log probabilities
If ndim==2: len(log_probs) should be 2. The first element should be log
probabilities that bits are zero and the second element should be the log
probabilities that bits are one. This is for numerical stability
(i.e. when probability are too close to 1)
nlookups : int, optional
            The number of top outcomes to use for searching. If not specified,
            the default self.nlookups is used.
Returns
-------
indexes : set
            The indexes of all documents found by the search
"""
indexes = set()
nlookups = self.nlookups if nlookups is None else nlookups
        #loops at most nlookups*self.ntables times, skipping empty lookups
for new_indexes in self.itersearch(log_probs, nlookups):
indexes.update(new_indexes)
return indexes
def itersearch(self, log_probs, nlookups=None, yield_empty=False):
"""
        Generator that searches the tables with a query in the form of a
        Multi-Bernoulli distribution parametrized in log probabilities.
        This will search in each table with each of the top (nlookups)
        outcomes. Every time a set of indexes is found, this generator will
        yield it. If yield_empty is True, empty sets will also be yielded.
Parameters
----------
log_probs : numpy.ndarray (ndim == 1 or 2)
If ndim==1: The Multi-Bernoulli distribution parametrized in log probabilities
If ndim==2: len(log_probs) should be 2. The first element should be log
probabilities that bits are zero and the second element should be the log
probabilities that bits are one. This is for numerical stability
(i.e. when probability are too close to 1)
nlookups : int, optional
            The upper limit on generating the next most probable outcomes. Not to
            be confused with the number of items generated. By default, every
            outcome is generated.
Yields
------
indexes : set
The newly found indexes
"""
yielded = set()
for bits in self._get_generator(log_probs, nlookups):
for table in self.tables:
if bits in table:
indexes = table[bits]
new_indexes = indexes - yielded
yielded.update(new_indexes)
if yield_empty or new_indexes:
yield new_indexes
elif yield_empty:
yield set()
def get_state(self):
"""
Returns
-------
tables : list of dict
The current hash tables
"""
return self.tables
def set_state(self, state):
"""
Parameters
        ----------
state : list of dict
The hash tables to use
"""
self.tables = state
return self
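# Minimal usage sketch (illustrative; the three-bit probabilities below are
# made up, and only the public insert/search API of this class is used):
#
#     import numpy as np
#     sds = HashingMultiBernoulliSDS(ntables=2, nlookups=2)
#     doc = np.log(np.array([0.9, 0.2, 0.7]))   # log P(bit=1) per position
#     sds.insert(doc, 0)
#     query = np.log(np.array([0.8, 0.1, 0.6]))
#     matches = sds.search(query)               # e.g. {0}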
|
lemma LIMSEQ_D: "X \<longlonglongrightarrow> L \<Longrightarrow> 0 < r \<Longrightarrow> \<exists>no. \<forall>n\<ge>no. norm (X n - L) < r" for L :: "'a::real_normed_vector"
|
const default_values = (2.0^-6, 1, nothing, ("Standard", "Convergence"), 10e-6, 100) # float base: 2^-6 on an Int throws a DomainError
struct PositionalArguments
F::Function
tSpan::Vector{<:Real}
y0::Union{Real, Vector{<:Real}, Matrix{<:Real}}
β::Union{Real, Vector{<:Real}}
function PositionalArguments(F, tSpan, y0, β)
# catch exceptions for tSpan
if length(tSpan) != 2
error("tSpan should contain exactly 2 values: one for the initial time and the other for the final time.")
end
if tSpan[2] <= tSpan[1]
error("The final time should be greater than the initial time.")
end
# catch exceptions for y0
if typeof(y0) <: Real
if length(β) != 1
error("Either β has too many values or y0 has too few.")
end
elseif typeof(y0) <: Vector{<:Real}
if (size(y0) != size(β) && length(β) != 1)
error("The number of elements in y0 should match that in β.")
        elseif (size(y0) != size(β) && length(β) == 1)
            # run the element-count check first: it previously sat after the
            # unconditional error() call and could never be reached
            if length(y0) != Int64(ceil(β))
                error("The number of elements in y0 should equal the next integer of β. For instance, if β = 1.2, its next integer is 2 and y0 should have 2 elements for the initial values.")
            end
            error("Initial values of higher-order derivatives should not be given as a vector (y0 = [y0', y0'', y0''', ...]). Instead, they should be given as a matrix (y0 = [y0' y0'' y0''' ...]).")
end
else # typeof(y0) <: Matrix{<:Real}
if length(β) == 1
if size(y0, 2) != Int64(ceil(β))
error("The number of elements in y0 should equal the next integer of β. For instance, if β = 1.2, its next integer is 2 and y0 should have 2 elements for the initial values.")
end
else # length(β) != 1
if size(y0, 2) != Int64(ceil(maximum(β)))
error("The number of columns in y0 should equal the next integer of β. For instance, if β = 1.2, its next integer is 2 and y0 should have 2 columns of initial values.")
end
                if size(y0, 1) != length(β) # rows, not columns
error("The number of rows in y0 should match the length of β, that is, the number of equations to solve.")
end
end
end
# catch exceptions for β
if typeof(β) <: Real
if β <= 0
error("β must be a positive integer or float.")
end
else # typeof(β) == Vector{<:Real}
if !isempty(β[β .<= 0])
error("β must be positive integers or floats.")
end
end
if !(typeof(y0) <: Matrix{<:Real})
Y0 = zeros(size(y0, 1), Int64(ceil(maximum(β))))
Y0 .= y0
else
Y0 = y0
end
new(F, tSpan, Y0, β)
end
end
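# Minimal construction sketch (illustrative; the right-hand side F_demo and the
# way a solver would consume these structs are assumptions):
#
#     F_demo(t, y) = -y                                 # hypothetical scalar RHS
#     args = PositionalArguments(F_demo, [0.0, 1.0], 1.0, 0.8)
#     opts = OptionalArguments(2.0^-6, 1, "Standard", 10e-6, 100)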
struct OptionalArguments
h::Float64
nc::Int64
StopIt::String
tol::Float64
itmax::Int64
function OptionalArguments(h, nc, StopIt, tol, itmax)
# catch exceptions for StopIt
if !(StopIt == "Standard" || StopIt == "Convergence")
error("StopIt can take on either 'Standard' or 'Convergence'.")
end
# catch exceptions for h
if h <= 0
error("The step size h must be positive.")
end
# give warnings for tol, itmax and nc
if StopIt == default_values[4][1]
if (tol != default_values[5] || itmax != default_values[6])
@warn "The tolerance tol and the maximum number of iterations itmax are relevant only if StopIt is set at 'Convergence'."
end
else # StopIt == default_values[4][2]
if nc != default_values[2]
@warn "The number of corrections nc is relevant only if StopIt is set at 'Standard'."
end
end
new(h, nc, StopIt, tol, itmax)
end
end
## some structures for standard case
struct Problem
ic
f_fun
problem_size::Int64
param
β::Union{Real, Vector{<:Real}, Matrix{<:Real}}
β_length::Int64
function Problem(ic, f_fun, problem_size, param, β, β_length)
if (β_length > 1 && problem_size != β_length)
error("The size of the problem obtained from the initial conditions (the number of rows of y0) is not compatible with the number of fractional orders.")
end
new(ic, f_fun, problem_size, param, β, β_length)
end
end
struct Method
bn::Matrix{Float64}
an::Matrix{Float64}
a0::Matrix{Float64}
hα1 # we should think about its type
hα2 # we should think about its type
μ::Int64
μTol::Float64
r::Int64
StopIt::String
itmax::Int64
end
struct Method_fft
bn_fft::Matrix{ComplexF64}
an_fft::Matrix{ComplexF64}
index_fft::Matrix{Int64}
end
struct initial_conditions
t0::Float64
y0::Any
m_β # we should think about its type
m_β_factorial::Matrix{Int64}
function initial_conditions(t0, y0, m_β, m_β_factorial)
if size(y0, 2) < maximum(m_β)
error("There are not enough initial conditions to solve a system of order β.")
end
new(t0, y0, m_β, m_β_factorial)
end
end
## some structures for Jacobian case
struct JProblem
ic
f_fun
problem_size::Int64
param
β::Union{Real, Vector{<:Real}, Matrix{<:Real}} # matrix for a system with orders greater than 1
β_length::Int64
JF
function JProblem(ic, f_fun, problem_size, param, β, β_length, JF)
if problem_size != β_length
error("The size of the problem obtained from the initial conditions (the number of rows of y0) is not compatible with the number of fractional orders.")
end
new(ic, f_fun, problem_size, param, β, β_length, JF)
end
end
struct JMethod
an::Matrix{Float64}
a0::Matrix{Float64}
hα1 # we should think about its type
hα2 # we should think about its type
μ::Int64
μTol::Float64
r::Int64
StopIt::String
itmax::Int64
end
struct JMethod_fft
an_fft::Matrix{ComplexF64}
index_fft::Matrix{Int64}
end
|
(* Title: HOL/Auth/Guard/P2.thy
Author: Frederic Blanqui, University of Cambridge Computer Laboratory
Copyright 2002 University of Cambridge
From G. Karjoth, N. Asokan and C. Gulcu
"Protecting the computation results of free-roaming agents"
Mobiles Agents 1998, LNCS 1477.
*)
section\<open>Protocol P2\<close>
theory P2 imports Guard_Public List_Msg begin
subsection\<open>Protocol Definition\<close>
text\<open>Like P1 except the definitions of \<open>chain\<close>, \<open>shop\<close>,
\<open>next_shop\<close> and \<open>nonce\<close>\<close>
subsubsection\<open>offer chaining:
B chains his offer for A with the head offer of L for sending it to C\<close>
definition chain :: "agent => nat => agent => msg => agent => msg" where
"chain B ofr A L C ==
let m1= sign B (Nonce ofr) in
let m2= Hash \<lbrace>head L, Agent C\<rbrace> in
\<lbrace>Crypt (pubK A) m1, m2\<rbrace>"
declare Let_def [simp]
lemma chain_inj [iff]: "(chain B ofr A L C = chain B' ofr' A' L' C')
= (B=B' & ofr=ofr' & A=A' & head L = head L' & C=C')"
by (auto simp: chain_def Let_def)
lemma Nonce_in_chain [iff]: "Nonce ofr:parts {chain B ofr A L C}"
by (auto simp: chain_def sign_def)
subsubsection\<open>agent whose key is used to sign an offer\<close>
fun shop :: "msg => msg" where
"shop \<lbrace>Crypt K \<lbrace>B,ofr,Crypt K' H\<rbrace>,m2\<rbrace> = Agent (agt K')"
lemma shop_chain [simp]: "shop (chain B ofr A L C) = Agent B"
by (simp add: chain_def sign_def)
subsubsection\<open>nonce used in an offer\<close>
fun nonce :: "msg => msg" where
"nonce \<lbrace>Crypt K \<lbrace>B,ofr,CryptH\<rbrace>,m2\<rbrace> = ofr"
lemma nonce_chain [simp]: "nonce (chain B ofr A L C) = Nonce ofr"
by (simp add: chain_def sign_def)
subsubsection\<open>next shop\<close>
fun next_shop :: "msg => agent" where
"next_shop \<lbrace>m1,Hash \<lbrace>headL,Agent C\<rbrace>\<rbrace> = C"
lemma "next_shop (chain B ofr A L C) = C"
by (simp add: chain_def sign_def)
subsubsection\<open>anchor of the offer list\<close>
definition anchor :: "agent => nat => agent => msg" where
"anchor A n B == chain A n A (cons nil nil) B"
lemma anchor_inj [iff]:
"(anchor A n B = anchor A' n' B') = (A=A' & n=n' & B=B')"
by (auto simp: anchor_def)
lemma Nonce_in_anchor [iff]: "Nonce n:parts {anchor A n B}"
by (auto simp: anchor_def)
lemma shop_anchor [simp]: "shop (anchor A n B) = Agent A"
by (simp add: anchor_def)
subsubsection\<open>request event\<close>
definition reqm :: "agent => nat => nat => msg => agent => msg" where
"reqm A r n I B == \<lbrace>Agent A, Number r, cons (Agent A) (cons (Agent B) I),
cons (anchor A n B) nil\<rbrace>"
lemma reqm_inj [iff]: "(reqm A r n I B = reqm A' r' n' I' B')
= (A=A' & r=r' & n=n' & I=I' & B=B')"
by (auto simp: reqm_def)
lemma Nonce_in_reqm [iff]: "Nonce n:parts {reqm A r n I B}"
by (auto simp: reqm_def)
definition req :: "agent => nat => nat => msg => agent => event" where
"req A r n I B == Says A B (reqm A r n I B)"
lemma req_inj [iff]: "(req A r n I B = req A' r' n' I' B')
= (A=A' & r=r' & n=n' & I=I' & B=B')"
by (auto simp: req_def)
subsubsection\<open>propose event\<close>
definition prom :: "agent => nat => agent => nat => msg => msg =>
msg => agent => msg" where
"prom B ofr A r I L J C == \<lbrace>Agent A, Number r,
app (J, del (Agent B, I)), cons (chain B ofr A L C) L\<rbrace>"
lemma prom_inj [dest]: "prom B ofr A r I L J C = prom B' ofr' A' r' I' L' J' C'
==> B=B' & ofr=ofr' & A=A' & r=r' & L=L' & C=C'"
by (auto simp: prom_def)
lemma Nonce_in_prom [iff]: "Nonce ofr:parts {prom B ofr A r I L J C}"
by (auto simp: prom_def)
definition pro :: "agent => nat => agent => nat => msg => msg =>
msg => agent => event" where
"pro B ofr A r I L J C == Says B C (prom B ofr A r I L J C)"
lemma pro_inj [dest]: "pro B ofr A r I L J C = pro B' ofr' A' r' I' L' J' C'
==> B=B' & ofr=ofr' & A=A' & r=r' & L=L' & C=C'"
by (auto simp: pro_def dest: prom_inj)
subsubsection\<open>protocol\<close>
inductive_set p2 :: "event list set"
where
Nil: "[]:p2"
| Fake: "[| evsf:p2; X:synth (analz (spies evsf)) |] ==> Says Spy B X # evsf : p2"
| Request: "[| evsr:p2; Nonce n ~:used evsr; I:agl |] ==> req A r n I B # evsr : p2"
| Propose: "[| evsp:p2; Says A' B \<lbrace>Agent A,Number r,I,cons M L\<rbrace>:set evsp;
I:agl; J:agl; isin (Agent C, app (J, del (Agent B, I)));
Nonce ofr ~:used evsp |] ==> pro B ofr A r I (cons M L) J C # evsp : p2"
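(* summary of the rules: the Spy may say anything he can synthesise from observed
traffic (Fake); a customer A starts a request with a fresh nonce (Request); a shop B
that received an offer list extends it with its own chained offer, using a fresh
nonce, and forwards it to the next shop C (Propose) *)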
subsubsection\<open>valid offer lists\<close>
inductive_set
valid :: "agent => nat => agent => msg set"
for A :: agent and n :: nat and B :: agent
where
Request [intro]: "cons (anchor A n B) nil:valid A n B"
| Propose [intro]: "L:valid A n B
==> cons (chain (next_shop (head L)) ofr A L C) L:valid A n B"
subsubsection\<open>basic properties of valid\<close>
lemma valid_not_empty: "L:valid A n B ==> EX M L'. L = cons M L'"
by (erule valid.cases, auto)
lemma valid_pos_len: "L:valid A n B ==> 0 < len L"
by (erule valid.induct, auto)
subsubsection\<open>list of offers\<close>
fun offers :: "msg => msg"
where
"offers (cons M L) = cons \<lbrace>shop M, nonce M\<rbrace> (offers L)"
| "offers other = nil"
subsection\<open>Properties of Protocol P2\<close>
text\<open>same as \<open>P1_Prop\<close> except that publicly verifiable forward
integrity is replaced by forward privacy\<close>
subsection\<open>strong forward integrity:
except the last one, no offer can be modified\<close>
lemma strong_forward_integrity: "ALL L. Suc i < len L
--> L:valid A n B --> repl (L,Suc i,M):valid A n B --> M = ith (L,Suc i)"
apply (induct i)
(* i = 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,xa,l'a\<rbrace>:valid A n B" for x xa l'a)
apply (ind_cases "\<lbrace>x,M,l'a\<rbrace>:valid A n B" for x l'a)
apply (simp add: chain_def)
(* i > 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,repl(l',Suc na,M)\<rbrace>:valid A n B" for x l' na)
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,l'\<rbrace>:valid A n B" for x l')
by (drule_tac x=l' in spec, simp, blast)
subsection\<open>insertion resilience:
except at the beginning, no offer can be inserted\<close>
lemma chain_isnt_head [simp]: "L:valid A n B ==>
head L ~= chain (next_shop (head L)) ofr A L C"
by (erule valid.induct, auto simp: chain_def sign_def anchor_def)
lemma insertion_resilience: "ALL L. L:valid A n B --> Suc i < len L
--> ins (L,Suc i,M) ~:valid A n B"
apply (induct i)
(* i = 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,l'\<rbrace>:valid A n B" for x l', simp)
apply (ind_cases "\<lbrace>x,M,l'\<rbrace>:valid A n B" for x l', clarsimp)
apply (ind_cases "\<lbrace>head l',l'\<rbrace>:valid A n B" for l', simp, simp)
(* i > 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,l'\<rbrace>:valid A n B" for x l')
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,ins(l',Suc na,M)\<rbrace>:valid A n B" for x l' na)
apply (frule len_not_empty, clarsimp)
by (drule_tac x=l' in spec, clarsimp)
subsection\<open>truncation resilience:
only shop i can truncate at offer i\<close>
lemma truncation_resilience: "ALL L. L:valid A n B --> Suc i < len L
--> cons M (trunc (L,Suc i)):valid A n B --> shop M = shop (ith (L,i))"
apply (induct i)
(* i = 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,l'\<rbrace>:valid A n B" for x l')
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>M,l'\<rbrace>:valid A n B" for l')
apply (frule len_not_empty, clarsimp, simp)
(* i > 0 *)
apply clarify
apply (frule len_not_empty, clarsimp)
apply (ind_cases "\<lbrace>x,l'\<rbrace>:valid A n B" for x l')
apply (frule len_not_empty, clarsimp)
by (drule_tac x=l' in spec, clarsimp)
subsection\<open>declarations for tactics\<close>
declare knows_Spy_partsEs [elim]
declare Fake_parts_insert [THEN subsetD, dest]
declare initState.simps [simp del]
subsection\<open>get components of a message\<close>
lemma get_ML [dest]: "Says A' B \<lbrace>A,R,I,M,L\<rbrace>:set evs ==>
M:parts (spies evs) & L:parts (spies evs)"
by blast
subsection\<open>general properties of p2\<close>
lemma reqm_neq_prom [iff]:
"reqm A r n I B ~= prom B' ofr A' r' I' (cons M L) J C"
by (auto simp: reqm_def prom_def)
lemma prom_neq_reqm [iff]:
"prom B' ofr A' r' I' (cons M L) J C ~= reqm A r n I B"
by (auto simp: reqm_def prom_def)
lemma req_neq_pro [iff]: "req A r n I B ~= pro B' ofr A' r' I' (cons M L) J C"
by (auto simp: req_def pro_def)
lemma pro_neq_req [iff]: "pro B' ofr A' r' I' (cons M L) J C ~= req A r n I B"
by (auto simp: req_def pro_def)
lemma p2_has_no_Gets: "evs:p2 ==> ALL A X. Gets A X ~:set evs"
by (erule p2.induct, auto simp: req_def pro_def)
lemma p2_is_Gets_correct [iff]: "Gets_correct p2"
by (auto simp: Gets_correct_def dest: p2_has_no_Gets)
lemma p2_is_one_step [iff]: "one_step p2"
by (unfold one_step_def, clarify, ind_cases "ev#evs:p2" for ev evs, auto)
lemma p2_has_only_Says' [rule_format]: "evs:p2 ==>
ev:set evs --> (EX A B X. ev=Says A B X)"
by (erule p2.induct, auto simp: req_def pro_def)
lemma p2_has_only_Says [iff]: "has_only_Says p2"
by (auto simp: has_only_Says_def dest: p2_has_only_Says')
lemma p2_is_regular [iff]: "regular p2"
apply (simp only: regular_def, clarify)
apply (erule_tac p2.induct)
apply (simp_all add: initState.simps knows.simps pro_def prom_def
req_def reqm_def anchor_def chain_def sign_def)
by (auto dest: no_Key_in_agl no_Key_in_appdel parts_trans)
subsection\<open>private keys are safe\<close>
lemma priK_parts_Friend_imp_bad [rule_format,dest]:
"[| evs:p2; Friend B ~= A |]
==> (Key (priK A):parts (knows (Friend B) evs)) --> (A:bad)"
apply (erule p2.induct)
apply (simp_all add: initState.simps knows.simps pro_def prom_def
req_def reqm_def anchor_def chain_def sign_def)
apply (blast dest: no_Key_in_agl)
apply (auto del: parts_invKey disjE dest: parts_trans
simp add: no_Key_in_appdel)
done
lemma priK_analz_Friend_imp_bad [rule_format,dest]:
"[| evs:p2; Friend B ~= A |]
==> (Key (priK A):analz (knows (Friend B) evs)) --> (A:bad)"
by auto
lemma priK_notin_knows_max_Friend:
"[| evs:p2; A ~:bad; A ~= Friend C |]
==> Key (priK A) ~:analz (knows_max (Friend C) evs)"
apply (rule not_parts_not_analz, simp add: knows_max_def, safe)
apply (drule_tac H="spies' evs" in parts_sub)
apply (rule_tac p=p2 in knows_max'_sub_spies', simp+)
apply (drule_tac H="spies evs" in parts_sub)
by (auto dest: knows'_sub_knows [THEN subsetD] priK_notin_initState_Friend)
subsection\<open>general guardedness properties\<close>
lemma agl_guard [intro]: "I:agl ==> I:guard n Ks"
by (erule agl.induct, auto)
lemma Says_to_knows_max'_guard: "[| Says A' C \<lbrace>A'',r,I,L\<rbrace>:set evs;
Guard n Ks (knows_max' C evs) |] ==> L:guard n Ks"
by (auto dest: Says_to_knows_max')
lemma Says_from_knows_max'_guard: "[| Says C A' \<lbrace>A'',r,I,L\<rbrace>:set evs;
Guard n Ks (knows_max' C evs) |] ==> L:guard n Ks"
by (auto dest: Says_from_knows_max')
lemma Says_Nonce_not_used_guard: "[| Says A' B \<lbrace>A'',r,I,L\<rbrace>:set evs;
Nonce n ~:used evs |] ==> L:guard n Ks"
by (drule not_used_not_parts, auto)
subsection\<open>guardedness of messages\<close>
lemma chain_guard [iff]: "chain B ofr A L C:guard n {priK A}"
by (case_tac "ofr=n", auto simp: chain_def sign_def)
lemma chain_guard_Nonce_neq [intro]: "n ~= ofr
==> chain B ofr A' L C:guard n {priK A}"
by (auto simp: chain_def sign_def)
lemma anchor_guard [iff]: "anchor A n' B:guard n {priK A}"
by (case_tac "n'=n", auto simp: anchor_def)
lemma anchor_guard_Nonce_neq [intro]: "n ~= n'
==> anchor A' n' B:guard n {priK A}"
by (auto simp: anchor_def)
lemma reqm_guard [intro]: "I:agl ==> reqm A r n' I B:guard n {priK A}"
by (case_tac "n'=n", auto simp: reqm_def)
lemma reqm_guard_Nonce_neq [intro]: "[| n ~= n'; I:agl |]
==> reqm A' r n' I B:guard n {priK A}"
by (auto simp: reqm_def)
lemma prom_guard [intro]: "[| I:agl; J:agl; L:guard n {priK A} |]
==> prom B ofr A r I L J C:guard n {priK A}"
by (auto simp: prom_def)
lemma prom_guard_Nonce_neq [intro]: "[| n ~= ofr; I:agl; J:agl;
L:guard n {priK A} |] ==> prom B ofr A' r I L J C:guard n {priK A}"
by (auto simp: prom_def)
subsection\<open>Nonce uniqueness\<close>
lemma uniq_Nonce_in_chain [dest]: "Nonce k:parts {chain B ofr A L C} ==> k=ofr"
by (auto simp: chain_def sign_def)
lemma uniq_Nonce_in_anchor [dest]: "Nonce k:parts {anchor A n B} ==> k=n"
by (auto simp: anchor_def chain_def sign_def)
lemma uniq_Nonce_in_reqm [dest]: "[| Nonce k:parts {reqm A r n I B};
I:agl |] ==> k=n"
by (auto simp: reqm_def dest: no_Nonce_in_agl)
lemma uniq_Nonce_in_prom [dest]: "[| Nonce k:parts {prom B ofr A r I L J C};
I:agl; J:agl; Nonce k ~:parts {L} |] ==> k=ofr"
by (auto simp: prom_def dest: no_Nonce_in_agl no_Nonce_in_appdel)
subsection\<open>requests are guarded\<close>
lemma req_imp_Guard [rule_format]: "[| evs:p2; A ~:bad |] ==>
req A r n I B:set evs --> Guard n {priK A} (spies evs)"
apply (erule p2.induct, simp)
apply (simp add: req_def knows.simps, safe)
apply (erule in_synth_Guard, erule Guard_analz, simp)
by (auto simp: req_def pro_def dest: Says_imp_knows_Spy)
lemma req_imp_Guard_Friend: "[| evs:p2; A ~:bad; req A r n I B:set evs |]
==> Guard n {priK A} (knows_max (Friend C) evs)"
apply (rule Guard_knows_max')
apply (rule_tac H="spies evs" in Guard_mono)
apply (rule req_imp_Guard, simp+)
apply (rule_tac B="spies' evs" in subset_trans)
apply (rule_tac p=p2 in knows_max'_sub_spies', simp+)
by (rule knows'_sub_knows)
subsection\<open>propositions are guarded\<close>
lemma pro_imp_Guard [rule_format]: "[| evs:p2; B ~:bad; A ~:bad |] ==>
pro B ofr A r I (cons M L) J C:set evs --> Guard ofr {priK A} (spies evs)"
apply (erule p2.induct) (* +3 subgoals *)
(* Nil *)
apply simp
(* Fake *)
apply (simp add: pro_def, safe) (* +4 subgoals *)
(* 1 *)
apply (erule in_synth_Guard, drule Guard_analz, simp, simp)
(* 2 *)
apply simp
(* 3 *)
apply (simp, simp add: req_def pro_def, blast)
(* 4 *)
apply (simp add: pro_def)
apply (blast dest: prom_inj Says_Nonce_not_used_guard Nonce_not_used_Guard)
(* 5 *)
apply simp
apply safe (* +1 subgoal *)
apply (simp add: pro_def)
apply (blast dest: prom_inj Says_Nonce_not_used_guard)
(* 6 *)
apply (simp add: pro_def)
apply (blast dest: Says_imp_knows_Spy)
(* Request *)
apply (simp add: pro_def)
apply (blast dest: prom_inj Says_Nonce_not_used_guard Nonce_not_used_Guard)
(* Propose *)
apply simp
apply safe (* +1 subgoal *)
(* 1 *)
apply (simp add: pro_def)
apply (blast dest: prom_inj Says_Nonce_not_used_guard)
(* 2 *)
apply (simp add: pro_def)
by (blast dest: Says_imp_knows_Spy)
lemma pro_imp_Guard_Friend: "[| evs:p2; B ~:bad; A ~:bad;
pro B ofr A r I (cons M L) J C:set evs |]
==> Guard ofr {priK A} (knows_max (Friend D) evs)"
apply (rule Guard_knows_max')
apply (rule_tac H="spies evs" in Guard_mono)
apply (rule pro_imp_Guard, simp+)
apply (rule_tac B="spies' evs" in subset_trans)
apply (rule_tac p=p2 in knows_max'_sub_spies', simp+)
by (rule knows'_sub_knows)
subsection\<open>data confidentiality:
no one other than the originator can decrypt the offers\<close>
lemma Nonce_req_notin_spies: "[| evs:p2; req A r n I B:set evs; A ~:bad |]
==> Nonce n ~:analz (spies evs)"
by (frule req_imp_Guard, simp+, erule Guard_Nonce_analz, simp+)
lemma Nonce_req_notin_knows_max_Friend: "[| evs:p2; req A r n I B:set evs;
A ~:bad; A ~= Friend C |] ==> Nonce n ~:analz (knows_max (Friend C) evs)"
apply (clarify, frule_tac C=C in req_imp_Guard_Friend, simp+)
apply (simp add: knows_max_def, drule Guard_invKey_keyset, simp+)
by (drule priK_notin_knows_max_Friend, auto simp: knows_max_def)
lemma Nonce_pro_notin_spies: "[| evs:p2; B ~:bad; A ~:bad;
pro B ofr A r I (cons M L) J C:set evs |] ==> Nonce ofr ~:analz (spies evs)"
by (frule pro_imp_Guard, simp+, erule Guard_Nonce_analz, simp+)
lemma Nonce_pro_notin_knows_max_Friend: "[| evs:p2; B ~:bad; A ~:bad;
A ~= Friend D; pro B ofr A r I (cons M L) J C:set evs |]
==> Nonce ofr ~:analz (knows_max (Friend D) evs)"
apply (clarify, frule_tac A=A in pro_imp_Guard_Friend, simp+)
apply (simp add: knows_max_def, drule Guard_invKey_keyset, simp+)
by (drule priK_notin_knows_max_Friend, auto simp: knows_max_def)
subsection\<open>forward privacy:
only the originator can know the identity of the shops\<close>
lemma forward_privacy_Spy: "[| evs:p2; B ~:bad; A ~:bad;
pro B ofr A r I (cons M L) J C:set evs |]
==> sign B (Nonce ofr) ~:analz (spies evs)"
by (auto simp:sign_def dest: Nonce_pro_notin_spies)
lemma forward_privacy_Friend: "[| evs:p2; B ~:bad; A ~:bad; A ~= Friend D;
pro B ofr A r I (cons M L) J C:set evs |]
==> sign B (Nonce ofr) ~:analz (knows_max (Friend D) evs)"
by (auto simp:sign_def dest:Nonce_pro_notin_knows_max_Friend )
subsection\<open>non-repudiability: an offer signed by B has been sent by B\<close>
lemma Crypt_reqm: "[| Crypt (priK A) X:parts {reqm A' r n I B}; I:agl |] ==> A=A'"
by (auto simp: reqm_def anchor_def chain_def sign_def dest: no_Crypt_in_agl)
lemma Crypt_prom: "[| Crypt (priK A) X:parts {prom B ofr A' r I L J C};
I:agl; J:agl |] ==> A=B | Crypt (priK A) X:parts {L}"
apply (simp add: prom_def anchor_def chain_def sign_def)
by (blast dest: no_Crypt_in_agl no_Crypt_in_appdel)
lemma Crypt_safeness: "[| evs:p2; A ~:bad |] ==> Crypt (priK A) X:parts (spies evs)
--> (EX B Y. Says A B Y:set evs & Crypt (priK A) X:parts {Y})"
apply (erule p2.induct)
(* Nil *)
apply simp
(* Fake *)
apply clarsimp
apply (drule_tac P="%G. Crypt (priK A) X:G" in parts_insert_substD, simp)
apply (erule disjE)
apply (drule_tac K="priK A" in Crypt_synth, simp+, blast, blast)
(* Request *)
apply (simp add: req_def, clarify)
apply (drule_tac P="%G. Crypt (priK A) X:G" in parts_insert_substD, simp)
apply (erule disjE)
apply (frule Crypt_reqm, simp, clarify)
apply (rule_tac x=B in exI, rule_tac x="reqm A r n I B" in exI, simp, blast)
(* Propose *)
apply (simp add: pro_def, clarify)
apply (drule_tac P="%G. Crypt (priK A) X:G" in parts_insert_substD, simp)
apply (rotate_tac -1, erule disjE)
apply (frule Crypt_prom, simp, simp)
apply (rotate_tac -1, erule disjE)
apply (rule_tac x=C in exI)
apply (rule_tac x="prom B ofr Aa r I (cons M L) J C" in exI, blast)
apply (subgoal_tac "cons M L:parts (spies evsp)")
apply (drule_tac G="{cons M L}" and H="spies evsp" in parts_trans, blast, blast)
apply (drule Says_imp_spies, rotate_tac -1, drule parts.Inj)
apply (drule parts.Snd, drule parts.Snd, drule parts.Snd)
by auto
lemma Crypt_Hash_imp_sign: "[| evs:p2; A ~:bad |] ==>
Crypt (priK A) (Hash X):parts (spies evs)
--> (EX B Y. Says A B Y:set evs & sign A X:parts {Y})"
apply (erule p2.induct)
(* Nil *)
apply simp
(* Fake *)
apply clarsimp
apply (drule_tac P="%G. Crypt (priK A) (Hash X):G" in parts_insert_substD)
apply simp
apply (erule disjE)
apply (drule_tac K="priK A" in Crypt_synth, simp+, blast, blast)
(* Request *)
apply (simp add: req_def, clarify)
apply (drule_tac P="%G. Crypt (priK A) (Hash X):G" in parts_insert_substD)
apply simp
apply (erule disjE)
apply (frule Crypt_reqm, simp+)
apply (rule_tac x=B in exI, rule_tac x="reqm Aa r n I B" in exI)
apply (simp add: reqm_def sign_def anchor_def no_Crypt_in_agl)
apply (simp add: chain_def sign_def, blast)
(* Propose *)
apply (simp add: pro_def, clarify)
apply (drule_tac P="%G. Crypt (priK A) (Hash X):G" in parts_insert_substD)
apply simp
apply (rotate_tac -1, erule disjE)
apply (simp add: prom_def sign_def no_Crypt_in_agl no_Crypt_in_appdel)
apply (simp add: chain_def sign_def)
apply (rotate_tac -1, erule disjE)
apply (rule_tac x=C in exI)
apply (rule_tac x="prom B ofr Aa r I (cons M L) J C" in exI)
apply (simp add: prom_def chain_def sign_def)
apply (erule impE)
apply (blast dest: get_ML parts_sub)
apply (blast del: MPair_parts)+
done
lemma sign_safeness: "[| evs:p2; A ~:bad |] ==> sign A X:parts (spies evs)
--> (EX B Y. Says A B Y:set evs & sign A X:parts {Y})"
apply (clarify, simp add: sign_def, frule parts.Snd)
apply (blast dest: Crypt_Hash_imp_sign [unfolded sign_def])
done
end
|
section \<open>Kleene Lattice Modules and Cylindric Lattice Modules\<close>
text \<open>Using this mathematical component requires downloading the Archive of Formal Proofs.\<close>
theory CKL_Module
imports CKL
begin
text \<open>This component shows that the semidirect product of a Kleene lattice and a
lattice with a least element forms a weak Kleene lattice. It also shows that the semidirect product
of a liberation Kleene lattice and a lattice with a least element forms a weak liberation Kleene lattice.
This is useful for establishing properties of relational fault models.\<close>
locale l_monoid_module =
fixes act :: "'a::l_monoid \<Rightarrow> 'b::bounded_lattice_bot \<Rightarrow> 'b" ("\<alpha>")
assumes m1: "\<alpha> (x \<cdot> y) p = \<alpha> x (\<alpha> y p)"
and m2: "\<alpha> (x + y) p = \<alpha> x p \<squnion> \<alpha> y p"
and m3: "\<alpha> x (p \<squnion> q) = \<alpha> x p \<squnion> \<alpha> x q"
and m4 [simp]: "\<alpha> 1 p = p"
and m5 [simp]: "\<alpha> 0 p = \<bottom>"
begin
lemma act_zero [simp]: "\<alpha> x \<bottom> = \<bottom>"
by (metis annir m1 m5)
end
locale kleene_lattice_module = l_monoid_module \<alpha> for \<alpha> +
assumes m6: "p \<squnion> \<alpha> (x::'a::kleene_lattice) q \<le> q \<Longrightarrow> \<alpha> (x\<^sup>\<star>) p \<le> q"
locale cylindric_kleene_lattice_module = cylindric_kleene_lattice cyl + kleene_lattice_module \<alpha> for cyl \<alpha> +
constrains cyl :: "'a \<Rightarrow> 'b::kleene_lattice \<Rightarrow> 'b"
and \<alpha> :: "'b \<Rightarrow> 'c::bounded_lattice_bot \<Rightarrow> 'c"
locale liberation_kleene_lattice_module = liberation_kleene_lattice lib + kleene_lattice_module \<alpha> for lib \<alpha> +
constrains lib :: "'a \<Rightarrow> 'b::kleene_lattice"
and \<alpha> :: "'b \<Rightarrow> 'c::bounded_lattice_bot \<Rightarrow> 'c"
definition "plus_prod (x::'a::l_monoid \<times> 'b::bounded_lattice_bot) y = (fst x + fst y,snd x \<squnion> snd y)"
definition "meet_prod (x::'a::l_monoid \<times> 'b::bounded_lattice_bot) y = (fst x \<sqinter> fst y, snd x \<sqinter> snd y)"
definition "zero_prod = (0,\<bottom>)"
definition "one_prod = (1,\<bottom>)"
definition "le_prod x y = (fst x \<le> fst y \<and> snd x \<le> snd y)"
context l_monoid_module
begin
lemma plus_prod_assoc: "plus_prod x (plus_prod y z) = plus_prod (plus_prod x y) z"
unfolding plus_prod_def by (simp add: join.sup_assoc sup_assoc)
lemma plus_prod_comm: "plus_prod x y = plus_prod y x"
unfolding plus_prod_def by (simp add: inf_sup_aci(5) join.sup.commute)
lemma plus_prod_idem [simp]: "plus_prod x x = x"
unfolding plus_prod_def by simp
lemma meet_prod_assoc: "meet_prod x (meet_prod y z) = meet_prod (meet_prod x y) z"
unfolding meet_prod_def by (simp add: inf.assoc)
lemma meet_prod_comm: "meet_prod x y = meet_prod y x"
unfolding meet_prod_def by (simp add: inf_commute)
lemma meet_prod_idem [simp]: "meet_prod x x = x"
by (simp add: meet_prod_def)
lemma plus_meet_prod_absorp1 [simp]: "plus_prod x (meet_prod x y) = x"
unfolding plus_prod_def meet_prod_def by simp
lemma plus_meet_prod_absorp2 [simp]: "meet_prod x (plus_prod x y) = x"
unfolding plus_prod_def meet_prod_def by simp
lemma zero_prod_least1 [simp]: "plus_prod x zero_prod = x"
unfolding plus_prod_def zero_prod_def by simp
lemma zero_prod_least2 [simp]: "meet_prod x zero_prod = zero_prod"
unfolding meet_prod_def zero_prod_def by simp
definition "sd_prod x y = (fst x \<cdot> fst y, snd x \<squnion> \<alpha> (fst x) (snd y))"
lemma sd_prod_assoc: "sd_prod x (sd_prod y z) = sd_prod (sd_prod x y) z"
unfolding sd_prod_def
by (smt fstI l_monoid_module.m3 l_monoid_module_axioms m1 mult.assoc sndI sup.commute sup.left_commute)
lemma sd_prod_onel [simp]: "sd_prod one_prod x = x"
unfolding sd_prod_def one_prod_def by simp
lemma sd_prod_oner [simp]: "sd_prod x one_prod = x"
unfolding sd_prod_def one_prod_def by simp
lemma sd_prod_zerol [simp]: "sd_prod zero_prod x = zero_prod"
unfolding sd_prod_def zero_prod_def by simp
lemma "sd_prod x zero_prod = zero_prod"
(*nitpick*)
oops
lemma sd_prod_distl: "sd_prod x (plus_prod y z) = plus_prod (sd_prod x y) (sd_prod x z)"
unfolding sd_prod_def plus_prod_def by (simp add: distrib_left m3 sup_assoc sup_left_commute)
lemma sd_prod_distr: "sd_prod (plus_prod x y) z = plus_prod (sd_prod x z) (sd_prod y z)"
unfolding sd_prod_def plus_prod_def by (simp add: m2 sup_assoc sup_left_commute)
end
context kleene_lattice_module
begin
definition "star_prod x = ((fst x)\<^sup>\<star>, \<alpha> ((fst x)\<^sup>\<star>) (snd x))"
lemma star_prod_unfoldl: "plus_prod one_prod (sd_prod x (star_prod x)) = star_prod x"
unfolding star_prod_def plus_prod_def sd_prod_def one_prod_def
by (metis (no_types, lifting) fst_conv inf_sup_aci(5) l_monoid_module.m4 l_monoid_module_axioms m1 m2 snd_conv star_unfoldl_eq sup_bot.right_neutral)
lemma star_prod_unfoldr: "star_prod x = plus_prod one_prod (sd_prod (star_prod x) x)"
unfolding star_prod_def plus_prod_def sd_prod_def one_prod_def
by simp
lemma star_prod_inductl: "le_prod (sd_prod x y) y \<Longrightarrow> le_prod (sd_prod (star_prod x) y) y"
unfolding le_prod_def sd_prod_def star_prod_def
using m6 by auto
lemma star_prod_inductr: "le_prod (sd_prod y x) y \<Longrightarrow> le_prod (sd_prod y (star_prod x)) y"
unfolding le_prod_def sd_prod_def star_prod_def
by (metis (no_types, lifting) conway.dagger_plus_one distrib_left fst_conv join.sup.orderE m1 mult.right_neutral snd_conv star_inductr_var_equiv)
end
context cylindric_kleene_lattice_module
begin
definition "cyl_prod i x = (cyl i (fst x), \<alpha> (cyl i 1) (snd x))"
lemma cyl_prod_prop: "cyl i (fst x) = cyl i 1 \<cdot> (fst x) \<cdot> cyl i 1 \<Longrightarrow> cyl_prod i x = sd_prod (cyl i 1, \<bottom>) (sd_prod x (cyl i 1, \<bottom>))"
unfolding cyl_prod_def sd_prod_def by (simp add: mult.assoc)
end
context liberation_kleene_lattice_module
begin
lemma l1_prod: "sd_prod (lib i, \<bottom>) zero_prod = zero_prod"
unfolding sd_prod_def zero_prod_def by simp
lemma l2_prod: "le_prod one_prod (lib i, \<bottom>::'c::bounded_lattice_bot)"
unfolding le_prod_def one_prod_def by (simp add: l2)
lemma l3_prod:
assumes "\<alpha> (lib i) (p \<sqinter> \<alpha> (lib i) q) = \<alpha> (lib i) p \<sqinter> \<alpha> (lib i) q"
shows "sd_prod (lib i,\<bottom>) (meet_prod (x,p) (sd_prod (lib i,\<bottom>) (y,q))) = meet_prod (sd_prod (lib i,\<bottom>) (x,p)) (sd_prod (lib i,\<bottom>) (y,q))"
unfolding sd_prod_def meet_prod_def by (simp add: assms l3)
lemma l4_prod: "sd_prod (meet_prod (x,p) (sd_prod (y,q) (lib i,\<bottom>))) (lib i,\<bottom>) = meet_prod (sd_prod (x,p) (lib i,\<bottom>)) (sd_prod (y,q) (lib i,\<bottom>))"
unfolding sd_prod_def meet_prod_def by (simp add: l4)
lemma l5_prod: "sd_prod (lib i,\<bottom>) (lib j,\<bottom>) = sd_prod (lib j,\<bottom>) (lib i,\<bottom>)"
unfolding sd_prod_def by (simp add: l5)
lemma l6_prod: "i \<noteq> j \<Longrightarrow> meet_prod (lib i,\<bottom>) (lib j,\<bottom>) = one_prod"
unfolding meet_prod_def one_prod_def by (simp add: l6)
lemma l7_prod: "sd_prod (lib (i::'a),\<bottom>) (meet_prod (lib j,\<bottom>) (lib k,\<bottom>)) = meet_prod (sd_prod (lib i,\<bottom>) (lib j,\<bottom>)) (sd_prod (lib i,\<bottom>) (lib k,\<bottom>))"
unfolding sd_prod_def meet_prod_def by (simp add: l7)
lemma l8_prod: "sd_prod (meet_prod (lib i,\<bottom>) (lib j,\<bottom>)) (lib k,\<bottom>) = meet_prod (sd_prod (lib i,\<bottom>) (lib k,\<bottom>)) (sd_prod (lib j,\<bottom>) (lib k,\<bottom>))"
unfolding sd_prod_def meet_prod_def by (simp add: l8)
end
end
|
classdef ConstraintEvalTypeEnum < matlab.mixin.SetGet
%ConstraintEvalTypeEnum Enumerates how a constraint is evaluated:
% against fixed numeric bounds, or by comparison with another state.
enumeration
FixedBounds('Fixed Bounds');
StateComparison('State Comparison');
end
properties
name char = '';
end
methods
function obj = ConstraintEvalTypeEnum(name)
obj.name = name;
end
end
methods(Static)
function [listBoxStr, enums] = getListBoxStr()
m = enumeration('ConstraintEvalTypeEnum');
[~,I] = sort({m.name});
listBoxStr = {m(I).name};
enums = m(I);
end
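% Example (hypothetical usage): fill a list box and recover the member
% the user selected, where listBoxValue is a 1-based selection index:
%   [strs, enums] = ConstraintEvalTypeEnum.getListBoxStr();
%   selectedEnum = enums(listBoxValue);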
function [ind, enum] = getIndForName(name)
m = enumeration('ConstraintEvalTypeEnum');
[~,I] = sort({m.name});
m = m(I);
ind = find(ismember({m.name},name),1,'first');
enum = m(ind);
end
function [enum, ind] = getEnumForListboxStr(nameStr)
m = enumeration('ConstraintEvalTypeEnum');
ind = find(ismember({m.name},nameStr),1,'first');
enum = m(ind);
end
end
end
|
module Neural.Util where
import Control.Monad
import Control.Monad.ST
import Control.Parallel.Strategies
import Data.Array.ST
import Data.STRef
import Numeric.LinearAlgebra
import System.Random
-- shamelessly nicked from https://wiki.haskell.org/Random_shuffle
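-- Fisher-Yates shuffle in the ST monad: position i is swapped with a
-- uniformly chosen position from i..n, threading the generator through STRef g.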
shuffle' :: [a] -> StdGen -> ([a],StdGen)
shuffle' xs gen = runST (do
g <- newSTRef gen
let randomRST lohi = do
(a,s') <- liftM (randomR lohi) (readSTRef g)
writeSTRef g s'
return a
ar <- newArray n xs
xs' <- forM [1..n] $ \i -> do
j <- randomRST (i,n)
vi <- readArray ar i
vj <- readArray ar j
writeArray ar j vi
return vj
gen' <- readSTRef g
return (xs',gen'))
where
n = length xs
newArray :: Int -> [a] -> ST s (STArray s Int a)
newArray n xs = newListArray (1,n) xs
shuffleIO :: [a] -> IO [a]
shuffleIO xs = getStdRandom (shuffle' xs)
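-- zipWith with the result list evaluated in parallel, one spark per element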
parZipWith :: Strategy c -> (a -> b -> c) -> [a] -> [b] -> [c]
parZipWith strat f x y = zipWith f x y `using` parList strat
parZipWith3 :: Strategy d -> (a -> b -> c -> d) -> [a] -> [b] -> [c] -> [d]
parZipWith3 strat f x y z = zipWith3 f x y z `using` parList strat
fullyFlatten :: [Matrix Double] -> Vector Double
fullyFlatten = vjoin . (parMap rdeepseq flatten)
|
function pde = sincosdata3
%% SINCOSDATA3 trigonometric data for Poisson equation in 3-D
%
% u = cos(pi*x)*cos(pi*y)*cos(pi*z);
% f = - Delta u = 3*pi^2*cos(pi*x)*cos(pi*y)*cos(pi*z);
% Du = (-pi*sin(pi*x)*cos(pi*y)*cos(pi*z),
% -pi*cos(pi*x)*sin(pi*y)*cos(pi*z),
% -pi*cos(pi*x)*cos(pi*y)*sin(pi*z));
%
% Copyright (C) Long Chen. See COPYRIGHT.txt for details.
pde = struct('f',@f,'exactu',@exactu,'g_D',@g_D,'Du',@Du,'g_N',@g_N, 'phi', @phi);
% load data (right hand side function)
function s = f(p)
x = p(:,1); y = p(:,2); z = p(:,3);
s = 3*pi^2*cos(pi*x).*cos(pi*y).*cos(pi*z);
end
% exact solution
function s = exactu(p)
x = p(:,1); y = p(:,2); z = p(:,3);
s = cos(pi*x).*cos(pi*y).*cos(pi*z); % for Neumann boundary condition, int_u = 0
end
% Dirichlet boundary condition
function s = g_D(p)
s = exactu(p);
end
% Neumann boundary condition
function f = g_N(p)
f = zeros(size(p,1),1);
x = p(:,1); y = p(:,2); z = p(:,3);
uprime = [-pi*sin(pi*x).*cos(pi*y).*cos(pi*z) ...
-pi*cos(pi*x).*sin(pi*y).*cos(pi*z) ...
-pi*cos(pi*x).*cos(pi*y).*sin(pi*z)];
downbd = (abs(z)<eps); % n = (0,0,-1)
f(downbd) = -uprime(downbd,3);
upbd = (abs(z-1)<eps);% n = (0,0,1)
f(upbd) = uprime(upbd,3);
leftbd = (abs(x)<eps); % n = (-1,0,0)
f(leftbd) = -uprime(leftbd,1);
rightbd = (abs(x-1)<eps); % n = (1,0,0)
f(rightbd) = uprime(rightbd,1);
backbd = (abs(y-1)<eps); % n =(0,1,0)
f(backbd) = uprime(backbd,2);
frontbd = (abs(y)<eps); % n = (0,-1,0)
f(frontbd) = -uprime(frontbd,2);
end
% Derivative of the exact solution
function s = Du(p)
x = p(:,1); y = p(:,2); z = p(:,3);
s(:,1) = -pi*sin(pi*x).*cos(pi*y).*cos(pi*z);
s(:,2) = -pi*cos(pi*x).*sin(pi*y).*cos(pi*z);
s(:,3) = -pi*cos(pi*x).*cos(pi*y).*sin(pi*z);
end
function s = phi(p) % level set function
x = p(:,1); y = p(:,2); z = p(:,3);
s = x.^2 + y.^2 +z.^2 - 0.75^2;
end
end
|
module Replica.Other.String
import Data.List
import Data.String
%default total
export
separator : Nat -> String
separator = pack . flip replicate '-'
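-- e.g. separator 3 == "---"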
export
withOffset : Nat -> String -> String
withOffset k = (++) (pack $ replicate k ' ')
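-- e.g. withOffset 2 "x" == "  x"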
|
#include <Parsers/ASTGrantQuery.h>
#include <Parsers/ASTRoleList.h>
#include <Common/quoteString.h>
#include <boost/range/adaptor/map.hpp>
#include <boost/range/algorithm/sort.hpp>
#include <boost/range/algorithm_ext/push_back.hpp>
#include <map>
namespace DB
{
namespace
{
using KeywordToColumnsMap = std::map<std::string_view /* keyword */, std::vector<std::string_view> /* columns */>;
using TableToAccessMap = std::map<String /* database_and_table_name */, KeywordToColumnsMap>;
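// Groups the access-rights elements by target table and then by access keyword,
// collecting sorted column lists, so that rights print in the compact
// "SELECT(col1, col2) ON db.table" form.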
TableToAccessMap prepareTableToAccessMap(const AccessRightsElements & elements)
{
TableToAccessMap res;
for (const auto & element : elements)
{
String database_and_table_name;
if (element.any_database)
{
if (element.any_table)
database_and_table_name = "*.*";
else
database_and_table_name = "*." + backQuoteIfNeed(element.table);
}
else if (element.database.empty())
{
if (element.any_table)
database_and_table_name = "*";
else
database_and_table_name = backQuoteIfNeed(element.table);
}
else
{
if (element.any_table)
database_and_table_name = backQuoteIfNeed(element.database) + ".*";
else
database_and_table_name = backQuoteIfNeed(element.database) + "." + backQuoteIfNeed(element.table);
}
KeywordToColumnsMap & keyword_to_columns = res[database_and_table_name];
for (const auto & keyword : element.access_flags.toKeywords())
boost::range::push_back(keyword_to_columns[keyword], element.columns);
}
for (auto & keyword_to_columns : res | boost::adaptors::map_values)
{
for (auto & columns : keyword_to_columns | boost::adaptors::map_values)
boost::range::sort(columns);
}
return res;
}
void formatColumnNames(const std::vector<std::string_view> & columns, const IAST::FormatSettings & settings)
{
if (columns.empty())
return;
settings.ostr << "(";
bool need_comma_after_column_name = false;
for (const auto & column : columns)
{
if (std::exchange(need_comma_after_column_name, true))
settings.ostr << ", ";
settings.ostr << backQuoteIfNeed(column);
}
settings.ostr << ")";
}
}
String ASTGrantQuery::getID(char) const
{
return "GrantQuery";
}
ASTPtr ASTGrantQuery::clone() const
{
return std::make_shared<ASTGrantQuery>(*this);
}
void ASTGrantQuery::formatImpl(const FormatSettings & settings, FormatState &, FormatStateStacked) const
{
settings.ostr << (settings.hilite ? hilite_keyword : "") << ((kind == Kind::GRANT) ? "GRANT" : "REVOKE")
<< (settings.hilite ? hilite_none : "") << " ";
if (grant_option && (kind == Kind::REVOKE))
settings.ostr << (settings.hilite ? hilite_keyword : "") << "GRANT OPTION FOR " << (settings.hilite ? hilite_none : "");
bool need_comma = false;
for (const auto & [database_and_table, keyword_to_columns] : prepareTableToAccessMap(access_rights_elements))
{
for (const auto & [keyword, columns] : keyword_to_columns)
{
if (std::exchange(need_comma, true))
settings.ostr << ", ";
settings.ostr << (settings.hilite ? hilite_keyword : "") << keyword << (settings.hilite ? hilite_none : "");
formatColumnNames(columns, settings);
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << " ON " << (settings.hilite ? hilite_none : "") << database_and_table;
}
settings.ostr << (settings.hilite ? hilite_keyword : "") << ((kind == Kind::GRANT) ? " TO " : " FROM ") << (settings.hilite ? hilite_none : "");
to_roles->format(settings);
if (grant_option && (kind == Kind::GRANT))
settings.ostr << (settings.hilite ? hilite_keyword : "") << " WITH GRANT OPTION" << (settings.hilite ? hilite_none : "");
}
}
|
import data.real.basic
-- BEGIN
example {x y : ℝ} (h₀ : x ≤ y) (h₁ : ¬ y ≤ x) : x ≤ y ∧ x ≠ y :=
begin
have h : x ≠ y,
{ intro h, exact h₁ (by rw h), },
exact ⟨h₀, h⟩,
end
example {x y : ℝ} (h₀ : x ≤ y) (h₁ : ¬ y ≤ x) : x ≤ y ∧ x ≠ y :=
begin
split,
exact h₀,
intro h,
exact h₁ (by rw h),
end
/- using λ abstraction -/
example {x y : ℝ} (h₀ : x ≤ y) (h₁ : ¬ y ≤ x) : x ≤ y ∧ x ≠ y :=
⟨h₀, λ h, h₁ (by rw h)⟩
-- END
|
/* vim:set ts=3 sw=3 sts=3 et: */
/**
* Copyright © 2008-2013 Last.fm Limited
*
* This file is part of libmoost.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <boost/test/unit_test.hpp>
#include <boost/test/test_tools.hpp>
#include <functional>
#include <vector>
#include "../../include/moost/algorithm/inplace_set_intersection.hpp"
using namespace moost::algorithm;
BOOST_AUTO_TEST_SUITE( inplace_set_intersection_test )
BOOST_AUTO_TEST_CASE( test_set_intersection )
{
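// the two sorted ranges share one buffer: {2,3,5,6} followed by {1,3,4,5};
// their intersection {3,5} is written in place at the front of the buffer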
std::vector<int> foo;
foo.push_back(2); foo.push_back(3); foo.push_back(5); foo.push_back(6);
foo.push_back(1); foo.push_back(3); foo.push_back(4); foo.push_back(5);
std::vector<int>::iterator it_end = inplace_set_intersection(foo.begin(), foo.begin() + 4, foo.begin() + 4, foo.end());
BOOST_REQUIRE(it_end == foo.begin() + 2);
BOOST_REQUIRE_EQUAL(foo[0], 3);
BOOST_REQUIRE_EQUAL(foo[1], 5);
}
BOOST_AUTO_TEST_CASE( test_set_intersection_comp )
{
std::vector<int> foo;
foo.push_back(6); foo.push_back(5); foo.push_back(3); foo.push_back(2);
foo.push_back(5); foo.push_back(4); foo.push_back(3); foo.push_back(1);
std::vector<int>::iterator it_end = inplace_set_intersection(foo.begin(), foo.begin() + 4,
foo.begin() + 4, foo.end(),
std::greater<int>());
BOOST_REQUIRE(it_end == foo.begin() + 2);
BOOST_REQUIRE_EQUAL(foo[0], 5);
BOOST_REQUIRE_EQUAL(foo[1], 3);
}
BOOST_AUTO_TEST_SUITE_END()
|
%%% INPUT THE IMAGE FILE NAME:
graphout = 0;
if ~exist('fc')|~exist('cc')|~exist('kc')|~exist('alpha_c'),
fprintf(1,'No intrinsic camera parameters available. Maybe, need to load Calib_Results.mat\n');
return;
end;
KK = [fc(1) alpha_c*fc(1) cc(1);0 fc(2) cc(2) ; 0 0 1];
disp('Program that undistorts a whole sequence of images (works with bmp only so far... needs some debugging)');
disp('The intrinsic camera parameters are assumed to be known (previously computed)');
disp('After undistortion, the intrinsic parameters fc, cc, alpha_c remain unchanged. The distortion coefficient vector kc is zero');
dir;
fprintf(1,'\n');
seq_name = input('Basename of sequence images (without number nor suffix): ','s');
format_image_seq = '0';
while format_image_seq == '0',
format_image_seq = input('Image format: ([]=''r''=''ras'', ''b''=''bmp'', ''t''=''tif'', ''p''=''pgm'', ''j''=''jpg'', ''m''=''ppm'') ','s');
if isempty(format_image_seq),
format_image_seq = 'ras';
else
switch lower(format_image_seq(1))
case 'm'
format_image_seq = 'ppm';
case 'b'
format_image_seq = 'bmp';
case 't'
format_image_seq = 'tif';
case 'p'
format_image_seq = 'pgm';
case 'j'
format_image_seq = 'jpg';
case 'r'
format_image_seq = 'ras';
otherwise
disp('Invalid image format');
format_image_seq = '0'; % Ask for format once again
end;
end;
end;
ima_sequence = dir( [ seq_name '*.' format_image_seq]);
if isempty(ima_sequence),
fprintf(1,'No image found\n');
return;
end;
ima_name = ima_sequence(1).name;
if format_image_seq(1) == 'p',
if format_image_seq(2) == 'p',
I = double(loadppm(ima_name));
else
I = double(loadpgm(ima_name));
end;
else
if format_image_seq(1) == 'r',
I = readras(ima_name);
else
I = double(imread(ima_name));
end;
end;
[ny,nx,nc] = size(I);
% Pre-compute the necessary indices and blending coefficients to enable quick rectification:
[Irec_junk,ind_new,ind_1,ind_2,ind_3,ind_4,a1,a2,a3,a4] = rect_index(zeros(ny,nx),eye(3),fc,cc,kc,alpha_c,KK);
n_seq = length(ima_sequence);
for kk = 1:n_seq,
ima_name = ima_sequence(kk).name;
fprintf(1,'Loading original image %s...',ima_name);
%%% READ IN IMAGE:
if format_image_seq(1) == 'p',
if format_image_seq(2) == 'p',
I = double(loadppm(ima_name));
else
I = double(loadpgm(ima_name));
end;
else
if format_image_seq(1) == 'r',
I = readras(ima_name);
else
I = double(imread(ima_name));
end;
end;
[ny,nx,nc] = size(I);
if graphout,
figure(2);
image(uint8(I));
drawnow;
end;
I2 = zeros(ny,nx,nc);
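% undistort each colour channel by bilinear interpolation, blending the four
% precomputed source pixels (ind_1..ind_4) with the weights a1..a4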
for ii = 1:nc,
Iii = I(:,:,ii);
I2ii = zeros(ny,nx);
I2ii(ind_new) = uint8(a1 .* Iii(ind_1) + a2 .* Iii(ind_2) + a3 .* Iii(ind_3) + a4 .* Iii(ind_4));
I2(:,:,ii) = I2ii;
end;
I2 = uint8(I2);
if graphout,
figure(3);
image(I2);
drawnow;
end;
ima_name2 = ['undist_' ima_name];
fprintf(1,'Saving undistorted image under %s...\n',ima_name2);
if format_image_seq(1) == 'p',
if format_image_seq(2) == 'p',
saveppm(ima_name2,I2);
else
savepgm(ima_name2,I2);
end;
else
if format_image_seq(1) == 'r',
writeras(ima_name2,I2,gray(256));
else
imwrite(I2,ima_name2,format_image_seq);
end;
end;
end;
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
(* prefix:
gga_x_fd_lb94_params *params;
assert(p->params != NULL);
params = (gga_x_fd_lb94_params * )(p->params);
*)
(* replace: "fd_int0\(" -> "xc_integrate(func0, NULL, 0.0, " *)
(* replace: "fd_int1\(" -> "xc_integrate(func1, NULL, 0.0, " *)
`diff/fd_int0` := proc(g, x) diff(g, x) * fd_f_inter(0, g) end proc:
`diff/fd_int1` := proc(g, x) diff(g, x) * fd_f_inter(1, g) end proc:
fd_beta := params_a_beta:
fd_csi := 2^(1/3):
fd_f_inter := (n, t) -> -3/4 * fd_beta*fd_csi*log(t)^n /
(1 + 3*fd_beta*fd_csi*t*log(fd_csi*t + sqrt((fd_csi*t)^2 + 1))):
fd_f0 := s -> 1 - s*(fd_int0(s)*log(s) - fd_int1(s)):
fd_f := x -> fd_f0(X2S*x):
f := (rs, z, xt, xs0, xs1) -> gga_exchange(fd_f, rs, z, xs0, xs1):
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import algebra.module.pi
/-!
# Bundled hom instances for module and multiplicative actions
This file defines instances for module, mul_action and related structures on bundled `_hom` types.
These are analogous to the instances in `algebra.module.pi`, but for bundled instead of unbundled
functions.
-/
variables {R S A B : Type*}
namespace add_monoid_hom
section
variables [monoid R] [monoid S] [add_monoid A] [add_comm_monoid B]
variables [distrib_mul_action R B] [distrib_mul_action S B]
instance : distrib_mul_action R (A →+ B) :=
{ smul := λ r f,
{ to_fun := r • f,
map_zero' := by simp,
map_add' := λ x y, by simp [smul_add] },
one_smul := λ f, by simp,
mul_smul := λ r s f, by simp [mul_smul],
smul_add := λ r f g, ext $ λ x, by simp [smul_add],
smul_zero := λ r, ext $ λ x, by simp [smul_zero] }
@[simp] lemma coe_smul (r : R) (f : A →+ B) : ⇑(r • f) = r • f := rfl
lemma smul_apply (r : R) (f : A →+ B) (x : A) : (r • f) x = r • f x := rfl
instance [smul_comm_class R S B] : smul_comm_class R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_comm _ _ _⟩
instance [has_scalar R S] [is_scalar_tower R S B] : is_scalar_tower R S (A →+ B) :=
⟨λ a b f, ext $ λ x, smul_assoc _ _ _⟩
end
instance [semiring R] [add_monoid A] [add_comm_monoid B] [module R B] :
module R (A →+ B) :=
{ add_smul := λ r s x, ext $ λ y, by simp [add_smul],
zero_smul := λ x, ext $ λ y, by simp [zero_smul],
..add_monoid_hom.distrib_mul_action }
end add_monoid_hom
|
{-
Basic theory about h-levels/n-types:
- Basic properties of isContr, isProp and isSet (definitions are in Prelude)
- Hedberg's theorem can be found in Cubical/Relation/Nullary/DecidableEq
-}
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Foundations.HLevels where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Function
open import Cubical.Foundations.Structure
open import Cubical.Functions.FunExtEquiv
open import Cubical.Foundations.GroupoidLaws
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Path
open import Cubical.Foundations.Transport
open import Cubical.Foundations.Univalence using (ua ; univalence)
open import Cubical.Data.Sigma
open import Cubical.Data.Nat using (ℕ; zero; suc; _+_; +-zero; +-comm)
HLevel : Type₀
HLevel = ℕ
private
variable
ℓ ℓ' ℓ'' ℓ''' : Level
A : Type ℓ
B : A → Type ℓ
C : (x : A) → B x → Type ℓ
D : (x : A) (y : B x) → C x y → Type ℓ
E : (x : A) (y : B x) → (z : C x y) → D x y z → Type ℓ
w x y z : A
n : HLevel
isOfHLevel : HLevel → Type ℓ → Type ℓ
isOfHLevel 0 A = isContr A
isOfHLevel 1 A = isProp A
isOfHLevel (suc (suc n)) A = (x y : A) → isOfHLevel (suc n) (x ≡ y)
isOfHLevelFun : (n : HLevel) {A : Type ℓ} {B : Type ℓ'} (f : A → B) → Type (ℓ-max ℓ ℓ')
isOfHLevelFun n f = ∀ b → isOfHLevel n (fiber f b)
TypeOfHLevel : ∀ ℓ → HLevel → Type (ℓ-suc ℓ)
TypeOfHLevel ℓ n = TypeWithStr ℓ (isOfHLevel n)
hProp hSet hGroupoid h2Groupoid : ∀ ℓ → Type (ℓ-suc ℓ)
hProp ℓ = TypeOfHLevel ℓ 1
hSet ℓ = TypeOfHLevel ℓ 2
hGroupoid ℓ = TypeOfHLevel ℓ 3
h2Groupoid ℓ = TypeOfHLevel ℓ 4
-- lower h-levels imply higher h-levels
isOfHLevelSuc : (n : HLevel) → isOfHLevel n A → isOfHLevel (suc n) A
isOfHLevelSuc 0 = isContr→isProp
isOfHLevelSuc 1 = isProp→isSet
isOfHLevelSuc (suc (suc n)) h a b = isOfHLevelSuc (suc n) (h a b)
isSet→isGroupoid : isSet A → isGroupoid A
isSet→isGroupoid = isOfHLevelSuc 2
isGroupoid→is2Groupoid : isGroupoid A → is2Groupoid A
isGroupoid→is2Groupoid = isOfHLevelSuc 3
isOfHLevelPlus : (m : HLevel) → isOfHLevel n A → isOfHLevel (m + n) A
isOfHLevelPlus zero hA = hA
isOfHLevelPlus (suc m) hA = isOfHLevelSuc _ (isOfHLevelPlus m hA)
isOfHLevelPlus' : (m : HLevel) → isOfHLevel m A → isOfHLevel (m + n) A
isOfHLevelPlus' {A = A} {n = n} m hA = subst (λ m → isOfHLevel m A) (+-comm n m) (isOfHLevelPlus n hA )
isContr→isOfHLevel : (n : HLevel) → isContr A → isOfHLevel n A
isContr→isOfHLevel {A = A} n cA = isOfHLevelPlus' 0 cA
isProp→isOfHLevelSuc : (n : HLevel) → isProp A → isOfHLevel (suc n) A
isProp→isOfHLevelSuc {A = A} n pA = isOfHLevelPlus' 1 pA
-- hlevel of path and dependent path types
isProp→isContrPath : isProp A → (x y : A) → isContr (x ≡ y)
isProp→isContrPath h x y = h x y , isProp→isSet h x y _
isContr→isContrPath : isContr A → (x y : A) → isContr (x ≡ y)
isContr→isContrPath cA = isProp→isContrPath (isContr→isProp cA)
isOfHLevelPath' : (n : HLevel) → isOfHLevel (suc n) A → (x y : A) → isOfHLevel n (x ≡ y)
isOfHLevelPath' 0 = isProp→isContrPath
isOfHLevelPath' (suc n) h x y = h x y
isOfHLevelPath'⁻ : (n : HLevel) → ((x y : A) → isOfHLevel n (x ≡ y)) → isOfHLevel (suc n) A
isOfHLevelPath'⁻ zero h x y = h x y .fst
isOfHLevelPath'⁻ (suc n) h = h
isOfHLevelPath : (n : HLevel) → isOfHLevel n A → (x y : A) → isOfHLevel n (x ≡ y)
isOfHLevelPath 0 h x y = isContr→isContrPath h x y
isOfHLevelPath (suc n) h x y = isOfHLevelSuc n (isOfHLevelPath' n h x y)
isOfHLevelPathP' : {A : I → Type ℓ} (n : HLevel)
→ isOfHLevel (suc n) (A i1)
→ (x : A i0) (y : A i1) → isOfHLevel n (PathP A x y)
isOfHLevelPathP' {A = A} n h x y = transport⁻ (λ i → isOfHLevel n (PathP≡Path A x y i))
(isOfHLevelPath' n h _ _)
isOfHLevelPathP : {A : I → Type ℓ} (n : HLevel)
→ isOfHLevel n (A i1)
→ (x : A i0) (y : A i1) → isOfHLevel n (PathP A x y)
isOfHLevelPathP {A = A} n h x y = transport⁻ (λ i → isOfHLevel n (PathP≡Path A x y i))
(isOfHLevelPath n h _ _)
-- h-level of isOfHLevel
isPropIsOfHLevel : (n : HLevel) → isProp (isOfHLevel n A)
isPropIsOfHLevel 0 = isPropIsContr
isPropIsOfHLevel 1 = isPropIsProp
isPropIsOfHLevel (suc (suc n)) f g i a b =
isPropIsOfHLevel (suc n) (f a b) (g a b) i
isPropIsSet : isProp (isSet A)
isPropIsSet = isPropIsOfHLevel 2
isPropIsGroupoid : isProp (isGroupoid A)
isPropIsGroupoid = isPropIsOfHLevel 3
isPropIs2Groupoid : isProp (is2Groupoid A)
isPropIs2Groupoid = isPropIsOfHLevel 4
TypeOfHLevel≡ : (n : HLevel) {X Y : TypeOfHLevel ℓ n} → ⟨ X ⟩ ≡ ⟨ Y ⟩ → X ≡ Y
TypeOfHLevel≡ n = Σ≡Prop (λ _ → isPropIsOfHLevel n)
-- Fillers for cubes from h-level
isSet→isSet' : isSet A → isSet' A
isSet→isSet' {A = A} Aset a₀₋ a₁₋ a₋₀ a₋₁ =
transport⁻ (PathP≡Path (λ i → a₋₀ i ≡ a₋₁ i) a₀₋ a₁₋) (Aset _ _ _ _)
isSet'→isSet : isSet' A → isSet A
isSet'→isSet {A = A} Aset' x y p q = Aset' p q refl refl
isSet→SquareP :
{A : I → I → Type ℓ}
(isSet : (i j : I) → isSet (A i j))
{a₀₀ : A i0 i0} {a₀₁ : A i0 i1} (a₀₋ : PathP (λ j → A i0 j) a₀₀ a₀₁)
{a₁₀ : A i1 i0} {a₁₁ : A i1 i1} (a₁₋ : PathP (λ j → A i1 j) a₁₀ a₁₁)
(a₋₀ : PathP (λ i → A i i0) a₀₀ a₁₀) (a₋₁ : PathP (λ i → A i i1) a₀₁ a₁₁)
→ SquareP A a₀₋ a₁₋ a₋₀ a₋₁
isSet→SquareP isset a₀₋ a₁₋ a₋₀ a₋₁ =
transport (sym (PathP≡Path _ _ _))
(isOfHLevelPathP' 1 (isset _ _) _ _ _ _ )
isGroupoid→isGroupoid' : isGroupoid A → isGroupoid' A
isGroupoid→isGroupoid' {A = A} Agpd a₀₋₋ a₁₋₋ a₋₀₋ a₋₁₋ a₋₋₀ a₋₋₁ =
transport⁻ (PathP≡Path (λ i → Square (a₋₀₋ i) (a₋₁₋ i) (a₋₋₀ i) (a₋₋₁ i)) a₀₋₋ a₁₋₋)
(isGroupoid→isPropSquare _ _ _ _ _ _)
where
isGroupoid→isPropSquare :
{a₀₀ a₀₁ : A} (a₀₋ : a₀₀ ≡ a₀₁)
{a₁₀ a₁₁ : A} (a₁₋ : a₁₀ ≡ a₁₁)
(a₋₀ : a₀₀ ≡ a₁₀) (a₋₁ : a₀₁ ≡ a₁₁)
→ isProp (Square a₀₋ a₁₋ a₋₀ a₋₁)
isGroupoid→isPropSquare a₀₋ a₁₋ a₋₀ a₋₁ =
transport⁻
(cong isProp (PathP≡Path (λ i → a₋₀ i ≡ a₋₁ i) a₀₋ a₁₋))
(Agpd _ _ _ _)
isGroupoid'→isGroupoid : isGroupoid' A → isGroupoid A
isGroupoid'→isGroupoid Agpd' x y p q r s = Agpd' r s refl refl refl refl
-- hlevels are preserved by retracts (and consequently equivalences)
isContrRetract
: ∀ {B : Type ℓ}
→ (f : A → B) (g : B → A)
→ (h : retract f g)
→ (v : isContr B) → isContr A
fst (isContrRetract f g h (b , p)) = g b
snd (isContrRetract f g h (b , p)) x = (cong g (p (f x))) ∙ (h x)
isPropRetract
: {B : Type ℓ}
(f : A → B) (g : B → A)
(h : (x : A) → g (f x) ≡ x)
→ isProp B → isProp A
isPropRetract f g h p x y i =
hcomp
(λ j → λ
{ (i = i0) → h x j
; (i = i1) → h y j})
(g (p (f x) (f y) i))
isSetRetract
: {B : Type ℓ}
(f : A → B) (g : B → A)
(h : (x : A) → g (f x) ≡ x)
→ isSet B → isSet A
isSetRetract f g h set x y p q i j =
hcomp (λ k → λ { (i = i0) → h (p j) k
; (i = i1) → h (q j) k
; (j = i0) → h x k
; (j = i1) → h y k})
(g (set (f x) (f y)
(cong f p) (cong f q) i j))
isGroupoidRetract
: {B : Type ℓ}
(f : A → B) (g : B → A)
(h : (x : A) → g (f x) ≡ x)
→ isGroupoid B → isGroupoid A
isGroupoidRetract f g h grp x y p q P Q i j k =
hcomp ((λ l → λ { (i = i0) → h (P j k) l
; (i = i1) → h (Q j k) l
; (j = i0) → h (p k) l
; (j = i1) → h (q k) l
; (k = i0) → h x l
; (k = i1) → h y l}))
(g (grp (f x) (f y) (cong f p) (cong f q)
(cong (cong f) P) (cong (cong f) Q) i j k))
is2GroupoidRetract
: {B : Type ℓ}
(f : A → B) (g : B → A)
(h : (x : A) → g (f x) ≡ x)
→ is2Groupoid B → is2Groupoid A
is2GroupoidRetract f g h grp x y p q P Q R S i j k l =
hcomp (λ r → λ { (i = i0) → h (R j k l) r
; (i = i1) → h (S j k l) r
; (j = i0) → h (P k l) r
; (j = i1) → h (Q k l) r
; (k = i0) → h (p l) r
; (k = i1) → h (q l) r
; (l = i0) → h x r
; (l = i1) → h y r})
(g (grp (f x) (f y) (cong f p) (cong f q)
(cong (cong f) P) (cong (cong f) Q)
(cong (cong (cong f)) R) (cong (cong (cong f)) S) i j k l))
isOfHLevelRetract
: (n : HLevel) {B : Type ℓ}
(f : A → B) (g : B → A)
(h : (x : A) → g (f x) ≡ x)
→ isOfHLevel n B → isOfHLevel n A
isOfHLevelRetract 0 = isContrRetract
isOfHLevelRetract 1 = isPropRetract
isOfHLevelRetract 2 = isSetRetract
isOfHLevelRetract 3 = isGroupoidRetract
isOfHLevelRetract 4 = is2GroupoidRetract
isOfHLevelRetract (suc (suc (suc (suc (suc n))))) f g h ofLevel x y p q P Q R S =
isOfHLevelRetract (suc n) (cong (cong (cong (cong f))))
(λ s i j k l →
hcomp (λ r → λ { (i = i0) → h (R j k l) r
; (i = i1) → h (S j k l) r
; (j = i0) → h (P k l) r
; (j = i1) → h (Q k l) r
; (k = i0) → h (p l) r
; (k = i1) → h (q l) r
; (l = i0) → h x r
; (l = i1) → h y r})
(g (s i j k l)))
(λ s i j k l m →
hcomp (λ n → λ { (i = i1) → s j k l m
; (j = i0) → h (R k l m) (i ∨ n)
; (j = i1) → h (S k l m) (i ∨ n)
; (k = i0) → h (P l m) (i ∨ n)
; (k = i1) → h (Q l m) (i ∨ n)
; (l = i0) → h (p m) (i ∨ n)
; (l = i1) → h (q m) (i ∨ n)
; (m = i0) → h x (i ∨ n)
; (m = i1) → h y (i ∨ n) })
(h (s j k l m) i))
(ofLevel (f x) (f y)
(cong f p) (cong f q)
(cong (cong f) P) (cong (cong f) Q)
(cong (cong (cong f)) R) (cong (cong (cong f)) S))
isOfHLevelRetractFromIso : {A : Type ℓ} {B : Type ℓ'} (n : HLevel) → Iso A B → isOfHLevel n B → isOfHLevel n A
isOfHLevelRetractFromIso n e hlev = isOfHLevelRetract n (Iso.fun e) (Iso.inv e) (Iso.leftInv e) hlev
isOfHLevelRespectEquiv : {A : Type ℓ} {B : Type ℓ'} → (n : HLevel) → A ≃ B → isOfHLevel n A → isOfHLevel n B
isOfHLevelRespectEquiv n eq = isOfHLevelRetract n (invEq eq) (eq .fst) (retEq eq)
isContrRetractOfConstFun : {A : Type ℓ} {B : Type ℓ'} (b₀ : B)
→ Σ[ f ∈ (B → A) ] ((x : A) → (f ∘ (λ _ → b₀)) x ≡ x)
→ isContr A
fst (isContrRetractOfConstFun b₀ ret) = ret .fst b₀
snd (isContrRetractOfConstFun b₀ ret) y = ret .snd y
-- h-level of Σ-types
isContrΣ : isContr A → ((x : A) → isContr (B x)) → isContr (Σ A B)
isContrΣ {A = A} {B = B} (a , p) q =
let h : (x : A) (y : B x) → (q x) .fst ≡ y
h x y = (q x) .snd y
in (( a , q a .fst)
, ( λ x i → p (x .fst) i
, h (p (x .fst) i) (transp (λ j → B (p (x .fst) (i ∨ ~ j))) i (x .snd)) i))
isContrΣ′ : (ca : isContr A) → isContr (B (fst ca)) → isContr (Σ A B)
isContrΣ′ ca cb = isContrΣ ca (λ x → subst _ (snd ca x) cb)
section-Σ≡Prop
: (pB : (x : A) → isProp (B x)) {u v : Σ A B}
→ section (Σ≡Prop pB {u} {v}) (cong fst)
section-Σ≡Prop {A = A} pB {u} {v} p j i =
(p i .fst) , isProp→PathP (λ i → isOfHLevelPath 1 (pB (fst (p i)))
(Σ≡Prop pB {u} {v} (cong fst p) i .snd)
(p i .snd) )
refl refl i j
isEquiv-Σ≡Prop
: (pB : (x : A) → isProp (B x)) {u v : Σ A B}
→ isEquiv (Σ≡Prop pB {u} {v})
isEquiv-Σ≡Prop {A = A} pB {u} {v} = isoToIsEquiv (iso (Σ≡Prop pB) (cong fst) (section-Σ≡Prop pB) (λ _ → refl))
isPropΣ : isProp A → ((x : A) → isProp (B x)) → isProp (Σ A B)
isPropΣ pA pB t u = Σ≡Prop pB (pA (t .fst) (u .fst))
isOfHLevelΣ : ∀ n → isOfHLevel n A → ((x : A) → isOfHLevel n (B x))
→ isOfHLevel n (Σ A B)
isOfHLevelΣ 0 = isContrΣ
isOfHLevelΣ 1 = isPropΣ
isOfHLevelΣ {B = B} (suc (suc n)) h1 h2 x y =
isOfHLevelRetractFromIso (suc n)
(invIso (IsoΣPathTransportPathΣ _ _))
(isOfHLevelΣ (suc n) (h1 (fst x) (fst y)) λ x → h2 _ _ _)
isSetΣ : isSet A → ((x : A) → isSet (B x)) → isSet (Σ A B)
isSetΣ = isOfHLevelΣ 2
isGroupoidΣ : isGroupoid A → ((x : A) → isGroupoid (B x)) → isGroupoid (Σ A B)
isGroupoidΣ = isOfHLevelΣ 3
is2GroupoidΣ : is2Groupoid A → ((x : A) → is2Groupoid (B x)) → is2Groupoid (Σ A B)
is2GroupoidΣ = isOfHLevelΣ 4
-- h-level of ×
isProp× : {A : Type ℓ} {B : Type ℓ'} → isProp A → isProp B → isProp (A × B)
isProp× pA pB = isPropΣ pA (λ _ → pB)
isProp×2 : {A : Type ℓ} {B : Type ℓ'} {C : Type ℓ''}
→ isProp A → isProp B → isProp C → isProp (A × B × C)
isProp×2 pA pB pC = isProp× pA (isProp× pB pC)
isProp×3 : {A : Type ℓ} {B : Type ℓ'} {C : Type ℓ''} {D : Type ℓ'''}
→ isProp A → isProp B → isProp C → isProp D → isProp (A × B × C × D)
isProp×3 pA pB pC pD = isProp×2 pA pB (isProp× pC pD)
isOfHLevel× : ∀ {A : Type ℓ} {B : Type ℓ'} n → isOfHLevel n A → isOfHLevel n B
→ isOfHLevel n (A × B)
isOfHLevel× n hA hB = isOfHLevelΣ n hA (λ _ → hB)
isSet× : ∀ {A : Type ℓ} {B : Type ℓ'} → isSet A → isSet B → isSet (A × B)
isSet× = isOfHLevel× 2
isGroupoid× : ∀ {A : Type ℓ} {B : Type ℓ'} → isGroupoid A → isGroupoid B
→ isGroupoid (A × B)
isGroupoid× = isOfHLevel× 3
is2Groupoid× : ∀ {A : Type ℓ} {B : Type ℓ'} → is2Groupoid A → is2Groupoid B
→ is2Groupoid (A × B)
is2Groupoid× = isOfHLevel× 4
-- h-level of Π-types
isOfHLevelΠ : ∀ n → ((x : A) → isOfHLevel n (B x))
→ isOfHLevel n ((x : A) → B x)
isOfHLevelΠ 0 h = (λ x → fst (h x)) , λ f i y → snd (h y) (f y) i
isOfHLevelΠ 1 h f g i x = (h x) (f x) (g x) i
isOfHLevelΠ 2 h f g F G i j z = h z (f z) (g z) (funExt⁻ F z) (funExt⁻ G z) i j
isOfHLevelΠ 3 h f g p q P Q i j k z =
h z (f z) (g z)
(funExt⁻ p z) (funExt⁻ q z)
(cong (λ f → funExt⁻ f z) P) (cong (λ f → funExt⁻ f z) Q) i j k
isOfHLevelΠ 4 h f g p q P Q R S i j k l z =
h z (f z) (g z)
(funExt⁻ p z) (funExt⁻ q z)
(cong (λ f → funExt⁻ f z) P) (cong (λ f → funExt⁻ f z) Q)
(cong (cong (λ f → funExt⁻ f z)) R) (cong (cong (λ f → funExt⁻ f z)) S) i j k l
isOfHLevelΠ (suc (suc (suc (suc (suc n))))) h f g p q P Q R S =
isOfHLevelRetract (suc n)
(cong (cong (cong funExt⁻))) (cong (cong (cong funExt))) (λ _ → refl)
(isOfHLevelΠ (suc (suc (suc (suc n)))) (λ x → h x (f x) (g x))
(funExt⁻ p) (funExt⁻ q)
(cong funExt⁻ P) (cong funExt⁻ Q)
(cong (cong funExt⁻) R) (cong (cong funExt⁻) S))
isPropΠ : (h : (x : A) → isProp (B x)) → isProp ((x : A) → B x)
isPropΠ = isOfHLevelΠ 1
isPropΠ2 : (h : (x : A) (y : B x) → isProp (C x y))
→ isProp ((x : A) (y : B x) → C x y)
isPropΠ2 h = isPropΠ λ x → isPropΠ λ y → h x y
isPropΠ3 : (h : (x : A) (y : B x) (z : C x y) → isProp (D x y z))
→ isProp ((x : A) (y : B x) (z : C x y) → D x y z)
isPropΠ3 h = isPropΠ λ x → isPropΠ λ y → isPropΠ λ z → h x y z
isPropΠ4 : (h : (x : A) (y : B x) (z : C x y) (w : D x y z) → isProp (E x y z w))
→ isProp ((x : A) (y : B x) (z : C x y) (w : D x y z) → E x y z w)
isPropΠ4 h = isPropΠ λ _ → isPropΠ3 λ _ → h _ _
isPropImplicitΠ : (h : (x : A) → isProp (B x)) → isProp ({x : A} → B x)
isPropImplicitΠ h f g i {x} = h x (f {x}) (g {x}) i
isProp→ : {A : Type ℓ} {B : Type ℓ'} → isProp B → isProp (A → B)
isProp→ pB = isPropΠ λ _ → pB
isSetΠ : ((x : A) → isSet (B x)) → isSet ((x : A) → B x)
isSetΠ = isOfHLevelΠ 2
isSetΠ2 : (h : (x : A) (y : B x) → isSet (C x y))
→ isSet ((x : A) (y : B x) → C x y)
isSetΠ2 h = isSetΠ λ x → isSetΠ λ y → h x y
isSetΠ3 : (h : (x : A) (y : B x) (z : C x y) → isSet (D x y z))
→ isSet ((x : A) (y : B x) (z : C x y) → D x y z)
isSetΠ3 h = isSetΠ λ x → isSetΠ λ y → isSetΠ λ z → h x y z
isGroupoidΠ : ((x : A) → isGroupoid (B x)) → isGroupoid ((x : A) → B x)
isGroupoidΠ = isOfHLevelΠ 3
isGroupoidΠ2 : (h : (x : A) (y : B x) → isGroupoid (C x y)) → isGroupoid ((x : A) (y : B x) → C x y)
isGroupoidΠ2 h = isGroupoidΠ λ _ → isGroupoidΠ λ _ → h _ _
isGroupoidΠ3 : (h : (x : A) (y : B x) (z : C x y) → isGroupoid (D x y z))
→ isGroupoid ((x : A) (y : B x) (z : C x y) → D x y z)
isGroupoidΠ3 h = isGroupoidΠ λ _ → isGroupoidΠ2 λ _ → h _ _
isGroupoidΠ4 : (h : (x : A) (y : B x) (z : C x y) (w : D x y z) → isGroupoid (E x y z w))
→ isGroupoid ((x : A) (y : B x) (z : C x y) (w : D x y z) → E x y z w)
isGroupoidΠ4 h = isGroupoidΠ λ _ → isGroupoidΠ3 λ _ → h _ _
is2GroupoidΠ : ((x : A) → is2Groupoid (B x)) → is2Groupoid ((x : A) → B x)
is2GroupoidΠ = isOfHLevelΠ 4
isOfHLevelΠ⁻ : ∀ {A : Type ℓ} {B : Type ℓ'} n
→ isOfHLevel n (A → B) → (A → isOfHLevel n B)
isOfHLevelΠ⁻ 0 h x = fst h x , λ y → funExt⁻ (snd h (const y)) x
isOfHLevelΠ⁻ 1 h x y z = funExt⁻ (h (const y) (const z)) x
isOfHLevelΠ⁻ (suc (suc n)) h x y z =
isOfHLevelΠ⁻ (suc n) (subst (isOfHLevel (suc n)) (sym funExtPath) (h (const y) (const z))) x
-- h-level of A ≃ B and A ≡ B
isOfHLevel≃
: ∀ n {A : Type ℓ} {B : Type ℓ'}
→ (hA : isOfHLevel n A) (hB : isOfHLevel n B) → isOfHLevel n (A ≃ B)
isOfHLevel≃ zero {A = A} {B = B} hA hB = isContr→Equiv hA hB , contr
where
contr : (y : A ≃ B) → isContr→Equiv hA hB ≡ y
contr y = Σ≡Prop isPropIsEquiv (funExt (λ a → snd hB (fst y a)))
isOfHLevel≃ (suc n) {A = A} {B = B} hA hB =
isOfHLevelΣ (suc n) (isOfHLevelΠ _ λ _ → hB)
λ a → isOfHLevelPlus' 1 (isPropIsEquiv a)
isOfHLevel≡ : ∀ n → {A B : Type ℓ} (hA : isOfHLevel n A) (hB : isOfHLevel n B) →
isOfHLevel n (A ≡ B)
isOfHLevel≡ n hA hB = isOfHLevelRetract n (fst univalence) ua (secEq univalence) (isOfHLevel≃ n hA hB)
isOfHLevel⁺≃ₗ
: ∀ n {A : Type ℓ} {B : Type ℓ'}
→ isOfHLevel (suc n) A → isOfHLevel (suc n) (A ≃ B)
isOfHLevel⁺≃ₗ zero pA e = isOfHLevel≃ 1 pA (isOfHLevelRespectEquiv 1 e pA) e
isOfHLevel⁺≃ₗ (suc n) hA e = isOfHLevel≃ m hA (isOfHLevelRespectEquiv m e hA) e
where
m = suc (suc n)
isOfHLevel⁺≃ᵣ
: ∀ n {A : Type ℓ} {B : Type ℓ'}
→ isOfHLevel (suc n) B → isOfHLevel (suc n) (A ≃ B)
isOfHLevel⁺≃ᵣ zero pB e
= isOfHLevel≃ 1 (isPropRetract (e .fst) (invEq e) (secEq e) pB) pB e
isOfHLevel⁺≃ᵣ (suc n) hB e
= isOfHLevel≃ m (isOfHLevelRetract m (e .fst) (invEq e) (secEq e) hB) hB e
where
m = suc (suc n)
isOfHLevel⁺≡ₗ
: ∀ n → {A B : Type ℓ}
→ isOfHLevel (suc n) A → isOfHLevel (suc n) (A ≡ B)
isOfHLevel⁺≡ₗ zero pA P = isOfHLevel≡ 1 pA (subst isProp P pA) P
isOfHLevel⁺≡ₗ (suc n) hA P
= isOfHLevel≡ m hA (subst (isOfHLevel m) P hA) P
where
m = suc (suc n)
isOfHLevel⁺≡ᵣ
: ∀ n → {A B : Type ℓ}
→ isOfHLevel (suc n) B → isOfHLevel (suc n) (A ≡ B)
isOfHLevel⁺≡ᵣ zero pB P = isOfHLevel≡ 1 (subst⁻ isProp P pB) pB P
isOfHLevel⁺≡ᵣ (suc n) hB P
= isOfHLevel≡ m (subst⁻ (isOfHLevel m) P hB) hB P
where
m = suc (suc n)
-- h-level of TypeOfHLevel
isPropHContr : isProp (TypeOfHLevel ℓ 0)
isPropHContr x y = Σ≡Prop (λ _ → isPropIsContr) (isOfHLevel≡ 0 (x .snd) (y .snd) .fst)
isOfHLevelTypeOfHLevel : ∀ n → isOfHLevel (suc n) (TypeOfHLevel ℓ n)
isOfHLevelTypeOfHLevel zero = isPropHContr
isOfHLevelTypeOfHLevel (suc n) (X , a) (Y , b) =
isOfHLevelRetract (suc n) (cong fst) (Σ≡Prop λ _ → isPropIsOfHLevel (suc n))
(section-Σ≡Prop λ _ → isPropIsOfHLevel (suc n))
(isOfHLevel≡ (suc n) a b)
isSetHProp : isSet (hProp ℓ)
isSetHProp = isOfHLevelTypeOfHLevel 1
-- h-level of lifted type
isOfHLevelLift : ∀ {ℓ ℓ'} (n : HLevel) {A : Type ℓ} → isOfHLevel n A → isOfHLevel n (Lift {j = ℓ'} A)
isOfHLevelLift n = isOfHLevelRetract n lower lift λ _ → refl
----------------------------
-- More consequences of isProp and isContr
inhProp→isContr : A → isProp A → isContr A
inhProp→isContr x h = x , h x
extend : isContr A → (∀ φ → (u : Partial φ A) → Sub A φ u)
extend (x , p) φ u = inS (hcomp (λ { j (φ = i1) → p (u 1=1) j }) x)
isContrPartial→isContr : ∀ {ℓ} {A : Type ℓ}
→ (extend : ∀ φ → Partial φ A → A)
→ (∀ u → u ≡ (extend i1 λ { _ → u}))
→ isContr A
isContrPartial→isContr {A = A} extend law
= ex , λ y → law ex ∙ (λ i → Aux.v y i) ∙ sym (law y)
where ex = extend i0 empty
module Aux (y : A) (i : I) where
φ = ~ i ∨ i
u : Partial φ A
u = λ { (i = i0) → ex ; (i = i1) → y }
v = extend φ u
-- Dependent h-level over a type
isOfHLevelDep : HLevel → {A : Type ℓ} (B : A → Type ℓ') → Type (ℓ-max ℓ ℓ')
isOfHLevelDep 0 {A = A} B = {a : A} → Σ[ b ∈ B a ] ({a' : A} (b' : B a') (p : a ≡ a') → PathP (λ i → B (p i)) b b')
isOfHLevelDep 1 {A = A} B = {a0 a1 : A} (b0 : B a0) (b1 : B a1) (p : a0 ≡ a1) → PathP (λ i → B (p i)) b0 b1
isOfHLevelDep (suc (suc n)) {A = A} B = {a0 a1 : A} (b0 : B a0) (b1 : B a1) → isOfHLevelDep (suc n) {A = a0 ≡ a1} (λ p → PathP (λ i → B (p i)) b0 b1)
isOfHLevel→isOfHLevelDep : (n : HLevel)
→ {A : Type ℓ} {B : A → Type ℓ'} (h : (a : A) → isOfHLevel n (B a)) → isOfHLevelDep n {A = A} B
isOfHLevel→isOfHLevelDep 0 h {a} =
(h a .fst , λ b' p → isProp→PathP (λ i → isContr→isProp (h (p i))) (h a .fst) b')
isOfHLevel→isOfHLevelDep 1 h = λ b0 b1 p → isProp→PathP (λ i → h (p i)) b0 b1
isOfHLevel→isOfHLevelDep (suc (suc n)) {A = A} {B} h {a0} {a1} b0 b1 =
isOfHLevel→isOfHLevelDep (suc n) (λ p → helper a1 p b1)
where
helper : (a1 : A) (p : a0 ≡ a1) (b1 : B a1) →
isOfHLevel (suc n) (PathP (λ i → B (p i)) b0 b1)
helper a1 p b1 = J (λ a1 p → ∀ b1 → isOfHLevel (suc n) (PathP (λ i → B (p i)) b0 b1))
(λ _ → h _ _ _) p b1
isContrDep→isPropDep : isOfHLevelDep 0 B → isOfHLevelDep 1 B
isContrDep→isPropDep {B = B} Bctr {a0 = a0} b0 b1 p i
= comp (λ k → B (p (i ∧ k))) (λ k → λ where
(i = i0) → Bctr .snd b0 refl k
(i = i1) → Bctr .snd b1 p k)
(c0 .fst)
where
c0 = Bctr {a0}
isPropDep→isSetDep : isOfHLevelDep 1 B → isOfHLevelDep 2 B
isPropDep→isSetDep {B = B} Bprp b0 b1 b2 b3 p i j
= comp (λ k → B (p (i ∧ k) (j ∧ k))) (λ k → λ where
(j = i0) → Bprp b0 b0 refl k
(i = i0) → Bprp b0 (b2 j) (λ k → p i0 (j ∧ k)) k
(i = i1) → Bprp b0 (b3 j) (λ k → p k (j ∧ k)) k
(j = i1) → Bprp b0 b1 (λ k → p (i ∧ k) (j ∧ k)) k)
b0
isOfHLevelDepSuc : (n : HLevel) → isOfHLevelDep n B → isOfHLevelDep (suc n) B
isOfHLevelDepSuc 0 = isContrDep→isPropDep
isOfHLevelDepSuc 1 = isPropDep→isSetDep
isOfHLevelDepSuc (suc (suc n)) Blvl b0 b1 = isOfHLevelDepSuc (suc n) (Blvl b0 b1)
isPropDep→isSetDep'
: isOfHLevelDep 1 B
→ {p : w ≡ x} {q : y ≡ z} {r : w ≡ y} {s : x ≡ z}
→ {tw : B w} {tx : B x} {ty : B y} {tz : B z}
→ (sq : Square p q r s)
→ (tp : PathP (λ i → B (p i)) tw tx)
→ (tq : PathP (λ i → B (q i)) ty tz)
→ (tr : PathP (λ i → B (r i)) tw ty)
→ (ts : PathP (λ i → B (s i)) tx tz)
→ SquareP (λ i j → B (sq i j)) tp tq tr ts
isPropDep→isSetDep' {B = B} Bprp {p} {q} {r} {s} {tw} sq tp tq tr ts i j
= comp (λ k → B (sq (i ∧ k) (j ∧ k))) (λ k → λ where
(i = i0) → Bprp tw (tp j) (λ k → p (k ∧ j)) k
(i = i1) → Bprp tw (tq j) (λ k → sq (i ∧ k) (j ∧ k)) k
(j = i0) → Bprp tw (tr i) (λ k → r (k ∧ i)) k
(j = i1) → Bprp tw (ts i) (λ k → sq (k ∧ i) (j ∧ k)) k)
tw
isOfHLevelΣ' : ∀ n → isOfHLevel n A → isOfHLevelDep n B → isOfHLevel n (Σ A B)
isOfHLevelΣ' 0 Actr Bctr .fst = (Actr .fst , Bctr .fst)
isOfHLevelΣ' 0 Actr Bctr .snd (x , y) i
= Actr .snd x i , Bctr .snd y (Actr .snd x) i
isOfHLevelΣ' 1 Alvl Blvl (w , y) (x , z) i .fst = Alvl w x i
isOfHLevelΣ' 1 Alvl Blvl (w , y) (x , z) i .snd = Blvl y z (Alvl w x) i
isOfHLevelΣ' {A = A} {B = B} (suc (suc n)) Alvl Blvl (w , y) (x , z)
= isOfHLevelRetract (suc n)
(λ p → (λ i → p i .fst) , λ i → p i .snd)
ΣPathP
(λ x → refl)
(isOfHLevelΣ' (suc n) (Alvl w x) (Blvl y z))
|
lemma monoseq_imp_convergent_iff_Bseq: "monoseq f \<Longrightarrow> convergent f \<longleftrightarrow> Bseq f" for f :: "nat \<Rightarrow> real"
|
```python
%matplotlib inline
```
Sequence Models and Long Short-Term Memory Networks
===================================================
At this point, we have seen various feed-forward networks. That is,
there is no state maintained by the network at all. This might not be
the behavior we want. Sequence models are central to NLP: they are
models where there is some sort of dependence through time between your
inputs. The classical example of a sequence model is the Hidden Markov
Model for part-of-speech tagging. Another example is the conditional
random field.
A recurrent neural network is a network that maintains some kind of
state. For example, its output could be used as part of the next input,
so that information can propagate along as the network passes over the
sequence. In the case of an LSTM, for each element in the sequence,
there is a corresponding *hidden state* $h_t$, which in principle
can contain information from arbitrary points earlier in the sequence.
We can use the hidden state to predict words in a language model,
part-of-speech tags, and a myriad of other things.
LSTMs in Pytorch
~~~~~~~~~~~~~~~~
Before getting to the example, note a few things. Pytorch's LSTM expects
all of its inputs to be 3D tensors. The semantics of the axes of these
tensors is important. The first axis is the sequence itself, the second
indexes instances in the mini-batch, and the third indexes elements of
the input. We haven't discussed mini-batching, so let's just ignore that
and assume the second axis will always have size 1. If
we want to run the sequence model over the sentence "The cow jumped",
our input should look like
\begin{align}\begin{bmatrix}
\overbrace{q_\text{The}}^\text{row vector} \\
q_\text{cow} \\
q_\text{jumped}
\end{bmatrix}\end{align}
Remember, though, that there is an additional second dimension of size 1.
In addition, you could go through the sequence one element at a time, in
which case the first axis will have size 1 as well.
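For instance, under these conventions the input for this three-word sentence
is a tensor of shape `(3, 1, q)` — a minimal sketch, with a made-up embedding
size standing in for `q`:

```python
import torch

q = 4  # hypothetical embedding dimension
inputs = torch.randn(3, 1, q)  # (seq_len, batch, input_dim) for "The cow jumped"
print(inputs.shape)  # torch.Size([3, 1, 4])
```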
Let's see a quick example.
```python
# Author: Robert Guthrie
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
```
<torch._C.Generator at 0x2b624c779b0>
```python
lstm = nn.LSTM(3, 3) # Input dim is 3, output dim is 3
inputs = [torch.randn(1, 3) for _ in range(5)] # make a sequence of length 5
# initialize the hidden state.
init_hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3))
hidden = init_hidden
for i in inputs:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    out, hidden = lstm(i.view(1, 1, -1), hidden)
    print("out: ", out)
    print("hidden: ", hidden)
# alternatively, we can do the entire sequence all at once.
# the first value returned by LSTM is all of the hidden states throughout
# the sequence. the second is just the most recent hidden state
# (compare the last slice of "out" with "hidden" below, they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence
# "hidden" will allow you to continue the sequence and backpropagate,
# by passing it as an argument to the lstm at a later time
# Add the extra 2nd dimension
inputs = torch.cat(inputs).view(len(inputs), 1, -1)
# hidden = (torch.randn(1, 1, 3), torch.randn(1, 1, 3)) # clean out hidden state
hidden = init_hidden
out, hidden = lstm(inputs, hidden)
print("batch out: ", out)
print("final hidden: ", hidden)
```
out: tensor([[[-0.0270, 0.1753, 0.3409]]], grad_fn=<CatBackward>)
hidden: (tensor([[[-0.0270, 0.1753, 0.3409]]], grad_fn=<ViewBackward>), tensor([[[-0.0525, 0.6791, 0.5669]]], grad_fn=<ViewBackward>))
out: tensor([[[ 0.1600, -0.0196, 0.3136]]], grad_fn=<CatBackward>)
hidden: (tensor([[[ 0.1600, -0.0196, 0.3136]]], grad_fn=<ViewBackward>), tensor([[[ 0.3229, -0.0641, 0.4017]]], grad_fn=<ViewBackward>))
out: tensor([[[ 0.0526, 0.0861, -0.1031]]], grad_fn=<CatBackward>)
hidden: (tensor([[[ 0.0526, 0.0861, -0.1031]]], grad_fn=<ViewBackward>), tensor([[[ 0.1281, 0.2225, -0.2335]]], grad_fn=<ViewBackward>))
out: tensor([[[0.0883, 0.1529, 0.0473]]], grad_fn=<CatBackward>)
hidden: (tensor([[[0.0883, 0.1529, 0.0473]]], grad_fn=<ViewBackward>), tensor([[[0.1546, 0.2758, 0.0848]]], grad_fn=<ViewBackward>))
out: tensor([[[-0.0075, -0.0359, 0.0541]]], grad_fn=<CatBackward>)
hidden: (tensor([[[-0.0075, -0.0359, 0.0541]]], grad_fn=<ViewBackward>), tensor([[[-0.0090, -0.2157, 0.1439]]], grad_fn=<ViewBackward>))
batch out: tensor([[[-0.0270, 0.1753, 0.3409]],
[[ 0.1600, -0.0196, 0.3136]],
[[ 0.0526, 0.0861, -0.1031]],
[[ 0.0883, 0.1529, 0.0473]],
[[-0.0075, -0.0359, 0.0541]]], grad_fn=<CatBackward>)
final hidden: (tensor([[[-0.0075, -0.0359, 0.0541]]], grad_fn=<ViewBackward>), tensor([[[-0.0090, -0.2157, 0.1439]]], grad_fn=<ViewBackward>))
Example: An LSTM for Part-of-Speech Tagging
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this section, we will use an LSTM to get part of speech tags. We will
not use Viterbi or Forward-Backward or anything like that, but as a
(challenging) exercise to the reader, think about how Viterbi could be
used after you have seen what is going on.
The model is as follows: let our input sentence be
$w_1, \dots, w_M$, where $w_i \in V$, our vocab. Also, let
$T$ be our tag set, and $y_i$ the tag of word $w_i$.
Denote our prediction of the tag of word $w_i$ by
$\hat{y}_i$.
This is a structure prediction model: our output is a sequence
$\hat{y}_1, \dots, \hat{y}_M$, where $\hat{y}_i \in T$.
To do the prediction, pass an LSTM over the sentence. Denote the hidden
state at timestep $i$ as $h_i$. Also, assign each tag a
unique index (like how we had word\_to\_ix in the word embeddings
section). Then our prediction rule for $\hat{y}_i$ is
\begin{align}\hat{y}_i = \text{argmax}_j \ (\log \text{Softmax}(Ah_i + b))_j\end{align}
That is, take the log softmax of the affine map of the hidden state,
and the predicted tag is the tag that has the maximum value in this
vector. Note this implies immediately that the dimensionality of the
target space of $A$ is $|T|$.
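As a small sketch of this rule (the sizes here are made up for illustration):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

hidden_dim, num_tags, M = 9, 3, 5        # hypothetical sizes
A = nn.Linear(hidden_dim, num_tags)      # the affine map Ah_i + b
h = torch.randn(M, hidden_dim)           # one hidden state per word
log_probs = F.log_softmax(A(h), dim=1)   # log softmax over the tag scores
y_hat = torch.argmax(log_probs, dim=1)   # predicted tag index for each word
```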
Prepare data:
```python
def prepare_sequence(seq, to_ix):
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)

training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]

word_to_ix = {}
char_to_ix = {}
for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
        for char in word:
            if char not in char_to_ix:
                char_to_ix[char] = len(char_to_ix)
print("word to ix: ", word_to_ix)
print("char to ix: ", char_to_ix)
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}
print("tag to ix: ", tag_to_ix)
# These will usually be more like 32 or 64 dimensional.
# We will keep them small, so we can see how the weights change as we train.
WORD_EMBEDDING_DIM = 6
FULL_HIDDEN_DIM = 9 # hidden vector state for the full word + its characters
CHAR_EMBEDDING_DIM = 3
CHAR_HIDDEN_DIM = 3
```
word to ix: {'The': 0, 'dog': 1, 'ate': 2, 'the': 3, 'apple': 4, 'Everybody': 5, 'read': 6, 'that': 7, 'book': 8}
char to ix: {'T': 0, 'h': 1, 'e': 2, 'd': 3, 'o': 4, 'g': 5, 'a': 6, 't': 7, 'p': 8, 'l': 9, 'E': 10, 'v': 11, 'r': 12, 'y': 13, 'b': 14, 'k': 15}
tag to ix: {'DET': 0, 'NN': 1, 'V': 2}
Create the model:
```python
class LSTMTagger(nn.Module):

    def __init__(self, word_embedding_dim, full_hidden_dim, vocab_size,
                 tagset_size,
                 char_embedding_dim, char_hidden_dim, char_dict_size,
                 batch_size=1, num_layers=1):
        super(LSTMTagger, self).__init__()
        self.full_hidden_dim = full_hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        self.embedding_dim = word_embedding_dim
        self.char_embedding_dim = char_embedding_dim
        self.char_hidden_dim = char_hidden_dim
        self.char_dict_size = char_dict_size

        self.char_embeddings = nn.Embedding(char_dict_size, char_embedding_dim)
        self.char_lstm = nn.LSTM(input_size=char_embedding_dim,
                                 hidden_size=char_hidden_dim,
                                 num_layers=num_layers)
        self.char_hidden = self.init_char_hidden()

        self.word_embeddings = nn.Embedding(vocab_size, word_embedding_dim)
        # The LSTM takes the concatenation of a word embedding and the
        # character-level representation (the char LSTM's final hidden state,
        # of size char_hidden_dim) as input, and outputs hidden states with
        # dimensionality full_hidden_dim.
        self.lstm = nn.LSTM(input_size=word_embedding_dim + char_hidden_dim,
                            hidden_size=full_hidden_dim, num_layers=num_layers)
        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(full_hidden_dim, tagset_size)
        self.full_hidden = self.init_full_hidden()

    def init_full_hidden(self):
        # Before we've done anything, we don't have any hidden state.
        # Refer to the Pytorch documentation to see exactly
        # why they have this dimensionality.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        hidden_state = torch.zeros(self.num_layers, self.batch_size,
                                   self.full_hidden_dim)
        cell_state = torch.zeros(self.num_layers, self.batch_size,
                                 self.full_hidden_dim)
        return hidden_state, cell_state

    def init_char_hidden(self):
        hidden_state = torch.zeros(self.num_layers, self.batch_size,
                                   self.char_hidden_dim)
        cell_state = torch.zeros(self.num_layers, self.batch_size,
                                 self.char_hidden_dim)
        return hidden_state, cell_state

    def forward(self, sentence):
        # NOTE: for simplicity, this reaches out to the module-level
        # word_to_ix / char_to_ix dictionaries to recover each word's
        # characters from its index.
        ix_to_word = {ix: w for w, ix in word_to_ix.items()}
        char_reps = []
        for word_ix in sentence:
            word = ix_to_word[word_ix.item()]
            char_ixs = prepare_sequence(word, char_to_ix)
            char_embeds = self.char_embeddings(char_ixs)
            self.char_hidden = self.init_char_hidden()
            char_out, self.char_hidden = self.char_lstm(
                char_embeds.view(len(word), self.batch_size, -1),
                self.char_hidden)
            # the final hidden state is the word's char-level representation
            char_reps.append(char_out[-1].view(-1))
        word_embeds = self.word_embeddings(sentence)
        full_embeds = torch.stack(
            [torch.cat((word_embeds[i], rep)) for i, rep in enumerate(char_reps)])
        lstm_out, self.full_hidden = self.lstm(
            full_embeds.view(len(sentence), self.batch_size, -1),
            self.full_hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
```
Train the model:
```python
model = LSTMTagger(word_embedding_dim=WORD_EMBEDDING_DIM, full_hidden_dim=FULL_HIDDEN_DIM,
                   vocab_size=len(word_to_ix), tagset_size=len(tag_to_ix),
                   char_embedding_dim=CHAR_EMBEDDING_DIM, char_hidden_dim=CHAR_HIDDEN_DIM,
                   char_dict_size=len(char_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# See what the scores are before training
# Note that element i,j of the output is the score for tag j for word i.
# Here we don't need to train, so the code is wrapped in torch.no_grad()
with torch.no_grad():
    inputs = prepare_sequence(training_data[0][0], word_to_ix)
    tag_scores = model(inputs)
    print(tag_scores)
for epoch in range(300):  # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients.
        # We need to clear them out before each instance
        model.zero_grad()

        # Also, we need to clear out the hidden state of the LSTM,
        # detaching it from its history on the last instance.
        model.full_hidden = model.init_full_hidden()

        # Step 2. Get our inputs ready for the network, that is, turn them into
        # Tensors of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)

        # Step 3. Run our forward pass.
        tag_scores = model(sentence_in)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step()
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()
# See what the scores are after training
with torch.no_grad():
    inputs = prepare_sequence(training_data[0][0], word_to_ix)
    tag_scores = model(inputs)

    # The sentence is "the dog ate the apple".  i,j corresponds to score for tag j
    # for word i. The predicted tag is the maximum scoring tag.
    # Here, we can see the predicted sequence below is 0 1 2 0 1
    # since 0 is the index of the maximum value of row 1,
    # 1 is the index of the maximum value of row 2, etc.
    # Which is DET NN V DET NN, the correct sequence!
    print(tag_scores)
```
tensor([[-1.2231, -0.8153, -1.3349],
[-1.3205, -0.7126, -1.4162],
[-1.3797, -0.6769, -1.4264],
[-1.3987, -0.6565, -1.4505],
[-1.3729, -0.6791, -1.4289]])
tensor([[-0.0433, -3.4213, -4.6308],
[-5.0335, -0.0169, -4.5826],
[-3.4704, -3.5979, -0.0603],
[-0.1952, -3.1356, -2.0112],
[-4.6059, -0.0257, -4.1740]])
Exercise: Augmenting the LSTM part-of-speech tagger with character-level features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the example above, each word had an embedding, which served as the
inputs to our sequence model. Let's augment the word embeddings with a
representation derived from the characters of the word. We expect that
this should help significantly, since character-level information such as
affixes has a large bearing on part of speech. For example, words with
the affix *-ly* are almost always tagged as adverbs in English.
To do this, let $c_w$ be the character-level representation of
word $w$. Let $x_w$ be the word embedding as before. Then
the input to our sequence model is the concatenation of $x_w$ and
$c_w$. So if $x_w$ has dimension 5, and $c_w$
dimension 3, then our LSTM should accept an input of dimension 8.
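In code, the concatenation itself is just `torch.cat`; a tiny sketch with the
dimensions above:

```python
import torch

x_w = torch.randn(5)         # word embedding
c_w = torch.randn(3)         # character-level representation
print(torch.cat((x_w, c_w)).shape)  # torch.Size([8]), the LSTM input dimension
```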
To get the character level representation, do an LSTM over the
characters of a word, and let $c_w$ be the final hidden state of
this LSTM. Hints:
* There are going to be two LSTMs in your new model:
the original one that outputs POS tag scores, and a new one that
outputs a character-level representation of each word.
* To do a sequence model over characters, you will have to embed characters.
The character embeddings will be the input to the character LSTM.
|
/*
Copyright 2020 php42
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "diffgen.h"
#include "ziparchive.h"
#include "legacy.h"
#include "../common/filesystem.h"
#include "../common/textconvert.h"
#include "../common/console.h"
#include "../common/endian.h"
#include "../common/thread_pool.h"
#include <boost/algorithm/string.hpp>
#include <libtpdp.h>
#include <iostream>
#include <tuple>
#include <thread>
#include <future>
#include <regex>
#include <unordered_map>
#include <algorithm>
namespace algo = boost::algorithm;
namespace fs = std::filesystem;
typedef std::pair<Path, Path> DiffPair;
typedef std::tuple<int, Path, Path> DiffTuple;
typedef std::vector<DiffTuple> DiffVec;
typedef std::unordered_map<int, std::vector<DiffPair>> DiffMap;
std::mutex g_mtx;
static std::wostream& operator<<(std::wostream& os, const fs::path& p)
{
os << p.wstring();
return os;
}
/* async task for diff generation */
static void diff_task(Path path, Path rel_path, const libtpdp::Archive& arc, int arc_num, DiffVec& out)
{
try
{
auto it = arc.find(utf_to_sjis(rel_path.wstring()));
if(it >= arc.end())
{
{
ScopedConsoleColorMT color(COLOR_OK);
std::wcout << L"Adding new file: " << rel_path << std::endl;
}
std::lock_guard lock(g_mtx);
out.push_back({ arc_num, std::move(rel_path), std::move(path) });
return;
}
auto src_file = arc.get_file(it);
if(!src_file)
throw DiffgenException("Error extracting file from archive.");
std::size_t sz;
auto dst_file = read_file(path.wstring(), sz);
if(!dst_file)
throw DiffgenException("Failed to read file.");
if((src_file.size() == sz) && (memcmp(src_file.data(), dst_file.get(), sz) == 0))
return;
std::lock_guard lock(g_mtx);
out.push_back({ arc_num, std::move(rel_path), std::move(path) });
}
catch(const std::exception& ex)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Error processing file: " << path << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
}
}
/* async task for file extraction */
static void extract_task(const libtpdp::Archive& arc, const libtpdp::Archive::iterator& it, const Path& path)
{
try
{
auto file = arc.get_file(it);
if(!file)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Error extracting file: " << path << std::endl;
return;
}
auto dir = path.parent_path();
try
{
fs::create_directories(dir);
}
catch(const fs::filesystem_error& ex)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Failed to create directory: " << dir << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return;
}
if(!write_file(path.wstring(), file.data(), file.size()))
{
ScopedConsoleColorMT color(COLOR_WARN);
std::wcerr << L"Failed to write to file: " << path << std::endl;
}
}
catch(const std::exception& ex)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Error extracting file: " << path << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
}
}
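/* walk each extracted gn_dat*.arc directory and collect files that are new
 * or whose contents differ from the corresponding archive entry */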
static DiffVec get_diffs(const Path& in_dir, const Path& output, int threads)
{
DiffVec diffs;
libtpdp::Archive arc;
ThreadPool pool(threads);
bool suppress_json_warning = false;
for(int i = 1; i < 7; ++i)
{
auto arc_name = (L"gn_dat" + std::to_wstring(i) + L".arc");
auto arc_path = in_dir / arc_name;
auto out_dir = output / arc_name;
if(!fs::exists(arc_path) || !fs::is_regular_file(arc_path))
{
std::wcerr << L"File not found: " << arc_path << std::endl;
continue;
}
if(!fs::exists(out_dir) || !fs::is_directory(out_dir))
{
std::wcerr << L"Missing directory: " << out_dir << std::endl;
continue;
}
try
{
std::wcout << L">> " << arc_path << std::endl;
arc.open(arc_path.wstring());
}
catch(const libtpdp::ArcError&)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Failed to open file: " << arc_path << std::endl;
throw;
}
for(auto& entry : fs::recursive_directory_iterator(out_dir))
{
if(!entry.is_regular_file())
continue;
auto path = entry.path();
auto rel = path.lexically_relative(out_dir);
if(rel.empty())
continue;
if(algo::iequals(entry.path().extension().wstring(), L".json"))
{
if(!suppress_json_warning)
{
ScopedConsoleColorMT color(COLOR_WARN);
std::wcerr << L"Skipping json files..." << std::endl;
suppress_json_warning = true;
}
continue;
}
pool.queue_task([=, &arc, &diffs]() mutable { diff_task(std::move(path), std::move(rel), arc, i, diffs); });
}
pool.wait();
}
return diffs;
}
bool diff(const Path& input, const Path& output, const Path& diff_path, int threads)
{
std::wcout << L"Using up to " << threads << L" concurrent threads" << std::endl;
Path in_dir(input / L"dat");
auto diffs = get_diffs(in_dir, output, threads);
if(diffs.empty())
{
ScopedConsoleColorChanger color(COLOR_WARN);
std::wcerr << L"Source and target are identical, no diffs to output!" << std::endl;
return false;
}
ZipArchive zip;
try
{
zip.open(diff_path, ZIP_CREATE | ZIP_TRUNCATE);
for(auto& i : diffs)
{
auto&[arc_num, rel_path, data_path] = i;
auto arc_name = "gn_dat" + std::to_string(arc_num) + ".arc";
auto path = utf_narrow((arc_name / rel_path).wstring());
[[maybe_unused]] auto entry = zip.add_file(path, data_path);
}
zip.close();
}
catch(const std::exception& ex)
{
ScopedConsoleColorMT color(COLOR_CRITICAL);
std::wcerr << L"Error writing to file: " << diff_path << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return false;
}
return true;
}
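/* extract every file from each gn_dat*.arc into a matching directory tree */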
bool extract(const Path& input, const Path& output, int threads)
{
std::wcout << L"Using up to " << threads << L" concurrent threads" << std::endl;
ThreadPool pool(threads);
libtpdp::Archive arc;
Path in_dir(input / L"dat");
for(int i = 1; i < 7; ++i)
{
auto arc_name = (L"gn_dat" + std::to_wstring(i) + L".arc");
auto arc_path = in_dir / arc_name;
if(!fs::exists(arc_path) || !fs::is_regular_file(arc_path))
{
std::wcerr << L"File not found: " << arc_path << std::endl;
continue;
}
try
{
std::wcout << L"Extracting: " << arc_path << std::endl;
arc.open(arc_path.wstring());
}
catch(const libtpdp::ArcError& ex)
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Failed to open file: " << arc_path << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return false;
}
Path out_dir(output / arc_name);
for(auto it = arc.begin(); it != arc.end(); ++it)
{
if(arc.is_dir(it) || it->data_size == 0)
continue;
auto fp = sjis_to_utf(arc.get_path(it));
if(fp.empty())
{
ScopedConsoleColorChanger color(COLOR_WARN);
std::wcerr << L"Skipping unknown object at index: " << it.index() << std::endl;
continue;
}
auto out_path = out_dir / fp;
pool.queue_task([=, &arc]() { extract_task(arc, it, out_path); });
}
pool.wait();
}
return true;
}
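/* apply a diff archive (zip) to the game's .arc files, falling back to the
 * legacy patch format if the zip is not in the expected format */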
bool patch(const Path& input, const Path& output)
{
ZipArchive zip;
std::unordered_map<int, std::vector<std::pair<ZipEntry,std::string>>> patches;
Path in_dir = input / L"dat";
try
{
try
{
zip.open(output);
}
catch(const ZipWrongFmt&)
{
return legacy_patch(input, output);
}
auto files = zip.get_file_table();
std::regex regex("^gn_dat(\\d)\\.arc(?:\\\\|/)+(.+)", std::regex::icase);
for(auto& entry : files)
{
std::smatch matches;
if(std::regex_search(entry.name, matches, regex))
{
auto num = std::stol(matches.str(1));
auto relpath = matches.str(2);
if(num >= 1 && num <= 6)
patches[num].emplace_back(entry, std::move(relpath));
}
}
if(patches.empty())
throw DiffgenException("No patches found.");
std::size_t count = 0;
for(auto& it : patches)
{
auto arcname = L"gn_dat" + std::to_wstring(it.first) + L".arc";
auto arcpath = in_dir / arcname;
if(!fs::exists(arcpath) || !fs::is_regular_file(arcpath))
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Missing file: " << arcpath << std::endl;
continue;
}
try
{
libtpdp::Archive arc;
arc.open(arcpath.wstring());
for(auto& i : it.second)
{
auto file = zip.get_file(i.first);
auto pos = arc.find(utf8_to_sjis(i.second));
if(pos < arc.end())
{
pos = arc.repack_file(pos, file.data(), file.size());
if(pos >= arc.end())
throw DiffgenException("Failed to repack file: " + i.second);
}
else
{
pos = arc.insert(file.data(), file.size(), utf8_to_sjis(i.second));
if(pos >= arc.end())
throw DiffgenException("Failed to insert file: " + i.second);
}
++count;
}
arc.save(arcpath.wstring());
}
catch(const libtpdp::ArcError& ex)
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Error writing to file: " << arcpath << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return false;
}
}
std::wcout << L"Patched " << count << L" files." << std::endl;
if(count == 0)
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Could not apply any patches." << std::endl;
return false;
}
}
catch(const ZipError& ex)
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Error reading from file: " << output << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return false;
}
return true;
}
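/* like diff(), but writes the changed files directly back into the .arc
 * archives instead of emitting a zip */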
bool repack(const Path& input, const Path& output, int threads)
{
std::unordered_map<int, std::vector<DiffPair>> patches;
std::wcout << L"Using up to " << threads << L" concurrent threads" << std::endl;
Path in_dir(input / L"dat");
{
auto diffs = get_diffs(in_dir, output, threads);
if(diffs.empty())
throw DiffgenException("Source and target are identical, no files to repack!");
for(auto& i : diffs)
{
auto&[arc_num, rel_path, data_path] = i;
patches[arc_num].push_back({ std::move(rel_path), std::move(data_path) });
}
}
Path arc_path;
try
{
for(const auto& i : patches)
{
auto arc_num = i.first;
arc_path = in_dir / (L"gn_dat" + std::to_wstring(arc_num) + L".arc");
libtpdp::Archive arc;
arc.open(arc_path.wstring());
for(const auto& j : i.second)
{
const auto&[rel_path, data_path] = j;
std::size_t sz;
auto file = read_file(data_path.wstring(), sz);
if(!file)
throw DiffgenException("Failed to read file: " + utf_narrow(data_path.wstring()));
auto filename = utf_to_sjis(rel_path.wstring());
auto it = arc.find(filename);
if(it >= arc.end())
{
auto pos = arc.insert(file.get(), sz, filename);
if(pos >= arc.end())
throw DiffgenException("Failed to insert file: " + utf_narrow(data_path.wstring()));
}
else
{
auto pos = arc.repack_file(it, file.get(), sz);
if(pos >= arc.end())
throw DiffgenException("Failed to repack file: " + utf_narrow(data_path.wstring()));
}
}
arc.save(arc_path.wstring());
}
}
catch(const libtpdp::ArcError& ex)
{
ScopedConsoleColorChanger color(COLOR_CRITICAL);
std::wcerr << L"Error writing to file: " << arc_path << std::endl;
std::wcerr << utf_widen(ex.what()) << std::endl;
return false;
}
return true;
}
|
module Text.WebIDL.Parser
import Data.SOP
import Data.List
import Data.List.Elem
import Data.List1
import Text.Lexer
import Text.Parser
import Text.WebIDL.Types
import Text.WebIDL.Lexer
--------------------------------------------------------------------------------
-- Utilities
--------------------------------------------------------------------------------
public export
IdlGrammarAny : (b : Bool) -> Type -> Type
IdlGrammarAny b t = Grammar () IdlToken b t
public export
IdlGrammar : Type -> Type
IdlGrammar = IdlGrammarAny True
public export
IdlGrammar' : Type -> Type
IdlGrammar' = IdlGrammarAny False
tok : String -> (IdlToken -> Maybe a) -> IdlGrammar a
tok s f = terminal s f
withKey : String -> (String -> Maybe a) -> IdlGrammar a
withKey s f = tok s $ \case (Key $ MkKeyword s _) => f s
_ => Nothing
intLit : IdlGrammar IntLit
intLit = tok "Int Lit" $ \case ILit n => Just n
_ => Nothing
stringLit : IdlGrammar StringLit
stringLit = tok "String Lit" $ \case SLit s => Just s
_ => Nothing
floatLit : IdlGrammar FloatLit
floatLit = tok "Float Lit" $ \case FLit v => Just v
_ => Nothing
--------------------------------------------------------------------------------
-- Symbols
--------------------------------------------------------------------------------
symbol : Char -> IdlGrammar ()
symbol c = tok ("Symbol " ++ show c) $ \case Other (Symb v) => guard (c == v)
_ => Nothing
comma : IdlGrammar ()
comma = symbol ','
ellipsis : IdlGrammar ()
ellipsis = tok "Ellipsis" $ \case Other Ellipsis => Just ()
_ => Nothing
inParens : {b : _} -> Inf (IdlGrammarAny b a) -> IdlGrammar a
inParens g = symbol '(' *> g <* symbol ')'
inBrackets : {b : _} -> Inf (IdlGrammarAny b a) -> IdlGrammar a
inBrackets g = symbol '[' *> g <* symbol ']'
inBraces : {b : _} -> Inf (IdlGrammarAny b a) -> IdlGrammar a
inBraces g = symbol '{' *> g <* symbol '}'
inAngles : {b : _} -> Inf (IdlGrammarAny b a) -> IdlGrammar a
inAngles g = symbol '<' *> g <* symbol '>'
inAnyParens : {b : _} -> Inf (IdlGrammarAny b a) -> IdlGrammar a
inAnyParens g = inParens g <|> inBrackets g <|> inBraces g
sepList1 : Char -> IdlGrammar a -> IdlGrammar (List1 a)
sepList1 c g = [| (g <* symbol c) ::: sepBy (symbol c) g |]
<|> map (\x => x ::: Nil) g
--------------------------------------------------------------------------------
-- Identifiers
--------------------------------------------------------------------------------
export
key : String -> IdlGrammar ()
key s = tok s $ \case Key (MkKeyword i _) => guard (i == s)
_ => Nothing
export
ident : IdlGrammar Identifier
ident = tok "identifier" $ \case Ident i => Just i
_ => Nothing
export
keyword : IdlGrammar Keyword
keyword = tok "keyword" $ \case Key i => Just i
_ => Nothing
||| IdentifierList :: identifier Identifiers
||| Identifiers :: , identifier Identifiers ε
export
identifierList : IdlGrammar IdentifierList
identifierList = [| ident ::: many (comma *> ident) |]
--------------------------------------------------------------------------------
-- Extended Attributes
--------------------------------------------------------------------------------
symbolUnless : String -> (Char -> Bool) -> IdlGrammar Symbol
symbolUnless s f = tok s $ \case Other s => fromSym s
_ => Nothing
where fromSym : Symbol -> Maybe Symbol
fromSym Ellipsis = Just Ellipsis
fromSym (Symb c) = if f c then Nothing else Just (Symb c)
otherSym : IdlGrammar Symbol -> IdlGrammar Other
otherSym sym = choice {t = List} [ map (\v => inject v) intLit
, map (\v => inject v) stringLit
, map (\v => inject v) floatLit
, map (\v => inject v) ident
, map (\v => inject v) sym
, map (\v => inject v) keyword
]
export
other : IdlGrammar Other
other = otherSym $ symbolUnless "other" isCommaOrParenOrQuote
export
otherOrComma : IdlGrammar Other
otherOrComma = otherSym $ symbolUnless "otherOrComma" isParenOrQuote
export
eaInner : IdlGrammar' EAInner
eaInner = [| EAIParens (inAnyParens eaInner) eaInner |]
<|> [| EAIOther otherOrComma eaInner |]
<|> pure EAIEmpty
export
extAttribute : IdlGrammar ExtAttribute
extAttribute = [| EAParens (inAnyParens eaInner) (optional extAttribute) |]
<|> [| EAOther other (optional extAttribute) |]
export
extAttrs1 : IdlGrammar ExtAttributeList
extAttrs1 = forget <$> inBrackets (sepBy1 comma extAttribute)
export
attributes : IdlGrammar' ExtAttributeList
attributes = extAttrs1 <|> pure Nil
export
attributed : IdlGrammar a -> IdlGrammar (Attributed a)
attributed g = [| (,) extAttrs1 g |] <|> map (Nil,) g
--------------------------------------------------------------------------------
-- Types
--------------------------------------------------------------------------------
bufferRelated : IdlGrammar BufferRelatedType
bufferRelated = withKey "BufferRelated" $
\case "ArrayBuffer" => Just ArrayBuffer
"DataView" => Just DataView
"Int8Array" => Just Int8Array
"Int16Array" => Just Int16Array
"Int32Array" => Just Int32Array
"Uint8Array" => Just Uint8Array
"Uint16Array" => Just Uint16Array
"Uint32Array" => Just Uint32Array
"Uint8ClampedArray" => Just Uint8ClampedArray
"Float32Array" => Just Float32Array
"Float64Array" => Just Float64Array
_ => Nothing
stringType : IdlGrammar StringType
stringType = withKey "stringType" $
\case "ByteString" => Just ByteString
"DOMString" => Just DOMString
"USVString" => Just USVString
_ => Nothing
export
primitive : IdlGrammar PrimitiveType
primitive = key "unsigned" *> map Unsigned int
<|> key "unrestricted" *> map Unrestricted float
<|> map Signed int
<|> map Restricted float
<|> withKey "Primitive" (\case "boolean" => Just Boolean
"byte" => Just Byte
"octet" => Just Octet
"bigint" => Just BigInt
"undefined" => Just Undefined
_ => Nothing)
where int : IdlGrammar IntType
int = (key "long" *> key "long" $> LongLong)
<|> (key "long" $> Long)
<|> (key "short" $> Short)
float : IdlGrammar FloatType
float = withKey "FloatType" $ \case "double" => Just Dbl
"float" => Just Float
_ => Nothing
constType : IdlGrammar ConstType
constType = map CP primitive <|> map CI ident
nullable : IdlGrammar a -> IdlGrammar (Nullable a)
nullable g = map MaybeNull (g <* symbol '?') <|> map NotNull g
mutual
-- Type ::
-- SingleType
-- UnionType Null
--
-- SingleType ::
-- DistinguishableType
-- any
-- PromiseType
-- PromiseType ::
-- Promise < Type >
export
idlType : IdlGrammar IdlType
idlType = (key "any" $> Any)
<|> map Promise (key "Promise" *> inAngles idlType)
<|> map D distinguishableType
<|> (nullable flatUnion >>= map U . fromFlatUnion)
where um : Attributed (Nullable Distinguishable) -> UnionMemberType
um (a, MaybeNull x) = MkUnionMember a x
um (a, NotNull x) = MkUnionMember a x
fromFlatUnion : Nullable (List1 $ Attributed $ Nullable Distinguishable)
-> IdlGrammar' (Nullable UnionType)
fromFlatUnion (MaybeNull $ a ::: b :: t) =
pure . MaybeNull $ UT (um a) (um b) (map um t)
fromFlatUnion (NotNull $ a ::: b :: t) =
if any (isNullable . snd) (a::b::t)
then pure . MaybeNull $ UT (um a) (um b) (map um t)
else pure . NotNull $ UT (um a) (um b) (map um t)
fromFlatUnion _ = fail "not enough union members"
-- TypeWithExtendedAttributes ::
-- ExtendedAttributeList Type
attrTpe : IdlGrammar (Attributed IdlType)
attrTpe = attributed idlType
-- RecordType ::
-- record < StringType , TypeWithExtendedAttributes >
recrd : IdlGrammar Distinguishable
recrd = Record <$> (key "record" *> symbol '<' *> stringType)
<*> (comma *> attributes)
<*> (idlType <* symbol '>')
-- DistinguishableType ::
-- PrimitiveType Null
-- StringType Null
-- identifier Null
-- sequence < TypeWithExtendedAttributes > Null
-- object Null
-- symbol Null
-- BufferRelatedType Null
-- FrozenArray < TypeWithExtendedAttributes > Null
-- ObservableArray < TypeWithExtendedAttributes > Null
-- RecordType Null
distinguishable : IdlGrammar Distinguishable
distinguishable =
map P primitive
<|> map S stringType
<|> map B bufferRelated
<|> (key "object" $> Object)
<|> (key "symbol" $> Symbol)
<|> (key "sequence" *> inAngles [| Sequence attributes idlType |])
<|> (key "FrozenArray" *> inAngles [| FrozenArray attributes idlType |])
<|> (key "ObservableArray" *> inAngles [| ObservableArray attributes idlType |])
<|> recrd
<|> map I ident
distinguishableType : IdlGrammar (Nullable Distinguishable)
distinguishableType = nullable distinguishable
-- UnionType ::
-- ( UnionMemberType or UnionMemberType UnionMemberTypes )
--
-- UnionMemberTypes ::
-- or UnionMemberType UnionMemberTypes
-- ε
flatUnion : IdlGrammar (List1 $ Attributed $ Nullable Distinguishable)
flatUnion = inParens $ do (a :: b :: t) <- sepBy (key "or") flatMember
| _ => fail "Not enough union members"
pure (join $ a ::: b :: t)
-- UnionMemberType ::
-- ExtendedAttributeList DistinguishableType
-- UnionType Null
flatMember : IdlGrammar (List1 $ Attributed $ Nullable Distinguishable)
flatMember = map singleton (attributed distinguishableType) <|> flatUnion
optionalType : IdlGrammar' OptionalType
optionalType = optional (symbol ',' *> attributed idlType)
--------------------------------------------------------------------------------
-- Arguments
--------------------------------------------------------------------------------
boolLit : IdlGrammar Bool
boolLit = (key "false" $> False) <|> (key "true" $> True)
constValue : IdlGrammar ConstValue
constValue = map B boolLit <|> map F floatLit <|> map I intLit
defaultV : IdlGrammar' Default
defaultV = (symbol '=' *> (
(symbol '[' *> symbol ']' $> EmptyList)
<|> (symbol '{' *> symbol '}' $> EmptySet)
<|> (key "null" $> Null)
<|> map S stringLit
<|> map C constValue
)) <|> pure None
argName : IdlGrammar ArgumentName
argName = withKey "ArgumentNameKeyword"
(map (MkArgName . value) . ArgumentNameKeyword.refine)
<|> map (MkArgName . value) ident
arg : IdlGrammar Arg
arg = [| MkArg attributes idlType argName |]
vararg : IdlGrammar Arg
vararg = [| MkArg attributes (idlType <* ellipsis) argName |]
optArg : IdlGrammar OptArg
optArg = [| MkOptArg attributes
(key "optional" *> attributes)
idlType
argName
defaultV |]
argumentList : IdlGrammar' ArgumentList
argumentList = [| VarArg args vararg |]
<|> [| NoVarArg (args1 <* comma) (sepBy comma optArg) |]
<|> [| NoVarArg args1 (pure Nil) |]
<|> [| NoVarArg (pure Nil) (sepBy comma optArg) |]
where args1 : IdlGrammar (List Arg)
args1 = forget <$> sepBy1 comma arg
args : IdlGrammar' (List Arg)
args = (args1 <* comma) <|> pure Nil
optArgList : IdlGrammar' ArgumentList
optArgList = inParens argumentList <|> pure (NoVarArg Nil Nil)
--------------------------------------------------------------------------------
-- Member
--------------------------------------------------------------------------------
member : List String -> IdlGrammar a -> IdlGrammar a
member [] g = g <* symbol ';'
member (h :: t) g = key h *> member t g
export
const : IdlGrammar Const
const = member ["const"]
[| MkConst constType ident (symbol '=' *> constValue) |]
special : IdlGrammar Special
special = (key "getter" $> Getter)
<|> (key "setter" $> Setter)
<|> (key "deleter" $> Deleter)
opName : IdlGrammar OperationName
opName = (key "includes" $> MkOpName "includes")
<|> map (\(MkIdent s) => MkOpName s) ident
regularOperation : IdlGrammar RegularOperation
regularOperation =
member []
[| MkOp (pure ()) idlType (optional opName) (inParens argumentList) |]
specialOperation : IdlGrammar SpecialOperation
specialOperation =
member []
[| MkOp special idlType (optional opName) (inParens argumentList) |]
export
operation : IdlGrammar Operation
operation = map specToOp specialOperation <|> map regToOp regularOperation
callbackInterfaceMember : IdlGrammar CallbackInterfaceMember
callbackInterfaceMember = map (\v => inject v) const
<|> map (\v => inject v) regularOperation
dictMember : IdlGrammar DictionaryMemberRest
dictMember = member ["required"] [| Required attributes idlType ident |]
<|> member [] [| Optional idlType ident defaultV |]
inheritance : IdlGrammar' Inheritance
inheritance = optional (symbol ':' *> ident)
attributeName : IdlGrammar AttributeName
attributeName = withKey "AttributeNameKeyword"
(map (MkAttributeName . value) . AttributeNameKeyword.refine)
<|> map (MkAttributeName . value) ident
readonly : IdlGrammar a -> IdlGrammar (Readonly a)
readonly g = key "readonly" *> map MkRO g
inherit : IdlGrammar a -> IdlGrammar (Inherit a)
inherit g = key "inherit" *> map MkI g
attribute : IdlGrammar Attribute
attribute = member ["attribute"]
[| MkAttribute attributes idlType attributeName |]
stringifier : IdlGrammar Stringifier
stringifier = key "stringifier" *> (
map (\v => inject v) attribute
<|> map (\v => inject v) (readonly attribute)
<|> map (\v => inject v) regularOperation
<|> map (\v => inject v) (symbol ';')
)
static : IdlGrammar StaticMember
static = key "static" *> (
map (\v => inject v) attribute
<|> map (\v => inject v) (readonly attribute)
<|> map (\v => inject v) regularOperation
)
maplike : IdlGrammar Maplike
maplike = member ["maplike"] $ inAngles [| MkMaplike (attributed idlType)
(symbol ',' *> attributed idlType) |]
setlike : IdlGrammar Setlike
setlike = member ["setlike"] $ inAngles [| MkSetlike (attributed idlType) |]
namespaceMember : IdlGrammar NamespaceMember
namespaceMember = map (\v => inject v) regularOperation
<|> map (\v => inject v) (readonly attribute)
constructor_ : IdlGrammar Constructor
constructor_ =
member ["constructor"] (map MkConstructor $ inParens argumentList)
partialInterfaceMember : IdlGrammar PartialInterfaceMember
partialInterfaceMember =
map IConst const
<|> map IOp operation
<|> map IAttr attribute
<|> map IAttrRO (readonly attribute)
<|> map IAttrInh (inherit attribute)
<|> map IMap maplike
<|> map IMapRO (readonly maplike)
<|> map ISet setlike
<|> map ISetRO (readonly setlike)
<|> map IStr stringifier
<|> map IStatic static
<|> member ["iterable"] (inAngles [| IIterable (attributed idlType)
optionalType |])
<|> member ["async","iterable"] (
do p <- inAngles [| (,) (attributed idlType) optionalType |]
as <- optArgList
pure (IAsync (fst p) (snd p) as))
mixinMember : IdlGrammar MixinMember
mixinMember = map MConst const
<|> map MOp regularOperation
<|> map MAttr attribute
<|> map MAttrRO (readonly attribute)
<|> map MStr stringifier
export
interfaceMember : IdlGrammar InterfaceMember
interfaceMember = map (\v => inject v) constructor_
<|> map (\v => inject v) partialInterfaceMember
members : IdlGrammar a -> IdlGrammar (List $ Attributed a)
members g = inBraces (many $ attributed g)
--------------------------------------------------------------------------------
-- Definition
--------------------------------------------------------------------------------
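-- parses a definition of the form: extended attribute list, the given
-- keywords (in order), the body produced by `g`, and a terminating semicolon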
def : (ss : List String)
-> {auto 0 prf : NonEmpty ss}
-> (IdlGrammar ExtAttributeList -> IdlGrammar a)
-> IdlGrammar a
def (s :: ss) g = g (run ss (attributes <* key s)) <* symbol ';'
where run : List String -> IdlGrammar x -> IdlGrammar x
run [] y = y
run (x :: xs) y = run xs (y <* key x)
def0 : (IdlGrammar' ExtAttributeList -> IdlGrammar a) -> IdlGrammar a
def0 g = g attributes <* symbol ';'
-- optional trailing comma
enumLits : IdlGrammar (List1 StringLit)
enumLits = sepList1 ',' stringLit <* (symbol ',' <|> pure ())
callback : IdlGrammar Callback
callback =
def ["callback"] $ \as =>
[| MkCallback as ident (symbol '=' *> idlType) (inParens argumentList) |]
callbackInterface : IdlGrammar CallbackInterface
callbackInterface =
def ["callback","interface"] $ \as =>
[| MkCallbackInterface as ident (members callbackInterfaceMember) |]
dictionary : IdlGrammar Dictionary
dictionary =
def ["dictionary"] $ \as =>
[| MkDictionary as ident inheritance (members dictMember) |]
enum : IdlGrammar Enum
enum = def ["enum"] $ \as => [| MkEnum as ident (inBraces enumLits) |]
iface : IdlGrammar Interface
iface =
def ["interface"] $ \as =>
[| MkInterface as ident inheritance (members interfaceMember) |]
includes : IdlGrammar Includes
includes =
def0 $ \as => [| MkIncludes as ident (key "includes" *> ident) |]
mixin : IdlGrammar Mixin
mixin = def ["interface","mixin"] $ \as =>
[| MkMixin as ident (members mixinMember) |]
nspace : IdlGrammar Namespace
nspace = def ["namespace"] $ \as =>
[| MkNamespace as ident (members namespaceMember) |]
pdictionary : IdlGrammar PDictionary
pdictionary = def ["partial","dictionary"] $ \as =>
[| MkPDictionary as ident (members dictMember) |]
pnamespace : IdlGrammar PNamespace
pnamespace = def ["partial","namespace"] $ \as =>
[| MkPNamespace as ident (members namespaceMember) |]
pmixin : IdlGrammar PMixin
pmixin = def ["partial","interface","mixin"] $ \as =>
[| MkPMixin as ident (members mixinMember) |]
pinterface : IdlGrammar PInterface
pinterface =
def ["partial","interface"] $ \as =>
[| MkPInterface as ident (members partialInterfaceMember) |]
typedef : IdlGrammar Typedef
typedef = def ["typedef"] $ \as =>
[| MkTypedef as attributes idlType ident |]
export
definition : IdlGrammar Definition
definition =
map (\v => inject v) callbackInterface
<|> map (\v => inject v) callback
<|> map (\v => inject v) dictionary
<|> map (\v => inject v) enum
<|> map (\v => inject v) iface
<|> map (\v => inject v) includes
<|> map (\v => inject v) mixin
<|> map (\v => inject v) nspace
<|> map (\v => inject v) typedef
export
part : IdlGrammar Part
part = map (\v => inject v) pdictionary
<|> map (\v => inject v) pinterface
<|> map (\v => inject v) pmixin
<|> map (\v => inject v) pnamespace
export
partsAndDefs : IdlGrammar PartsAndDefs
partsAndDefs = accumNs . forget <$> some partOrDef
where partOrDef : IdlGrammar PartOrDef
partOrDef = map Z part
<|> map (S . Z) definition
--------------------------------------------------------------------------------
-- Parsing WebIDL
--------------------------------------------------------------------------------
toParseErr : ParsingError IdlToken -> Err
toParseErr (Error x Nothing) = ParseErr x
toParseErr (Error x $ Just $ MkBounds startLine startCol _ _) =
ParseErrAt x startLine startCol
export
parseIdl : IdlGrammar a -> String -> Either Err a
parseIdl g s = do ts <- mapFst LexErr (lexIdlNoNoise s)
(res,Nil) <- mapFst (toParseErr . head) (parse g ts)
| (_,b :: _) => Left (NoEOI b)
pure res
|
lemma decseq_imp_monoseq: "decseq X \<Longrightarrow> monoseq X"
|
In October 1944, he opened his six-month archaeological field school in Taxila, where he instructed various students from across India in the methodologies of the discipline. Wheeler became very fond of his students, with one of them, B. B. Lal, later commenting that "behind the gruff exterior, Sir Mortimer had a very kind and sympathetic heart". Throughout his period in India, his students were some of the only individuals to whom Wheeler warmed; more widely, he was annoyed by what he saw as the idleness, incompetence and corruption of Indian society. Initially focusing on the northwest of the subcontinent, Wheeler was particularly fascinated by the Bronze Age Indus Valley Civilization. On his initial inspection of the Indus Valley sites of Mohenjo-daro and Harappa, he organised a very brief excavation which revealed fortifications around both settlements. He later led a more detailed excavation at Harappa, where he exposed further fortifications and established a stratigraphy for the settlement.
|
function c=melcepst(s,fs,w,nc,p,n,inc,fl,fh)
%MELCEPST Calculate the mel cepstrum of a signal C=(S,FS,W,NC,P,N,INC,FL,FH)
%
%
% Simple use: c=melcepst(s,fs) % calculate mel cepstrum with 12 coefs, 256 sample frames
% c=melcepst(s,fs,'e0dD') % include log energy, 0th cepstral coef, delta and delta-delta coefs
%
% Inputs:
% s speech signal
% fs sample rate in Hz (default 11025)
% nc number of cepstral coefficients excluding 0'th coefficient (default 12)
% n length of frame (default: power of 2 giving a frame of < 30 ms)
% p number of filters in filterbank (default floor(3*log(fs)) )
% inc frame increment (default n/2)
% fl low end of the lowest filter as a fraction of fs (default = 0)
% fh high end of highest filter as a fraction of fs (default = 0.5)
%
% w any sensible combination of the following:
%
% 'R' rectangular window in time domain
% 'N' Hanning window in time domain
% 'M' Hamming window in time domain (default)
%
% 't' triangular shaped filters in mel domain (default)
% 'n' hanning shaped filters in mel domain
% 'm' hamming shaped filters in mel domain
%
% 'p' filters act in the power domain
% 'a' filters act in the absolute magnitude domain (default)
%
% '0' include 0'th order cepstral coefficient
% 'e' include log energy
% 'd' include delta coefficients (dc/dt)
% 'D' include delta-delta coefficients (d^2c/dt^2)
%
% 'z' highest and lowest filters taper down to zero (default)
% 'y' lowest filter remains at 1 down to 0 frequency and
% highest filter remains at 1 up to the Nyquist frequency
%
% If 'ty' or 'ny' is specified, the total power in the fft is preserved.
%
% Outputs: c mel cepstrum output: one frame per row
%
% Copyright (C) Mike Brookes 1997
%
% Last modified Thu Jun 15 09:14:48 2000
%
% VOICEBOX is a MATLAB toolbox for speech processing. Home page is at
% http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% ftp://prep.ai.mit.edu/pub/gnu/COPYING-2.0 or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nargin<2 fs=11025; end
if nargin<3 w='M'; end
if nargin<4 nc=12; end
if nargin<5 p=floor(3*log(fs)); end
if nargin<6 n=pow2(floor(log2(0.03*fs))); end
if nargin<9
fh=0.5;
if nargin<8
fl=0;
if nargin<7
inc=floor(n/2);
end
end
end
if any(w=='R')
z=enframe(s,n,inc);
elseif any (w=='N')
z=enframe(s,hanning(n),inc);
else
z=enframe(s,hamming(n),inc);
end
f=rfft(z.');
[m,a,b]=melbankm(p,n,fs,fl,fh,w);
pw=f(a:b,:).*conj(f(a:b,:));
pth=max(pw(:))*1E-6;
if any(w=='p')
y=log(max(m*pw,pth));
else
ath=sqrt(pth);
y=log(max(m*abs(f(a:b,:)),ath));
end
c=rdct(y).';
nf=size(c,1);
nc=nc+1;
if p>nc
c(:,nc+1:end)=[];
elseif p<nc
c=[c zeros(nf,nc-p)];
end
if ~any(w=='0')
c(:,1)=[];
end
if any(w=='e')
c=[log(sum(pw)).' c];
end
% calculate derivative
if any(w=='D')
vf=(4:-1:-4)/60;
af=(1:-1:-1)/2;
ww=ones(5,1);
cx=[c(ww,:); c; c(nf*ww,:)];
vx=reshape(filter(vf,1,cx(:)),nf+10,nc);
vx(1:8,:)=[];
ax=reshape(filter(af,1,vx(:)),nf+2,nc);
ax(1:2,:)=[];
vx([1 nf+2],:)=[];
if any(w=='d')
c=[c vx ax];
else
c=[c ax];
end
elseif any(w=='d')
vf=(4:-1:-4)/60;
ww=ones(4,1);
cx=[c(ww,:); c; c(nf*ww,:)];
vx=reshape(filter(vf,1,cx(:)),nf+8,nc);
vx(1:8,:)=[];
c=[c vx];
end
if nargout<1
[nf,nc]=size(c);
t=((0:nf-1)*inc+(n-1)/2)/fs;
ci=(1:nc)-any(w=='0')-any(w=='e');
imh = imagesc(t,ci,c.');
axis('xy');
xlabel('Time (s)');
ylabel('Mel-cepstrum coefficient');
map = (0:63)'/63;
colormap([map map map]);
colorbar;
end
|
\section{Stream Processing}
|
S = randSeisData(3, s=1.0, nx=1024)
@test mkchans(1, S) == [1]
@test mkchans(1:2, S) == [1, 2]
@test mkchans([2,3], S) == [2, 3]
S.x[2] = Float64[]
@test mkchans([2,3], S) == [3]
S.t[3] = Array{Int64, 2}(undef, 0, 2)
@test mkchans(1:3, S, f=:t) == [1,2]
@test mkchans(1:3, S) == [1,3]
S.t[3] = [1 S.t[1][1,2]; length(S.x[3]) 0]
push!(S, randSeisChannel(c=true))
@test mkchans(1:4, S, keepirr=false) == [1, 3]
@test mkchans([2,3,4], S, keepirr=false) == [3]
S = randSeisData(3, s=1.0, nx=1024)
@test get_seis_channels(S, chans=3) == [3]
@test get_seis_channels(S, chans=1:2) == [1,2]
@test get_seis_channels(S, chans=[1,3]) == [1,3]
cr = [1,2,3]
c0 = deepcopy(cr)
filt_seis_chans!(cr, S)
@test cr == c0
S.id[1] = "...0"
filt_seis_chans!(cr, S)
@test cr != c0
@test cr == [2,3]
printstyled("channel_match\n", color=:light_green)
C = SeisChannel()
D = SeisChannel()
@test channel_match(C, D)
C = randSeisChannel(s=true)
D = deepcopy(C)
C.gain = D.gain*0.5
@test channel_match(C, D) == false
@test channel_match(C, D, use_gain = false) == true
C.gain = D.gain
C.fs = D.fs*0.5
@test channel_match(C, D) == false
printstyled("cmatch_p!\n", color=:light_green)
C = randSeisChannel(s=true)
D = deepcopy(C)
C0 = deepcopy(C)
# Scenarios that must work:
# C set, D unset
D.fs = 0.0
D.gain = 1.0
D.loc = GeoLoc()
D.resp = PZResp()
D.units = ""
m = cmatch_p!(C,D)
@test m == true
@test channel_match(C, D) == true
# D set, C unset
C.fs = 0.0
C.gain = 1.0
C.loc = GeoLoc()
C.resp = PZResp()
C.units = ""
m = cmatch_p!(C,D)
@test m == true
@test channel_match(C, D) == true
# Values must preserve those in C0
@test channel_match(C, C0) == true
# Scenarios that must fail
C = randSeisChannel(s=true)
C.loc = GeoLoc(lat = 48.79911, lon=-122.54064, el=45.1104)
C.resp = PZResp(a0 = 223.43015f0, f0 = 2.0f0, p = ComplexF32.([-8.89+8.89im, 8.89-8.89im]))
C0 = deepcopy(C)
D = deepcopy(C0)
while D.units == C.units
D.units = randstring()
end
@test cmatch_p!(C,D) == false
@test C == C0
D = deepcopy(C0)
D.fs = 2.0*C.fs
D0 = deepcopy(D)
@test cmatch_p!(C,D) == false
@test C == C0
@test D == D0
D = deepcopy(C0)
D.gain = 2.0*C.gain
D0 = deepcopy(D)
@test cmatch_p!(C,D) == false
@test C == C0
@test D == D0
D = deepcopy(C0)
D.loc.lat = 89.1
D0 = deepcopy(D)
@test cmatch_p!(C,D) == false
@test C == C0
@test D == D0
D = deepcopy(C0)
D.resp.f0 = 1.0f0
D0 = deepcopy(D)
@test cmatch_p!(C,D) == false
@test C == C0
@test D == D0
|
```python
%matplotlib notebook
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
# The Axes3D import adds a feature: it enables using projection='3d' in add_subplot
import matplotlib.pyplot as plt
import random
```
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import matplotlib.cm as cm
from IPython.display import display, Math, clear_output
import sympy
from sympy import *
from sympy.physics.vector import ReferenceFrame, CoordinateSym
from sympy.vector import CoordSys3D, divergence, curl
import ipyvolume as ipv
import time
from ipywidgets import Output, interact
import ipywidgets as widgets
np.seterr(divide='ignore', invalid='ignore')
init_printing()
```
## Earnshaw's theorem: 8 point charges at the vertices of a cube; position a charge at the center. Is it stable?
```python
# create a X,Y grid
x = np.linspace(0, 100, 300)
y = np.linspace(0, 100, 300)
X, Y = np.meshgrid(x, y)
```
```python
# calculate the electric potential at a x,y,z point due to 8 point charges
# at the vertices of a cube (100x100x100)
def V(x,y,z):
    v = 0
    for crds in [(0,0,0), (0,0,100), (0, 100, 0), (100,0,0), (100,100,0), (100,0,100), (0,100,100), (100,100,100)]:
        v += 1/np.sqrt( (x-crds[0])**2 + (y-crds[1])**2 + (z-crds[2])**2 )
    return v
```
```python
# set z fixed in the center of the cube, get potential versus x,y
v_res = V(X,Y,50)
```
```python
from mpl_toolkits import mplot3d
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, v_res, rstride=1, cstride=1,
                cmap='viridis', edgecolor='none')
```
<IPython.core.display.Javascript object>
<mpl_toolkits.mplot3d.art3d.Poly3DCollection at 0xb27c29438>
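The surface above is only one slice through the center. Since the potential is harmonic away from the charges (it satisfies Laplace's equation), it cannot have a local minimum there, so the central equilibrium cannot be stable — exactly what Earnshaw's theorem demands. A quick numerical probe (a sketch reusing the `V` defined above) compares the potential along the z-axis through the center with the potential along a body diagonal:

```python
# probe V along two lines through the cube center (50, 50, 50)
t = np.linspace(-10, 10, 5)
v0 = V(50, 50, 50)
along_axis = V(50, 50, 50 + t)  # towards a face center
along_diag = V(50 + t/np.sqrt(3), 50 + t/np.sqrt(3), 50 + t/np.sqrt(3))  # towards a vertex
print(along_axis - v0)
print(along_diag - v0)
# the differences have opposite signs in the two directions:
# a saddle point, so the central equilibrium is unstable
```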
|
//---------------------------------------------------------------------------//
//!
//! \file Utility_GaussLegendreQuadratureSet.hpp
//! \author Luke Kersting
//! \brief Gauss - Legendre quadrature set declaration
//!
//---------------------------------------------------------------------------//
#ifndef UTILITY_GAUSS_LEGENDRE_QUADRATURE_SET_HPP
#define UTILITY_GAUSS_LEGENDRE_QUADRATURE_SET_HPP
// Boost Includes
#include <boost/multiprecision/cpp_dec_float.hpp>
// Trilinos Includes
#include <Teuchos_Array.hpp>
#include <Teuchos_TwoDArray.hpp>
namespace Utility{
typedef boost::multiprecision::cpp_dec_float_50 long_float;
//! Return the Gauss moments of the legendre expansion of a function, f(x)
void getGaussMoments( const Teuchos::Array<long_float>& legendre_expansion_moments,
Teuchos::Array<long_float>& gauss_moments );
//! Return the coefficients of the legendre expansion of x^n
void getLegendrePowerExpansionCoefficients(
Teuchos::TwoDArray<long_float>& coefficients,
const int power = 0 );
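// For intuition: writing the Legendre moments as L_k = \int P_k(x) f(x) dx,
// a Gauss moment M_n = \int x^n f(x) dx is a finite linear combination of the
// L_k, obtained from the expansion of x^n in Legendre polynomials, i.e. the
// coefficients returned by getLegendrePowerExpansionCoefficients. For example,
// P_2(x) = (3x^2 - 1)/2 gives x^2 = (P_0 + 2 P_2)/3, hence M_2 = (L_0 + 2 L_2)/3.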
} // end Utility namespace
#endif // end UTILITY_GAUSS_LEGENDRE_QUADRATURE_SET_HPP
//---------------------------------------------------------------------------//
// end Utility_GaussLegendreQuadratureSet.hpp
//---------------------------------------------------------------------------//
|
/-
Copyright (c) 2022 Yaël Dillies, Bhavik Mehta. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yaël Dillies, Bhavik Mehta
-/
import data.set.equitable
import order.partition.finpartition
/-!
# Finite equipartitions
This file defines finite equipartitions, i.e. partitions whose parts are all the same size up to a
difference of `1`.
## Main declarations
* `finpartition.is_equipartition`: Predicate for a `finpartition` to be an equipartition.
-/
open finset fintype
namespace finpartition
variables {α : Type*} [decidable_eq α] {s t : finset α} (P : finpartition s)
/-- An equipartition is a partition whose parts are all the same size, up to a difference of `1`. -/
def is_equipartition : Prop := (P.parts : set (finset α)).equitable_on card
lemma is_equipartition_iff_card_parts_eq_average : P.is_equipartition ↔
∀ a : finset α, a ∈ P.parts → a.card = s.card/P.parts.card ∨ a.card = s.card/P.parts.card + 1 :=
by simp_rw [is_equipartition, finset.equitable_on_iff, P.sum_card_parts]
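/- For intuition: if `s.card = 7` and `P.parts.card = 3` then `s.card / P.parts.card = 2`, so the
lemma above says `P.is_equipartition` holds exactly when every part has `2` or `3` elements,
e.g. part sizes `(3, 2, 2)`; part sizes `(4, 2, 1)` do not qualify. -/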
variables {P}
lemma _root_.set.subsingleton.is_equipartition (h : (P.parts : set (finset α)).subsingleton) :
P.is_equipartition :=
h.equitable_on _
lemma is_equipartition.card_parts_eq_average (hP : P.is_equipartition) (ht : t ∈ P.parts) :
t.card = s.card / P.parts.card ∨ t.card = s.card / P.parts.card + 1 :=
P.is_equipartition_iff_card_parts_eq_average.1 hP _ ht
lemma is_equipartition.average_le_card_part (hP : P.is_equipartition) (ht : t ∈ P.parts) :
s.card / P.parts.card ≤ t.card :=
by { rw ←P.sum_card_parts, exact equitable_on.le hP ht }
lemma is_equipartition.card_part_le_average_add_one (hP : P.is_equipartition) (ht : t ∈ P.parts) :
t.card ≤ s.card / P.parts.card + 1 :=
by { rw ←P.sum_card_parts, exact equitable_on.le_add_one hP ht }
/-! ### Discrete and indiscrete finpartition -/
variables (s)
lemma bot_is_equipartition : (⊥ : finpartition s).is_equipartition :=
set.equitable_on_iff_exists_eq_eq_add_one.2 ⟨1, by simp⟩
lemma top_is_equipartition : (⊤ : finpartition s).is_equipartition :=
(parts_top_subsingleton _).is_equipartition
lemma indiscrete_is_equipartition {hs : s ≠ ∅} : (indiscrete hs).is_equipartition :=
by { rw [is_equipartition, indiscrete_parts, coe_singleton], exact set.equitable_on_singleton s _ }
end finpartition
|
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kenny Lau, Mario Carneiro, Johan Commelin, Amelia Livingston, Anne Baanen
-/
import ring_theory.ideal.local_ring
import ring_theory.localization.ideal
/-!
# Localizations of commutative rings at the complement of a prime ideal
## Main definitions
* `is_localization.at_prime (I : ideal R) [is_prime I] (S : Type*)` expresses that `S` is a
localization at (the complement of) a prime ideal `I`, as an abbreviation of
`is_localization I.prime_compl S`
## Main results
* `is_localization.at_prime.local_ring`: a theorem (not an instance) stating that a localization
  at the complement of a prime ideal is a local ring
## Implementation notes
See `src/ring_theory/localization/basic.lean` for a design overview.
## Tags
localization, ring localization, commutative ring localization, characteristic predicate,
commutative ring, field of fractions
-/
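/- For intuition: with `R = ℤ` and `I = ideal.span {p}` for a prime `p`, the localization
`localization.at_prime I` is the ring of fractions `a / b` with `p ∤ b`; its non-units are exactly
the fractions with `p ∣ a`, and these form the unique maximal ideal, as `at_prime.local_ring`
shows in general. -/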
variables {R : Type*} [comm_semiring R] (M : submonoid R) (S : Type*) [comm_semiring S]
variables [algebra R S] {P : Type*} [comm_semiring P]
section at_prime
variables (I : ideal R) [hp : I.is_prime]
include hp
namespace ideal
/-- The complement of a prime ideal `I ⊆ R` is a submonoid of `R`. -/
def prime_compl :
submonoid R :=
{ carrier := (Iᶜ : set R),
one_mem' := by convert I.ne_top_iff_one.1 hp.1; refl,
mul_mem' := λ x y hnx hny hxy, or.cases_on (hp.mem_or_mem hxy) hnx hny }
lemma prime_compl_le_non_zero_divisors [no_zero_divisors R] : I.prime_compl ≤ non_zero_divisors R :=
le_non_zero_divisors_of_no_zero_divisors $ not_not_intro I.zero_mem
end ideal
variables (S)
/-- Given a prime ideal `P`, the typeclass `is_localization.at_prime S P` states that `S` is
isomorphic to the localization of `R` at the complement of `P`. -/
protected abbreviation is_localization.at_prime := is_localization I.prime_compl S
/-- Given a prime ideal `P`, `localization.at_prime S P` is a localization of
`R` at the complement of `P`, as a quotient type. -/
protected abbreviation localization.at_prime := localization I.prime_compl
namespace is_localization
lemma at_prime.nontrivial [is_localization.at_prime S I] : nontrivial S :=
nontrivial_of_ne (0 : S) 1 $ λ hze,
begin
rw [←(algebra_map R S).map_one, ←(algebra_map R S).map_zero] at hze,
obtain ⟨t, ht⟩ := (eq_iff_exists I.prime_compl S).1 hze,
have htz : (t : R) = 0, by simpa using ht.symm,
exact t.2 (htz.symm ▸ I.zero_mem : ↑t ∈ I)
end
local attribute [instance] at_prime.nontrivial
theorem at_prime.local_ring [is_localization.at_prime S I] : local_ring S :=
local_ring.of_nonunits_add
begin
intros x y hx hy hu,
cases is_unit_iff_exists_inv.1 hu with z hxyz,
have : ∀ {r : R} {s : I.prime_compl}, mk' S r s ∈ nonunits S → r ∈ I, from
λ (r : R) (s : I.prime_compl), not_imp_comm.1
(λ nr, is_unit_iff_exists_inv.2 ⟨mk' S ↑s (⟨r, nr⟩ : I.prime_compl),
mk'_mul_mk'_eq_one' _ _ nr⟩),
rcases mk'_surjective I.prime_compl x with ⟨rx, sx, hrx⟩,
rcases mk'_surjective I.prime_compl y with ⟨ry, sy, hry⟩,
rcases mk'_surjective I.prime_compl z with ⟨rz, sz, hrz⟩,
rw [←hrx, ←hry, ←hrz, ←mk'_add, ←mk'_mul,
←mk'_self S I.prime_compl.one_mem] at hxyz,
rw ←hrx at hx, rw ←hry at hy,
obtain ⟨t, ht⟩ := is_localization.eq.1 hxyz,
simp only [mul_one, one_mul, submonoid.coe_mul, subtype.coe_mk] at ht,
suffices : ↑t * (↑sx * ↑sy * ↑sz) ∈ I, from
not_or (mt hp.mem_or_mem $ not_or sx.2 sy.2) sz.2
(hp.mem_or_mem $ (hp.mem_or_mem this).resolve_left t.2),
rw [←ht],
exact I.mul_mem_left _ (I.mul_mem_right _ (I.add_mem (I.mul_mem_right _ $ this hx)
(I.mul_mem_right _ $ this hy))),
end
end is_localization
namespace localization
/-- The localization of `R` at the complement of a prime ideal is a local ring. -/
instance at_prime.local_ring : local_ring (localization I.prime_compl) :=
is_localization.at_prime.local_ring (localization I.prime_compl) I
end localization
end at_prime
namespace is_localization
variables {A : Type*} [comm_ring A] [is_domain A]
/--
The localization of an integral domain at the complement of a prime ideal is an integral domain.
-/
instance is_domain_of_local_at_prime {P : ideal A} (hp : P.is_prime) :
is_domain (localization.at_prime P) :=
is_domain_localization P.prime_compl_le_non_zero_divisors
namespace at_prime
variables (I : ideal R) [hI : I.is_prime] [is_localization.at_prime S I]
include hI
lemma is_unit_to_map_iff (x : R) :
is_unit ((algebra_map R S) x) ↔ x ∈ I.prime_compl :=
⟨λ h hx, (is_prime_of_is_prime_disjoint I.prime_compl S I hI disjoint_compl_left).ne_top $
(ideal.map (algebra_map R S) I).eq_top_of_is_unit_mem (ideal.mem_map_of_mem _ hx) h,
λ h, map_units S ⟨x, h⟩⟩
-- Can't use typeclasses to infer the `local_ring` instance, so use an `opt_param` instead
-- (since `local_ring` is a `Prop`, there should be no unification issues.)
lemma to_map_mem_maximal_iff (x : R) (h : _root_.local_ring S := local_ring S I) :
algebra_map R S x ∈ local_ring.maximal_ideal S ↔ x ∈ I :=
not_iff_not.mp $ by
simpa only [local_ring.mem_maximal_ideal, mem_nonunits_iff, not_not]
using is_unit_to_map_iff S I x
lemma comap_maximal_ideal (h : _root_.local_ring S := local_ring S I) :
(local_ring.maximal_ideal S).comap (algebra_map R S) = I :=
ideal.ext $ λ x, by simpa only [ideal.mem_comap] using to_map_mem_maximal_iff _ I x
lemma is_unit_mk'_iff (x : R) (y : I.prime_compl) :
is_unit (mk' S x y) ↔ x ∈ I.prime_compl :=
⟨λ h hx, mk'_mem_iff.mpr ((to_map_mem_maximal_iff S I x).mpr hx) h,
λ h, is_unit_iff_exists_inv.mpr ⟨mk' S ↑y ⟨x, h⟩, mk'_mul_mk'_eq_one ⟨x, h⟩ y⟩⟩
lemma mk'_mem_maximal_iff (x : R) (y : I.prime_compl) (h : _root_.local_ring S := local_ring S I) :
mk' S x y ∈ local_ring.maximal_ideal S ↔ x ∈ I :=
not_iff_not.mp $ by
simpa only [local_ring.mem_maximal_ideal, mem_nonunits_iff, not_not]
using is_unit_mk'_iff S I x y
end at_prime
end is_localization
namespace localization
open is_localization
local attribute [instance] classical.prop_decidable
variables (I : ideal R) [hI : I.is_prime]
include hI
variables {I}
/-- The unique maximal ideal of the localization at `I.prime_compl` lies over the ideal `I`. -/
lemma at_prime.comap_maximal_ideal :
ideal.comap (algebra_map R (localization.at_prime I))
(local_ring.maximal_ideal (localization I.prime_compl)) = I :=
at_prime.comap_maximal_ideal _ _
/-- The image of `I` in the localization at `I.prime_compl` is a maximal ideal, and in particular
it is the unique maximal ideal given by the local ring structure `at_prime.local_ring`. -/
lemma at_prime.map_eq_maximal_ideal :
ideal.map (algebra_map R (localization.at_prime I)) I =
(local_ring.maximal_ideal (localization I.prime_compl)) :=
begin
convert congr_arg (ideal.map _) at_prime.comap_maximal_ideal.symm,
rw map_comap I.prime_compl
end
lemma le_comap_prime_compl_iff {J : ideal P} [hJ : J.is_prime] {f : R →+* P} :
I.prime_compl ≤ J.prime_compl.comap f ↔ J.comap f ≤ I :=
⟨λ h x hx, by { contrapose! hx, exact h hx },
λ h x hx hfxJ, hx (h hfxJ)⟩
variables (I)
/--
For a ring hom `f : R →+* S` and a prime ideal `J` in `S`, the induced ring hom from the
localization of `R` at `J.comap f` to the localization of `S` at `J`.
To make this definition more flexible, we allow any ideal `I` of `R` as input, together with a proof
that `I = J.comap f`. This can be useful when `I` is not definitionally equal to `J.comap f`.
-/
noncomputable def local_ring_hom (J : ideal P) [hJ : J.is_prime] (f : R →+* P)
(hIJ : I = J.comap f) :
localization.at_prime I →+* localization.at_prime J :=
is_localization.map (localization.at_prime J) f (le_comap_prime_compl_iff.mpr (ge_of_eq hIJ))
lemma local_ring_hom_to_map (J : ideal P) [hJ : J.is_prime] (f : R →+* P)
(hIJ : I = J.comap f) (x : R) :
local_ring_hom I J f hIJ (algebra_map _ _ x) = algebra_map _ _ (f x) :=
map_eq _ _
lemma local_ring_hom_mk' (J : ideal P) [hJ : J.is_prime] (f : R →+* P)
(hIJ : I = J.comap f) (x : R) (y : I.prime_compl) :
local_ring_hom I J f hIJ (is_localization.mk' _ x y) =
is_localization.mk' (localization.at_prime J) (f x)
(⟨f y, le_comap_prime_compl_iff.mpr (ge_of_eq hIJ) y.2⟩ : J.prime_compl) :=
map_mk' _ _ _
instance is_local_ring_hom_local_ring_hom (J : ideal P) [hJ : J.is_prime] (f : R →+* P)
(hIJ : I = J.comap f) :
is_local_ring_hom (local_ring_hom I J f hIJ) :=
is_local_ring_hom.mk $ λ x hx,
begin
rcases is_localization.mk'_surjective I.prime_compl x with ⟨r, s, rfl⟩,
rw local_ring_hom_mk' at hx,
rw at_prime.is_unit_mk'_iff at hx ⊢,
exact λ hr, hx ((set_like.ext_iff.mp hIJ r).mp hr),
end
lemma local_ring_hom_unique (J : ideal P) [hJ : J.is_prime] (f : R →+* P)
(hIJ : I = J.comap f) {j : localization.at_prime I →+* localization.at_prime J}
(hj : ∀ x : R, j (algebra_map _ _ x) = algebra_map _ _ (f x)) :
local_ring_hom I J f hIJ = j :=
map_unique _ _ hj
@[simp] lemma local_ring_hom_id :
local_ring_hom I I (ring_hom.id R) (ideal.comap_id I).symm = ring_hom.id _ :=
local_ring_hom_unique _ _ _ _ (λ x, rfl)
@[simp] lemma local_ring_hom_comp {S : Type*} [comm_semiring S]
(J : ideal S) [hJ : J.is_prime] (K : ideal P) [hK : K.is_prime]
(f : R →+* S) (hIJ : I = J.comap f) (g : S →+* P) (hJK : J = K.comap g) :
local_ring_hom I K (g.comp f) (by rw [hIJ, hJK, ideal.comap_comap f g]) =
(local_ring_hom J K g hJK).comp (local_ring_hom I J f hIJ) :=
local_ring_hom_unique _ _ _ _
(λ r, by simp only [function.comp_app, ring_hom.coe_comp, local_ring_hom_to_map])
end localization
|
[STATEMENT]
lemma "OFCLASS('a::type, classC_class)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OFCLASS('a, classC_class)
[PROOF STEP]
nitpick [expect = genuine]
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. OFCLASS('a, classC_class)
[PROOF STEP]
oops
|
[STATEMENT]
lemma reachable_nodes_refl[simp, intro!]: "r \<in> reachable_nodes g r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. r \<in> reachable_nodes g r
[PROOF STEP]
by (auto simp: reachable_nodes_def)
|
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
⊢ F.map ed.s' ≫ NatTrans.app (((whiskering C D).obj F).obj X).hom (op [0]) =
𝟙 (point.obj (((whiskering C D).obj F).obj X))
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
⊢ F.map ed.s' ≫ F.map (NatTrans.app X.hom (op [0])) ≫ 𝟙 (F.obj X.right) = 𝟙 (F.obj X.right)
[PROOFSTEP]
erw [comp_id, ← F.map_comp, ed.s'_comp_ε, F.map_id]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
⊢ (fun n => F.map (s ed n)) 0 ≫ SimplicialObject.δ (((whiskering C D).obj F).obj X).left 1 =
NatTrans.app (((whiskering C D).obj F).obj X).hom (op [0]) ≫ F.map ed.s'
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
⊢ F.map (s ed 0) ≫ SimplicialObject.δ (((SimplicialObject.whiskering C D).obj F).obj X.left) 1 =
(F.map (NatTrans.app X.hom (op [0])) ≫ 𝟙 (F.obj X.right)) ≫ F.map ed.s'
[PROOFSTEP]
erw [comp_id, ← F.map_comp, ← F.map_comp, ed.s₀_comp_δ₁]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
⊢ (fun n => F.map (s ed n)) n ≫ SimplicialObject.δ (((whiskering C D).obj F).obj X).left 0 =
𝟙 ((drop.obj (((whiskering C D).obj F).obj X)).obj (op [n]))
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
⊢ F.map (s ed n) ≫ SimplicialObject.δ (((SimplicialObject.whiskering C D).obj F).obj X.left) 0 =
𝟙 (F.obj (X.left.obj (op [n])))
[PROOFSTEP]
erw [← F.map_comp, ed.s_comp_δ₀, F.map_id]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 2)
⊢ (fun n => F.map (s ed n)) (n + 1) ≫ SimplicialObject.δ (((whiskering C D).obj F).obj X).left (Fin.succ i) =
SimplicialObject.δ (((whiskering C D).obj F).obj X).left i ≫ (fun n => F.map (s ed n)) n
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 2)
⊢ F.map (s ed (n + 1)) ≫ SimplicialObject.δ (((SimplicialObject.whiskering C D).obj F).obj X.left) (Fin.succ i) =
SimplicialObject.δ (((SimplicialObject.whiskering C D).obj F).obj X.left) i ≫ F.map (s ed n)
[PROOFSTEP]
erw [← F.map_comp, ← F.map_comp, ed.s_comp_δ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 2)
⊢ F.map (SimplicialObject.δ X.left i ≫ s ed n) = F.map (X.left.map (SimplexCategory.δ i).op ≫ s ed n)
[PROOFSTEP]
rfl
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 1)
⊢ (fun n => F.map (s ed n)) n ≫ SimplicialObject.σ (((whiskering C D).obj F).obj X).left (Fin.succ i) =
SimplicialObject.σ (((whiskering C D).obj F).obj X).left i ≫ (fun n => F.map (s ed n)) (n + 1)
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 1)
⊢ F.map (s ed n) ≫ SimplicialObject.σ (((SimplicialObject.whiskering C D).obj F).obj X.left) (Fin.succ i) =
SimplicialObject.σ (((SimplicialObject.whiskering C D).obj F).obj X.left) i ≫ F.map (s ed (n + 1))
[PROOFSTEP]
erw [← F.map_comp, ← F.map_comp, ed.s_comp_σ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.93209, u_1} C
D : Type u_2
inst✝ : Category.{?u.93216, u_2} D
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
F : C ⥤ D
n : ℕ
i : Fin (n + 1)
⊢ F.map (SimplicialObject.σ X.left i ≫ s ed (n + 1)) = F.map (X.left.map (SimplexCategory.σ i).op ≫ s ed (n + 1))
[PROOFSTEP]
rfl
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
⊢ ((point.mapIso e).inv ≫ ed.s' ≫ NatTrans.app (drop.mapIso e).hom (op [0])) ≫ NatTrans.app Y.hom (op [0]) =
𝟙 (point.obj Y)
[PROOFSTEP]
simpa only [Functor.mapIso, assoc, w₀, ed.s'_comp_ε_assoc] using (point.mapIso e).inv_hom_id
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) 0 ≫
SimplicialObject.δ Y.left 1 =
NatTrans.app Y.hom (op [0]) ≫ (point.mapIso e).inv ≫ ed.s' ≫ NatTrans.app (drop.mapIso e).hom (op [0])
[PROOFSTEP]
have h := w₀ e.inv
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
h : NatTrans.app (drop.map e.inv) (op [0]) ≫ NatTrans.app X.hom (op [0]) = NatTrans.app Y.hom (op [0]) ≫ point.map e.inv
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) 0 ≫
SimplicialObject.δ Y.left 1 =
NatTrans.app Y.hom (op [0]) ≫ (point.mapIso e).inv ≫ ed.s' ≫ NatTrans.app (drop.mapIso e).hom (op [0])
[PROOFSTEP]
dsimp at h ⊢
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
h : NatTrans.app e.inv.left (op [0]) ≫ NatTrans.app X.hom (op [0]) = NatTrans.app Y.hom (op [0]) ≫ e.inv.right
⊢ (NatTrans.app e.inv.left (op [0]) ≫ s ed 0 ≫ NatTrans.app e.hom.left (op [0 + 1])) ≫ SimplicialObject.δ Y.left 1 =
NatTrans.app Y.hom (op [0]) ≫ e.inv.right ≫ ed.s' ≫ NatTrans.app e.hom.left (op [0])
[PROOFSTEP]
simp only [assoc, ← SimplicialObject.δ_naturality, ed.s₀_comp_δ₁_assoc, reassoc_of% h]
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n ≫
SimplicialObject.δ Y.left 0 =
𝟙 ((drop.obj Y).obj (op [n]))
[PROOFSTEP]
have h := ed.s_comp_δ₀
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
h : ∀ (n : ℕ), s ed n ≫ SimplicialObject.δ X.left 0 = 𝟙 ((drop.obj X).obj (op [n]))
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n ≫
SimplicialObject.δ Y.left 0 =
𝟙 ((drop.obj Y).obj (op [n]))
[PROOFSTEP]
dsimp at h ⊢
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
h : ∀ (n : ℕ), s ed n ≫ SimplicialObject.δ X.left 0 = 𝟙 (X.left.obj (op [n]))
⊢ (NatTrans.app e.inv.left (op [n]) ≫ s ed n ≫ NatTrans.app e.hom.left (op [n + 1])) ≫ SimplicialObject.δ Y.left 0 =
𝟙 (Y.left.obj (op [n]))
[PROOFSTEP]
simpa only [assoc, ← SimplicialObject.δ_naturality, reassoc_of% h] using congr_app (drop.mapIso e).inv_hom_id (op [n])
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 2)
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1]))
(n + 1) ≫
SimplicialObject.δ Y.left (Fin.succ i) =
SimplicialObject.δ Y.left i ≫
(fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n
[PROOFSTEP]
have h := ed.s_comp_δ n i
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 2)
h : s ed (n + 1) ≫ SimplicialObject.δ X.left (Fin.succ i) = SimplicialObject.δ X.left i ≫ s ed n
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1]))
(n + 1) ≫
SimplicialObject.δ Y.left (Fin.succ i) =
SimplicialObject.δ Y.left i ≫
(fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n
[PROOFSTEP]
dsimp at h ⊢
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 2)
h : s ed (n + 1) ≫ SimplicialObject.δ X.left (Fin.succ i) = SimplicialObject.δ X.left i ≫ s ed n
⊢ (NatTrans.app e.inv.left (op [n + 1]) ≫ s ed (n + 1) ≫ NatTrans.app e.hom.left (op [n + 1 + 1])) ≫
SimplicialObject.δ Y.left (Fin.succ i) =
SimplicialObject.δ Y.left i ≫ NatTrans.app e.inv.left (op [n]) ≫ s ed n ≫ NatTrans.app e.hom.left (op [n + 1])
[PROOFSTEP]
simp only [assoc, ← SimplicialObject.δ_naturality, reassoc_of% h, ← SimplicialObject.δ_naturality_assoc]
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 1)
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n ≫
SimplicialObject.σ Y.left (Fin.succ i) =
SimplicialObject.σ Y.left i ≫
(fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1]))
(n + 1)
[PROOFSTEP]
have h := ed.s_comp_σ n i
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 1)
h : s ed n ≫ SimplicialObject.σ X.left (Fin.succ i) = SimplicialObject.σ X.left i ≫ s ed (n + 1)
⊢ (fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1])) n ≫
SimplicialObject.σ Y.left (Fin.succ i) =
SimplicialObject.σ Y.left i ≫
(fun n => NatTrans.app (drop.mapIso e).inv (op [n]) ≫ s ed n ≫ NatTrans.app (drop.mapIso e).hom (op [n + 1]))
(n + 1)
[PROOFSTEP]
dsimp at h ⊢
[GOAL]
C : Type u_1
inst✝ : Category.{?u.102374, u_1} C
X Y : SimplicialObject.Augmented C
e : X ≅ Y
ed : ExtraDegeneracy X
n : ℕ
i : Fin (n + 1)
h : s ed n ≫ SimplicialObject.σ X.left (Fin.succ i) = SimplicialObject.σ X.left i ≫ s ed (n + 1)
⊢ (NatTrans.app e.inv.left (op [n]) ≫ s ed n ≫ NatTrans.app e.hom.left (op [n + 1])) ≫
SimplicialObject.σ Y.left (Fin.succ i) =
SimplicialObject.σ Y.left i ≫
NatTrans.app e.inv.left (op [n + 1]) ≫ s ed (n + 1) ≫ NatTrans.app e.hom.left (op [n + 1 + 1])
[PROOFSTEP]
simp only [assoc, ← SimplicialObject.σ_naturality, reassoc_of% h, ← SimplicialObject.σ_naturality_assoc]
[GOAL]
n : ℕ
X : Type u_1
inst✝ : Zero X
f : Fin n → X
i : Fin n
⊢ shiftFun f (Fin.succ i) = f i
[PROOFSTEP]
dsimp [shiftFun]
[GOAL]
n : ℕ
X : Type u_1
inst✝ : Zero X
f : Fin n → X
i : Fin n
⊢ (if x : Fin.succ i = 0 then 0 else f (Fin.pred (Fin.succ i) x)) = f i
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
n : ℕ
X : Type u_1
inst✝ : Zero X
f : Fin n → X
i : Fin n
h : Fin.succ i = 0
⊢ 0 = f i
[PROOFSTEP]
exfalso
[GOAL]
case pos.h
n : ℕ
X : Type u_1
inst✝ : Zero X
f : Fin n → X
i : Fin n
h : Fin.succ i = 0
⊢ False
[PROOFSTEP]
simp only [Fin.ext_iff, Fin.val_succ, Fin.val_zero, add_eq_zero, and_false] at h
[GOAL]
case neg
n : ℕ
X : Type u_1
inst✝ : Zero X
f : Fin n → X
i : Fin n
h : ¬Fin.succ i = 0
⊢ f (Fin.pred (Fin.succ i) h) = f i
[PROOFSTEP]
simp only [Fin.pred_succ]
[GOAL]
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
by_cases h₁ : i₁ = 0
[GOAL]
case pos
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : i₁ = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
subst h₁
[GOAL]
case pos
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : 0 ≤ i₂
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) 0 ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
simp only [shiftFun_0, Fin.zero_le]
[GOAL]
case neg
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
have h₂ : i₂ ≠ 0 := by
intro h₂
subst h₂
exact h₁ (le_antisymm hi (Fin.zero_le _))
[GOAL]
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
⊢ i₂ ≠ 0
[PROOFSTEP]
intro h₂
[GOAL]
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
h₂ : i₂ = 0
⊢ False
[PROOFSTEP]
subst h₂
[GOAL]
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ : Fin (SimplexCategory.len [n + 1] + 1)
h₁ : ¬i₁ = 0
hi : i₁ ≤ 0
⊢ False
[PROOFSTEP]
exact h₁ (le_antisymm hi (Fin.zero_le _))
[GOAL]
case neg
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
h₂ : i₂ ≠ 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
cases' Fin.eq_succ_of_ne_zero h₁ with j₁ hj₁
[GOAL]
case neg.intro
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
h₂ : i₂ ≠ 0
j₁ : Fin (SimplexCategory.len [n + 1])
hj₁ : i₁ = Fin.succ j₁
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
cases' Fin.eq_succ_of_ne_zero h₂ with j₂ hj₂
[GOAL]
case neg.intro.intro
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
i₁ i₂ : Fin (SimplexCategory.len [n + 1] + 1)
hi : i₁ ≤ i₂
h₁ : ¬i₁ = 0
h₂ : i₂ ≠ 0
j₁ : Fin (SimplexCategory.len [n + 1])
hj₁ : i₁ = Fin.succ j₁
j₂ : Fin (SimplexCategory.len [n + 1])
hj₂ : i₂ = Fin.succ j₂
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₁ ≤ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) i₂
[PROOFSTEP]
substs hj₁ hj₂
[GOAL]
case neg.intro.intro
n : ℕ
Δ : SimplexCategory
f : [n] ⟶ Δ
j₁ j₂ : Fin (SimplexCategory.len [n + 1])
h₁ : ¬Fin.succ j₁ = 0
h₂ : Fin.succ j₂ ≠ 0
hi : Fin.succ j₁ ≤ Fin.succ j₂
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) (Fin.succ j₁) ≤
shiftFun (↑(SimplexCategory.Hom.toOrderHom f)) (Fin.succ j₂)
[PROOFSTEP]
simpa only [shiftFun_succ] using f.toOrderHom.monotone (Fin.succ_le_succ_iff.mp hi)
[GOAL]
Δ : SimplexCategory
⊢ (fun x => SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0)) ≫
NatTrans.app (standardSimplex.obj Δ).hom (op [0]) =
𝟙 (point.obj (standardSimplex.obj Δ))
[PROOFSTEP]
dsimp
[GOAL]
Δ : SimplexCategory
⊢ (fun x => SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (0 + 1))) 0)) ≫
NatTrans.app (standardSimplex.obj Δ).hom (op [0]) =
𝟙 (⊤_ Type)
[PROOFSTEP]
apply Subsingleton.elim
[GOAL]
Δ : SimplexCategory
⊢ (fun n f => shift f) 0 ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 1 =
NatTrans.app (standardSimplex.obj Δ).hom (op [0]) ≫ fun x =>
SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0)
[PROOFSTEP]
ext1 x
[GOAL]
case h
Δ : SimplexCategory
x : (drop.obj (standardSimplex.obj Δ)).obj (op [0])
⊢ ((fun n f => shift f) 0 ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 1) x =
(NatTrans.app (standardSimplex.obj Δ).hom (op [0]) ≫ fun x =>
SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0))
x
[PROOFSTEP]
apply SimplexCategory.Hom.ext
[GOAL]
case h.a
Δ : SimplexCategory
x : (drop.obj (standardSimplex.obj Δ)).obj (op [0])
⊢ SimplexCategory.Hom.toOrderHom (((fun n f => shift f) 0 ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 1) x) =
SimplexCategory.Hom.toOrderHom
((NatTrans.app (standardSimplex.obj Δ).hom (op [0]) ≫ fun x =>
SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0))
x)
[PROOFSTEP]
ext j
[GOAL]
case h.a.h.h.h
Δ : SimplexCategory
x : (drop.obj (standardSimplex.obj Δ)).obj (op [0])
j : Fin (SimplexCategory.len (op [0]).unop + 1)
⊢ ↑(↑(SimplexCategory.Hom.toOrderHom (((fun n f => shift f) 0 ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 1) x))
j) =
↑(↑(SimplexCategory.Hom.toOrderHom
((NatTrans.app (standardSimplex.obj Δ).hom (op [0]) ≫ fun x =>
SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0))
x))
j)
[PROOFSTEP]
fin_cases j
[GOAL]
case h.a.h.h.h.head
Δ : SimplexCategory
x : (drop.obj (standardSimplex.obj Δ)).obj (op [0])
⊢ ↑(↑(SimplexCategory.Hom.toOrderHom (((fun n f => shift f) 0 ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 1) x))
{ val := 0, isLt := (_ : 0 < SimplexCategory.len (op [0]).unop + 1) }) =
↑(↑(SimplexCategory.Hom.toOrderHom
((NatTrans.app (standardSimplex.obj Δ).hom (op [0]) ≫ fun x =>
SimplexCategory.Hom.mk (↑(OrderHom.const (Fin (SimplexCategory.len (op [0]).unop + 1))) 0))
x))
{ val := 0, isLt := (_ : 0 < SimplexCategory.len (op [0]).unop + 1) })
[PROOFSTEP]
rfl
[GOAL]
Δ : SimplexCategory
n : ℕ
⊢ (fun n f => shift f) n ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 0 =
𝟙 ((drop.obj (standardSimplex.obj Δ)).obj (op [n]))
[PROOFSTEP]
ext1 φ
[GOAL]
case h
Δ : SimplexCategory
n : ℕ
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ ((fun n f => shift f) n ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 0) φ =
𝟙 ((drop.obj (standardSimplex.obj Δ)).obj (op [n])) φ
[PROOFSTEP]
apply SimplexCategory.Hom.ext
[GOAL]
case h.a
Δ : SimplexCategory
n : ℕ
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ SimplexCategory.Hom.toOrderHom (((fun n f => shift f) n ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 0) φ) =
SimplexCategory.Hom.toOrderHom (𝟙 ((drop.obj (standardSimplex.obj Δ)).obj (op [n])) φ)
[PROOFSTEP]
ext i : 2
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
i : Fin (SimplexCategory.len (op [n]).unop + 1)
⊢ ↑(SimplexCategory.Hom.toOrderHom (((fun n f => shift f) n ≫ SimplicialObject.δ (standardSimplex.obj Δ).left 0) φ)) i =
↑(SimplexCategory.Hom.toOrderHom (𝟙 ((drop.obj (standardSimplex.obj Δ)).obj (op [n])) φ)) i
[PROOFSTEP]
dsimp [SimplicialObject.δ, SimplexCategory.δ, SSet.standardSimplex]
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
i : Fin (SimplexCategory.len (op [n]).unop + 1)
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succ i) = ↑(SimplexCategory.Hom.toOrderHom φ) i
[PROOFSTEP]
simp only [shiftFun_succ]
[GOAL]
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
⊢ (fun n f => shift f) (n + 1) ≫ SimplicialObject.δ (standardSimplex.obj Δ).left (Fin.succ i) =
SimplicialObject.δ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) n
[PROOFSTEP]
ext1 φ
[GOAL]
case h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
⊢ ((fun n f => shift f) (n + 1) ≫ SimplicialObject.δ (standardSimplex.obj Δ).left (Fin.succ i)) φ =
(SimplicialObject.δ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) n) φ
[PROOFSTEP]
apply SimplexCategory.Hom.ext
[GOAL]
case h.a
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
⊢ SimplexCategory.Hom.toOrderHom
(((fun n f => shift f) (n + 1) ≫ SimplicialObject.δ (standardSimplex.obj Δ).left (Fin.succ i)) φ) =
SimplexCategory.Hom.toOrderHom ((SimplicialObject.δ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) n) φ)
[PROOFSTEP]
ext j : 2
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
⊢ ↑(SimplexCategory.Hom.toOrderHom
(((fun n f => shift f) (n + 1) ≫ SimplicialObject.δ (standardSimplex.obj Δ).left (Fin.succ i)) φ))
j =
↑(SimplexCategory.Hom.toOrderHom ((SimplicialObject.δ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) n) φ)) j
[PROOFSTEP]
dsimp [SimplicialObject.δ, SimplexCategory.δ, SSet.standardSimplex]
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ ↑(Fin.succAboveEmb i)) j
[PROOFSTEP]
by_cases j = 0
[GOAL]
case pos
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
h : j = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ ↑(Fin.succAboveEmb i)) j
[PROOFSTEP]
subst h
[GOAL]
case pos
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succAbove (Fin.succ i) 0) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ ↑(Fin.succAboveEmb i)) 0
[PROOFSTEP]
simp only [Fin.succ_succAbove_zero, shiftFun_0]
[GOAL]
case neg
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
h : ¬j = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ ↑(Fin.succAboveEmb i)) j
[PROOFSTEP]
obtain ⟨_, rfl⟩ := Fin.eq_succ_of_ne_zero <| h
[GOAL]
case neg.intro
Δ : SimplexCategory
n : ℕ
i : Fin (n + 2)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n + 1])
w✝ : Fin (SimplexCategory.len (op [n + 1]).unop)
h : ¬Fin.succ w✝ = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.succAbove (Fin.succ i) (Fin.succ w✝)) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ ↑(Fin.succAboveEmb i)) (Fin.succ w✝)
[PROOFSTEP]
simp only [Fin.succ_succAbove_succ, shiftFun_succ, Function.comp_apply, Fin.succAboveEmb_apply]
[GOAL]
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
⊢ (fun n f => shift f) n ≫ SimplicialObject.σ (standardSimplex.obj Δ).left (Fin.succ i) =
SimplicialObject.σ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) (n + 1)
[PROOFSTEP]
ext1 φ
[GOAL]
case h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ ((fun n f => shift f) n ≫ SimplicialObject.σ (standardSimplex.obj Δ).left (Fin.succ i)) φ =
(SimplicialObject.σ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) (n + 1)) φ
[PROOFSTEP]
apply SimplexCategory.Hom.ext
[GOAL]
case h.a
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ SimplexCategory.Hom.toOrderHom
(((fun n f => shift f) n ≫ SimplicialObject.σ (standardSimplex.obj Δ).left (Fin.succ i)) φ) =
SimplexCategory.Hom.toOrderHom
((SimplicialObject.σ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) (n + 1)) φ)
[PROOFSTEP]
ext j : 2
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)
⊢ ↑(SimplexCategory.Hom.toOrderHom
(((fun n f => shift f) n ≫ SimplicialObject.σ (standardSimplex.obj Δ).left (Fin.succ i)) φ))
j =
↑(SimplexCategory.Hom.toOrderHom
((SimplicialObject.σ (standardSimplex.obj Δ).left i ≫ (fun n f => shift f) (n + 1)) φ))
j
[PROOFSTEP]
dsimp [SimplicialObject.σ, SimplexCategory.σ, SSet.standardSimplex]
[GOAL]
case h.a.h.h
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ Fin.predAbove i) j
[PROOFSTEP]
by_cases j = 0
[GOAL]
case pos
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)
h : j = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ Fin.predAbove i) j
[PROOFSTEP]
subst h
[GOAL]
case pos
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) 0) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ Fin.predAbove i) 0
[PROOFSTEP]
simp only [shiftFun_0]
[GOAL]
case pos
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) 0) = 0
[PROOFSTEP]
exact shiftFun_0 φ.toOrderHom
[GOAL]
case neg
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)
h : ¬j = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) j) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ Fin.predAbove i) j
[PROOFSTEP]
obtain ⟨_, rfl⟩ := Fin.eq_succ_of_ne_zero h
[GOAL]
case neg.intro
Δ : SimplexCategory
n : ℕ
i : Fin (n + 1)
φ : (drop.obj (standardSimplex.obj Δ)).obj (op [n])
w✝ : Fin (SimplexCategory.len (op [n + 1 + 1]).unop)
h : ¬Fin.succ w✝ = 0
⊢ shiftFun (↑(SimplexCategory.Hom.toOrderHom φ)) (Fin.predAbove (Fin.succ i) (Fin.succ w✝)) =
shiftFun (↑(SimplexCategory.Hom.toOrderHom φ) ∘ Fin.predAbove i) (Fin.succ w✝)
[PROOFSTEP]
simp only [Fin.succ_predAbove_succ, shiftFun_succ, Function.comp_apply]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.220931, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
⊢ (fun i =>
if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x))
i ≫
f.hom =
WidePullback.base fun x => f.hom
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.220931, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
⊢ (if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x)) ≫
f.hom =
WidePullback.base fun x => f.hom
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.220931, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
h : i = 0
⊢ ((WidePullback.base fun x => f.hom) ≫ S.section_) ≫ f.hom = WidePullback.base fun x => f.hom
[PROOFSTEP]
subst h
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.220931, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ ((WidePullback.base fun x => f.hom) ≫ S.section_) ≫ f.hom = WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [assoc, SplitEpi.id, comp_id]
[GOAL]
case neg
C : Type u_1
inst✝¹ : Category.{?u.220931, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)
h : ¬i = 0
⊢ WidePullback.π (fun x => f.hom) (Fin.pred i h) ≫ f.hom = WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [WidePullback.π_arrow]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ s f S n ≫ WidePullback.π (fun x => f.hom) 0 = (WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
dsimp [ExtraDegeneracy.s]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i =>
if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x))
(_ :
∀ (i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
(fun i =>
if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x))
i ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.π (fun x => f.hom) 0 =
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
simp only [WidePullback.lift_π]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ (if h : True then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred 0 (_ : ¬0 = 0))) =
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
rfl
-- porting note: @[simp] removed as the linter complains the LHS is not in normal form
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.succ i) = WidePullback.π (fun x => f.hom) i
[PROOFSTEP]
dsimp [ExtraDegeneracy.s]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i =>
if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x))
(_ :
∀ (i : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
(fun i =>
if x : i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred i x))
i ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.π (fun x => f.hom) (Fin.succ i) =
WidePullback.π (fun x => f.hom) i
[PROOFSTEP]
simp only [WidePullback.lift_π]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (if x : Fin.succ i = 0 then (WidePullback.base fun x => f.hom) ≫ S.section_
else WidePullback.π (fun x => f.hom) (Fin.pred (Fin.succ i) x)) =
WidePullback.π (fun x => f.hom) i
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
h : Fin.succ i = 0
⊢ (WidePullback.base fun x => f.hom) ≫ S.section_ = WidePullback.π (fun x => f.hom) i
[PROOFSTEP]
simp only [Fin.ext_iff, Fin.val_succ, Fin.val_zero, add_eq_zero, and_false] at h
[GOAL]
case neg
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
h : ¬Fin.succ i = 0
⊢ WidePullback.π (fun x => f.hom) (Fin.pred (Fin.succ i) h) = WidePullback.π (fun x => f.hom) i
[PROOFSTEP]
simp only [Fin.pred_succ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{u_2, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ (s f S n ≫ WidePullback.base fun x => f.hom) = WidePullback.base fun x => f.hom
[PROOFSTEP]
apply WidePullback.lift_base
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
i : Fin (SimplexCategory.len (op [0]).unop + 1)
⊢ (fun x => 𝟙 ((𝟭 C).obj f.left)) i ≫ f.hom = f.hom
[PROOFSTEP]
rw [id_comp]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ (S.section_ ≫
WidePullback.lift f.hom (fun x => 𝟙 ((𝟭 C).obj f.left))
(_ :
∀ (i : Fin (SimplexCategory.len (op [0]).unop + 1)), (fun x => 𝟙 ((𝟭 C).obj f.left)) i ≫ f.hom = f.hom)) ≫
NatTrans.app (augmentedCechNerve f).hom (op [0]) =
𝟙 (point.obj (augmentedCechNerve f))
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ ((S.section_ ≫ WidePullback.lift f.hom (fun x => 𝟙 f.left) (_ : Fin (0 + 1) → 𝟙 f.left ≫ f.hom = f.hom)) ≫
WidePullback.base fun x => f.hom) =
𝟙 f.right
[PROOFSTEP]
simp only [augmentedCechNerve_hom_app, assoc, WidePullback.lift_base, SplitEpi.id]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ (fun n => ExtraDegeneracy.s f S n) 0 ≫ SimplicialObject.δ (augmentedCechNerve f).left 1 =
NatTrans.app (augmentedCechNerve f).hom (op [0]) ≫
S.section_ ≫
WidePullback.lift f.hom (fun x => 𝟙 ((𝟭 C).obj f.left))
(_ : ∀ (i : Fin (SimplexCategory.len (op [0]).unop + 1)), (fun x => 𝟙 ((𝟭 C).obj f.left)) i ≫ f.hom = f.hom)
[PROOFSTEP]
dsimp [cechNerve, SimplicialObject.δ, SimplexCategory.δ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ ExtraDegeneracy.s f S 0 ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i => WidePullback.π (fun x => f.hom) (Fin.succAbove 1 i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [0]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 1))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) =
(WidePullback.base fun x => f.hom) ≫
S.section_ ≫ WidePullback.lift f.hom (fun x => 𝟙 f.left) (_ : Fin (0 + 1) → 𝟙 f.left ≫ f.hom = f.hom)
[PROOFSTEP]
ext j
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
j : Fin (0 + 1)
⊢ (ExtraDegeneracy.s f S 0 ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i => WidePullback.π (fun x => f.hom) (Fin.succAbove 1 i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [0]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 1))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.π (fun x => f.hom) j =
((WidePullback.base fun x => f.hom) ≫
S.section_ ≫ WidePullback.lift f.hom (fun x => 𝟙 f.left) (_ : Fin (0 + 1) → 𝟙 f.left ≫ f.hom = f.hom)) ≫
WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
fin_cases j
[GOAL]
case a.head
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ (ExtraDegeneracy.s f S 0 ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i => WidePullback.π (fun x => f.hom) (Fin.succAbove 1 i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [0]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 1))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.π (fun x => f.hom) { val := 0, isLt := (_ : 0 < 0 + 1) } =
((WidePullback.base fun x => f.hom) ≫
S.section_ ≫ WidePullback.lift f.hom (fun x => 𝟙 f.left) (_ : Fin (0 + 1) → 𝟙 f.left ≫ f.hom = f.hom)) ≫
WidePullback.π (fun x => f.hom) { val := 0, isLt := (_ : 0 < 0 + 1) }
[PROOFSTEP]
simpa only [assoc, WidePullback.lift_π, comp_id] using ExtraDegeneracy.s_comp_π_0 f S 0
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
⊢ ((ExtraDegeneracy.s f S 0 ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i => WidePullback.π (fun x => f.hom) (Fin.succAbove 1 i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [0]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 1))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.base fun x => f.hom) =
((WidePullback.base fun x => f.hom) ≫
S.section_ ≫ WidePullback.lift f.hom (fun x => 𝟙 f.left) (_ : Fin (0 + 1) → 𝟙 f.left ≫ f.hom = f.hom)) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
simpa only [assoc, WidePullback.lift_base, SplitEpi.id, comp_id] using ExtraDegeneracy.s_comp_base f S 0
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ (fun n => ExtraDegeneracy.s f S n) n ≫ SimplicialObject.δ (augmentedCechNerve f).left 0 =
𝟙 ((drop.obj (augmentedCechNerve f)).obj (op [n]))
[PROOFSTEP]
dsimp [cechNerve, SimplicialObject.δ, SimplexCategory.δ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom) (fun i => WidePullback.π (fun x => f.hom) (Fin.succ i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 0))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) =
𝟙 (widePullback f.right (fun x => f.left) fun x => f.hom)
[PROOFSTEP]
ext j
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
j : Fin (n + 1)
⊢ (ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom) (fun i => WidePullback.π (fun x => f.hom) (Fin.succ i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 0))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.π (fun x => f.hom) j =
𝟙 (widePullback f.right (fun x => f.left) fun x => f.hom) ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
simpa only [assoc, WidePullback.lift_π, id_comp] using ExtraDegeneracy.s_comp_π_succ f S n j
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
⊢ ((ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom) (fun i => WidePullback.π (fun x => f.hom) (Fin.succ i))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb 0))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.base fun x => f.hom) =
𝟙 (widePullback f.right (fun x => f.left) fun x => f.hom) ≫ WidePullback.base fun x => f.hom
[PROOFSTEP]
simpa only [assoc, WidePullback.lift_base, id_comp] using ExtraDegeneracy.s_comp_base f S n
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (fun n => ExtraDegeneracy.s f S n) (n + 1) ≫ SimplicialObject.δ (augmentedCechNerve f).left (Fin.succ i) =
SimplicialObject.δ (augmentedCechNerve f).left i ≫ (fun n => ExtraDegeneracy.s f S n) n
[PROOFSTEP]
dsimp [cechNerve, SimplicialObject.δ, SimplexCategory.δ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ ExtraDegeneracy.s f S (n + 1) ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb (Fin.succ i)))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n
[PROOFSTEP]
ext j
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
j : Fin (n + 1 + 1)
⊢ (ExtraDegeneracy.s f S (n + 1) ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb (Fin.succ i)))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.π (fun x => f.hom) j =
(WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n) ≫
WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
simp only [assoc, WidePullback.lift_π]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
j : Fin (n + 1 + 1)
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
by_cases j = 0
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
j : Fin (n + 1 + 1)
h : j = 0
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
subst h
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) 0) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) 0
[PROOFSTEP]
erw [Fin.succ_succAbove_zero, ExtraDegeneracy.s_comp_π_0, ExtraDegeneracy.s_comp_π_0]
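The first rewrite is Mathlib's `Fin.succ_succAbove_zero`; the two `s_comp_π_0` rewrites then turn each composite `s ≫ π 0` into `base ≫ S.section_`, as the next goal shows. The `Fin` lemma, restated standalone:

import Mathlib

-- `Fin.succ_succAbove_zero`: the successor embedding `succAbove i.succ` fixes `0`.
example {n : ℕ} (i : Fin (n + 1)) : i.succ.succAbove 0 = 0 :=
  Fin.succ_succAbove_zero i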
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (WidePullback.base fun x => f.hom) ≫ S.section_ =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
dsimp
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (WidePullback.base fun x => f.hom) ≫ S.section_ =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
simp only [WidePullback.lift_base_assoc]
[GOAL]
case neg
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
j : Fin (n + 1 + 1)
h : ¬j = 0
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
cases' Fin.eq_succ_of_ne_zero h with k hk
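The case split uses Mathlib's `Fin.eq_succ_of_ne_zero`: every nonzero element of `Fin (n + 1)` is the successor of some `k : Fin n`. Restated standalone:

import Mathlib

example {n : ℕ} {j : Fin (n + 1)} (h : j ≠ 0) : ∃ k : Fin n, j = k.succ :=
  Fin.eq_succ_of_ne_zero h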
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
j : Fin (n + 1 + 1)
h : ¬j = 0
k : Fin (n + 1)
hk : j = Fin.succ k
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
subst hk
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
k : Fin (n + 1)
h : ¬Fin.succ k = 0
⊢ ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) (Fin.succ k)) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.succ k)
[PROOFSTEP]
erw [Fin.succ_succAbove_succ, ExtraDegeneracy.s_comp_π_succ, ExtraDegeneracy.s_comp_π_succ]
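`Fin.succ_succAbove_succ` commutes `succAbove` with `Fin.succ`, rewriting the index `Fin.succAbove (Fin.succ i) (Fin.succ k)` to `(Fin.succAbove i k).succ` so that `s_comp_π_succ` applies on both sides. Restated standalone:

import Mathlib

example {n : ℕ} (i : Fin (n + 1)) (j : Fin n) :
    i.succ.succAbove j.succ = (i.succAbove j).succ :=
  Fin.succ_succAbove_succ i j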
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
k : Fin (n + 1)
h : ¬Fin.succ k = 0
⊢ WidePullback.π (fun x => f.hom) (Fin.succAbove i k) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.π (fun x => f.hom) k
[PROOFSTEP]
simp only [WidePullback.lift_π]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ ((ExtraDegeneracy.s f S (n + 1) ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb (Fin.succ i)))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.base fun x => f.hom) =
(WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [assoc, WidePullback.lift_base]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S n ≫ WidePullback.base fun x => f.hom
[PROOFSTEP]
erw [ExtraDegeneracy.s_comp_base, ExtraDegeneracy.s_comp_base]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
dsimp
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 2)
⊢ (WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.succAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk (OrderEmbedding.toOrderHom (Fin.succAboveEmb i))).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [WidePullback.lift_base]
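The base component closes with `WidePullback.lift_base`, the companion of `lift_π` above. A standalone sketch in the same generic context (placeholder names as before):

import Mathlib

open CategoryTheory CategoryTheory.Limits

-- `WidePullback.lift_base`: a lift followed by the base projection is the prescribed map to `B`.
example {C : Type*} [Category C] {J : Type*} {B : C} {objs : J → C}
    (arrows : ∀ j, objs j ⟶ B) [HasWidePullback B objs arrows]
    {X : C} (f : X ⟶ B) (fs : ∀ j, X ⟶ objs j)
    (w : ∀ j, fs j ≫ arrows j = f) :
    WidePullback.lift f fs w ≫ WidePullback.base arrows = f := by
  simp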
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (fun n => ExtraDegeneracy.s f S n) n ≫ SimplicialObject.σ (augmentedCechNerve f).left (Fin.succ i) =
SimplicialObject.σ (augmentedCechNerve f).left i ≫ (fun n => ExtraDegeneracy.s f S n) (n + 1)
[PROOFSTEP]
dsimp [cechNerve, SimplicialObject.σ, SimplexCategory.σ]
[GOAL]
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove (Fin.succ i),
monotone' := (_ : Monotone (Fin.predAbove (Fin.succ i))) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1)
[PROOFSTEP]
ext j
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
j : Fin (n + 1 + 1 + 1)
⊢ (ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove (Fin.succ i),
monotone' := (_ : Monotone (Fin.predAbove (Fin.succ i))) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.π (fun x => f.hom) j =
(WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1)) ≫
WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
simp only [assoc, WidePullback.lift_π]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
j : Fin (n + 1 + 1 + 1)
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
by_cases j = 0
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
j : Fin (n + 1 + 1 + 1)
h : j = 0
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
subst h
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) 0) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) 0
[PROOFSTEP]
erw [ExtraDegeneracy.s_comp_π_0, ExtraDegeneracy.s_comp_π_0]
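Note: unlike the face case above, no `Fin` rewrite is needed here; `Fin.predAbove (Fin.succ i) 0` presumably already unfolds to `0`, so `erw` matches both `s_comp_π_0` instances directly and replaces each `s ≫ π 0` by `base ≫ S.section_`, as the next goal shows.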
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (WidePullback.base fun x => f.hom) ≫ S.section_ =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
dsimp
[GOAL]
case pos
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (WidePullback.base fun x => f.hom) ≫ S.section_ =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
(WidePullback.base fun x => f.hom) ≫ S.section_
[PROOFSTEP]
simp only [WidePullback.lift_base_assoc]
[GOAL]
case neg
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
j : Fin (n + 1 + 1 + 1)
h : ¬j = 0
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
cases' Fin.eq_succ_of_ne_zero h with k hk
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
j : Fin (n + 1 + 1 + 1)
h : ¬j = 0
k : Fin (n + 2)
hk : j = Fin.succ k
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) j) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) j
[PROOFSTEP]
subst hk
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
k : Fin (n + 2)
h : ¬Fin.succ k = 0
⊢ ExtraDegeneracy.s f S n ≫ WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) (Fin.succ k)) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.π (fun x => f.hom) (Fin.succ k)
[PROOFSTEP]
erw [Fin.succ_predAbove_succ, ExtraDegeneracy.s_comp_π_succ, ExtraDegeneracy.s_comp_π_succ]
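`Fin.succ_predAbove_succ` is the `predAbove` analogue of the `succAbove` lemma used in the face case: it commutes `predAbove` with `Fin.succ`. Restated standalone:

import Mathlib

example {n : ℕ} (a : Fin n) (b : Fin (n + 1)) :
    a.succ.predAbove b.succ = (a.predAbove b).succ :=
  Fin.succ_predAbove_succ a b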
[GOAL]
case neg.intro
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
k : Fin (n + 2)
h : ¬Fin.succ k = 0
⊢ WidePullback.π (fun x => f.hom) (Fin.predAbove i k) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.π (fun x => f.hom) k
[PROOFSTEP]
simp only [WidePullback.lift_π]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ ((ExtraDegeneracy.s f S n ≫
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove (Fin.succ i) i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1 + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove (Fin.succ i),
monotone' := (_ : Monotone (Fin.predAbove (Fin.succ i))) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom)) ≫
WidePullback.base fun x => f.hom) =
(WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1)) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [assoc, WidePullback.lift_base]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (ExtraDegeneracy.s f S n ≫ WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
ExtraDegeneracy.s f S (n + 1) ≫ WidePullback.base fun x => f.hom
[PROOFSTEP]
erw [ExtraDegeneracy.s_comp_base, ExtraDegeneracy.s_comp_base]
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
dsimp
[GOAL]
case a
C : Type u_1
inst✝¹ : Category.{?u.319164, u_1} C
f : Arrow C
inst✝ : ∀ (n : ℕ), HasWidePullback f.right (fun x => f.left) fun x => f.hom
S : SplitEpi f.hom
n : ℕ
i : Fin (n + 1)
⊢ (WidePullback.base fun x => f.hom) =
WidePullback.lift (WidePullback.base fun x => f.hom)
(fun i_1 => WidePullback.π (fun x => f.hom) (Fin.predAbove i i_1))
(_ :
∀ (j : Fin (SimplexCategory.len (op [n + 1]).unop + 1)),
WidePullback.π (fun x => f.hom)
(↑(SimplexCategory.Hom.toOrderHom
(SimplexCategory.Hom.mk
{ toFun := Fin.predAbove i, monotone' := (_ : Monotone (Fin.predAbove i)) }).op.unop)
j) ≫
f.hom =
WidePullback.base fun x => f.hom) ≫
WidePullback.base fun x => f.hom
[PROOFSTEP]
simp only [WidePullback.lift_base]
[GOAL]
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
⊢ HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) i ⟶
HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) j
[PROOFSTEP]
by_cases i + 1 = j
[GOAL]
case pos
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
h : i + 1 = j
⊢ HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) i ⟶
HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) j
[PROOFSTEP]
exact (-ed.s i) ≫ eqToHom (by congr)
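The degree-one map of the prospective nullhomotopy is `-s i`, transported along `eqToHom`, when `i + 1 = j`, and `0` otherwise. A minimal standalone sketch of the same `dite` pattern (the chain complex `K` and the family `s` here are generic placeholders, not the objects of the proof above):

import Mathlib

open CategoryTheory

example {C : Type*} [Category C] [Preadditive C] (K : ChainComplex C ℕ)
    (s : ∀ i : ℕ, K.X i ⟶ K.X (i + 1)) (i j : ℕ) : K.X i ⟶ K.X j :=
  if h : i + 1 = j then (-s i) ≫ eqToHom (by rw [h]) else 0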
[GOAL]
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
h : i + 1 = j
⊢ (drop.obj X).obj (op [i + 1]) = HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) j
[PROOFSTEP]
congr
[GOAL]
case neg
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
h : ¬i + 1 = j
⊢ HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) i ⟶
HomologicalComplex.X (AlternatingFaceMapComplex.obj (drop.obj X)) j
[PROOFSTEP]
exact 0
[GOAL]
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
hij : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
⊢ (fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0)
i j =
0
[PROOFSTEP]
dsimp
[GOAL]
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
hij : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
⊢ (if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : X.left.obj (op [i + 1]) = X.left.obj (op [j])) else 0) = 0
[PROOFSTEP]
split_ifs with h
[GOAL]
case pos
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
hij : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
h : i + 1 = j
⊢ (-s ed i) ≫ eqToHom (_ : X.left.obj (op [i + 1]) = X.left.obj (op [j])) = 0
[PROOFSTEP]
exfalso
[GOAL]
case pos.h
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
hij : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
h : i + 1 = j
⊢ False
[PROOFSTEP]
exact hij h
[GOAL]
case neg
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i j : ℕ
hij : ¬ComplexShape.Rel (ComplexShape.down ℕ) j i
h : ¬i + 1 = j
⊢ 0 = 0
[PROOFSTEP]
simp only [eq_self_iff_true]
[GOAL]
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i : ℕ
⊢ HomologicalComplex.Hom.f
(NatTrans.app AlternatingFaceMapComplex.ε X ≫
Equiv.invFun (ChainComplex.fromSingle₀Equiv (AlternatingFaceMapComplex.obj (drop.obj X)) (point.obj X)) ed.s')
i =
((↑(dNext i) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
↑(prevD i) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
HomologicalComplex.Hom.f (𝟙 (AlternatingFaceMapComplex.obj (drop.obj X))) i
[PROOFSTEP]
rcases i with _ | i
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f
(NatTrans.app AlternatingFaceMapComplex.ε X ≫
Equiv.invFun (ChainComplex.fromSingle₀Equiv (AlternatingFaceMapComplex.obj (drop.obj X)) (point.obj X)) ed.s')
Nat.zero =
((↑(dNext Nat.zero) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
↑(prevD Nat.zero) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
HomologicalComplex.Hom.f (𝟙 (AlternatingFaceMapComplex.obj (drop.obj X))) Nat.zero
[PROOFSTEP]
rw [Homotopy.prevD_chainComplex, Homotopy.dNext_zero_chainComplex, zero_add]
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f
(NatTrans.app AlternatingFaceMapComplex.ε X ≫
Equiv.invFun (ChainComplex.fromSingle₀Equiv (AlternatingFaceMapComplex.obj (drop.obj X)) (point.obj X)) ed.s')
Nat.zero =
(if h : Nat.zero + 1 = Nat.zero + 1 then
(-s ed Nat.zero) ≫ eqToHom (_ : (drop.obj X).obj (op [Nat.zero + 1]) = (drop.obj X).obj (op [Nat.zero + 1]))
else 0) ≫
HomologicalComplex.d (AlternatingFaceMapComplex.obj (drop.obj X)) (Nat.zero + 1) Nat.zero +
HomologicalComplex.Hom.f (𝟙 (AlternatingFaceMapComplex.obj (drop.obj X))) Nat.zero
[PROOFSTEP]
dsimp [ChainComplex.fromSingle₀Equiv, ChainComplex.toSingle₀Equiv]
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f (NatTrans.app AlternatingFaceMapComplex.ε X) 0 ≫ ed.s' =
(if 0 + 1 = 0 + 1 then (-s ed 0) ≫ 𝟙 (X.left.obj (op [0 + 1])) else 0) ≫
HomologicalComplex.d (AlternatingFaceMapComplex.obj X.left) (0 + 1) 0 +
𝟙 (X.left.obj (op [0]))
[PROOFSTEP]
simp only [comp_id, ite_true, zero_add, ComplexShape.down_Rel, not_true, AlternatingFaceMapComplex.obj_d_eq,
Preadditive.neg_comp]
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f (NatTrans.app AlternatingFaceMapComplex.ε X) 0 ≫ ed.s' =
(-s ed 0 ≫ Finset.sum Finset.univ fun i => (-1) ^ ↑i • SimplicialObject.δ X.left i) + 𝟙 (X.left.obj (op [0]))
[PROOFSTEP]
erw [Fin.sum_univ_two]
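`Fin.sum_univ_two` expands the alternating face sum in degree `0` into its two summands, which the sign simplifications in the next step reduce to `δ 0 - δ 1`. Restated standalone:

import Mathlib

example {M : Type*} [AddCommMonoid M] (f : Fin 2 → M) :
    Finset.univ.sum f = f 0 + f 1 :=
  Fin.sum_univ_two f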
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f (NatTrans.app AlternatingFaceMapComplex.ε X) 0 ≫ ed.s' =
-s ed 0 ≫ ((-1) ^ ↑0 • SimplicialObject.δ X.left 0 + (-1) ^ ↑1 • SimplicialObject.δ X.left 1) +
𝟙 (X.left.obj (op [0]))
[PROOFSTEP]
simp only [Fin.val_zero, pow_zero, one_smul, Fin.val_one, pow_one, neg_smul, Preadditive.comp_add, s_comp_δ₀, drop_obj,
Preadditive.comp_neg, neg_add_rev, neg_neg, neg_add_cancel_right, s₀_comp_δ₁]
[GOAL]
case zero
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
⊢ HomologicalComplex.Hom.f (NatTrans.app AlternatingFaceMapComplex.ε X) 0 ≫ ed.s' = NatTrans.app X.hom (op [0]) ≫ ed.s'
[PROOFSTEP]
rfl
[GOAL]
case succ
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i : ℕ
⊢ HomologicalComplex.Hom.f
(NatTrans.app AlternatingFaceMapComplex.ε X ≫
Equiv.invFun (ChainComplex.fromSingle₀Equiv (AlternatingFaceMapComplex.obj (drop.obj X)) (point.obj X)) ed.s')
(Nat.succ i) =
((↑(dNext (Nat.succ i)) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
↑(prevD (Nat.succ i)) fun i j =>
if h : i + 1 = j then (-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [j]))
else 0) +
HomologicalComplex.Hom.f (𝟙 (AlternatingFaceMapComplex.obj (drop.obj X))) (Nat.succ i)
[PROOFSTEP]
rw [Homotopy.prevD_chainComplex, Homotopy.dNext_succ_chainComplex]
[GOAL]
case succ
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i : ℕ
⊢ HomologicalComplex.Hom.f
(NatTrans.app AlternatingFaceMapComplex.ε X ≫
Equiv.invFun (ChainComplex.fromSingle₀Equiv (AlternatingFaceMapComplex.obj (drop.obj X)) (point.obj X)) ed.s')
(Nat.succ i) =
(HomologicalComplex.d (AlternatingFaceMapComplex.obj (drop.obj X)) (i + 1) i ≫
if h : i + 1 = i + 1 then
(-s ed i) ≫ eqToHom (_ : (drop.obj X).obj (op [i + 1]) = (drop.obj X).obj (op [i + 1]))
else 0) +
(if h : Nat.succ i + 1 = Nat.succ i + 1 then
(-s ed (Nat.succ i)) ≫
eqToHom (_ : (drop.obj X).obj (op [Nat.succ i + 1]) = (drop.obj X).obj (op [Nat.succ i + 1]))
else 0) ≫
HomologicalComplex.d (AlternatingFaceMapComplex.obj (drop.obj X)) (Nat.succ i + 1) (Nat.succ i) +
HomologicalComplex.Hom.f (𝟙 (AlternatingFaceMapComplex.obj (drop.obj X))) (Nat.succ i)
[PROOFSTEP]
dsimp [ChainComplex.toSingle₀Equiv, ChainComplex.fromSingle₀Equiv]
[GOAL]
case succ
C : Type u_1
inst✝² : Category.{?u.349752, u_1} C
inst✝¹ : Preadditive C
inst✝ : HasZeroObject C
X : SimplicialObject.Augmented C
ed : ExtraDegeneracy X
i : ℕ
⊢ HomologicalComplex.Hom.f (NatTrans.app AlternatingFaceMapComplex.ε X) (Nat.succ i) ≫ 0 =
(HomologicalComplex.d (AlternatingFaceMapComplex.obj X.left) (i + 1) i ≫
if i + 1 = i + 1 then (-s ed i) ≫ 𝟙 (X.left.obj (op [i + 1])) else 0) +
(if Nat.succ i + 1 = Nat.succ i + 1 then (-s ed (Nat.succ i)) ≫ 𝟙 (X.left.obj (op [Nat.succ i + 1])) else 0) ≫
HomologicalComplex.d (AlternatingFaceMapComplex.obj X.left) (Nat.succ i + 1) (Nat.succ i) +
𝟙 (X.left.obj (op [Nat.succ i]))
[PROOFSTEP]
simp only [comp_zero, ComplexShape.down_Rel, not_true, Preadditive.neg_comp, AlternatingFaceMapComplex.obj_d_eq,
comp_id, ite_true, Preadditive.comp_neg, @Fin.sum_univ_succ _ _ (i + 2), Fin.val_zero, pow_zero, one_smul,
Fin.val_succ, Preadditive.comp_add, drop_obj, s_comp_δ₀, Preadditive.sum_comp, Preadditive.zsmul_comp,
Preadditive.comp_sum, Preadditive.comp_zsmul, zsmul_neg, ed.s_comp_δ, pow_add, pow_one, mul_neg, mul_one, neg_zsmul,
neg_neg, neg_add_cancel_comm_assoc, add_left_neg]
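The closing `simp` also peels off the leading term of the alternating sum with `Fin.sum_univ_succ` (instantiated at `i + 2` faces), after which `s_comp_δ₀` and `ed.s_comp_δ` cancel the remaining terms. The peeling lemma, restated standalone:

import Mathlib

example {M : Type*} [AddCommMonoid M] {n : ℕ} (f : Fin (n + 1) → M) :
    Finset.univ.sum f = f 0 + Finset.univ.sum (fun i : Fin n => f i.succ) :=
  Fin.sum_univ_succ f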