import Base: +, -, abs, *, /, div, convert, ==, <=, >=, show, to_index
function show(io::IO, oi::OffsetInteger{O, T}) where {O, T}
print(io, "|$(raw(oi)) (indexes as $(O >= 0 ? raw(oi) - O : raw(oi) + -O))|")
end
Base.eltype(::Type{OffsetInteger{O, T}}) where {O, T} = T
Base.eltype(oi::OffsetInteger) = eltype(typeof(oi))
# constructors and conversion
OffsetInteger{O1, T1}(x::OffsetInteger{O2, T2}) where {O1, O2, T1 <: Integer, T2 <: Integer} = OffsetInteger{O1, T1}(T2(x))
OffsetInteger{O}(x::Integer) where {O} = OffsetInteger{O, eltype(x)}(x)
OffsetInteger{O}(x::OffsetInteger) where {O} = OffsetInteger{O, eltype(x)}(x)
(::Type{IT})(x::OffsetInteger{O, T}) where {IT <: Integer, O, T <: Integer} = IT(raw(x) + -O)
Base.@pure pure_max(x1, x2) = x1 > x2 ? x1 : x2
Base.promote_rule(::Type{T1}, ::Type{OffsetInteger{O, T2}}) where {T1 <: Integer, O, T2} = T1
Base.promote_rule(::Type{OffsetInteger{O1, T1}}, ::Type{OffsetInteger{O2, T2}}) where {O1, O2, T1, T2} = OffsetInteger{pure_max(O1, O2), promote_type(T1, T2)}
to_index(I::AbstractArray{<:Face}) = I
to_index(I::OffsetInteger) = raw(OneIndex(I))
to_index(I::OffsetInteger{0}) = raw(I)
# basic operators
for op in (:(-), :abs)
@eval $(op)(x::T) where {T <: OffsetInteger} = T($(op)(x.i))
end
for op in (:(+), :(-), :(*), :(/), :div)
@eval begin
@inline function $(op)(x::OffsetInteger{O}, y::OffsetInteger{O}) where O
OffsetInteger{O}($op(x.i, y.i))
end
end
end
for op in (:(==), :(>=), :(<=))
@eval begin
@inline function $(op)(x::OffsetInteger{O}, y::OffsetInteger{O}) where O
$op(x.i, y.i)
end
end
end
@generated function Base.getindex(
A::AbstractArray, f::Face{N}
) where N
v = Expr(:tuple)
for i = 1:N
push!(v.args, :(A[f[$i]]))
end
:($(v))
end
|
module Esterel.Variable.Shared where
open import Data.Nat
using (ℕ) renaming (_≟_ to _≟ℕ_)
open import Function
using (_∘_)
open import Relation.Nullary
using (Dec ; yes ; no ; ¬_)
open import Relation.Binary
using (Decidable)
open import Relation.Binary.PropositionalEquality
using (_≡_ ; refl ; cong ; trans ; sym)
data SharedVar : Set where
_ₛₕ : ℕ → SharedVar
unwrap : SharedVar → ℕ
unwrap (n ₛₕ) = n
unwrap-inverse : ∀ {s} → (unwrap s) ₛₕ ≡ s
unwrap-inverse {_ ₛₕ} = refl
unwrap-injective : ∀ {s t} → unwrap s ≡ unwrap t → s ≡ t
unwrap-injective s'≡t' = trans (sym unwrap-inverse) (trans (cong _ₛₕ s'≡t') unwrap-inverse)
-- for backward compatibility
unwrap-neq : ∀{k1 : SharedVar} → ∀{k2 : SharedVar} → ¬ k1 ≡ k2 → ¬ (unwrap k1) ≡ (unwrap k2)
unwrap-neq = (_∘ unwrap-injective)
wrap : ℕ → SharedVar
wrap = _ₛₕ
bijective : ∀{x} → unwrap (wrap x) ≡ x
bijective = refl
_≟_ : Decidable {A = SharedVar} _≡_
(s ₛₕ) ≟ (t ₛₕ) with s ≟ℕ t
... | yes p = yes (cong _ₛₕ p)
... | no ¬p = no (¬p ∘ cong unwrap)
data Status : Set where
ready : Status
new : Status
old : Status
_≟ₛₜ_ : Decidable {A = Status} _≡_
ready ≟ₛₜ ready = yes refl
ready ≟ₛₜ new = no λ()
ready ≟ₛₜ old = no λ()
new ≟ₛₜ ready = no λ()
new ≟ₛₜ new = yes refl
new ≟ₛₜ old = no λ()
old ≟ₛₜ ready = no λ()
old ≟ₛₜ new = no λ()
old ≟ₛₜ old = yes refl
|
-- ----------------------------------------------------------------- [ Key.idr ]
-- Module : Keys
-- Description : Types for Cryptographic Keys MkII
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module Crypto.Adv.Key
import Crypto.Common
--- Question: do I parameterise over a Key Type...
data Key : ( vis : Visibility) -> (setting : s ) -> Type where
MkEncKey : a -> (setting : s) -> (value : v ) -> Key Public s
MkDecKey : a -> (setting : s) -> (value : v ) -> Key Private s
MkSignKey : a -> (setting : s) -> (value : v ) -> Key Private s
MkVerifyKey : a -> (setting : s) -> (value : v ) -> Key Public s
|
[STATEMENT]
lemma pref_prod_pref: "u \<le>p z \<cdot> w \<Longrightarrow> u \<le>p w \<Longrightarrow> u \<le>p z \<cdot> u"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>u \<le>p z \<cdot> w; u \<le>p w\<rbrakk> \<Longrightarrow> u \<le>p z \<cdot> u
[PROOF STEP]
using pref_prod_pref_short[OF _ _ suf_len']
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>?u \<le>p ?z \<cdot> ?w; ?u \<le>p ?w\<rbrakk> \<Longrightarrow> ?u \<le>p ?z \<cdot> ?u
goal (1 subgoal):
1. \<lbrakk>u \<le>p z \<cdot> w; u \<le>p w\<rbrakk> \<Longrightarrow> u \<le>p z \<cdot> u
[PROOF STEP]
. |
-- Idris2
import System
import System.Concurrency
-- Test `conditionSignal` wakes 1 thread for 1 main and N child threads
main : IO ()
main =
let n = 3 in
do cvMutex <- makeMutex
cv <- makeCondition
ts <- for [1..n] $ \_ => fork $ do mutexAcquire cvMutex
conditionWait cv cvMutex
putStrLn "Hello mother"
mutexRelease cvMutex
putStrLn "Hello child"
sleep 1
conditionSignal cv
-- don't threadWait since we don't know which thread got signalled
sleep 1
|
lemma linear_lim_0: assumes "bounded_linear f" shows "(f \<longlongrightarrow> 0) (at (0))" |
Require Import coqutil.Map.Interface.
Require Import coqutil.Word.Interface.
Require Import riscv.Spec.Decode.
Require Import riscv.Utility.Utility.
Require Import riscv.Platform.RiscvMachine.
Section Machine.
Context {width: Z} {BW: Bitwidth width} {word: word width} {word_ok: word.ok word}.
Context {Registers: map.map Register word}.
Context {Mem: map.map word byte}.
Context {Action: Type}.
Record AtomicRiscvMachine := mkAtomicRiscvMachine {
getMachine :> RiscvMachine;
getReservation: option word;
}.
Definition withReservation : option word -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun reservation '(mkAtomicRiscvMachine m _) =>
mkAtomicRiscvMachine m reservation.
Definition updateReservation(fr: option word -> option word)(m: AtomicRiscvMachine) :=
withReservation (fr m.(getReservation)) m.
Definition withRegs: Registers -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun regs2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withRegs regs2 mach) rv).
Definition withPc: word -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun pc2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withPc pc2 mach) rv).
Definition withNextPc: word -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun nextPC2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withNextPc nextPC2 mach) rv).
Definition withMem: Mem -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun mem2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withMem mem2 mach) rv).
Definition withXAddrs: XAddrs -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun xAddrs2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withXAddrs xAddrs2 mach) rv).
Definition withLog: list LogItem -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun log2 '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withLog log2 mach) rv).
Definition withLogItem: LogItem -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun item '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withLogItem item mach) rv).
Definition withLogItems: list LogItem -> AtomicRiscvMachine -> AtomicRiscvMachine :=
fun items '(mkAtomicRiscvMachine mach rv) =>
(mkAtomicRiscvMachine (withLogItems items mach) rv).
Definition forgetReservation(m: AtomicRiscvMachine): RiscvMachine := m.(getMachine).
Definition addReservation(m: RiscvMachine)(rv: option word): AtomicRiscvMachine :=
mkAtomicRiscvMachine m rv.
Definition putProgram(prog: list MachineInt)(addr: word)(ma: AtomicRiscvMachine): AtomicRiscvMachine :=
mkAtomicRiscvMachine (putProgram prog addr ma.(getMachine)) ma.(getReservation).
End Machine.
Ltac destruct_RiscvMachine m :=
lazymatch type of m with
| AtomicRiscvMachine =>
let r := fresh m "_regs" in
let p := fresh m "_pc" in
let n := fresh m "_npc" in
let me := fresh m "_mem" in
let x := fresh m "_xaddrs" in
let l := fresh m "_log" in
let rv := fresh m "_reservation" in
destruct m as [ [r p n me x l] rv ];
simpl in *
| _ => let expected := constr:(@AtomicRiscvMachine) in fail "not a" expected
end.
|
Formal statement is: lemma eventually_floor_eq: fixes f::"'a \<Rightarrow> 'b::{order_topology,floor_ceiling}" assumes f: "(f \<longlongrightarrow> l) F" and l: "l \<notin> \<int>" shows "\<forall>\<^sub>F x in F. floor (f x) = floor l" Informal statement is: If $f$ tends to $l$ along $F$ and $l$ is not an integer, then eventually $\lfloor f(x) \rfloor = \lfloor l \rfloor$. |
module Data.Prim
import public Data.Prim.Char
import public Data.Prim.Bits8
import public Data.Prim.Bits16
import public Data.Prim.Bits32
import public Data.Prim.Bits64
import public Data.Prim.Int8
import public Data.Prim.Int16
import public Data.Prim.Int32
import public Data.Prim.Int64
import public Data.Prim.String
|
#' Make Venn diagram of shared taxa (ASVs, OTUs) across sample groups
#'
#' Make Venn diagram of shared taxa (ASVs, OTUs) across sample groups from a phyloseq object. Overlap can be weighted by relative abundance
#' @param ps A phyloseq object
#' @param group The grouping factor. Should match variable in sample_data(ps)
#' @param fraction The fraction (0 to 1) of samples in a group in which the taxa should be present to be included in the count.
#' @param weight If TRUE, the overlaps are weighted by abundance
#' @param type "percent" or "counts"
#' @param relative Should abundances be made relative
#' @param plot If TRUE return a plot, if FALSE return a list with shared and unique taxa
#' @param ... Additional arguments
#' @keywords venn diagram
#' @return A Venn plot
#' @import phyloseq
#' @import eulerr
#' @importFrom stats aggregate as.formula
#' @export
ps_venn <- function(ps, group, fraction = 0, weight = FALSE, type = "percent", relative = TRUE, plot = TRUE, ...){
if (!requireNamespace("eulerr", quietly = TRUE)) install.packages("eulerr")
if(relative){
ps <- transform_sample_counts(ps, function(x) x/sum(x))
}
if(taxa_are_rows(ps)){
ps_melted <- reshape2::melt(otu_table(ps))
} else {
ps_melted <- reshape2::melt(t(otu_table(ps)))
}
ps_melted <- merge(ps_melted, sample_data(ps), by.x = "Var2", by.y = "row.names")
ps_agg <- aggregate(as.formula(paste("value ~ Var1 +",group)), data = ps_melted, function(x) (sum(x > 0)/length(x) >= fraction) * mean(x))
ps_mat <- reshape2::dcast(as.formula(paste("Var1 ~ ",group)), data = ps_agg, value.var = "value")
rownames(ps_mat) <- ps_mat[, 1]
ps_mat <- ps_mat[, -1]
ps_mat_bin <- (ps_mat>0)*1
if(plot){
if(weight){
df <- eulerr::venn(ps_mat_bin, weights = rowMeans(ps_mat))
} else {
df <- eulerr::venn(ps_mat_bin)
}
plot(df, quantities = list(type=type), ...)
} else {
# Find taxa in all combinations
combis <- lapply(2:ncol(ps_mat), function(k) lapply(lapply(1:(ncol(combn(1:ncol(ps_mat_bin), m = k))),
function(y) ps_mat_bin[, combn(1:ncol(ps_mat_bin), m = k)[, y]]),
function(x) rownames(x[rowSums(x) >= k, , drop=FALSE])))
# Find taxa in singles
singles <- apply(ps_mat_bin, 2, function(x) names(x[x > 0]))
# Keep only those NOT in the same combination space
singles <- lapply(seq_along(singles), function(x) setdiff(singles[[x]], do.call(c, singles[-x])))
combis <- lapply(combis, function(cc) lapply(seq_along(cc), function(x) setdiff(cc[[x]], do.call(c, cc[-x]))))
# Names
names(singles) <- colnames(ps_mat_bin)
for(i in 2:ncol(ps_mat)){
names(combis[[i-1]]) <- apply(combn(colnames(ps_mat_bin), m = i), 2, function(x) paste(x, collapse = "__"))
}
# Recursively go through combination space from complex to simple to keep only those in unique combinations
combis <- rev(combis)
combis_new <- list()
for(i in seq_along(combis)){
if(i == 1) {
combis_new[[i]] <- combis[[i]]
} else {
combis_new[[i]] <- lapply(combis[[i]], function(x) setdiff(x, unlist(combis_new)))
}
}
combis_new <- c(singles, unlist(combis_new, recursive = FALSE))
return(combis_new[sapply(combis_new, function(x) length(x)>0)])
}
} |
function [cluster, total] = findcluster(onoff, spatdimneighbstructmat, varargin)
% FINDCLUSTER returns all connected clusters in a 3 dimensional matrix
% with a connectivity of 6.
%
% Use as
% [cluster, num] = findcluster(onoff, spatdimneighbstructmat, minnbchan)
% or as
% [cluster, num] = findcluster(onoff, spatdimneighbstructmat, spatdimneighbselmat, minnbchan)
% where
% onoff is a 3D boolean matrix with size N1xN2xN3
% spatdimneighbstructmat defines the neighbouring channels/combinations, see below
% minnbchan the minimum number of neighbouring channels/combinations
% spatdimneighbselmat is a special neighbourhood matrix that is used for selecting
% channels/combinations on the basis of the minnbchan criterium
%
% The neighbourhood structure for the first dimension is specified using
% spatdimneighbstructmat, which is a 2D (N1xN1) matrix. Each row and each column corresponds
% to a channel (combination) along the first dimension and along that row/column, elements
% with "1" define the neighbouring channel(s) (combinations). The first dimension of
% onoff should correspond to the channel(s) (combinations).
%
% See also SPM_BWLABEL (spm toolbox)
% Copyright (C) 2004, Robert Oostenveld
%
% This file is part of FieldTrip, see http://www.fieldtriptoolbox.org
% for the documentation and details.
%
% FieldTrip is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% FieldTrip is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with FieldTrip. If not, see <http://www.gnu.org/licenses/>.
%
% $Id$
spatdimlength = size(onoff, 1);
nfreq = size(onoff, 2);
ntime = size(onoff, 3);
if length(size(spatdimneighbstructmat))~=2 || ~all(size(spatdimneighbstructmat)==spatdimlength)
ft_error('invalid dimension of spatdimneighbstructmat');
end
minnbchan=0;
if length(varargin)==1
minnbchan=varargin{1};
end
if length(varargin)==2
spatdimneighbselmat=varargin{1};
minnbchan=varargin{2};
end
if minnbchan>0
% For every (time,frequency)-element, it is calculated how many significant
% neighbours this channel has. If a significant channel has less than minnbchan
% significant neighbours, then this channel is removed from onoff.
if length(varargin)==1
selectmat = single(spatdimneighbstructmat | spatdimneighbstructmat');
end
if length(varargin)==2
selectmat = single(spatdimneighbselmat | spatdimneighbselmat');
end
nremoved=1;
while nremoved>0
nsigneighb=reshape(selectmat*reshape(single(onoff),[spatdimlength (nfreq*ntime)]),[spatdimlength nfreq ntime]);
remove=(onoff.*nsigneighb)<minnbchan;
nremoved=length(find(remove.*onoff));
onoff(remove)=0;
end
end
% for each channel (combination), find the connected time-frequency clusters
labelmat = zeros(size(onoff));
total = 0;
if nfreq*ntime>1
for spatdimlev=1:spatdimlength
[labelmat(spatdimlev, :, :), num] = spm_bwlabel(double(reshape(onoff(spatdimlev, :, :), nfreq, ntime)), 6); % the previous code contained a '4' for input
labelmat(spatdimlev, :, :) = labelmat(spatdimlev, :, :) + (labelmat(spatdimlev, :, :)~=0)*total;
total = total + num;
end
else
labelmat(onoff>0) = 1:sum(onoff(:));
total = sum(onoff(:));
end
% combine the time and frequency dimension for simplicity
labelmat = reshape(labelmat, spatdimlength, nfreq*ntime);
% combine clusters that are connected in neighbouring channel(s)
% (combinations). Convert inputs to uint32 as that is required by the mex
% file (and the values will be positive integers anyway).
cluster = combineClusters(uint32(labelmat), logical(spatdimneighbstructmat), uint32(total));
% reshape the output to the original format of the data
cluster = reshape(cluster, spatdimlength, nfreq, ntime);
% update the total number
total = numel(unique(cluster(:)))-1; % the value of 0 does not count
|
lemma upd_space: "i < n \<Longrightarrow> upd i < n" |
Formal statement is: lemma continuous_on_components_gen: fixes f :: "'a::topological_space \<Rightarrow> 'b::topological_space" assumes "\<And>C. C \<in> components S \<Longrightarrow> openin (top_of_set S) C \<and> continuous_on C f" shows "continuous_on S f" Informal statement is: If every component of $S$ is open in $S$ and $f$ is continuous on each component, then $f$ is continuous on $S$. |
{-# OPTIONS --cubical --no-import-sorts --safe #-}
open import Cubical.Core.Everything
open import Cubical.Algebra.Monoid
module Cubical.Algebra.Monoid.Construct.Opposite {ℓ} (M : Monoid ℓ) where
open import Cubical.Foundations.Prelude
open import Cubical.Data.Prod using (_,_)
open Monoid M
import Cubical.Algebra.Semigroup.Construct.Opposite semigroup as OpSemigroup
open OpSemigroup public hiding (Op-isSemigroup; Sᵒᵖ)
•ᵒᵖ-identityˡ : LeftIdentity ε _•ᵒᵖ_
•ᵒᵖ-identityˡ _ = identityʳ _
•ᵒᵖ-identityʳ : RightIdentity ε _•ᵒᵖ_
•ᵒᵖ-identityʳ _ = identityˡ _
•ᵒᵖ-identity : Identity ε _•ᵒᵖ_
•ᵒᵖ-identity = •ᵒᵖ-identityˡ , •ᵒᵖ-identityʳ
Op-isMonoid : IsMonoid Carrier _•ᵒᵖ_ ε
Op-isMonoid = record
{ isSemigroup = OpSemigroup.Op-isSemigroup
; identity = •ᵒᵖ-identity
}
Mᵒᵖ : Monoid ℓ
Mᵒᵖ = record { isMonoid = Op-isMonoid }
|
lemma (in t2_space) LIMSEQ_unique: "X \<longlonglongrightarrow> a \<Longrightarrow> X \<longlonglongrightarrow> b \<Longrightarrow> a = b" |
State Before: α : Type u_1
β : Type ?u.473165
γ : Type ?u.473168
inst✝ : DecidableEq α
l l' : List α
a : α
n : ℕ
hn : n ≠ 0
⊢ toFinset (replicate n a) = {a} State After: case a
α : Type u_1
β : Type ?u.473165
γ : Type ?u.473168
inst✝ : DecidableEq α
l l' : List α
a : α
n : ℕ
hn : n ≠ 0
x : α
⊢ x ∈ toFinset (replicate n a) ↔ x ∈ {a} Tactic: ext x State Before: case a
α : Type u_1
β : Type ?u.473165
γ : Type ?u.473168
inst✝ : DecidableEq α
l l' : List α
a : α
n : ℕ
hn : n ≠ 0
x : α
⊢ x ∈ toFinset (replicate n a) ↔ x ∈ {a} State After: no goals Tactic: simp [hn, List.mem_replicate] |
#' An interface to Google Drive in R
#' @name driveR
#' @docType package
#' @importFrom jsonlite toJSON fromJSON
#' @importFrom httr GET POST config stop_for_status content add_headers
NULL |
function [A,U,V] = regutm(m,n,s)
%REGUTM Test matrix for regularization methods.
%
% [A,U,V] = regutm(m,n,s)
%
% Generates a random m-times-n matrix A such that A*A' and A'*A
% are oscillating. Hence, in the SVD of A,
% A = U*diag(s)*V',
% the number of sign changes in U(:,i) and V(:,i) is exactly i-1.
%
% The third argument s specifies the singular values of A. If not
% present, then s = logspace(0,round(log10(eps)),min(m,n)).
% Reference: P. C. Hansen, "Test matrices for regularization methods",
% SIAM J. Sci. Comput. 16 (1995), 506--512.
% Per Christian Hansen, IMM, 07/30/97.
% Initialization.
if (nargin==1), n = m; end
if (nargin<3), s = logspace(0,round(log10(eps)),min(m,n)); end
% Special treatment of the case m < n.
if (m < n), [A,V,U] = regutm(n,m,s); A = A'; return, end
% Generate random bidiagonal matrix with nonnegative elements.
if (n < 100), mu = .222*n + .0278*n^2; else mu = 3*n; end
B = abs(diag(randn(n,1)+mu) + diag(randn(n-1,1)+mu,1));
% Compute the SVD of B.
[U,dummy,V] = svd(B); clear dummy
% Repeat if m > n.
if (m > n)
clear U
B = abs(diag(randn(m,1)+mu) + diag(randn(m-1,1)+mu,1));
[U,dummy] = svd(B); clear dummy, U = U(:,1:n);
end
% Compute A.
A = U*diag(s)*V'; |
State Before: 𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
⊢ x ∈ [y-[𝕜]z] ↔ SameRay 𝕜 (x - y) (z - x) State After: 𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
⊢ x ∈ [y-[𝕜]z] Tactic: refine' ⟨sameRay_of_mem_segment, fun h => _⟩ State Before: 𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
⊢ x ∈ [y-[𝕜]z] State After: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (x - y + (z - x))
hzx : z - x = b • (x - y + (z - x))
⊢ x ∈ [y-[𝕜]z] Tactic: rcases h.exists_eq_smul_add with ⟨a, b, ha, hb, hab, hxy, hzx⟩ State Before: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (x - y + (z - x))
hzx : z - x = b • (x - y + (z - x))
⊢ x ∈ [y-[𝕜]z] State After: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ x ∈ [y-[𝕜]z] Tactic: rw [add_comm, sub_add_sub_cancel] at hxy hzx State Before: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ x ∈ [y-[𝕜]z] State After: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ 0 ∈ [-x + y-[𝕜]-x + z] Tactic: rw [← mem_segment_translate _ (-x), neg_add_self] State Before: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ 0 ∈ [-x + y-[𝕜]-x + z] State After: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ b • (-x + y) + a • (-x + z) = 0 Tactic: refine' ⟨b, a, hb, ha, add_comm a b ▸ hab, _⟩ State Before: case intro.intro.intro.intro.intro.intro
𝕜 : Type u_2
E : Type u_1
F : Type ?u.214396
G : Type ?u.214399
ι : Type ?u.214402
π : ι → Type ?u.214407
inst✝² : LinearOrderedField 𝕜
inst✝¹ : AddCommGroup E
inst✝ : Module 𝕜 E
x y z : E
h : SameRay 𝕜 (x - y) (z - x)
a b : 𝕜
ha : 0 ≤ a
hb : 0 ≤ b
hab : a + b = 1
hxy : x - y = a • (z - y)
hzx : z - x = b • (z - y)
⊢ b • (-x + y) + a • (-x + z) = 0 State After: no goals Tactic: rw [← sub_eq_neg_add, ← neg_sub, hxy, ← sub_eq_neg_add, hzx, smul_neg, smul_comm, neg_add_self] |
lemma uniformly_continuous_on_Cauchy: fixes f :: "'a::metric_space \<Rightarrow> 'b::metric_space" assumes "uniformly_continuous_on S f" "Cauchy X" "\<And>n. X n \<in> S" shows "Cauchy (\<lambda>n. f (X n))" |
!A variable is nothing more than a name given to a storage location that our
!programs can manipulate. Every variable must have a specific type, which determines
!the size and layout of the variable's memory, the range of values that can be stored
!in that memory, and the set of operations that can be applied to the variable.
!A variable name may be composed of letters, digits, and the underscore. In addition, it must
!satisfy the following rules:
! *No longer than 31 characters.
! *Made up of alphanumeric characters (A-Z, a-z, 0-9) and underscores (_)
! *The first character of the name must be a letter.
! *Names are not case sensitive
!The syntax for declaring variables is very simple and we have already used it before:
program decl_var
implicit none
!Syntax: <type>, <parameters> :: name
real :: var_real
integer :: var_int
complex :: var_cplx
logical :: var_log
character :: var_char
character(len=80) :: mnsj
!Then they can be assigned values:
var_real = 3.141517
var_int = 6
var_cplx = (1, 1) !Equivalent to (1 + i)
var_cplx = cmplx(2, -4) !Equivalent to (2 - 4i)
var_log = .true.
var_char = 'a'
mnsj = "Bienvenidos al curso de Fortran de PROTECO!"
print *, var_real
print *, var_int
print *, var_cplx
print *, var_log
print *, var_char
print *, mnsj
end program decl_var |
-- @@stderr --
dtrace: failed to compile script test/unittest/translators/err.D_OP_INCOMPLETE.NonExistentInput1.d: [D_OP_INCOMPLETE] line 25: operator -> cannot be applied to a forward declaration: no struct input_struct definition is available
|
Formal statement is: lemma increasing_Bseq_subseq_iff: assumes "\<And>x y. x \<le> y \<Longrightarrow> norm (f x :: 'a::real_normed_vector) \<le> norm (f y)" "strict_mono g" shows "Bseq (\<lambda>x. f (g x)) \<longleftrightarrow> Bseq f" Informal statement is: If the norm of $f$ is monotone, i.e. $x \le y$ implies $\|f(x)\| \le \|f(y)\|$, and $g$ is strictly increasing, then the sequence $f \circ g$ is bounded if and only if $f$ is bounded. |
section \<open>Lazy evaluation within the logic\<close>
theory Lazy_Eval
imports
Complex_Main
begin
text \<open>
This is infrastructure to lazily evaluate an expression (typically something corecursive)
within the logic by simple rewriting. A signature of supported (co-)datatype constructures
upon which pattern matching is allowed and a list of function equations that are used in
rewriting must be provided.
One can then e.\,g.\ determine whether a given pattern matches a given expression. To do this,
the expression will be rewritten using the given function equations until enough constructors
have been exposed to decide whether the pattern matches.
This infrastructure was developed specifically for evaluating Multiseries expressions, but
can, in principle, be used for other purposes as well.
\<close>
lemma meta_eq_TrueE: "PROP P \<equiv> Trueprop True \<Longrightarrow> PROP P" by simp
datatype cmp_result = LT | EQ | GT
definition COMPARE :: "real \<Rightarrow> real \<Rightarrow> cmp_result" where
"COMPARE x y = (if x < y then LT else if x > y then GT else EQ)"
lemma COMPARE_intros:
"x < y \<Longrightarrow> COMPARE x y \<equiv> LT" "x > y \<Longrightarrow> COMPARE x y \<equiv> GT" "x = y \<Longrightarrow> COMPARE x y \<equiv> EQ"
by (simp_all add: COMPARE_def)
primrec CMP_BRANCH :: "cmp_result \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a \<Rightarrow> 'a" where
"CMP_BRANCH LT x y z = x"
| "CMP_BRANCH EQ x y z = y"
| "CMP_BRANCH GT x y z = z"
ML_file \<open>lazy_eval.ML\<close>
end |
function s = mean(f, dim)
%MEAN Average or mean value of a DISKFUN.
% MEAN(F) takes the mean in the angular direction (default), i.e.,
% MEAN(F) = sum(F).
%
% MEAN(F, DIM) takes the mean along the direction DIM. If DIM = 1 it is the
% radial direction, and if DIM = 2 then it is the angular direction,
% i.e., MEAN(F,2) = 1/(2*pi)*sum(F,2).
%
% See also DISKFUN/MEAN2, DISKFUN/STD2.
% Copyright 2017 by The University of Oxford and The Chebfun Developers.
% See http://www.chebfun.org/ for Chebfun information.
% Empty check:
if ( isempty(f) )
s = chebfun;
return
end
if ( nargin == 1)
% Default to the angular direction.
dim = 1;
end
s = sum(f, dim);
if ( dim == 1 )
return; % Mean in the angular direction (default)
elseif ( dim == 2 )
s = s / (2*pi); % Mean in the radial direction
else
error('CHEBFUN:DISKFUN:mean:dim', ...
'dim must be 1 or 2.')
end
end |
\documentclass{beamer}
\usetheme{Madrid}
\usecolortheme{default}
\definecolor{THUpurple}{RGB}{102,8,116}
\usepackage{amsmath}
\usepackage{mathtools}
\usepackage{caption}
\usepackage{listings}
\usepackage{lmodern}
\usepackage{xcolor}
\lstset{language=Python,keywordstyle={\bfseries \color{blue}}}
\usepackage{pdfpages}
\usepackage{makecell}
\usepackage[EULERGREEK]{sansmath}
\usepackage{tikz}
\usepackage{float}
\usepackage{hyperref}
\usepackage{tikz}
\usetikzlibrary{shapes,arrows}
\usepackage[subrefformat=parens]{subcaption}
\usepackage[none]{hyphenat}
\usepackage[binary-units=true,per-mode=symbol]{siunitx}
\usepackage{CJK}
\usepackage{textcomp}
\usepackage{adjustbox}
\usepackage{pgfplots}
\usepackage{bm}
\usepackage{tablefootnote}
\DeclareMathOperator{\erf}{erf}
\usefonttheme[onlymath]{serif}
\newcommand{\dd}{\mathrm{d}}
\newcommand{\mev}{\mathrm{MeV}}
\newcommand{\gev}{\mathrm{GeV}}
\setbeamercolor{structure}{fg=THUpurple}
\setbeamersize{text margin left=10mm,text margin right=10mm}
% \setlength{\belowcaptionskip}{-2mm}
\title[Waveform Analysis]{Optimized PMT Waveform Analysis}
\date[JUNO]{April 16, 2021}
\AtBeginSection[]
{
\begin{frame}[noframenumbering]
\frametitle{Outline}
\thispagestyle{empty}
\tableofcontents[currentsection]
\end{frame}
}
\begin{document}
\captionsetup[figure]{labelfont={bf},name={Fig}}
\begin{CJK*}{UTF8}{gbsn}
\author[Dacheng Xu]{Dacheng~Xu~(徐大成) \and Erjin~Bao~(宝尔金) \and Yiyang~Wu~(武益阳) \and Benda~Xu~(续本达) \and Yu~Xu~(徐宇) \and Geliang~Zhang~(张戈亮) et~al. \\ [4mm] \includegraphics[height=2cm]{img/Tsinghua_University_Logo.png}}
\frame{\titlepage}
\begin{frame}[noframenumbering]
\frametitle{Outline}
\thispagestyle{empty}
\tableofcontents
\end{frame}
\section{Motivation}
\begin{frame}
\frametitle{Motivation}
\begin{columns}
\column{0.425\textwidth}
\begin{figure}
\centering
\includegraphics[width=1.0\linewidth]{img/event.png}
\caption{An Event in Detector}
\end{figure}
\column{0.575\textwidth}
\begin{figure}
\centering
\resizebox{\textwidth}{!}{\input{img/junowave.pgf}}
\caption{A PMT Waveform}
\end{figure}
\end{columns}
\begin{block}{}
Waveform analysis, which means extracting time and charge information from PMT waveforms, is the bedrock of subsequent analysis such as event reconstruction.
\end{block}
\end{frame}
\section{Dataset}
\begin{frame}
\frametitle{Simulation Setup - Time Profile}
\begin{figure}
\centering
\resizebox{0.4\textwidth}{!}{\input{img/profile.pgf}}
\caption{Time Profile of Events}
\end{figure}
\begin{align*}
\phi(t) &= \mathcal{N}(t|\sigma^2)\otimes \mathrm{Exp}(t|\tau) \\
&= \frac{1}{2\tau} \exp\left(\frac{\sigma^2}{2\tau^2}-\frac{t}{\tau}\right) \left[1 - \erf\left( \frac{\sigma}{\sqrt{2}\tau} - \frac{t}{\sqrt{2}\sigma} \right)\right]
\end{align*}
\end{frame}
\begin{frame}
\frametitle{Simulation Setup - Single PE response}
\begin{figure}
\centering
\resizebox{0.6\textwidth}{!}{\input{img/spe.pgf}}
\caption{Single PE response\cite{jetter_pmt_2012}}
\end{figure}
\begin{align*}
V_\mathrm{PE}(t) &= V_{0}\exp\left[-\frac{1}{2}\left(\frac{\log(t/\tau_\mathrm{PE})}{\sigma_\mathrm{PE}}\right)^{2}\right]
\end{align*}
\end{frame}
\begin{frame}
\frametitle{Data Input \& Output}
\begin{columns}
\column{0.5\textwidth}
\begin{figure}
\centering
\resizebox{1.0\textwidth}{!}{\input{img/wave.pgf}}
\caption{Input Waveform (Pedestal free)}
\end{figure}
\column{0.5\textwidth}
\begin{figure}
\centering
\resizebox{1.0\textwidth}{!}{\input{img/charge.pgf}}
\caption{Output Time and Charge $\hat\phi(t)$}
\end{figure}
\end{columns}
\begin{align*}
\tilde{\phi}(t) &= \sum_{i=1}^{N_{\mathrm{PE}}} q_i \delta(t-t_i), \ N_{\mathrm{PE}}\sim \mathrm{Poisson}(\mu) \\
w(t) &= \tilde{\phi}(t) \otimes V_\mathrm{PE}(t) + \epsilon(t) = \sum_{i=1}^{N_\mathrm{PE}} q_i V_\mathrm{PE}(t-t_i) + \epsilon(t)
\end{align*}
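As a toy instantiation of the model above (all values assumed purely for illustration, not taken from the simulation): for $N_\mathrm{PE}=2$ with $(t_1, q_1)=(\SI{10}{ns}, 1)$ and $(t_2, q_2)=(\SI{12}{ns}, 0.8)$, the noiseless waveform is
\begin{align*}
w(t) &= V_\mathrm{PE}(t-t_1) + 0.8\, V_\mathrm{PE}(t-t_2)
\end{align*}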
\end{frame}
\section{Evaluation Criteria}
\begin{frame}
\frametitle{Evaluation Criteria}
$\tilde{\phi}(t)$ (simulation result) is an approximation of $\phi(t)$ (time profile).
$\hat{\phi}(t)$ (reconstruction result) should be consistent with $\tilde{\phi}(t)$.
Several evaluation criteria are needed.
\begin{block}{}
\begin{equation*}
\hat{\phi}(t) \leftrightarrow \tilde{\phi}(t)
\end{equation*}
\end{block}
\end{frame}
\begin{frame}
\frametitle{Residual Sum Square}
\begin{align*}
\mathrm{RSS} &\coloneqq \int\left[\hat{w}(t) - w(t)\right]^2\mathrm{d}t
\end{align*}
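In practice the waveform is sampled, so the integral is approximated by a discrete sum over the samples $t_k$ (a sketch, assuming a uniform sampling step $\Delta t$):
\begin{align*}
\mathrm{RSS} &\approx \sum_{k}\left[\hat{w}(t_k) - w(t_k)\right]^2 \Delta t
\end{align*}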
\begin{figure}
\centering
\resizebox{1.0\textwidth}{!}{\input{img/tab.pgf}}
\caption{$b_1$ and $b_2$ have the same $\mathrm{RSS}=0.25$ relative to $a$, but $b_1$ is closer to $a$ in timing}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Wasserstein Distance}
\begin{figure}
\centering
\includegraphics[width=1.0\linewidth]{img/WD.png}
\caption{Wasserstein Distance when $p=1$: Earth Mover Distance}
\end{figure}
\begin{align*}
D_w\left[\hat{\phi}_*, \tilde{\phi}_*\right] &= \inf_{\gamma \in \Gamma} \left[\int \left\vert t_1 - t_2 \right\vert^p \gamma(t_1, t_2)\mathrm{d}t_1\mathrm{d}t_2\right]^{\frac{1}{p}}
\end{align*}
\begin{align*}
\Gamma &= \left\{\gamma(t_1, t_2) ~\middle\vert~ \int\gamma(t_1,t_2)\mathrm{d}t_1 = \tilde{\phi}_*(t_2) , \int\gamma(t_1,t_2)\mathrm{d}t_2 = \hat{\phi}_*(t_1) \right\}
\end{align*}
When $p=1$, writing the CDF of $\phi(t)$ as $\Phi(t)$, $D_w$ reduces to an $\ell_1$-distance:
\begin{align*}
D_w\left[\hat{\phi}_*, \tilde{\phi}_*\right] &= \int\left|\hat{\Phi}(t) - \tilde{\Phi}(t)\right| \mathrm{d}t
\end{align*}
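As a minimal worked example (toy values, not from the dataset): if $\hat{\phi}_*$ and $\tilde{\phi}_*$ are unit point masses at $t=\SI{1}{ns}$ and $t=\SI{3}{ns}$, their CDFs differ by $1$ exactly on $[1, 3]\,\si{ns}$, so
\begin{align*}
D_w\left[\hat{\phi}_*, \tilde{\phi}_*\right] &= \int\left|\hat{\Phi}(t) - \tilde{\Phi}(t)\right| \mathrm{d}t = \SI{2}{ns}
\end{align*}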
\end{frame}
\begin{frame}
\frametitle{Kullback-Leibler Divergence}
\begin{align*}
\hat{t}_\mathrm{KL} &= \arg\underset{t_0}{\max} \prod_{i=1}^{\hat{N}} \left[\phi(\hat{t}_i-t_0)\right]^{\hat{q}_i} ,\ \Delta t = \hat{t}_\mathrm{KL} - t_0
\end{align*}
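Equivalently, taking the logarithm of the product above, $\hat{t}_\mathrm{KL}$ maximizes a charge-weighted log-likelihood:
\begin{align*}
\hat{t}_\mathrm{KL} &= \arg\underset{t_0}{\max} \sum_{i=1}^{\hat{N}} \hat{q}_i \log\phi(\hat{t}_i-t_0)
\end{align*}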
\begin{figure}
\centering
\resizebox{0.6\textwidth}{!}{\input{img/twoprofile.pgf}}
\caption{Time translation between time profiles}
\end{figure}
\end{frame}
\section{Waveform Analysis Methods}
\begin{frame}
\frametitle{Fourier Deconvolution}
\begin{align*}
\mathcal{F}[w] &= \mathcal{F}[\tilde{\phi}]\mathcal{F}[V_\mathrm{PE}] + \mathcal{F}[\epsilon]
\end{align*}
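The estimate follows by inverting this relation in frequency space; a naive inverse (a sketch of the idea only, the exact regularization is not spelled out on this slide)
\begin{align*}
\hat{\phi} &= \mathcal{F}^{-1}\left[\frac{\mathcal{F}[w]}{\mathcal{F}[V_\mathrm{PE}]}\right]
\end{align*}
amplifies the noise term wherever $\mathcal{F}[V_\mathrm{PE}]$ is small, so in practice the ratio is damped by a low-pass filter before transforming back.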
\begin{figure}
\centering
\resizebox{0.6\textwidth}{!}{\input{img/fftrans.pgf}}
\caption{$\mathrm{RSS}=\SI{159.3}{mV^2},D_w=\SI{1.98}{ns},\Delta t_0=\SI{-1.26}{ns}$}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Lucy Direct Demodulation}
\begin{align*}
\hat{\phi}_*^{n+1}(s) &= \int \frac{\hat{\phi}_*^n(s) V_{\mathrm{PE}*}(t-s)}{\int\hat{\phi}_*^n(s') V_{\mathrm{PE}*}(t-s')\mathrm{d}s'} w_*(t) \mathrm{d}t
\end{align*}
\begin{figure}
\centering
\resizebox{0.6\textwidth}{!}{\input{img/lucyddm.pgf}}
\caption{$\mathrm{RSS}=\SI{61.5}{mV^2},D_w=\SI{0.94}{ns},\Delta t_0=\SI{-1.66}{ns}$}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Convolutional Neural Network}
\begin{columns}
\column{0.35\textwidth}
\begin{figure}
\centering
\begin{adjustbox}{width=0.65\textwidth}
\input{model}
\end{adjustbox}
\caption{CNN structure}
\end{figure}
\begin{center}
Loss is $D_w[\hat{\phi}_*, \tilde{\phi}_*]$
\end{center}
\column{0.65\textwidth}
\begin{figure}
\centering
\resizebox{\textwidth}{!}{\input{img/takara.pgf}}
\caption{$\mathrm{RSS}=\SI{10.82}{mV^2},D_w=\SI{0.70}{ns},\Delta t_0=\SI{-2.89}{ns}$}
\end{figure}
\end{columns}
\end{frame}
\section{Fast Bayesian Matching Pursuit}
\begin{frame}
\frametitle{Model definition}
\begin{itemize}
\item Time in DAQ window is divided into time bins: $\vec{t}$, whose length is $N$
\item Model vector: $\vec{z}$. $z_i=0\implies q_i=0$ and $\ z_i=1\implies q_i\neq0$
\item Linear Model: $\vec{w} = \bm{V}_\mathrm{PE}\vec{z} + \vec{\epsilon}$
\item
\begin{align*}
\left.
\begin{bmatrix}
\vec{w} \\
\vec{q}
\end{bmatrix}
\right\vert\vec{z}
&\sim \mathrm{Normal}\left(
\begin{bmatrix}
\bm{V}_\mathrm{PE}\vec{z} \\
\vec{z}
\end{bmatrix},
\begin{bmatrix}
\bm{\Sigma}_z & \bm{V}_\mathrm{PE}\bm{Z} \\
\bm{Z}\bm{V}_\mathrm{PE}^\intercal & \bm{Z}
\end{bmatrix}
\right) \\
\bm{\Sigma}_z &= \bm{V}_\mathrm{PE}\bm{Z}\bm{V}_\mathrm{PE}^\intercal+\sigma_\epsilon^2\bm{I}
\end{align*}
where $\bm{Z}$ is the diagonal matrix of vector $\vec{z}$ controlling $q_i$
\item $\mathcal{Z}=\{\vec{z}_j\}$ contains \textcolor{red}{$2^{N}$} model vectors (see the estimate below)
\end{itemize}
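To make the $2^{N}$ model count concrete (the bin count here is assumed purely for illustration): a \SI{1}{\micro\second} readout window with \SI{1}{ns} bins gives $N \approx 1000$, hence
\begin{align*}
|\mathcal{Z}| = 2^{N} \approx 2^{1000} \approx 10^{301},
\end{align*}
far beyond any exhaustive enumeration.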
\end{frame}
\begin{frame}
\frametitle{Fast Bayesian Matching Pursuit}
\begin{itemize}
\item Calculation of \textcolor{red}{$2^{N}$} model vectors is impossible!
\item Most of $p(\vec{w}|\vec{z}) \rightarrow 0$!
\end{itemize}
\noindent\begin{minipage}[c]{0.33\textwidth}
\begin{figure}[H]
\centering
\includegraphics[width=0.95\textwidth]{img/perfect_PE.pdf}
\caption{perfect PE matching waveform, $p(\vec{z}|\vec{w})$ hit maximum}
\label{fig:perfect PE}
\end{figure}
\end{minipage}\begin{minipage}[c]{0.33\textwidth}
\begin{figure}[H]
\centering
\includegraphics[width=0.95\textwidth]{img/not_so_perfect_PE.pdf}
\caption{not so perfect, $p(\vec{z}|\vec{w})$ is smaller but still $>0$}
\label{fig:not so perfect PE}
\end{figure}
\end{minipage}\begin{minipage}[c]{0.33\textwidth}
\begin{figure}[H]
\centering
\includegraphics[width=0.95\textwidth]{img/nonsense_PE.pdf}
\caption{Completely mismatch the waveform, $p(\vec{z}|\vec{w}) \rightarrow 0$}
\label{fig:nonsense PE}
\end{figure}
\end{minipage}
\begin{align*}
p(\vec{z}|\vec{w}) &= \frac{p(\vec{w}|\vec{z})p(\vec{z})}{\sum_{\vec{z}'\in\mathcal{Z}}p(\vec{w}|\vec{z'})p(\vec{z'})} \approx \frac{p(\vec{w}|\vec{z})p(\vec{z})}{\sum_{\vec{z}'\in\mathcal{Z}'}p(\vec{w}|\vec{z'})p(\vec{z'})}
\end{align*}
\end{frame}
\begin{frame}
\frametitle{Fast Bayesian Matching Pursuit}
\begin{itemize}
\item \begin{align*}
\log[\textcolor{red}{p(\vec{w},\vec{z})}] =& \log[p(\vec{w}|\vec{z})p(\vec{z})] \\
=& -\frac{1}{2}(\vec{w}-\bm{V}_\mathrm{PE}\vec{z})^\intercal\bm{\Sigma}_z^{-1}(\vec{w}-\bm{V}_\mathrm{PE}\vec{z})-\frac{1}{2}\log\det\bm{\Sigma}_z \\
&-\frac{N}{2}\log2\pi -\mu + \sum_{i|z_i=1}\log \frac{\mu \phi(t'_i - t_0) \Delta t'}{1-\mu \phi(t'_i - t_0) \Delta t'}
\end{align*}
\item A \textcolor{red}{repeated greedy search} (RGS) is performed to construct the target set $\mathcal{Z}'$, which contains only the $\vec{z}$ giving large $p(\vec{w}|\vec{z})$.
\end{itemize}
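A sketch of where the $-\mu$ term above comes from, assuming the per-bin Bernoulli prior on $\vec{z}$ and bins small enough that $\mu \phi(t'_i-t_0)\Delta t' \ll 1$:
\begin{align*}
\sum_{i}\log\left[1-\mu \phi(t'_i-t_0) \Delta t'\right] \approx -\mu\sum_{i}\phi(t'_i-t_0)\Delta t' \approx -\mu\int\phi(t)\mathrm{d}t = -\mu
\end{align*}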
\end{frame}
\begin{frame}
\frametitle{FBMP's result: Bayesian interface}
\begin{itemize}
\item PE Time: $\vec{t}$
\item Models: $\mathcal{Z}'=\{\vec{z}_j\}$
\item Charge: \begin{align*}
\hat{\vec{q}}_z = E(\vec{q}|\vec{w},\vec{z}) &= \vec{z} + \bm{Z}\bm{V}_\mathrm{PE}^\intercal\bm{\Sigma}_z^{-1}(\vec{w}-\bm{V}_\mathrm{PE}\vec{z})
\end{align*}
\item Model's posterior probability: $p(\vec{z}|\vec{w})$
\end{itemize}
\begin{center}
Provides opportunity for subsequent Bayesian analysis!
\end{center}
\end{frame}
\begin{frame}
\frametitle{FBMP Demonstration}
\begin{figure}
\centering
\resizebox{0.6\textwidth}{!}{\input{img/demoe2c0.pgf}}
\caption{$\mathrm{RSS}=\SI{12.26}{mV^2},D_w=\SI{0.63}{ns},\Delta t_0=\SI{-3.97}{ns}$}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{FBMP's Performance of Evaluation Criteria}
For dataset: $(\mu, \tau, \sigma)/\si{ns}=(4, 20, 5)$:
\begin{figure}
\centering
\resizebox{\textwidth}{!}{\input{img/vs.pgf}}
\caption{$D_w$ and $\mathrm{RSS}$ of methods}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{Charge Posterior}
\begin{figure}
\centering
\resizebox{0.7\textwidth}{!}{\input{img/chargehist.pgf}}
\caption{$\hat{q}$ histogram of methods}
\end{figure}
\begin{block}{}
FBMP retains the charge distribution of the PEs.
\end{block}
\end{frame}
\section{Summary}
\begin{frame}
\frametitle{Summary}
\begin{itemize}
\item Waveform analysis is necessary for any detailed downstream analysis.
\item FBMP restores the PE information most completely.
\item FBMP provides a Bayesian interface for subsequent analysis.
\end{itemize}
\end{frame}
\section{Backup}
\label{sec:FBMP's Next Step: Bayesian Probe of Point like Events (BAPPE)}
\begin{frame}
\frametitle{FBMP's Next Step: Bayesian Probe of Point-like Events (BAPPE)}
\textbf{Probe: detector's response to point source}
A vertex $\mathcal{V}$ deposits energy $E$ at $(\vec{r},t=0)$, causing the \textcolor{red}{$j$-th} PMT to receive $n$ PEs in $[t_i, t_i+\dd t)$. Here $n$ follows a Poisson distribution with expectation $P(t_i|\mathcal{V})\dd t$; in other words, PE arrival is an inhomogeneous Poisson process with intensity $P(t_i|\mathcal{V})$.
The probe is built by parametrizing simulation or calibration data~\cite{lin_fast_2016}.
\begin{figure}[H]
\centering
\includegraphics[width=0.4\textwidth]{img/PoissonIntensitySketch.pdf}
\caption{A sketch of $P(t_i|\mathcal{V})$}
\label{fig:1}
\end{figure}
\end{frame}
\begin{frame}
\frametitle{FBMP's Next Step: Bayesian Probe of Point-like Events (BAPPE)}
\textbf{The Bayesian interface between Probe and FBMP}
Forward process: given the vertex, the distribution of the response is determined: $z_i|_{t\in[t_i,t_i+\dd t)} \sim \mathrm{Poisson}(P(t_i|\mathcal{V}) \dd t)$, where $z_i$ is the number of PEs detected. When $\dd t \rightarrow 0$, the probability of $\geqslant 2$ PEs can be neglected, yielding:
\begin{equation}
z_i|_{t\in[t_i,t_i+\dd t)} \sim \left\{
\begin{aligned}
& 0, & 1-P(t_i|\mathcal{V}) \dd t \\
& 1, & P(t_i|\mathcal{V}) \dd t
\end{aligned}
\right.
\label{eq:1}
\end{equation}
Therefore, $\vec{z}$ is a boolean array, e.g.\ $\vec{z} = (0,0,0,1,0,0,1,1,0)$ means a PE is detected in the 4th, 7th and 8th time bins. For $N$ bins, there are $2^N$ PE configurations.
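For a configuration such as the example above, the independent increments of the Poisson process make the prior a product of per-bin factors:
\begin{align*}
P(\vec{z}|\mathcal{V}) &= \prod_{i:\, z_i=1} P(t_i|\mathcal{V})\dd t \prod_{i:\, z_i=0} \left[1-P(t_i|\mathcal{V})\dd t\right]
\end{align*}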
\end{frame}
\begin{frame}
\frametitle{The Bayesian interface between Probe and FBMP}
Using Bayes' theorem, for a single PMT,
\begin{equation}
\mathrm{Posterior}(\mathcal{V}|\vec{w}) = \frac{P(\vec{w}|\mathcal{V}) \cdot \mathrm{Prior}(\mathcal{V})} {P(\vec{w})} \propto P(\vec{w}|\mathcal{V}) \cdot \mathrm{Prior}(\mathcal{V})
\label{eq:2}
\end{equation}
$P(\vec{w})$ is independent of $\mathcal{V}$ and only acts as a normalization factor of the posterior.
\textbf{The interface between Probe and FBMP is {\color{red} the total probability rule}:}
\begin{equation}
\small
P(\vec{w}|\mathcal{V}) = \sum_{\vec{z} \in \mathcal{Z}} P(\vec{w}|\vec{z},\mathcal{V}) \cdot P(\vec{z}|\mathcal{V}) = \sum_{\vec{z} \in \mathcal{Z}} P(\vec{w}|\vec{z}) \cdot P(\vec{z}|\mathcal{V})
\label{eq:3}
\end{equation}
\end{frame}
\begin{frame}
\frametitle{The Bayesian interface between Probe and FBMP}
\begin{equation}
\small
\begin{aligned}
P(\vec{w}|\mathcal{V}) & = \sum_{\vec{z} \in \mathcal{Z}} P(\vec{w}|\vec{z},\mathcal{V}) \cdot P(\vec{z}|\mathcal{V}) \\
& = \sum_{\vec{z} \in \mathcal{Z}} P(\vec{w}|\vec{z}) \cdot P(\vec{z}|\mathcal{V}) \\
& \overset{\text{FBMP}}{\approx} \sum_{\vec{z} \in \textcolor{red}{\mathcal{Z}'}} P(\vec{w}|\vec{z}) \cdot P(\vec{z}|\mathcal{V})
\end{aligned}
\end{equation}
The set $\mathcal{Z}$ has $2^N$ elements, but only PE configurations $\vec{z}$ that match the waveform have relatively large probabilities (the dominant terms of the summation); these are exactly the configurations provided by FBMP.
i.e.:
$\mathcal{Z}' \subset \mathcal{Z}$, $|\mathcal{Z}'| \ll |\mathcal{Z}|$ and $\forall \vec{z}_1 \in \mathcal{Z} \backslash \mathcal{Z}',\ \forall \vec{z}_2 \in \mathcal{Z}' ,\ \ P(\vec{w}|\vec{z}_1) \ll P(\vec{w}|\vec{z}_2)$
\end{frame}
\begin{frame}
\frametitle{Bayesian Probe of Point-like Events (BAPPE)}
Recall
\begin{equation*}
P({z}_i|\mathcal{V}) = \left\{
\begin{aligned}
& 1-P(t_i|\mathcal{V}) \dd t, & 0\ \mathrm{PE} \\
& P(t_i|\mathcal{V}) \dd t , & 1\ \mathrm{PE}
\end{aligned}
\right.
\end{equation*}
Finally, we multiply the likelihoods of \textcolor{red}{all PMTs} and choose a uniform distribution as the vertex prior (in this case the posterior is proportional to the likelihood):
\begin{equation}
\mathrm{Posterior}(\mathcal{V}|\{\vec{w}_j\}) \propto \prod_{j \in {\mathrm{PMTId}}} \sum_{\vec{z}_{jk} \in \mathcal{Z}'_j} P_j(\vec{w}_j|\vec{z}_{jk}) \cdot P_j(\vec{z}_{jk}|\mathcal{V})
\label{eq:4}
\end{equation}
$\hat{\mathcal{V}}=\underset{\mathcal{V}}{\mathrm{argmax}}\ \mathrm{Posterior}(\mathcal{V}|\{\vec{w}_j\})$ is the BAPPE event reconstruction.
\end{frame}
\begin{frame}
\frametitle{The Road to the Nature of Neutrino Mass}
\begin{figure}[H]
\centering
\includegraphics[width=1.0\textwidth]{img/tech.pdf}
\caption{The Road to the Nature of Neutrino Mass}
\end{figure}
\end{frame}
\begin{frame}
\bibliographystyle{unsrt}
\bibliography{ref.bib}
\end{frame}
\end{CJK*}
\end{document} |
open import Relation.Binary.PropositionalEquality using (_≡_; refl)
module Dipsy.Polarity where
data Polarity : Set where
pos : Polarity
neg : Polarity
open import Algebra.FunctionProperties {A = Polarity} _≡_
flip : Polarity → Polarity
flip pos = neg
flip neg = pos
flip-inv : Involutive flip
flip-inv pos = refl
flip-inv neg = refl
|
function [PB] = EB2PB(EB)
% Convert computery things from exabytes to petabytes.
% Chad A. Greene 2012
PB = EB*1024 ; |
Require Import Lia.
Example silly_presburger_example : forall m n o p,
m + n <= n + o /\ o + 3 = p + 3 -> m <= p.
Proof. intros. lia. Qed.
|
kernelopts(printbytes=false);
with(RigidMotionsParameterSpaceDecompostion);
LaunchComputeSamplePoints([a,b,c], "__REPLACE__", "N2");
done
|
theory Chapter9_1
imports "~~/src/HOL/IMP/Types"
begin
text{*
\section*{Chapter 9}
\exercise
Reformulate the inductive predicates \ @{prop"\<Gamma> \<turnstile> a : \<tau>"},
\ @{prop"\<Gamma> \<turnstile> (b::bexp)"} \
and \ \mbox{@{prop"\<Gamma> \<turnstile> (c::com)"}} \ as three recursive functions
*}
fun atype :: "tyenv \<Rightarrow> aexp \<Rightarrow> ty option" where
"atype \<Gamma> (Ic i) = Some Ity"
| "atype \<Gamma> (Rc i) = Some Rty"
| "atype \<Gamma> (V x) = Some (\<Gamma> x)"
| "atype \<Gamma> (Plus a\<^sub>1 a\<^sub>2) = (if (atype \<Gamma> a\<^sub>1) = (atype \<Gamma> a\<^sub>2) then atype \<Gamma> a\<^sub>1 else None)"
fun bok :: "tyenv \<Rightarrow> bexp \<Rightarrow> bool" where
"bok \<Gamma> (Bc v) = True"
| "bok \<Gamma> (Not b) = bok \<Gamma> b"
| "bok \<Gamma> (And b\<^sub>1 b\<^sub>2) = (bok \<Gamma> b\<^sub>1 \<and> bok \<Gamma> b\<^sub>2)"
| "bok \<Gamma> (Less a\<^sub>1 a\<^sub>2) = (if (atype \<Gamma> a\<^sub>1) = (atype \<Gamma> a\<^sub>2) then (\<exists>a. atype \<Gamma> a\<^sub>1 = Some a) else False)"
fun cok :: "tyenv \<Rightarrow> com \<Rightarrow> bool" where
"cok \<Gamma> SKIP = True"
| "cok \<Gamma> (x ::= a) = (Some (\<Gamma> x) = atype \<Gamma> a)"
| "cok \<Gamma> (c\<^sub>1;; c\<^sub>2) = (cok \<Gamma> c\<^sub>1 \<and> cok \<Gamma> c\<^sub>2)"
| "cok \<Gamma> (IF b THEN t ELSE e) = (bok \<Gamma> b \<and> cok \<Gamma> t \<and> cok \<Gamma> e)"
| "cok \<Gamma> (WHILE b DO c) = (bok \<Gamma> b \<and> cok \<Gamma> c)"
text{* and prove *}
lemma atyping_atype: "(\<Gamma> \<turnstile> a : \<tau>) = (atype \<Gamma> a = Some \<tau>)"
by (induction a; auto)
lemma btyping_bok: "(\<Gamma> \<turnstile> b) = bok \<Gamma> b"
by (induction b; auto simp add: atyping_atype)
lemma ctyping_cok: "(\<Gamma> \<turnstile> c) = cok \<Gamma> c"
by (induction c; auto simp add: atyping_atype btyping_bok)
text{*
\endexercise
\exercise
Modify the evaluation and typing of @{typ aexp} by allowing @{typ int}s to be coerced
to @{typ real}s with the predefined coercion function
\noquotes{@{term[source] "int_to_real :: int \<Rightarrow> real"}} where necessary.
Now every @{typ aexp} has a value. Define an evaluation function:
*}
fun aval :: "aexp \<Rightarrow> state \<Rightarrow> val" where
"aval (Ic i) s = (Iv i)"
| "aval (Rc r) s = (Rv r)"
| "aval (V x) s = s x"
| "aval (Plus a\<^sub>1 a\<^sub>2) s =
(case (aval a\<^sub>1 s, aval a\<^sub>2 s) of
(Iv i\<^sub>1, Iv i\<^sub>2) \<Rightarrow> Iv (i\<^sub>1 + i\<^sub>2)
| (Rv r\<^sub>1, Rv r\<^sub>2) \<Rightarrow> Rv (r\<^sub>1 + r\<^sub>2)
| (Rv r, Iv i) \<Rightarrow> Rv (r + i)
| (Iv i, Rv r) \<Rightarrow> Rv (i + r))"
text{*
Similarly, every @{typ aexp} has a type.
Define a function that computes the type of an @{typ aexp}
*}
fun atyp :: "tyenv \<Rightarrow> aexp \<Rightarrow> ty" where
"atyp \<Gamma> (Ic i) = Ity"
| "atyp \<Gamma> (Rc r) = Rty"
| "atyp \<Gamma> (V x) = \<Gamma> x"
| "atyp \<Gamma> (Plus a\<^sub>1 a\<^sub>2) = (case (atyp \<Gamma> a\<^sub>1, atyp \<Gamma> a\<^sub>2) of
(Ity, Ity) \<Rightarrow> Ity
| (Rty, Rty) \<Rightarrow> Rty
| (Rty, Ity) \<Rightarrow> Rty
| (Ity, Rty) \<Rightarrow> Rty)"
text{* and prove that it computes the correct type: *}
lemma
atyp_plus:
fixes \<Gamma> s a1 a2
assumes "atyp \<Gamma> a1 = type (aval a1 s)"
and "atyp \<Gamma> a2 = type (aval a2 s)"
and "\<forall>x. type (s x) = \<Gamma> x"
shows "(case type (aval a1 s) of Ity \<Rightarrow> case type (aval a2 s) of Ity \<Rightarrow> Ity | Rty \<Rightarrow> Rty
| Rty \<Rightarrow> case type (aval a2 s) of Ity \<Rightarrow> Rty | _ \<Rightarrow> Rty) =
type (case aval a1 s of Iv i\<^sub>1 \<Rightarrow> case aval a2 s of Iv i\<^sub>2 \<Rightarrow> Iv (i\<^sub>1 + i\<^sub>2) | Rv r \<Rightarrow> Rv (real_of_int i\<^sub>1 + r)
| Rv r\<^sub>1 \<Rightarrow> case aval a2 s of Iv i \<Rightarrow> Rv (r\<^sub>1 + real_of_int i) | Rv r\<^sub>2 \<Rightarrow> Rv (r\<^sub>1 + r\<^sub>2))"
by (smt ty.exhaust ty.simps(3) ty.simps(4) type_eq_Ity type_eq_Rty val.simps(5) val.simps(6))
lemma "\<Gamma> \<turnstile> s \<Longrightarrow> atyp \<Gamma> a = type (aval a s)"
by (induction a; auto simp add: styping_def atyp_plus)
text{*
Note that Isabelle inserts the coercion @{typ real} automatically.
For example, if you write @{term "Rv(i+r)"} where @{text"i:: int"} and
@{text "r :: real"} then it becomes @{term "Rv(real i + r)"}.
\endexercise
\bigskip
For the following two exercises copy theory @{theory Types} and modify it as required.
\begin{exercise}
Add a @{text REPEAT} loop (see Exercise~\ref{exe:IMP:REPEAT}) to the typed version of IMP
and update the type soundness proof.
\end{exercise}
\begin{exercise}
Modify the typed version of IMP as follows. Values are now either integers or booleans.
Thus variables can have boolean values too. Merge the two expressions types
@{typ aexp} and @{typ bexp} into one new type @{text exp} of expressions
that has the constructors of both types (of course without real constants).
Combine @{const taval} and @{const tbval} into one evaluation predicate
@{text "eval :: exp \<Rightarrow> state \<Rightarrow> val \<Rightarrow> bool"}. Similarly combine the two typing predicates
into one: @{text "\<Gamma> \<turnstile> e : \<tau>"} where @{text "e :: exp"} and the IMP-type @{text \<tau>} can
be one of @{text Ity} or @{text Bty}.
Adjust the small-step semantics and the type soundness proof.
\end{exercise}
*}
end
|
State Before: α : Type u_1
β : Type ?u.733391
γ : Type ?u.733394
δ : Type ?u.733397
ι : Type ?u.733400
R : Type ?u.733403
R' : Type ?u.733406
m0 : MeasurableSpace α
inst✝³ : MeasurableSpace β
inst✝² : MeasurableSpace γ
μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α
s✝ s' t : Set α
inst✝¹ : MeasurableSpace α
μ : Measure α
inst✝ : SigmaFinite μ
s : Set α
⊢ (∃ n, 0 < ↑↑μ (s ∩ spanningSets μ n)) ↔ 0 < ↑↑μ s State After: α : Type u_1
β : Type ?u.733391
γ : Type ?u.733394
δ : Type ?u.733397
ι : Type ?u.733400
R : Type ?u.733403
R' : Type ?u.733406
m0 : MeasurableSpace α
inst✝³ : MeasurableSpace β
inst✝² : MeasurableSpace γ
μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α
s✝ s' t : Set α
inst✝¹ : MeasurableSpace α
μ : Measure α
inst✝ : SigmaFinite μ
s : Set α
⊢ (¬∃ n, 0 < ↑↑μ (s ∩ spanningSets μ n)) ↔ ¬0 < ↑↑μ s Tactic: rw [← not_iff_not] State Before: α : Type u_1
β : Type ?u.733391
γ : Type ?u.733394
δ : Type ?u.733397
ι : Type ?u.733400
R : Type ?u.733403
R' : Type ?u.733406
m0 : MeasurableSpace α
inst✝³ : MeasurableSpace β
inst✝² : MeasurableSpace γ
μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α
s✝ s' t : Set α
inst✝¹ : MeasurableSpace α
μ : Measure α
inst✝ : SigmaFinite μ
s : Set α
⊢ (¬∃ n, 0 < ↑↑μ (s ∩ spanningSets μ n)) ↔ ¬0 < ↑↑μ s State After: α : Type u_1
β : Type ?u.733391
γ : Type ?u.733394
δ : Type ?u.733397
ι : Type ?u.733400
R : Type ?u.733403
R' : Type ?u.733406
m0 : MeasurableSpace α
inst✝³ : MeasurableSpace β
inst✝² : MeasurableSpace γ
μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α
s✝ s' t : Set α
inst✝¹ : MeasurableSpace α
μ : Measure α
inst✝ : SigmaFinite μ
s : Set α
⊢ (∀ (x : ℕ), ↑↑μ (s ∩ spanningSets μ x) = 0) ↔ ↑↑μ s = 0 Tactic: simp only [not_exists, not_lt, nonpos_iff_eq_zero] State Before: α : Type u_1
β : Type ?u.733391
γ : Type ?u.733394
δ : Type ?u.733397
ι : Type ?u.733400
R : Type ?u.733403
R' : Type ?u.733406
m0 : MeasurableSpace α
inst✝³ : MeasurableSpace β
inst✝² : MeasurableSpace γ
μ✝ μ₁ μ₂ μ₃ ν ν' ν₁ ν₂ : Measure α
s✝ s' t : Set α
inst✝¹ : MeasurableSpace α
μ : Measure α
inst✝ : SigmaFinite μ
s : Set α
⊢ (∀ (x : ℕ), ↑↑μ (s ∩ spanningSets μ x) = 0) ↔ ↑↑μ s = 0 State After: no goals Tactic: exact forall_measure_inter_spanningSets_eq_zero s |
import algebraic_topology.simplicial_object
import algebraic_topology.simplicial_set
import category_theory.functor.basic
import combinatorics.quiver.basic
import combinatorics.quiver.path
open sSet quiver category_theory
open category_theory.simplicial_object
open category_theory.functor
variable {X : sSet}
notation (name := simplicial_object.at) X ` _[`:1000 n `]` :=
(X : category_theory.simplicial_object hole!).obj (opposite.op (simplex_category.mk n))
instance underlying : quiver (X _[0]) := by refine {
hom := λ A B, {edge : X _[1] // X.δ 1 edge = A ∧ X.δ 0 edge = B}
}
def edge_to_qedge (edge : X _[1]) (A B : X _[0]) (σ : X.δ 1 edge = A) (τ : X.δ 0 edge = B) :
A ⟶ B := subtype.mk edge (by split; assumption)
def edge_to_path (edge : X _[1]) (A B : X _[0]) (σ : X.δ 1 edge = A) (τ : X.δ 0 edge = B) :
path A B := path.nil.cons (edge_to_qedge edge A B σ τ)
infix `**`:50 := path.comp
@[simp] lemma edge_to_qedge_inv_coe
{edge : X _[1]} {A B : X _[0]} {σ : X.δ 1 edge = A} {τ : X.δ 0 edge = B} :
↑(edge_to_qedge edge A B σ τ) = edge := begin
rw ← subtype.val_eq_coe, dsimp [edge_to_qedge], refl,
end
-- witnesses 0-simplices a, b as the boundary of 1-simplex ab
def ends (ab : X _[1]) (a b : X _[0]) := (X.δ 1 ab = a) ∧ (X.δ 0 ab = b)
def ends.default (ab : X _[1]) : ends ab (X.δ 1 ab) (X.δ 0 ab) :=
and.intro (eq.refl _) (eq.refl _)
def ends.degen (A : X _[0]) : ends (X.σ 0 A) A A := begin
split, have : X.δ 1 (X.σ 0 A) = (X.σ 0 ≫ X.δ 1) A, refl, rw this,
have H := @δ_comp_σ_succ _ _ X 0 0, simp at H,
rw H, simp,
have : X.δ 0 (X.σ 0 A) = (X.σ 0 ≫ X.δ 0) A, refl, rw this,
have H := @δ_comp_σ_self _ _ X 0 0, simp at H, rw H, simp,
end
-- produces a path a -> b given a proof that a, b are the boundary of a 1-simplex
def to_path {ab : X _[1]} {a b : X _[0]} (ε : ends ab a b) :=
edge_to_path ab a b ε.1 ε.2
lemma comp_of_app {n} {v : X _[n+2]} {i : fin(n+2)} {j : fin(n+3)} : X.δ i (X.δ j v) = (X.δ j ≫ X.δ i) v := by simp
lemma simplicial_11 (h : X _[2]) : X.δ 1 (X.δ 1 h) = X.δ 1 (X.δ 2 h) := begin
repeat {rw comp_of_app},
have H := @δ_comp_δ_self _ _ X 0 1,
simp at H, rw H,
end
lemma simplicial_10 (h : X _[2]) : X.δ 1 (X.δ 0 h) = X.δ 0 (X.δ 2 h) := begin
repeat {rw comp_of_app},
have H := @δ_comp_δ _ _ X 0 0 1, simp at H, rw ← H,
end
lemma simplicial_00 (h : X _[2]) : X.δ 0 (X.δ 0 h) = X.δ 0 (X.δ 1 h) := begin
repeat {rw comp_of_app},
have H := @δ_comp_δ_self _ _ X 0 0,
simp at H, rw H,
end
inductive homotopic'' (A B : X _[0]) : path A B → path A B → Prop
| homotopy (h : X _[2]) {C : X _[0]}
(σ : X.δ 1 (X.δ 1 h) = A)
(τ : X.δ 0 (X.δ 1 h) = B)
(ρ : X.δ 0 (X.δ 2 h) = C) :
homotopic'' (edge_to_path (X.δ 1 h) A B σ τ)
(path.comp
(edge_to_path (X.δ 2 h) A C ((simplicial_11 h).symm.trans σ) ρ)
(edge_to_path (X.δ 0 h) C B ((simplicial_10 h).trans ρ) ((simplicial_00 h).trans τ)))
def degen_edge (A : X _[0]) : A ⟶ A := subtype.mk (X.σ 0 A) (ends.degen A)
inductive homotopic : Π (A B : X _[0]), path A B → path A B → Prop
| lift (A B : X _[0]) (p q : path A B) (h : homotopic'' A B p q) : homotopic A B p q
| degen (A : X _[0]) : homotopic A A (path.nil.cons $ degen_edge A) (path.nil)
| refl (A B : X _[0]) (p : path A B) : homotopic A B p p
| symm (A B : X _[0]) (p q : path A B) (h : homotopic A B p q) : homotopic A B q p
| trans (A B : X _[0]) (p q r : path A B) (h1 : homotopic A B p q) (h2 : homotopic A B q r) : homotopic A B p r
| comp_l (A B : X _[0]) {C : X _[0]} (p : path A C) (q r : path C B) (h : homotopic C B q r) : homotopic A B (p.comp q) (p.comp r)
| comp_r (A B : X _[0]) {C : X _[0]} (p q : path A C) (r : path C B) (h : homotopic A C p q) : homotopic A B (p.comp r) (q.comp r)
@[refl] lemma htpy_refl (A B : X _[0]) (p : path A B) :
homotopic A B p p := (homotopic.refl A B p)
@[symm] lemma htpy_symm (A B : X _[0]) (p q : path A B) :
homotopic A B p q → homotopic A B q p :=
λ h, (homotopic.symm A B p q h)
@[trans] lemma htpy_trans (A B : X _[0]) (p q r : path A B) :
homotopic A B p q → homotopic A B q r → homotopic A B p r :=
λ h1, λ h2, (homotopic.trans A B p q r h1 h2)
theorem htpy_is_equiv (A B : X _[0]) : equivalence (homotopic A B) :=
mk_equivalence (homotopic A B) (htpy_refl A B) (htpy_symm A B) (htpy_trans A B)
|
Of this number, approximately 9600 weapons were serviceable, or ready-for-issue. Note there were only 1,364 percussion weapons available. Disposition of the weapons found in the Arsenal is somewhat sketchy, but from various records it can be surmised that the 5th, 6th, 7th, and 8th Arkansas Infantry Regiments, mustered in June 1861, were issued <unk> / M1822 .69 caliber flintlocks. The 9th and 10th Arkansas, four companies of Kelly's 9th Arkansas Battalion, and the 3rd Arkansas Cavalry Regiment were issued flintlock Hall's Rifles. The units comprising the infantry force of Van Dorn's Army of the West, the 1st and 2nd Arkansas Mounted Rifles, were also armed with M1822 flintlocks from the Little Rock Arsenal. By the time the 11th and 12th Arkansas Infantry Regiments mustered in at Little Rock, the supply of arms had been almost completely exhausted, and only old "junker" weapons were left.
|
State Before: k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
⊢ ↑(Finset.weightedVSub s p) w ∈
vectorSpan k {↑(Finset.affineCombination k s p) w₁, ↑(Finset.affineCombination k s p) w₂} ↔
∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) State After: k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
⊢ (∃ r,
r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) =
↑(Finset.weightedVSub s p) w) ↔
∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) Tactic: rw [mem_vectorSpan_pair] State Before: k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
⊢ (∃ r,
r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) =
↑(Finset.weightedVSub s p) w) ↔
∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) State After: case refine'_1
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h✝ : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
h :
∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
⊢ ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
case refine'_2
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h✝ : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
h : ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ ∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w Tactic: refine' ⟨fun h => _, fun h => _⟩ State Before: case refine'_1
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h✝ : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
h :
∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
⊢ ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
⊢ ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) Tactic: rcases h with ⟨r, hr⟩ State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
⊢ ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
i : ι
hi : i ∈ s
⊢ w i = r * (w₁ i - w₂ i) Tactic: refine' ⟨r, fun i hi => _⟩ State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w
i : ι
hi : i ∈ s
⊢ w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
⊢ w i = r * (w₁ i - w₂ i) Tactic: rw [s.affineCombination_vsub, ← s.weightedVSub_const_smul, ← sub_eq_zero, ← map_sub] at hr State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
⊢ w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
⊢ w i = r * (w₁ i - w₂ i) Tactic: have hw' : (∑ j in s, (r • (w₁ - w₂) - w) j) = 0 := by
simp_rw [Pi.sub_apply, Pi.smul_apply, Pi.sub_apply, smul_sub, Finset.sum_sub_distrib, ←
Finset.smul_sum, hw, hw₁, hw₂, sub_self] State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
⊢ w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
hr' : (r • (w₁ - w₂) - w) i = 0
⊢ w i = r * (w₁ i - w₂ i) Tactic: have hr' := h s _ hw' hr i hi State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
hr' : (r • (w₁ - w₂) - w) i = 0
⊢ w i = r * (w₁ i - w₂ i) State After: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
hr' : (r • (w₁ - w₂) - w) i = 0
⊢ r • (w₁ i - w₂ i) - w i = 0 Tactic: rw [eq_comm, ← sub_eq_zero, ← smul_eq_mul] State Before: case refine'_1.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
hw' : ∑ j in s, (r • (w₁ - w₂) - w) j = 0
hr' : (r • (w₁ - w₂) - w) i = 0
⊢ r • (w₁ i - w₂ i) - w i = 0 State After: no goals Tactic: exact hr' State Before: k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr✝ : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s p) w
hr : ↑(Finset.weightedVSub s p) (r • (w₁ - w₂) - w) = 0
i : ι
hi : i ∈ s
⊢ ∑ j in s, (r • (w₁ - w₂) - w) j = 0 State After: no goals Tactic: simp_rw [Pi.sub_apply, Pi.smul_apply, Pi.sub_apply, smul_sub, Finset.sum_sub_distrib, ←
Finset.smul_sum, hw, hw₁, hw₂, sub_self] State Before: case refine'_2
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h✝ : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
h : ∃ r, ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ ∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w State After: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ ∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w Tactic: rcases h with ⟨r, hr⟩ State Before: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ ∃ r, r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w State After: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w Tactic: refine' ⟨r, _⟩ State Before: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w State After: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
w' : ι → k := fun i => r * (w₁ i - w₂ i)
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w Tactic: let w' i := r * (w₁ i - w₂ i) State Before: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
hr : ∀ (i : ι), i ∈ s → w i = r * (w₁ i - w₂ i)
w' : ι → k := fun i => r * (w₁ i - w₂ i)
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w State After: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
w' : ι → k := fun i => r * (w₁ i - w₂ i)
hr : ∀ (i : ι), i ∈ s → w i = w' i
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w Tactic: change ∀ i ∈ s, w i = w' i at hr State Before: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
w' : ι → k := fun i => r * (w₁ i - w₂ i)
hr : ∀ (i : ι), i ∈ s → w i = w' i
⊢ r • (↑(Finset.affineCombination k s p) w₁ -ᵥ ↑(Finset.affineCombination k s p) w₂) = ↑(Finset.weightedVSub s p) w State After: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
w' : ι → k := fun i => r * (w₁ i - w₂ i)
hr : ∀ (i : ι), i ∈ s → w i = w' i
⊢ ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s fun x => p x) fun i => w' i Tactic: rw [s.weightedVSub_congr hr fun _ _ => rfl, s.affineCombination_vsub, ←
s.weightedVSub_const_smul] State Before: case refine'_2.intro
k : Type u_1
V : Type u_2
P : Type u_3
inst✝³ : Ring k
inst✝² : AddCommGroup V
inst✝¹ : Module k V
inst✝ : AffineSpace V P
ι : Type u_4
p : ι → P
h : AffineIndependent k p
w w₁ w₂ : ι → k
s : Finset ι
hw : ∑ i in s, w i = 0
hw₁ : ∑ i in s, w₁ i = 1
hw₂ : ∑ i in s, w₂ i = 1
r : k
w' : ι → k := fun i => r * (w₁ i - w₂ i)
hr : ∀ (i : ι), i ∈ s → w i = w' i
⊢ ↑(Finset.weightedVSub s p) (r • (w₁ - w₂)) = ↑(Finset.weightedVSub s fun x => p x) fun i => w' i State After: no goals Tactic: congr |
Formal statement is: lemma cohomotopically_trivial_retraction_gen: assumes P: "\<And>f. \<lbrakk>continuous_on t f; f ` t \<subseteq> U; Q f\<rbrakk> \<Longrightarrow> P(f \<circ> h)" and Q: "\<And>f. \<lbrakk>continuous_on s f; f ` s \<subseteq> U; P f\<rbrakk> \<Longrightarrow> Q(f \<circ> k)" and Qeq: "\<And>h k. (\<And>x. x \<in> t \<Longrightarrow> h x = k x) \<Longrightarrow> Q h = Q k" and hom: "\<And>f g. \<lbrakk>continuous_on s f; f ` s \<subseteq> U; P f; continuous_on s g; g ` s \<subseteq> U; P g\<rbrakk> \<Longrightarrow> homotopic_with_canon P s U f g" and contf: "continuous_on t f" and imf: "f ` t \<subseteq> U" and Qf: "Q f" and contg: "continuous_on t g" and img: "g ` t \<subseteq> U" and Qg: "Q g" shows "homotopic_with_canon Q t U f g" Informal statement is: Under the stated hypotheses relating the properties $P$ and $Q$ via the maps $h$ and $k$, and assuming that any two continuous maps from $s$ to $U$ satisfying $P$ are homotopic through maps satisfying $P$, any two continuous maps $f$ and $g$ from $t$ to $U$ satisfying $Q$ are homotopic through maps satisfying $Q$.
State Before: α : Type u
β : Type v
γ : Type w
s✝ s₁ s₂ : Finset α
a✝ : α
b : β
f✝ g : α → β
inst✝¹ : NonAssocSemiring β
inst✝ : DecidableEq α
s : Finset α
f : α → β
a : α
⊢ ∑ x in s, (if a = x then 1 else 0) * f x = if a ∈ s then f a else 0 State After: no goals Tactic: simp |
(* Title: HOL/Auth/n_german_lemma_on_inv__21.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_on_inv__21 imports n_german_base
begin
section{*All lemmas on causal relation between inv__21 and some rule r*}
lemma n_RecvReqSVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqS N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvReqEVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvReqE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (andForm (andForm (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (neg (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E)))) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Empty))) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const true))) (eqn (IVar (Ident ''CurCmd'')) (Const Empty))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__0Vsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''InvSet'') p__Inv2)) (Const true)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const Inv))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvInvAckVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
moreover {
assume b1: "(i~=p__Inv2)"
have "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))\<or>((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))" by auto
moreover {
assume c1: "((formEval (eqn (IVar (Ident ''ExGntd'')) (Const true)) s))"
have "?P1 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume c1: "((formEval (neg (eqn (IVar (Ident ''ExGntd'')) (Const true))) s))"
have "?P2 s"
proof(cut_tac a1 a2 b1 c1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately have "invHoldForRule s f r (invariants N)" by satx
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Para (Ident ''InvSet'') p__Inv2)) (Const true)) (eqn (IVar (Para (Ident ''ShrSet'') p__Inv2)) (Const false))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const true)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntS))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__21:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__21 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__21:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_StoreVsinv__21:
assumes a1: "\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__21:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__21:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__21 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
(* Title: HOL/Auth/n_germanSimp_lemma_inv__60_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSimp Protocol Case Study*}
theory n_germanSimp_lemma_inv__60_on_rules imports n_germanSimp_lemma_on_inv__60
begin
section{*All lemmas on causal relation between inv__60*}
lemma lemma_inv__60_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__60 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__0 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__0Vsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE__part__1 N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqE__part__1Vsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__60) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__60) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
Formal statement is: lemma enum_in: "i \<le> n \<Longrightarrow> enum i \<in> s" Informal statement is: If $i \leq n$, then $enum(i) \in s$. |
The distribution of a measurable function is the same as the distribution of the codomain. |
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Balanced C
X✝ Y✝ : Cᵒᵖ
f : X✝ ⟶ Y✝
fmono : Mono f
fepi : Epi f
⊢ IsIso f
[PROOFSTEP]
rw [← Quiver.Hom.op_unop f]
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Balanced C
X✝ Y✝ : Cᵒᵖ
f : X✝ ⟶ Y✝
fmono : Mono f
fepi : Epi f
⊢ IsIso f.unop.op
[PROOFSTEP]
exact isIso_of_op _
|
(*-------------------------------------------*
| CSP-Prover on Isabelle2004 |
| December 2004 |
| August 2005 (modified) |
| |
| CSP-Prover on Isabelle2005 |
| October 2005 (modified) |
| April 2006 (modified) |
| |
| CSP-Prover on Isabelle2013 |
| June 2013 (modified) |
| |
| CSP-Prover on Isabelle2017 |
| April 2018 (modified) |
| |
| Yoshinao Isobe (AIST JAPAN) |
*-------------------------------------------*)
theory Domain_F
imports CSP_T.Domain_T Set_F
begin
(*****************************************************************
1.
2.
3.
4.
*****************************************************************)
(* The following simplification rules are deleted in this theory file *)
(* because they unexpectly rewrite UnionT and InterT. *)
(* Union (B ` A) = (UN x:A. B x) *)
(* Inter (B ` A) = (INT x:A. B x) *)
(*
declare Union_image_eq [simp del]
declare Inter_image_eq [simp del]
*)
(* no simp rules in Isabelle 2017
declare Sup_image_eq [simp del]
declare Inf_image_eq [simp del]
*)
(***********************************************************
type def (Stable Failure)
***********************************************************)
(* types 'a domTsetF = "'a domT * 'a setF" (* synonym *) 2011 *)
type_synonym 'a domTsetF = "'a domT * 'a setF"
definition
HC_T2 :: "'a domTsetF => bool"
where
HC_T2_def :
"HC_T2 TF == ALL s X. (s, X) :f (snd TF)
--> s :t (fst TF)"
definition
HC_T3 :: "'a domTsetF => bool"
where
HC_T3_def :
"HC_T3 TF == ALL s. s ^^^ <Tick> :t (fst TF) & noTick s
--> (ALL X. (s ^^^ <Tick>, X) :f (snd TF))"
definition
HC_F3 :: "'a domTsetF => bool"
where
HC_F3_def :
"HC_F3 TF == ALL s X Y. (s, X) :f (snd TF) & noTick s &
(ALL a. a : Y --> s ^^^ <a> ~:t fst TF)
--> (s, X Un Y) :f (snd TF)"
definition
HC_F4 :: "'a domTsetF => bool"
where
HC_F4_def :
"HC_F4 TF == ALL s. s ^^^ <Tick> :t (fst TF) & noTick s
--> (s, Evset) :f (snd TF)"
definition
HC_T3_F4 :: "'a domTsetF => bool"
where
HC_T3_F4_def :
"HC_T3_F4 TF == ALL s. s ^^^ <Tick> :t (fst TF) & noTick s
--> ((s, Evset) :f (snd TF) &
(ALL X. (s ^^^ <Tick>, X) :f (snd TF)))"
lemma HC_T3_F4_iff : "HC_T3_F4 TF = (HC_T3 TF & HC_F4 TF)"
apply (simp add: HC_T3_F4_def HC_T3_def HC_F4_def)
by (auto)
(*** BOT in domF ***)
lemma BOT_T2_T3_F3_F4: "HC_T2({<>}t , {}f) & HC_F3({<>}t , {}f) &
HC_T3_F4({<>}t , {}f)"
by (auto simp add: HC_T2_def HC_F3_def HC_T3_F4_def)
(**************************************************
Type domF (Stable-Failures model)
**************************************************)
definition "domF = {SF::('a domTsetF). HC_T2(SF) & HC_T3(SF) & HC_F3(SF) & HC_F4(SF)}"
typedef 'a domF = "domF :: 'a domTsetF set"
apply (rule_tac x ="({<>}t , {}f)" in exI)
apply (simp add: domF_def)
by (simp add: BOT_T2_T3_F3_F4 HC_T3_F4_iff[THEN sym])
declare Rep_domF [simp]
lemma domF_iff: "domF = {SF. HC_T2 SF & HC_F3 SF & HC_T3_F4 SF}"
by (auto simp add: domF_def HC_T3_F4_iff)
(*********************************************************
The relation (<=) is defined over domF
*********************************************************)
instantiation domF :: (type) ord
begin
definition
subdomF_def:
"SF <= SE == (Rep_domF SF) <= (Rep_domF SE)"
definition
psubdomF_def:
"SF < SE == (Rep_domF SF) < (Rep_domF SE)"
instance
by (intro_classes)
end
(*********************************************************
The relation (<=) is a partial order
*********************************************************)
instance domF :: (type) order
apply (intro_classes)
apply (unfold subdomF_def psubdomF_def)
apply (auto)
apply (simp add: Rep_domF_inject)
done
(***********************************************************
lemmas
***********************************************************)
(*******************************
basic
*******************************)
(*** T2 ***)
lemma domTsetF_T2:
"[| TF : domF ; (s, X) :f snd TF |] ==> s :t fst TF"
by (auto simp add: domF_def HC_T2_def)
lemma domF_T2:
"[| (T,F) : domF ; (s, X) :f F |] ==> s :t T"
by (auto simp add: domF_def HC_T2_def)
(*** T3 ***)
lemma domTsetF_T3:
"[| TF : domF ; s ^^^ <Tick> :t fst TF ; noTick s |]
==> (s ^^^ <Tick>, X) :f snd TF"
by (simp add: domF_def HC_T3_def)
lemma domF_T3:
"[| (T,F) : domF ; s ^^^ <Tick> :t T ; noTick s |]
==> (s ^^^ <Tick>, X) :f F"
by (simp add: domF_def HC_T3_def)
(*** F3 ***)
lemma domTsetF_F3:
"[| TF : domF ; (s, X) :f snd TF ; noTick s ;
(ALL a. a : Y --> s ^^^ <a> ~:t fst TF) |]
==> (s, X Un Y) :f snd TF"
by (simp add: domF_def HC_F3_def)
lemma domF_F3:
"[| (T,F) : domF ; (s, X) :f F ; noTick s ;
(ALL a. a : Y --> s ^^^ <a> ~:t T) |]
==> (s, X Un Y) :f F"
by (simp add: domF_def HC_F3_def)
(*** F4 ***)
lemma domTsetF_F4:
"[| TF : domF ; s ^^^ <Tick> :t fst TF ; noTick s |]
==> (s, Evset) :f snd TF"
by (simp add: domF_def HC_F4_def)
lemma domF_F4:
"[| (T,F) : domF ; s ^^^ <Tick> :t T ; noTick s |]
==> (s, Evset) :f F"
by (simp add: domF_def HC_F4_def)
(*** T3_F4 ***)
lemma domTsetF_T3_F4:
"[| TF : domF ; s ^^^ <Tick> :t fst TF ; noTick s |]
==> (s, Evset) :f snd TF & (ALL X. (s ^^^ <Tick>, X) :f snd TF)"
by (simp add: domF_iff HC_T3_F4_def)
lemma domF_T3_F4:
"[| (T,F) : domF ; s ^^^ <Tick> :t T ; noTick s |]
==> (s, Evset) :f F & (ALL X. (s ^^^ <Tick>, X) :f F)"
by (simp add: domF_iff HC_T3_F4_def)
(*** F2_F4 ***)
lemma domTsetF_F2_F4:
"[| TF : domF ; s ^^^ <Tick> :t fst TF ; noTick s ; X <= Evset |]
==> (s, X) :f snd TF"
apply (simp add: domF_def HC_F4_def)
by (auto intro: memF_F2)
lemma domF_F2_F4:
"[| (T,F) : domF ; s ^^^ <Tick> :t T ; noTick s ; X <= Evset |]
==> (s, X) :f F"
apply (insert domTsetF_F2_F4[of "(T,F)" s X])
by (simp)
(*******************************
check in domF
*******************************)
(*** ({<>}t, {}f) ***)
lemma BOT_in_domF[simp]: "({<>}t, {}f) : domF"
by (simp add: domF_iff BOT_T2_T3_F3_F4)
(*******************************
BOT is the bottom
*******************************)
lemma BOT_is_bottom_domF[simp]: "({<>}t , {}f) <= SF"
by (simp add: order_pair_def)
(***********************************************************
operators on domF
***********************************************************)
definition
pairF :: "'a domT => 'a setF => 'a domF" ("(0_ ,,/ _)" [51,52] 0)
where
pairF_def : "(T ,, F) == Abs_domF (T, F)"
definition
fstF :: "'a domF => 'a domT"
where
fstF_def : "fstF == fst o Rep_domF"
definition
sndF :: "'a domF => 'a setF"
where
sndF_def : "sndF == snd o Rep_domF"
(***********************************************************
pairSF lemmas
***********************************************************)
lemma fold_fstF: "fst (Rep_domF SF) = fstF SF"
by (simp add: fstF_def comp_def)
lemma fold_sndF: "snd (Rep_domF SF) = sndF SF"
by (simp add: sndF_def comp_def)
lemma pairF_fstF: "(S,F) : domF ==> fstF (S,,F) = S"
apply (simp add: pairF_def fstF_def)
by (simp add: Abs_domF_inverse)
lemma pairF_sndF: "(S,F) : domF ==> sndF (S,,F) = F"
apply (simp add: pairF_def sndF_def)
by (simp add: Abs_domF_inverse)
lemma eqF_decompo:
"(SF = SE) = (fstF SF = fstF SE & sndF SF = sndF SE)"
apply (simp add: Rep_domF_inject[THEN sym])
apply (simp add: pair_eq_decompo)
apply (simp add: fstF_def sndF_def)
done
lemmas pairF = pairF_fstF pairF_sndF eqF_decompo
lemma mono_fstF: "mono fstF"
apply (simp add: mono_def)
apply (simp add: fstF_def)
apply (simp add: subdomF_def)
apply (simp add: order_pair_def)
done
lemma mono_sndF: "mono sndF"
apply (simp add: mono_def)
apply (simp add: sndF_def)
apply (simp add: subdomF_def)
apply (simp add: order_pair_def)
done
(*********************************************************
Healthiness conditions for pairF
*********************************************************)
lemma pairF_domF_T2:
"(s, X) :f sndF SF ==> s :t fstF SF"
apply (simp add: sndF_def fstF_def)
apply (rule domF_T2[of _ "snd (Rep_domF SF)"])
by (simp_all)
lemma pairF_domF_T3:
"[| s ^^^ <Tick> :t fstF SF ; noTick s |]
==> (s ^^^ <Tick>, X) :f sndF SF"
apply (simp add: sndF_def fstF_def)
apply (rule domF_T3[of "fst (Rep_domF SF)" "snd (Rep_domF SF)"])
by (simp_all)
lemma pairF_domF_T3_Tick:
"<Tick> :t fstF SF ==> (<Tick>, X) :f sndF SF"
apply (insert pairF_domF_T3[of "<>" SF X])
by (simp)
lemma pairF_domF_F4:
"[| s ^^^ <Tick> :t fstF SF ; noTick s |]
==> (s, Evset) :f sndF SF"
apply (simp add: sndF_def fstF_def)
apply (rule domF_F4[of "fst (Rep_domF SF)" "snd (Rep_domF SF)"])
by (simp_all)
lemma pairF_domF_F3:
"[| (s, X) :f sndF SF ; noTick s ;
(ALL a. a : Y --> s ^^^ <a> ~:t fstF SF) |]
==> (s, X Un Y) :f sndF SF"
apply (simp add: sndF_def fstF_def)
apply (rule domF_F3[of "fst (Rep_domF SF)" "snd (Rep_domF SF)"])
by (simp_all)
lemma pairF_domF_F3I:
"[| (s, X) :f sndF SF ; noTick s ;
(ALL a. a : Y --> s ^^^ <a> ~:t fstF SF) ;
Z = X Un Y |]
==> (s, Z) :f sndF SF"
by (simp add: pairF_domF_F3)
(*** F2_F4 ***)
lemma pairF_domF_F2_F4:
"[| s ^^^ <Tick> :t fstF SF ; noTick s ; X <= Evset|]
==> (s, X) :f sndF SF"
apply (rule memF_F2[of _ "Evset"])
apply (rule pairF_domF_F4)
by (simp_all)
(*** T2_T3 ***)
lemma pairF_domF_T2_T3:
"[| (s ^^^ <Tick>, X) :f sndF SF ; noTick s |]
==> (s ^^^ <Tick>, Y) :f sndF SF"
apply (rule pairF_domF_T3)
apply (rule pairF_domF_T2)
by (simp_all)
(*********************************************************
fstF and sndF
*********************************************************)
lemma fstF_sndF_in_domF[simp]: "(fstF SF , sndF SF) : domF"
apply (simp add: domF_iff)
apply (simp add: HC_T2_def HC_F3_def HC_T3_F4_def)
apply (intro conjI)
apply (intro allI impI)
apply (elim exE)
apply (simp add: pairF_domF_T2)
apply (intro allI impI)
apply (elim conjE)
apply (simp add: pairF_domF_F3)
apply (intro allI impI)
apply (elim conjE)
apply (simp add: pairF_domF_T3 pairF_domF_F4)
done
lemma fstF_sndF_domF[simp]: "(fstF SF ,, sndF SF) = SF"
by (simp add: pairF)
(*********************************************************
subdomF
*********************************************************)
lemma subdomF_decompo:
"(SF <= SE) = (fstF SF <= fstF SE & sndF SF <= sndF SE)"
apply (simp add: subdomF_def)
apply (simp add: order_pair_def)
apply (simp add: fstF_def sndF_def)
done
(*********************************************************
define max F from T
*********************************************************)
definition
maxFof :: "'a domT => 'a setF"
where
maxFof_def: "maxFof T == {f. EX s. (EX X. f = (s, X)) & s :t T}f"
(* in setF *)
lemma maxFof_setF: "{f. EX s. (EX X. f = (s, X)) & s :t T} : setF"
by (simp add: setF_def HC_F2_def)
(* in maxFof *)
lemma in_maxFof:
"(f :f maxFof T) = (EX s. (EX X. f = (s, X)) & s :t T)"
apply (simp add: maxFof_def)
apply (simp add: memF_def)
apply (simp add: maxFof_setF Abs_setF_inverse)
done
(* in domF *)
lemma maxFof_domF: "(T, maxFof T) : domF"
apply (simp (no_asm) add: domF_iff)
apply (simp add: HC_T2_def HC_F3_def HC_T3_F4_def in_maxFof)
apply (intro allI impI)
apply (elim conjE)
apply (rule memT_prefix_closed)
apply (simp)
apply (simp)
done
(* max *)
lemma maxFof_max: "s :t T ==> (s,X) :f maxFof T"
by (simp add: in_maxFof)
(****************** to add them again ******************)
(*
declare Union_image_eq [simp]
declare Inter_image_eq [simp]
*)
(*
declare Sup_image_eq [simp]
declare Inf_image_eq [simp]
*)
end
|
If $f$ is a function from a topological space to a normed vector space, $f$ converges to $l$, and $\|f\|$ is eventually bounded below by $e$, then $e \le \|l\|$.
(** Coq coding by choukh, May 2022 **)
From ZF Require Import Basic.
(*** Inner Model ***)
Section InnerModel.
(* 𝓜 ⊨ ZF *)
Context {𝓜 : ZF}.
(* A class over 𝓜 *)
Variable P : 𝓜 → Prop.
Hypothesis P为封闭类 : 封闭类 P.
(* The class packaged as a type *)
Definition ℙ : Type := Σ x, x ∈ₚ P.
(* Embedding of a relation R on the class P into 𝓜 *)
Definition 嵌入 (R : ℙ → ℙ → Prop) : 𝓜 → 𝓜 → Prop :=
λ x y, ∃ (xP : x ∈ₚ P) (yP : y ∈ₚ P), R (exist P x xP) (exist P y yP).
Notation "⌜ R ⌝" := (嵌入 R) (format "⌜ R ⌝").
(* Projection of a relation R on 𝓜 to the class P *)
Definition 投影 (R : 𝓜 → 𝓜 → Prop) : ℙ → ℙ → Prop :=
λ X Y : (Σ x, x ∈ₚ P), R (proj1_sig X) (proj1_sig Y).
Lemma 嵌入的函数性 R : 函数性 R → 函数性 ⌜R⌝.
Proof.
intros FR x y z [xP [yP RXY]] [xP'[Pz RXZ]].
eapply eq_sig_fst. eapply FR. apply RXY.
erewrite subset_eq_compat. apply RXZ. easy.
Qed.
Lemma 函数性投影 R : 函数性 R → 函数性 (投影 R).
Proof.
intros FR [x xP] [y yP] [z zP] RXY RYZ.
unfold 投影 in *; simpl in *.
apply subset_eq_compat. eapply FR; eauto.
Qed.
(* ⋃ {x ∊ { ⌜R⌝ @ A } | 函数性 R} *)
Definition 替代嵌入 R A := ⋃ ({⌜R⌝ @ A,} ∩ₚ (λ _, 函数性 R)).
Notation "R ⌜@⌝ A" := (替代嵌入 R A) (at level 70).
Lemma 替代嵌入_函数性 R A : 函数性 R → R ⌜@⌝ A = ⌜R⌝ @ A.
Proof. intros FR. unfold 替代嵌入. now rewrite 全分离, 并单. Qed.
Lemma 替代嵌入_非函数性 R A : ¬ 函数性 R → R ⌜@⌝ A = ∅.
Proof. intros nFR. unfold 替代嵌入. now rewrite 未分离, 并空. Qed.
Definition 子结构 : ZF结构.
apply (Build_ZF结构) with (集 := ℙ).
- intros [x _] [y _]. apply (x ∈ y).
- exists ∅. apply 空集封闭类.
- intros [x xP]. exists (⋃ x). now apply 并集封闭类.
- intros [x xP]. exists (𝒫 x). now apply 幂集封闭类.
- intros R [A AP]. exists (R ⌜@⌝ A). 排中 (函数性 R).
+ rewrite 替代嵌入_函数性; auto.
apply 替代封闭类; auto. apply 嵌入的函数性; auto.
now intros x y [_ [yP _]] _.
+ rewrite 替代嵌入_非函数性; auto. now apply 空集封闭类.
Defined.
(* Inner model ⊨ ZF *)
Definition 内模型 : ZF.
Proof.
apply (Build_ZF) with (结构 := 子结构).
- intros [x xP] [y yP] XY YX.
enough (x = y). subst y. erewrite subset_eq_compat; reflexivity.
apply 外延.
+ intros z zx. exact (XY (exist P z (成员封闭类 zx xP)) zx).
+ intros z zy. exact (YX (exist P z (成员封闭类 zy yP)) zy).
- intros [x xP] X0. eapply 空集. apply X0.
- intros [x xP] [a aP]. split; intros H.
+ apply (并集 x a) in H as [y [xy ya]]. now exists (exist P y (成员封闭类 ya aP)).
+ apply (并集 x a). destruct H as [[y yP] XYA]. now exists y.
- intros [x xP] [a aP]. split; intros H.
+ apply (幂集 x a) in H. intros [y yP] YX. apply H, YX.
+ apply (幂集 x a). intros y yx. exact (H (exist P y (成员封闭类 yx xP)) yx).
- intros R [a aP] FR [y yP]. split; intros H.
+ apply 并集 in H. rewrite 全分离 in H; auto.
apply 并集 in H. rewrite 并单 in H.
apply 替代 in H as [x[xa[xP[yP' RXY]]]]. 2:now apply 嵌入的函数性.
exists (exist P x (成员封闭类 xa aP)).
replace (成员封闭类 xa aP) with xP. replace yP with yP'. now split.
apply proof_irrelevance. apply proof_irrelevance.
+ apply 并集. rewrite 全分离; auto.
apply 并集. rewrite 并单. destruct H as [[x xP][XA RXY]].
apply 替代. now apply 嵌入的函数性. exists x.
split. apply XA. exists xP, yP. apply RXY.
- intros [x xP]. induction (正则 x) as [x _ IH].
constructor. intros [y yP] Y. apply IH. apply Y.
Defined.
End InnerModel.
Notation "R ⌜@⌝ A" := (替代嵌入 R A) (at level 70) : zf_scope.
|
```python
# Matrix Reductions
# elementary_row_op (elementary row operation)
# elementary_col_op
import sympy as sm
T = sm.Matrix([[1,2,3],[4,5,6],[7,8,9]])
T
```
$\displaystyle \left[\begin{matrix}1 & 2 & 3\\4 & 5 & 6\\7 & 8 & 9\end{matrix}\right]$
```python
T1 = T.elementary_row_op(op="n->n+km",row=1,row2=0,k=-4)
T1
```
$\displaystyle \left[\begin{matrix}1 & 2 & 3\\0 & -3 & -6\\7 & 8 & 9\end{matrix}\right]$
```python
T2 = T1.elementary_row_op(op="n->n+km",row=2,row2=0,k=-7)
T2
```
$\displaystyle \left[\begin{matrix}1 & 2 & 3\\0 & -3 & -6\\0 & -6 & -12\end{matrix}\right]$
```python
T3 = T2.elementary_row_op(op='n->n+km',row=2,row2=1,k=-2)
T3
```
$\displaystyle \left[\begin{matrix}1 & 2 & 3\\0 & -3 & -6\\0 & 0 & 0\end{matrix}\right]$
```python
T.echelon_form()
```
$\displaystyle \left[\begin{matrix}1 & 2 & 3\\0 & -3 & -6\\0 & 0 & 0\end{matrix}\right]$
```python
T4 = T3.elementary_row_op(op='n->kn',row=1,k=-1/3)
```
```python
T4.elementary_row_op(op='n->n+km',row=0,row2=1,k=-2)
```
$\displaystyle \left[\begin{matrix}1 & 0 & -1.0\\0 & 1.0 & 2.0\\0 & 0 & 0\end{matrix}\right]$
```python
T.rref()
```
(Matrix([
[1, 0, -1],
[0, 1, 2],
[0, 0, 0]]),
(0, 1))
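
The first cell above also lists `elementary_col_op`, which is never demonstrated. A minimal sketch of a column operation on the same matrix `T` (assuming SymPy's `elementary_col_op` mirrors `elementary_row_op`, with `col`/`col2` taking the place of `row`/`row2`):

```python
# column operation: c2 -> c2 + k*c0, mirroring the row operations above
T.elementary_col_op(op='n->n+km', col=2, col2=0, k=-3)
```

With `k=-3` this replaces column 2 by column 2 − 3·column 0, so the columns become (1, 4, 7), (2, 5, 8) and (0, −6, −12); like the row variant (as the unchanged `T.rref()` result above shows), the call returns a new matrix rather than modifying `T` in place.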
```python
```
|
module Problem1
import Common.Lte
%default total
Decr : (Nat -> Nat) -> Type
Decr f = (x : Nat) -> LTE (f (S x)) (f x)
Valley : (Nat -> Nat) -> Nat -> Nat -> Type
Valley f n x = (y : Nat) -> LTE x y -> LTE y (n+x) -> f y = f x
decrN : (n : Nat) -> (f : Nat -> Nat) -> Decr f -> (x : Nat) ->
LTE (f (n + x)) (f x)
decrN Z f decr x = lteRefl
decrN (S n') f decr x =
let ind = decrN n' f decr x
decrLte = decr (n' + x)
in lteTransitive decrLte ind
boundedAbove : (decr : Decr f) -> (a : Nat) -> (x : Nat) -> LTE a x ->
LTE (f x) (f a)
boundedAbove decr a x lte =
case splitLte lte of
(Left eq) => rewrite eq in lteRefl
(Right lt) =>
let S x' = x
lte' = fromLteSucc lt
ind = boundedAbove decr a x' lte'
in decr x' `lteTransitive` ind
boundedBelow : (n : Nat) -> (decr : Decr f) ->
(a : Nat) -> (x : Nat) -> LTE x (n + a) ->
LTE (f (n + a)) (f x)
boundedBelow Z decr a x lte = boundedAbove decr x a lte
boundedBelow n@(S n') decr a x lte =
case splitLte lte of
(Left eq) => boundedAbove decr x (S (n' + a)) lte
(Right lt) =>
let ind = boundedBelow n' decr a x (fromLteSucc lt)
in decr (n' + a) `lteTransitive` ind
mkValley : (n : Nat) -> (f : Nat -> Nat) -> (decr : Decr f) ->
(x : Nat) -> f x = f (n + x) ->
(y : Nat) -> LTE x y -> LTE y (n + x) -> f y = f x
mkValley n f decr x prf y lteL lteU =
let u = boundedAbove decr x y lteL
l = boundedBelow n decr x y lteU
in tightBound u (rewrite prf in l)
decrValley_rec : (n : Nat) -> (f : Nat -> Nat) -> (decr : Decr f) ->
(x : Nat) -> (bound : Nat) -> (boundLte : LTE (f x) bound) ->
(a : Nat ** Valley f n a)
decrValley_rec n f decr x Z boundLte =
let boundLteN = decrN n f decr x
boundLteN' = lteTransitive boundLteN boundLte
xEqZ = lteZeqZ boundLte
xNEqZ = lteZeqZ boundLteN'
xEqNx = xEqZ `trans` sym xNEqZ
in (x ** mkValley n f decr x xEqNx)
decrValley_rec n f decr x bound@(S bound') boundLte =
case splitLte boundLte of
(Right (LTESucc boundLte')) => decrValley_rec n f decr x bound' boundLte'
(Left boundEq) =>
let boundLteN = decrN n f decr x
in case splitLte boundLteN of
(Right boundLtN) =>
let boundLteN' = fromLteSucc (boundLtN `lteTransitive` boundLte)
in decrValley_rec n f decr (n + x) bound' boundLteN'
(Left boundEqN) => (x ** mkValley n f decr x (sym boundEqN))
decrValley : (n : Nat) -> (f : Nat -> Nat) -> Decr f -> (x : Nat ** Valley f n x)
decrValley n f decr = decrValley_rec n f decr Z (f Z) lteRefl
|
= BRIX11 run list command
== Collection
taox11
== Usage
brix11 run list [options] [-- test-options]
=== options
(any test-options following '--' will be passed on unchecked)
-l, --list=PATH Specifies regression test list to feed auto_run_test.
Default: none
-r, --root=PATH Specifies root directory for running the tests.
Default: <listfile location>
--debug Run test using Debug deployment environment (only applicable for msvc/icc builds).
--release Run test using Release deployment environment (only applicable for msvc/icc builds).
-f, --force Force all tasks to run even if their dependencies do not require them to.
Default: off
-v, --verbose Run with increased verbosity level. Repeat to increase more.
Default: 0
-h, --help Show this help message.
_NOTE_
The *--debug* and *--release* switches are only available on Windows platforms and applicable for
build environments utilizing the MSVC or ICC compiler toolsets.
== Description
Executes the 'auto_run_tests' script which will run the tests from the specified list
according to the test-options passed.
== Example
$ brix11 run list
Executes the auto_run_tests script with the default list at the default location.
$ brix11 run list -- -Config ANDROID -s sandbox
Executes the auto_run_tests script with the default list at the default location passing on the
options '-Config ANDROID -s sandbox'.
$ brix11 run list -l mytests.lst -r /home/me/develop
Executes the auto_run_tests script with the list 'mytests.lst' at the location '/home/me/develop'.
$ brix11 run list --debug
Executes the auto_run_tests script with the default list at the default location using the debug
deployment environment.
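
  A further illustration combining the options listed above ('mytests.lst' is purely illustrative, as in the earlier example):

  $ brix11 run list -f -v -l mytests.lst

  Executes the auto_run_tests script with the list 'mytests.lst' at its default location, forcing all
  tasks to run and with increased verbosity.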
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.HomotopyGroup.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.HLevels
import Cubical.Foundations.GroupoidLaws as GL
open import Cubical.Data.Nat
open import Cubical.Data.Group.Base
open import Cubical.HITs.SetTruncation
Pointed : ∀ {ℓ} → Type (ℓ-suc ℓ)
Pointed {ℓ} = Σ[ A ∈ Type ℓ ] A
Ω : ∀ {ℓ} → Pointed {ℓ} → Pointed {ℓ}
Ω (A , a ) = ( (a ≡ a) , refl)
Ω^_ : ∀ {ℓ} → ℕ → Pointed {ℓ} → Pointed {ℓ}
(Ω^ 0) p = p
(Ω^ (suc n)) p = Ω ((Ω^ n) p)
π^_ : ∀ {ℓ} → ℕ → Pointed {ℓ} → Group {ℓ}
π^_ {ℓ} n p = group ∥ A ∥₀ squash₀ g
where
n' : ℕ
n' = suc n
A : Type ℓ
A = (Ω^ n') p .fst
g : isGroup ∥ A ∥₀
g = group-struct e _⁻¹ _⊙_ lUnit rUnit assoc lCancel rCancel
where
e : ∥ A ∥₀
e = ∣ (Ω^ n') p .snd ∣₀
_⁻¹ : ∥ A ∥₀ → ∥ A ∥₀
_⁻¹ = elimSetTrunc {B = λ _ → ∥ A ∥₀} (λ x → squash₀) λ a → ∣ sym a ∣₀
_⊙_ : ∥ A ∥₀ → ∥ A ∥₀ → ∥ A ∥₀
_⊙_ = elimSetTrunc2 (λ _ _ → squash₀) λ a₀ a₁ → ∣ a₀ ∙ a₁ ∣₀
lUnit : (a : ∥ A ∥₀) → (e ⊙ a) ≡ a
lUnit = elimSetTrunc (λ _ → isProp→isSet (squash₀ _ _))
(λ a → cong ∣_∣₀ (sym (GL.lUnit a) ))
rUnit : (a : ∥ A ∥₀) → a ⊙ e ≡ a
rUnit = elimSetTrunc (λ _ → isProp→isSet (squash₀ _ _))
(λ a → cong ∣_∣₀ (sym (GL.rUnit a) ))
assoc : (a b c : ∥ A ∥₀) → ((a ⊙ b) ⊙ c) ≡ (a ⊙ (b ⊙ c))
assoc = elimSetTrunc3 (λ _ _ _ → isProp→isSet (squash₀ _ _))
(λ a b c → cong ∣_∣₀ (sym (GL.assoc _ _ _)))
lCancel : (a : ∥ A ∥₀) → ((a ⁻¹) ⊙ a) ≡ e
lCancel = elimSetTrunc (λ _ → isProp→isSet (squash₀ _ _))
λ a → cong ∣_∣₀ (GL.lCancel _)
rCancel : (a : ∥ A ∥₀) → (a ⊙ (a ⁻¹)) ≡ e
rCancel = elimSetTrunc (λ _ → isProp→isSet (squash₀ _ _))
λ a → cong ∣_∣₀ (GL.rCancel _)
|
using RLInterface
using POMDPModels
using Test
using Random
include("zmq.jl")
function sim(env, nsteps=100, rng=MersenneTwister(0))
o = reset!(env)
step = 1
done = false
r_tot = 0.0
na = length(actions(env))
dims = obs_dimensions(env)
while !done && step <= nsteps
action = sample_action(env)
obs, rew, done, info = step!(env, action)
r_tot += rew
step += 1
end
return r_tot
end
@testset "Sim" begin
envs = [MDPEnvironment(SimpleGridWorld()),
MDPEnvironment(InvertedPendulum()),
MDPEnvironment(MountainCar()),
POMDPEnvironment(TMaze()),
POMDPEnvironment(BabyPOMDP()),
POMDPEnvironment(TigerPOMDP()),
KMarkovEnvironment(TMaze(), k=4),
KMarkovEnvironment(BabyPOMDP(), k=4),
KMarkovEnvironment(TigerPOMDP(), k=4)]
for env in envs
r = sim(env)
process(env)
end
end
if VERSION >= v"1.1"
@testset "type stability" begin
env = MDPEnvironment(SimpleGridWorld())
@inferred reset!(env)
@inferred step!(env, :up)
@inferred sim(env)
env = POMDPEnvironment(TigerPOMDP())
@inferred reset!(env)
@inferred step!(env, 0)
@inferred sim(env)
env = KMarkovEnvironment(TigerPOMDP(), k=4)
@inferred reset!(env)
@inferred step!(env, 0)
@inferred sim(env)
end
end
|
lemma div_poly_less: fixes x :: "'a::field poly" assumes "degree x < degree y" shows "x div y = 0" |
/-
Copyright (c) 2020 David Wärn. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: David Wärn
-/
import order.basic
import data.equiv.encodable.basic
import order.atoms
/-!
# Order ideals, cofinal sets, and the Rasiowa–Sikorski lemma
## Main definitions
Throughout this file, `P` is at least a preorder, but some sections require more
structure, such as a bottom element, a top element, or a join-semilattice structure.
- `order.ideal P`: the type of nonempty, upward directed, and downward closed subsets of `P`.
Dual to the notion of a filter on a preorder.
- `order.is_ideal P`: a predicate for when a `set P` is an ideal.
- `order.ideal.principal p`: the principal ideal generated by `p : P`.
- `order.ideal.is_proper P`: a predicate for proper ideals.
Dual to the notion of a proper filter.
- `order.ideal.is_maximal`: a predicate for maximal ideals.
Dual to the notion of an ultrafilter.
- `ideal_inter_nonempty P`: a predicate for when the intersection of any two ideals of
`P` is nonempty.
- `order.cofinal P`: the type of subsets of `P` containing arbitrarily large elements.
Dual to the notion of 'dense set' used in forcing.
- `order.ideal_of_cofinals p 𝒟`, where `p : P`, and `𝒟` is a countable family of cofinal
subsets of P: an ideal in `P` which contains `p` and intersects every set in `𝒟`. (This a form
of the Rasiowa–Sikorski lemma.)
## References
- <https://en.wikipedia.org/wiki/Ideal_(order_theory)>
- <https://en.wikipedia.org/wiki/Cofinal_(mathematics)>
- <https://en.wikipedia.org/wiki/Rasiowa%E2%80%93Sikorski_lemma>
Note that for the Rasiowa–Sikorski lemma, Wikipedia uses the opposite ordering on `P`,
in line with most presentations of forcing.
## Tags
ideal, cofinal, dense, countable, generic
-/
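/- A small usage sketch (illustrative only, not part of the original file): with the
definitions below in scope, membership in a principal ideal unfolds to the expected
inequality, e.g.
  `example {P : Type*} [preorder P] {x p : P} : x ∈ order.ideal.principal p ↔ x ≤ p := order.ideal.mem_principal`
(see `mem_principal` later in this file). -/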
namespace order
variables {P : Type*}
/-- An ideal on a preorder `P` is a subset of `P` that is
- nonempty
- upward directed (any pair of elements in the ideal has an upper bound in the ideal)
- downward closed (any element less than an element of the ideal is in the ideal). -/
structure ideal (P) [preorder P] :=
(carrier : set P)
(nonempty : carrier.nonempty)
(directed : directed_on (≤) carrier)
(mem_of_le : ∀ {x y : P}, x ≤ y → y ∈ carrier → x ∈ carrier)
/-- A subset of a preorder `P` is an ideal if it is
- nonempty
- upward directed (any pair of elements in the ideal has an upper bound in the ideal)
- downward closed (any element less than an element of the ideal is in the ideal). -/
@[mk_iff] structure is_ideal {P} [preorder P] (I : set P) : Prop :=
(nonempty : I.nonempty)
(directed : directed_on (≤) I)
(mem_of_le : ∀ {x y : P}, x ≤ y → y ∈ I → x ∈ I)
/-- Create an element of type `order.ideal` from a set satisfying the predicate
`order.is_ideal`. -/
def is_ideal.to_ideal [preorder P] {I : set P} (h : is_ideal I) : ideal P :=
⟨I, h.1, h.2, h.3⟩
/-- A preorder `P` has the `ideal_inter_nonempty` property if the
intersection of any two ideals is nonempty.
Most importantly, a `semilattice_sup` preorder with this property
satisfies that its ideal poset is a lattice.
-/
class ideal_inter_nonempty (P) [preorder P] : Prop :=
(inter_nonempty : ∀ (I J : ideal P), (I.carrier ∩ J.carrier).nonempty)
lemma inter_nonempty [preorder P] [ideal_inter_nonempty P] :
∀ (I J : ideal P), (I.carrier ∩ J.carrier).nonempty :=
ideal_inter_nonempty.inter_nonempty
namespace ideal
section preorder
variables [preorder P] {x y : P} {I J : ideal P}
/-- The smallest ideal containing a given element. -/
def principal (p : P) : ideal P :=
{ carrier := { x | x ≤ p },
nonempty := ⟨p, le_refl _⟩,
directed := λ x hx y hy, ⟨p, le_refl _, hx, hy⟩,
mem_of_le := λ x y hxy hy, le_trans hxy hy, }
instance [inhabited P] : inhabited (ideal P) :=
⟨ideal.principal $ default P⟩
/-- An ideal of `P` can be viewed as a subset of `P`. -/
instance : has_coe (ideal P) (set P) := ⟨carrier⟩
/-- For the notation `x ∈ I`. -/
instance : has_mem P (ideal P) := ⟨λ x I, x ∈ (I : set P)⟩
@[simp] lemma mem_coe : x ∈ (I : set P) ↔ x ∈ I := iff_of_eq rfl
@[simp] lemma mem_principal : x ∈ principal y ↔ x ≤ y := by refl
/-- Two ideals are equal when their underlying sets are equal. -/
@[ext] lemma ext : ∀ (I J : ideal P), (I : set P) = J → I = J
| ⟨_, _, _, _⟩ ⟨_, _, _, _⟩ rfl := rfl
@[simp, norm_cast] lemma ext_set_eq {I J : ideal P} : (I : set P) = J ↔ I = J :=
⟨by ext, congr_arg _⟩
lemma ext'_iff {I J : ideal P} : I = J ↔ (I : set P) = J := ext_set_eq.symm
lemma is_ideal (I : ideal P) : is_ideal (I : set P) := ⟨I.2, I.3, I.4⟩
/-- The partial ordering by subset inclusion, inherited from `set P`. -/
instance : partial_order (ideal P) := partial_order.lift coe ext
@[trans] lemma mem_of_mem_of_le : x ∈ I → I ≤ J → x ∈ J :=
@set.mem_of_mem_of_subset P x I J
@[simp] lemma principal_le_iff : principal x ≤ I ↔ x ∈ I :=
⟨λ (h : ∀ {y}, y ≤ x → y ∈ I), h (le_refl x),
λ h_mem y (h_le : y ≤ x), I.mem_of_le h_le h_mem⟩
lemma mem_compl_of_ge {x y : P} : x ≤ y → x ∈ (I : set P)ᶜ → y ∈ (I : set P)ᶜ :=
λ h, mt (I.mem_of_le h)
/-- A proper ideal is one that is not the whole set.
Note that the whole set might not be an ideal. -/
@[mk_iff] class is_proper (I : ideal P) : Prop := (ne_univ : (I : set P) ≠ set.univ)
lemma is_proper_of_not_mem {I : ideal P} {p : P} (nmem : p ∉ I) : is_proper I :=
⟨λ hp, begin
change p ∉ ↑I at nmem,
rw hp at nmem,
exact nmem (set.mem_univ p),
end⟩
/-- An ideal is maximal if it is maximal in the collection of proper ideals.
Note that we cannot use the `is_coatom` class because `P` might not have a `top` element.
-/
@[mk_iff] class is_maximal (I : ideal P) extends is_proper I : Prop :=
(maximal_proper : ∀ ⦃J : ideal P⦄, I < J → J.carrier = set.univ)
end preorder
section order_bot
variables [order_bot P] {I : ideal P}
/-- A specific witness of `I.nonempty` when `P` has a bottom element. -/
@[simp] lemma bot_mem : ⊥ ∈ I :=
I.mem_of_le bot_le I.nonempty.some_mem
/-- There is a bottom ideal when `P` has a bottom element. -/
instance : order_bot (ideal P) :=
{ bot := principal ⊥,
bot_le := by simp,
.. ideal.partial_order }
end order_bot
section order_top
variables [order_top P]
/-- There is a top ideal when `P` has a top element. -/
instance : order_top (ideal P) :=
{ top := principal ⊤,
le_top := λ I x h, le_top,
.. ideal.partial_order }
@[simp] lemma top_carrier : (⊤ : ideal P).carrier = set.univ :=
set.univ_subset_iff.1 (λ p _, le_top)
@[simp] lemma top_coe : ((⊤ : ideal P) : set P) = set.univ := top_carrier
lemma top_of_mem_top {I : ideal P} (mem_top : ⊤ ∈ I) : I = ⊤ :=
begin
ext,
change x ∈ I.carrier ↔ x ∈ (⊤ : ideal P).carrier,
split,
{ simp [top_carrier] },
{ exact λ _, I.mem_of_le le_top mem_top }
end
lemma is_proper_of_ne_top {I : ideal P} (ne_top : I ≠ ⊤) : is_proper I :=
is_proper_of_not_mem (λ h, ne_top (top_of_mem_top h))
lemma is_proper.ne_top {I : ideal P} (hI : is_proper I) : I ≠ ⊤ :=
begin
intro h,
rw [ext'_iff, top_coe] at h,
apply hI.ne_univ,
assumption,
end
lemma _root_.is_coatom.is_proper {I : ideal P} (hI : is_coatom I) : is_proper I :=
is_proper_of_ne_top hI.1
lemma is_proper_iff_ne_top {I : ideal P} : is_proper I ↔ I ≠ ⊤ :=
⟨λ h, h.ne_top, λ h, is_proper_of_ne_top h⟩
lemma is_maximal.is_coatom {I : ideal P} (h : is_maximal I) : is_coatom I :=
⟨is_maximal.to_is_proper.ne_top,
λ _ _, by { rw [ext'_iff, top_coe], exact is_maximal.maximal_proper ‹_› }⟩
lemma is_maximal.is_coatom' {I : ideal P} [is_maximal I] : is_coatom I :=
is_maximal.is_coatom ‹_›
lemma _root_.is_coatom.is_maximal {I : ideal P} (hI : is_coatom I) : is_maximal I :=
{ maximal_proper := λ _ _, by simp [hI.2 _ ‹_›],
..is_coatom.is_proper ‹_› }
lemma is_maximal_iff_is_coatom {I : ideal P} : is_maximal I ↔ is_coatom I :=
⟨λ h, h.is_coatom, λ h, h.is_maximal⟩
end order_top
section semilattice_sup
variables [semilattice_sup P] {x y : P} {I : ideal P}
/-- A specific witness of `I.directed` when `P` has joins. -/
lemma sup_mem (x y ∈ I) : x ⊔ y ∈ I :=
let ⟨z, h_mem, hx, hy⟩ := I.directed x ‹_› y ‹_› in
I.mem_of_le (sup_le hx hy) h_mem
@[simp] lemma sup_mem_iff : x ⊔ y ∈ I ↔ x ∈ I ∧ y ∈ I :=
⟨λ h, ⟨I.mem_of_le le_sup_left h, I.mem_of_le le_sup_right h⟩,
λ h, sup_mem x y h.left h.right⟩
end semilattice_sup
section semilattice_sup_ideal_inter_nonempty
variables [semilattice_sup P] [ideal_inter_nonempty P] {x : P} {I J K : ideal P}
/-- The intersection of two ideals is an ideal, when it is nonempty and `P` has joins. -/
def inf (I J : ideal P) : ideal P :=
{ carrier := I ∩ J,
nonempty := inter_nonempty I J,
directed := λ x ⟨_, _⟩ y ⟨_, _⟩, ⟨x ⊔ y, ⟨sup_mem x y ‹_› ‹_›, sup_mem x y ‹_› ‹_›⟩, by simp⟩,
mem_of_le := λ x y h ⟨_, _⟩, ⟨mem_of_le I h ‹_›, mem_of_le J h ‹_›⟩ }
/-- There is a smallest ideal containing two ideals, when their intersection is nonempty and
`P` has joins. -/
def sup (I J : ideal P) : ideal P :=
{ carrier := {x | ∃ (i ∈ I) (j ∈ J), x ≤ i ⊔ j},
nonempty := by { cases inter_nonempty I J, exact ⟨w, w, h.1, w, h.2, le_sup_left⟩ },
directed := λ x ⟨xi, _, xj, _, _⟩ y ⟨yi, _, yj, _, _⟩,
⟨x ⊔ y,
⟨xi ⊔ yi, sup_mem xi yi ‹_› ‹_›,
xj ⊔ yj, sup_mem xj yj ‹_› ‹_›,
sup_le
(calc x ≤ xi ⊔ xj : ‹_›
... ≤ (xi ⊔ yi) ⊔ (xj ⊔ yj) : sup_le_sup le_sup_left le_sup_left)
(calc y ≤ yi ⊔ yj : ‹_›
... ≤ (xi ⊔ yi) ⊔ (xj ⊔ yj) : sup_le_sup le_sup_right le_sup_right)⟩,
le_sup_left, le_sup_right⟩,
mem_of_le := λ x y _ ⟨yi, _, yj, _, _⟩, ⟨yi, ‹_›, yj, ‹_›, le_trans ‹x ≤ y› ‹_›⟩ }
lemma sup_le : I ≤ K → J ≤ K → sup I J ≤ K :=
λ hIK hJK x ⟨i, hiI, j, hjJ, hxij⟩,
K.mem_of_le hxij $ sup_mem i j (mem_of_mem_of_le hiI hIK) (mem_of_mem_of_le hjJ hJK)
instance : lattice (ideal P) :=
{ sup := sup,
le_sup_left := λ I J (i ∈ I), by { cases nonempty J, exact ⟨i, ‹_›, w, ‹_›, le_sup_left⟩ },
le_sup_right := λ I J (j ∈ J), by { cases nonempty I, exact ⟨w, ‹_›, j, ‹_›, le_sup_right⟩ },
sup_le := @sup_le _ _ _,
inf := inf,
inf_le_left := λ I J, set.inter_subset_left I J,
inf_le_right := λ I J, set.inter_subset_right I J,
le_inf := λ I J K, set.subset_inter,
.. ideal.partial_order }
@[simp] lemma mem_inf : x ∈ I ⊓ J ↔ x ∈ I ∧ x ∈ J := iff_of_eq rfl
@[simp] lemma mem_sup : x ∈ I ⊔ J ↔ ∃ (i ∈ I) (j ∈ J), x ≤ i ⊔ j := iff_of_eq rfl
lemma lt_sup_principal_of_not_mem (hx : x ∉ I) : I < I ⊔ principal x :=
begin
apply lt_of_le_of_ne le_sup_left,
intro h,
simp at h,
exact hx h
end
end semilattice_sup_ideal_inter_nonempty
section semilattice_sup_bot
variables [semilattice_sup_bot P]
@[priority 100]
instance semilattice_sup_bot.ideal_inter_nonempty : ideal_inter_nonempty P :=
{ inter_nonempty := λ _ _, ⟨⊥, ⟨bot_mem, bot_mem⟩⟩ }
end semilattice_sup_bot
section semilattice_inf
variable [semilattice_inf P]
@[priority 100]
instance semilattice_inf.ideal_inter_nonempty : ideal_inter_nonempty P :=
{ inter_nonempty := λ I J, begin
cases I.nonempty with i _,
cases J.nonempty with j _,
exact ⟨i ⊓ j, I.mem_of_le inf_le_left ‹_›, J.mem_of_le inf_le_right ‹_›⟩
end }
end semilattice_inf
section distrib_lattice
variables [distrib_lattice P]
variables {I J : ideal P}
lemma eq_sup_of_le_sup {x i j: P} (hi : i ∈ I) (hj : j ∈ J) (hx : x ≤ i ⊔ j):
∃ (i' ∈ I) (j' ∈ J), x = i' ⊔ j' :=
begin
refine ⟨x ⊓ i, I.mem_of_le inf_le_right hi, x ⊓ j, J.mem_of_le inf_le_right hj, _⟩,
calc
x = x ⊓ (i ⊔ j) : left_eq_inf.mpr hx
... = (x ⊓ i) ⊔ (x ⊓ j) : inf_sup_left,
end
lemma coe_sup_eq : ↑(I ⊔ J) = {x | ∃ i ∈ I, ∃ j ∈ J, x = i ⊔ j} :=
begin
ext,
rw [mem_coe, mem_sup],
exact ⟨λ ⟨_, _, _, _, _⟩, eq_sup_of_le_sup ‹_› ‹_› ‹_›,
λ ⟨i, _, j, _, _⟩, ⟨i, ‹_›, j, ‹_›, le_of_eq ‹_›⟩⟩
end
end distrib_lattice
end ideal
/-- For a preorder `P`, `cofinal P` is the type of subsets of `P`
containing arbitrarily large elements. They are the dense sets in
the topology whose open sets are terminal segments. -/
structure cofinal (P) [preorder P] :=
(carrier : set P)
(mem_gt : ∀ x : P, ∃ y ∈ carrier, x ≤ y)
namespace cofinal
variables [preorder P]
instance : inhabited (cofinal P) :=
⟨{ carrier := set.univ, mem_gt := λ x, ⟨x, trivial, le_refl _⟩ }⟩
instance : has_mem P (cofinal P) := ⟨λ x D, x ∈ D.carrier⟩
variables (D : cofinal P) (x : P)
/-- A (noncomputable) element of a cofinal set lying above a given element. -/
noncomputable def above : P := classical.some $ D.mem_gt x
lemma above_mem : D.above x ∈ D :=
exists.elim (classical.some_spec $ D.mem_gt x) $ λ a _, a
lemma le_above : x ≤ D.above x :=
exists.elim (classical.some_spec $ D.mem_gt x) $ λ _ b, b
end cofinal
section ideal_of_cofinals
variables [preorder P] (p : P) {ι : Type*} [encodable ι] (𝒟 : ι → cofinal P)
/-- Given a starting point, and a countable family of cofinal sets,
this is an increasing sequence that intersects each cofinal set. -/
noncomputable def sequence_of_cofinals : ℕ → P
| 0 := p
| (n+1) := match encodable.decode ι n with
| none := sequence_of_cofinals n
| some i := (𝒟 i).above (sequence_of_cofinals n)
end
lemma sequence_of_cofinals.monotone : monotone (sequence_of_cofinals p 𝒟) :=
by { apply monotone_of_monotone_nat, intros n, dunfold sequence_of_cofinals,
cases encodable.decode ι n, { refl }, { apply cofinal.le_above }, }
lemma sequence_of_cofinals.encode_mem (i : ι) :
sequence_of_cofinals p 𝒟 (encodable.encode i + 1) ∈ 𝒟 i :=
by { dunfold sequence_of_cofinals, rw encodable.encodek, apply cofinal.above_mem, }
/-- Given an element `p : P` and a family `𝒟` of cofinal subsets of a preorder `P`,
indexed by a countable type, `ideal_of_cofinals p 𝒟` is an ideal in `P` which
- contains `p`, according to `mem_ideal_of_cofinals p 𝒟`, and
- intersects every set in `𝒟`, according to `cofinal_meets_ideal_of_cofinals p 𝒟`.
This proves the Rasiowa–Sikorski lemma. -/
def ideal_of_cofinals : ideal P :=
{ carrier := { x : P | ∃ n, x ≤ sequence_of_cofinals p 𝒟 n },
nonempty := ⟨p, 0, le_refl _⟩,
directed := λ x ⟨n, hn⟩ y ⟨m, hm⟩,
⟨_, ⟨max n m, le_refl _⟩,
le_trans hn $ sequence_of_cofinals.monotone p 𝒟 (le_max_left _ _),
le_trans hm $ sequence_of_cofinals.monotone p 𝒟 (le_max_right _ _) ⟩,
mem_of_le := λ x y hxy ⟨n, hn⟩, ⟨n, le_trans hxy hn⟩, }
lemma mem_ideal_of_cofinals : p ∈ ideal_of_cofinals p 𝒟 := ⟨0, le_refl _⟩
/-- `ideal_of_cofinals p 𝒟` is `𝒟`-generic. -/
lemma cofinal_meets_ideal_of_cofinals (i : ι) : ∃ x : P, x ∈ 𝒟 i ∧ x ∈ ideal_of_cofinals p 𝒟 :=
⟨_, sequence_of_cofinals.encode_mem p 𝒟 i, _, le_refl _⟩
end ideal_of_cofinals
end order
|
State Before: ι : Type ?u.12245
α : Type u_1
β : Type ?u.12251
γ : Type ?u.12254
inst✝ : CommMonoid α
s t : Multiset α
a✝ : α
m : Multiset ι
f g : ι → α
a b : α
⊢ prod {a, b} = a * b State After: no goals Tactic: rw [insert_eq_cons, prod_cons, prod_singleton] |
(*
Copyright 2022 ZhengPu Shi
This file is part of CoqMatrix. It is distributed under the MIT
"expat license". You should have recieved a LICENSE file with it.
purpose : Vector Theory implemented with DepList.
author : ZhengPu Shi
date : 2022.06
remark :
*)
Require Export VectorTheory.
Require Import DepList.MatrixTheoryDL.
Require Import VectorDef.
Import VectorNotations.
(* ######################################################################### *)
(** * Basic vector theory implemented with DepList *)
Module BasicVectorTheoryDL (E : ElementType).
(* ==================================== *)
(** ** Also contain matrix theory *)
Module Export BasicMatrixTheoryDL := BasicMatrixTheoryDL E.
(* ==================================== *)
(** ** Vector element type *)
Export E.
Infix "==" := (eqlistA Aeq) : list_scope.
Open Scope nat_scope.
Open Scope A_scope.
Open Scope vec_scope.
(* ==================================== *)
(** ** Vector type *)
Definition vec n := mat n 1.
(** matrix equality *)
Definition veq {n} (v1 v2 : vec n) := meq v1 v2.
Infix "==" := veq : vec_scope.
(** meq is equivalence relation *)
Lemma veq_equiv : forall n, Equivalence (veq (n:=n)).
Proof.
intros. apply meq_equiv.
Qed.
(** Get element of vector *)
Definition vnth {n} (v : vec n) i : A := @mnth n 1 v i 0.
(* Notation "v .[ i ]" := (vnth v i) (at level 30) : vec_scope. *)
(* Notation "v . [ i ]" := (vnth i v) (at level 30). *)
(** veq and mnth should satisfy this constraint *)
Lemma veq_iff_vnth : forall {n : nat} (v1 v2 : vec n),
(v1 == v2) <-> (forall i, i < n -> (vnth v1 i == vnth v2 i)%A).
Proof.
intros.
Admitted.
(* ==================================== *)
(** ** Convert between list and vector *)
Definition v2l {n} (v : vec n) : list A := hdc A0 (m2l v).
(* Definition v2l' {n} (v : vec n) : list A := to_list (mcoli v F1). *)
Definition l2v {n} (l : list A) : vec n := l2m (row2col l).
Lemma v2l_length : forall {n} (v : vec n), length (v2l v) = n.
Admitted.
Lemma v2l_l2v_id : forall {n} (l : list A),
length l = n -> (@v2l n (@l2v n l) == l)%list.
Admitted.
Lemma l2v_v2l_id : forall {n} (v : vec n), l2v (v2l v) == v.
Admitted.
(* ==================================== *)
(** ** Convert between tuples and vector *)
Definition t2v_2 (t : @T2 A) : vec 2 := let '(a,b) := t in [[a];[b]].
Definition t2v_3 (t : @T3 A) : vec 3 := let '(a,b,c) := t in [[a];[b];[c]].
Definition t2v_4 (t : @T4 A) : vec 4 := let '(a,b,c,d) := t in [[a];[b];[c];[d]].
Definition v2t_2 (v : vec 2) : @T2 A := (
hd (hd v),
hd (hd (tl v))
).
Definition v2t_3 (v : vec 3) : @T3 A := (
hd (hd v),
hd (hd (tl v)),
hd (hd (tl (tl v)))
).
Definition v2t_4 (v : vec 4) : @T4 A := (
hd (hd v),
hd (hd (tl v)),
hd (hd (tl (tl v))),
hd (hd (tl (tl (tl v))))
).
Lemma v2t_t2v_id_2 : forall (t : A * A), v2t_2 (t2v_2 t) = t.
Proof.
intros. destruct t. simpl. unfold v2t_2. f_equal.
Qed.
Lemma t2v_v2t_id_2 : forall (v : vec 2), t2v_2 (v2t_2 v) == v.
Proof.
intros. cbn.
repeat match goal with
| v : vec (S _) |- _ =>
destruct (vec_S v) as [? [? E]]; rewrite E; clear E; simpl
| v : Matrix.vec (S _) |- _ =>
destruct (vec_S v) as [? [? E]]; rewrite E; clear E; simpl
| v : vec 0 |- _ => rewrite (vec_0 v)
| v : Matrix.vec 0 |- _ => rewrite (vec_0 v)
end.
(* easy. *)
(* Qed. *)
Admitted.
(** mapping of a vector *)
Definition vmap {n} (v : vec n) f : vec n := mmap f v.
(** folding of a vector *)
(* Definition vfold : forall {B : Type} {n} (v : vec n) (f : A -> B) (b : B), B. *)
(** mapping of two matrices *)
Definition vmap2 {n} (v1 v2 : vec n) f : vec n := mmap2 f v1 v2.
End BasicVectorTheoryDL.
(* ######################################################################### *)
(** * Ring vector theory implemented with DepList *)
(** zero vector, vector addition, opposite, subtraction, scalar multiplication,
dot product *)
Module RingVectorTheoryDL (E : RingElementType) <: RingVectorTheory E.
(* ==================================== *)
(** ** Also contain matrix theory *)
Module Export RingMatrixTheoryDL := RingMatrixTheoryDL E.
Export E.
Include (BasicVectorTheoryDL E).
(** ** Zero vector *)
Definition vec0 {n} : vec n := mat0 n 1.
(* (** Assert that a vector is an zero vector. *) *)
(* Definition vzero {n} (v : vec n) : Prop := v = vec0. *)
(* (** Assert that a vector is an non-zero vector. *) *)
(* Definition vnonzero {n} (v : vec n) : Prop := ~(vzero v). *)
(* (** vec0 is equal to mat0 with column 1 *) *)
(* Lemma vec0_eq_mat0 : forall n, vec0 = mat0 n 1. *)
(* Proof. *)
(* intros. easy. *)
(* Qed. *)
(* (** It is decidable that if a vector is zero vector. *) *)
(* Lemma vzero_dec : forall {n} (v : vec n), {vzero v} + {vnonzero v}. *)
(* Proof. *)
(* intros. apply meq_dec. *)
(* Qed. *)
(** *** Vector addition *)
Definition vadd {n} (v1 v2 : vec n) : vec n := madd v1 v2.
Infix "+" := vadd.
(** v1 + v2 = v2 + v1 *)
Lemma vadd_comm : forall {n} (v1 v2 : vec n), (v1 + v2) == (v2 + v1).
Proof.
intros. apply madd_comm.
Qed.
(** (v1 + v2) + v3 = v1 + (v2 + v3) *)
Lemma vadd_assoc : forall {n} (v1 v2 v3 : vec n), (v1 + v2) + v3 == v1 + (v2 + v3).
Proof.
intros. apply madd_assoc.
Qed.
(** vec0 + v = v *)
Lemma vadd_0_l : forall {n} (v : vec n), vec0 + v == v.
Proof.
intros. apply madd_0_l.
Qed.
(** v + vec0 = v *)
Lemma vadd_0_r : forall {n} (v : vec n), v + vec0 == v.
Proof.
intros. apply madd_0_r.
Qed.
(** *** Vector opposite *)
Definition vopp {n} (v : vec n) : vec n := mopp v.
Notation "- v" := (vopp v).
(** v + (- v) = vec0 *)
Lemma vadd_opp_r : forall {n} (v : vec n), v + (- v) == vec0.
Proof.
intros. apply madd_opp.
Qed.
(** v + (- v) = vec0 *)
Lemma vadd_opp_l : forall {n} (v : vec n), (- v) + v == vec0.
Proof.
intros. rewrite vadd_comm. apply madd_opp.
Qed.
(** *** Vector subtraction *)
Definition vsub {n} (v1 v2 : vec n) : vec n := v1 + (- v2).
Infix "-" := vsub.
(** *** Vector scalar multiplication *)
Definition vcmul {n} a (v : vec n) : vec n := a c* v.
Definition vmulc {n} (v : vec n) a : vec n := v *c a.
(** v *c a = a c* v *)
Lemma vmulc_eq_vcmul : forall {n} a (v : vec n), v *c a == a c* v.
Proof.
intros. apply mmulc_eq_mcmul.
Qed.
(** a c* (b c* v) = (a * b) c* v *)
Lemma vcmul_assoc : forall {n} a b (v : vec n), a c* (b c* v) == (a * b) c* v.
Proof.
intros. apply mcmul_assoc.
Qed.
(** a c* (b c* v) = b c* (a c* v) *)
Lemma vcmul_perm : forall {n} a b (v : vec n), a c* (b c* v) == b c* (a c* v).
Proof.
intros. apply mcmul_perm.
Qed.
(** (a + b) c* v = (a c* v) + (b c* v) *)
Lemma vcmul_add_distr_l : forall {n} a b (v : vec n),
(a + b)%A c* v == (a c* v) + (b c* v).
Proof.
intros. apply mcmul_add_distr_r.
Qed.
(** a c* (v1 + v2) = (a c* v1) + (a c* v2) *)
Lemma vcmul_add_distr_r : forall {n} a (v1 v2 : vec n),
a c* (v1 + v2) == (a c* v1) + (a c* v2).
Proof.
intros. unfold vadd. apply mcmul_add_distr_l.
Qed.
(** 1 c* v = v *)
Lemma vcmul_1_l : forall {n} (v : vec n), A1 c* v == v.
Proof.
intros. apply mcmul_1_l.
Qed.
(** 0 c* v = vec0 *)
Lemma vcmul_0_l : forall {n} (v : vec n), A0 c* v == vec0.
Proof.
intros. apply mcmul_0_l.
Qed.
(** *** Vector dot product *)
(** Dot product of two vectors.
Here we compute it via matrix multiplication, which differs from the
approach used in the general setting. *)
Definition vdot {n : nat} (v1 v2 : vec n) :=
scalar_of_mat (v1\T * v2)%mat.
End RingVectorTheoryDL.
(* ######################################################################### *)
(** * Decidable-field vector theory implemented with DepList *)
Module DecidableFieldVectorTheoryDL (E : DecidableFieldElementType)
<: DecidableFieldVectorTheory E.
(* ==================================== *)
(** ** Also contain matrix theory *)
Module Export DecidableFieldMatrixTheoryDL := DecidableFieldMatrixTheoryDL E.
Export E.
Include (RingVectorTheoryDL E).
(** veq is decidable *)
Lemma veq_dec : forall (n : nat), Decidable (veq (n:=n)).
Proof. intros. apply meq_dec. Qed.
End DecidableFieldVectorTheoryDL.
(* ######################################################################### *)
(** * Test *)
Module Test.
Module Import VectorR := RingVectorTheoryDL RingElementTypeR.
Import Reals.
Open Scope R.
Definition v1 := @l2v 3 [1;2;3].
Definition v2 := @l2v 3 [4;5;6].
Example vdot_ex1 : vdot v1 v2 = (4+10+18)%R.
Proof.
compute. ring.
Qed.
End Test.
(** ** Others, later ... *)
(*
(* ==================================== *)
(** ** 2-dim vector operations *)
(** Square of length of a 2D vector *)
Definition vlen2 (v : vec 2) : A :=
let '(x,y) := v2t_2 v in
(x * x + y * y)%X.
(* ==================================== *)
(** ** 3-dim vector operations *)
(** Square of length of a 3D vector *)
Definition vlen3 (v : vec 3) : A :=
let '(x,y,z) := v2t_3 v in
(x * x + y * y + z * z)%X.
(** dot product of a 3D vector *)
Definition vdot3 (v0 v1 : vec 3) : A :=
let '(a0,b0,c0) := v2t_3 v0 in
let '(a1,b1,c1) := v2t_3 v1 in
(a0 * a1 + b0 * b1 + c0 * c1)%X.
(** v1 <> 0 -> v2 <> 0 -> v1 = k c* v2 -> k <> 0 *)
Lemma vec_eq_vcmul_imply_coef_neq0 : forall {n} (v1 v2 : vec n) k,
vnonzero v1 -> vnonzero v2 -> v1 = k c* v2 -> k <> A0.
Proof.
intros. intro. subst. apply vcmul_0_l in H. destruct H. easy.
Qed.
(* ==================================== *)
(** ** Vector equility *)
Lemma veq_dec : forall {n} (v1 v2 : vec n), {v1 = v2} + {v1 <> v2}.
Proof.
intros. apply meq_dec.
Qed.
*)
|
\subsubsection{Cosine}
\noindent
Let $a$ be a constant.\\
By definitions of a Laplace transform and an improper integral,
\begin{equation*}
\Laplace{\cos{(at)}} = \lim\limits_{n\to\infty}{\int_{0}^{n}{\cos{(at)}e^{-st}\mathrm{d}t}}
\end{equation*}
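The antiderivative used in the next step can be checked by differentiation (it also follows from integrating by parts twice):
\begin{equation*}
\int{\cos{(at)}e^{-st}\mathrm{d}t} = \frac{e^{-st}\left(a\sin{(at)} - s\cos{(at)}\right)}{s^2 + a^2} + C
\end{equation*}
Hence,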
\begin{equation*}
= \frac{1}{s^2 + a^2}\lim\limits_{n\to\infty}{\left[e^{-st}\left(a\sin{(at)} - s\cos{(at)}\right)\right]_{0}^{n}}
\end{equation*}
\begin{equation*}
= \frac{1}{s^2 + a^2}\left(\lim\limits_{n\to\infty}{\left(e^{-sn}\left(a\sin{(an)} - s\cos{(an)}\right)\right)} - \left(e^{-s\cdot 0}\left(a\sin{(a\cdot 0)} - s\cos{(a\cdot 0)}\right)\right)\right)
\end{equation*}
Both $\sin$ and $\cos$ take values between $-1$ and $1$, so the factor $a\sin{(an)} - s\cos{(an)}$ is bounded in absolute value by $|a| + s$. For positive $s$, the decaying exponential $e^{-sn}$ dominates this bounded factor, so the first term vanishes in the limit.
\begin{equation*}
= \frac{1}{s^2 + a^2}\left(0 + s\right) \text{, } s > 0
\end{equation*}
\begin{equation*}
= \frac{s}{s^2 + a^2} \text{, } s > 0
\end{equation*}
So,
\begin{equation*}
\Laplace{\cos{(at)}} = \frac{s}{s^2 + a^2} \text{, } s > 0
\end{equation*} |
People leaving Davis should consider relocating to one of our Sister Cities.
Leaving Davis is typically what a student does after graduating from UC Davis. You might also leave if you feel like you have been here a little too long (see "You know you have been in Davis too long if...").
I have heard, but can't find it anywhere, that Davis has/had the largest percentage of graduates that remain in the area after graduation. Users/RogerClark RC
Not to discourage people from staying in Davis (see Stay In Davis), but if you have already left Davis, leave us your virtual calling card here, say hi, and let us know where you went!
Users/EvanEdwards and Users/SarahHillard are on the other end of I80, in a small college town in Pennsylvania. Sarah is pursuing her PhD in Chemistry at Penn State and Evan is pursuing her. They have vague plans to move back to Davis at some point in the future.
How far away are we talking about including? I live in West Sacramento which is technically leaving Davis, but I am only the next town over. Is this meant to be those moving beyond the Sacramento/Vacaville/Woodland area? Bay Area? Users/RogerClark RC
I'd say that if you can make it to an event in Davis without having to arrange for an overnight stay, you're basically still local. Users/JessicaLuedtke
I'll be leaving Davis and the United States in two weeks to begin work on my doctorate. Things I'll miss: the Davis wiki, coop, adult learning center, and Delta Venus (especially Irish night). Things I won't miss: the new Target, the ignoramuses who voted for it, the apathetic/transient student population, bland food, market-driven thinking, the psychic's Hummer, neoprimitivism, and liberal elitist hypocrisy. I would consider returning under one condition: if I were offered a tenure-track position at UCD. Ironically, I think the Davis wiki is the best thing going for Davis, next to a few great professors and their lasting influence (which I'll have to assume exists, based on my undergraduate experience in another state). Salut, everyone! Users/ZacharyNorwood ZN
Those that do leave Davis ought to seek out other Alumni, depending on the city where they move to. For example, there is a decently sized group of Alumni over in Washington DC. I know this because I briefly visited a few of them in November 2006. I am sure other major cities will have some as well. Paul Amnuaypayoat
|
# Copyright (c) 2022 Code Komali
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
module StudentData
export Student, students, sortedStudents, studentCompareFn
# Complex type
struct Student
name::String
rollno::String
cgpa::Float64
end
# instances of the complex type Student
students = [
Student("jack", "s2020123", 8.9),
Student("jill", "s2020124", 4.9),
Student("tom", "s2020125", 6.9),
Student("pip", "s2020126", 9.9),
Student("mark", "s2020127", 7.9)
]
# expected result after sorting students (descending) by CGPA
sortedStudents = [
Student("pip", "s2020126", 9.9),
Student("jack", "s2020123", 8.9),
Student("mark", "s2020127", 7.9),
Student("tom", "s2020125", 6.9),
Student("jill", "s2020124", 4.9)
]
# comparison function for student
function studentCompareFn(x::Student, y::Student)
x.cgpa > y.cgpa
end
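# Illustrative usage (an assumption, not part of the original tests): passing the
# comparison above to Base.sort via the `lt` keyword sorts by CGPA in descending
# order, which should reproduce `sortedStudents`:
#   sort(students, lt = studentCompareFn)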
end |
#!/usr/bin/env julia
# MIT License
# Copyright (c) 2016-2017 Andrew Clemons
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
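# Note: this script uses Julia 0.x syntax; on Julia >= 1.0, `type` becomes
# `mutable struct`, `shift!` becomes `popfirst!`, and `@printf` requires `using Printf`.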
type Elf
id::UInt32
presents::UInt32
end
function initCircle(count)
elves = Array{Elf, 1}()
for i in 1:count
elf = Elf(i, 1)
push!(elves, elf)
end
elves
end
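# Part 1: the elf at the front of the circle takes all presents from the next elf,
# who is removed; the taker then moves to the back of the queue. Repeat until one remains.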
function winningElf(count)
elves = initCircle(count)
while length(elves) > 1
first = shift!(elves)
second = shift!(elves)
first.presents = first.presents + second.presents
push!(elves, first)
end
elf = pop!(elves)
@printf "Elf %d now gets all the presents\n" elf.id
end
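# Part 2 variant: each elf instead takes the presents from the elf directly across
# the circle (floor(len/2) positions ahead), who is then removed.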
function winningElfV2(count)
elves = initCircle(count)
len = length(elves)
offset = 1
while len > 1
first = elves[offset]
opposite = offset + convert(UInt32, round((len / 2), RoundDown))
if opposite > len
opposite = opposite - len
end
second = elves[opposite]
deleteat!(elves, opposite)
first.presents = first.presents + second.presents
len = length(elves)
if len > 1
if opposite > offset
offset += 1
else
if offset > len
offset = 1
end
end
end
end
elf = pop!(elves)
@printf "Elf %d now gets all the presents\n" elf.id
end
count = parse(UInt32, chomp(readline()))
winningElf(count)
winningElfV2(count)
|
Formal statement is: proposition Lim_at_infinity: "(f \<longlongrightarrow> l) at_infinity \<longleftrightarrow> (\<forall>e>0. \<exists>b. \<forall>x. norm x \<ge> b \<longrightarrow> dist (f x) l < e)" Informal statement is: A function $f$ converges to $l$ at infinity if and only if for every $\epsilon > 0$, there exists a $b$ such that for all $x$ with $|x| \geq b$, we have $|f(x) - l| < \epsilon$. |
%% unpadeven
% Below is a demonstration of the features of the |unpadeven| function
%%
clear; close all; clc;
%% Syntax
% |[M]=unpadeven(M_pad,L_even_dim);|
%% Description
% UNDOCUMENTED
%% Examples
%
%%
%
% <<gibbVerySmall.gif>>
%
% _*GIBBON*_
% <www.gibboncode.org>
%
% _Kevin Mattheus Moerman_, <[email protected]>
%%
% _*GIBBON footer text*_
%
% License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE>
%
% GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for
% image segmentation, image-based modeling, meshing, and finite element
% analysis.
%
% Copyright (C) 2006-2022 Kevin Mattheus Moerman and the GIBBON contributors
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
|
proposition locally_path_connected_quotient_image: assumes lcS: "locally path_connected S" and oo: "\<And>T. T \<subseteq> f ` S \<Longrightarrow> openin (top_of_set S) (S \<inter> f -` T) \<longleftrightarrow> openin (top_of_set (f ` S)) T" shows "locally path_connected (f ` S)" |
{-# OPTIONS --without-K --exact-split --safe #-}
open import Fragment.Algebra.Signature
module Fragment.Algebra.Free.Base (Σ : Signature) where
open import Fragment.Algebra.Algebra Σ
open import Fragment.Algebra.Free.Atoms public
open import Level using (Level; _⊔_)
open import Function using (_∘_)
open import Data.Empty using (⊥)
open import Data.Nat using (ℕ)
open import Data.Fin using (Fin)
open import Data.Vec using (Vec; []; _∷_)
open import Data.Vec.Relation.Binary.Pointwise.Inductive
using (Pointwise; []; _∷_)
open import Relation.Binary using (Setoid; IsEquivalence)
open import Relation.Binary.PropositionalEquality as PE using (_≡_)
private
variable
a ℓ : Level
module _ (A : Set a) where
data Term : Set a where
atom : A → Term
term : ∀ {arity} → (f : ops Σ arity) → Vec Term arity → Term
module _ (S : Setoid a ℓ) where
open Setoid S renaming (Carrier to A)
data _~_ : Term A → Term A → Set (a ⊔ ℓ) where
atom : ∀ {x y} → x ≈ y → atom x ~ atom y
term : ∀ {arity xs ys} {f : ops Σ arity}
→ Pointwise _~_ xs ys
→ term f xs ~ term f ys
private
mutual
map-~-refl : ∀ {n} {xs : Vec _ n} → Pointwise _~_ xs xs
map-~-refl {xs = []} = []
map-~-refl {xs = x ∷ xs} = ~-refl ∷ map-~-refl
~-refl : ∀ {x} → x ~ x
~-refl {atom _} = atom refl
~-refl {term _ _} = term map-~-refl
mutual
map-~-sym : ∀ {n} {xs ys : Vec _ n}
→ Pointwise _~_ xs ys
→ Pointwise _~_ ys xs
map-~-sym [] = []
map-~-sym (x≈y ∷ xs≈ys) =
~-sym x≈y ∷ map-~-sym xs≈ys
~-sym : ∀ {x y} → x ~ y → y ~ x
~-sym (atom x≈y) = atom (sym x≈y)
~-sym (term xs≈ys) = term (map-~-sym xs≈ys)
mutual
map-~-trans : ∀ {n} {xs ys zs : Vec _ n}
→ Pointwise _~_ xs ys
→ Pointwise _~_ ys zs
→ Pointwise _~_ xs zs
map-~-trans [] [] = []
map-~-trans (x≈y ∷ xs≈ys) (y≈z ∷ ys≈zs) =
~-trans x≈y y≈z ∷ map-~-trans xs≈ys ys≈zs
~-trans : ∀ {x y z} → x ~ y → y ~ z → x ~ z
~-trans (atom x≈y) (atom y≈z) =
atom (trans x≈y y≈z)
~-trans (term xs≈ys) (term ys≈zs) =
term (map-~-trans xs≈ys ys≈zs)
~-isEquivalence : IsEquivalence _~_
~-isEquivalence = record { refl = ~-refl
; sym = ~-sym
; trans = ~-trans
}
Herbrand : Setoid _ _
Herbrand = record { Carrier = Term A
; _≈_ = _~_
; isEquivalence = ~-isEquivalence
}
Free : Algebra
Free = record { ∥_∥/≈ = Herbrand
; ∥_∥/≈-isAlgebra = Free-isAlgebra
}
where term-cong : Congruence Herbrand term
term-cong f p = term p
Free-isAlgebra : IsAlgebra Herbrand
Free-isAlgebra = record { ⟦_⟧ = term
; ⟦⟧-cong = term-cong
}
F : ℕ → Algebra
F = Free ∘ Atoms (PE.setoid ⊥)
|
(* Title: ZF/AC/AC_Equiv.thy
Author: Krzysztof Grabczewski
Axioms AC1 -- AC19 come from "Equivalents of the Axiom of Choice, II"
by H. Rubin and J.E. Rubin, 1985.
Axiom AC0 comes from "Axiomatic Set Theory" by P. Suppes, 1972.
Some Isabelle proofs of equivalences of these axioms are formalizations of
proofs presented by the Rubins. The others are based on the Rubins' proofs,
but slightly changed.
*)
theory AC_Equiv
imports ZF
begin (*obviously not ZFC*)
(* Well Ordering Theorems *)
definition
"WO1 == \<forall>A. \<exists>R. well_ord(A,R)"
definition
"WO2 == \<forall>A. \<exists>a. Ord(a) & A\<approx>a"
definition
"WO3 == \<forall>A. \<exists>a. Ord(a) & (\<exists>b. b \<subseteq> a & A\<approx>b)"
definition
"WO4(m) == \<forall>A. \<exists>a f. Ord(a) & domain(f)=a &
(\<Union>b<a. f`b) = A & (\<forall>b<a. f`b \<lesssim> m)"
definition
"WO5 == \<exists>m \<in> nat. 1\<le>m & WO4(m)"
definition
"WO6 == \<forall>A. \<exists>m \<in> nat. 1\<le>m & (\<exists>a f. Ord(a) & domain(f)=a
& (\<Union>b<a. f`b) = A & (\<forall>b<a. f`b \<lesssim> m))"
definition
"WO7 == \<forall>A. Finite(A) \<longleftrightarrow> (\<forall>R. well_ord(A,R) \<longrightarrow> well_ord(A,converse(R)))"
definition
"WO8 == \<forall>A. (\<exists>f. f \<in> (\<Prod>X \<in> A. X)) \<longrightarrow> (\<exists>R. well_ord(A,R))"
definition
(* Auxiliary concepts needed below *)
pairwise_disjoint :: "i => o" where
"pairwise_disjoint(A) == \<forall>A1 \<in> A. \<forall>A2 \<in> A. A1 \<inter> A2 \<noteq> 0 \<longrightarrow> A1=A2"
definition
sets_of_size_between :: "[i, i, i] => o" where
"sets_of_size_between(A,m,n) == \<forall>B \<in> A. m \<lesssim> B & B \<lesssim> n"
(* Axioms of Choice *)
definition
"AC0 == \<forall>A. \<exists>f. f \<in> (\<Prod>X \<in> Pow(A)-{0}. X)"
definition
"AC1 == \<forall>A. 0\<notin>A \<longrightarrow> (\<exists>f. f \<in> (\<Prod>X \<in> A. X))"
definition
"AC2 == \<forall>A. 0\<notin>A & pairwise_disjoint(A)
\<longrightarrow> (\<exists>C. \<forall>B \<in> A. \<exists>y. B \<inter> C = {y})"
definition
"AC3 == \<forall>A B. \<forall>f \<in> A->B. \<exists>g. g \<in> (\<Prod>x \<in> {a \<in> A. f`a\<noteq>0}. f`x)"
definition
"AC4 == \<forall>R A B. (R \<subseteq> A*B \<longrightarrow> (\<exists>f. f \<in> (\<Prod>x \<in> domain(R). R``{x})))"
definition
"AC5 == \<forall>A B. \<forall>f \<in> A->B. \<exists>g \<in> range(f)->A. \<forall>x \<in> domain(g). f`(g`x) = x"
definition
"AC6 == \<forall>A. 0\<notin>A \<longrightarrow> (\<Prod>B \<in> A. B)\<noteq>0"
definition
"AC7 == \<forall>A. 0\<notin>A & (\<forall>B1 \<in> A. \<forall>B2 \<in> A. B1\<approx>B2) \<longrightarrow> (\<Prod>B \<in> A. B) \<noteq> 0"
definition
"AC8 == \<forall>A. (\<forall>B \<in> A. \<exists>B1 B2. B=<B1,B2> & B1\<approx>B2)
\<longrightarrow> (\<exists>f. \<forall>B \<in> A. f`B \<in> bij(fst(B),snd(B)))"
definition
"AC9 == \<forall>A. (\<forall>B1 \<in> A. \<forall>B2 \<in> A. B1\<approx>B2) \<longrightarrow>
(\<exists>f. \<forall>B1 \<in> A. \<forall>B2 \<in> A. f`<B1,B2> \<in> bij(B1,B2))"
definition
"AC10(n) == \<forall>A. (\<forall>B \<in> A. ~Finite(B)) \<longrightarrow>
(\<exists>f. \<forall>B \<in> A. (pairwise_disjoint(f`B) &
sets_of_size_between(f`B, 2, succ(n)) & \<Union>(f`B)=B))"
definition
"AC11 == \<exists>n \<in> nat. 1\<le>n & AC10(n)"
definition
"AC12 == \<forall>A. (\<forall>B \<in> A. ~Finite(B)) \<longrightarrow>
(\<exists>n \<in> nat. 1\<le>n & (\<exists>f. \<forall>B \<in> A. (pairwise_disjoint(f`B) &
sets_of_size_between(f`B, 2, succ(n)) & \<Union>(f`B)=B)))"
definition
"AC13(m) == \<forall>A. 0\<notin>A \<longrightarrow> (\<exists>f. \<forall>B \<in> A. f`B\<noteq>0 & f`B \<subseteq> B & f`B \<lesssim> m)"
definition
"AC14 == \<exists>m \<in> nat. 1\<le>m & AC13(m)"
definition
"AC15 == \<forall>A. 0\<notin>A \<longrightarrow>
(\<exists>m \<in> nat. 1\<le>m & (\<exists>f. \<forall>B \<in> A. f`B\<noteq>0 & f`B \<subseteq> B & f`B \<lesssim> m))"
definition
"AC16(n, k) ==
\<forall>A. ~Finite(A) \<longrightarrow>
(\<exists>T. T \<subseteq> {X \<in> Pow(A). X\<approx>succ(n)} &
(\<forall>X \<in> {X \<in> Pow(A). X\<approx>succ(k)}. \<exists>! Y. Y \<in> T & X \<subseteq> Y))"
definition
"AC17 == \<forall>A. \<forall>g \<in> (Pow(A)-{0} -> A) -> Pow(A)-{0}.
\<exists>f \<in> Pow(A)-{0} -> A. f`(g`f) \<in> g`f"
locale AC18 =
assumes AC18: "A\<noteq>0 & (\<forall>a \<in> A. B(a) \<noteq> 0) \<longrightarrow>
((\<Inter>a \<in> A. \<Union>b \<in> B(a). X(a,b)) =
(\<Union>f \<in> \<Prod>a \<in> A. B(a). \<Inter>a \<in> A. X(a, f`a)))"
\<comment> \<open>AC18 cannot be expressed within the object-logic\<close>
definition
"AC19 == \<forall>A. A\<noteq>0 & 0\<notin>A \<longrightarrow> ((\<Inter>a \<in> A. \<Union>b \<in> a. b) =
(\<Union>f \<in> (\<Prod>B \<in> A. B). \<Inter>a \<in> A. f`a))"
(* ********************************************************************** *)
(* Theorems concerning ordinals *)
(* ********************************************************************** *)
(* lemma for ordertype_Int *)
lemma rvimage_id: "rvimage(A,id(A),r) = r \<inter> A*A"
apply (unfold rvimage_def)
apply (rule equalityI, safe)
apply (drule_tac P = "%a. <id (A) `xb,a>:r" in id_conv [THEN subst],
assumption)
apply (drule_tac P = "%a. <a,ya>:r" in id_conv [THEN subst], (assumption+))
apply (fast intro: id_conv [THEN ssubst])
done
(* used only in Hartog.ML *)
lemma ordertype_Int:
"well_ord(A,r) ==> ordertype(A, r \<inter> A*A) = ordertype(A,r)"
apply (rule_tac P = "%a. ordertype (A,a) =ordertype (A,r) " in rvimage_id [THEN subst])
apply (erule id_bij [THEN bij_ordertype_vimage])
done
lemma lam_sing_bij: "(\<lambda>x \<in> A. {x}) \<in> bij(A, {{x}. x \<in> A})"
apply (rule_tac d = "%z. THE x. z={x}" in lam_bijective)
apply (auto simp add: the_equality)
done
lemma inj_strengthen_type:
"[| f \<in> inj(A, B); !!a. a \<in> A ==> f`a \<in> C |] ==> f \<in> inj(A,C)"
by (unfold inj_def, blast intro: Pi_type)
(* ********************************************************************** *)
(* Another elimination rule for \<exists>! *)
(* ********************************************************************** *)
lemma ex1_two_eq: "[| \<exists>! x. P(x); P(x); P(y) |] ==> x=y"
by blast
(* ********************************************************************** *)
(* Lemmas used in the proofs like WO? ==> AC? *)
(* ********************************************************************** *)
lemma first_in_B:
"[| well_ord(\<Union>(A),r); 0 \<notin> A; B \<in> A |] ==> (THE b. first(b,B,r)) \<in> B"
by (blast dest!: well_ord_imp_ex1_first
[THEN theI, THEN first_def [THEN def_imp_iff, THEN iffD1]])
lemma ex_choice_fun: "[| well_ord(\<Union>(A), R); 0 \<notin> A |] ==> \<exists>f. f \<in> (\<Prod>X \<in> A. X)"
by (fast elim!: first_in_B intro!: lam_type)
lemma ex_choice_fun_Pow: "well_ord(A, R) ==> \<exists>f. f \<in> (\<Prod>X \<in> Pow(A)-{0}. X)"
by (fast elim!: well_ord_subset [THEN ex_choice_fun])
(* ********************************************************************** *)
(* Lemmas needed to state when a finite relation is a function. *)
(* The criteria are cardinalities of the relation and its domain. *)
(* Used in WO6WO1.ML *)
(* ********************************************************************** *)
(*Using AC we could trivially prove, for all u, domain(u) \<lesssim> u*)
lemma lepoll_m_imp_domain_lepoll_m:
"[| m \<in> nat; u \<lesssim> m |] ==> domain(u) \<lesssim> m"
apply (unfold lepoll_def)
apply (erule exE)
apply (rule_tac x = "\<lambda>x \<in> domain(u). \<mu> i. \<exists>y. <x,y> \<in> u & f`<x,y> = i"
in exI)
apply (rule_tac d = "%y. fst (converse(f) ` y) " in lam_injective)
apply (fast intro: LeastI2 nat_into_Ord [THEN Ord_in_Ord]
inj_is_fun [THEN apply_type])
apply (erule domainE)
apply (frule inj_is_fun [THEN apply_type], assumption)
apply (rule LeastI2)
apply (auto elim!: nat_into_Ord [THEN Ord_in_Ord])
done
lemma rel_domain_ex1:
"[| succ(m) \<lesssim> domain(r); r \<lesssim> succ(m); m \<in> nat |] ==> function(r)"
apply (unfold function_def, safe)
apply (rule ccontr)
apply (fast elim!: lepoll_trans [THEN succ_lepoll_natE]
lepoll_m_imp_domain_lepoll_m [OF _ Diff_sing_lepoll]
elim: domain_Diff_eq [OF _ not_sym, THEN subst])
done
lemma rel_is_fun:
"[| succ(m) \<lesssim> domain(r); r \<lesssim> succ(m); m \<in> nat;
r \<subseteq> A*B; A=domain(r) |] ==> r \<in> A->B"
by (simp add: Pi_iff rel_domain_ex1)
end
|
(*
Author: Alexander Katovsky
*)
section "Functor"
theory Functors
imports Category
begin
record ('o1, 'o2, 'm1, 'm2, 'a, 'b) Functor =
CatDom :: "('o1,'m1,'a)Category_scheme"
CatCod :: "('o2,'m2,'b)Category_scheme"
MapM :: "'m1 \<Rightarrow> 'm2"
abbreviation
FunctorMorApp :: "('o1, 'o2, 'm1, 'm2, 'a1, 'a2, 'a) Functor_scheme \<Rightarrow> 'm1 \<Rightarrow> 'm2" (infixr "##" 70) where
"FunctorMorApp F m \<equiv> (MapM F) m"
definition
MapO :: "('o1, 'o2, 'm1, 'm2, 'a1, 'a2, 'a) Functor_scheme \<Rightarrow> 'o1 \<Rightarrow> 'o2" where
"MapO F X \<equiv> THE Y . Y \<in> Obj(CatCod F) \<and> F ## (Id (CatDom F) X) = Id (CatCod F) Y"
abbreviation
FunctorObjApp :: "('o1, 'o2, 'm1, 'm2, 'a1, 'a2, 'a) Functor_scheme \<Rightarrow> 'o1 \<Rightarrow> 'o2" (infixr "@@" 70) where
"FunctorObjApp F X \<equiv> (MapO F X)"
locale PreFunctor =
fixes F :: "('o1, 'o2, 'm1, 'm2, 'a1, 'a2, 'a) Functor_scheme" (structure)
assumes FunctorComp: "f \<approx>>\<^bsub>CatDom F\<^esub> g \<Longrightarrow> F ## (f ;;\<^bsub>CatDom F\<^esub> g) = (F ## f) ;;\<^bsub>CatCod F\<^esub> (F ## g)"
and FunctorId: "X \<in> obj\<^bsub>CatDom F\<^esub> \<Longrightarrow> \<exists> Y \<in> obj\<^bsub>CatCod F\<^esub> . F ## (id\<^bsub>CatDom F\<^esub> X) = id\<^bsub>CatCod F\<^esub> Y"
and CatDom[simp]: "Category(CatDom F)"
and CatCod[simp]: "Category(CatCod F)"
locale FunctorM = PreFunctor +
assumes FunctorCompM: "f maps\<^bsub>CatDom F\<^esub> X to Y \<Longrightarrow> (F ## f) maps\<^bsub>CatCod F\<^esub> (F @@ X) to (F @@ Y)"
locale FunctorExt =
fixes F :: "('o1, 'o2, 'm1, 'm2, 'a1, 'a2, 'a) Functor_scheme" (structure)
assumes FunctorMapExt: "(MapM F) \<in> extensional (Mor (CatDom F))"
locale Functor = FunctorM + FunctorExt
definition
MakeFtor :: "('o1, 'o2, 'm1, 'm2, 'a, 'b,'r) Functor_scheme \<Rightarrow> ('o1, 'o2, 'm1, 'm2, 'a, 'b,'r) Functor_scheme" where
"MakeFtor F \<equiv> \<lparr>
CatDom = CatDom F ,
CatCod = CatCod F ,
MapM = restrict (MapM F) (Mor (CatDom F)) ,
\<dots> = Functor.more F
\<rparr>"
lemma PreFunctorFunctor[simp]: "Functor F \<Longrightarrow> PreFunctor F"
by (simp add: Functor_def FunctorM_def)
lemmas functor_simps = PreFunctor.FunctorComp PreFunctor.FunctorId
definition
functor_abbrev ("Ftor _ : _ \<longrightarrow> _" [81]) where
"Ftor F : A \<longrightarrow> B \<equiv> (Functor F) \<and> (CatDom F = A) \<and> (CatCod F = B)"
lemma functor_abbrevE[elim]: "\<lbrakk>Ftor F : A \<longrightarrow> B ; \<lbrakk>(Functor F) ; (CatDom F = A) ; (CatCod F = B)\<rbrakk> \<Longrightarrow> R\<rbrakk> \<Longrightarrow> R"
by (auto simp add: functor_abbrev_def)
definition
functor_comp_def ("_ \<approx>>;;; _" [81]) where
"functor_comp_def F G \<equiv> (Functor F) \<and> (Functor G) \<and> (CatDom G = CatCod F)"
lemma functor_comp_def[elim]: "\<lbrakk>F \<approx>>;;; G ; \<lbrakk>Functor F ; Functor G ; CatDom G = CatCod F\<rbrakk> \<Longrightarrow> R\<rbrakk> \<Longrightarrow> R"
by (auto simp add: functor_comp_def_def)
lemma (in Functor) FunctorMapsTo:
assumes "f \<in> mor\<^bsub>CatDom F\<^esub>"
shows "F ## f maps\<^bsub>CatCod F\<^esub> (F @@ (dom\<^bsub>CatDom F\<^esub> f)) to (F @@ (cod\<^bsub>CatDom F\<^esub> f))"
proof-
have "f maps\<^bsub>CatDom F\<^esub> (dom\<^bsub>CatDom F\<^esub> f) to (cod\<^bsub>CatDom F\<^esub> f)" using assms by auto
thus ?thesis by (simp add: FunctorCompM[of f "dom\<^bsub>CatDom F\<^esub> f" "cod\<^bsub>CatDom F\<^esub> f"])
qed
lemma (in Functor) FunctorCodDom:
assumes "f \<in> mor\<^bsub>CatDom F\<^esub>"
shows "dom\<^bsub>CatCod F\<^esub>(F ## f) = F @@ (dom\<^bsub>CatDom F\<^esub> f)" and "cod\<^bsub>CatCod F\<^esub>(F ## f) = F @@ (cod\<^bsub>CatDom F\<^esub> f)"
proof-
have "F ## f maps\<^bsub>CatCod F\<^esub> (F @@ (dom\<^bsub>CatDom F\<^esub> f)) to (F @@ (cod\<^bsub>CatDom F\<^esub> f))" using assms by (simp add: FunctorMapsTo)
thus "dom\<^bsub>CatCod F\<^esub>(F ## f) = F @@ (dom\<^bsub>CatDom F\<^esub> f)" and "cod\<^bsub>CatCod F\<^esub>(F ## f) = F @@ (cod\<^bsub>CatDom F\<^esub> f)"
by auto
qed
lemma (in Functor) FunctorCompPreserved: "f \<in> mor\<^bsub>CatDom F\<^esub> \<Longrightarrow> F ## f \<in> mor\<^bsub>CatCod F\<^esub>"
by (auto dest:FunctorMapsTo)
lemma (in Functor) FunctorCompDef:
assumes "f \<approx>>\<^bsub>CatDom F\<^esub> g" shows "(F ## f) \<approx>>\<^bsub>CatCod F\<^esub> (F ## g)"
proof(auto simp add: CompDefined_def)
show "F ## f \<in> mor\<^bsub>CatCod F\<^esub>" and "F ## g \<in> mor\<^bsub>CatCod F\<^esub>" using assms by (auto simp add: FunctorCompPreserved)
have "f \<in> mor\<^bsub>CatDom F\<^esub>" and "g \<in> mor\<^bsub>CatDom F\<^esub>" using assms by auto
hence a: "cod\<^bsub>CatCod F\<^esub> (F ## f) = F @@ (cod\<^bsub>CatDom F\<^esub> f)" and b: "dom\<^bsub>CatCod F\<^esub>(F ## g) = F @@ (dom\<^bsub>CatDom F\<^esub> g)"
by (simp add: FunctorCodDom)+
have "cod\<^bsub>CatCod F\<^esub> (F ## f) = F @@ (dom\<^bsub>CatDom F\<^esub> g)" using assms a by auto
also have "... = dom\<^bsub>CatCod F\<^esub> (F ## g)" using b by simp
finally show "cod\<^bsub>CatCod F\<^esub> (F ## f) = dom\<^bsub>CatCod F\<^esub> (F ## g)" .
qed
lemma FunctorComp: "\<lbrakk>Ftor F : A \<longrightarrow> B ; f \<approx>>\<^bsub>A\<^esub> g\<rbrakk> \<Longrightarrow> F ## (f ;;\<^bsub>A\<^esub> g) = (F ## f) ;;\<^bsub>B\<^esub> (F ## g)"
by (auto simp add: PreFunctor.FunctorComp)
lemma FunctorCompDef: "\<lbrakk>Ftor F : A \<longrightarrow> B ; f \<approx>>\<^bsub>A\<^esub> g\<rbrakk> \<Longrightarrow> (F ## f) \<approx>>\<^bsub>B\<^esub> (F ## g)"
by (auto simp add: Functor.FunctorCompDef)
lemma FunctorMapsTo:
assumes "Ftor F : A \<longrightarrow> B" and "f maps\<^bsub>A\<^esub> X to Y"
shows "(F ## f) maps\<^bsub>B\<^esub> (F @@ X) to (F @@ Y)"
proof-
have b: "CatCod F = B" and a: "CatDom F = A" and ff: "Functor F" using assms by auto
have df: "(dom\<^bsub>CatDom F\<^esub> f) = X" and cf: "(cod\<^bsub>CatDom F\<^esub> f) = Y" using a assms by auto
have "f \<in> mor\<^bsub>CatDom F\<^esub>" using assms by auto
hence "F ## f maps\<^bsub>CatCod F\<^esub> (F @@ (dom\<^bsub>CatDom F\<^esub> f)) to (F @@ (cod\<^bsub>CatDom F\<^esub> f))" using ff
by (simp add: Functor.FunctorMapsTo)
thus ?thesis using df cf b by simp
qed
lemma (in PreFunctor) FunctorId2:
assumes "X \<in> obj\<^bsub>CatDom F\<^esub>"
shows "F @@ X \<in> obj\<^bsub>CatCod F\<^esub> \<and> F ## (id\<^bsub>CatDom F\<^esub> X) = id\<^bsub>CatCod F\<^esub> (F @@ X)"
proof-
let ?Q = "\<lambda> E Y . Y \<in> obj\<^bsub>CatCod F\<^esub> \<and> E = id\<^bsub>CatCod F\<^esub> Y"
let ?P = "?Q (F ## (id\<^bsub>CatDom F\<^esub> X))"
from assms FunctorId obtain Y where "?P Y" by auto
moreover {
fix y e z have "\<lbrakk>?Q e y ; ?Q e z\<rbrakk> \<Longrightarrow> y = z"
by (auto intro: Category.IdInj[of "CatCod F" y z])
}
ultimately have "\<exists>! Z . ?P Z" by auto
hence "?P (THE Y . ?P Y)" by (rule theI')
thus ?thesis by (auto simp add: MapO_def)
qed
lemma FunctorId:
assumes "Ftor F : C \<longrightarrow> D" and "X \<in> Obj C"
shows "F ## (Id C X) = Id D (F @@ X)"
proof-
have "CatDom F = C" and "CatCod F = D" and "PreFunctor F" using assms by auto
thus ?thesis using assms PreFunctor.FunctorId2[of F X] by simp
qed
lemma (in Functor) DomFunctor: "f \<in> mor\<^bsub>CatDom F\<^esub> \<Longrightarrow> dom\<^bsub>CatCod F\<^esub> (F ## f) = F @@ (dom\<^bsub>CatDom F\<^esub> f)"
by (simp add: FunctorCodDom)
lemma (in Functor) FunctorId3Dom:
assumes "f \<in> mor\<^bsub>CatDom F\<^esub>"
shows "F ## (id\<^bsub>CatDom F\<^esub> (dom\<^bsub>CatDom F\<^esub> f)) = id\<^bsub>CatCod F\<^esub> (dom\<^bsub>CatCod F\<^esub> (F ## f))"
proof-
have "(dom\<^bsub>CatDom F\<^esub> f) \<in> obj\<^bsub>CatDom F\<^esub>" using assms by (simp add: Category.Cdom)
hence "F ## (id\<^bsub>CatDom F\<^esub> (dom\<^bsub>CatDom F\<^esub> f)) = id\<^bsub>CatCod F\<^esub> (F @@ (dom\<^bsub>CatDom F\<^esub> f))" by (simp add: FunctorId2)
also have "... = id\<^bsub>CatCod F\<^esub> (dom\<^bsub>CatCod F\<^esub> (F ## f))" using assms by (simp add: DomFunctor)
finally show ?thesis by simp
qed
lemma (in Functor) FunctorId3Cod:
assumes "f \<in> mor\<^bsub>CatDom F\<^esub>"
shows "F ## (id\<^bsub>CatDom F\<^esub> (cod\<^bsub>CatDom F\<^esub> f)) = id\<^bsub>CatCod F\<^esub> (cod\<^bsub>CatCod F\<^esub> (F ## f))"
proof-
have "(cod\<^bsub>CatDom F\<^esub> f) \<in> obj\<^bsub>CatDom F\<^esub>" using assms by (simp add: Category.Ccod)
hence "F ## (id\<^bsub>CatDom F\<^esub> (cod\<^bsub>CatDom F\<^esub> f)) = id\<^bsub>CatCod F\<^esub> (F @@ (cod\<^bsub>CatDom F\<^esub> f))" by (simp add: FunctorId2)
also have "... = id\<^bsub>CatCod F\<^esub> (cod\<^bsub>CatCod F\<^esub> (F ## f))" using assms by (simp add: CodFunctor)
finally show ?thesis by simp
qed
lemma (in PreFunctor) FmToFo: "\<lbrakk>X \<in> obj\<^bsub>CatDom F\<^esub> ; Y \<in> obj\<^bsub>CatCod F\<^esub> ; F ## (id\<^bsub>CatDom F\<^esub> X) = id\<^bsub>CatCod F\<^esub> Y\<rbrakk> \<Longrightarrow> F @@ X = Y"
by (auto simp add: FunctorId2 intro: Category.IdInj[of "CatCod F" "F @@ X" Y])
lemma MakeFtorPreFtor:
assumes "PreFunctor F" shows "PreFunctor (MakeFtor F)"
proof-
{
fix X assume a: "X \<in> obj\<^bsub>CatDom F\<^esub>" have "id\<^bsub>CatDom F \<^esub>X \<in> mor\<^bsub>CatDom F\<^esub>"
proof-
have "Category (CatDom F)" using assms by (simp add: PreFunctor_def)
hence "id\<^bsub>CatDom F \<^esub>X maps\<^bsub>CatDom F\<^esub> X to X" using a by (simp add: Category.Cidm)
thus ?thesis using a by (auto)
qed
}
thus "PreFunctor (MakeFtor F)" using assms
by(auto simp add: PreFunctor_def MakeFtor_def Category.MapsToMorDomCod)
qed
lemma MakeFtorMor: "f \<in> mor\<^bsub>CatDom F\<^esub> \<Longrightarrow> MakeFtor F ## f = F ## f"
by(simp add: MakeFtor_def)
lemma MakeFtorObj:
assumes "PreFunctor F" and "X \<in> obj\<^bsub>CatDom F\<^esub>"
shows "MakeFtor F @@ X = F @@ X"
proof-
have "X \<in> obj\<^bsub>CatDom (MakeFtor F)\<^esub>" using assms(2) by (simp add: MakeFtor_def)
moreover have "(F @@ X) \<in> obj\<^bsub>CatCod (MakeFtor F)\<^esub>" using assms by (simp add: PreFunctor.FunctorId2 MakeFtor_def)
moreover have "MakeFtor F ## id\<^bsub>CatDom (MakeFtor F) \<^esub>X = id\<^bsub>CatCod (MakeFtor F) \<^esub>(F @@ X)"
proof-
have "Category (CatDom F)" using assms(1) by (simp add: PreFunctor_def)
hence "id\<^bsub>CatDom F \<^esub>X maps\<^bsub>CatDom F\<^esub> X to X" using assms(2) by (auto simp add: Category.Cidm)
hence "id\<^bsub>CatDom F \<^esub>X \<in> mor\<^bsub>CatDom F\<^esub>" by auto
hence "MakeFtor F ## id\<^bsub>CatDom (MakeFtor F) \<^esub>X = F ## id\<^bsub>CatDom F \<^esub>X" by (simp add: MakeFtor_def)
also have "... = id\<^bsub>CatCod F \<^esub>(F @@ X)" using assms by (simp add: PreFunctor.FunctorId2)
finally show ?thesis by (simp add: MakeFtor_def)
qed
moreover have "PreFunctor (MakeFtor F)" using assms(1) by (simp add: MakeFtorPreFtor)
ultimately show ?thesis by (simp add: PreFunctor.FmToFo)
qed
lemma MakeFtor: assumes "FunctorM F" shows "Functor (MakeFtor F)"
proof(intro_locales)
show "PreFunctor (MakeFtor F)" using assms by (simp add: MakeFtorPreFtor FunctorM_def)
show "FunctorM_axioms (MakeFtor F)"
proof(auto simp add: FunctorM_axioms_def)
{
fix f X Y assume aa: "f maps\<^bsub>CatDom (MakeFtor F)\<^esub> X to Y"
show "((MakeFtor F) ## f) maps\<^bsub>CatCod (MakeFtor F)\<^esub> ((MakeFtor F) @@ X) to ((MakeFtor F) @@ Y)"
proof-
have "((MakeFtor F) ## f) = F ## f" using aa by (auto simp add: MakeFtor_def)
moreover have "((MakeFtor F) @@ X) = F @@ X" and "((MakeFtor F) @@ Y) = F @@ Y"
proof-
have "Category (CatDom F)" using assms by (simp add: FunctorM_def PreFunctor_def)
hence "X \<in> obj\<^bsub>CatDom F\<^esub>" and "Y \<in> obj\<^bsub>CatDom F\<^esub>"
using aa by (auto simp add: Category.MapsToObj MakeFtor_def)
moreover have "PreFunctor F" using assms(1) by (simp add: FunctorM_def)
ultimately show "((MakeFtor F) @@ X) = F @@ X" and "((MakeFtor F) @@ Y) = F @@ Y"
by (simp add: MakeFtorObj)+
qed
moreover have "F ## f maps\<^bsub>CatCod F\<^esub> (F @@ X) to (F @@ Y)" using assms(1) aa
by (simp add: FunctorM.FunctorCompM MakeFtor_def)
ultimately show ?thesis by (simp add: MakeFtor_def)
qed
}
qed
show "FunctorExt (MakeFtor F)" by(simp add: FunctorExt_def MakeFtor_def)
qed
definition
IdentityFunctor' :: "('o,'m,'a) Category_scheme \<Rightarrow> ('o,'o,'m,'m,'a,'a) Functor" ("FId'' _" [70]) where
"IdentityFunctor' C \<equiv> \<lparr>CatDom = C , CatCod = C , MapM = (\<lambda> f . f) \<rparr>"
definition
IdentityFunctor ("FId _" [70]) where
"IdentityFunctor C \<equiv> MakeFtor(IdentityFunctor' C)"
lemma IdFtor'PreFunctor: "Category C \<Longrightarrow> PreFunctor (FId' C)"
by(auto simp add: PreFunctor_def IdentityFunctor'_def)
lemma IdFtor'Obj:
assumes "Category C" and "X \<in> obj\<^bsub>CatDom (FId' C)\<^esub>"
shows "(FId' C) @@ X = X"
proof-
have "(FId' C) ## id\<^bsub>CatDom (FId' C)\<^esub> X = id\<^bsub>CatCod (FId' C)\<^esub> X" by(simp add: IdentityFunctor'_def)
moreover have "X \<in> obj\<^bsub>CatCod (FId' C)\<^esub>" using assms by (simp add: IdentityFunctor'_def)
ultimately show ?thesis using assms by (simp add: PreFunctor.FmToFo IdFtor'PreFunctor)
qed
lemma IdFtor'FtorM:
assumes "Category C" shows "FunctorM (FId' C)"
proof(auto simp add: FunctorM_def IdFtor'PreFunctor assms FunctorM_axioms_def)
{
fix f X Y assume a: "f maps\<^bsub>CatDom (FId' C)\<^esub> X to Y"
show "((FId' C) ## f) maps\<^bsub>CatCod (FId' C)\<^esub> ((FId' C) @@ X) to ((FId' C) @@ Y)"
proof-
have "X \<in> obj\<^bsub>CatDom (FId' C)\<^esub>" and "Y \<in> obj\<^bsub>CatDom (FId' C)\<^esub>"
using a assms by (simp add: Category.MapsToObj IdentityFunctor'_def)+
moreover have "(FId' C) ## f = f" and "CatDom (FId' C) = CatCod (FId' C)" by (simp add: IdentityFunctor'_def)+
ultimately show ?thesis using assms a by(simp add: IdFtor'Obj)
qed
}
qed
lemma IdFtorFtor: "Category C \<Longrightarrow> Functor (FId C)"
by (auto simp add: IdentityFunctor_def IdFtor'FtorM intro: MakeFtor)
definition
ConstFunctor' :: "('o1,'m1,'a) Category_scheme \<Rightarrow>
('o2,'m2,'b) Category_scheme \<Rightarrow> 'o2 \<Rightarrow> ('o1,'o2,'m1,'m2,'a,'b) Functor" where
"ConstFunctor' A B b \<equiv> \<lparr>
CatDom = A ,
CatCod = B ,
MapM = (\<lambda> f . (Id B) b)
\<rparr>"
definition "ConstFunctor A B b \<equiv> MakeFtor(ConstFunctor' A B b)"
lemma ConstFtor' :
assumes "Category A" "Category B" "b \<in> (Obj B)"
shows "PreFunctor (ConstFunctor' A B b)"
and "FunctorM (ConstFunctor' A B b)"
proof-
show "PreFunctor (ConstFunctor' A B b)" using assms
apply (subst PreFunctor_def)
apply (rule conjI)+
by (auto simp add: ConstFunctor'_def Category.Simps Category.CatIdCompId)
moreover
{
fix X assume "X \<in> obj\<^bsub>A\<^esub>" "b \<in> obj\<^bsub>B\<^esub>" "PreFunctor (ConstFunctor' A B b)"
hence "(ConstFunctor' A B b) @@ X = b"
by (auto simp add: ConstFunctor'_def PreFunctor.FmToFo Category.Simps)
}
ultimately show "FunctorM (ConstFunctor' A B b)" using assms
by (intro_locales, auto simp add: ConstFunctor'_def Category.Simps FunctorM_axioms_def)
qed
lemma ConstFtor:
assumes "Category A" "Category B" "b \<in> (Obj B)"
shows "Functor (ConstFunctor A B b)"
by (auto simp add: assms ConstFtor' ConstFunctor_def MakeFtor)
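(* The unit functor collapses C onto the one-object category UnitCategory;
   it is the constant functor at the unique object (). *)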
definition
UnitFunctor :: "('o,'m,'a) Category_scheme \<Rightarrow> ('o,unit,'m,unit,'a,unit) Functor" where
"UnitFunctor C \<equiv> ConstFunctor C UnitCategory ()"
lemma UnitFtor:
assumes "Category C"
shows "Functor(UnitFunctor C)"
proof-
have "() \<in> obj\<^bsub>UnitCategory\<^esub>" by (simp add: UnitCategory_def MakeCatObj)
hence "Functor(ConstFunctor C UnitCategory ())" using assms
by (simp add: ConstFtor)
thus ?thesis by (simp add: UnitFunctor_def)
qed
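(* Functor composition in diagrammatic order: F ;;: G (and F ;;; G after
   MakeFtor) first applies F and then G, so its domain is CatDom F and its
   codomain is CatCod G. *)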
definition
FunctorComp' :: "('o1,'o2,'m1,'m2,'a1,'a2) Functor \<Rightarrow> ('o2,'o3,'m2,'m3,'b1,'b2) Functor
\<Rightarrow> ('o1,'o3,'m1,'m3,'a1,'b2) Functor" (infixl ";;:" 71) where
"FunctorComp' F G \<equiv> \<lparr>
CatDom = CatDom F ,
CatCod = CatCod G ,
MapM = \<lambda> f . (MapM G)((MapM F) f)
\<rparr>"
definition FunctorComp (infixl ";;;" 71) where "FunctorComp F G \<equiv> MakeFtor (FunctorComp' F G)"
lemma FtorCompComp':
assumes "f \<approx>>\<^bsub>CatDom F\<^esub> g"
and "F \<approx>>;;; G"
shows "G ## (F ## (f ;;\<^bsub>CatDom F\<^esub> g)) = (G ## (F ## f)) ;;\<^bsub>CatCod G\<^esub> (G ## (F ## g))"
proof-
have [simp]: "PreFunctor G \<and> PreFunctor F" using assms by auto
have [simp]: "(F ## f) \<approx>>\<^bsub>CatDom G\<^esub> (F ## g)" using assms by (auto simp add: Functor.FunctorCompDef[of F f g])
have "F ## (f ;;\<^bsub>CatDom F\<^esub> g) = (F ## f) ;;\<^bsub>CatCod F\<^esub> (F ## g)" using assms
by (auto simp add: PreFunctor.FunctorComp)
hence "G ## (F ## (f ;;\<^bsub>CatDom F\<^esub> g)) = G ## ((F ## f) ;;\<^bsub>CatCod F\<^esub> (F ## g))" by simp
also have "... = G ## ((F ## f) ;;\<^bsub>CatDom G\<^esub> (F ## g))" using assms by auto
also have "... = (G ## (F ## f)) ;;\<^bsub>CatCod G\<^esub> (G ## (F ## g))"
by (simp add: PreFunctor.FunctorComp[of G "(F ## f)" "(F ## g)"])
finally show ?thesis by simp
qed
lemma FtorCompId:
assumes a: "X \<in> (Obj (CatDom F))"
and "F \<approx>>;;; G"
shows "G ## (F ## (id\<^bsub>CatDom F \<^esub>X)) = id\<^bsub>CatCod G\<^esub>(G @@ (F @@ X)) \<and> G @@ (F @@ X) \<in> (Obj (CatCod G))"
proof-
have [simp]: "PreFunctor G \<and> PreFunctor F" using assms by auto
have b: "(F @@ X) \<in> obj\<^bsub>CatDom G\<^esub>" using assms
by (auto simp add: PreFunctor.FunctorId2)
have "G ## F ## (id\<^bsub>CatDom F \<^esub>X) = G ## (id\<^bsub>CatCod F\<^esub>(F @@ X))" using b a
by (simp add: PreFunctor.FunctorId2[of F "X"])
also have "... = G ## (id\<^bsub>CatDom G\<^esub>(F @@ X))" using assms by auto
also have "... = id\<^bsub>CatCod G\<^esub>(G @@ (F @@ X)) \<and> G @@ (F @@ X) \<in> (Obj (CatCod G))" using b
by (simp add: PreFunctor.FunctorId2[of G "(F @@ X)"])
finally show ?thesis by simp
qed
lemma FtorCompIdDef:
assumes a: "X \<in> (Obj (CatDom F))" and b: "PreFunctor (F ;;: G)"
and "F \<approx>>;;; G"
shows "(F ;;: G) @@ X = (G @@ (F @@ X))"
proof-
have "(F ;;: G) ## (id\<^bsub>CatDom (F ;;: G)\<^esub>(X)) = G ## (F ## (id\<^bsub>CatDom F\<^esub>(X)))" using assms
by (simp add: FunctorComp'_def)
also have "... = id\<^bsub>CatCod G\<^esub>(G @@ (F @@ (X)))" using assms a
by(auto simp add: FtorCompId[of _ F G])
finally have "(F ;;: G) ## (id\<^bsub>CatDom (F ;;: G)\<^esub>(X)) = id\<^bsub>CatCod (F ;;: G)\<^esub>(G @@ (F @@ X))" using assms
by (simp add: FunctorComp'_def)
moreover have "G @@ (F @@ (X)) \<in> (Obj (CatCod (F ;;: G)))" using assms a
by(auto simp add: FtorCompId[of _ F G] FunctorComp'_def)
moreover have "X \<in> obj\<^bsub>CatDom (F ;;: G)\<^esub>" using a by (simp add: FunctorComp'_def)
ultimately show ?thesis using b
by (simp add: PreFunctor.FmToFo[of "F ;;: G" X "G @@ (F @@ X)"])
qed
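(* The following lemmas assemble the pieces needed to show that the composite
   of two composable functors is again a functor (FtorComp below). *)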
lemma FunctorCompMapsTo:
assumes "f \<in> mor\<^bsub>CatDom (F ;;: G)\<^esub>" and "F \<approx>>;;; G"
shows "(G ## (F ## f)) maps\<^bsub>CatCod G\<^esub> (G @@ (F @@ (dom\<^bsub>CatDom F\<^esub> f))) to (G @@ (F @@ (cod\<^bsub>CatDom F\<^esub> f)))"
proof-
have "f \<in> mor\<^bsub>CatDom F \<^esub>\<and> Functor F" using assms by (auto simp add: FunctorComp'_def)
hence "(F ## f) maps\<^bsub>CatDom G\<^esub> (F @@ (dom\<^bsub>CatDom F\<^esub> f)) to (F @@ (cod\<^bsub>CatDom F\<^esub> f))" using assms
by (auto simp add: Functor.FunctorMapsTo[of F f])
moreover have "FunctorM G" using assms by (auto simp add: FunctorComp_def Functor_def)
ultimately show ?thesis by(simp add: FunctorM.FunctorCompM[of G "F ## f" "F @@ (dom\<^bsub>CatDom F\<^esub> f)" "F @@ (cod\<^bsub>CatDom F\<^esub> f)"])
qed
lemma FunctorCompMapsTo2:
assumes "f \<in> mor\<^bsub>CatDom (F ;;: G)\<^esub>"
and "F \<approx>>;;; G"
and "PreFunctor (F ;;: G)"
shows "((F ;;: G) ## f) maps\<^bsub>CatCod (F ;;: G)\<^esub> ((F ;;: G) @@ (dom\<^bsub>CatDom (F ;;: G)\<^esub> f)) to
((F ;;: G) @@ (cod\<^bsub>CatDom (F ;;: G)\<^esub> f))"
proof-
have "Category (CatDom (F ;;: G))" using assms by (simp add: PreFunctor_def)
hence 1: "(dom\<^bsub>CatDom (F ;;: G)\<^esub> f) \<in> obj\<^bsub>CatDom F \<^esub>\<and> (cod\<^bsub>CatDom (F ;;: G)\<^esub> f) \<in> obj\<^bsub>CatDom F\<^esub>" using assms
by (auto simp add: Category.Simps FunctorComp'_def)
have "(G ## (F ## f)) maps\<^bsub>CatCod G\<^esub> (G @@ (F @@ (dom\<^bsub>CatDom F\<^esub> f))) to (G @@ (F @@ (cod\<^bsub>CatDom F\<^esub> f)))"
using assms by (auto simp add: FunctorCompMapsTo[of f F G])
moreover have "CatDom F = CatDom(F ;;: G) \<and> CatCod G = CatCod(F ;;: G) \<and> (G ## (F ## f)) = ((F ;;: G) ## f)" using assms
by (simp add: FunctorComp'_def)
moreover have "(F ;;: G) @@ (dom\<^bsub>CatDom (F ;;: G)\<^esub> f) = (G @@ (F @@ (dom\<^bsub>CatDom(F ;;: G)\<^esub> f))) \<and>
(F ;;: G) @@ (cod\<^bsub>CatDom (F ;;: G)\<^esub> f) = (G @@ (F @@ (cod\<^bsub>CatDom(F ;;: G)\<^esub> f)))"
by (auto simp add: FtorCompIdDef[of _ F G] 1 assms)
ultimately show ?thesis by auto
qed
lemma FunctorCompMapsTo3:
assumes "f maps\<^bsub>CatDom (F ;;: G)\<^esub> X to Y"
and "F \<approx>>;;; G"
and "PreFunctor (F ;;: G)"
shows "F ;;: G ## f maps\<^bsub>CatCod (F ;;: G)\<^esub> F ;;: G @@ X to F ;;: G @@ Y"
proof-
have "f \<in> mor\<^bsub>CatDom (F ;;: G)\<^esub>"
and "dom\<^bsub>CatDom (F ;;: G)\<^esub> f = X"
and "cod\<^bsub>CatDom (F ;;: G)\<^esub> f = Y" using assms by auto
thus ?thesis using assms by (auto intro: FunctorCompMapsTo2)
qed
lemma FtorCompPreFtor:
assumes "F \<approx>>;;; G"
shows "PreFunctor (F ;;: G)"
proof-
have 1: "PreFunctor G \<and> PreFunctor F" using assms by auto
show "PreFunctor (F ;;: G)" using assms
proof(auto simp add: PreFunctor_def FunctorComp'_def Category.Simps
FtorCompId[of _ F G] intro:FtorCompComp')
show "Category (CatDom F)" and "Category (CatCod G)" using assms 1 by (auto simp add: PreFunctor_def)
qed
qed
lemma FtorCompM :
assumes "F \<approx>>;;; G"
shows "FunctorM (F ;;: G)"
proof(auto simp only: FunctorM_def)
show 1: "PreFunctor (F ;;: G)" using assms by (rule FtorCompPreFtor)
{
fix X Y f assume a: "f maps\<^bsub>CatDom (F ;;: G)\<^esub> X to Y"
have "F ;;: G ## f maps\<^bsub>CatCod (F ;;: G)\<^esub> F ;;: G @@ X to F ;;: G @@ Y"
using a assms 1 by (rule FunctorCompMapsTo3)
}
thus "FunctorM_axioms (F ;;: G)"
by(auto simp add: 1 FunctorM_axioms_def)
qed
lemma FtorComp:
assumes "F \<approx>>;;; G"
shows "Functor (F ;;; G)"
proof-
have "FunctorM (F ;;: G)" using assms by (rule FtorCompM)
thus ?thesis by (simp add: FunctorComp_def MakeFtor)
qed
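(* Functors preserve isomorphisms: the image of an iso k is an iso whose
   inverse is the image of the inverse of k. *)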
lemma (in Functor) FunctorPreservesIso:
assumes "ciso\<^bsub>CatDom F\<^esub> k"
shows "ciso\<^bsub>CatCod F\<^esub> (F ## k)"
proof-
have [simp]: "k \<in> mor\<^bsub>CatDom F\<^esub>" using assms by (simp add: Category.IsoIsMor)
have "cinv\<^bsub>CatCod F\<^esub> (F ## k) (F ## (Cinv\<^bsub>CatDom F\<^esub> k))"
proof(rule Category.Inverse_relI)
show "Category (CatCod F)" by simp
show "(F ## k) \<approx>>\<^bsub>CatCod F\<^esub> (F ## (Cinv\<^bsub>CatDom F\<^esub> k))"
by (rule FunctorCompDef, simp add: Category.IsoCompInv assms)
show "(F ## k) ;;\<^bsub>CatCod F\<^esub> (F ## (Cinv\<^bsub>CatDom F\<^esub> k)) = id\<^bsub>CatCod F\<^esub> (dom\<^bsub>CatCod F\<^esub> (F ## k))"
proof-
have "(F ## k) ;;\<^bsub>CatCod F\<^esub> (F ## (Cinv\<^bsub>CatDom F\<^esub> k)) = F ## (k ;;\<^bsub>CatDom F\<^esub> (Cinv\<^bsub>CatDom F\<^esub> k))" using assms
by(auto simp add: FunctorComp Category.IsoCompInv)
also have "... = F ## (id\<^bsub>CatDom F\<^esub> (dom\<^bsub>CatDom F\<^esub> k))" using assms by (simp add: Category.IsoInvId2)
also have "... = id\<^bsub>CatCod F\<^esub> (dom\<^bsub>CatCod F\<^esub> (F ## k))" by (simp add: FunctorId3Dom)
finally show ?thesis by simp
qed
show "(F ## (Cinv\<^bsub>CatDom F\<^esub> k)) ;;\<^bsub>CatCod F\<^esub> (F ## k) = id\<^bsub>CatCod F\<^esub> (cod\<^bsub>CatCod F\<^esub> (F ## k))"
proof-
have "(F ## (Cinv\<^bsub>CatDom F\<^esub> k)) ;;\<^bsub>CatCod F\<^esub> (F ## k) = F ## ((Cinv\<^bsub>CatDom F\<^esub> k) ;;\<^bsub>CatDom F\<^esub> k)" using assms
by(auto simp add: FunctorComp Category.InvCompIso)
also have "... = F ## (id\<^bsub>CatDom F\<^esub> (cod\<^bsub>CatDom F\<^esub> k))" using assms by (simp add: Category.IsoInvId1)
also have "... = id\<^bsub>CatCod F\<^esub> (cod\<^bsub>CatCod F\<^esub> (F ## k))" using assms by (simp add: FunctorId3Cod)
finally show ?thesis by simp
qed
qed
thus ?thesis by(auto simp add: isomorphism_def)
qed
declare PreFunctor.CatDom[simp] PreFunctor.CatCod [simp]
lemma FunctorMFunctor[simp]: "Functor F \<Longrightarrow> FunctorM F"
by (simp add: Functor_def)
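(* An equivalence is here a functor that is full, faithful and
   isomorphism-dense: every object of the codomain is isomorphic to the
   image of some object of the domain. *)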
locale Equivalence = Functor +
assumes Full: "\<lbrakk>A \<in> Obj (CatDom F) ; B \<in> Obj (CatDom F) ;
h maps\<^bsub>CatCod F\<^esub> (F @@ A) to (F @@ B)\<rbrakk> \<Longrightarrow>
\<exists> f . (f maps\<^bsub>CatDom F\<^esub> A to B) \<and> (F ## f = h)"
and Faithful: "\<lbrakk>f maps\<^bsub>CatDom F\<^esub> A to B ; g maps\<^bsub>CatDom F\<^esub> A to B ; F ## f = F ## g\<rbrakk> \<Longrightarrow> f = g"
and IsoDense: "C \<in> Obj (CatCod F) \<Longrightarrow> \<exists> A \<in> Obj (CatDom F) . ObjIso (CatCod F) (F @@ A) C"
end
|
{-
Pointed structure: X ↦ X
-}
{-# OPTIONS --cubical --no-import-sorts --safe #-}
module Cubical.Structures.Relational.Pointed where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Isomorphism
open import Cubical.Foundations.Structure
open import Cubical.Foundations.RelationalStructure
open import Cubical.Foundations.Univalence
open import Cubical.Relation.ZigZag.Base
open import Cubical.HITs.SetQuotients
open import Cubical.HITs.PropositionalTruncation
open import Cubical.Structures.Pointed
private
variable
ℓ : Level
-- Structured relations
PointedRelStr : StrRel PointedStructure ℓ
PointedRelStr R = R
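-- The structured relation over pointed types is the relation itself:
-- two points are structure-related exactly when the underlying relation
-- relates them.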
pointedSuitableRel : SuitableStrRel {ℓ = ℓ} PointedStructure PointedRelStr
pointedSuitableRel .quo _ _ _ = isContrSingl _
pointedSuitableRel .symmetric _ r = r
pointedSuitableRel .transitive _ _ r r' = ∣ _ , r , r' ∣
pointedSuitableRel .set setX = setX
pointedSuitableRel .prop propR = propR
pointedRelMatchesEquiv : StrRelMatchesEquiv {ℓ = ℓ} PointedRelStr PointedEquivStr
pointedRelMatchesEquiv _ _ _ = idEquiv _
pointedRelAction : StrRelAction {ℓ = ℓ} PointedRelStr
pointedRelAction .actStr f = f
pointedRelAction .actStrId _ = refl
pointedRelAction .actRel α = α
pointedPositiveRel : PositiveStrRel {ℓ = ℓ} pointedSuitableRel
pointedPositiveRel .act = pointedRelAction
pointedPositiveRel .reflexive x = ∣ refl ∣
pointedPositiveRel .detransitive R R' rr' = rr'
pointedPositiveRel .quo R = isoToIsEquiv isom
where
open Iso
isom : Iso _ _
isom .fun = _
isom .inv q = q
isom .rightInv = elimProp (λ _ → squash/ _ _) (λ _ → refl)
isom .leftInv = elimProp (λ _ → squash/ _ _) (λ _ → refl)
|
Between 1985 and 1994, an extensive array of moored and drifting buoys was deployed across the equatorial Pacific Ocean, designed to help monitor and predict the El Niño phenomenon. Hurricane Katrina capsized a 10 m (33 ft) buoy for the first time in the history of the National Data Buoy Center (NDBC) on August 28, 2005. On June 13, 2006, drifting buoy <unk> ended its long-term data collection of sea surface temperature after transmitting for 10 years, 4 months, and 16 days, which is the longest known data collection time for any drifting buoy. The first weather buoy in the Southern Ocean was deployed by the Integrated Marine Observing System (IMOS) on March 17, 2010.
|
lemma cnj_add_mult_eq_Re: "z * cnj w + cnj z * w = 2 * Re (z * cnj w)" |
/-
Copyright (c) 2018 Kenny Lau. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Johannes Hölzl, Kenny Lau
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.algebra.module.pi
import Mathlib.algebra.big_operators.basic
import Mathlib.data.set.finite
import Mathlib.group_theory.submonoid.basic
import Mathlib.PostPort
universes u v l v₁ v₂ w u_1 u_2 u_3 u_4 u₁ x
namespace Mathlib
/-!
# Dependent functions with finite support
For a non-dependent version see `data/finsupp.lean`.
-/
namespace dfinsupp
structure pre (ι : Type u) (β : ι → Type v) [(i : ι) → HasZero (β i)] where
to_fun : (i : ι) → β i
pre_support : multiset ι
zero : ∀ (i : ι), i ∈ pre_support ∨ to_fun i = 0
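-- `pre` packages a raw function with a multiset over-approximating its support
-- (outside `pre_support` the function vanishes); `dfinsupp` below is the
-- quotient of `pre` by pointwise equality.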
protected instance inhabited_pre (ι : Type u) (β : ι → Type v) [(i : ι) → HasZero (β i)] :
Inhabited (pre ι β) :=
{ default := pre.mk (fun (i : ι) => 0) ∅ sorry }
protected instance pre.setoid (ι : Type u) (β : ι → Type v) [(i : ι) → HasZero (β i)] :
setoid (pre ι β) :=
setoid.mk (fun (x y : pre ι β) => ∀ (i : ι), pre.to_fun x i = pre.to_fun y i) sorry
end dfinsupp
/-- A dependent function `Π i, β i` with finite support. -/
def dfinsupp {ι : Type u} (β : ι → Type v) [(i : ι) → HasZero (β i)] := quotient sorry
infixl:25 " →ₚ " => Mathlib.dfinsupp
namespace dfinsupp
protected instance has_coe_to_fun {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] :
has_coe_to_fun (dfinsupp fun (i : ι) => β i) :=
has_coe_to_fun.mk (fun (_x : dfinsupp fun (i : ι) => β i) => (i : ι) → β i)
fun (f : dfinsupp fun (i : ι) => β i) => quotient.lift_on f pre.to_fun sorry
protected instance has_zero {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] :
HasZero (dfinsupp fun (i : ι) => β i) :=
{ zero := quotient.mk (pre.mk (fun (i : ι) => 0) ∅ sorry) }
protected instance inhabited {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] :
Inhabited (dfinsupp fun (i : ι) => β i) :=
{ default := 0 }
@[simp] theorem zero_apply {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] (i : ι) :
coe_fn 0 i = 0 :=
rfl
theorem ext {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)]
{f : dfinsupp fun (i : ι) => β i} {g : dfinsupp fun (i : ι) => β i}
(H : ∀ (i : ι), coe_fn f i = coe_fn g i) : f = g :=
sorry
/-- The composition of `f : β₁ → β₂` and `g : Π₀ i, β₁ i` is
`map_range f hf g : Π₀ i, β₂ i`, well defined when `f 0 = 0`. -/
def map_range {ι : Type u} {β₁ : ι → Type v₁} {β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)]
[(i : ι) → HasZero (β₂ i)] (f : (i : ι) → β₁ i → β₂ i) (hf : ∀ (i : ι), f i 0 = 0)
(g : dfinsupp fun (i : ι) => β₁ i) : dfinsupp fun (i : ι) => β₂ i :=
quotient.lift_on g
(fun (x : pre ι fun (i : ι) => β₁ i) =>
quotient.mk (pre.mk (fun (i : ι) => f i (pre.to_fun x i)) (pre.pre_support x) sorry))
sorry
@[simp] theorem map_range_apply {ι : Type u} {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
[(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)] (f : (i : ι) → β₁ i → β₂ i)
(hf : ∀ (i : ι), f i 0 = 0) (g : dfinsupp fun (i : ι) => β₁ i) (i : ι) :
coe_fn (map_range f hf g) i = f i (coe_fn g i) :=
quotient.induction_on g fun (x : pre ι fun (i : ι) => β₁ i) => rfl
/-- Let `f i` be a binary operation `β₁ i → β₂ i → β i` such that `f i 0 0 = 0`.
Then `zip_with f hf` is a binary operation `Π₀ i, β₁ i → Π₀ i, β₂ i → Π₀ i, β i`. -/
def zip_with {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] {β₁ : ι → Type v₁}
{β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)]
(f : (i : ι) → β₁ i → β₂ i → β i) (hf : ∀ (i : ι), f i 0 0 = 0)
(g₁ : dfinsupp fun (i : ι) => β₁ i) (g₂ : dfinsupp fun (i : ι) => β₂ i) :
dfinsupp fun (i : ι) => β i :=
quotient.lift_on₂ g₁ g₂
(fun (x : pre ι fun (i : ι) => β₁ i) (y : pre ι fun (i : ι) => β₂ i) =>
quotient.mk
(pre.mk (fun (i : ι) => f i (pre.to_fun x i) (pre.to_fun y i))
(pre.pre_support x + pre.pre_support y) sorry))
sorry
@[simp] theorem zip_with_apply {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)]
{β₁ : ι → Type v₁} {β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)]
(f : (i : ι) → β₁ i → β₂ i → β i) (hf : ∀ (i : ι), f i 0 0 = 0)
(g₁ : dfinsupp fun (i : ι) => β₁ i) (g₂ : dfinsupp fun (i : ι) => β₂ i) (i : ι) :
coe_fn (zip_with f hf g₁ g₂) i = f i (coe_fn g₁ i) (coe_fn g₂ i) :=
quotient.induction_on₂ g₁ g₂
fun (_x : pre ι fun (i : ι) => β₁ i) (_x_1 : pre ι fun (i : ι) => β₂ i) => rfl
protected instance has_add {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)] :
Add (dfinsupp fun (i : ι) => β i) :=
{ add := zip_with (fun (_x : ι) => Add.add) sorry }
@[simp] theorem add_apply {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)]
(g₁ : dfinsupp fun (i : ι) => β i) (g₂ : dfinsupp fun (i : ι) => β i) (i : ι) :
coe_fn (g₁ + g₂) i = coe_fn g₁ i + coe_fn g₂ i :=
zip_with_apply (fun (_x : ι) => Add.add) has_add._proof_1 g₁ g₂ i
protected instance add_monoid {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)] :
add_monoid (dfinsupp fun (i : ι) => β i) :=
add_monoid.mk Add.add sorry 0 sorry sorry
protected instance is_add_monoid_hom {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)]
{i : ι} : is_add_monoid_hom fun (g : dfinsupp fun (i : ι) => β i) => coe_fn g i :=
is_add_monoid_hom.mk (zero_apply i)
protected instance has_neg {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)] :
Neg (dfinsupp fun (i : ι) => β i) :=
{ neg := fun (f : dfinsupp fun (i : ι) => β i) => map_range (fun (_x : ι) => Neg.neg) sorry f }
protected instance add_comm_monoid {ι : Type u} {β : ι → Type v} [(i : ι) → add_comm_monoid (β i)] :
add_comm_monoid (dfinsupp fun (i : ι) => β i) :=
add_comm_monoid.mk add_monoid.add sorry add_monoid.zero sorry sorry sorry
@[simp] theorem neg_apply {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)]
(g : dfinsupp fun (i : ι) => β i) (i : ι) : coe_fn (-g) i = -coe_fn g i :=
map_range_apply (fun (_x : ι) => Neg.neg) has_neg._proof_1 g i
protected instance add_group {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)] :
add_group (dfinsupp fun (i : ι) => β i) :=
add_group.mk add_monoid.add sorry add_monoid.zero sorry sorry Neg.neg
(sub_neg_monoid.sub._default add_monoid.add sorry add_monoid.zero sorry sorry Neg.neg) sorry
@[simp] theorem sub_apply {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)]
(g₁ : dfinsupp fun (i : ι) => β i) (g₂ : dfinsupp fun (i : ι) => β i) (i : ι) :
coe_fn (g₁ - g₂) i = coe_fn g₁ i - coe_fn g₂ i :=
sorry
protected instance add_comm_group {ι : Type u} {β : ι → Type v} [(i : ι) → add_comm_group (β i)] :
add_comm_group (dfinsupp fun (i : ι) => β i) :=
add_comm_group.mk add_group.add sorry add_group.zero sorry sorry add_group.neg add_group.sub sorry
sorry
/-- Dependent functions with finite support inherit a semiring action from an action on each
coordinate. -/
protected instance has_scalar {ι : Type u} {β : ι → Type v} {γ : Type w} [semiring γ]
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)] :
has_scalar γ (dfinsupp fun (i : ι) => β i) :=
has_scalar.mk
fun (c : γ) (v : dfinsupp fun (i : ι) => β i) =>
map_range (fun (_x : ι) => has_scalar.smul c) sorry v
@[simp] theorem smul_apply {ι : Type u} {β : ι → Type v} {γ : Type w} [semiring γ]
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)] (b : γ)
(v : dfinsupp fun (i : ι) => β i) (i : ι) : coe_fn (b • v) i = b • coe_fn v i :=
map_range_apply (fun (_x : ι) => has_scalar.smul b) (has_scalar._proof_1 b) v i
/-- Dependent functions with finite support inherit a semimodule structure from such a structure on
each coordinate. -/
protected instance semimodule {ι : Type u} {β : ι → Type v} {γ : Type w} [semiring γ]
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)] :
semimodule γ (dfinsupp fun (i : ι) => β i) :=
semimodule.mk sorry sorry
/-- `filter p f` is the function which is `f i` if `p i` is true and 0 otherwise. -/
def filter {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] (p : ι → Prop) [decidable_pred p]
(f : dfinsupp fun (i : ι) => β i) : dfinsupp fun (i : ι) => β i :=
quotient.lift_on f
(fun (x : pre ι fun (i : ι) => β i) =>
quotient.mk (pre.mk (fun (i : ι) => ite (p i) (pre.to_fun x i) 0) (pre.pre_support x) sorry))
sorry
@[simp] theorem filter_apply {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] (p : ι → Prop)
[decidable_pred p] (i : ι) (f : dfinsupp fun (i : ι) => β i) :
coe_fn (filter p f) i = ite (p i) (coe_fn f i) 0 :=
quotient.induction_on f fun (x : pre ι fun (i : ι) => β i) => rfl
theorem filter_apply_pos {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] {p : ι → Prop}
[decidable_pred p] (f : dfinsupp fun (i : ι) => β i) {i : ι} (h : p i) :
coe_fn (filter p f) i = coe_fn f i :=
sorry
theorem filter_apply_neg {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] {p : ι → Prop}
[decidable_pred p] (f : dfinsupp fun (i : ι) => β i) {i : ι} (h : ¬p i) :
coe_fn (filter p f) i = 0 :=
sorry
theorem filter_pos_add_filter_neg {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)]
(f : dfinsupp fun (i : ι) => β i) (p : ι → Prop) [decidable_pred p] :
filter p f + filter (fun (i : ι) => ¬p i) f = f :=
sorry
/-- `subtype_domain p f` is the restriction of the finitely supported function
`f` to the subtype `p`. -/
def subtype_domain {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)] (p : ι → Prop)
[decidable_pred p] (f : dfinsupp fun (i : ι) => β i) : dfinsupp fun (i : Subtype p) => β ↑i :=
quotient.lift_on f
(fun (x : pre ι fun (i : ι) => β i) =>
quotient.mk
(pre.mk (fun (i : Subtype p) => pre.to_fun x ↑i)
(multiset.map
(fun (j : Subtype fun (x_1 : ι) => x_1 ∈ multiset.filter p (pre.pre_support x)) =>
{ val := ↑j, property := sorry })
(multiset.attach (multiset.filter p (pre.pre_support x))))
sorry))
sorry
@[simp] theorem subtype_domain_zero {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)]
{p : ι → Prop} [decidable_pred p] : subtype_domain p 0 = 0 :=
rfl
@[simp] theorem subtype_domain_apply {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)]
{p : ι → Prop} [decidable_pred p] {i : Subtype p} {v : dfinsupp fun (i : ι) => β i} :
coe_fn (subtype_domain p v) i = coe_fn v ↑i :=
quotient.induction_on v fun (x : pre ι fun (i : ι) => β i) => rfl
@[simp] theorem subtype_domain_add {ι : Type u} {β : ι → Type v} [(i : ι) → add_monoid (β i)]
{p : ι → Prop} [decidable_pred p] {v : dfinsupp fun (i : ι) => β i}
{v' : dfinsupp fun (i : ι) => β i} :
subtype_domain p (v + v') = subtype_domain p v + subtype_domain p v' :=
sorry
protected instance subtype_domain.is_add_monoid_hom {ι : Type u} {β : ι → Type v}
[(i : ι) → add_monoid (β i)] {p : ι → Prop} [decidable_pred p] :
is_add_monoid_hom (subtype_domain p) :=
is_add_monoid_hom.mk subtype_domain_zero
@[simp] theorem subtype_domain_neg {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)]
{p : ι → Prop} [decidable_pred p] {v : dfinsupp fun (i : ι) => β i} :
subtype_domain p (-v) = -subtype_domain p v :=
sorry
@[simp] theorem subtype_domain_sub {ι : Type u} {β : ι → Type v} [(i : ι) → add_group (β i)]
{p : ι → Prop} [decidable_pred p] {v : dfinsupp fun (i : ι) => β i}
{v' : dfinsupp fun (i : ι) => β i} :
subtype_domain p (v - v') = subtype_domain p v - subtype_domain p v' :=
sorry
theorem finite_supp {ι : Type u} {β : ι → Type v} [(i : ι) → HasZero (β i)]
(f : dfinsupp fun (i : ι) => β i) : set.finite (set_of fun (i : ι) => coe_fn f i ≠ 0) :=
sorry
/-- Create an element of `Π₀ i, β i` from a finset `s` and a function `x`
defined on this `finset`. -/
def mk {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)] (s : finset ι)
(x : (i : ↥↑s) → β ↑i) : dfinsupp fun (i : ι) => β i :=
quotient.mk
(pre.mk
(fun (i : ι) =>
dite (i ∈ s) (fun (H : i ∈ s) => x { val := i, property := H }) fun (H : ¬i ∈ s) => 0)
(finset.val s) sorry)
@[simp] theorem mk_apply {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {s : finset ι} {x : (i : ↥↑s) → β ↑i} {i : ι} :
coe_fn (mk s x) i =
dite (i ∈ s) (fun (H : i ∈ s) => x { val := i, property := H }) fun (H : ¬i ∈ s) => 0 :=
rfl
theorem mk_injective {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
(s : finset ι) : function.injective (mk s) :=
sorry
/-- The function `single i b : Π₀ i, β i` sends `i` to `b`
and all other points to `0`. -/
def single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)] (i : ι)
(b : β i) : dfinsupp fun (i : ι) => β i :=
mk (singleton i) fun (j : ↥↑(singleton i)) => eq.rec_on sorry b
@[simp] theorem single_apply {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} {i' : ι} {b : β i} :
coe_fn (single i b) i' =
dite (i = i') (fun (h : i = i') => eq.rec_on h b) fun (h : ¬i = i') => 0 :=
sorry
@[simp] theorem single_zero {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} : single i 0 = 0 :=
sorry
@[simp] theorem single_eq_same {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} {b : β i} : coe_fn (single i b) i = b :=
sorry
theorem single_eq_of_ne {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} {i' : ι} {b : β i} (h : i ≠ i') :
coe_fn (single i b) i' = 0 :=
sorry
theorem single_injective {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} : function.injective (single i) :=
sorry
/-- Like `finsupp.single_eq_single_iff`, but with a `heq` due to dependent types -/
theorem single_eq_single_iff {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] (i : ι) (j : ι) (xi : β i) (xj : β j) :
single i xi = single j xj ↔ i = j ∧ xi == xj ∨ xi = 0 ∧ xj = 0 :=
sorry
/-- Redefine `f i` to be `0`. -/
def erase {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)] (i : ι)
(f : dfinsupp fun (i : ι) => β i) : dfinsupp fun (i : ι) => β i :=
quotient.lift_on f
(fun (x : pre ι fun (i : ι) => β i) =>
quotient.mk
(pre.mk (fun (j : ι) => ite (j = i) 0 (pre.to_fun x j)) (pre.pre_support x) sorry))
sorry
@[simp] theorem erase_apply {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} {j : ι} {f : dfinsupp fun (i : ι) => β i} :
coe_fn (erase i f) j = ite (j = i) 0 (coe_fn f j) :=
quotient.induction_on f fun (x : pre ι fun (i : ι) => β i) => rfl
@[simp] theorem erase_same {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {i : ι} {f : dfinsupp fun (i : ι) => β i} :
coe_fn (erase i f) i = 0 :=
sorry
theorem erase_ne {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
{i : ι} {i' : ι} {f : dfinsupp fun (i : ι) => β i} (h : i' ≠ i) :
coe_fn (erase i f) i' = coe_fn f i' :=
sorry
@[simp] theorem single_add {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {i : ι} {b₁ : β i} {b₂ : β i} :
single i (b₁ + b₂) = single i b₁ + single i b₂ :=
sorry
/-- `dfinsupp.single` as an `add_monoid_hom`. -/
@[simp] theorem single_add_hom_apply {ι : Type u} (β : ι → Type v) [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] (i : ι) (b : β i) : coe_fn (single_add_hom β i) b = single i b :=
Eq.refl (coe_fn (single_add_hom β i) b)
theorem single_add_erase {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {i : ι} {f : dfinsupp fun (i : ι) => β i} :
single i (coe_fn f i) + erase i f = f :=
sorry
theorem erase_add_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {i : ι} {f : dfinsupp fun (i : ι) => β i} :
erase i f + single i (coe_fn f i) = f :=
sorry
protected theorem induction {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {p : (dfinsupp fun (i : ι) => β i) → Prop}
(f : dfinsupp fun (i : ι) => β i) (h0 : p 0)
(ha :
∀ (i : ι) (b : β i) (f : dfinsupp fun (i : ι) => β i),
coe_fn f i = 0 → b ≠ 0 → p f → p (single i b + f)) :
p f :=
sorry
theorem induction₂ {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → add_monoid (β i)]
{p : (dfinsupp fun (i : ι) => β i) → Prop} (f : dfinsupp fun (i : ι) => β i) (h0 : p 0)
(ha :
∀ (i : ι) (b : β i) (f : dfinsupp fun (i : ι) => β i),
coe_fn f i = 0 → b ≠ 0 → p f → p (f + single i b)) :
p f :=
sorry
@[simp] theorem add_closure_Union_range_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] :
add_submonoid.closure (set.Union fun (i : ι) => set.range (single i)) = ⊤ :=
sorry
/-- If two additive homomorphisms from `Π₀ i, β i` are equal on each `single a b`, then
they are equal. -/
theorem add_hom_ext {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → add_monoid (β i)]
{γ : Type w} [add_monoid γ] {f : (dfinsupp fun (i : ι) => β i) →+ γ}
{g : (dfinsupp fun (i : ι) => β i) →+ γ}
(H : ∀ (i : ι) (y : β i), coe_fn f (single i y) = coe_fn g (single i y)) : f = g :=
sorry
/-- If two additive homomorphisms from `Π₀ i, β i` are equal on each `single a b`, then
they are equal.
See note [partially-applied ext lemmas]. -/
theorem add_hom_ext' {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {γ : Type w} [add_monoid γ]
{f : (dfinsupp fun (i : ι) => β i) →+ γ} {g : (dfinsupp fun (i : ι) => β i) →+ γ}
(H :
∀ (x : ι),
add_monoid_hom.comp f (single_add_hom β x) = add_monoid_hom.comp g (single_add_hom β x)) :
f = g :=
add_hom_ext fun (x : ι) => add_monoid_hom.congr_fun (H x)
@[simp] theorem mk_add {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_monoid (β i)] {s : finset ι} {x : (i : ↥↑s) → β ↑i} {y : (i : ↥↑s) → β ↑i} :
mk s (x + y) = mk s x + mk s y :=
sorry
@[simp] theorem mk_zero {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] {s : finset ι} : mk s 0 = 0 :=
sorry
@[simp] theorem mk_neg {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_group (β i)] {s : finset ι} {x : (i : ↥↑s) → β (subtype.val i)} :
mk s (-x) = -mk s x :=
sorry
@[simp] theorem mk_sub {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_group (β i)] {s : finset ι} {x : (i : ↥↑s) → β (subtype.val i)}
{y : (i : ↥↑s) → β (subtype.val i)} : mk s (x - y) = mk s x - mk s y :=
sorry
protected instance mk.is_add_group_hom {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_group (β i)] {s : finset ι} : is_add_group_hom (mk s) :=
is_add_group_hom.mk
@[simp] theorem mk_smul {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] (γ : Type w)
[semiring γ] [(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)] {s : finset ι}
{c : γ} (x : (i : ↥↑s) → β (subtype.val i)) : mk s (c • x) = c • mk s x :=
sorry
@[simp] theorem single_smul {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] (γ : Type w)
[semiring γ] [(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)] {i : ι} {c : γ}
{x : β i} : single i (c • x) = c • single i x :=
sorry
/-- Set `{i | f x ≠ 0}` as a `finset`. -/
def support {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] (f : dfinsupp fun (i : ι) => β i) : finset ι :=
quotient.lift_on f
(fun (x : pre ι fun (i : ι) => β i) =>
finset.filter (fun (i : ι) => pre.to_fun x i ≠ 0) (multiset.to_finset (pre.pre_support x)))
sorry
@[simp] theorem support_mk_subset {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {s : finset ι}
{x : (i : ↥↑s) → β (subtype.val i)} : support (mk s x) ⊆ s :=
fun (i : ι) (H : i ∈ support (mk s x)) =>
iff.mp multiset.mem_to_finset (and.left (iff.mp finset.mem_filter H))
@[simp] theorem mem_support_to_fun {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
(f : dfinsupp fun (i : ι) => β i) (i : ι) : i ∈ support f ↔ coe_fn f i ≠ 0 :=
sorry
theorem eq_mk_support {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] (f : dfinsupp fun (i : ι) => β i) :
f = mk (support f) fun (i : ↥↑(support f)) => coe_fn f ↑i :=
sorry
@[simp] theorem support_zero {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] : support 0 = ∅ :=
rfl
theorem mem_support_iff {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
(f : dfinsupp fun (i : ι) => β i) (i : ι) : i ∈ support f ↔ coe_fn f i ≠ 0 :=
mem_support_to_fun f
@[simp] theorem support_eq_empty {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
{f : dfinsupp fun (i : ι) => β i} : support f = ∅ ↔ f = 0 :=
sorry
protected instance decidable_zero {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] : decidable_pred (Eq 0) :=
fun (f : dfinsupp fun (i : ι) => β i) => decidable_of_iff (support f = ∅) sorry
theorem support_subset_iff {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {s : set ι}
{f : dfinsupp fun (i : ι) => β i} : ↑(support f) ⊆ s ↔ ∀ (i : ι), ¬i ∈ s → coe_fn f i = 0 :=
sorry
theorem support_single_ne_zero {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {i : ι} {b : β i}
(hb : b ≠ 0) : support (single i b) = singleton i :=
sorry
theorem support_single_subset {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {i : ι} {b : β i} :
support (single i b) ⊆ singleton i :=
support_mk_subset
theorem map_range_def {ι : Type u} [dec : DecidableEq ι] {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
[(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)] [(i : ι) → (x : β₁ i) → Decidable (x ≠ 0)]
{f : (i : ι) → β₁ i → β₂ i} {hf : ∀ (i : ι), f i 0 = 0} {g : dfinsupp fun (i : ι) => β₁ i} :
map_range f hf g =
mk (support g) fun (i : ↥↑(support g)) => f (subtype.val i) (coe_fn g (subtype.val i)) :=
sorry
@[simp] theorem map_range_single {ι : Type u} [dec : DecidableEq ι] {β₁ : ι → Type v₁}
{β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)]
{f : (i : ι) → β₁ i → β₂ i} {hf : ∀ (i : ι), f i 0 = 0} {i : ι} {b : β₁ i} :
map_range f hf (single i b) = single i (f i b) :=
sorry
theorem support_map_range {ι : Type u} [dec : DecidableEq ι] {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
[(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)] [(i : ι) → (x : β₁ i) → Decidable (x ≠ 0)]
[(i : ι) → (x : β₂ i) → Decidable (x ≠ 0)] {f : (i : ι) → β₁ i → β₂ i}
{hf : ∀ (i : ι), f i 0 = 0} {g : dfinsupp fun (i : ι) => β₁ i} :
support (map_range f hf g) ⊆ support g :=
sorry
theorem zip_with_def {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] {β₁ : ι → Type v₁} {β₂ : ι → Type v₂}
[(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)] [(i : ι) → (x : β₁ i) → Decidable (x ≠ 0)]
[(i : ι) → (x : β₂ i) → Decidable (x ≠ 0)] {f : (i : ι) → β₁ i → β₂ i → β i}
{hf : ∀ (i : ι), f i 0 0 = 0} {g₁ : dfinsupp fun (i : ι) => β₁ i}
{g₂ : dfinsupp fun (i : ι) => β₂ i} :
zip_with f hf g₁ g₂ =
mk (support g₁ ∪ support g₂)
fun (i : ↥↑(support g₁ ∪ support g₂)) =>
f (subtype.val i) (coe_fn g₁ (subtype.val i)) (coe_fn g₂ (subtype.val i)) :=
sorry
theorem support_zip_with {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {β₁ : ι → Type v₁}
{β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)]
[(i : ι) → (x : β₁ i) → Decidable (x ≠ 0)] [(i : ι) → (x : β₂ i) → Decidable (x ≠ 0)]
{f : (i : ι) → β₁ i → β₂ i → β i} {hf : ∀ (i : ι), f i 0 0 = 0}
{g₁ : dfinsupp fun (i : ι) => β₁ i} {g₂ : dfinsupp fun (i : ι) => β₂ i} :
support (zip_with f hf g₁ g₂) ⊆ support g₁ ∪ support g₂ :=
sorry
theorem erase_def {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] (i : ι) (f : dfinsupp fun (i : ι) => β i) :
erase i f =
mk (finset.erase (support f) i)
fun (j : ↥↑(finset.erase (support f) i)) => coe_fn f (subtype.val j) :=
sorry
@[simp] theorem support_erase {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] (i : ι)
(f : dfinsupp fun (i : ι) => β i) : support (erase i f) = finset.erase (support f) i :=
sorry
theorem filter_def {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] {p : ι → Prop} [decidable_pred p]
(f : dfinsupp fun (i : ι) => β i) :
filter p f =
mk (finset.filter p (support f))
fun (i : ↥↑(finset.filter p (support f))) => coe_fn f (subtype.val i) :=
sorry
@[simp] theorem support_filter {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {p : ι → Prop}
[decidable_pred p] (f : dfinsupp fun (i : ι) => β i) :
support (filter p f) = finset.filter p (support f) :=
sorry
theorem subtype_domain_def {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {p : ι → Prop}
[decidable_pred p] (f : dfinsupp fun (i : ι) => β i) :
subtype_domain p f =
mk (finset.subtype p (support f))
fun (i : ↥↑(finset.subtype p (support f))) => coe_fn f ↑i :=
sorry
@[simp] theorem support_subtype_domain {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] {p : ι → Prop}
[decidable_pred p] {f : dfinsupp fun (i : ι) => β i} :
support (subtype_domain p f) = finset.subtype p (support f) :=
sorry
theorem support_add {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] [(i : ι) → add_monoid (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] {g₁ : dfinsupp fun (i : ι) => β i}
{g₂ : dfinsupp fun (i : ι) => β i} : support (g₁ + g₂) ⊆ support g₁ ∪ support g₂ :=
support_zip_with
@[simp] theorem support_neg {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_group (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
{f : dfinsupp fun (i : ι) => β i} : support (-f) = support f :=
sorry
theorem support_smul {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w} [semiring γ]
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → semimodule γ (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] (b : γ) (v : dfinsupp fun (i : ι) => β i) :
support (b • v) ⊆ support v :=
support_map_range
protected instance decidable_eq {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → HasZero (β i)] [(i : ι) → DecidableEq (β i)] :
DecidableEq (dfinsupp fun (i : ι) => β i) :=
fun (f g : dfinsupp fun (i : ι) => β i) =>
decidable_of_iff (support f = support g ∧ ∀ (i : ι), i ∈ support f → coe_fn f i = coe_fn g i)
sorry
-- [to_additive sum] for dfinsupp.prod doesn't work, the equation lemmas are not generated
/-- `sum f g` is the sum of `g i (f i)` over the support of `f`. -/
def sum {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w} [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ] (f : dfinsupp fun (i : ι) => β i)
(g : (i : ι) → β i → γ) : γ :=
finset.sum (support f) fun (i : ι) => g i (coe_fn f i)
/-- `prod f g` is the product of `g i (f i)` over the support of `f`. -/
def prod {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w} [(i : ι) → HasZero (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] [comm_monoid γ] (f : dfinsupp fun (i : ι) => β i)
(g : (i : ι) → β i → γ) : γ :=
finset.prod (support f) fun (i : ι) => g i (coe_fn f i)
theorem prod_map_range_index {ι : Type u} [dec : DecidableEq ι] {γ : Type w} {β₁ : ι → Type v₁}
{β₂ : ι → Type v₂} [(i : ι) → HasZero (β₁ i)] [(i : ι) → HasZero (β₂ i)]
[(i : ι) → (x : β₁ i) → Decidable (x ≠ 0)] [(i : ι) → (x : β₂ i) → Decidable (x ≠ 0)]
[comm_monoid γ] {f : (i : ι) → β₁ i → β₂ i} {hf : ∀ (i : ι), f i 0 = 0}
{g : dfinsupp fun (i : ι) => β₁ i} {h : (i : ι) → β₂ i → γ} (h0 : ∀ (i : ι), h i 0 = 1) :
prod (map_range f hf g) h = prod g fun (i : ι) (b : β₁ i) => h i (f i b) :=
sorry
theorem sum_zero_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
{h : (i : ι) → β i → γ} : sum 0 h = 0 :=
rfl
theorem sum_single_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ] {i : ι}
{b : β i} {h : (i : ι) → β i → γ} (h_zero : h i 0 = 0) : sum (single i b) h = h i b :=
sorry
theorem sum_neg_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_group (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
{g : dfinsupp fun (i : ι) => β i} {h : (i : ι) → β i → γ} (h0 : ∀ (i : ι), h i 0 = 0) :
sum (-g) h = sum g fun (i : ι) (b : β i) => h i (-b) :=
sum_map_range_index h0
theorem sum_comm {γ : Type w} {ι₁ : Type u_1} {ι₂ : Type u_2} {β₁ : ι₁ → Type u_3}
{β₂ : ι₂ → Type u_4} [DecidableEq ι₁] [DecidableEq ι₂] [(i : ι₁) → HasZero (β₁ i)]
[(i : ι₂) → HasZero (β₂ i)] [(i : ι₁) → (x : β₁ i) → Decidable (x ≠ 0)]
[(i : ι₂) → (x : β₂ i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
(f₁ : dfinsupp fun (i : ι₁) => β₁ i) (f₂ : dfinsupp fun (i : ι₂) => β₂ i)
(h : (i : ι₁) → β₁ i → (i : ι₂) → β₂ i → γ) :
(sum f₁ fun (i₁ : ι₁) (x₁ : β₁ i₁) => sum f₂ fun (i₂ : ι₂) (x₂ : β₂ i₂) => h i₁ x₁ i₂ x₂) =
sum f₂ fun (i₂ : ι₂) (x₂ : β₂ i₂) => sum f₁ fun (i₁ : ι₁) (x₁ : β₁ i₁) => h i₁ x₁ i₂ x₂ :=
finset.sum_comm
@[simp] theorem sum_apply {ι : Type u} {β : ι → Type v} {ι₁ : Type u₁} [DecidableEq ι₁]
{β₁ : ι₁ → Type v₁} [(i₁ : ι₁) → HasZero (β₁ i₁)] [(i : ι₁) → (x : β₁ i) → Decidable (x ≠ 0)]
[(i : ι) → add_comm_monoid (β i)] {f : dfinsupp fun (i₁ : ι₁) => β₁ i₁}
{g : (i₁ : ι₁) → β₁ i₁ → dfinsupp fun (i : ι) => β i} {i₂ : ι} :
coe_fn (sum f g) i₂ = sum f fun (i₁ : ι₁) (b : β₁ i₁) => coe_fn (g i₁ b) i₂ :=
Eq.symm (finset.sum_hom (support f) fun (f : dfinsupp fun (i : ι) => β i) => coe_fn f i₂)
theorem support_sum {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {ι₁ : Type u₁}
[DecidableEq ι₁] {β₁ : ι₁ → Type v₁} [(i₁ : ι₁) → HasZero (β₁ i₁)]
[(i : ι₁) → (x : β₁ i) → Decidable (x ≠ 0)] [(i : ι) → add_comm_monoid (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] {f : dfinsupp fun (i₁ : ι₁) => β₁ i₁}
{g : (i₁ : ι₁) → β₁ i₁ → dfinsupp fun (i : ι) => β i} :
support (sum f g) ⊆ finset.bUnion (support f) fun (i : ι₁) => support (g i (coe_fn f i)) :=
sorry
@[simp] theorem prod_one {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [comm_monoid γ]
{f : dfinsupp fun (i : ι) => β i} : (prod f fun (i : ι) (b : β i) => 1) = 1 :=
finset.prod_const_one
@[simp] theorem sum_add {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
{f : dfinsupp fun (i : ι) => β i} {h₁ : (i : ι) → β i → γ} {h₂ : (i : ι) → β i → γ} :
(sum f fun (i : ι) (b : β i) => h₁ i b + h₂ i b) = sum f h₁ + sum f h₂ :=
finset.sum_add_distrib
@[simp] theorem sum_neg {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_group γ]
{f : dfinsupp fun (i : ι) => β i} {h : (i : ι) → β i → γ} :
(sum f fun (i : ι) (b : β i) => -h i b) = -sum f h :=
finset.sum_hom (support f) Neg.neg
theorem prod_add_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [comm_monoid γ]
{f : dfinsupp fun (i : ι) => β i} {g : dfinsupp fun (i : ι) => β i} {h : (i : ι) → β i → γ}
(h_zero : ∀ (i : ι), h i 0 = 1)
(h_add : ∀ (i : ι) (b₁ b₂ : β i), h i (b₁ + b₂) = h i b₁ * h i b₂) :
prod (f + g) h = prod f h * prod g h :=
sorry
/--
When summing over an `add_monoid_hom`, the decidability assumption is not needed, and the result is
also an `add_monoid_hom`.
-/
def sum_add_hom {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_monoid (β i)] [add_comm_monoid γ] (φ : (i : ι) → β i →+ γ) :
(dfinsupp fun (i : ι) => β i) →+ γ :=
add_monoid_hom.mk
(fun (f : dfinsupp fun (i : ι) => β i) =>
quotient.lift_on f
(fun (x : pre ι fun (i : ι) => β i) =>
finset.sum (multiset.to_finset (pre.pre_support x))
fun (i : ι) => coe_fn (φ i) (pre.to_fun x i))
sorry)
sorry sorry
@[simp] theorem sum_add_hom_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_monoid (β i)] [add_comm_monoid γ] (φ : (i : ι) → β i →+ γ) (i : ι) (x : β i) :
coe_fn (sum_add_hom φ) (single i x) = coe_fn (φ i) x :=
sorry
@[simp] theorem sum_add_hom_comp_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
{γ : Type w} [(i : ι) → add_comm_monoid (β i)] [add_comm_monoid γ] (f : (i : ι) → β i →+ γ)
(i : ι) : add_monoid_hom.comp (sum_add_hom f) (single_add_hom β i) = f i :=
add_monoid_hom.ext fun (x : β i) => sum_add_hom_single f i x
/-- While we didn't need decidable instances to define it, we do to reduce it to a sum -/
theorem sum_add_hom_apply {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
(φ : (i : ι) → β i →+ γ) (f : dfinsupp fun (i : ι) => β i) :
coe_fn (sum_add_hom φ) f = sum f fun (x : ι) => ⇑(φ x) :=
sorry
/-- The `dfinsupp` version of `finsupp.lift_add_hom`. -/
@[simp] theorem lift_add_hom_symm_apply {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
{γ : Type w} [(i : ι) → add_monoid (β i)] [add_comm_monoid γ]
(F : (dfinsupp fun (i : ι) => β i) →+ γ) (i : ι) :
coe_fn (add_equiv.symm lift_add_hom) F i = add_monoid_hom.comp F (single_add_hom β i) :=
Eq.refl (coe_fn (add_equiv.symm lift_add_hom) F i)
/-- The `dfinsupp` version of `finsupp.lift_add_hom_single_add_hom`. -/
@[simp] theorem lift_add_hom_single_add_hom {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_comm_monoid (β i)] :
coe_fn lift_add_hom (single_add_hom β) = add_monoid_hom.id (dfinsupp fun (i : ι) => β i) :=
iff.mpr (equiv.apply_eq_iff_eq_symm_apply (add_equiv.to_equiv lift_add_hom)) rfl
/-- The `dfinsupp` version of `finsupp.lift_add_hom_apply_single`. -/
theorem lift_add_hom_apply_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [add_comm_monoid γ] (f : (i : ι) → β i →+ γ) (i : ι)
(x : β i) : coe_fn (coe_fn lift_add_hom f) (single i x) = coe_fn (f i) x :=
sorry
/-- The `dfinsupp` version of `finsupp.lift_add_hom_comp_single`. -/
theorem lift_add_hom_comp_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] [add_comm_monoid γ] (f : (i : ι) → β i →+ γ) (i : ι) :
add_monoid_hom.comp (coe_fn lift_add_hom f) (single_add_hom β i) = f i :=
sorry
/-- The `dfinsupp` version of `finsupp.comp_lift_add_hom`. -/
theorem comp_lift_add_hom {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
{δ : Type u_1} [(i : ι) → add_comm_monoid (β i)] [add_comm_monoid γ] [add_comm_monoid δ]
(g : γ →+ δ) (f : (i : ι) → β i →+ γ) :
add_monoid_hom.comp g (coe_fn lift_add_hom f) =
coe_fn lift_add_hom fun (a : ι) => add_monoid_hom.comp g (f a) :=
sorry
theorem sum_sub_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → add_comm_group (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_group γ]
{f : dfinsupp fun (i : ι) => β i} {g : dfinsupp fun (i : ι) => β i} {h : (i : ι) → β i → γ}
(h_sub : ∀ (i : ι) (b₁ b₂ : β i), h i (b₁ - b₂) = h i b₁ - h i b₂) :
sum (f - g) h = sum f h - sum g h :=
sorry
theorem sum_finset_sum_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
{α : Type x} [(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
[add_comm_monoid γ] {s : finset α} {g : α → dfinsupp fun (i : ι) => β i} {h : (i : ι) → β i → γ}
(h_zero : ∀ (i : ι), h i 0 = 0)
(h_add : ∀ (i : ι) (b₁ b₂ : β i), h i (b₁ + b₂) = h i b₁ + h i b₂) :
(finset.sum s fun (i : α) => sum (g i) h) = sum (finset.sum s fun (i : α) => g i) h :=
sorry
theorem sum_sum_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
{ι₁ : Type u₁} [DecidableEq ι₁] {β₁ : ι₁ → Type v₁} [(i₁ : ι₁) → HasZero (β₁ i₁)]
[(i : ι₁) → (x : β₁ i) → Decidable (x ≠ 0)] [(i : ι) → add_comm_monoid (β i)]
[(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
{f : dfinsupp fun (i₁ : ι₁) => β₁ i₁} {g : (i₁ : ι₁) → β₁ i₁ → dfinsupp fun (i : ι) => β i}
{h : (i : ι) → β i → γ} (h_zero : ∀ (i : ι), h i 0 = 0)
(h_add : ∀ (i : ι) (b₁ b₂ : β i), h i (b₁ + b₂) = h i b₁ + h i b₂) :
sum (sum f g) h = sum f fun (i : ι₁) (b : β₁ i) => sum (g i b) h :=
Eq.symm (sum_finset_sum_index h_zero h_add)
@[simp] theorem sum_single {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι]
[(i : ι) → add_comm_monoid (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
{f : dfinsupp fun (i : ι) => β i} : sum f single = f :=
sorry
theorem sum_subtype_domain_index {ι : Type u} {β : ι → Type v} [dec : DecidableEq ι] {γ : Type w}
[(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [add_comm_monoid γ]
{v : dfinsupp fun (i : ι) => β i} {p : ι → Prop} [decidable_pred p] {h : (i : ι) → β i → γ}
(hp : ∀ (x : ι), x ∈ support v → p x) :
(sum (subtype_domain p v) fun (i : Subtype p) (b : β ↑i) => h (↑i) b) = sum v h :=
sorry
theorem subtype_domain_sum {ι : Type u} {β : ι → Type v} {γ : Type w}
[(i : ι) → add_comm_monoid (β i)] {s : finset γ} {h : γ → dfinsupp fun (i : ι) => β i}
{p : ι → Prop} [decidable_pred p] :
subtype_domain p (finset.sum s fun (c : γ) => h c) =
finset.sum s fun (c : γ) => subtype_domain p (h c) :=
Eq.symm (finset.sum_hom s (subtype_domain p))
theorem subtype_domain_finsupp_sum {ι : Type u} {β : ι → Type v} {γ : Type w} {δ : γ → Type x}
[DecidableEq γ] [(c : γ) → HasZero (δ c)] [(c : γ) → (x : δ c) → Decidable (x ≠ 0)]
[(i : ι) → add_comm_monoid (β i)] {p : ι → Prop} [decidable_pred p]
{s : dfinsupp fun (c : γ) => δ c} {h : (c : γ) → δ c → dfinsupp fun (i : ι) => β i} :
subtype_domain p (sum s h) = sum s fun (c : γ) (d : δ c) => subtype_domain p (h c d) :=
subtype_domain_sum
end dfinsupp
/-! ### Product and sum lemmas for bundled morphisms -/
namespace monoid_hom
@[simp] theorem map_dfinsupp_prod {ι : Type u} {β : ι → Type v} [DecidableEq ι] {R : Type u_1}
{S : Type u_2} [(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)]
[comm_monoid R] [comm_monoid S] (h : R →* S) (f : dfinsupp fun (i : ι) => β i)
(g : (i : ι) → β i → R) :
coe_fn h (dfinsupp.prod f g) = dfinsupp.prod f fun (a : ι) (b : β a) => coe_fn h (g a b) :=
map_prod h (fun (i : ι) => g i (coe_fn f i)) (dfinsupp.support f)
theorem coe_dfinsupp_prod {ι : Type u} {β : ι → Type v} [DecidableEq ι] {R : Type u_1}
{S : Type u_2} [(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [monoid R]
[comm_monoid S] (f : dfinsupp fun (i : ι) => β i) (g : (i : ι) → β i → R →* S) :
⇑(dfinsupp.prod f g) = dfinsupp.prod f fun (a : ι) (b : β a) => ⇑(g a b) :=
coe_prod (fun (i : ι) => g i (coe_fn f i)) (dfinsupp.support f)
@[simp] theorem dfinsupp_prod_apply {ι : Type u} {β : ι → Type v} [DecidableEq ι] {R : Type u_1}
{S : Type u_2} [(i : ι) → HasZero (β i)] [(i : ι) → (x : β i) → Decidable (x ≠ 0)] [monoid R]
[comm_monoid S] (f : dfinsupp fun (i : ι) => β i) (g : (i : ι) → β i → R →* S) (r : R) :
coe_fn (dfinsupp.prod f g) r = dfinsupp.prod f fun (a : ι) (b : β a) => coe_fn (g a b) r :=
finset_prod_apply (fun (i : ι) => g i (coe_fn f i)) (dfinsupp.support f) r
end Mathlib |
#' communiter.
#'
#' @name communiter
#' @docType package
NULL
|
lemma real_tendsto_divide_at_top: fixes c::"real" assumes "(f \<longlongrightarrow> c) F" assumes "filterlim g at_top F" shows "((\<lambda>x. f x / g x) \<longlongrightarrow> 0) F" |
These innovative solar screens block the sun outside before it can heat up your home in Fox Lake. Here’s why they are the best choice when you want privacy, shade, and breeze protection.
Make your Fox Lake home more energy efficient by installing SunSetter Window Awnings over your sunny windows and reducing your carbon footprint this summer! |
module Data.Buffer.Ext
import public Data.Buffer
%foreign "scheme:blodwen-new-buffer"
"RefC:newBuffer"
"node:lambda:s=>Buffer.alloc(s)"
prim__newBuffer : Int -> PrimIO Buffer
export
newBuffer : HasIO io => Nat -> io Buffer
newBuffer size = primIO (Ext.prim__newBuffer $ cast size)
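-- UTF-8 conversions: the FromString and Show implementations below convert
-- between String and Buffer via backend-specific UTF-8 primitives.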
%foreign "scheme:string->utf8"
"node:lambda: s=>Buffer.from(s, 'utf8')"
ffi_BufferFromStringUTF8 : String -> Buffer
export
implementation FromString Buffer where
fromString = ffi_BufferFromStringUTF8
%foreign "scheme:utf8->string"
"node:lambda: b=>b.toString('utf8')"
ffi_BufferToStringUTF8 : Buffer -> String
export
implementation Show Buffer where
show = ffi_BufferToStringUTF8
|
If $x$ is an algebraic integer, then $-x$ is an algebraic integer. |
function d=disteusq(x,y,mode,w)
%DISTEUSQ calculate euclidean, squared euclidean or mahanalobis distance D=(X,Y,MODE,W)
%
% Inputs: X,Y Vector sets to be compared. Each row contains a data vector.
% X and Y must have the same number of columns.
%
% MODE Character string selecting the following options:
% 'x' Calculate the full distance matrix from every row of X to every row of Y
% 'd' Calculate only the distance between corresponding rows of X and Y
%              The default is 'd' if X and Y have the same number of rows, otherwise 'x'.
% 's' take the square-root of the result to give the euclidean distance.
%
% W Optional weighting matrix: the distance calculated is (x-y)*W*(x-y)'
% If W is a vector, then the matrix diag(W) is used.
%
% Output: D If MODE='d' then D is a column vector with the same number of rows as the shorter of X and Y.
% If MODE='x' then D is a matrix with the same number of rows as X and the same number of columns as Y'.
%
% Copyright (C) Mike Brookes 1998
% Version: $Id: disteusq.m 713 2011-10-16 14:45:43Z dmb $
%
% VOICEBOX is a MATLAB toolbox for speech processing.
% Home page: http://www.ee.ic.ac.uk/hp/staff/dmb/voicebox/voicebox.html
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This program is free software; you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation; either version 2 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You can obtain a copy of the GNU General Public License from
% http://www.gnu.org/copyleft/gpl.html or by writing to
% Free Software Foundation, Inc.,675 Mass Ave, Cambridge, MA 02139, USA.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
[nx,p]=size(x); ny=size(y,1);
if nargin<3 || isempty(mode), mode='0'; end   % '0' selects the default: 'd' if x and y have equally many rows, else 'x'
if any(mode=='d') | (mode~='x' & nx==ny)
% Do pairwise distance calculation
nx=min(nx,ny);
z=double(x(1:nx,:))-double(y(1:nx,:));
if nargin<4
d=sum(z.*conj(z),2);
elseif min(size(w))==1
wv=w(:).';
d=sum(z.*wv(ones(size(z,1),1),:).*conj(z),2);
else
d=sum(z*w.*conj(z),2);
end
else
% Calculate full distance matrix
if p>1
% x and y are matrices
if nargin<4
z=permute(double(x(:,:,ones(1,ny))),[1 3 2])-permute(double(y(:,:,ones(1,nx))),[3 1 2]);
d=sum(z.*conj(z),3);
else
nxy=nx*ny;
z=reshape(permute(double(x(:,:,ones(1,ny))),[1 3 2])-permute(double(y(:,:,ones(1,nx))),[3 1 2]),nxy,p);
if min(size(w))==1
wv=w(:).';
d=reshape(sum(z.*wv(ones(nxy,1),:).*conj(z),2),nx,ny);
else
d=reshape(sum(z*w.*conj(z),2),nx,ny);
end
end
else
% x and y are vectors
z=double(x(:,ones(1,ny)))-double(y(:,ones(1,nx))).';
if nargin<4
d=z.*conj(z);
else
d=w*z.*conj(z);
end
end
end
if any(mode=='s')
d=sqrt(d);
end
|
Formal statement is: lemma eventually_nhds_ball: "d > 0 \<Longrightarrow> eventually (\<lambda>x. x \<in> ball z d) (nhds z)" Informal statement is: For any $d > 0$, there exists a neighborhood of $z$ such that all points in that neighborhood are within distance $d$ of $z$. |
(* Title: HOL/Auth/n_germanSymIndex_lemma_on_inv__11.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_germanSymIndex Protocol Case Study*}
theory n_germanSymIndex_lemma_on_inv__11 imports n_germanSymIndex_base
begin
section{*All lemmas on causal relation between inv__11 and some rule r*}
lemma n_StoreVsinv__11:
assumes a1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i d where a1:"i\<le>N\<and>d\<le>N\<and>r=n_Store i d" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntE)) (eqn (IVar (Field (Para (Ident ''Cache'') p__Inv2) ''State'')) (Const E))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Field (Para (Ident ''Cache'') i) ''State'')) (Const E)) (eqn (IVar (Field (Para (Ident ''Chan2'') p__Inv2) ''Cmd'')) (Const GntE))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__0Vsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__0 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInv__part__1Vsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInv__part__1 i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendInvAckVsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendInvAck i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntSVsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendGntEVsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_SendGntE N i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P3 s"
apply (cut_tac a1 a2 b1, simp, rule_tac x="(neg (andForm (eqn (IVar (Ident ''ExGntd'')) (Const false)) (neg (eqn (IVar (Ident ''MemData'')) (IVar (Ident ''AuxData''))))))" in exI, auto) done
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntSVsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntS i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_RecvGntEVsinv__11:
assumes a1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)" (is "?P1 s \<or> ?P2 s \<or> ?P3 s")
proof -
from a1 obtain i where a1:"i\<le>N\<and>r=n_RecvGntE i" apply fastforce done
from a2 obtain p__Inv2 where a2:"p__Inv2\<le>N\<and>f=inv__11 p__Inv2" apply fastforce done
have "(i=p__Inv2)\<or>(i~=p__Inv2)" apply (cut_tac a1 a2, auto) done
moreover {
assume b1: "(i=p__Inv2)"
have "?P1 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
moreover {
assume b1: "(i~=p__Inv2)"
have "?P2 s"
proof(cut_tac a1 a2 b1, auto) qed
then have "invHoldForRule s f r (invariants N)" by auto
}
ultimately show "invHoldForRule s f r (invariants N)" by satx
qed
lemma n_SendReqE__part__1Vsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvInvAckVsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvInvAck i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqEVsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqE N i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqE__part__0Vsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_SendReqSVsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_SendReqS i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
lemma n_RecvReqSVsinv__11:
assumes a1: "\<exists> i. i\<le>N\<and>r=n_RecvReqS N i" and
a2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__11 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
apply (rule noEffectOnRule, cut_tac a1 a2, auto) done
end
|
lemma seq_monosub: fixes s :: "nat \<Rightarrow> 'a::linorder" shows "\<exists>f. strict_mono f \<and> monoseq (\<lambda>n. (s (f n)))" |
If $f$ and $g$ are bijections from $A$ to $B$ and $a \in A$, and if $f(x) = g(x)$ for all $x \in A$ with $x \neq a$, then $f(a) = g(a)$. |
{-# OPTIONS --copatterns --sized-types #-}
module Streams where
open import Size
open import Function
open import Relation.Binary
open import Relation.Binary.PropositionalEquality as P
open ≡-Reasoning
open import Data.List using (List; module List; []; _∷_; _++_; length)
open import Data.Nat using (ℕ; zero; suc)
open import Data.Product using (_,_; _×_; proj₁; proj₂)
-- | Streams (with size annotations to ease definitions).
record Stream {i : Size} (A : Set) : Set where
coinductive
constructor _∷_
field
hd : A
tl : ∀ {j : Size< i} → Stream {j} A
open Stream public
-- | Stream equality is bisimilarity
record _~_ {A : Set} (s t : Stream A) : Set where
coinductive
field
hd~ : hd s ≡ hd t
tl~ : (tl s) ~ (tl t)
open _~_ public
-- | Functoriality
map : ∀ {i A B} (f : A → B) (s : Stream {i} A) → Stream {i} B
hd (map f s) = f (hd s)
tl (map {i} f s) {j} = map {j} f (tl s {j})
-- | Coalgebra structure
str-out : ∀{A} → Stream A → A × Stream A
str-out s = (hd s , tl s)
-- | (Weak) Finality
corec : ∀ {A X : Set} → (X → A × X) → (∀ {i} → X → Stream {i} A)
hd (corec f x) = proj₁ (f x)
tl (corec f x) = corec f (proj₂ (f x))
-- | Direct access to elements
_<_> : ∀{A} → Stream A → ℕ → A
s < 0 > = hd s
s < suc n > = (tl s) < n >
-- | Correctness of bisimlarity
~→pw-equal : ∀{A} {s t : Stream A} →
s ~ t → ∀ n → s < n > ≡ t < n >
~→pw-equal p zero = hd~ p
~→pw-equal p (suc n) = ~→pw-equal (tl~ p) n
-- | The usual definition of a bisimulation on streams.
Is-Bisim : ∀{A} → Rel (Stream A) _ → Set
Is-Bisim _R_ = ∀ x y → x R y → hd x ≡ hd y × (tl x) R (tl y)
-- | If R is a bisimulation then all elements related by R are bisimilar.
ex-bisimulation→bisim : ∀{A R} → Is-Bisim {A} R → ∀ {x y} → R x y → x ~ y
hd~ (ex-bisimulation→bisim p {x} {y} xRy) = proj₁ (p x y xRy)
tl~ (ex-bisimulation→bisim p {x} {y} xRy) =
ex-bisimulation→bisim p (proj₂ (p x y xRy))
-- | Generalised bisimulations between arbitrary stream coalgebras.
Is-Bisim' : ∀{A X Y : Set} → (c : X → A × X) (d : Y → A × Y) → REL X Y _ → Set
Is-Bisim' c d _R_ = ∀ x y → x R y →
proj₁ (c x) ≡ proj₁ (d y) ×
(proj₂ (c x)) R (proj₂ (d y))
ex-bisimulation→bisim' : ∀{A X Y R c d} → Is-Bisim' {A} {X} {Y} c d R →
∀ {x y} → R x y → corec c x ~ corec d y
hd~ (ex-bisimulation→bisim' p {x} {y} xRy) = proj₁ (p x y xRy)
tl~ (ex-bisimulation→bisim' p {x} {y} xRy) =
ex-bisimulation→bisim' p (proj₂ (p x y xRy))
~trans : ∀{A} {r s t : Stream A} → r ~ s → s ~ t → r ~ t
hd~ (~trans p q) = trans (hd~ p) (hd~ q)
tl~ (~trans p q) = ~trans (tl~ p) (tl~ q)
~sym : ∀{A} {s t : Stream A} → s ~ t → t ~ s
hd~ (~sym p) = sym (hd~ p)
tl~ (~sym p) = ~sym (tl~ p)
|
from abc import ABC, abstractmethod
import numpy as np
class BasicTransform(ABC):
@staticmethod
def check_images(images):
for im in images:
if not (isinstance(im, np.ndarray)):
raise TypeError
@abstractmethod
def transform(self, images):
pass
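# Minimal illustrative subclass (an added sketch, not part of the original
# module): it validates its inputs via check_images and then applies a
# horizontal flip to every image.
class HorizontalFlip(BasicTransform):
    def transform(self, images):
        self.check_images(images)
        return [np.fliplr(im) for im in images]

# Usage sketch: HorizontalFlip().transform([np.zeros((4, 4))])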
|
module Mock2
# add dependencies here
using DelimitedFiles, ForwardDiff, TickTock
# functions to export
export addone, addtwo, my_f, my_f_der
function addone(x)
return x+1
end
function addtwo(x)
return x+2
end
my_f(x)=x^2-5x+1;
my_f_der(x)=ForwardDiff.derivative(my_f,x)
end
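# Example usage (an added sketch for illustration, not part of the original module):
#   using .Mock2
#   addone(1)       # -> 2
#   addtwo(1)       # -> 3
#   my_f(3.0)       # -> 3.0^2 - 5*3.0 + 1 = -5.0
#   my_f_der(3.0)   # -> 2*3.0 - 5 = 1.0 (computed via ForwardDiff)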
|
[STATEMENT]
lemma [simp]: "a \<noteq> b \<Longrightarrow> p a < mktop p b b"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. a \<noteq> b \<Longrightarrow> p a < mktop p b b
[PROOF STEP]
by(simp add:mktop_def finite_alt) |
(* Title: HOL/Auth/n_german_lemma_inv__37_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_inv__37_on_rules imports n_german_lemma_on_inv__37
begin
section{*All lemmas on causal relation between inv__37*}
lemma lemma_inv__37_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv1 p__Inv2. p__Inv1\<le>N\<and>p__Inv2\<le>N\<and>p__Inv1~=p__Inv2\<and>f=inv__37 p__Inv1 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__37) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__37) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
lemma eventually_at: "eventually P (at a within S) \<longleftrightarrow> (\<exists>d>0. \<forall>x\<in>S. x \<noteq> a \<and> dist x a < d \<longrightarrow> P x)" for a :: "'a :: metric_space" |
The Roll End Front Tuck (REFT) with Dust Flaps is our #1 most popular mailer style. Great brand real estate and presentation value offer your customers the ultimate unboxing experience. This is the box of choice for subscription box brands and for eCommerce order fulfillment. This style is quick to assemble and easy to pack during fulfillment.
Front flaps lock into the main body to add structural strength and security, while side flaps prevent dirt and dust from reaching your product inside.
Make your brand pop: print outside and inside your box!
• Made from up to 50% recycled material & 100% Recyclable!
Received them in good condition and customer service was great!
Quality was better than expected. Very happy with product.
I was a little skeptical about ordering from an online source. However, I needed boxes to be fulfilled as soon as possible. I am so glad that I ordered! The boxes came out so great, and the quality is outstanding. I will be ordering another batch very soon!
These boxes are awesome. The printing could be a little crisper, the white doesn't really *pop* off the black, to give it the truly high-end look I was going for. But at this price, and made in america, you can't complain. 5 stars. Will buy again. Also, the user interface to place the order is so helpful, it is the reason I chose to buy from fantastapack instead of elsewhere.
Amazing all around product. Good quality, amazing printing quality. Ordered a small quantity as a sample and immediately ordered a much larger order. My only major concern, which I understand isn’t because of this company, was the delivery. When I received the boxes, every box had a hole pierced through it, damaged from shipping.
Section Z is a side-view shoot-'em-up by Capcom originally released as a coin-operated video game in 1985. A home version was released for the Nintendo Entertainment System in 1987. The original coin-op game is included in Capcom Classics Collection Vol. 1 for PlayStation 2 and Xbox.
Set in an unspecified year during the third millennium, the player takes the role of an astronaut sent to infiltrate and destroy a space station orbiting near the Earth that is actually the secret base of an alien empire known as the 'Balangool'.
The player maneuvers the space ranger throughout the enemy space station, which consists of 26 alphabetically ordered corridors known as 'sections', starting from Section A and ending at the titular Section Z. The game is divided into five stages, each consisting of five sections (with the final stage having six sections to accommodate the spare letter). The player will fight an enemy boss at the end of each stage, concluding with the final battle against the 'L Brain' at Section Z. The game's stages alternate between horizontal-scrolling stages (left or right) and upward vertical-scrolling stages. The player will lose a life every time they get hit by enemy fire. Additional lives can be obtained if the player achieves a high enough score. The game will be over if the player loses all of their lives, although a chance to continue will be provided.
The controls consist of an eight-way joystick and two buttons, one for shooting and the other for changing the character's aim to the left or right. This allows the player to shoot in one direction while moving in the opposite direction. The player shoots their rifle towards the left or the right with the fire button, while dropping bombs to the ground at the same time. By destroying certain dome-shaped containers, the player can obtain one of three power-up items represented by letters: S, P, and B. S increases the player's movement speed, while P increases their firing power; each can be improved by up to three increments. However, these power-ups will be lost whenever the player loses a life. The B item simply gives the player bonus points.
A console version of Section Z was released for the Nintendo Entertainment System in North America and Europe in 1987. The gameplay was altered for the NES port, particularly in its level design and game controls.
The NES version features three stages, each comprising 20 sections (for a total of 60 sections), which are full-fledged levels in this version, although not all sections need to be visited to complete the game. In contrast to the alphabetical sections in the arcade version, the sections in the NES version are numbered instead. The first stage begins at Section 00, which is the outer perimeter of the space station and then proceeds to enter Section 01, the first actual corridor of the base. At the end of most sections, the player will choose to enter one of two transporters, each taking player to a different section of the base. Some transporters will take the player to a previous section while others cannot be opened until the player has fulfilled a certain condition and will kill the player if they try to enter. The objective of each stage is to destroy the two power generators located at different sections of each stage in order to open the path to the final section of each stage (Sections 19, 39, and 59 in that order) and face the boss.
Instead of using one button to fire and the other to turn around, the player now uses one button to fire in one direction and the other to fire in the opposite direction. The player starts with a standard-issue laser rifle and can upgrade their weapon into a Flash Buster (which fires medium-range bullets in three directions) or a Megasmasher (which fires wider V-shaped laser beams). Either weapon can be upgraded into the Mega Buster, which provides both wider and longer-range shots in three directions. The player can also obtain a Barrier Shield that will protect the player character for 32 shots.
The player also has a numeric indicator representing their remaining energy, which is gradually depleted when the player takes damage from enemy bullets or loses a life by touching an enemy. However, the player can still continue at the beginning of their current section, even if they lose all of their lives, as long as they still have energy left. When all energy is lost, the player is sent back to the start of the stage with full energy to start over. The maximum energy can be increased by destroying the power generators in each stage. The player can also summon a special transmission shell by pressing both buttons simultaneously. These include a Mega Missile, a Flash Bomb, and a Crush Ball. Picking up and using these shells costs the player four energy points.
The Famicom version in Japan was released in Disk Card format for the Family Computer Disk System add-on. In contrast to its NES counterpart, which requires the player to complete the game in one sitting, the Disk System version allowed players to save their progress in one of three save files. After a game over, the player can quit the game and resume where they left off by loading the save file. The English localization of the game's manual identifies the main character as 'Captain Commando', a fictional spokesman for Capcom USA who later starred in his own video game.
[STATEMENT]
lemma nres_relI[refine]: "c \<le>\<Down>R a \<Longrightarrow> (c,a)\<in>\<langle>R\<rangle>nres_rel"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<le> \<Down> R a \<Longrightarrow> (c, a) \<in> \<langle>R\<rangle>nres_rel
[PROOF STEP]
by (simp add: nres_rel_def) |
PROGRAM TaskThree
! Midpoint (second-order Runge-Kutta) integration of the oscillator
! dx/dt = v, dv/dt = -5*x, compared against the exact solution 0.1*cos(sqrt(5)*t).
IMPLICIT NONE
REAL, Dimension(:), Allocatable:: X, XExact, XError, V, T
REAL:: h, XDash, XDashMid, XMid, VDash, VDashMid, VMid, LargestError
INTEGER:: i, npts
npts=1000
Allocate(T(0:npts), X(0:npts), XExact(0:npts), XError(0:npts), V(0:npts))
X(0)=0.1; V(0)=0.0; T(0)=0.0; h=0.01 !Setting initial boundary conditions
XExact(0)=X(0); XError(0)=0.0
OPEN(10, FILE='Results.dat')
DO i=1,npts
! Slopes at the start of the step
XDash = V(i-1)
VDash = -5.0*X(i-1)
! Half-step (midpoint) estimates
XMid = X(i-1)+(0.5*h*XDash)
VMid = V(i-1)+(0.5*h*VDash)
! Slopes evaluated at the midpoint
XDashMid = VMid
VDashMid = -5.0*XMid
! Full step using the midpoint slopes
X(i) = X(i-1)+h*XDashMid
V(i) = V(i-1)+h*VDashMid
T(i) = T(i-1)+h
XExact(i) = 0.1*Cos(SQRT(5.0)*T(i))
XError(i) = ABS(X(i)-XExact(i))
WRITE(6,*) T(i), X(i), XExact(i)
WRITE(10,*) T(i), X(i), XExact(i)
END DO
LargestError = MAXVAL(XError)
WRITE(6,*) 'Largest absolute difference between exact and approximated solution is:', LargestError
CLOSE(10)
DEALLOCATE(T, X, XExact, XError, V)
END PROGRAM TaskThree
|
= U2 concert in Sarajevo =
|
I use Screwfix for my DIY needs and also for work. Great pricing and very easy to order online for click and collect. I've only had a few issues with finding items and have had to call the customer services team who were very helpful and friendly.
Click and collect for DIY items. Value for money not bad compared to high street DIY chains, such as B&Q. Ordered online and was ready to collect in a few minutes. Website is OK although it tends to push the bestselling lines even though it might not be what you want.
Used Screwfix not too long ago to buy a tiny little attachment for a radiator. I went with a product name on a bit of paper. The staff were nothing but helpful with me and my lack of radiator knowledge!
Website easy to use then ordered online for click & collect from my nearest store which saved time.
Easy to use online for click & collect or delivery. Just as easy to use in store with great friendly service.
First time using Screwfix & managed to get a bargain too.. using the click & collect service was very fast & sufficient! Very pleased, and staff brought my item out to the car for me as I had my little girl! Great all round!
Purchase was faulty; took it back and it was replaced, no problem and no quibble.
I use Screwfix on a regular basis. I love the fact you can reserve online, then pop into store and pick up. Items always in stock and am greeted with a friendly smile.
Got a great vacuum for our shop from here. Excellent service had to return to fix wheel and so helpful. |
Formal statement is: lemma interior_eq: "interior S = S \<longleftrightarrow> open S" Informal statement is: A set $S$ is open if and only if its interior is equal to $S$. |
If $f$ is holomorphic on a punctured neighborhood of $z$, then the residue of $-f$ at $z$ is the negative of the residue of $f$ at $z$. |
(* Title: HOL/Auth/n_german_lemma_inv__32_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_german Protocol Case Study*}
theory n_german_lemma_inv__32_on_rules imports n_german_lemma_on_inv__32
begin
section{*All lemmas on causal relation between inv__32*}
lemma lemma_inv__32_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv2. p__Inv2\<le>N\<and>f=inv__32 p__Inv2)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)\<or>
(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i d. i\<le>N\<and>d\<le>N\<and>r=n_Store i d)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_StoreVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqSVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__0Vsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendReqE__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendReqE__part__1Vsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqS N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqSVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvReqE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvReqEVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__0 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__0Vsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInv__part__1 i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInv__part__1Vsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendInvAckVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvInvAck i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvInvAckVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntSVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_SendGntE N i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_SendGntEVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntS i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntSVsinv__32) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_RecvGntE i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_RecvGntEVsinv__32) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
_B_dct1 := n -> let(j:=Ind(n), VStack(
RowVec(diagDirsum(FList(TReal, [2,1]), fConst(TReal, n-2, 0.0))),
HStack(ColVec(fConst(TReal, n-3, 0.0)),
RowTensor(n-3, 1, Mat([[1,1]])),
ColVec(fConst(TReal, n-3, 0.0))),
RowVec(diagDirsum(fConst(TReal, n-2, 0.0), FList(TReal, [1, 2]))),
RowVec(Lambda(j, cond(neq(imod(j, 2),0), -1, 1)))
));
RulesFor(DCT1, rec(
#F DCT1_Base2: (base case for size 2)
#F
#F DCT1_2 = F_2 (scaled)
#F
DCT1_Base2 := rec(
info := "DCT_1 -> F_2",
forTransposition := false,
isApplicable := P -> P[1]=2,
allChildren := P -> [[]],
rule := (P, C) -> F(2)
),
#F DCT1_Base4: DCT1_4 = (M tensor F_2) ^ P
#F
DCT1_Base4 := rec(
info := "DCT_1 -> (M tensor F_2)^P",
forTransposition := false,
isApplicable := P -> P[1]=4,
allChildren := P -> [[]],
rule := (P, C) ->
Tensor(Mat([[1, 1], [1, -1/2]]), F(2)) ^ Perm((2,4), 4)
),
#F DCT1_DCT1and3: 1984
#F
#F DCT1_(n+1) = perm * (DCT1_(n/2+1) dirsum DCT3_n/2 ^ perm) *
#F (1 tensor DFT_2)^perm
#F
#F Wang:
#F Fast Algorithms for the Discrete W Transform and the
#F Discrete Fourier Transform.
#F IEEE Trans. on ASSP, 1984, pp. 803--814
#F
DCT1_DCT1and3 := rec (
info := "DCT1_n --> DCT1_n/2, DCT3_n/2",
isApplicable := P -> (P[1] - 1) mod 2 = 0,
allChildren := P ->
When(P[1]=3, [[ DCT1(2) ]],
[[ DCT1((P[1]+1)/2), DCT3((P[1]-1)/2) ]]),
rule := (P, C) ->
When(P[1]=3,
perm2(3) *
DirectSum(C[1], I(1)) *
DirectSum(I(1), F(2)) ^ perm4(3),
perm2(P[1]) *
DirectSum(C[1], C[2] ^ J((P[1]-1)/2)) *
DirectSum(I(1), Tensor(I((P[1]-1)/2), F(2))) ^ perm4(P[1]))
),
DCT1_toDCT2 := rec(
isApplicable := P -> P[1] > 5,
allChildren := P -> [[DCT2(P[1]-1)]],
rule := (P, C) -> let(
n := P[1], j := Ind(n-1), nn := n-1,
DirectSum(Diag(Lambda(j, 1/(2*cospi(fdiv(j, 2*nn))))) *
C[1], I(1)) *
_B_dct1(n)
)
)
));
|
If $f$ and $g$ are two nested sequences of real numbers such that $f_n \leq g_n$ for all $n$ and $\lim_{n \to \infty} (f_n - g_n) = 0$, then there exists a real number $l$ such that $\lim_{n \to \infty} f_n = l = \lim_{n \to \infty} g_n$. |
If $x \in U$, then the connected component of $x$ in $U$ is a component of $U$. |
-- We import all of Lean's standard tactics
import tactic
/-!
# Logic
We will develop the basic theory of the following five basic logical symbols
* `→` ("implies" -- type with `\l`)
* `¬` ("not" -- type with `\not` or `\n`)
* `∧` ("and" -- type with `\and` or `\an`)
* `↔` ("iff" -- type with `\iff` or `\lr`)
* `∨` ("or" -- type with `\or` or `\v`)
# Tactics you will need to know
* `intro`
* `exact`
* `apply`
* `rw`
* `cases`
* `split`
* `left`
* `right`
See `README.md` in `src/week_1` for an explanation of what these
tactics do.
Note that there are plenty of other tactics, and indeed once you've
"got the hang of it" you might want to try tactics such as `cc`,
`tauto` and its variant `tauto!`, `finish`, and `library_search`.
# What to do
The `example`s are to demonstrate things to you. They sometimes
use tactics you don't know. You can look at them but you don't
need to touch them.
The `theorem`s and `lemma`s are things which have no proof. You need to change
the `sorry`s into proofs which Lean will accept.
This paragraph is a comment, by the way. One-line comments
are preceded with `--`.
-/
-- We work in a "namespace". All this means is that whenever it
-- looks like we've defined a new theorem called `id`, its full
-- name is `xena.id`. Which is good because `id` is already
-- defined in Lean.
namespace xena
-- Throughout this namespace, P Q and R will be arbitrary (variable)
-- true-false statements.
variables (P Q R : Prop)
/-!
## implies (→)
To prove the theorems in this section, you will need to know about
the tactics `intro`, `apply` and `exact`. You might also like
the `assumption` tactic.
-/
/-- Every proposition implies itself. -/
theorem id : P → P :=
begin
-- let hP be a proof of P
intro hP,
-- then hP is a proof of P!
exact hP
end
/-
Note that → isn't associative!
Try working out `false → (false → false) and (false → false) → false
-/
example : (false → (false → false)) ↔ true := by simp
example : ((false → false) → false) ↔ false := by simp
-- in Lean, `P → Q → R` is _defined_ to mean `P → (Q → R)`
-- Here's a proof of what I just said.
example : (P → Q → R) ↔ (P → (Q → R)) :=
begin
-- look at the goal!
refl -- true because ↔ is reflexive
end
theorem imp_intro : P → Q → P :=
begin
-- remember that by definition the goal is P → (Q → P),
-- so it's P implies something, so let's assume
-- that P is true and call this hypothesis hP.
intro hP,
-- Now we have to prove that Q implies P, so let's
-- assume that Q is true, and let's call this hypothesis hQ
intro hQ,
-- We now have to prove that P is true.
-- But this is exactly our hypothesis hP.
exact hP,
end
/-- If we know `P`, and we also know `P → Q`, we can deduce `Q`. -/
lemma modus_ponens : P → (P → Q) → Q :=
begin
-- remember this means "P implies that ((P implies Q) implies Q)"
-- so let's assume P is true
intro hP,
-- and let's assume hypothesis hPQ, that P implies Q
intro hPQ,
-- now `hPQ` says `P → Q` and we're trying to prove `Q`!
-- So by applying the hypothesis `hPQ`, we can reduce
-- this puzzle to proving `P`.
apply hPQ,
-- Now we have to prove `P`. But this is just an assumption
exact hP, -- or `assumption`
end
/-- implication is transitive -/
lemma imp_trans : (P → Q) → (Q → R) → (P → R) :=
begin
-- intros will let you intro many things at once
intros hPQ hQR hP,
-- The goal is now `⊢ R`, and `hQR : Q → R` so `apply hQR` reduces the goal to `Q`
apply hQR,
-- similarly `apply hPQ` reduces the goal to `P`.
apply hPQ,
-- We are kind of proving this result backwards! We already have
-- a proof of P.
exact hP
end
-- This one is a "relative modus ponens" -- in the
-- presence of P, if Q -> R and Q then R.
lemma forall_imp : (P → Q → R) → (P → Q) → (P → R) :=
begin
-- Let `hPQR` be the hypothesis that `P → Q → R`.
intro hPQR,
-- We now need to prove that `(P → Q)` implies something.
-- So let `hPQ` be hypothesis that `P → Q`
intro hPQ,
-- We now need to prove that `P` implies something, so
-- let `hP` be the hypothesis that `P` is true.
intro hP,
-- We now have to prove `R`.
-- We know the hypothesis `hPQR : P → (Q → R)`.
-- If you think about this, it's the same as `(P ∧ Q) → R`
-- So perhaps it's not surprising that after
apply hPQR,
-- we now have two goals!
-- The first goal is just to prove P, and this is an assumption
exact hP,
-- The number of goals is just one again.
-- the remaining goal is to prove `Q`.
-- But recall that `hPQ` is the hypothesis that `P` implies `Q`
-- so by applying it,
apply hPQ,
-- we change our goal to proving `P`. And this is a hypothesis
exact hP,
end
/-
### not
`not P`, with notation `¬ P`, is *defined* to mean `P → false` in Lean,
i.e., the proposition that P implies false. You can easily check with
a truth table that P → false and ¬ P are equivalent.
We develop a basic interface for `¬`.
-/
-- I'll prove this one for you
theorem not_def : ¬ P ↔ (P → false) :=
begin
-- true by definition
refl
end
theorem not_not_intro : P → ¬ (¬ P) :=
begin
intro hP,
-- You can use `rw not_def` to change `¬ X` into `X → false`.
rw not_def, rw not_def, -- but it's not necessary really,
intro hnP,
apply hnP,
exact hP,
end
-- Here is a funny alternative proof! Can you work out how it works?
example : P → ¬ (¬ P) :=
begin
apply modus_ponens,
end
-- Here is a proof which does not use tactics at all, but uses lambda calculus.
-- It is called a "term mode" proof. We will not be discussing term mode
-- much in this course. It is a cool way to do basic logic proofs, but
-- it does not scale well in practice.
example : P → ¬ (¬ P) :=
λ hP hnP, hnP hP
-- This is "modus tollens". Some mathematicians think of it as
-- "proof by contradiction".
theorem modus_tollens : (P → Q) → (¬ Q → ¬ P) :=
begin
-- this is (P → Q) → (Q → false) → (P → false) so we can just...
apply imp_trans,
end
-- This one cannot be proved using constructive mathematics!
-- You _have_ to use a tactic like `by_contra` (or, if you're happy
-- to cheat, the full "truth table" tactic `tauto!`.
-- Try it without using these, and you'll get stuck!
theorem double_negation_elimination : ¬ (¬ P) → P :=
begin
intro hnnP,
by_contra h,
-- hnnP is ¬ P → false, and the goal is ⊢ false, so we can do this
apply hnnP,
exact h,
end
/-!
### and
The hypothesis `hPaQ : P ∧ Q` in Lean, is equivalent to
hypotheses `hP : P` and `hQ : Q`.
If you have `hPaQ` as a hypothesis, and you want to get to
`hP` and `hQ`, you can use the `cases` tactic.
If you have `⊢ P ∧ Q` as a goal, and want to turn the goal
into two goals `⊢ P` and `⊢ Q`, then use the `split` tactic.
Note that after `split` it's good etiquette to use braces
e.g.
example (hP : P) (hQ : Q) : P ∧ Q :=
begin
split,
{ exact hP },
{ exact hQ }
end
but for this sort of stuff I think principled indentation
is OK
```
example (hP : P) (hQ : Q) : P ∧ Q :=
begin
split,
exact hP,
exact hQ
end
```
-/
theorem and.elim_left : P ∧ Q → P :=
begin
intro hPaQ,
cases hPaQ with hP hQ,
exact hP,
end
theorem and.elim_right : P ∧ Q → Q :=
begin
intro hPaQ,
-- here's a shortcut
exact hPaQ.2, -- if `h : P ∧ Q` then `h.1 : P` and `h.2 : Q`
end
-- fancy term mode proof
example : P ∧ Q → Q := λ hPaQ, hPaQ.2
theorem and.intro : P → Q → P ∧ Q :=
begin
intros hP hQ,
split,
{ assumption },
{ assumption }
end
/-- the eliminator for `∧` -/
theorem and.elim : P ∧ Q → (P → Q → R) → R :=
begin
-- `rintro` does `intro` and `cases` in one go
rintro ⟨hP, hQ⟩ hPQR,
-- hPQR is a function, so we can give it some inputs
exact hPQR hP hQ,
end
/-- The recursor for `∧` -/
theorem and.rec : (P → Q → R) → P ∧ Q → R :=
begin
rintro hPQR ⟨hP, hQ⟩,
exact hPQR hP hQ,
end
/-- `∧` is symmetric -/
theorem and.symm : P ∧ Q → Q ∧ P :=
begin
-- see how quickly we can do this using ⟨_, _⟩
rintro ⟨hP, hQ⟩,
exact ⟨hQ, hP⟩
end
-- term mode proof
example : P ∧ Q → Q ∧ P :=
λ ⟨hP, hQ⟩, ⟨hQ, hP⟩
/-- `∧` is transitive -/
theorem and.trans : (P ∧ Q) → (Q ∧ R) → (P ∧ R) :=
begin
rintro ⟨hP, hQ⟩ ⟨hQ', hR⟩,
exact ⟨hP, hR⟩,
end
/-
Recall that the convention for the implies sign →
is that it is _right associative_, by which
I mean that `P → Q → R` means `P → (Q → R)` by definition.
Now note that if `P` implies `Q → R`
then this means that `P` and `Q` together, imply `R`,
so `P → Q → R` is logically equivalent to `(P ∧ Q) → R`.
We proved that `P → Q → R` implied `(P ∧ Q) → R`; this was `and.rec`.
Let's go the other way.
-/
lemma imp_imp_of_and_imp : ((P ∧ Q) → R) → (P → Q → R) :=
begin
intros h hP hQ,
exact h ⟨hP, hQ⟩
end
/-!
### iff
The basic theory of `iff`.
In Lean, to prove `P ∧ Q` you have to prove `P` and `Q`.
Similarly, to prove `P ↔ Q` in Lean, you have to prove `P → Q`
and `Q → P`. Just like `∧`, you can uses `cases h` if you have
a hypothesis `h : P ↔ Q`, and `split` if you have a goal `⊢ P ↔ Q`.
-/
/-- `P ↔ P` is true for all propositions `P`, i.e. `↔` is reflexive. -/
theorem iff.refl : P ↔ P :=
begin
split,
-- recall that we already proved `id : P → P`
{ apply id },
{ apply id }
end
-- If you get stuck, there is always the "truth table" tactic `tauto!`
example : P ↔ P :=
begin
tauto!, -- the "truth table" tactic.
end
-- refl tactic also works
example : P ↔ P :=
begin
refl -- `refl` knows that `=` and `↔` are reflexive.
end
/-- `↔` is symmetric -/
theorem iff.symm : (P ↔ Q) → (Q ↔ P) :=
begin
intro h,
/-
h: P ↔ Q
⊢ Q ↔ P
-/
rw h,
-- This changes the goal to `Q ↔ Q`, which is automatically closed by `refl`
-- because `rw` tries a cheeky `refl` after every invocation, just to see
-- if it closes the goal
end
-- show-off term mode proof
example : (P ↔ Q) → (Q ↔ P) :=
λ ⟨hPQ, hQP⟩, ⟨hQP, hPQ⟩
/-- `↔` is commutative -/
theorem iff.comm : (P ↔ Q) ↔ (Q ↔ P) :=
begin
split,
{ apply iff.symm },
{ apply iff.symm }
end
-- without rw or cc this is painful!
/-- `↔` is transitive -/
theorem iff.trans : (P ↔ Q) → (Q ↔ R) → (P ↔ R) :=
begin
intros hPQ hQR,
-- ⊢ P ↔ R
rw hPQ,
-- ⊢ Q ↔ R
exact hQR,
end
-- This can be done constructively, but it's hard. You'll need to know
-- about the `have` tactic to do it. Alternatively the truth table
-- tactic `tauto!` will do it.
theorem iff.boss : ¬ (P ↔ ¬ P) :=
begin
rintro ⟨h1, h2⟩,
have hnP : ¬ P,
{ intro hP,
exact h1 hP hP,
},
have hP : P := h2 hnP,
exact hnP hP,
end
-- Now we have iff we can go back to and.
/-!
### ↔ and ∧
-/
/-- `∧` is commutative -/
theorem and.comm : P ∧ Q ↔ Q ∧ P :=
begin
split; -- semicolon means "apply next tactic to all goals generated by this one"
apply and.symm,
end
-- fancy term-mode proof
example : P ∧ Q ↔ Q ∧ P :=
⟨and.symm _ _, and.symm _ _⟩
-- Note that ∧ is "right associative" in Lean, which means
-- that `P ∧ Q ∧ R` is _defined to mean_ `P ∧ (Q ∧ R)`.
-- Associativity can hence be written like this:
/-- `∧` is associative -/
theorem and_assoc : ((P ∧ Q) ∧ R) ↔ (P ∧ Q ∧ R) :=
begin
split,
{ rintro ⟨⟨hP, hQ⟩, hR⟩,
exact ⟨hP, hQ, hR⟩ },
{ rintro ⟨hP, hQ, hR⟩,
exact ⟨⟨hP, hQ⟩, hR⟩ },
end
/-!
## Or
`P ∨ Q` is true when at least one of `P` and `Q` are true.
Here is how to work with `∨` in Lean.
If you have a hypothesis `hPoQ : P ∨ Q` then you
can break into the two cases `hP : P` and `hQ : Q` using
`cases hPoQ with hP hQ`
If you have a _goal_ of the form `⊢ P ∨ Q` then you
need to decide whether you're going to prove `P` or `Q`.
If you want to prove `P` then use the `left` tactic,
and if you want to prove `Q` then use the `right` tactic.
-/
-- recall that P, Q, R are Propositions. We'll need S for this one.
variable (S : Prop)
-- You will need to use the `left `tactic for this one.
theorem or.intro_left : P → P ∨ Q :=
begin
intro P,
left,
-- `assumption` means `exact <choose the correct hypothesis>`
assumption,
end
theorem or.intro_right : Q → P ∨ Q :=
begin
intro Q,
right,
assumption,
end
/-- the eliminator for `∨`. -/
theorem or.elim : P ∨ Q → (P → R) → (Q → R) → R :=
begin
intros hPoQ hPR hQR,
cases hPoQ with hP hQ,
{ exact hPR hP },
{ exact hQR hQ }
end
/-- `∨` is symmetric -/
theorem or.symm : P ∨ Q → Q ∨ P :=
begin
intro hPoQ,
cases hPoQ with hP hQ,
{ right, assumption },
{ left, assumption }
end
/-- `∨` is commutative -/
theorem or.comm : P ∨ Q ↔ Q ∨ P :=
begin
split; -- note semicolon
apply or.symm,
end
/-- `∨` is associative -/
theorem or.assoc : (P ∨ Q) ∨ R ↔ P ∨ Q ∨ R :=
begin
split,
{ -- rintro can do intro+cases in one go
rintro ((hP | hQ) | hR),
{ left, assumption },
{ right, left, assumption },
{ right, right, assumption } },
{ rintro (hP | hQ | hR),
{ left, left, assumption },
{ left, right, assumption },
{ right, assumption } }
end
/-!
### More about → and ∨
-/
theorem or.imp : (P → R) → (Q → S) → P ∨ Q → R ∨ S :=
begin
rintro hPR hQS (hP | hQ),
{ left, exact hPR hP },
{ right, exact hQS hQ }
end
theorem or.imp_left : (P → Q) → P ∨ R → Q ∨ R :=
begin
rintro hPQ (hP | hR),
{ left, exact hPQ hP },
{ right, assumption },
end
theorem or.imp_right : (P → Q) → R ∨ P → R ∨ Q :=
begin
-- reduce to previous lemma
rw or.comm R,
rw or.comm R,
apply or.imp_left,
end
theorem or.left_comm : P ∨ Q ∨ R ↔ Q ∨ P ∨ R :=
begin
rw [or.comm P, or.assoc, or.comm R],
end
/-- the recursor for `∨` -/
theorem or.rec : (P → R) → (Q → R) → P ∨ Q → R :=
begin
intros hPR hQR hPoQ,
exact or.elim _ _ _ hPoQ hPR hQR,
end
theorem or_congr : (P ↔ R) → (Q ↔ S) → (P ∨ Q ↔ R ∨ S) :=
begin
rintro hPR hQS,
rw [hPR, hQS],
end
/-!
### true and false
`true` is a true-false statement, which can be proved with the `trivial` tactic.
`false` is a true-false statment which can only be proved if you manage
to find a contradiction within your assumptions.
If you manage to end up with a hypothesis `h : false` then there's quite
a funny way to proceed, which we now explain.
If you have `h : P ∧ Q` then you can uses `cases h with hP hQ` to split
into two cases.
If you have `h : false` then what do you think happens if we do `cases h`?
Hint: how many cases are there?
-/
/-- eliminator for `false` -/
theorem false.elim : false → P :=
begin
intro h,
cases h,
end
theorem and_true_iff : P ∧ true ↔ P :=
begin
split,
{ rintro ⟨hP, -⟩,
exact hP },
{ intro hP,
split,
{ exact hP },
{ trivial } }
end
theorem or_false_iff : P ∨ false ↔ P :=
begin
split,
{ rintro (hP | h),
{ assumption },
{ cases h} },
{ intro hP,
left,
exact hP }
end
-- false.elim is handy for this one
theorem or.resolve_left : P ∨ Q → ¬P → Q :=
begin
rintro (hP | hQ) hnP,
{ apply false.elim,
exact hnP hP },
{ exact hQ },
end
-- this one you can't do constructively
theorem or_iff_not_imp_left : P ∨ Q ↔ ¬P → Q :=
begin
split,
{ apply or.resolve_left },
{ intro hnPQ,
-- TODO : document this tactic
by_cases h : P,
{ left, assumption },
{ right, exact hnPQ h} }
end
end xena
|
--import week_8.ideas.Z1
import week_8.ideas.distrub_mul_action_hom.Z1 -- do I need all of this?
import week_8.ideas.Z1
/-
# A crash course in H¹(G,M)
We stick to the conventions that `G` is a group (or even
a monoid, we never use inversion) and that `M` is a `G`-module,
that is, an additive abelian group with a `G`-action.
The quotient group `H1 G M`, written `H¹(G,M)` by mathematicians,
is the main definition in this file. It's the quotient
of `Z1 G M` by the range of a certain map from `M` called
the coboundary map.
The first two theorems we shall prove about it here
are that it is functorial (i.e. a map `φ : M →+[G] N` gives
rise to a map `φ.H1 : H1 G M →+ H1 G N`), and exact in the
middle (i.e. if `0 → M → N → P → 0` is a short exact sequence
of `G`-modules then the sequence `H1 G M →+ H1 G N →+ H1 G P`
is exact).
Further work would be to verify "inf-res", otherwise known
as the beginning of the long exact
sequence of terms of low degree in the Hochschild-Serre
spectral sequence for group cohomology (i.e.
`0 → H¹(G/N, Aᴺ) → H¹(G, A) → H¹(N, A)` ) and of course one
could go on to define n-cocycles and n-coboundaries (please
get in touch if you're interested in doing this -- I have
ideas about how to set it all up) and to
construct the Hochschild-Serre spectral sequence itself.
I have no doubt that these kinds of results could be turned
into a research paper.
Let's start with a definition of `H1 G M`.
-/
section cochain_map
variables (G M : Type) [monoid G] [add_comm_group M]
[distrib_mul_action G M]
def cochain_map : M →+ Z1 G M :=
{ to_fun := λ m, { to_fun := λ g, g • m - m, is_cocycle' := begin
simp [mul_smul, smul_sub],
end},
map_zero' := begin
ext g,
simp,
end,
map_add' := begin
intros,
ext g,
simp,
abel,
end }
@[simp] lemma cochain_map_apply (m : M) (g : G) :
cochain_map G M m g = g • m - m := rfl
end cochain_map
-- question : do we have cokernels? If A B are abelian groups and
-- `f : A → B` is a group hom, how do I make the type coker f`
-- Lean has inbuilt quotients of additive abelian groups by subgroups
@[derive add_comm_group]
def H1 (G M : Type) [monoid G] [add_comm_group M]
[distrib_mul_action G M] : Type :=
quotient_add_group.quotient ((cochain_map G M).range)
section quotient_stuff
variables {G M : Type} [monoid G] [add_comm_group M]
[distrib_mul_action G M]
def Z1.quotient : Z1 G M →+ H1 G M :=
quotient_add_group.mk' _
lemma H1.ker_quotient : (Z1.quotient).ker = (cochain_map G M).range :=
quotient_add_group.ker_mk _
end quotient_stuff
namespace H1
variables {G M : Type} [monoid G] [add_comm_group M]
[distrib_mul_action G M]
@[elab_as_eliminator]
def induction_on {p : H1 G M → Prop}
(IH : ∀ z : Z1 G M, p (z.quotient)) (h : H1 G M) : p h :=
quot.induction_on h IH
end H1
/-
We have just defined `H1 G M` as a quotient group, and told Lean
to figure out (or "derive") the obvious abelian group structure
on it, which it did.
What we need to do now is to show that if `φ : M →+[G] N` is a `G`-module
hom then `φ` induces a map `H1 G M → H1 G N`. To prove this we will
need to figure out how to define maps from and to quotient group structures.
Just like last week, this is simply a matter of learning the API for the
definition `quotient_add_group.quotient`.
TODO -- make the definition
-/
namespace distrib_mul_action_hom
local notation `Z1'` := _root_.Z1
open add_subgroup
variables {G M N : Type}
[monoid G] [add_comm_group M] [add_comm_group N]
[distrib_mul_action G M] [distrib_mul_action G N]
def H1 (φ : M →+[G] N) : H1 G M →+ H1 G N :=
-- We use `quotient_add_group.map` to define this map
-- by saying that it is a descent of the map `φ.Z1_hom`
quotient_add_group.map ((cochain_map G M).range) ((cochain_map G N).range)
φ.Z1
-- We now have to supply the proof that the map on cocycles induces
-- a map on cohomology, i.e. that it sends coboundaries to coboundaries
begin
rintro ⟨c, hc⟩ ⟨m, hm⟩,
use φ m,
ext g,
simp [← hm],
end
def H1_spec (φ : M →+[G] N) (f : Z1' G M) :
φ.H1 (f.quotient) = (φ.Z1 f).quotient := rfl
-- why isn't this there??
-- ask in Zulip
instance : add_comm_group (M →+[G] N) :=
{ add := λ a b, ({ to_fun := λ m, a m + b m,
map_smul' := by simp,
map_zero' := by simp,
map_add' := by { intros, simp [map_add], abel} }),
add_assoc := sorry,--by {intros, ext m, simp only [add_comm_group.add], dsimp, },--abel}, -- missing coe_add simp lemma?
zero := { to_fun := λ m, 0,
map_smul' := by simp,
map_zero' := by simp,
map_add' := by simp },
zero_add := sorry,
add_zero := sorry,
neg := λ a, { to_fun := λ m, -(a m),
map_smul' := by simp,
map_zero' := by simp,
map_add' := by { intros, simp [map_neg], abel} },
add_left_neg := sorry,
add_comm := sorry }
-- API for instance
@[simp] lemma zero_val (m : M) : (0 : M →+[G] N) m = 0 := rfl
end distrib_mul_action_hom
section exactness
variables {G M N P : Type}
[monoid G]
[add_comm_group M] [distrib_mul_action G M]
[add_comm_group N] [distrib_mul_action G N]
[add_comm_group P] [distrib_mul_action G P]
(φ : M →+[G] N) (z : Z1 G M)
example (α β : Type) [setoid α] [setoid β]
(f : α → β) (h : (has_equiv.equiv ⇒ has_equiv.equiv) f f) (a : α) :
quotient.map f h (⟦a⟧) = ⟦f a⟧ := quotient.map_mk f h a
lemma Z1.H1_map_mk (φ : M →+[G] N) : φ.H1 (z.quotient) =
(φ.Z1 z).quotient :=
rfl
open function
open add_monoid_hom
-- right now will work around with sets
theorem H1_hom_middle_exact (φ : M →+[G] N)
(ψ : N →+[G] P) (hse : is_short_exact φ ψ) :
φ.H1.range = ψ.H1.ker :=
begin
-- need to prove a range is a kernel,
ext k,
-- let k be a cohomology class, an element of H^1(G,N).
-- we're supposed to be proving that we're in a range
-- if and only if we're in a kernel
-- I can't see how to do it directly with rewrites
-- so I'm going to split
split,
{ rintro ⟨x, rfl⟩,
refine x.induction_on _, clear x,
intros z,
-- should have some map commutes lemma
rw Z1.H1_map_mk,
rw mem_ker,
rw Z1.H1_map_mk,
convert Z1.quotient.map_zero,
rw φ.map_comp,
have hψφ : ψ.comp φ = (0 : M →+[G] P),
{ ext m,
simp * at * },
rw ← mem_ker,
rw hψφ,
sorry
-- rw is_exact_def' at he,
-- rw mem_ker,
-- refl
},
{ -- this is the trickier way. What is the maths proof?
/-
I use set theory language
Say k ∈ H¹(N) and H¹(ψ)(k) = 0. Need to find c in H¹(M) such that
H¹(φ)(c) = k. Lift k to a cocycle x in Z¹(N). Need to pull back
x along φ. Can modify x by a coboundary to get λ g, x g + g n - n
So let's see if we can turn "H¹(ψ)(k)=0" into the existence
of a magic `n0` such that ∀ g, ψ (x g + g n0 - n0) = 0
-/
rw mem_ker,
-- why isn't this better
refine H1.induction_on _ k, clear k,
intros z hz,
rw Z1.H1_map_mk at hz,
rw ← mem_ker at hz,
-- rw Z1.ker_quotient at hz,
-- cases hz with y hy,
-- rw Z1.ext_iff at hy,
-- simp_rw cochain_map_apply at hy,
-- simp_rw ψ.Z1_spec at hy,
-- rw is_exact_def at he,
-- cases hψ y with x hx,
-- let w : G → N := λ g, z g - (g • x - x),
-- have crucial : ∀ (g : G), ψ (z g - (g • x - x)) = 0,
-- { simp [hy, hx] },
-- note "crucial" above
rw H1.ker_quotient at hz,
cases hz with p hp,
rw Z1.ext_iff at hp,
simp at hp, -- yes
rw mem_range,
/-
∃ (x : H1 G M), ⇑(φ.H1) x = ⇑Z1.quotient z -/
--refine ⟨_, _⟩, swap,
-- ⊢ ⇑(φ.H1) ?m_1 = ⇑Z1.quotient z
-- Idea : I want to apply the universal property
-- for H¹(φ) but this is of the form
refine ⟨Z1.quotient (_ : Z1 G M), _⟩, swap, --working backwards
{ rw Z1.H1_map_mk,
sorry },
-- this next goal might randomly disappear, it's not a prop
-- it's `⊢ H1 G M`
sorry }
end
end exactness
#where
#check H1.induction_on
/-
H1.induction_on : ∀ {G M : Type} [_inst_1 : monoid G]
[_inst_2 : add_comm_group M] [_inst_3 : distrib_mul_action G M]
{p : H1 G M → Prop},
(∀ (z : Z1 G M), p (⇑Z1.quotient z)) → ∀ (h : H1 G M), p h
-/
|
-- ---------------------------------------------------------------- [ Edda.idr ]
-- Module : Edda.idr
-- Copyright : (c) Jan de Muijnck-Hughes
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
module Text.Markup.Edda
import public Text.Markup.Edda.Model
import public Text.Markup.Edda.Walk
import public Text.Markup.Edda.Query
-- --------------------------------------------------------------------- [ EOF ]
|
#!/usr/bin/env python
"""Example for a linear classifier using a perceptron and the delta rule."""
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt
import numpy as np
class Perceptron(object):
def __init__(self, eta=0.01, epochs=50):
"""
Single perceptron unit.
Credit to Sebastian Raschka:
http://sebastianraschka.com/Articles/2015_singlelayer_neurons.html
This was slightly modified.
"""
self.eta = eta
self.epochs = epochs
def fit(self, X, y):
self.w_ = np.zeros(1 + X.shape[1])
self.errors_ = []
for _ in range(self.epochs):
errors = 0
for xi, target in zip(X, y):
update = self.eta * (target - self.predict(xi))
self.w_[1:] += update * xi
self.w_[0] += update
errors += int(update != 0.0)
self.errors_.append(errors)
return self
def net_input(self, X):
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
return np.where(self.net_input(X) >= 0.0, 1, -1)
# Generate data (make_blobs returns labels 0/1; remap to -1/+1 to match predict)
X, target = make_blobs(random_state=0, centers=2, cluster_std=0.5)
target = np.where(target == 0, -1, 1)
# Fit 1-layer perceptron
f = Perceptron(epochs=100)
f.fit(X, target)
xs = np.linspace(start=min(X[:, 0]), stop=max(X[:, 0]))
print(f.w_)
plt.plot(xs, [-(f.w_[0] + f.w_[1] * xi) / f.w_[2] for xi in xs], 'r--')
# Plot it
plt.gray()
_ = plt.scatter(X[:, 0], X[:, 1], c=target)
plt.show()
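# Illustrative sanity check (an added sketch, not part of the original example):
# fraction of training points classified correctly; for these well-separated
# blobs it should reach 1.0.
print("training accuracy:", np.mean(f.predict(X) == target))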
|