\section{Query String}\label{query-string}
\begin{Shaded}
\begin{Highlighting}[]
\NormalTok{Stability: }\DecValTok{3} \NormalTok{- Stable}
\end{Highlighting}
\end{Shaded}
This module provides utilities for dealing with query strings. It
provides the following methods:
\subsection{querystring.stringify(obj{[}, sep{]}{[}, eq{]}{[},
options{]})}\label{querystring.stringifyobj-sep-eq-options}
Serialize an object to a query string. Optionally override the default
separator (\texttt{\textquotesingle{}\&\textquotesingle{}}) and
assignment (\texttt{\textquotesingle{}=\textquotesingle{}}) characters.
The options object may contain an \texttt{encodeURIComponent} property
(\texttt{querystring.escape} by default); it can be used to encode
strings in a \texttt{non-utf8} encoding if necessary.
Example:
\begin{Shaded}
\begin{Highlighting}[]
\OtherTok{querystring}\NormalTok{.}\FunctionTok{stringify}\NormalTok{(\{ }\DataTypeTok{foo}\NormalTok{: }\StringTok{'bar'}\NormalTok{, }\DataTypeTok{baz}\NormalTok{: [}\StringTok{'qux'}\NormalTok{, }\StringTok{'quux'}\NormalTok{], }\DataTypeTok{corge}\NormalTok{: }\StringTok{''} \NormalTok{\})}
\CommentTok{// returns}
\StringTok{'foo=bar&baz=qux&baz=quux&corge='}
\OtherTok{querystring}\NormalTok{.}\FunctionTok{stringify}\NormalTok{(\{}\DataTypeTok{foo}\NormalTok{: }\StringTok{'bar'}\NormalTok{, }\DataTypeTok{baz}\NormalTok{: }\StringTok{'qux'}\NormalTok{\}, }\StringTok{';'}\NormalTok{, }\StringTok{':'}\NormalTok{)}
\CommentTok{// returns}
\StringTok{'foo:bar;baz:qux'}
\CommentTok{// Suppose gbkEncodeURIComponent function already exists,}
\CommentTok{// it can encode string with `gbk` encoding}
\OtherTok{querystring}\NormalTok{.}\FunctionTok{stringify}\NormalTok{(\{ }\DataTypeTok{w}\NormalTok{: }\StringTok{'中文'}\NormalTok{, }\DataTypeTok{foo}\NormalTok{: }\StringTok{'bar'} \NormalTok{\}, }\KeywordTok{null}\NormalTok{, }\KeywordTok{null}\NormalTok{,}
\NormalTok{\{ }\DataTypeTok{encodeURIComponent}\NormalTok{: gbkEncodeURIComponent \})}
\CommentTok{// returns}
\StringTok{'w=%D6%D0%CE%C4&foo=bar'}
\end{Highlighting}
\end{Shaded}
\subsection{querystring.parse(str{[}, sep{]}{[}, eq{]}{[},
options{]})}\label{querystring.parsestr-sep-eq-options}
Deserialize a query string to an object. Optionally override the default
separator (\texttt{\textquotesingle{}\&\textquotesingle{}}) and
assignment (\texttt{\textquotesingle{}=\textquotesingle{}}) characters.
The options object may contain a \texttt{maxKeys} property (1000 by
default); it limits the number of keys processed. Set it to 0 to remove
the key count limitation.
The options object may contain a \texttt{decodeURIComponent} property
(\texttt{querystring.unescape} by default); it can be used to decode a
\texttt{non-utf8} encoded string if necessary.
Example:
\begin{Shaded}
\begin{Highlighting}[]
\OtherTok{querystring}\NormalTok{.}\FunctionTok{parse}\NormalTok{(}\StringTok{'foo=bar&baz=qux&baz=quux&corge'}\NormalTok{)}
\CommentTok{// returns}
\NormalTok{\{ }\DataTypeTok{foo}\NormalTok{: }\StringTok{'bar'}\NormalTok{, }\DataTypeTok{baz}\NormalTok{: [}\StringTok{'qux'}\NormalTok{, }\StringTok{'quux'}\NormalTok{], }\DataTypeTok{corge}\NormalTok{: }\StringTok{''} \NormalTok{\}}
\CommentTok{// Suppose gbkDecodeURIComponent function already exists,}
\CommentTok{// it can decode `gbk` encoding string}
\OtherTok{querystring}\NormalTok{.}\FunctionTok{parse}\NormalTok{(}\StringTok{'w=%D6%D0%CE%C4&foo=bar'}\NormalTok{, }\KeywordTok{null}\NormalTok{, }\KeywordTok{null}\NormalTok{,}
\NormalTok{\{ }\DataTypeTok{decodeURIComponent}\NormalTok{: gbkDecodeURIComponent \})}
\CommentTok{// returns}
\NormalTok{\{ }\DataTypeTok{w}\NormalTok{: }\StringTok{'中文'}\NormalTok{, }\DataTypeTok{foo}\NormalTok{: }\StringTok{'bar'} \NormalTok{\}}
\end{Highlighting}
\end{Shaded}
\subsection{querystring.escape}\label{querystring.escape}
The escape function used by \texttt{querystring.stringify}, provided so
that it can be overridden if necessary.
\subsection{querystring.unescape}\label{querystring.unescape}
The unescape function used by \texttt{querystring.parse}, provided so
that it can be overridden if necessary.
It will first try to use \texttt{decodeURIComponent}, but if that fails
it falls back to a safer equivalent that doesn't throw on malformed
URLs.
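For example, that documented fallback behaviour can be re-implemented by
hand when overriding the function (a minimal illustrative sketch, not
the actual implementation):
\begin{Shaded}
\begin{Highlighting}[]
\CommentTok{// Illustrative sketch: a tolerant unescape that returns the raw}
\CommentTok{// string instead of throwing on malformed input}
\OtherTok{querystring}\NormalTok{.}\FunctionTok{unescape} \NormalTok{= }\KeywordTok{function}\NormalTok{(str) \{}
\NormalTok{  }\KeywordTok{try} \NormalTok{\{ }\KeywordTok{return} \FunctionTok{decodeURIComponent}\NormalTok{(str); \}}
\NormalTok{  }\KeywordTok{catch} \NormalTok{(e) \{ }\KeywordTok{return} \NormalTok{str; \}}
\NormalTok{\};}
\end{Highlighting}
\end{Shaded}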
|
## @knitr kprats-cph-np
require(rms)
options(prType='latex')
group <- c(rep('Group 1',19),rep('Group 2',21))
group <- factor(group)
dd <- datadist(group); options(datadist='dd')
days <-
c(143,164,188,188,190,192,206,209,213,216,220,227,230,
234,246,265,304,216,244,142,156,163,198,205,232,232,
233,233,233,233,239,240,261,280,280,296,296,323,204,344)
death <- rep(1,40)
death[c(18,19,39,40)] <- 0
units(days) <- 'Day'
df <- data.frame(days, death, group)
S <- Surv(days, death)
f <- npsurv(S ~ group, type='fleming')
for(meth in c('exact', 'breslow', 'efron')) {
g <- cph(S ~ group, method=meth, surv=TRUE, x=TRUE, y=TRUE)
# print(g) to see results
}
f.exp <- psm(S ~ group, dist='exponential')
fw <- psm(S ~ group, dist='weibull')
phform <- pphsm(fw)
co <- gray(c(0, .8))
survplot(f, lty=c(1, 1), lwd=c(1, 3), col=co,
label.curves=FALSE, conf='none')
survplot(g, lty=c(3, 3), lwd=c(1, 3), col=co, # Efron approx.
add=TRUE, label.curves=FALSE, conf.type='none')
legend(c(2, 160), c(.38, .54),
c('Nonparametric Estimates', 'Cox-Breslow Estimates'),
lty=c(1, 3), cex=.8, bty='n')
legend(c(2, 160), c(.18, .34), cex=.8,
c('Group 1', 'Group 2'), lwd=c(1,3), col=co, bty='n')
## @knitr kprats-cumhaz-ratio
f <- cph(S ~ strat(group), surv=TRUE)
# For both strata, eval. S(t) at combined set of death times
times <- sort(unique(days[death == 1]))
est <- survest(f, data.frame(group=levels(group)),
times=times, conf.type="none")$surv
cumhaz <- - log(est)
plot(times, cumhaz[2,] / cumhaz[1,], xlab="Days",
ylab="Cumulative Hazard Ratio", type="s")
abline(h=1, col=gray(.80))
## @knitr kprats-hazard-ratios
hazard.ratio.plot(g$x, g$y, e=12, pr=TRUE, legendloc='none')
## @knitr km-age-sex
n <- 2000
set.seed(3)
age <- 50 + 12 * rnorm(n)
label(age) <- 'Age'
sex <- factor(1 + (runif(n) <= .4), 1:2, c('Male', 'Female'))
cens <- 15 * runif(n)
h <- .02 * exp(.04 * (age - 50) + .8 * (sex == 'Female'))
ft <- -log(runif(n)) / h
e <- ifelse(ft <= cens, 1, 0)
print(table(e))
ft <- pmin(ft, cens)
units(ft) <- 'Year'
Srv <- Surv(ft, e)
age.dec <- cut2(age, g=10, levels.mean=TRUE)
label(age.dec) <- 'Age'
dd <- datadist(age, sex, age.dec); options(datadist='dd')
f.np <- cph(Srv ~ strat(age.dec) + strat(sex), surv=TRUE)
# surv=TRUE speeds up computations, and confidence limits when
# there are no covariables are still accurate.
p <- Predict(f.np, age.dec, sex, time=3, loglog=TRUE)
# Treat age.dec as a numeric variable (means within deciles)
p$age.dec <- as.numeric(as.character(p$age.dec))
ggplot(p, ylim=c(-5, -.5))
## @knitr spline-age-sex-noia
f.noia <- cph(Srv ~ rcs(age,4) + strat(sex), x=TRUE, y=TRUE)
# Get accurate C.L. for any age by specifying x=TRUE y=TRUE
# Note: for evaluating shape of regression, we would not
# ordinarily bother to get 3-year survival probabilities -
# would just use X * beta
# We do so here to use same scale as nonparametric estimates
w <- latex(f.noia, file='f.noia.tex', inline=TRUE, digits=3)
print(anova(f.noia), size='normalsize')
p <- Predict(f.noia, age, sex, time=3, loglog=TRUE)
ggplot(p, ylim=c(-5, -.5))
## @knitr spline-age-sex-ia
f.ia <- cph(Srv ~ rcs(age,4) * strat(sex), x=TRUE, y=TRUE,
surv=TRUE)
w <- latex(f.ia, file='f.ia.tex', inline=TRUE, digits=3)
print(anova(f.ia), size='normalsize')
p <- Predict(f.ia, age, sex, time=3, loglog=TRUE)
ggplot(p, ylim=c(-5, -.5))
## @knitr spline-age-sex-ia-surv
p <- Predict(f.ia, age, sex, time=3)
ggplot(p)
## @knitr spline-age-sex-ia-nomogram
surv <- Survival(f.ia)
surv.f <- function(lp) surv(3, lp, stratum='sex=Female')
surv.m <- function(lp) surv(3, lp, stratum='sex=Male')
quant <- Quantile(f.ia)
med.f <- function(lp) quant(.5, lp, stratum='sex=Female')
med.m <- function(lp) quant(.5, lp, stratum='sex=Male')
at.surv <- c(.01, .05, seq(.1,.9,by=.1), .95, .98, .99, .999)
at.med <- c(0, .5, 1, 1.5, seq(2, 14, by=2))
n <- nomogram(f.ia, fun=list(surv.m, surv.f, med.m,med.f),
funlabel=c('S(3 | Male)','S(3 | Female)',
'Median (Male)','Median (Female)'),
fun.at=list(c(.8,.9,.95,.98,.99),
c(.1,.3,.5,.7,.8,.9,.95,.98),
c(8,10,12),c(1,2,4,8,12)))
plot(n, col.grid=FALSE, lmgp=.2) # Fig. (*\ref{fig:cox-spline-age-sex-ia-nomogram}*)
latex(f.ia, digits=3)
## @knitr ef-spline
# acath2 <- sas.get('.','acath2')
if(FALSE) {
acath2 <- subset(acath2, ejfx == trunc(ejfx)) # non-imputed values
acath2 <- upData(acath2,
lvef = ejfx / 100,
labels=c(lvef = 'LVEF'))
nk <- 3
d <- datadist(acath2); options(datadist='d')
with(acath2,
rcspline.plot(lvef, d.time, model='cox', event=cdeath,
main='', statloc=c(.42, -1), nk=nk,
ylim=c(-4, -.9)))
if(nk == 3) print(cph(Surv(d.time, cdeath) ~ pmin(lvef,.5), data=acath2))
}
## @knitr ef.martingale
if(FALSE) {
cox <- cph(Surv(d.time,cdeath) ~ lvef, data=acath2, iter.max=0)
res <- resid(cox)
g <- loess(res ~ lvef, data=acath2)
plot(g, coverage=0.95, confidence=7, xlab='LVEF', ylab='Martingale Residual')
g <- ols(res ~ rcs(lvef, 5), data=acath2)
plot(Predict(g, lvef)) # not added to previous plot as really shown
with(acath2, {
s <- ! is.na(res + lvef)
lines(lowess(lvef[s], res[s], iter=0), lty=3)
## lowess doesn't handle NAs
})
legend(.20, 1.15, c('loess Fit and 0.95 Confidence Bars',
'ols Spline Fit and 0.95 Confidence Limits',
'lowess Smoother'), lty=1:3, bty='n', cex=cex.legend)
box()
}
## @knitr valung-ratios
getHdata(valung)
with(valung, {
hazard.ratio.plot(1 * (cell == 'Squamous'), Surv(t, dead),
e=25, subset=cell != 'Large',
pr=TRUE, pl=FALSE)
hazard.ratio.plot(1 * kps, Surv(t, dead), e=25,
pr=TRUE, pl=FALSE) })
## @knitr rel-random
n <- 200
p <- 20
set.seed(6)
xx <- matrix(rnorm(n * p), nrow=n, ncol=p)
y <- runif(n)
units(y) <- "Year"
e <- c(rep(0, n / 2), rep(1, n / 2))
f <- cph(Surv(y, e) ~ xx, x=TRUE, y=TRUE,
time.inc=.5, surv=TRUE)
cal <- calibrate(f, u=.5, B=200)
plot(cal, ylim=c(.4, 1), subtitles=FALSE)
calkm <- calibrate(f, u=.5, m=40, cmethod='KM', B=200)
plot(calkm, add=TRUE) # Figure (*\ref{fig:cox-rel-random}*)
## @knitr val-random
latex(validate(f, B=200), digits=3, file='',
caption='Bootstrap validation of a Cox model with random predictors',
table.env=TRUE, label='tab:cox-val-random')
|
The adjoint of the adjoint of a linear map is the original linear map. In symbols (a brief sketch of the standard argument): for all $y$, $\langle A^{**}x, y\rangle = \langle x, A^{*}y\rangle = \langle Ax, y\rangle$, so nondegeneracy of the inner product gives $A^{**} = A$. |
/-
Copyright (c) 2018 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Simon Hudon, Patrick Massot
-/
import tactic.pi_instances
import algebra.group.pi
import algebra.hom.ring
/-!
# Pi instances for ring
This file defines instances for ring, semiring and related structures on Pi Types
-/
namespace pi
universes u v w
variable {I : Type u} -- The indexing type
variable {f : I → Type v} -- The family of types already equipped with instances
variables (x y : Π i, f i) (i : I)
instance distrib [Π i, distrib $ f i] : distrib (Π i : I, f i) :=
by refine_struct { add := (+), mul := (*), .. }; tactic.pi_instance_derive_field
instance non_unital_non_assoc_semiring [∀ i, non_unital_non_assoc_semiring $ f i] :
non_unital_non_assoc_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance non_unital_semiring [∀ i, non_unital_semiring $ f i] :
non_unital_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance non_assoc_semiring [∀ i, non_assoc_semiring $ f i] :
non_assoc_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*), .. };
tactic.pi_instance_derive_field
instance semiring [∀ i, semiring $ f i] : semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
nsmul := add_monoid.nsmul, npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_comm_semiring [∀ i, non_unital_comm_semiring $ f i] :
non_unital_comm_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), nsmul := add_monoid.nsmul };
tactic.pi_instance_derive_field
instance comm_semiring [∀ i, comm_semiring $ f i] : comm_semiring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
nsmul := add_monoid.nsmul, npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_non_assoc_ring [∀ i, non_unital_non_assoc_ring $ f i] :
non_unital_non_assoc_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance non_unital_ring [∀ i, non_unital_ring $ f i] :
non_unital_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance non_assoc_ring [∀ i, non_assoc_ring $ f i] :
non_assoc_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance ring [∀ i, ring $ f i] : ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul,
npow := monoid.npow };
tactic.pi_instance_derive_field
instance non_unital_comm_ring [∀ i, non_unital_comm_ring $ f i] :
non_unital_comm_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), add := (+), mul := (*), neg := has_neg.neg,
nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul };
tactic.pi_instance_derive_field
instance comm_ring [∀ i, comm_ring $ f i] : comm_ring (Π i : I, f i) :=
by refine_struct { zero := (0 : Π i, f i), one := 1, add := (+), mul := (*),
neg := has_neg.neg, nsmul := add_monoid.nsmul, zsmul := sub_neg_monoid.zsmul,
npow := monoid.npow };
tactic.pi_instance_derive_field
/-- A family of ring homomorphisms `f a : γ →+* β a` defines a ring homomorphism
`pi.ring_hom f : γ →+* Π a, β a` given by `pi.ring_hom f x b = f b x`. -/
@[simps]
protected def ring_hom {γ : Type w} [Π i, non_assoc_semiring (f i)] [non_assoc_semiring γ]
(g : Π i, γ →+* f i) : γ →+* Π i, f i :=
{ to_fun := λ x b, g b x,
map_add' := λ x y, funext $ λ z, (g z).map_add x y,
map_mul' := λ x y, funext $ λ z, (g z).map_mul x y,
map_one' := funext $ λ z, (g z).map_one,
map_zero' := funext $ λ z, (g z).map_zero }
lemma ring_hom_injective {γ : Type w} [nonempty I] [Π i, non_assoc_semiring (f i)]
[non_assoc_semiring γ] (g : Π i, γ →+* f i) (hg : ∀ i, function.injective (g i)) :
function.injective (pi.ring_hom g) :=
λ x y h, let ⟨i⟩ := ‹nonempty I› in hg i ((function.funext_iff.mp h : _) i)
end pi
section ring_hom
universes u v
variable {I : Type u}
/-- Evaluation of functions into an indexed collection of rings at a point is a ring
homomorphism. This is `function.eval` as a `ring_hom`. -/
@[simps]
def pi.eval_ring_hom (f : I → Type v) [Π i, non_assoc_semiring (f i)] (i : I) :
(Π i, f i) →+* f i :=
{ ..(pi.eval_monoid_hom f i),
..(pi.eval_add_monoid_hom f i) }
/-- `function.const` as a `ring_hom`. -/
@[simps]
def pi.const_ring_hom (α β : Type*) [non_assoc_semiring β] : β →+* (α → β) :=
{ to_fun := function.const _,
.. pi.ring_hom (λ _, ring_hom.id β) }
/-- Ring homomorphism between the function spaces `I → α` and `I → β`, induced by a ring
homomorphism `f` between `α` and `β`. -/
@[simps] protected def ring_hom.comp_left {α β : Type*} [non_assoc_semiring α]
[non_assoc_semiring β] (f : α →+* β) (I : Type*) :
(I → α) →+* (I → β) :=
{ to_fun := λ h, f ∘ h,
.. f.to_monoid_hom.comp_left I,
.. f.to_add_monoid_hom.comp_left I }
end ring_hom
|
/-
Copyright (c) 2020 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import linear_algebra.clifford_algebra.basic
import algebra.module.opposites
/-!
# Conjugations
This file defines the grade reversal and grade involution functions on multivectors, `reverse` and
`involute`.
Together, these operations compose to form the "Clifford conjugate", hence the name of this file.
https://en.wikipedia.org/wiki/Clifford_algebra#Antiautomorphisms
## Main definitions
* `clifford_algebra.involute`: the grade involution, negating each basis vector
* `clifford_algebra.reverse`: the grade reversion, reversing the order of a product of vectors
## Main statements
* `clifford_algebra.involute_involutive`
* `clifford_algebra.reverse_involutive`
* `clifford_algebra.reverse_involute_commute`
-/
variables {R : Type*} [comm_ring R]
variables {M : Type*} [add_comm_group M] [module R M]
variables {Q : quadratic_form R M}
namespace clifford_algebra
section involute
/-- Grade involution, inverting the sign of each basis vector. -/
def involute : clifford_algebra Q →ₐ[R] clifford_algebra Q :=
clifford_algebra.lift Q ⟨-(ι Q), λ m, by simp⟩
@[simp] lemma involute_ι (m : M) : involute (ι Q m) = -ι Q m :=
lift_ι_apply _ _ m
@[simp] lemma involute_comp_involute : involute.comp involute = alg_hom.id R (clifford_algebra Q) :=
by { ext, simp }
lemma involute_involutive : function.involutive (involute : _ → clifford_algebra Q) :=
alg_hom.congr_fun involute_comp_involute
@[simp] lemma involute_involute : ∀ a : clifford_algebra Q, involute (involute a) = a :=
involute_involutive
end involute
section reverse
open opposite
/-- Grade reversion, inverting the multiplication order of basis vectors.
Also called *transpose* in some literature. -/
def reverse : clifford_algebra Q →ₗ[R] clifford_algebra Q :=
(op_linear_equiv R).symm.to_linear_map.comp (
clifford_algebra.lift Q ⟨(opposite.op_linear_equiv R).to_linear_map.comp (ι Q),
λ m, unop_injective $ by simp⟩).to_linear_map
@[simp] lemma reverse_ι (m : M) : reverse (ι Q m) = ι Q m :=
by simp [reverse]
@[simp] lemma reverse.commutes (r : R) :
reverse (algebra_map R (clifford_algebra Q) r) = algebra_map R _ r :=
by simp [reverse]
@[simp] lemma reverse.map_one : reverse (1 : clifford_algebra Q) = 1 :=
by convert reverse.commutes (1 : R); simp
@[simp] lemma reverse.map_mul (a b : clifford_algebra Q) :
reverse (a * b) = reverse b * reverse a :=
by simp [reverse]
@[simp] lemma reverse_comp_reverse :
reverse.comp reverse = (linear_map.id : _ →ₗ[R] clifford_algebra Q) :=
begin
ext m,
simp only [linear_map.id_apply, linear_map.comp_apply],
induction m using clifford_algebra.induction,
-- simp can close these goals, but is slow
case h_grade0 : { rw [reverse.commutes, reverse.commutes] },
case h_grade1 : { rw [reverse_ι, reverse_ι] },
case h_mul : a b ha hb { rw [reverse.map_mul, reverse.map_mul, ha, hb], },
case h_add : a b ha hb { rw [reverse.map_add, reverse.map_add, ha, hb], },
end
@[simp] lemma reverse_involutive : function.involutive (reverse : _ → clifford_algebra Q) :=
linear_map.congr_fun reverse_comp_reverse
@[simp] lemma reverse_reverse : ∀ a : clifford_algebra Q, reverse (reverse a) = a :=
reverse_involutive
lemma reverse_comp_involute :
reverse.comp involute.to_linear_map =
(involute.to_linear_map.comp reverse : _ →ₗ[R] clifford_algebra Q) :=
begin
ext,
simp only [linear_map.comp_apply, alg_hom.to_linear_map_apply],
induction x using clifford_algebra.induction,
case h_grade0 : { simp },
case h_grade1 : { simp },
case h_mul : a b ha hb { simp [ha, hb], },
case h_add : a b ha hb { simp [ha, hb], },
end
/-- `clifford_algebra.reverse` and `clifford_algebra.involute` commute. Note that the composition
is sometimes referred to as the "clifford conjugate". -/
lemma reverse_involute_commute : function.commute (reverse : _ → clifford_algebra Q) involute :=
linear_map.congr_fun reverse_comp_involute
lemma reverse_involute : ∀ a : clifford_algebra Q, reverse (involute a) = involute (reverse a) :=
reverse_involute_commute
end reverse
/-!
### Statements about conjugations of products of lists
-/
section list
/-- Taking the reverse of the product of a list of $n$ vectors lifted via `ι` is equivalent to
taking the product of the reverse of that list. -/
lemma reverse_prod_map_ι : ∀ (l : list M), reverse (l.map $ ι Q).prod = (l.map $ ι Q).reverse.prod
| [] := by simp
| (x :: xs) := by simp [reverse_prod_map_ι xs]
/-- Taking the involute of the product of a list of $n$ vectors lifted via `ι` is equivalent to
premultiplying by $(-1)^n$. -/
lemma involute_prod_map_ι : ∀ l : list M,
involute (l.map $ ι Q).prod = ((-1 : R)^l.length) • (l.map $ ι Q).prod
| [] := by simp
| (x :: xs) := by simp [pow_add, involute_prod_map_ι xs]
end list
end clifford_algebra
|
◦How would you describe your books to new readers?
I write Regency era romances with a progressive and modern feel to them. My characters talk in rhythms of modern American English, which I think makes the books more enjoyable for readers who are used to that same internal voice in their heads when they read, and the conflicts they face are those which people might encounter during any time period—guilt over a parent’s death, fear of falling in love, a willingness to do anything to protect a child. My heroes are alpha but wounded, and my heroines are strong, independent, and outgoing women who all have the Regency era equivalent to careers and interests outside of society events and capturing a husband. But while my books are progressive, they are still firmly grounded in history, with all the wonderful facets of the era that make historicals so much fun to write and read.
◦Who are some of your favorite Go-To authors?
Anna Bradley, Kelly Bowen, Lisa Kleypas, Suzanne Enoch…too many to list! But I also love discovering new authors.
◦Are you more of a plotter or a pantser?
Definitely more plotter than pantser! I always start with a 20-page outline of the plot so that I don’t write myself into a corner with the conflict, run out of conflict after 50 pages, or write 95K words, only to be nowhere near the resolution. But each scene is pantsed, which keeps that feeling of spontaneity. Sometimes the characters will decide to take the plot in a completely new direction. When that happens, I let them, but then I also go back and update my long outline so that at least one of us knows where the story is headed! LOL!
◦What are some must-have foods or drinks you need on hand when you are finishing up your books?
Coffee!! My books are written by Starbucks and Folgers. ☺ But once I get near the end, I often switch to a glass of wine. Sipping wine from a glass forces me to slow down and not rush the last scene. It also makes for a pleasant way to celebrate when the first draft is finally done.
◦What are you working on next?
I’m currently finishing edits for book 5 in the Capturing the Carlisles series, WHAT A LORD WANTS. Evelyn Winslow finally gets her own romance—and her marquess. The book is about the double life being led by Dominick Mercer, Marquess of Ellsworth, who is England’s most respected peer by day and notorious Italian painter Domenico Vincenzo by night. When Evelyn accidentally stumbles into his studio and is mistaken for his new model, the adventure is simply too great for her to deny herself. But soon she’s discovered, and the only man who can save her reputation is the same one who ruined it. Its release is scheduled for March. |
module NonhydrostaticModels
export NonhydrostaticModel
using DocStringExtensions
using KernelAbstractions: @index, @kernel, Event, MultiEvent
using KernelAbstractions.Extras.LoopInfo: @unroll
using Oceananigans.Utils: launch!
using Oceananigans.Grids
using Oceananigans.Solvers
using Oceananigans.Distributed: MultiArch, DistributedFFTBasedPoissonSolver, reconstruct_global_grid
using Oceananigans.ImmersedBoundaries: ImmersedBoundaryGrid
import Oceananigans: fields, prognostic_fields
function PressureSolver(arch::MultiArch, local_grid::RegRectilinearGrid)
global_grid = reconstruct_global_grid(local_grid)
return DistributedFFTBasedPoissonSolver(global_grid, local_grid)
end
PressureSolver(arch, grid::RegRectilinearGrid) = FFTBasedPoissonSolver(grid)
PressureSolver(arch, grid::HRegRectilinearGrid) = FourierTridiagonalPoissonSolver(grid)
# *Evil grin*
PressureSolver(arch, ibg::ImmersedBoundaryGrid) = PressureSolver(arch, ibg.underlying_grid)
#####
##### NonhydrostaticModel definition
#####
include("nonhydrostatic_model.jl")
include("pressure_field.jl")
include("show_nonhydrostatic_model.jl")
include("set_nonhydrostatic_model.jl")
#####
##### Time-stepping NonhydrostaticModels
#####
"""
fields(model::NonhydrostaticModel)
Returns a flattened `NamedTuple` of the fields in `model.velocities` and `model.tracers`.
"""
fields(model::NonhydrostaticModel) = merge(model.velocities, model.tracers)
prognostic_fields(model::NonhydrostaticModel) = fields(model)
include("solve_for_pressure.jl")
include("update_hydrostatic_pressure.jl")
include("update_nonhydrostatic_model_state.jl")
include("pressure_correction.jl")
include("nonhydrostatic_tendency_kernel_functions.jl")
include("calculate_nonhydrostatic_tendencies.jl")
include("correct_nonhydrostatic_immersed_tendencies.jl")
end # module
|
{-# LANGUAGE CPP, NoImplicitPrelude #-}
#if __GLASGOW_HASKELL__ >= 704
{-# LANGUAGE Safe #-}
#endif
module SafeHaskellSpec (main, spec) where
import TestHspecTrustworthy
-- The following modules should not be Unsafe (#56):
import Control.Concurrent.Compat ()
import Control.Concurrent.MVar.Compat ()
import Control.Exception.Compat ()
import Control.Monad.Compat ()
import Control.Monad.Fail.Compat ()
import Control.Monad.IO.Class.Compat ()
import Data.Bifoldable.Compat ()
import Data.Bifunctor.Compat ()
import Data.Bitraversable.Compat ()
import Data.Bits.Compat ()
import Data.Bool.Compat ()
import Data.Complex.Compat ()
import Data.Either.Compat ()
import Data.Foldable.Compat ()
import Data.Function.Compat ()
import Data.Functor.Compat ()
import Data.Functor.Compose.Compat ()
import Data.Functor.Const.Compat ()
import Data.Functor.Contravariant.Compat ()
import Data.Functor.Identity.Compat ()
import Data.Functor.Product.Compat ()
import Data.Functor.Sum.Compat ()
import Data.IORef.Compat ()
import Data.List.Compat ()
import Data.List.NonEmpty.Compat ()
import Data.Monoid.Compat ()
import Data.Proxy.Compat ()
import Data.Ratio.Compat ()
import Data.Semigroup.Compat ()
import Data.STRef.Compat ()
import Data.String.Compat ()
import Data.Type.Coercion.Compat ()
import Data.Type.Equality.Compat ()
import Data.Version.Compat ()
import Data.Void.Compat ()
import Data.Word.Compat ()
import Foreign.Compat ()
import Foreign.ForeignPtr.Compat ()
import Foreign.ForeignPtr.Safe.Compat ()
import Foreign.Marshal.Alloc.Compat ()
import Foreign.Marshal.Array.Compat ()
import Foreign.Marshal.Compat ()
import Foreign.Marshal.Safe.Compat ()
import Foreign.Marshal.Utils.Compat ()
import Numeric.Compat ()
import Numeric.Natural.Compat ()
import Prelude.Compat
import System.Environment.Compat ()
import System.Exit.Compat ()
import System.IO.Error ()
import Text.Read.Compat ()
import Type.Reflection.Compat ()
main :: IO ()
main = hspec spec
spec :: Spec
spec = pure ()
|
> module Unit.Properties
> import Data.Fin
> import Control.Isomorphism
> import Finite.Predicates
> import Sigma.Sigma
> import Pairs.Operations
> %default total
> %access public export
> ||| Mapping |Unit|s to |Fin|s
> toFin : Unit -> Fin (S Z)
> toFin MkUnit = FZ
> -- %freeze toFin
> ||| Mapping |Fin (S Z)|s to |Unit|s
> fromFin : Fin (S Z) -> Unit
> fromFin FZ = MkUnit
> fromFin (FS k) = absurd k
> -- %freeze fromFin
> ||| |toFin| is the left-inverse of |fromFin|
> toFinFromFinLemma : (k : Fin (S Z)) -> toFin (fromFin k) = k
> toFinFromFinLemma FZ = Refl
> toFinFromFinLemma (FS k) = absurd k
> %freeze toFinFromFinLemma
> ||| |fromFin| is the left-inverse of |toFin|
> fromFinToFinLemma : (e : Unit) -> fromFin (toFin e) = e
> fromFinToFinLemma MkUnit = Refl
> %freeze fromFinToFinLemma
> ||| Unit is finite
> finiteUnit : Finite Unit
> finiteUnit = MkSigma (S Z) iso where
> iso : Iso Unit (Fin (S Z))
> iso = MkIso toFin fromFin toFinFromFinLemma fromFinToFinLemma
> ||| Unit is decidable
> decidableUnit : Dec Unit
> decidableUnit = Yes MkUnit
> {-
> ---}
|
module Nat
import Basic
import Unit
import Void
public export
data Nat = Z | S Nat
public export
NatInduction : (p : Nat -> Type)
-> p Z
-> ((n : Nat) -> p n -> p (S n))
-> (n : Nat) -> p n
NatInduction _ p0 pn Z = p0
NatInduction p p0 pn (S n) = pn n (NatInduction p p0 pn n)
public export
NatRecursion : (p : Type) -> p -> (Nat -> p -> p) -> Nat -> p
NatRecursion p = NatInduction (const p)
public export
NatIteration : (p : Type) -> p -> (p -> p) -> Nat -> p
NatIteration p p0 f = NatRecursion p p0 (const f)
infixl 5 +
infixl 6 *
public export
(+) : Nat -> Nat -> Nat
Z + k = k
(S n) + k = S (n + k)
public export
(*) : Nat -> Nat -> Nat
Z * k = Z
(S n) * k = k + n * k
infixl 1 <=
infixl 1 >=
namespace NatOrder
public export
(<=) : Nat -> Nat -> Type
Z <= _ = Unit
S n <= Z = Void
S n <= S k = n <= k
public export
(>=) : Nat -> Nat -> Type
n >= k = Neg (n <= k)
namespace N
%inline public export
zero : Nat
zero = Z
%inline public export
one : Nat
one = S zero
%inline public export
two : Nat
two = S one
%inline public export
three : Nat
three = S two
%inline public export
four : Nat
four = S three
|
#ifndef VI_PID_CONTROLLER_HPP
#define VI_PID_CONTROLLER_HPP
// Component
#include "PIDConfig.hpp"
#include "VehicleInterfaceData.hpp"
// Libraries
#include <boost/cstdfloat.hpp>
#include <boost/circular_buffer.hpp>
// Ros
#include <geometry_msgs/Twist.h>
#include <ros/ros.h>
// Standard
#include <cstdint>
#include <string>
namespace vi
{
using float64_t = boost::float64_t; ///< Alias for 64 bit float
/// @brief PID controller for the vehicle interface
class PIDController
{
public:
/// @brief Constructor taking the controller configuration
explicit PIDController(const PIDConfig& cfg);
/// @brief Destructor for forward declares
~PIDController();
/// @brief Main driving function
/// @param now_s Current program time
void update(const ros::Time& now_s);
/// @brief Accessor for command
/// @return Command
float64_t getOutput() const noexcept {return m_output;}
/// @brief Mutator
/// @param val Val
/// @{
void setFeedback(const float64_t val) noexcept {m_feedback = val;}
void setCommandSetpoint(const float64_t val) noexcept {m_setpoint = val;}
/// @}
private:
/// @brief Helper function to update the delta in time
/// @param now_s Current real time
void updateDtS(const ros::Time& now_s) noexcept;
/// @brief Helper function to update the error
void updateError() noexcept;
/// @brief Helpers to calculate various components of the controller
/// @return Component value
/// @{
float64_t calculateP() const noexcept;
float64_t calculateI() noexcept;
float64_t calculateD() const noexcept;
/// @}
/// @brief ctl filter helper function
/// @param input The input to filter into output
float64_t filterOutput(const float64_t input);
float64_t m_setpoint{0.0}; ///< Setpoint
float64_t m_feedback{0.0}; ///< Feedback
float64_t m_output{0.0}; ///< Output
ros::Duration m_dt_s; ///< Current delta time, in seconds
ros::Time m_last_time_s; ///< Last timestamp, used to track deltas
float64_t m_error{0.0}; ///< Current error
float64_t m_integral{0.0}; ///< Integral
float64_t m_last_error{0.0}; ///< Last error (previous iteration)
boost::circular_buffer<float64_t> m_ctl_buffer; ///< Circular buffer for ctl filter
PIDConfig m_cfg; ///< PID Config
};
} // namespace vi
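// Typical usage (an illustrative sketch; exact wiring depends on the node):
//   vi::PIDController pid{cfg};        // cfg is a configured PIDConfig
//   pid.setCommandSetpoint(target);
//   pid.setFeedback(measurement);
//   pid.update(ros::Time::now());
//   const vi::float64_t cmd = pid.getOutput();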
#endif // VI_PID_CONTROLLER_HPP
|
Or new Love pine at them beyond to-morrow. 30
|
import data.real.basic
import data.nat.basic
variables (x : ℝ)
variable (n: nat)
example (h:x>0) : (1+x)^n ≥ 1 + x*n :=
begin
  -- Bernoulli's inequality. A sketch using mathlib's `one_add_mul_le_pow`
  -- (assumed available here: `1 + ↑n * a ≤ (1 + a) ^ n` for `-2 ≤ a`);
  -- the hypothesis `h` is stronger than the `-2 ≤ x` it needs.
  have hx : (-2 : ℝ) ≤ x := by linarith,
  simpa [mul_comm] using one_add_mul_le_pow hx n,
end |
% -*-latex-*-
\chapter{People}
\label{app:people}
The Cilk project is led by Prof. Charles E. Leiserson of the MIT
Laboratory for Computer Science and has been funded in part by DARPA
Grant F30602-97-1-0270.
The current maintainer of Cilk is Bradley C. Kuszmaul
(\texttt{[email protected]}), who is responsible for the
{\sysnameversion} release.
{\sysnameversion} is the product of the effort of many people over
many years. The following people contributed code, example programs,
and/or documentation to the {\sysnameversion} system:
\begin{quote}
\noindent
Guang-Ien Cheng\\
Don Dailey\\
Mingdong Feng\\
Bradley C. Kuszmaul\\
Charles E.\ Leiserson\\
Phil Lisiecki\\
Alberto Medina\\
Ofra Pavlovitz\\
Harald Prokop\\
Keith H.\ Randall\\
Bin Song\\
Andy Stark\\
Volker Strumpen\\
\end{quote}
\noindent All those who have worked on previous versions of Cilk:
\begin{quote}
\noindent
Robert D. Blumofe\\
Michael Halbherr\\
Christopher F. Joerg\\
Howard Lu\\
Robert Miller\\
Aske Plaat\\
Richard Tauriello\\
Daricha Techopitayakul\\
Yuli Zhou\\
\end{quote}
\pagebreak[3]
%We gratefully acknowledge the contributions of many others, including:
\noindent Finally, thanks to those who have spent valuable time with Cilk and
contributed with their experience:
\begin{quote}
\noindent
Arvind\\
Reid Barton\\
Ching Law\\
John Litvin\\
Igor Lyubashevskiy\\
Rolf Riesen\\
Andy Shaw\\
Mike Stupak\\
Adrian M.\ Soviani\\
Sivan Toledo\\
Svetoslav Tzvetkov\\
\end{quote}
|
module Test.Assert
import Crayons
%access export
success : String -> String
success s = green ("✔ " <+> s)
failure : String -> String
failure s = red ("✖ " <+> s)
assertEq : Eq a => (desc : String) -> (given : a) -> (expected : a) -> IO ()
assertEq desc g e = if g == e
then putStrLn (success desc)
else putStrLn (failure desc)
assertNotEq : Eq a => (desc : String) -> (given : a) -> (expected : a) -> IO ()
assertNotEq desc g e = if not (g == e)
then putStrLn (success desc)
else putStrLn (failure desc)
assertTrue : (desc : String) -> Bool -> IO ()
assertTrue desc p = if p
then putStrLn (success desc)
else putStrLn (failure desc)
assertFalse : (desc : String) -> Bool -> IO ()
assertFalse desc p = if (not p)
then putStrLn (success desc)
else putStrLn (failure desc)
|
(* Title: HOL/TLA/Memory/MemoryParameters.thy
Author: Stephan Merz, University of Munich
*)
section \<open>RPC-Memory example: Memory parameters\<close>
theory MemoryParameters
imports RPCMemoryParams
begin
(* the memory operations *)
datatype memOp = read Locs | "write" Locs Vals
consts
(* memory locations and contents *)
MemLoc :: "Locs set"
MemVal :: "Vals set"
(* some particular values *)
OK :: "Vals"
BadArg :: "Vals"
MemFailure :: "Vals"
NotAResult :: "Vals" (* defined here for simplicity *)
(* the initial value stored in each memory cell *)
InitVal :: "Vals"
axiomatization where
(* basic assumptions about the above constants and predicates *)
BadArgNoMemVal: "BadArg \<notin> MemVal" and
MemFailNoMemVal: "MemFailure \<notin> MemVal" and
InitValMemVal: "InitVal : MemVal" and
NotAResultNotVal: "NotAResult \<notin> MemVal" and
NotAResultNotOK: "NotAResult \<noteq> OK" and
NotAResultNotBA: "NotAResult \<noteq> BadArg" and
NotAResultNotMF: "NotAResult \<noteq> MemFailure"
lemmas [simp] =
BadArgNoMemVal MemFailNoMemVal InitValMemVal NotAResultNotVal
NotAResultNotOK NotAResultNotBA NotAResultNotMF
NotAResultNotOK [symmetric] NotAResultNotBA [symmetric] NotAResultNotMF [symmetric]
lemma MemValNotAResultE: "\<lbrakk> x \<in> MemVal; (x \<noteq> NotAResult \<Longrightarrow> P) \<rbrakk> \<Longrightarrow> P"
using NotAResultNotVal by blast
end
|
Cullen was a standout with BU; he was named the East Coast Athletic Conference Rookie of the Year in 1983–84 after leading his team in scoring with 56 points. The National Hockey League passed him over, however, as he went unclaimed in the 1984 NHL Entry Draft. He was named to the Hockey East All-Star Teams in 1985, 1986 and 1987, and a National Collegiate Athletic Association East Second Team All-American in 1986. He graduated as BU's all-time scoring leader with 241 points, and was named to BU's Hockey East 25th anniversary team in 2009.
|
(* This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_int_add_inv_right
imports "../../Test_Base"
begin
datatype Nat = Z | S "Nat"
datatype Integer = P "Nat" | N "Nat"
definition(*fun*) zero :: "Integer" where
"zero = P Z"
fun pred :: "Nat => Nat" where
"pred (S y) = y"
fun plus :: "Nat => Nat => Nat" where
"plus (Z) y = y"
| "plus (S z) y = S (plus z y)"
fun neg :: "Integer => Integer" where
"neg (P (Z)) = P Z"
| "neg (P (S z)) = N z"
| "neg (N n) = P (plus (S Z) n)"
fun t2 :: "Nat => Nat => Integer" where
"t2 x y =
(let fail :: Integer =
(case y of
Z => P x
| S z =>
(case x of
Z => N y
| S x2 => t2 x2 z))
in (case x of
Z =>
(case y of
Z => P Z
| S x4 => fail)
| S x3 => fail))"
fun plus2 :: "Integer => Integer => Integer" where
"plus2 (P m) (P n) = P (plus m n)"
| "plus2 (P m) (N o2) = t2 m (plus (S Z) o2)"
| "plus2 (N m2) (P n2) = t2 n2 (plus (S Z) m2)"
| "plus2 (N m2) (N n3) = N (plus (plus (S Z) m2) n3)"
theorem property0 :
"((plus2 x (neg x)) = zero)"
oops
end
|
=== Disbandment ===
|
State Before: α : Type ?u.14402
β : Type ?u.14405
G : Type ?u.14408
M : Type u
inst✝ : RightCancelMonoid M
a b : M
⊢ a * b = b ↔ a * b = 1 * b
State After: no goals
Tactic: rw [one_mul] |
/-
Copyright (c) 2016 Microsoft Corporation. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Leonardo de Moura
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.data.nat.basic
universes u
namespace Mathlib
/-- `fin n` is the subtype of `ℕ` consisting of natural numbers strictly smaller than `n`. -/
def fin (n : ℕ) := Subtype fun (i : ℕ) => i < n
namespace fin
/-- Backwards-compatible constructor for `fin n`. -/
def mk {n : ℕ} (i : ℕ) (h : i < n) : fin n := { val := i, property := h }
protected def lt {n : ℕ} (a : fin n) (b : fin n) := subtype.val a < subtype.val b
protected def le {n : ℕ} (a : fin n) (b : fin n) := subtype.val a ≤ subtype.val b
protected instance has_lt {n : ℕ} : HasLess (fin n) := { Less := fin.lt }
protected instance has_le {n : ℕ} : HasLessEq (fin n) := { LessEq := fin.le }
protected instance decidable_lt {n : ℕ} (a : fin n) (b : fin n) : Decidable (a < b) :=
nat.decidable_lt (subtype.val a) (subtype.val b)
protected instance decidable_le {n : ℕ} (a : fin n) (b : fin n) : Decidable (a ≤ b) :=
nat.decidable_le (subtype.val a) (subtype.val b)
def elim0 {α : fin 0 → Sort u} (x : fin 0) : α x := sorry
theorem eq_of_veq {n : ℕ} {i : fin n} {j : fin n} : subtype.val i = subtype.val j → i = j := sorry
theorem veq_of_eq {n : ℕ} {i : fin n} {j : fin n} : i = j → subtype.val i = subtype.val j := sorry
theorem ne_of_vne {n : ℕ} {i : fin n} {j : fin n} (h : subtype.val i ≠ subtype.val j) : i ≠ j :=
fun (h' : i = j) => absurd (veq_of_eq h') h
theorem vne_of_ne {n : ℕ} {i : fin n} {j : fin n} (h : i ≠ j) : subtype.val i ≠ subtype.val j :=
fun (h' : subtype.val i = subtype.val j) => absurd (eq_of_veq h') h
end fin
protected instance fin.decidable_eq (n : ℕ) : DecidableEq (fin n) :=
fun (i j : fin n) =>
decidable_of_decidable_of_iff (nat.decidable_eq (subtype.val i) (subtype.val j)) sorry
end Mathlib |
# Metric for planar symmetry (ADM-style form: lapse alpha, shift beta, scale factors a1, b1)
Ndim_ := 4:
x1_ := t:
x2_ := x:
x3_ := theta:
x4_ := phi:
complex_ := {}:
g11_ := -alpha(t,x)^2+a1(t,x)^2*beta(t,x)^2:
g12_ := a1(t,x)^2*beta(t,x):
g22_ := a1(t,x)^2:
g33_ := b1(t,x)^2*x^2:
g44_ := b1(t,x)^2*x^2*(sin(theta))^2:
|
module Direction where
open import Relation.Binary.PropositionalEquality hiding (Extensionality)
----------------------------------------------------------------------
-- direction
data Dir : Set where
SND RCV : Dir
variable
d d₁ d₂ d₃ : Dir
-- dual
dual-dir : Dir → Dir
dual-dir SND = RCV
dual-dir RCV = SND
dual-dir-inv : (d : Dir) → dual-dir (dual-dir d) ≡ d
dual-dir-inv SND = refl
dual-dir-inv RCV = refl
|
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: mgga_exc *)
params_a_kappa := 4.8827323:
params_a_mu := 0.3511128:
$include "gga_x_pbe.mpl"
dldf_a := [1, -0.1637571, -0.1880028, -0.4490609, -0.0082359]:
dldf_csi_HF := 1 - 0.6144129:
dldf_f := (x, u, t) ->
+ dldf_csi_HF*pbe_f(x)*mgga_series_w(dldf_a, 5, t):
f := (rs, z, xt, xs0, xs1, u0, u1, t0, t1) ->
mgga_exchange(dldf_f, rs, z, xs0, xs1, u0, u1, t0, t1):
|
[STATEMENT]
lemma (in valid_unMultigraph) del_UnEdge_even:
assumes "(v,w,v') \<in> E" "finite E"
shows "v\<in>odd_nodes_set(del_unEdge v w v' G) \<longleftrightarrow> even (degree v G)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
have "degree v (del_unEdge v w v' G) + 1=degree v G"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree v (del_unEdge v w v' G) + 1 = degree v G
[PROOF STEP]
using del_edge_undirected_degree_plus corres
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>finite (edges ?g); (?v, ?e, ?v') \<in> edges ?g; (?v', ?e, ?v) \<in> edges ?g\<rbrakk> \<Longrightarrow> degree ?v (del_unEdge ?v ?e ?v' ?g) + 1 = degree ?v ?g
((?v, ?w, ?u') \<in> E) = ((?u', ?w, ?v) \<in> E)
goal (1 subgoal):
1. degree v (del_unEdge v w v' G) + 1 = degree v G
[PROOF STEP]
by (metis assms)
[PROOF STATE]
proof (state)
this:
degree v (del_unEdge v w v' G) + 1 = degree v G
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
from this [symmetric]
[PROOF STATE]
proof (chain)
picking this:
degree v G = degree v (del_unEdge v w v' G) + 1
[PROOF STEP]
have "odd (degree v (del_unEdge v w v' G)) = even (degree v G)"
[PROOF STATE]
proof (prove)
using this:
degree v G = degree v (del_unEdge v w v' G) + 1
goal (1 subgoal):
1. odd (degree v (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
odd (degree v (del_unEdge v w v' G)) = even (degree v G)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
odd (degree v (del_unEdge v w v' G)) = even (degree v G)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
have "v\<in>nodes (del_unEdge v w v' G)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. v \<in> nodes (del_unEdge v w v' G)
[PROOF STEP]
by (metis E_validD(1) assms(1) del_UnEdge_node)
[PROOF STATE]
proof (state)
this:
v \<in> nodes (del_unEdge v w v' G)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
odd (degree v (del_unEdge v w v' G)) = even (degree v G)
v \<in> nodes (del_unEdge v w v' G)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
odd (degree v (del_unEdge v w v' G)) = even (degree v G)
v \<in> nodes (del_unEdge v w v' G)
goal (1 subgoal):
1. (v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
[PROOF STEP]
unfolding odd_nodes_set_def
[PROOF STATE]
proof (prove)
using this:
odd (degree v (del_unEdge v w v' G)) = even (degree v G)
v \<in> nodes (del_unEdge v w v' G)
goal (1 subgoal):
1. (v \<in> {va \<in> nodes (del_unEdge v w v' G). odd (degree va (del_unEdge v w v' G))}) = even (degree v G)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(v \<in> odd_nodes_set (del_unEdge v w v' G)) = even (degree v G)
goal:
No subgoals!
[PROOF STEP]
qed |
From Test Require Import tactic.
Section FOFProblem.
Variable Universe : Set.
Variable UniverseElement : Universe.
Variable wd_ : Universe -> Universe -> Prop.
Variable col_ : Universe -> Universe -> Universe -> Prop.
Variable col_swap1_1 : (forall A B C : Universe, (col_ A B C -> col_ B A C)).
Variable col_swap2_2 : (forall A B C : Universe, (col_ A B C -> col_ B C A)).
Variable col_triv_3 : (forall A B : Universe, col_ A B B).
Variable wd_swap_4 : (forall A B : Universe, (wd_ A B -> wd_ B A)).
Variable col_trans_5 : (forall P Q A B C : Universe, ((wd_ P Q /\ (col_ P Q A /\ (col_ P Q B /\ col_ P Q C))) -> col_ A B C)).
Theorem pipo_6 : (forall A B C Aprime Cprimeprime Bprime Bprimeprime : Universe, ((wd_ A Aprime /\ (wd_ A B /\ (wd_ A C /\ (wd_ C Aprime /\ (wd_ B C /\ (wd_ B Aprime /\ (wd_ Cprimeprime Aprime /\ (wd_ Bprime A /\ (wd_ Bprime Aprime /\ (wd_ Bprime Bprimeprime /\ (wd_ Aprime Bprimeprime /\ (col_ Bprimeprime A Aprime /\ col_ Bprime Aprime Bprimeprime)))))))))))) -> col_ Bprime Aprime A)).
Proof.
time tac.
Qed.
End FOFProblem.
|
library(foreach)
#library(doParallel)
cv.lasso <- function(y, X, b.init = NULL, pen.fac = rep(1,p), lambda = NULL,
nfold=5, tol=1e-4, cd.maxit=1e+4) {
#cl <- makeCluster(5)
#registerDoParallel(cl)
p <- ncol(X)
n <- nrow(X)
if (n < nfold) stop("nfold cannot be larger than n.")
if(is.null(lambda)) {
lambda.max <- max(abs(crossprod(X, y - mean(y))))/n
lambda <- exp(seq(from=log(lambda.max),
to=log(ifelse(p < n, 1e-4, .01)*lambda.max),
length.out=100))
}
# nfold-cross-validation
obs.fold <- rep(1:nfold,n)[sample.int(n)]
cv <- foreach(fold = 1:nfold, .combine = "cbind") %do% {
#source("lasso.r")
b <- lasso0(y=y[obs.fold!=fold], X=X[obs.fold!=fold,], b.init=b.init, lambda=lambda,
pen.fac=pen.fac, tol=tol, cd.maxit=cd.maxit)$b
apply(b, 2, function(beta) {
res <- (y[obs.fold==fold] - X[obs.fold==fold,] %*% beta)
mean((res - mean(res))^2)
})
}
#stopCluster(cl)
# output CV graph
cvm <- rowMeans(cv)
list(lambda.min=lambda[which.min(cvm)], cvm = cvm, lambda=lambda)
}
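## Example usage (a hypothetical sketch; requires lasso0() from lasso.r):
## set.seed(1)
## X <- matrix(rnorm(100 * 10), 100, 10)
## y <- X[, 1] - 2 * X[, 2] + rnorm(100)
## fit <- cv.lasso(y, X)
## fit$lambda.min   # CV-selected penalty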
|
```julia
using JuMP, Cbc, Plots # The usual packages
using DelimitedFiles # IO reading/writing files
using LinearAlgebra # For convenience (includes function norm(x))
using Combinatorics # To perform permutations
```
# MS-E2121 - Linear optimization
## Exercise session 8
### Demo exercise: Travelling salesman problem (TSP) - MTZ formulation
#### Examining the formulation
Show that the following formulation $P_{MTZ}$ is valid for the TSP defined on a directed graph $G = (N,A)$ with $N = \{1,\dots,n\}$ cities and arcs $A = \{(i,j) : i,j\in N, i\neq j\}$ between cities.
$$
P_{MTZ} = \left\{
\begin{array}{ll}
\displaystyle \sum_{j \in N \setminus \{i\}} x_{ij} = 1, & \forall i \in N \\
\displaystyle \sum_{j \in N \setminus \{i\}} x_{ji} = 1, & \forall i \in N \\
\displaystyle u_{i} - u_{j} + (n-1) x_{ij} \leq n - 2, & \forall i,j \in N \setminus \{ 1 \} : i \neq j ~~(*)\\
x_{ij} \in \{0,1\}, & \forall i,j \in N : i\neq j\\
\end{array}
\right.
$$
where $x_{ij} = 1$ if city $j\in N$ is visited immediately after city $i \in N$, and $x_{ij} = 0$ otherwise. Constraints $(*)$ with the variables $u_i \in \mathbb{R}$ for all $i\in N$ are called *Miller-Tucker-Zemlin* (MTZ) subtour elimination constraints.
We want to show that
1. Constraints $(*)$ prevent subtours in any solution $x \in P_{MTZ}$.
2. Every TSP solution $x$ satisfies the constraints $(*)$.
To prove the first one, we first assume that a solution $x \in P_{MTZ}$ *has* a subtour with $k$ nodes and $k$ arcs between them, not going through node 1. For example, assume nodes 2, 3 and 5 form a subtour when $n=5$. Let's write the constraints $(*)$ corresponding to this subtour:
\begin{align}
u_2 - u_3 + 4 \le 3 \\
u_3 - u_5 + 4 \le 3 \\
u_5 - u_2 + 4 \le 3
\end{align}
We observe that if $x_{ij} = 1$ with $i,j \in N \setminus \{1\}$, the constraint $(*)$ can be written as $u_i \le u_j - 1$, which implies $u_i < u_j$. For a general result, we denote the nodes in the subtour by $\{i_1, ..., i_k\}$ and, following the arcs around the subtour, get $u_{i_1} < u_{i_2} < \dots < u_{i_k} < u_{i_1}$, which is a contradiction. This tells us that there can be no subtour ($k<n$) that doesn't contain node 1. A subtour containing node 1 would imply another subtour not containing node 1, so that is also forbidden by $(*)$. This proves the first part.
For the second part, we notice that the $u$-variables imply an ordering for the nodes. Assume that all tours start from node 1 and that $u_i$, $i \in N \setminus \{1\}$, is the position of node $i$ on the tour (the first node visited after the starting node 1 has $u$-value 2, the second one 3, and so on). For each arc $i \rightarrow j$, we have either $x_{ij}=0$ or $x_{ij}=1$.
If $x_{ij} = 0$: there is no arc from $i$ to $j$, and the constraint reduces to $u_{i} - u_{j} \leq n - 2$. This holds because $u_i$ and $u_j$ both lie between 2 and $n$ (recall that $i,j \neq 1$), so their difference is at most $n-2$. The constraint therefore always holds when $x_{ij}=0$.
If $x_{ij} = 1$: there is an arc from $i$ to $j$, and the constraint becomes $u_{i} - u_{j} + n-1 \leq n - 2$, i.e., $u_{i} - u_{j} \leq -1$. This holds for a valid tour, since $j$ is visited immediately after $i$, so by our ordering $u_j = u_i + 1$ and $u_i - u_j = -1$. The key is that node 1 is treated as a special case: the constraints $(*)$ do not involve $u_1$, which is what allows the one full tour through node 1 while forbidding subtours.
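As a quick numerical check (an illustrative example with made-up numbers): take $n = 4$ and the tour $1 \to 3 \to 2 \to 4 \to 1$, so $u_3 = 2$, $u_2 = 3$, $u_4 = 4$. For the used arc $(3,2)$, the constraint gives $u_3 - u_2 + (n-1)x_{32} = 2 - 3 + 3 = 2 \leq n - 2 = 2$, which is tight; for an unused arc such as $(4,2)$, it gives $4 - 3 + 0 = 1 \leq 2$.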
To conclude, we proved that this formulation does not allow subtours, and all valid solutions $x$ satisfy the constraints. Therefore, the formulation is valid.
#### MTZ and naive TSP implementation
We first write some helper functions, starting with one that computes the distances between coordinates:
```julia
## Function for getting the distances array
function get_dist(xycoord::Matrix{},n::Int)
# Compute distance matrix (d[i,j]) from city coordinates
dist = zeros(n,n)
for i = 1:n
for j = i:n
d = norm(xycoord[i,:] - xycoord[j,:])
dist[i,j] = d
dist[j,i] = d
end
end
return dist
end
```
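A quick check on toy coordinates (illustrative values, not part of the original data):
```julia
# Two points forming a 3-4-5 triangle: the off-diagonal distance is 5
get_dist([0.0 0.0; 3.0 4.0], 2)   # returns [0.0 5.0; 5.0 0.0]
```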
A function to convert the adjacency matrix $x$ describing a tour to a vector representing the tour, starting from city 1.
```julia
# Get the optimal tour
# Input
# x: solution matrix
# n: number of cities
# Returns
# tour: ordering of cities in the optimal tour
function gettour(x::Matrix{Int}, n::Int)
tour = zeros(Int,n+1) # Initialize tour vector (n+1 as city 1 appears twice)
tour[1] = 1 # Set city 1 as first one in the tour
k = 2 # Index of vector tour[k]
i = 1 # Index of current city
while k <= n + 1 # Find all n+1 tour nodes (city 1 is counted twice)
for j = 1:n
if x[i,j] == 1 # Find next city j visited immediately after i
tour[k] = j # Set city j as the k:th city in the tour
k = k + 1 # Update index k of tour[] vector
i = j # Move to next city
break
end
end
end
return tour # Return the optimal tour
end
```
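A small sanity check (illustrative): the tour $1 \to 3 \to 2 \to 1$ encoded as an adjacency matrix is decoded back into the visiting order:
```julia
x_demo = [0 0 1; 1 0 0; 0 1 0]   # x_demo[i,j] = 1 iff we travel i -> j
gettour(x_demo, 3)                # returns [1, 3, 2, 1]
```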
```julia
## Defining the colors to be used
c_blue = palette(:auto)[1] # color :1
c_orange = palette(:auto)[2] # color :2
c_green = palette(:auto)[3]; # color :3
```
The data is stored in csv-files, a common external data format. We use the DelimitedFiles package to read a file into a matrix ```data```.
```julia
# "data16a.csv" has 3 columns which are stored in (Nullable) Arrays
# data[1], data[2], data[3] after the function call
# data = CSV.read(...) below. The columns contain:
#
# data[:,1]: all cities i in V
# data[:,2]: x-coordinate of each city i in V
# data[:,3]: y-coordinate of each city i in V
#
data = readdlm("data16a.csv", ',')
# data = readdlm("data16b.csv", ',')
n = 16 # number of cities
# println(data) # Look at the data in compact form
V = data[2:n+1,1] # All cities i in V
x = data[2:n+1,2] # x-coordinates of cities i in V
y = data[2:n+1,3] # y-coordinates of cities i in V
xycoord = [x y]; # n x 2 coordinate matrix
```
```julia
function tsp_naive(xycoord::Matrix{}, n::Int)
# Create a model
m = Model(Cbc.Optimizer)
# Here the costs c are the distances between cities
c = get_dist(xycoord,n)
## Variables
# x[i,j] = 1 if we travel from city i to city j, 0 otherwise.
@variable(m, x[1:n,1:n], Bin)
## Objective
# Minimize length of tour
@objective(m, Min, dot(c,x))
## Constraints
# Ignore self arcs: set x[i,i] = 0
@constraint(m, sar[i = 1:n], x[i,i] == 0)
# We must enter and leave every city exactly once
@constraint(m, ji[i = 1:n], sum(x[j,i] for j = 1:n if j != i) == 1)
@constraint(m, ij[i = 1:n], sum(x[i,j] for j = 1:n if j != i) == 1)
optimize!(m)
cost = objective_value(m) # Optimal cost (length)
sol_x = round.(Int, value.(x)) # Optimal solution vector
return m, sol_x, cost
end;
```
```julia
## Solve the problem and evaluate time and memory with @time macro
(m_naive, x_naive, cost_naive) = @time tsp_naive(xycoord, n);
# Get the optimal tour
tour_naive = gettour(x_naive,n);
```
```julia
plt = scatter(xycoord[:,1],xycoord[:,2],
markercolor = c_blue,
markerstrokewidth = 0,
legend = false
)
for i in 1:length(tour_naive)-1
annotate!(xycoord[tour_naive[i],1]+50, xycoord[tour_naive[i],2]+50, ("$(tour_naive[i])", 7, :left))
plot!(([xycoord[tour_naive[i],1],xycoord[tour_naive[i+1],1]] , [xycoord[tour_naive[i],2],xycoord[tour_naive[i+1],2]]), c = c_blue, label = "")
end
plt
```
```julia
plt = scatter(xycoord[:,1],xycoord[:,2],
markercolor = c_blue,
markerstrokewidth = 0,
legend = false
)
for i in 1:n
annotate!(xycoord[i,1]+50, xycoord[i,2]+50, ("$(i)", 7, :left))
for j in 1:n
if x_naive[i,j] == 1
plot!(([xycoord[i,1],xycoord[j,1]] , [xycoord[i,2],xycoord[j,2]]), c = c_blue, label = "")
break
end
end
end
plt
```
```julia
# Solve a directed, TSP instance (MTZ formulation)
# Input
# xycoord: coordinates of city locations
# n: number of cities
# Returns
# tour: ordering of cities in the optimal tour
# cost: cost (length) of the optimal tour
function tsp_mtz(xycoord::Matrix{}, n::Int)
# Create a model
m = Model(Cbc.Optimizer)
# Here the costs c are the distances between cities
c = get_dist(xycoord,n)
## Variables
# x[i,j] = 1 if we travel from city i to city j, 0 otherwise.
@variable(m, x[1:n,1:n], Bin)
# Variables u for subtour elimination constraints
@variable(m, u[2:n])
## Objective
# Minimize length of tour
@objective(m, Min, dot(c,x))
## Constraints
# Ignore self arcs: set x[i,i] = 0
@constraint(m, sar[i = 1:n], x[i,i] == 0)
# We must enter and leave every city exactly once
@constraint(m, ji[i = 1:n], sum(x[j,i] for j = 1:n if j != i) == 1)
@constraint(m, ij[i = 1:n], sum(x[i,j] for j = 1:n if j != i) == 1)
# MTZ subtour elimination constraints
    @constraint(m, sub[i = 2:n, j = 2:n; i != j], u[i] - u[j] + (n-1)*x[i,j] <= (n-2))
optimize!(m)
cost = objective_value(m) # Optimal cost (length)
sol_x = round.(Int, value.(x)) # Optimal solution vector
return m, sol_x, cost
end;
```
```julia
## Solve the problem and evaluate time and memory with @time macro
(m_mtz, x_mtz, cost_mtz) = @time tsp_mtz(xycoord, n);
# Get the optimal tour
tour_mtz = gettour(x_mtz,n);
```
```julia
## Print the optimal tour and its cost
println("\nOptimal tour: $(tour_mtz)\n")
println("Optimal length: ", cost_mtz)
```
```julia
plt = scatter(xycoord[:,1],xycoord[:,2],
markercolor = c_blue,
markerstrokewidth = 0,
legend = false
)
for i in 1:length(tour_mtz)-1
annotate!(xycoord[tour_mtz[i],1]+50, xycoord[tour_mtz[i],2]+50, ("$(tour_mtz[i])", 7, :left))
plot!(([xycoord[tour_mtz[i],1],xycoord[tour_mtz[i+1],1]] , [xycoord[tour_mtz[i],2],xycoord[tour_mtz[i+1],2]]), c = c_blue, label = "")
end
plt
```
Now that we have solved the problem using the MTZ formulation, let's try solving the same problem starting with the naive implementation and using successive cutset or subtour elimination constraints:
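For reference, these are the two families of constraints added in the loop below, where $S \subset N$ is the node set of a detected subtour:
$$
\sum_{i \in S} \sum_{j \in N \setminus S} x_{ij} \geq 1 \quad \text{(cutset)}, \qquad \sum_{i,j \in S,\, i \neq j} x_{ij} \leq |S| - 1 \quad \text{(subtour elimination)}.
$$
The cutset constraint forces at least one arc to leave the subtour, while the subtour elimination constraint forbids the subtour's nodes from using enough arcs among themselves to form a cycle.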
```julia
## Initialisation
(m_naive, x_naive, cost_naive) = tsp_naive(xycoord, n);
subnodes = []
count = 0
stop = 0
lim = 100
methods = [:elimination,:cutset]
method = methods[2];
```
```julia
## Perform cuts to break subtours until we got an optimal
@time while stop == 0 && count < lim
S = collect(permutations(subnodes,2)) # Possible connections present in the naive implementation
NS = setdiff(V,subnodes) # Nodes that are still not included in the tour
if method == :cutset
## Cutset constraints
if length(S) > 0
@constraint(m_naive,sum(m_naive[:x][subnodes[i],NS[j]] for i in 1:length(subnodes), j in 1:length(NS)) >= 1)
end
else
## Subtour elimination constraints
if length(S) > 0
@constraint(m_naive,sum(m_naive[:x][S[i][1],S[i][2]] for i in 1:length(S)) <= length(subnodes)-1)
end
end
set_silent(m_naive)
optimize!(m_naive)
cost2 = objective_value(m_naive) # Optimal cost (length)
sol_x = round.(Int, value.(m_naive[:x])) # Optimal solution vector
tour2 = gettour(sol_x,n) # Get the optimal tour
if length(unique(tour2)) < n
count = count + 1
println("Method used: ",method)
println("Subtours present in node 1, update subnodes vector to break the subtour: ", tour2')
println("\nIteration $(count); not optimal.\n")
subnodes = unique(tour2);
else
println("Method used: ",method)
println("Optimal tour: ", tour2')
println("\n Took $(count) iterations to find the optimal solution.")
S = []
stop = 1
end;
end;
```
### Scheduling problem
TODO
|
As an opponent of the Vietnam War, Bedell signed a petition urging against United States military intervention in Iraq. This petition was signed with the names of 70 former Congressmen from the 1970s and was presented in a press conference on March 15, 2003. Bedell said that it was unbelievable for the United States to settle disputes with war, and he said that an Iraq war would be similar to the Vietnam War.
|
import set_theory.ordinal_arithmetic
universe u
namespace list
/-- Splits a list right after the first element satisfying a predicate. -/
def split_at_pred {α : Type u} (l : list α) (P : α → Prop) [decidable_pred P] : list α × list α :=
l.split_at (l.find_index P).succ
/-- `l.split_eq k` splits `l` right after the first element equal to `k`. -/
def split_eq {α : Type u} (l : list α) (k : α) [decidable_eq α] : list α × list α :=
l.split_at_pred (eq k)
/-- Splits a list into the bad and good parts. -/
def split_prss : list ℕ → list ℕ × list ℕ
| [] := ([], [])
| (0 :: l) := ([], l)
| ((n + 1) :: l) := l.split_eq n
/-- Bad part of a list. -/
abbreviation bad_part (l : list ℕ) : list ℕ := l.split_prss.fst
/-- Good part of a list. -/
abbreviation good_part (l : list ℕ) : list ℕ := l.split_prss.snd
/-- The empty list splits into two copies of itself. -/
theorem split_prss_nil : list.nil.split_prss = ([], []) := rfl
/-- The bad and good parts together make the tail. -/
theorem bad_append_good_eq_tail : ∀ l : list ℕ, l.bad_part ++ l.good_part = l.tail
| [] := by refl
| (0 :: l) := by refl
| ((n + 1) :: l) := begin
change (split_at _ _).fst ++ (split_at _ _).snd = _,
rw split_at_eq_take_drop _ l,
exact take_append_drop _ l
end
/-- Appends a list to itself `n` times. -/
def cycle {α : Type u} : list α → ℕ → list α
| _ 0 := []
| l (n + 1) := l ++ (l.cycle n)
/-- Cycling a list zero times gives the empty list. -/
theorem cycle_zero {α : Type u} (l : list α) : l.cycle 0 = [] := rfl
/-- Cycling a list once gives itself. -/
theorem cycle_one {α : Type u} (l : list α) : l.cycle 1 = l := l.append_nil
/-- Cycling the empty list gives itself. -/
theorem cycle_nil {α : Type u} : ∀ {n : ℕ}, ([] : list α).cycle n = []
| 0 := by refl
| (n + 1) := by { show nil ++ nil.cycle n = nil, by { rw cycle_nil, refl }}
/-- `l.prss n` performs one step of PrSS expansion, and copies the bad part `n` times. -/
def prss (l : list ℕ) (n : ℕ) : list ℕ :=
l.bad_part.cycle n ++ l.good_part
/-- Leading zeros just get removed. -/
theorem prss_lead_zero (l : list ℕ) {n : ℕ} : (0 :: l).prss n = l :=
begin
change (0 :: l).prss n with list.cycle [] n ++ _,
rw cycle_nil,
refl
end
/-- `l.prss 1` just gives the tail. -/
theorem prss_one (l : list ℕ) : l.prss 1 = l.tail :=
begin
change l.prss 1 with (list.cycle _ 1) ++ _,
rw cycle_one,
exact l.bad_append_good_eq_tail
end
/-- Standard PrSS lists. -/
inductive is_standard : list ℕ → Prop
| base (n : ℕ) : is_standard (list.range n).reverse
| prss (l : list ℕ) (n : ℕ) : is_standard (l.prss n)
/-- Tails of standard lists are standard. -/
theorem tail_standard (l : list ℕ) : is_standard l.tail :=
begin
convert is_standard.prss l 1,
exact l.prss_one.symm,
end
/-
def split_on' (l : list ℕ) (n : ℕ) : list (list ℕ) :=
(l.split_on n).map (list.cons n)
noncomputable def ordinal' : list ℕ → ordinal
| [] := 0
| (n :: l) := ((l.split_on' (n + 1)).map (λ m, ordinal.omega ^ ordinal' m)).sum
using_well_founded
-/
/-- Returns the function that applies `g 0`, `g 1`, ... `g (n - 1)` to `f`, starting from `a`. -/
def apply {α β : Type u} (f : α → β → α) (a : α) (g : ℕ → β) : ℕ → α :=
λ n, ((list.range n).map g).foldl f a
/-- PrSS terminates. -/
theorem termination {l : list ℕ} (hl : is_standard l) (g : ℕ → ℕ) :
∃ n, apply list.prss l g n = [] :=
sorry
end list |
{-# OPTIONS --omega-in-omega --no-termination-check --overlapping-instances #-}
module Light.Library.Data.Product where
open import Light.Level using (Level ; Setω)
open import Light.Variable.Sets
open import Light.Variable.Levels
import Light.Library.Data.Both as Both
open import Light.Library.Relation.Binary
using (SelfTransitive ; SelfSymmetric ; Reflexive)
open import Light.Library.Relation.Binary.Equality.Decidable using (DecidableEquality)
record Dependencies : Setω where
record Library (dependencies : Dependencies) : Setω where
field
ℓf : Level → Level → Level
Σ : ∀ (𝕒 : Set aℓ) → (𝕒 → Set bℓ) → Set (ℓf aℓ bℓ)
both : ∀ {𝕓 : 𝕒 → Set bℓ} a (b : 𝕓 a) → Σ 𝕒 𝕓
first : ∀ {𝕓 : 𝕒 → Set bℓ} → Σ 𝕒 𝕓 → 𝕒
second : ∀ {𝕓 : 𝕒 → Set bℓ} (product : Σ 𝕒 𝕓) → 𝕓 (first product)
⦃ equals ⦄ :
∀ ⦃ a‐c‐equals : DecidableEquality 𝕒 𝕔 ⦄ ⦃ b‐d‐equals : DecidableEquality 𝕓 𝕕 ⦄
→ DecidableEquality (Σ 𝕒 (λ _ → 𝕓)) (Σ 𝕔 (λ _ → 𝕕))
∃ : ∀ {𝕒 : Set aℓ} → (𝕒 → Set bℓ) → Set (ℓf aℓ bℓ)
∃ = Σ _
instance both‐library : Both.Library record {}
both‐library = record
{
Both = λ 𝕒 𝕓 → Σ 𝕒 (λ _ → 𝕓) ;
ℓf = ℓf ;
both = both ;
first = first ;
second = second ;
equals = equals
}
open Library ⦃ ... ⦄ public
|
Formal statement is: lemmas prime_dvd_power_int = prime_dvd_power[where ?'a = int] Informal statement is: If $p$ is a prime number and $p \mid a^n$ for some integer $a$ and $n$, then $p \mid a$. |
[STATEMENT]
lemma downset_elim:
assumes "set_mset K \<subseteq> dm r J" shows "\<forall>k\<in>set_mset K.\<exists>j\<in>set_mset J.(k,j)\<in>r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>k\<in>#K. \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>k. k \<in># K \<Longrightarrow> \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
fix k
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>k. k \<in># K \<Longrightarrow> \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
assume "k\<in> set_mset K"
[PROOF STATE]
proof (state)
this:
k \<in># K
goal (1 subgoal):
1. \<And>k. k \<in># K \<Longrightarrow> \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
thus "\<exists>j\<in>set_mset J.(k,j)\<in> r"
[PROOF STATE]
proof (prove)
using this:
k \<in># K
goal (1 subgoal):
1. \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
k \<in># K
set_mset K \<subseteq> r \<down>m J
goal (1 subgoal):
1. \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
unfolding dm_def ds_def
[PROOF STATE]
proof (prove)
using this:
k \<in># K
set_mset K \<subseteq> {y. \<exists>x\<in>#J. (y, x) \<in> r}
goal (1 subgoal):
1. \<exists>j\<in>#J. (k, j) \<in> r
[PROOF STEP]
by fast
[PROOF STATE]
proof (state)
this:
\<exists>j\<in>#J. (k, j) \<in> r
goal:
No subgoals!
[PROOF STEP]
qed |
\chapter{Verification dataset figures}
The following figures show the positions of scans/profiles
for the various datasets included in the VDS
for each frequency mode of \smr.
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm1.png}
\caption{VDS: Positions of collocated scans for frequency mode 1.}
\label{fig:vdsfm1b}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm2.png}
\caption{VDS: Positions of collocated scans for frequency mode 2.}
\label{fig:vdsfm2}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm8.png}
\caption{VDS: Positions of collocated scans for frequency mode 8.}
\label{fig:vdsfm8}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm13.png}
\caption{VDS: Positions of collocated scans for frequency mode 13.}
\label{fig:vdsfm13}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm14.png}
\caption{VDS: Positions of collocated scans for frequency mode 14.}
\label{fig:vdsfm14}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm17.png}
\caption{VDS: Positions of collocated scans for frequency mode 17.}
\label{fig:vdsfm17}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm19.png}
\caption{VDS: Positions of collocated scans for frequency mode 19.}
\label{fig:vdsfm19}
\end{figure}
\begin{figure}[t]
\centering
\includegraphics[width=17cm]{test_collocation_fm21.png}
\caption{VDS: Positions of collocated scans for frequency mode 21.}
\label{fig:vdsfm21}
\end{figure}
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "L1_ATBD"
%%% End:
|
[STATEMENT]
lemma decseq_def: "decseq X \<longleftrightarrow> (\<forall>m. \<forall>n\<ge>m. X n \<le> X m)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. decseq X = (\<forall>m n. m \<le> n \<longrightarrow> X n \<le> X m)
[PROOF STEP]
unfolding antimono_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>x y. x \<le> y \<longrightarrow> X y \<le> X x) = (\<forall>m n. m \<le> n \<longrightarrow> X n \<le> X m)
[PROOF STEP]
.. |
/* This file is part of the KDE project
Copyright (C) 2006 Stefan Nikolaus <[email protected]>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public License
along with this library; see the file COPYING.LIB. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#ifndef KSPREAD_SOLVER
#define KSPREAD_SOLVER
#include <gsl/gsl_multimin.h>
#include <kxmlguiclient.h>
#include <Cell.h>
#include <QObject>
#include <QVariantList>
namespace Calligra
{
namespace Sheets
{
namespace Plugins
{
/**
* \class Solver Function Optimizer
* \author Stefan Nikolaus <[email protected]>
*/
class Solver : public QObject, public KXMLGUIClient
{
Q_OBJECT
public:
struct Parameters {
QList<Cell> cells;
};
/**
* Constructor.
*/
Solver(QObject* parent, const QVariantList& args);
/**
* Destructor.
*/
~Solver();
double evaluate(const gsl_vector* vector, void* parameters);
protected Q_SLOTS:
/**
* Called when the Solver action is triggered.
* Opens the dialog.
*/
void showDialog();
/**
* This method does the real work.
* Uses the parameters of the dialog to optimize the given function.
*/
void optimize();
private:
Q_DISABLE_COPY(Solver)
class Private;
Private * const d;
};
} // namespace Plugins
} // namespace Sheets
} // namespace Calligra
#endif
|
#include "src/service/async_service_impl.h"
#include <gtest/gtest-typed-test.h>
#include <boost/type_traits/is_default_constructible.hpp>
#include <type_traits>
#include "common/config/version_converter.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "src/config/get_config.h"
#include "src/filters/filter_chain.h"
namespace authservice {
namespace service {
using ::testing::HasSubstr;
template <class T>
class AsyncServiceImplTest : public ::testing::Test {
public:
grpc::Status check(typename T::first_type *request,
typename T::second_type *response) {
using RequestType = typename T::first_type;
using ResponseType = typename T::second_type;
// Create a new io_context. All of the async IO handled inside the
// spawn below will be handled by this new io_context.
boost::asio::io_context ioc;
grpc::Status status;
// Spawn a co-routine to run the filter.
boost::asio::spawn(ioc, [&](boost::asio::yield_context yield) {
status = authservice::service::Check(
*request, *response, chains_, trigger_rules_config_,
allow_unmatched_requests_, ioc, yield);
});
// Run the I/O context to completion, on the current thread.
// This consumes the current thread until all of the async
// I/O from the above spawn is finished.
ioc.run();
return status;
}
bool allow_unmatched_requests_{true};
std::vector<std::unique_ptr<filters::FilterChain>> chains_;
google::protobuf::RepeatedPtrField<config::TriggerRule> trigger_rules_config_;
boost::asio::io_context ioc_;
};
using test_types =
::testing::Types<std::pair<::envoy::service::auth::v3::CheckRequest,
::envoy::service::auth::v3::CheckResponse>,
std::pair<::envoy::service::auth::v2::CheckRequest,
::envoy::service::auth::v2::CheckResponse>>;
TYPED_TEST_CASE(AsyncServiceImplTest, test_types);
TYPED_TEST(AsyncServiceImplTest,
CheckUnmatchedTenantRequest_ForAMatchingTriggerRulesPath) {
typename TypeParam::first_type request;
typename TypeParam::second_type response;
request.mutable_attributes()->mutable_request()->mutable_http()->set_scheme(
"https");
request.mutable_attributes()->mutable_request()->mutable_http()->set_path(
"/status/foo#some-fragment"); // this is a matching path for
// trigger_rules
auto request_headers = request.mutable_attributes()
->mutable_request()
->mutable_http()
->mutable_headers();
request_headers->insert({"x-tenant-identifier", "unknown-tenant"});
config::Config config = *config::GetConfig("test/fixtures/valid-config.json");
this->trigger_rules_config_ = config.trigger_rules();
for (const auto &chain_config : config.chains()) {
std::unique_ptr<filters::FilterChain> chain(new filters::FilterChainImpl(
this->ioc_, chain_config, config.threads()));
this->chains_.push_back(std::move(chain));
}
auto status = this->check(&request, &response);
EXPECT_TRUE(status.ok());
EXPECT_FALSE(response.has_denied_response()); // request allowed to proceed
// (not redirected for auth)
}
TYPED_TEST(AsyncServiceImplTest,
CheckMatchedTenantRequest_ForANonMatchingTriggerRulesPath) {
typename TypeParam::first_type request;
typename TypeParam::second_type response;
request.mutable_attributes()->mutable_request()->mutable_http()->set_scheme(
"https");
request.mutable_attributes()->mutable_request()->mutable_http()->set_path(
"/status/version?some-query"); // this is a non-matching path for
// trigger_rules
auto request_headers = request.mutable_attributes()
->mutable_request()
->mutable_http()
->mutable_headers();
request_headers->insert({"x-tenant-identifier", "tenant1"});
config::Config config = *config::GetConfig("test/fixtures/valid-config.json");
this->trigger_rules_config_ = config.trigger_rules();
for (const auto &chain_config : config.chains()) {
std::unique_ptr<filters::FilterChain> chain(new filters::FilterChainImpl(
this->ioc_, chain_config, config.threads()));
this->chains_.push_back(std::move(chain));
}
auto status = this->check(&request, &response);
EXPECT_TRUE(status.ok());
EXPECT_FALSE(response.has_denied_response()); // request allowed to proceed
// (not redirected for auth)
}
TYPED_TEST(AsyncServiceImplTest,
CheckMatchedTenantRequest_ForAMatchingTriggerRulesPath) {
typename TypeParam::first_type request;
typename TypeParam::second_type response;
request.mutable_attributes()->mutable_request()->mutable_http()->set_scheme(
"https");
request.mutable_attributes()->mutable_request()->mutable_http()->set_path(
"/status/foo?some-query"); // this is a matching path for trigger_rules
auto request_headers = request.mutable_attributes()
->mutable_request()
->mutable_http()
->mutable_headers();
request_headers->insert({"x-tenant-identifier", "tenant1"});
config::Config config = *config::GetConfig("test/fixtures/valid-config.json");
for (const auto &chain_config : config.chains()) {
std::unique_ptr<filters::FilterChain> chain(new filters::FilterChainImpl(
this->ioc_, chain_config, config.threads()));
this->chains_.push_back(std::move(chain));
}
auto status = this->check(&request, &response);
EXPECT_TRUE(status.ok());
EXPECT_EQ(response.denied_response().status().code(),
envoy::type::v3::Found); // redirected for auth
bool hasLocation = false;
for (auto &header : response.denied_response().headers()) {
if (header.header().key() == "location") {
EXPECT_THAT(
header.header().value(),
HasSubstr(
"https://google3/path3")); // redirected to the configured IDP
hasLocation = true;
}
}
EXPECT_TRUE(hasLocation);
}
TYPED_TEST(AsyncServiceImplTest,
CheckRejectNoMatchedFilterChainWithDefaultDeny) {
typename TypeParam::first_type request;
typename TypeParam::second_type response;
this->allow_unmatched_requests_ = false;
request.mutable_attributes()->mutable_request()->mutable_http()->set_scheme(
"https");
request.mutable_attributes()->mutable_request()->mutable_http()->set_path(
"/status/foo?some-query"); // this is a matching path for trigger_rules
auto request_headers = request.mutable_attributes()
->mutable_request()
->mutable_http()
->mutable_headers();
// Set tenant identifier with "tenant2" to avoid matching with some filter
// chains that triggered by "x-tenant-identifier=tenant1".
request_headers->insert({"x-tenant-identifier", "tenant2"});
config::Config config = *config::GetConfig("test/fixtures/valid-config.json");
for (const auto &chain_config : config.chains()) {
std::unique_ptr<filters::FilterChain> chain(new filters::FilterChainImpl(
this->ioc_, chain_config, config.threads()));
this->chains_.push_back(std::move(chain));
}
auto status = this->check(&request, &response);
// Can't find matched filter chain.
EXPECT_EQ(status.error_code(), grpc::StatusCode::PERMISSION_DENIED);
}
} // namespace service
} // namespace authservice
|
/-
Copyright (c) 2017 Mario Carneiro. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Mario Carneiro
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.list.sort
import Mathlib.data.multiset.basic
import Mathlib.data.string.basic
import Mathlib.PostPort
universes u_1
namespace Mathlib
/-!
# Construct a sorted list from a multiset.
-/
namespace multiset
/-- `sort s` constructs a sorted list from the multiset `s`.
(Uses merge sort algorithm.) -/
def sort {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r] [is_antisymm α r]
[is_total α r] (s : multiset α) : List α :=
quot.lift_on s (list.merge_sort r) sorry
@[simp] theorem coe_sort {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r]
[is_antisymm α r] [is_total α r] (l : List α) : sort r ↑l = list.merge_sort r l :=
rfl
@[simp] theorem sort_sorted {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r]
[is_antisymm α r] [is_total α r] (s : multiset α) : list.sorted r (sort r s) :=
quot.induction_on s fun (l : List α) => list.sorted_merge_sort r l
@[simp] theorem sort_eq {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r]
[is_antisymm α r] [is_total α r] (s : multiset α) : ↑(sort r s) = s :=
quot.induction_on s fun (l : List α) => quot.sound (list.perm_merge_sort r l)
@[simp] theorem mem_sort {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r]
[is_antisymm α r] [is_total α r] {s : multiset α} {a : α} : a ∈ sort r s ↔ a ∈ s :=
eq.mpr (id (Eq._oldrec (Eq.refl (a ∈ sort r s ↔ a ∈ s)) (Eq.symm (propext mem_coe))))
(eq.mpr (id (Eq._oldrec (Eq.refl (a ∈ ↑(sort r s) ↔ a ∈ s)) (sort_eq r s))) (iff.refl (a ∈ s)))
@[simp] theorem length_sort {α : Type u_1} (r : α → α → Prop) [DecidableRel r] [is_trans α r]
[is_antisymm α r] [is_total α r] {s : multiset α} : list.length (sort r s) = coe_fn card s :=
quot.induction_on s (list.length_merge_sort r)
protected instance has_repr {α : Type u_1} [has_repr α] : has_repr (multiset α) :=
has_repr.mk
fun (s : multiset α) =>
string.str string.empty (char.of_nat (bit1 (bit1 (bit0 (bit1 (bit1 (bit1 1))))))) ++
string.intercalate
(string.str (string.str string.empty (char.of_nat (bit0 (bit0 (bit1 (bit1 (bit0 1)))))))
(char.of_nat (bit0 (bit0 (bit0 (bit0 (bit0 1)))))))
(sort LessEq (map repr s)) ++
string.str string.empty (char.of_nat (bit1 (bit0 (bit1 (bit1 (bit1 (bit1 1)))))))
end Mathlib |
module Bool where
data Bool : Set where
false : Bool
true : Bool
data IsTrue : Bool -> Set where
isTrue : IsTrue true
open import Vec
open import All
allEnvs : {n : Nat} -> List (Vec Bool n)
allEnvs {zero } = ε :: []
allEnvs {suc n} = map (_►_ false) allEnvs ++ map (_►_ true) allEnvs
∈++left : {A : Set}{x : A}{xs ys : List A} -> x ∈ xs -> x ∈ (xs ++ ys)
∈++left (hd p) = hd p
∈++left (tl q) = tl (∈++left q)
∈++right : {A : Set}{x : A}{xs ys : List A} -> x ∈ ys -> x ∈ (xs ++ ys)
∈++right {xs = []} p = p
∈++right {xs = x :: xs} p = tl (∈++right {xs = xs} p)
∈map : {A B : Set}{f : A -> B}{x : A}{xs : List A} -> x ∈ xs -> f x ∈ map f xs
∈map (hd refl) = hd refl
∈map (tl q) = tl (∈map q)
covered : {n : Nat} -> (xs : Vec Bool n) -> xs ∈ allEnvs
covered ε = hd refl
covered (false ► xs) = ∈++left (∈map (covered xs))
covered (true ► xs) = ∈++right {xs = map (_►_ false) allEnvs}
(∈map (covered xs))
Sat : {A : Set} -> (A -> Bool) -> A -> Set
Sat f x = IsTrue (f x)
lem₁ : {n : Nat}(f : Vec Bool n -> Bool) ->
All (Sat f) allEnvs -> (xs : Vec Bool n) -> Sat f xs
lem₁ f p xs with p ! covered xs
... | (.xs , p , refl) = p
data False : Set where
¬_ : Set -> Set
¬ P = P -> False
data _∨_ (A B : Set) : Set where
inl : A -> A ∨ B
inr : B -> A ∨ B
¬IsTrue-false : ¬ IsTrue false
¬IsTrue-false ()
decide : {A : Set}(p : A -> Bool)(x : A) ->
Sat p x ∨ ¬ Sat p x
decide p x with p x
... | true = inl isTrue
... | false = inr ¬IsTrue-false
all : {A : Set}(p : A -> Bool)(xs : List A) ->
All (Sat p) xs ∨ Some (\x -> ¬ Sat p x) xs
all p [] = inl ∅
all p (x :: xs) with decide p x
... | inr ¬px = inr (hd ¬px)
... | inl px with all p xs
... | inl ps = inl (px ▹ ps)
... | inr q = inr (tl q)
data NoProof : Set where
no-proof : NoProof
Proof : {n : Nat} -> (Vec Bool n -> Bool) -> Set
Proof {n} f with all f allEnvs
... | inl _ = (xs : Vec Bool n) -> Sat f xs
... | inr _ = NoProof
prove : {n : Nat}(f : Vec Bool n -> Bool) -> Proof f
prove f with all f allEnvs
... | inl ps = lem₁ f ps
... | inr _ = no-proof
|
<h3>Mathematical Simulation 2018</h3>
<div style="background-color:#0099cc;">
<font color = white>
<ul>
<li>Lázaro Alonso </li>
<li>Email: `[email protected], [email protected]`</li>
</ul>
</font>
</div>
### Please click on the following link. It is an easy way for you to send me your questions, and it also lets everyone keep track of the problems from both homework and class.
https://join.slack.com/t/sm-grupo/shared_invite/enQtMzcxMzcxMjY1NzgyLWE5ZDlhYjg4OGJhMGE2ZmY2ZGUzZWIxYzQzOTUxZWU2ZGM5YjUyYWMyZGUzNzZjMDE5ZDIxYTA4YTI2ZWQ1NTU
### Maxima and minima
##### 1. Find the local maximum and minimum values of the function
$$g(x) = x + 2 \sin x$$
Your answer must include the following elements:
- A plot of the function $g(x)$
- Plots of the first and second derivatives ($g'(x)$, $g''(x)$)
- Mark the maxima and minima on the plot (_use plt.scatter_)
```python
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sympy as sym
sym.init_printing(use_latex='mathjax')
sym.var("x")
sym.var("x", real = True)
g = x + 2*sym.sin(x)
dg = sym.diff(g,x,1)
d2g = sym.diff(g,x,2)
```
```python
x1c = sym.solve(dg, x)
x2c = sym.solve(d2g, x)
print("Valores de g´(x)=0 --> ")
print(x1c)
print("Valores de g´´(x)=0 --> ")
print(x2c)
```
Solutions of g'(x) = 0 -->
[2*pi/3, 4*pi/3]
Solutions of g''(x) = 0 -->
[0, pi]
```python
g_num = sym.lambdify([x], g, 'numpy')
dg_num = sym.lambdify([x], dg, 'numpy')
d2g_num = sym.lambdify([x], d2g, 'numpy')
x_vec = np.linspace(0, 2*np.pi, 100)
plt.figure(figsize=(10,4))
plt.plot(x_vec, g_num(x_vec), color = "cyan", label = g)
plt.plot(x_vec, dg_num(x_vec), color = "green", label = dg)
plt.plot(x_vec, d2g_num(x_vec), color = "red", label = d2g)
plt.scatter(2*np.pi/3,(2*np.pi/3) + 2*np.sin(2*np.pi/3), color="cyan")
plt.scatter(4*np.pi/3,(4*np.pi/3) + 2*np.sin(4*np.pi/3), color="cyan")
plt.grid()
plt.legend()
plt.xlabel('$x$', fontsize = 18)
plt.ylabel('$y$', fontsize = 18)
plt.show()
```
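A quick second-derivative test classifies the critical points: since $g''(x) = -2\sin x$, we get $g''(2\pi/3) = -\sqrt{3} < 0$ (local maximum) and $g''(4\pi/3) = \sqrt{3} > 0$ (local minimum). A small sketch reusing `d2g_num` from the cell above:
```python
# Second-derivative test at the critical points of g (reuses d2g_num above)
for xc in [2*np.pi/3, 4*np.pi/3]:
    kind = "local maximum" if d2g_num(xc) < 0 else "local minimum"
    print(f"x = {xc:.4f}: g''(x) = {d2g_num(xc):+.4f} -> {kind}")
```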
##### 2. Discuss the curve $f(x) = x^4 - 4x^3$: inflection points, maxima, and minima. Your answer must include the same elements as the previous case.
```python
sym.var("z")
sym.var("z", real = True)
f=z**4-4*z**3
df = sym.diff(f,z,1)
d2f = sym.diff(f,z,2)
z1c = sym.solve(df, z)
z2c = sym.solve(d2f, z)
print("Valores de f´(z)=0 --> ")
print(z1c)
print("Valores de f´´(z)=0 --> ")
print(z2c)
```
Solutions of f'(z) = 0 -->
[0, 3]
Solutions of f''(z) = 0 -->
[0, 2]
```python
f_num = sym.lambdify([z], f, 'numpy')
df_num = sym.lambdify([z], df, 'numpy')
d2f_num = sym.lambdify([z], d2f, 'numpy')
x_vec = np.linspace(-2, 4, 100)
plt.figure(figsize=(10,4))
plt.plot(x_vec, f_num(x_vec), color = "cyan", label = f)
plt.plot(x_vec, df_num(x_vec), color = "green", label = df)
plt.plot(x_vec, d2f_num(x_vec), color = "red", label = d2f)
plt.scatter(0,0, color="black")
plt.scatter(3,3**4-4*3**3, color="orange")
plt.scatter(2,2**4-4*2**3, color="blue")
plt.grid()
plt.legend()
plt.xlabel('$x$', fontsize = 18)
plt.ylabel('$y$', fontsize = 18)
plt.show()
```
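The same test (reusing `d2f_num` from above) shows $f''(3) = 36 > 0$, so $z = 3$ is a local minimum; $f''(0) = 0$ is inconclusive on its own, and since $f''(z) = 12z^2 - 24z$ changes sign at $z = 0$ and $z = 2$, both are inflection points.
```python
# Second-derivative values at the critical points and inflection candidates of f
for zc in [0, 2, 3]:
    print(f"z = {zc}: f''(z) = {d2f_num(zc)}")
```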
##### 3. A can is to be manufactured that must hold 1 L of oil. Find the dimensions of the can that minimize the cost of the metal needed to make it.
- The solution must include the following elements
- The equation of the system
- The critical numbers
- A drawing of the can
```python
sym.var("w")
sym.var("w", real = True)
#1000cm3=1lt=h*pi*r**2
h = 1000/(np.pi*(w**2))
area = 2*np.pi*w**2 +h*2*np.pi*w
derarea = sym.diff(area,w,1)
print("Derivada de area: ")
print(derarea)
w1c = sym.solve(derarea, w)
print("Valores de area´(w)=0 --> ")
print(w1c)
```
Derivative of the area:
12.5663706143592*w - 2000.0/w**2
Solutions of area'(w) = 0 -->
[5.41926070139289]
```python
area_num = sym.lambdify([w], area, 'numpy')
derarea_num = sym.lambdify([w], derarea, 'numpy')
x_vec = np.linspace(2, 8, 100)
punto= (2*np.pi*5.4192**2) + (1000/(np.pi*(5.4192**2)))*2*np.pi*5.4192
plt.figure(figsize=(10,4))
plt.plot(x_vec, area_num(x_vec), color = "red", label = area)
plt.grid()
plt.scatter(5.4192,punto)
plt.legend()
plt.xlabel('Radius (cm)', fontsize = 18)
plt.ylabel('Total area (cm$^2$)', fontsize = 18)
plt.show()
```
To know how to minimize the cost of making the can, we need a formula for how much metal is used; in this case we need the area of the two lids plus the cylindrical wall, so we can write:
$$A_{total} = 2\pi r^2 + \frac{2000}{r}$$
Now that we know the area function, we look for the point where the area is minimal while keeping the volume at 1 litre. Fermat's theorem gives that, to optimize and reduce costs, the radius must be 5.4192 cm and the height 10.8387 cm.
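For completeness, the derivation behind those numbers (volume in cm³):

$$V = \pi r^2 h = 1000 \;\Rightarrow\; h = \frac{1000}{\pi r^2}, \qquad A(r) = 2\pi r^2 + 2\pi r h = 2\pi r^2 + \frac{2000}{r}$$

Setting $A'(r) = 4\pi r - 2000/r^2 = 0$ gives $r^3 = 500/\pi$, hence $r = (500/\pi)^{1/3} \approx 5.4193$ cm and $h = 1000/(\pi r^2) = 2r \approx 10.839$ cm: the optimal can is exactly as tall as it is wide.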
|
If $f$ has an isolated singularity at $z$, then $-f$ has an isolated singularity at $z$. |
import data.int.basic
import data.rat.basic
import data.real.basic
import data.buffer.parser
import tactic.find
import analysis.special_functions.pow
import ODE_enclosures.zpow
-- https://isabelle.in.tum.de/website-Isabelle2013/dist/library/HOL/HOL-Library/Float.html
open parser zpow function
structure dyadic_rational := (m : ℤ) (e : ℤ)
local notation `𝔽` := dyadic_rational
namespace dyadic_rational
meta instance : has_to_format 𝔽 :=
⟨λ f, (to_fmt f.m) ++ (to_fmt " * 2^") ++ (to_fmt f.e)⟩
@[reducible] def to_rat (x : 𝔽) : ℚ :=
x.m * (2 ^ x.e)
@[reducible] noncomputable def to_real (x : 𝔽) : ℝ :=
x.m * (2 ^ x.e)
@[simp] lemma to_rat_mk {m e : ℤ} : to_rat ⟨m, e⟩ = m * (2 ^ e) := rfl
@[simp] lemma to_real_mk {m e : ℤ} : to_real ⟨m, e⟩ = m * (2 ^ e) := rfl
-- Basic operations.
@[reducible] def zero : 𝔽 := ⟨0, 0⟩
lemma zero_spec : to_rat zero = 0 := by simp
@[reducible] def one : 𝔽 := ⟨1, 0⟩
lemma one_spec : to_rat one = 1 := by simp
@[reducible] def align (x y : 𝔽) : ℤ × ℤ × ℤ :=
if x.e ≤ y.e then ⟨x.m, y.m * 2 ^ (y.e - x.e), x.e⟩ else ⟨x.m * 2 ^ (x.e - y.e), y.m, y.e⟩
@[simp] lemma align_le {x y : 𝔽} (h : x.e ≤ y.e)
: align x y = ⟨x.m, y.m * 2 ^ (y.e - x.e), x.e⟩ :=
by simp only [align]; split_ifs; refl
@[simp] lemma align_le.mx {x y : 𝔽} (h : x.e ≤ y.e)
: (align x y).1 = x.m :=
by simp [align_le h]
@[simp] lemma align_le.my {x y : 𝔽} (h : x.e ≤ y.e)
: (align x y).2.1 = y.m * 2 ^ (y.e - x.e) :=
by simp [align_le h]
@[simp] lemma align_le.e {x y : 𝔽} (h : x.e ≤ y.e)
: (align x y).2.2 = x.e :=
by simp [align_le h]
@[simp] lemma align_not_le {x y : 𝔽} (h : ¬ (x.e ≤ y.e))
: align x y = ⟨x.m * 2 ^ (x.e - y.e), y.m, y.e⟩ :=
by simp only [align]; split_ifs; refl
@[simp] lemma align_not_le.mx {x y : 𝔽} (h : ¬ (x.e ≤ y.e))
: (align x y).1 = x.m * 2 ^ (x.e - y.e) :=
by simp [align_not_le h]
@[simp] lemma align_not_le.my {x y : 𝔽} (h : ¬ (x.e ≤ y.e))
: (align x y).2.1 = y.m :=
by simp [align_not_le h]
@[simp] lemma align_not_le.e {x y : 𝔽} (h : ¬ (x.e ≤ y.e))
: (align x y).2.2 = y.e :=
by simp [align_not_le h]
-- TODO: Move
lemma zpow_rat_cast (x y : ℤ) (hy : 0 ≤ y) : ((zpow x y) : ℚ) = (x : ℚ) ^ (y : ℤ) :=
begin
simp only [zpow_eq_pow, zpow_def],
lift y to ℕ using hy,
rw [int.to_nat_coe_nat], norm_num,
end
lemma align_spec (x y : 𝔽) :
let a := align x y in
to_rat x = to_rat ⟨a.1, a.2.2⟩ ∧ to_rat y = to_rat ⟨a.2.1, a.2.2⟩ :=
begin
intros a,
have h2 : ((2 : ℤ) : ℚ) = (2 : ℚ) := by norm_num,
split; by_cases (x.e ≤ y.e); simp*;
try { erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le h))], };
try { erw [zpow_rat_cast _ _ (sub_nonneg.2 h)], };
erw [mul_assoc, h2, ←fpow_add];
simp; norm_num,
end
lemma align_semi_comm (x y : 𝔽)
: (align x y).1 = (align y x).2.1
∧ (align x y).2.1 = (align y x).1
∧ (align x y).2.2 = (align y x).2.2 :=
begin
cases (lt_trichotomy x.e y.e),
{ have h' := not_le_of_lt h, replace h := le_of_lt h,
simp [align_le h, align_not_le h'], },
{ cases h,
{ simp [align_le (le_of_eq h), align_le (ge_of_eq h), h], },
{ have h' := not_le_of_lt h, replace h := le_of_lt h,
simp [align_le h, align_not_le h'], }, }
end
-- Operations.
def neg (x : 𝔽) : 𝔽 :=
⟨-x.m, x.e⟩
@[simp] lemma neg.m (x : 𝔽) : (neg x).m = -x.m := by simp [neg]
@[simp] lemma neg.e (x : 𝔽) : (neg x).e = x.e := by simp [neg]
def add (x y : 𝔽) : 𝔽 :=
let ⟨mx, my, e⟩ := align x y in ⟨mx + my, e⟩
lemma add.def (x y : 𝔽) : add x y = ⟨(align x y).1 + (align x y).2.1, (align x y).2.2⟩ :=
begin
unfold add, by_cases (x.e ≤ y.e),
{ simp only [align_le.mx h, align_le.my h, align_le.e h],
unfold align, split_ifs, refl, },
{ simp only [align_not_le.mx h, align_not_le.my h, align_not_le.e h],
unfold align, split_ifs, refl, }
end
@[simp] lemma add.m (x y : 𝔽) : (add x y).m = (align x y).1 + (align x y).2.1 :=
by rw [add.def]; refl
@[simp] lemma add.e (x y : 𝔽) : (add x y).e = (align x y).2.2 :=
by rw [add.def]; refl
def mul (x y : 𝔽) : 𝔽 :=
⟨x.m * y.m, x.e + y.e⟩
@[simp] lemma mul.m (x y : 𝔽) : (mul x y).m = x.m * y.m := by simp [mul]
@[simp] lemma mul.e (x y : 𝔽) : (mul x y).e = x.e + y.e := by simp [mul]
-- Properties of to_rat.
lemma to_rat.neg {x y : 𝔽} (h : to_rat x = to_rat y) : to_rat (neg x) = to_rat (neg y) :=
begin
simp only [neg, to_rat_mk] at *,
iterate 2 { rw [int.cast_neg, ←neg_mul_eq_neg_mul], }, rw h,
end
lemma to_rat.add {x y x' y' : 𝔽} (h : to_rat x = to_rat y) (h' : to_rat x' = to_rat y')
: to_rat (add x x') = to_rat (add y y') :=
begin
have h2 : ((2 : ℤ) : ℚ) = (2 : ℚ) := by norm_num, -- TODO: I hate this.
simp only [to_rat_mk, add.m, add.e] at *,
by_cases (x.e ≤ x'.e); replace hx := h; clear h;
by_cases (y.e ≤ y'.e); replace hy := h; clear h;
try { simp only [align_le.mx hx, align_le.my hx, align_le.e hx], };
try { simp only [align_le.mx hy, align_le.my hy, align_le.e hy], };
try { simp only [align_not_le.mx hx, align_not_le.my hx, align_not_le.e hx], };
try { simp only [align_not_le.mx hy, align_not_le.my hy, align_not_le.e hy], };
push_cast; rw [add_mul, add_mul],
{ rw [h, mul_assoc, mul_assoc],
erw [zpow_rat_cast _ _ (sub_nonneg.2 hx), h2, ←fpow_add],
erw [zpow_rat_cast _ _ (sub_nonneg.2 hy), h2, ←fpow_add],
simp, rw [h'], norm_num, norm_num, },
{ rw [h, mul_assoc, mul_assoc],
erw [zpow_rat_cast _ _ (sub_nonneg.2 hx), h2, ←fpow_add],
erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le hy)), h2, ←fpow_add],
simp, rw [←h'], norm_num, norm_num, },
{ rw [h', mul_assoc, mul_assoc],
erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le hx)), h2, ←fpow_add],
erw [zpow_rat_cast _ _ (sub_nonneg.2 hy), h2, ←fpow_add],
simp, rw [h], norm_num, norm_num, },
{ rw [h', mul_assoc, mul_assoc],
erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le hx)), h2, ←fpow_add],
erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le hy)), h2, ←fpow_add],
simp, rw [←h], norm_num, norm_num, }
end
lemma to_rat.mul {x y x' y' : 𝔽} (h : to_rat x = to_rat y) (h' : to_rat x' = to_rat y')
: to_rat (mul x x') = to_rat (mul y y') :=
begin
simp only [to_rat_mk, mul.m, mul.e] at *,
simp only [fpow_add (by norm_num : (2 : ℚ) ≠ 0)], push_cast,
calc ↑(x.m) * ↑(x'.m) * ((2 : ℚ) ^ x.e * (2 : ℚ) ^ x'.e)
= (↑(x.m) * 2 ^ x.e) * (↑(x'.m) * 2 ^ x'.e) : by ring
... = (↑(y.m) * 2 ^ y.e) * (↑(y'.m) * 2 ^ y'.e) : by rw [h, h']
... = ↑(y.m) * ↑(y'.m) * (2 ^ y.e * 2 ^ y'.e) : by ring
end
-- 𝔽 is not a ring but 𝔽/R where R(x,y) iff to_rat x = to_rat y is a ring.
@[reducible] private def R : 𝔽 → 𝔽 → Prop := λ x y, to_rat x = to_rat y
private lemma R.reflexive : reflexive R := λ x, by unfold R; exact eq.refl
private lemma R.symmetric : symmetric R := λ x y, by unfold R; exact eq.symm
private lemma R.transitive : transitive R := λ x y z, by unfold R; exact eq.trans
private lemma R.equivalence : equivalence R := ⟨R.reflexive, R.symmetric, R.transitive⟩
instance dyadic_rational.setoid : setoid 𝔽 := ⟨R, R.equivalence⟩
def 𝔽R := quotient dyadic_rational.setoid
instance : comm_ring 𝔽R := {
zero := ⟦zero⟧,
one := ⟦one⟧,
neg := quotient.lift (λ x, ⟦neg x⟧) (λ a b h, quotient.sound $ to_rat.neg h),
add := quotient.lift₂ (λ x y, ⟦add x y⟧) (λ a₁ a₂ b₁ b₂ h₁ h₂, quotient.sound $ to_rat.add h₁ h₂),
mul := quotient.lift₂ (λ x y, ⟦mul x y⟧) (λ a₁ a₂ b₁ b₂ h₁ h₂, quotient.sound $ to_rat.mul h₁ h₂),
zero_add := λ x, begin
have h2 : ((2 : ℤ) : ℚ) = (2 : ℚ) := by norm_num,
apply quotient.induction_on x, intros a, apply quotient.sound,
simp only [add.def], show to_rat _ = to_rat a,
by_cases (zero.e ≤ a.e),
{ simp only [align_le.mx h, align_le.my h, align_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 h), h2], simp, },
{ simp only [align_not_le.mx h, align_not_le.my h, align_not_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le h)), h2], simp, }
end,
add_zero := λ x, begin
have h2 : ((2 : ℤ) : ℚ) = (2 : ℚ) := by norm_num,
apply quotient.induction_on x, intros a, apply quotient.sound,
simp only [add.def], show to_rat _ = to_rat a,
by_cases (a.e ≤ zero.e),
{ simp only [align_le.mx h, align_le.my h, align_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 h), h2], simp, },
{ simp only [align_not_le.mx h, align_not_le.my h, align_not_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le h)), h2], simp, }
end,
add_left_neg := λ x, begin
have h2 : ((2 : ℤ) : ℚ) = (2 : ℚ) := by norm_num,
apply quotient.induction_on x, intros a, apply quotient.sound,
simp only [add.def], show to_rat _ = to_rat _,
by_cases ((neg a).e ≤ a.e),
{ simp only [align_le.mx h, align_le.my h, align_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 h), h2], simp*, },
{ simp only [align_not_le.mx h, align_not_le.my h, align_not_le.e h, to_rat_mk],
push_cast, erw [zpow_rat_cast _ _ (sub_nonneg.2 (le_of_not_le h)), h2], simp*, }
end,
add_comm := λ x y, begin
apply quotient.induction_on₂ x y, intros a b, apply quotient.sound,
simp only [add.def], show to_rat _ = to_rat _,
simp_rw [align_semi_comm a b, add_comm],
end,
add_assoc := λ x y z, begin
apply quotient.induction_on₃ x y z, intros a b c, apply quotient.sound,
simp only [add.def], show to_rat _ = to_rat _, dsimp, sorry,
end,
one_mul := sorry,
mul_one := sorry,
mul_comm := sorry,
mul_assoc := sorry,
left_distrib := sorry,
right_distrib := sorry,
}
end dyadic_rational
|
lemma closed_limpt: "closed S \<longleftrightarrow> (\<forall>x. x islimpt S \<longrightarrow> x \<in> S)" |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 7 16:50:41 2021
@author: sammm
"""
import numpy as np
from source import rotation
from source import spacecraft
class Formation():
def __init__(self, sC, sD):
'''Initialise a formation object, defined by a chief and a deputy.
Parameters
----------
sC : Spacecraft()
Chief spacecraft, as an instance of the Spacecraft() class from
the `spacecraft.py` module.
sD : Spacecraft()
Deputy spacecraft, as an instance of the Spacecraft() class from
the `spacecraft.py` module.
'''
self.sC = sC # Chief
self.sD = sD # Deputy
self.update_ROE( sC, sD )
self.update_RTN( sC, sD )
# except AttributeError:
# print("AttributeError: Check if constructor has 02x Spacecraft()?")
# except TypeError:
# print("TypeError: Inputs must be instances of Spacecraft()!")
# except:
# print("Unknown error occurred. Printing constructor args:")
# print(sC, sD)
# Convert Keplerian orbit elements to relative orbit elements exactly.
def update_ROE(self, sC, sD):
self.sC = sC # Chief
self.sD = sD # Deputy
self.da = (self.sD.a - self.sC.a) / self.sC.a
self.dL = (self.sD.M - self.sC.M) + (self.sD.w - self.sC.w)
self.dL = (self.sD.R - self.sC.R) * np.cos(self.sC.i) + self.dL
self.ix = (self.sD.i - self.sC.i)
self.iy = (self.sD.R - self.sC.R) * np.sin(self.sC.i)
self.ex = (self.sD.e * np.cos(self.sD.w))
self.ex = (self.ex - (self.sC.e * np.cos(self.sC.w)))
self.ey = (self.sD.e * np.sin(self.sD.w))
self.ey = (self.ey - (self.sC.e * np.sin(self.sC.w)))
# Convert inertial frame coordinates to RTN frame coordinates exactly.
def update_RTN(self, sC, sD):
self.sC = sC # Chief
self.sD = sD # Deputy
pC = np.array([ self.sC.px, self.sC.py, self.sC.pz ])
pD = np.array([ self.sD.px, self.sD.py, self.sD.pz ])
self.compute_hill_dcm() # Updates self.hill_dcm
self.RTN = self.hill_dcm @ ( pC - pD )
return self.RTN
# Compute the Hill-Frame
def compute_hill_dcm(self):
pC = [ self.sC.px, self.sC.py, self.sC.pz ] # Position of chief
vC = [ self.sC.vx, self.sC.vy, self.sC.vz ] # Velocity of chief
hC = np.cross(pC, vC) # Angular momentum vector
r_hat = pC / np.linalg.norm(pC) # Local X-axis
h_hat = hC / np.linalg.norm(hC) # Local Z-axis
y_hat = np.cross(h_hat, r_hat) # Local Y-axis
self.hill_dcm = np.array([r_hat, y_hat, h_hat])
return self.hill_dcm
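# Minimal usage sketch (hypothetical: the Spacecraft constructor arguments are
# assumptions, but the attributes a, e, i, R, w, M and px...vz are the ones
# this class actually reads):
#
#   sC = spacecraft.Spacecraft()  # chief
#   sD = spacecraft.Spacecraft()  # deputy
#   fm = Formation(sC, sD)
#   print(fm.da, fm.dL, fm.ix, fm.iy, fm.ex, fm.ey)  # relative orbit elements
#   print(fm.RTN)  # chief-deputy relative position in the Hill frame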
|
function [mapStruct] = modifyReactionsMetabolites(map, rxnList, metList, newColor, newAreaWidth)
% Modifies the color and areaWidth of the reactions in a given input list,
% and the color of the corresponding metabolites in a given input list. The
% color and areaWidth are given as inputs, and only metabolites present in
% the given reaction list will be colored.
%
% USAGE:
%
% [mapStruct] = modifyReactionsMetabolites(map, rxnList, metList, newColor, newAreaWidth)
%
% INPUTS:
% map: Matlab structure of the map obtained from the
% function "transformXML2Map".
% rxnList: List of reaction names as a string array
% metList: List of metabolite names as a string array
% newColor: Color chosen for reaction lines and metabolites
% given as a string with the corresponding real name.
% Possible names can be found in the function
% "createColorsMap.m".
% newAreaWidth: Width size for the reaction lines. Can be given as
% a string or a double.
%
% OUTPUT:
% mapStruct: Updated map structure with the changed areaWidth and
% color of the reactions and their corresponding
% metabolites.
%
% .. Author: - N.Sompairac - Institut Curie, Paris, 25/07/2017
colors = createColorsMap; % Create a colors map pairing color names with their HTML codes
mapStruct = map;
% Get the indexes of the needed reactions to color
rxnIndexList = find(ismember(mapStruct.rxnName, rxnList));
% Initialise a list that will contain aliases of molecules implicated in
% all the needed reactions
rxnAliasList = {};
% Loop over reactions and change the corresponding values
for rxn = rxnIndexList'
% Change the color of the reaction
mapStruct.rxnColor{rxn} = colors(newColor);
% Change the areaWidth of the reaction
mapStruct.rxnWidth{rxn} = newAreaWidth;
% Get the list of aliases involved in this reaction
% Loop over base reactants
for x = 1:length(mapStruct.rxnBaseReactantID{rxn})
rxnAliasList = [rxnAliasList, mapStruct.rxnBaseReactantAlias{rxn}{x}];
end
% Loop over reactants
for x = 1:length(mapStruct.rxnReactantID{rxn})
rxnAliasList = [rxnAliasList, mapStruct.rxnReactantAlias{rxn}{x}];
end
% Loop over base products
for x = 1:length(mapStruct.rxnBaseProductID{rxn})
rxnAliasList = [rxnAliasList, mapStruct.rxnBaseProductAlias{rxn}{x}];
end
% Loop over products
for x = 1:length(mapStruct.rxnProductID{rxn})
rxnAliasList = [rxnAliasList, mapStruct.rxnProductAlias{rxn}{x}];
end
end
% Get the indexes of the needed molecules to color
% Get the corresponding IDs of the species based on their Names
specIdList = mapStruct.specID(ismember(mapStruct.specName, metList));
% Get the corresponding Aliases of the molecules based on the species IDs
mapAliasList = mapStruct.molAlias(ismember(mapStruct.molID, specIdList));
% Get only the Aliases of the molecules implicated in the needed reactions
neededAliasList = mapAliasList(ismember(mapAliasList, rxnAliasList));
% Get the corresponding Indexes of the molecules in the needed reactions
molIndexList = find(ismember(mapStruct.molAlias, neededAliasList));
% Loop over molecules and change the color
for x = molIndexList'
mapStruct.molColor{x} = colors(newColor);
end
end
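% Example usage (a sketch; the map file, reaction and metabolite names are
% hypothetical, and the color name must be a key defined in createColorsMap):
%
%   map = transformXML2Map('myMap.xml');
%   newMap = modifyReactionsMetabolites(map, {'RXN1'; 'RXN2'}, ...
%       {'atp[c]'; 'adp[c]'}, 'RED', 8);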
|
If $f$ is Lipschitz on $S$, then $f$ is Lipschitz on any subset of $S$. |
module Idris.Elab.Interface
import Core.Binary
import Core.Context
import Core.Context.Log
import Core.Core
import Core.Env
import Core.Metadata
import Core.TT
import Core.Unify
import Core.Value
import Idris.Resugar
import Idris.Syntax
import TTImp.BindImplicits
import TTImp.ProcessDecls
import TTImp.Elab
import TTImp.Elab.Check
import TTImp.Unelab
import TTImp.TTImp
import TTImp.Utils
import Data.ANameMap
import Data.List
import Data.Maybe
%default covering
-- TODO: Check all the parts of the body are legal
-- TODO: Deal with default superclass implementations
mkDataTy : FC -> List (Name, RawImp) -> RawImp
mkDataTy fc [] = IType fc
mkDataTy fc ((n, ty) :: ps)
= IPi fc top Explicit (Just n) ty (mkDataTy fc ps)
mkIfaceData : {vars : _} ->
{auto c : Ref Ctxt Defs} ->
FC -> Visibility -> Env Term vars ->
List (Maybe Name, RigCount, RawImp) ->
Name -> Name -> List (Name, RawImp) ->
List Name -> List (Name, RigCount, RawImp) -> Core ImpDecl
mkIfaceData {vars} fc vis env constraints n conName ps dets meths
= let opts = if isNil dets
then [NoHints, UniqueSearch]
else [NoHints, UniqueSearch, SearchBy dets]
retty = apply (IVar fc n) (map (IVar fc) (map fst ps))
conty = mkTy Implicit (map jname ps) $
mkTy Explicit (map bhere constraints ++ map bname meths) retty
con = MkImpTy fc conName !(bindTypeNames [] (map fst ps ++ map fst meths ++ vars) conty) in
pure $ IData fc vis (MkImpData fc n
!(bindTypeNames [] (map fst ps ++ map fst meths ++ vars)
(mkDataTy fc ps))
opts [con])
where
jname : (Name, RawImp) -> (Maybe Name, RigCount, RawImp)
jname (n, t) = (Just n, erased, t)
bname : (Name, RigCount, RawImp) -> (Maybe Name, RigCount, RawImp)
bname (n, c, t) = (Just n, c, IBindHere (getFC t) (PI erased) t)
bhere : (Maybe Name, RigCount, RawImp) -> (Maybe Name, RigCount, RawImp)
bhere (n, c, t) = (n, c, IBindHere (getFC t) (PI erased) t)
mkTy : PiInfo RawImp ->
List (Maybe Name, RigCount, RawImp) -> RawImp -> RawImp
mkTy imp [] ret = ret
mkTy imp ((n, c, argty) :: args) ret
= IPi fc c imp n argty (mkTy imp args ret)
-- Give implicit Pi bindings explicit names, if they don't have one already,
-- because we need them to be consistent everywhere we refer to them
namePis : Int -> RawImp -> RawImp
namePis i (IPi fc r AutoImplicit Nothing ty sc)
= IPi fc r AutoImplicit (Just (MN "i_con" i)) ty (namePis (i + 1) sc)
namePis i (IPi fc r Implicit Nothing ty sc)
= IPi fc r Implicit (Just (MN "i_imp" i)) ty (namePis (i + 1) sc)
namePis i (IPi fc r p n ty sc)
= IPi fc r p n ty (namePis i sc)
namePis i (IBindHere fc m ty) = IBindHere fc m (namePis i ty)
namePis i ty = ty
-- Get the implicit arguments for a method declaration or constraint hint
-- to allow us to build the data declaration
getMethDecl : {vars : _} ->
{auto c : Ref Ctxt Defs} ->
Env Term vars -> NestedNames vars ->
(params : List (Name, RawImp)) ->
(mnames : List Name) ->
(FC, RigCount, List FnOpt, n, (Bool, RawImp)) ->
Core (n, RigCount, RawImp)
getMethDecl {vars} env nest params mnames (fc, c, opts, n, (d, ty))
= do ty_imp <- bindTypeNames [] (map fst params ++ mnames ++ vars) ty
pure (n, c, stripParams (map fst params) ty_imp)
where
-- We don't want the parameters to explicitly appear in the method
-- type in the record for the interface (they are parameters of the
-- interface type), so remove it here
stripParams : List Name -> RawImp -> RawImp
stripParams ps (IPi fc r p mn arg ret)
= if (maybe False (\n => n `elem` ps) mn)
then stripParams ps ret
else IPi fc r p mn arg (stripParams ps ret)
stripParams ps ty = ty
-- bind the auto implicit for the interface - put it after all the other
-- implicits
bindIFace : FC -> RawImp -> RawImp -> RawImp
bindIFace _ ity (IPi fc rig Implicit n ty sc)
= IPi fc rig Implicit n ty (bindIFace fc ity sc)
bindIFace _ ity (IPi fc rig AutoImplicit n ty sc)
= IPi fc rig AutoImplicit n ty (bindIFace fc ity sc)
bindIFace fc ity sc = IPi fc top AutoImplicit (Just (UN "__con")) ity sc
-- Get the top level function for implementing a method
getMethToplevel : {vars : _} ->
{auto c : Ref Ctxt Defs} ->
Env Term vars -> Visibility ->
Name -> Name ->
(constraints : List (Maybe Name)) ->
(allmeths : List Name) ->
(params : List (Name, RawImp)) ->
(FC, RigCount, List FnOpt, Name, (Bool, RawImp)) ->
Core (List ImpDecl)
getMethToplevel {vars} env vis iname cname constraints allmeths params
(fc, c, opts, n, (d, ty))
= do let ity = apply (IVar fc iname) (map (IVar fc) (map fst params))
-- Make the constraint application explicit for any method names
-- which appear in other method types
let ty_constr =
bindPs params $ substNames vars (map applyCon allmeths) ty
ty_imp <- bindTypeNames [] vars (bindIFace fc ity ty_constr)
cn <- inCurrentNS n
let tydecl = IClaim fc c vis (if d then [Inline, Invertible]
else [Inline])
(MkImpTy fc cn ty_imp)
let conapp = apply (IVar fc cname)
(map (const (Implicit fc True)) constraints ++
map (IBindVar fc) (map bindName allmeths))
let argns = getExplicitArgs 0 ty
-- eta expand the RHS so that we put implicits in the right place
let fnclause = PatClause fc (IImplicitApp fc (IVar fc cn)
(Just (UN "__con"))
conapp)
(mkLam argns
(apply (IVar fc (methName n))
(map (IVar fc) argns)))
let fndef = IDef fc cn [fnclause]
pure [tydecl, fndef]
where
-- Bind the type parameters given explicitly - there might be information
-- in there that we can't infer after all
bindPs : List (Name, RawImp) -> RawImp -> RawImp
bindPs [] ty = ty
bindPs ((n, pty) :: ps) ty
= IPi (getFC pty) erased Implicit (Just n) pty (bindPs ps ty)
applyCon : Name -> (Name, RawImp)
applyCon n = (n, IImplicitApp fc (IVar fc n)
(Just (UN "__con")) (IVar fc (UN "__con")))
getExplicitArgs : Int -> RawImp -> List Name
getExplicitArgs i (IPi _ _ Explicit n _ sc)
= MN "arg" i :: getExplicitArgs (i + 1) sc
getExplicitArgs i (IPi _ _ _ n _ sc) = getExplicitArgs i sc
getExplicitArgs i tm = []
mkLam : List Name -> RawImp -> RawImp
mkLam [] tm = tm
mkLam (x :: xs) tm
= ILam fc top Explicit (Just x) (Implicit fc False) (mkLam xs tm)
bindName : Name -> String
bindName (UN n) = "__bind_" ++ n
bindName (NS _ n) = bindName n
bindName n = show n
methName : Name -> Name
methName n = UN (bindName n)
-- Get the function for chasing a constraint. This is one of the
-- arguments to the record, appearing before the method arguments.
getConstraintHint : {vars : _} ->
{auto c : Ref Ctxt Defs} ->
FC -> Env Term vars -> Visibility ->
Name -> Name ->
(constraints : List Name) ->
(allmeths : List Name) ->
(params : List Name) ->
(Name, RawImp) -> Core (Name, List ImpDecl)
getConstraintHint {vars} fc env vis iname cname constraints meths params (cn, con)
= do let ity = apply (IVar fc iname) (map (IVar fc) params)
let fty = IPi fc top Explicit Nothing ity con
ty_imp <- bindTypeNames [] (meths ++ vars) fty
let hintname = DN ("Constraint " ++ show con)
(UN ("__" ++ show iname ++ "_" ++ show con))
let tydecl = IClaim fc top vis [Inline, Hint False]
(MkImpTy fc hintname ty_imp)
let conapp = apply (IVar fc cname)
(map (IBindVar fc) (map bindName constraints) ++
map (const (Implicit fc True)) meths)
let fnclause = PatClause fc (IApp fc (IVar fc hintname) conapp)
(IVar fc (constName cn))
let fndef = IDef fc hintname [fnclause]
pure (hintname, [tydecl, fndef])
where
bindName : Name -> String
bindName (UN n) = "__bind_" ++ n
bindName (NS _ n) = bindName n
bindName n = show n
constName : Name -> Name
constName n = UN (bindName n)
getSig : ImpDecl -> Maybe (FC, RigCount, List FnOpt, Name, (Bool, RawImp))
getSig (IClaim _ c _ opts (MkImpTy fc n ty))
= Just (fc, c, opts, n, (False, namePis 0 ty))
getSig (IData _ _ (MkImpLater fc n ty))
= Just (fc, erased, [Invertible], n, (True, namePis 0 ty))
getSig _ = Nothing
getDefault : ImpDecl -> Maybe (FC, List FnOpt, Name, List ImpClause)
getDefault (IDef fc n cs) = Just (fc, [], n, cs)
getDefault _ = Nothing
mkCon : FC -> Name -> Name
mkCon loc (NS ns (UN n))
= NS ns (DN (n ++ " at " ++ show loc) (UN ("__mk" ++ n)))
mkCon loc n
= DN (show n ++ " at " ++ show loc) (UN ("__mk" ++ show n))
updateIfaceSyn : {auto c : Ref Ctxt Defs} ->
{auto s : Ref Syn SyntaxInfo} ->
Name -> Name -> List Name -> List Name -> List RawImp ->
List (Name, RigCount, List FnOpt, (Bool, RawImp)) -> List (Name, List ImpClause) ->
Core ()
updateIfaceSyn iname cn impps ps cs ms ds
= do syn <- get Syn
ms' <- traverse totMeth ms
let info = MkIFaceInfo cn impps ps cs ms' ds
put Syn (record { ifaces $= addName iname info,
saveIFaces $= (iname :: ) } syn)
where
findSetTotal : List FnOpt -> Maybe TotalReq
findSetTotal [] = Nothing
findSetTotal (Totality t :: _) = Just t
findSetTotal (_ :: xs) = findSetTotal xs
totMeth : (Name, RigCount, List FnOpt, (Bool, RawImp)) ->
Core (Name, RigCount, Maybe TotalReq, (Bool, RawImp))
totMeth (n, c, opts, t)
= do let treq = findSetTotal opts
pure (n, c, treq, t)
-- Read the implicitly added parameters from an interface type, so that we
-- know to substitute an implicit in when defining the implementation
getImplParams : Term vars -> List Name
getImplParams (Bind _ n (Pi _ _ Implicit _) sc)
= n :: getImplParams sc
getImplParams _ = []
export
elabInterface : {vars : _} ->
{auto c : Ref Ctxt Defs} ->
{auto u : Ref UST UState} ->
{auto s : Ref Syn SyntaxInfo} ->
{auto m : Ref MD Metadata} ->
FC -> Visibility ->
Env Term vars -> NestedNames vars ->
(constraints : List (Maybe Name, RawImp)) ->
Name ->
(params : List (Name, RawImp)) ->
(dets : List Name) ->
(conName : Maybe Name) ->
List ImpDecl ->
Core ()
elabInterface {vars} fc vis env nest constraints iname params dets mcon body
= do fullIName <- getFullName iname
ns_iname <- inCurrentNS fullIName
let conName_in = maybe (mkCon fc fullIName) id mcon
-- Machine generated names need to be qualified when looking them up
conName <- inCurrentNS conName_in
let meth_sigs = mapMaybe getSig body -- (FC, RigCount, List FnOpt, Name, (Bool, RawImp))
let meth_decls = map (\ (f, c, o, n, b, ty) => (n, c, o, b, ty)) meth_sigs
let meth_names = map fst meth_decls
let defaults = mapMaybe getDefault body
elabAsData conName meth_names meth_sigs
elabConstraintHints conName meth_names
elabMethods conName meth_names meth_sigs
ds <- traverse (elabDefault meth_decls) defaults
ns_meths <- traverse (\mt => do n <- inCurrentNS (fst mt)
pure (n, snd mt)) meth_decls
defs <- get Ctxt
Just ty <- lookupTyExact ns_iname (gamma defs)
| Nothing => throw (UndefinedName fc iname)
let implParams = getImplParams ty
updateIfaceSyn ns_iname conName
implParams (map fst params) (map snd constraints)
ns_meths ds
where
nameCons : Int -> List (Maybe Name, RawImp) -> List (Name, RawImp)
nameCons i [] = []
nameCons i ((_, ty) :: rest)
= (UN ("__con" ++ show i), ty) :: nameCons (i + 1) rest
-- Elaborate the data declaration part of the interface
elabAsData : (conName : Name) -> List Name ->
List (FC, RigCount, List FnOpt, Name, (Bool, RawImp)) ->
Core ()
elabAsData conName meth_names meth_sigs
= do -- set up the implicit arguments correctly in the method
-- signatures and constraint hints
meths <- traverse (getMethDecl env nest params meth_names) meth_sigs
log "elab.interface" 5 $ "Method declarations: " ++ show meths
consts <- traverse (getMethDecl env nest params meth_names)
(map (\c => (fc, linear, [], c))
(map notData constraints))
log "elab.interface" 5 $ "Constraints: " ++ show consts
dt <- mkIfaceData fc vis env consts iname conName params
dets meths
log "elab.interface" 10 $ "Methods: " ++ show meths
log "elab.interface" 5 $ "Making interface data type " ++ show dt
processDecls nest env [dt]
pure ()
where
notData : (n, t) -> (n, (Bool, t))
notData (x, y) = (x, (False, y))
elabMethods : (conName : Name) -> List Name ->
List (FC, RigCount, List FnOpt, Name, (Bool, RawImp)) ->
Core ()
elabMethods conName meth_names meth_sigs
= do -- Methods have same visibility as data declaration
fnsm <- traverse (getMethToplevel env vis iname conName
(map fst constraints)
meth_names
params) meth_sigs
let fns = concat fnsm
log "elab.interface" 5 $ "Top level methods: " ++ show fns
traverse (processDecl [] nest env) fns
traverse_ (\n => do mn <- inCurrentNS n
setFlag fc mn Inline
setFlag fc mn TCInline
setFlag fc mn Overloadable) meth_names
-- Check that a default definition is correct. We just discard it here once
-- we know it's okay, since we'll need to re-elaborate it for each
-- instance, to specialise it
elabDefault : List (Name, RigCount, List FnOpt, Bool, RawImp) ->
(FC, List FnOpt, Name, List ImpClause) ->
Core (Name, List ImpClause)
elabDefault tydecls (fc, opts, n, cs)
= do -- orig <- branch
let dn_in = MN ("Default implementation of " ++ show n) 0
dn <- inCurrentNS dn_in
(rig, dty) <-
the (Core (RigCount, RawImp)) $
case lookup n tydecls of
Just (r, _, _, t) => pure (r, t)
Nothing => throw (GenericMsg fc ("No method named " ++ show n ++ " in interface " ++ show iname))
let ity = apply (IVar fc iname) (map (IVar fc) (map fst params))
-- Substitute the method names with their top level function
-- name, so they don't get implicitly bound in the name
methNameMap <- traverse (\n =>
do cn <- inCurrentNS n
pure (n, applyParams (IVar fc cn)
(map fst params)))
(map fst tydecls)
let dty = substNames vars methNameMap dty
dty_imp <- bindTypeNames [] (map fst tydecls ++ vars)
(bindIFace fc ity dty)
log "elab.interface" 5 $ "Default method " ++ show dn ++ " : " ++ show dty_imp
let dtydecl = IClaim fc rig vis [] (MkImpTy fc dn dty_imp)
processDecl [] nest env dtydecl
let cs' = map (changeName dn) cs
log "elab.interface" 5 $ "Default method body " ++ show cs'
processDecl [] nest env (IDef fc dn cs')
-- Reset the original context, we don't need to keep the definition
-- Actually we do for the metadata and name map!
-- put Ctxt orig
pure (n, cs)
where
applyParams : RawImp -> List Name -> RawImp
applyParams tm [] = tm
applyParams tm (UN n :: ns)
= applyParams (IImplicitApp fc tm (Just (UN n)) (IBindVar fc n)) ns
applyParams tm (_ :: ns) = applyParams tm ns
changeNameTerm : Name -> RawImp -> RawImp
changeNameTerm dn (IVar fc n')
= if n == n' then IVar fc dn else IVar fc n'
changeNameTerm dn (IApp fc f arg)
= IApp fc (changeNameTerm dn f) arg
changeNameTerm dn (IImplicitApp fc f x arg)
= IImplicitApp fc (changeNameTerm dn f) x arg
changeNameTerm dn tm = tm
changeName : Name -> ImpClause -> ImpClause
changeName dn (PatClause fc lhs rhs)
= PatClause fc (changeNameTerm dn lhs) rhs
changeName dn (WithClause fc lhs wval flags cs)
= WithClause fc (changeNameTerm dn lhs) wval
flags (map (changeName dn) cs)
changeName dn (ImpossibleClause fc lhs)
= ImpossibleClause fc (changeNameTerm dn lhs)
elabConstraintHints : (conName : Name) -> List Name ->
Core ()
elabConstraintHints conName meth_names
= do let nconstraints = nameCons 0 constraints
chints <- traverse (getConstraintHint fc env vis iname conName
(map fst nconstraints)
meth_names
(map fst params)) nconstraints
log "elab.interface" 5 $ "Constraint hints from " ++ show constraints ++ ": " ++ show chints
traverse (processDecl [] nest env) (concatMap snd chints)
traverse_ (\n => do mn <- inCurrentNS n
setFlag fc mn TCInline) (map fst chints)
|
/-
Copyright (c) 2019 Seul Baek. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Seul Baek
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.omega.clause
import Mathlib.PostPort
universes l
namespace Mathlib
/-
Correctness lemmas for equality elimination.
See 5.5 of <http://www.decision-procedures.org/> for details.
-/
namespace omega
def symdiv (i : ℤ) (j : ℤ) : ℤ := ite (bit0 1 * (i % j) < j) (i / j) (i / j + 1)
def symmod (i : ℤ) (j : ℤ) : ℤ := ite (bit0 1 * (i % j) < j) (i % j) (i % j - j)
theorem symmod_add_one_self {i : ℤ} : 0 < i → symmod i (i + 1) = -1 := sorry
theorem mul_symdiv_eq {i : ℤ} {j : ℤ} : j * symdiv i j = i - symmod i j := sorry
theorem symmod_eq {i : ℤ} {j : ℤ} : symmod i j = i - j * symdiv i j :=
eq.mpr (id (Eq._oldrec (Eq.refl (symmod i j = i - j * symdiv i j)) mul_symdiv_eq))
(eq.mpr
(id
(Eq._oldrec (Eq.refl (symmod i j = i - (i - symmod i j))) (sub_sub_cancel i (symmod i j))))
(Eq.refl (symmod i j)))
/-- (sgm v b as n) is the new value assigned to the nth variable
after a single step of equality elimination using valuation v,
term ⟨b, as⟩, and variable index n. If v satisfies the initial
constraint set, then (v ⟨n ↦ sgm v b as n⟩) satisfies the new
constraint set after equality elimination. -/
def sgm (v : ℕ → ℤ) (b : ℤ) (as : List ℤ) (n : ℕ) : ℤ :=
let a_n : ℤ := list.func.get n as;
let m : ℤ := a_n + 1;
(symmod b m + coeffs.val v (list.map (fun (x : ℤ) => symmod x m) as)) / m
def rhs : ℕ → ℤ → List ℤ → term := sorry
theorem rhs_correct_aux {v : ℕ → ℤ} {m : ℤ} {as : List ℤ} {k : ℕ} :
∃ (d : ℤ),
m * d + coeffs.val_between v (list.map (fun (x : ℤ) => symmod x m) as) 0 k =
coeffs.val_between v as 0 k :=
sorry
theorem rhs_correct {v : ℕ → ℤ} {b : ℤ} {as : List ℤ} (n : ℕ) :
0 < list.func.get n as →
0 = term.val v (b, as) → v n = term.val (update n (sgm v b as n) v) (rhs n b as) :=
sorry
def sym_sym (m : ℤ) (b : ℤ) : ℤ := symdiv b m + symmod b m
def coeffs_reduce : ℕ → ℤ → List ℤ → term := sorry
theorem coeffs_reduce_correct {v : ℕ → ℤ} {b : ℤ} {as : List ℤ} {n : ℕ} :
0 < list.func.get n as →
0 = term.val v (b, as) → 0 = term.val (update n (sgm v b as n) v) (coeffs_reduce n b as) :=
sorry
-- Requires : t1.coeffs[m] = 1
def cancel (m : ℕ) (t1 : term) (t2 : term) : term :=
term.add (term.mul (-list.func.get m (prod.snd t2)) t1) t2
def subst (n : ℕ) (t1 : term) (t2 : term) : term :=
term.add (term.mul (list.func.get n (prod.snd t2)) t1)
(prod.fst t2, list.func.set 0 (prod.snd t2) n)
theorem subst_correct {v : ℕ → ℤ} {b : ℤ} {as : List ℤ} {t : term} {n : ℕ} :
0 < list.func.get n as →
0 = term.val v (b, as) →
term.val v t = term.val (update n (sgm v b as n) v) (subst n (rhs n b as) t) :=
sorry
/-- The type of equality elimination rules. -/
inductive ee where
| drop : ee
| nondiv : ℤ → ee
| factor : ℤ → ee
| neg : ee
| reduce : ℕ → ee
| cancel : ℕ → ee
namespace ee
def repr : ee → string := sorry
protected instance has_repr : has_repr ee := has_repr.mk repr
end ee
/-- Apply a given sequence of equality elimination steps to a clause. -/
def eq_elim : List ee → clause → clause := sorry
theorem sat_empty : clause.sat ([], []) :=
Exists.intro (fun (_x : ℕ) => 0) { left := of_as_true trivial, right := of_as_true trivial }
theorem sat_eq_elim {es : List ee} {c : clause} : clause.sat c → clause.sat (eq_elim es c) := sorry
/-- If the result of equality elimination is unsatisfiable, the original clause is unsatisfiable. -/
theorem unsat_of_unsat_eq_elim (ee : List ee) (c : clause) :
clause.unsat (eq_elim ee c) → clause.unsat c :=
fun (h1 : clause.unsat (eq_elim ee c)) => id fun (h2 : clause.sat c) => h1 (sat_eq_elim h2)
end Mathlib |
import numpy as np
from math import modf
import datetime as dt
import calendar
def cal2jd(yr,mn,dy) :
"""
CAL2JD Converts calendar date to Julian date using algorithm
from "Practical Ephemeris Calculations" by Oliver Montenbruck
(Springer-Verlag, 1989). Uses astronomical year for B.C. dates
(2 BC = -1 yr).
Input:
yr : YYYY (int)
mn : MM 01 to 12 (int)
      dy : DD 01 to 31 (int)
Output:
jd : julian date (float)
"""
if mn > 2:
y = yr
m = mn
else:
y = yr - 1
m = mn + 12
date1=4.5+31.*(10.+12.*1582.) # Last day of Julian calendar (1582.10.04 Noon)
date2=15.5+31.*(10.+12.*1582.) # First day of Gregorian calendar (1582.10.15 Noon)
date=dy+31.*(mn+12.*yr)
if date <= date1:
b = -2
elif date >= date2 :
b = np.fix(y/400.) - np.fix(y/100.)
else:
#warning('Dates between October 5 & 15, 1582 do not exist');
return
if y > 0:
jd = np.fix(365.25*y) + np.fix(30.6001*(m+1)) + b + 1720996.5 + dy
else:
jd = np.fix(365.25*y-0.75) + np.fix(30.6001*(m+1)) + b + 1720996.5 + dy
return jd
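# Quick check: cal2jd(2000, 1, 1.5) gives 2451545.0, the J2000.0 epoch.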
def yyyydoy2jd(year,doy,hh=0,mm=0,ss=0.0):
"""
yyyydoy2jd Take a year, day-of-year, etc and convert it into a julian day
Usage: jd = yyyydoy2jd(year,doy,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24) (not required)
            mm   - 2 digit, or less int, (0 <= mm < 60) (not required)
ss - float (not required)
    Output: 'jd'  modified Julian date (JD - 2400000.5) (float)
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
#
ms,sec = modf(float(ss))
    ms = ms * 1e6
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
mn = dto.month
dy = dto.day
jd = cal2jd(int(year),int(mn),int(dy))
jd = jd + float(hh)/24. + float(mm)/60./24. + float(sec)/3600./24.
return jd - 2400000.5
def jd2gps(jd):
"""
JD2GPS Converts Julian date to GPS week number (since
1980.01.06) and seconds of week.
Usage: [gpsweek,sow,rollover]=jd2gps(jd)
Input: jd - Julian date
Output: gpsweek - GPS week number
sow - seconds of week since 0 hr, Sun.
rollover - number of GPS week rollovers (modulus 1024)
"""
jdgps = cal2jd(1980,1,6); # beginning of GPS week numbering
nweek = int(np.fix((jd-jdgps)/7.))
sow = (jd - (jdgps+nweek*7)) * 3600*24
    rollover = int(np.fix(nweek/1024)) # rollover every 1024 weeks
    gpsweek = int(nweek)
return gpsweek,sow,rollover
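# Quick check: jd2gps(cal2jd(1980, 1, 6)) gives (0, 0.0, 0), the start of
# GPS week numbering.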
def jd2cal(jd):
"""
JD2CAL Converts Julian date to calendar date using algorithm
from "Practical Ephemeris Calculations" by Oliver Montenbruck
(Springer-Verlag, 1989). Must use astronomical year for B.C.
dates (2 BC = -1 yr). Non-vectorized version. See also CAL2JD,
DOY2JD, GPS2JD, JD2DOW, JD2DOY, JD2GPS, JD2YR, YR2JD.
Usage: [yr, mn, dy]=jd2cal(jd)
Input: jd - Julian date
Output: yr - year of calendar date
mn - month of calendar date
dy - day of calendar date (including decimal)
"""
a = np.fix(jd+0.5)
if a < 2299161. :
c = a + 1524.
else:
b = np.fix( (a-1867216.25) / 36524.25 )
c = a + b - np.fix(b/4.) + 1525.
d = np.fix( (c-122.1)/365.25 )
e = np.fix(365.25*d)
f = np.fix( (c-e) / 30.6001 )
dy = c - e - np.fix(30.6001*f) + np.remainder((jd+0.5),a)
mn = f - 1. - 12. * np.fix(f/14.)
yr = d - 4715. - np.fix( (7.+mn)/10. )
return yr,mn,dy
def jd2doy(jd):
"""
JD2DOY Converts Julian date to year and day of year.
Usage: [doy,yr]=jd2doy(jd)
Input: jd - Julian date
Output: doy - day of year
yr - year
"""
[yr,mn,dy] = jd2cal(jd)
doy = jd - cal2jd(yr,1,0)
# MM ensure the doy is 0 padded
doy = "%03d" % doy
return yr, doy
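# Quick check: jd2doy(cal2jd(2012, 1, 1)) gives (2012.0, '001').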
def yyyy2yy(year):
"""
yy = yyyy2yy(YYYY)
return the yy form of YYYY
yy - last two digits of YYYY
- returned as an int
very messy hack
"""
    yy = int(str(int(year))[-2:])
return(yy)
def dateTime2gpssow(dt):
"""
dateTime2gpssow Converts a datetime object into gps week,
and gps seconds of week
Usage: week,sow = dateTime2gpssow(dateTime)
Input: dt - python datetime object
Output: week - gps week (int)
sow - seconds into gpsweek since 0 hr, Sunday (float)
"""
day = dt.day + dt.hour/24. + dt.minute/1440. + dt.second/86400.
jd = cal2jd(dt.year,dt.month,day)
week, sow, rollover = jd2gps(jd)
return week, sow
def ydhms2dt(year,doy,hh,mm,ss):
"""
ydhms2dt Take a year, day-of-year, etc and convert it into a date time object
Usage: dto = ydhms2dt(year,day,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24)
            mm   - 2 digit, or less int, (0 <= mm < 60)
ss - float
Output: 'dto' a date time object
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
ms,sec = modf(float(ss))
    ms = ms * 1e6
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
return dto
def ymdhms2dt(year,month,day,hh,mm,ss):
"""
ymhms2dt Take a year, day-of-year, etc and convert it into a date time object
Usage: dto = ymdhms2dt(year,month,day,hh,mm,ss)
Input: year - 4 digit integer
month - integer, (1 => January)
day - integer
hh - 2 digit, or less int, (0 <= hh < 24)
            mm    - 2 digit, or less int, (0 <= mm < 60)
ss - float
Output: 'dto' a date time object
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
ms,sec = modf(float(ss))
    ms = ms * 1e6
dto = dt.datetime(int(year),int(month),int(day),int(hh),int(mm),int(sec),0)
#dto = dt.datetime(int(year),int(month),int(day),int(hh),int(mm),int(sec),int(ms))
#dt.date(int(year),int(month),int(day))
#01,01,int(hh),int(mm),int(sec),int(ms))
#dto = dto + dt.timedelta(hours= int(hh),minutes=int(mm),seconds=int(sec))
return dto
def jd2mdt(jd):
"""
jd2mdt Take a julian date and convert it into a matplotlib date time stamp
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC
Usage: mp_ts = jd2mdt(jd)
Input: jd julian date
Output: 'mp_ts' (float)
            a matplotlib time stamp which is days from 0001-01-01
"""
#ms,sec = modf(float(ss))
#ms = ms * 10e5
year,mon,d = jd2cal(jd)
day = int(np.fix(d))
h = (d - float(day)) * 24.
hh = int(np.fix(h))
m = (h - float(hh)) * 60.
mm = int(np.fix(m))
s = (m - float(mm)) * 60.
sec = int(np.fix(s))
ms = 0
dto = dt.datetime(int(year),int(mon),int(day),int(hh),int(mm),int(sec),int(ms))
mp_epoch = dt.datetime(1, 1, 1)
DAY = 86400
td = dto - mp_epoch
mp_ts = td.days + 1 + (1000000 * td.seconds + td.microseconds) / 1e6 / DAY
return mp_ts
def ydhms2mdt(year,doy,hh,mm,ss):
"""
ydhms2mdt Take a year, day-of-year, etc and convert it into a matplotlib date
All matplotlib date plotting is done by converting date instances into
days since the 0001-01-01 UTC
Usage: mp_ts = ydhms2dt(year,day,hh,mm,ss)
Input: year - 4 digit integer
doy - 3 digit, or less integer, (1 <= doy <= 366)
hh - 2 digit, or less int, (0 <= hh < 24)
            mm   - 2 digit, or less int, (0 <= mm < 60)
ss - float
Output: 'mp_ts' (float)
            a matplotlib time stamp which is days from 0001-01-01
"""
#
# need to split seconds into two components
# sec => 2 digit, or less int, (0 <= ss < 60)
# ms => int 0 <= ms < 1,000,000
ms,sec = modf(float(ss))
    ms = ms * 1e6
    dto = dt.datetime(int(year), 1, 1, int(hh), int(mm), int(sec), int(ms))
dto = dto + dt.timedelta(days=(int(doy) - 1))
mp_epoch = dt.datetime(1, 1, 1)
DAY = 86400
td = dto - mp_epoch
mp_ts = td.days + 1 + (1000000 * td.seconds + td.microseconds) / 1e6 / DAY
return mp_ts
def ydhms2decyr(year,doy,hh=0,mm=0,ss=0.0):
"""
ydhms2decyr(year,doy,hh,mm,ss)
Convert from Year, Day-of-year to decimal year
"""
#ms,sec = modf(float(ss))
#ms = ms * 10e5
#dto = dt.datetime(int(year),,,int(hh),int(mm),int(sec),int(ms))
#dto = dto + dt.timedelta(days=(int(doy) - 1))
if calendar.isleap(int(year)):
dec_yr = float(year) + (float(doy) - 1.)/366. + float(hh)/24./366 + float(mm)/60./24./366. + float(ss)/86400./366.
else:
dec_yr = float(year) + (float(doy) - 1.)/365. + float(hh)/24./365 + float(mm)/60./24./365. + float(ss)/86400./365.
return dec_yr
# TODO check this starts before 1970, and end before 2050
# check it returns the time in ms
def dt2unix(dto):
"""
dt2unix : Convert a datetime object to a UNIX time stamp
Usage: unixTS = dt2unix(dto)
Input:dto A datetime object
Output: a unix time stamp (int)
"""
return calendar.timegm(dto.utctimetuple())
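# Note: calendar.timegm works on whole seconds, so the stamp is returned in
# seconds rather than ms; e.g. dt2unix(dt.datetime(1970, 1, 1)) == 0.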
def unix2dt(unix_timestamp):
dto = dt.datetime.utcfromtimestamp(int(unix_timestamp))
return dto
def dt2validFrom(dto):
"""
dt2validFrom(dto)
Return the values needed to form a valid from string, from a date time object
"""
yyyy = dto.strftime("%Y")
MM = dto.strftime("%m")
dd = dto.strftime("%d")
hh = dto.strftime("%H")
mm = dto.strftime("%M")
ss = dto.strftime("%S")
ms = dto.strftime("%f")
return yyyy, MM, dd, hh, mm, ss, ms
# modified julian day
# ymdhms -> modJ
# gps week -> modj
# modj -> gps week
#
#=========================
if __name__ == "__main__":
    startymdhms = cal2jd(2012, 1, 1 + (0/24.) + (0/(24.*60.)) + (0.0000/(24.*3600.)))
(year,doy) = jd2doy(startymdhms)
print(year,doy)
    # now obtain the yy version of YYYY
yy = yyyy2yy(year)
print(yy)
yy = yyyy2yy(2012.7)
print(yy)
print("Test of ydhms2dt( 2013, 200, 13, 23, 32.567)")
    dto = ydhms2dt(2013, 200, 13, 23, 32.567)
    print(dto)
#print(jd)
#2455927.5
#(gpsweek,sow,rollover) = gt.jd2gps(jd)
#print(gpsweek,sow,rollover)
#(1669, 0.0, array(1.0))
#=> should be (1669, 0.0, 1.0)
|
function [operator_pt]=pt(operator, partition, dimensions)
%returns the partial transpose of the operator 'operator' with respect to
%the particles indicated by a one in the binary vector 'partition'.
%the optional parameter 'dimensions' includes the dimensions of the systems
%in array form. per default, it is assumed that each system is a qubit.
%otherwise, number of systems is given by the length of 'dimensions'
n=size(partition);
n=n(2);
if (nargin == 2)
%if 'dimensions' are not given, assume qubits, i.e. always dimension 2
dimensions = 2*ones(1,n);
else
    %throw an error if 'partition' and 'dimensions' have different length
if (max(size(partition) ~= size(dimensions))==1)
error('partition array and dimensions have different length');
end
end
%throw an error if 'operator' is not a square matrix
opdims=size(operator);
if (opdims(1) ~= opdims(2))
error('first argument is no square matrix');
end
%throw an error if 'operator' is not hermitian (within a certain precision)
if (max(max(abs(ctranspose(operator)-operator))) > 1e-12)
error('first argument is not hermitian');
end
%throw an error if any value in 'dimensions' is smaller than two
if (min(dimensions)<2)
error('dimensions must be larger than 1');
end
if (max(dimensions)==min(dimensions))
%if all particles have the same dimension, we can simply use another
%numerical system, e.g. the binary system. this speeds things up when
%compared to the case in which the systems have different dimensions
%obtain the dimensionality of each system (equals the first system's
    %dimensionality, since all are the same)
dim = dimensions(1);
%throw an error if operator dimensions do not match the length of
%'partition'
if (opdims(1) ~= dim^n)
error('operator dimensions do not match the partition array');
end
%******************* this is the main part *******************
%start with the zero matrix to build up the partially transposed
%operator
oppt = zeros(dim^n,dim^n);
%define the identity on the space of 'operator'
id=eye(size(operator));
for rowind=1:opdims(1) %loop through rows ...
for colind=(rowind+1):opdims(2) %... and columns of the operator
%due to hermiticity and invariance of trace
%under partial transpose, only loop through
%upper right half
col=dec2base(colind-1,dim,n); %determine current row and ...
row=dec2base(rowind-1,dim,n); %column index in the base given by
%'dimensions'
%determine new row and column index by transposing the systems
%indicated by 'partition'
%for the new column index, take the ith digit from col if
%partition(i) is zero. if partition(i) is one, take the ith
%digit of row ...
newcol=transpose((1-partition(:)).*str2num(col(:))+partition(:).*str2num(row(:)));
%... and vice versa for the new row index
newrow=transpose((1-partition(:)).*str2num(row(:))+partition(:).*str2num(col(:)));
%note that newrow and newcol are row vectors. therefore,
%convert them into strings and drop the white spaces
newcol=strrep(num2str(newcol),' ','');
newrow=strrep(num2str(newrow),' ','');
%convert row and column index back into the decimal system
newcolind=base2dec(newcol,dim)+1;
newrowind=base2dec(newrow,dim)+1;
%add the matrix element on its new place to oppt
oppt=operator(rowind,colind)*(id(:,newrowind)*id(newcolind,:))+oppt;
end
end
%add elements due to hermiticity
oppt=oppt+ctranspose(oppt);
%add diagonal elements (which did not change through the partial
%transposition)
oppt = oppt + diag(diag(operator));
else
%if the system has different dimensions, the program is a bit more complex
%and slower
%throw an error if operator dimensions do not match the length of
%'partition'
if (opdims(1) ~= prod(dimensions))
error('operator dimensions do not match the partition array');
end
%******************* this is the other main part *******************
%start with the zero matrix to build up the partially transposed
%operator
oppt = zeros(size(operator));
%define the identity on the space of 'operator'
id=eye(size(operator));
%define the n x n - identity
idnxn=eye(n,n);
%to loop through all matrix elements of 'operator', we need to create
%the indices strings of the basis vectors in the usual notation. now,
%however, the different digits run from zero to the corresponding
%system's dimension (minus one), which differs from system to system.
indices=zeros(n,1); %first index has only zeros
for k=1:opdims(1) %loop through whole matrix 'operator'
inddims=size(indices);
last = indices(:,inddims(2)); %get the last index string
for l=n:-1:1 %loop through digits of last index string
if (last(l) < dimensions(l)-1) %the first digit from the right
%which is still smaller than the
%dimension (minus one) ...
newvec=last+idnxn(:,l); %... must be increased by one ...
newvec=newvec.*(vertcat(ones(l,1),zeros(n-l,1))); % ... and
%all digits to the right set to zero
indices=horzcat(indices,newvec); %append new index string
break;
end
end
end
for rowind=1:opdims(1) %loop through rows ...
for colind=(rowind+1):opdims(2) %... and columns of the operator
%due to hermiticity and invariance of trace
%under partial transpose, only loop through
%upper right half
col=indices(:,colind); %write current row and ...
row=indices(:,rowind); %column index as index string.
%this time, row and col are column
%vectors
%determine new row and column index by transposing the systems
%indicated by 'partition'
%for the new column index, take the ith digit from col if
%partition(i) is zero. if partition(i) is one, take the ith
%digit of row ...
newcol=transpose((1-partition(:)).*col(:)+partition(:).*row(:));
%... and vice versa for the new row index
newrow=transpose((1-partition(:)).*row(:)+partition(:).*col(:));
%since newrow and newcol are row vectors denoting an index
%string, we need to convert them back to a decimal number that
%denotes the element's new position
newcolind=find(ismember(transpose(indices),newcol,'rows'));
newrowind=find(ismember(transpose(indices),newrow,'rows'));
%add the matrix element on its new place to oppt
oppt=operator(rowind,colind)*(id(:,newrowind)*id(newcolind,:))+oppt;
end
end
%build the partial transpose of 'operator'
%elements that used to be in the upper right half
%oppt=full(sparse(colarray,rowarray,valarray,dim^n,dim^n,((dim^n)^2)/2-(dim^n)/2));
%add elements due to hermiticity
oppt=oppt+ctranspose(oppt);
%add diagonal elements (which did not change through the partial
%transposition)
oppt = oppt + diag(diag(operator));
end
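%example (sketch): the partial transpose of the two-qubit Bell state
%|phi+> = (|00>+|11>)/sqrt(2) has a negative eigenvalue, witnessing its
%entanglement:
%  psi = [1; 0; 0; 1] / sqrt(2);
%  rho = psi * psi';
%  min(eig(pt(rho, [0 1])))   % = -0.5, so rho is NPT (entangled)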
operator_pt=oppt; |
{-# OPTIONS --without-K #-}
module sets.vec.properties where
open import equality.core
open import function.core
open import function.extensionality
open import function.isomorphism
open import sets.nat.core using (ℕ; zero; suc)
open import sets.fin using (Fin; zero; suc)
open import sets.vec.core
tabulate-lookup : ∀ {i}{A : Set i}{n : ℕ}
→ (xs : Vec A n)
→ tabulate (lookup xs) ≡ xs
tabulate-lookup [] = refl
tabulate-lookup (x ∷ xs) = ap (_∷_ x) (tabulate-lookup xs)
lookup-tabulate-funext : ∀ {i}{A : Set i}{n : ℕ}
→ (f : Fin n → A)(i : Fin n)
→ lookup (tabulate f) i ≡ f i
lookup-tabulate-funext {n = zero} f ()
lookup-tabulate-funext {n = suc m} f zero = refl
lookup-tabulate-funext {n = suc m} f (suc i) =
lookup-tabulate-funext (f ∘ suc) i
lookup-tabulate : ∀ {i}{A : Set i}{n : ℕ}
→ (f : Fin n → A)
→ lookup (tabulate f) ≡ f
lookup-tabulate f = funext (lookup-tabulate-funext f)
lookup-iso : ∀ {i}{A : Set i}{n : ℕ}
→ Vec A n ≅ (Fin n → A)
lookup-iso = iso lookup tabulate tabulate-lookup lookup-tabulate
|
Every Lebesgue measurable set is almost a $G_\delta$ set. |
------------------------------------------------------------------------
-- The Agda standard library
--
-- Operations on nullary relations (like negation and decidability)
------------------------------------------------------------------------
-- Some operations on/properties of nullary relations, i.e. sets.
module Relation.Nullary where
import Relation.Nullary.Core as Core
------------------------------------------------------------------------
-- Negation
open Core public using (¬_)
------------------------------------------------------------------------
-- Decidable relations
open Core public using (Dec; yes; no)
|
lemma (in Dynkin_system) diff: assumes sets: "D \<in> M" "E \<in> M" and "D \<subseteq> E" shows "E - D \<in> M" |
If $x$ is an algebraic integer, then $x^{1/n}$ is an algebraic integer. |
function RtOut = transformCameraRt(RtIn)
% invert a rigid camera transform [R|t]: the inverse of x -> R*x + t is
% x -> R'*(x - t), so the result is [R', -R'*t]
RtOut = [RtIn(1:3,1:3)', - RtIn(1:3,1:3)'* RtIn(1:3,4)];
|
options(stringsAsFactors = FALSE)
library(lubridate)
library(reshape2)
library(stringr)
library(plyr)
source("tidy-data/data/xtable.r")
#raw <- read.csv("billboard.csv")
raw <- read.csv("tidy-data/data/billboard.csv")
raw <- raw[, c("year", "artist.inverted", "track", "time", "date.entered", "x1st.week", "x2nd.week", "x3rd.week", "x4th.week", "x5th.week", "x6th.week", "x7th.week", "x8th.week", "x9th.week", "x10th.week", "x11th.week", "x12th.week", "x13th.week", "x14th.week", "x15th.week", "x16th.week", "x17th.week", "x18th.week", "x19th.week", "x20th.week", "x21st.week", "x22nd.week", "x23rd.week", "x24th.week", "x25th.week", "x26th.week", "x27th.week", "x28th.week", "x29th.week", "x30th.week", "x31st.week", "x32nd.week", "x33rd.week", "x34th.week", "x35th.week", "x36th.week", "x37th.week", "x38th.week", "x39th.week", "x40th.week", "x41st.week", "x42nd.week", "x43rd.week", "x44th.week", "x45th.week", "x46th.week", "x47th.week", "x48th.week", "x49th.week", "x50th.week", "x51st.week", "x52nd.week", "x53rd.week", "x54th.week", "x55th.week", "x56th.week", "x57th.week", "x58th.week", "x59th.week", "x60th.week", "x61st.week", "x62nd.week", "x63rd.week", "x64th.week", "x65th.week", "x66th.week", "x67th.week", "x68th.week", "x69th.week", "x70th.week", "x71st.week", "x72nd.week", "x73rd.week", "x74th.week", "x75th.week", "x76th.week")]
names(raw)[2] <- "artist"
raw$artist <- iconv(raw$artist, "MAC", "ASCII//translit")
raw$track <- str_replace(raw$track, " \\(.*?\\)", "")
names(raw)[-(1:5)] <- str_c("wk", 1:76)
raw <- arrange(raw, year, artist, track)
long_name <- nchar(raw$track) > 20
raw$track[long_name] <- paste0(substr(raw$track[long_name], 0, 20), "...")
xtable(raw[c(1:3, 6:10), 1:8], "tidy-data/data/billboard-raw.tex")
clean <- melt(raw, id = 1:5, na.rm = T)
clean$week <- as.integer(str_replace_all(clean$variable, "[^0-9]+", ""))
clean$variable <- NULL
clean$date.entered <- ymd(clean$date.entered)
clean$date <- clean$date.entered + weeks(clean$week - 1)
clean$date.entered <- NULL
clean <- rename(clean, c("value" = "rank"))
clean <- arrange(clean, year, artist, track, time, week)
clean <- clean[c("year", "artist", "time", "track", "date", "week", "rank")]
clean_out <- mutate(clean,
date = as.character(date))
#xtable(clean_out[1:15, ], "billboard-clean.tex")
# Normalisation --------------------------------------------------------------
song <- unrowname(unique(clean[c("artist", "track", "time")]))
song$id <- 1:nrow(song)
narrow <- song[1:15, c("id","artist", "track", "time")]
#xtable(narrow, "billboard-song.tex")
rank <- join(clean, song, match = "first")
rank <- rank[c("id", "date", "rank")]
rank$date <- as.character(rank$date)
#xtable(rank[1:15, ], "billboard-rank.tex")
|
import os
import json
import hashlib
import numpy as np
import logging
import pandas as pd
import altair as alt
import matplotlib as mpl
import matplotlib.font_manager as font_manager
from isipedia.web import isipedia_org
from isipedia.country import country_data_folder, countrymasks_folder
from isipedia.command import figures_register, isipediafigure
class NpEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'tolist'):
return obj.tolist()
else:
            return super().default(obj)
def _hashsum(kwargs, l=6):
string = json.dumps(kwargs, sort_keys=True)
return hashlib.sha1(string.encode()).hexdigest()[:l]
def _shortname(fname, area):
' determine variable name from json file name '
name, ext = os.path.splitext(os.path.basename(fname))
if name.endswith(area):
name = name[:-len(area)-1]
return name.replace('-','_')
def _get_json_file(context, arg):
from isipedia.jsonfile import JsonFile
if isinstance(arg, JsonFile):
return arg
if arg in context.variables:
return context.variables[arg]
fname = _shortname(arg, context.area)
fnames = [_shortname(v.filename, v.area) for v in context.variables]
if fname in fnames:
return context.variables[fnames.index(fname)]
fname = os.path.join(context.folder, arg)
if os.path.exists(fname):
if os.path.splitext(fname)[1] == '.json':
            return json.load(open(fname))
else:
raise NotImplementedError('cannot load {}'.format(fname))
raise ValueError('no matching variable found: '+repr(arg))
class SuperFig:
backend = None
prefix = ''
ext = '.png' # static extension
def __init__(self, context):
self.context = context
self.makefig = context.makefig
self.png = context.png
def figcode(self, *args, **kwargs):
"code based on file name and figure arguments"
kwargs['args'] = args
return self.prefix + _hashsum(kwargs)
def figpath(self, figid, relative=False):
return os.path.join('' if relative else self.context.folder, 'figures', figid+'-'+self.backend +self.ext)
    def insert_cmd(self, figid, caption='', crossref=False):
        # pandoc-style figure markdown: ![caption](path), plus an {#fig:...}
        # anchor when cross-referencing is requested
        md = '![{}]({})'.format(caption, self.figpath(figid, relative=True))
        if crossref:
            md += '{#fig:' + figid + '}'
        return md
def caption(self, *args, **kwargs):
return 'No Caption.'
def __call__(self, *args, **kwargs):
# extract markdown parameters
caption = kwargs.pop('caption', None)
figid = kwargs.pop('id', '')
crossref = kwargs.pop('crossref', True)
assert type(figid) is str, 'id parameter must be a string'
assert caption is None or type(caption) is str, 'caption parameter must be a string'
if not figid:
figid = self.figcode(*args, **kwargs)
if caption is None:
caption = self.caption(*args, **kwargs)
if self.makefig:
fig = self.make(*args, **kwargs)
figpath = self.figpath(figid)
figdir = os.path.join(self.context.folder, 'figures')
if not os.path.exists(figdir):
os.makedirs(figdir)
path_noext, ext = os.path.splitext(figpath)
self.save_and_close(fig, path_noext)
        return self.insert_cmd(figid, caption, crossref)
def make(self):
raise NotImplementedError()
def _get_json_file(self, variable):
return _get_json_file(self.context, variable)
def save_and_close(self, fig, path_noext):
if self.backend == 'mpl':
import matplotlib.pyplot as plt
print('saving mpl...')
fig.savefig(path_noext+self.ext, dpi=100)
plt.close(fig)
elif self.backend == 'mpld3':
import mpld3
import matplotlib.pyplot as plt
fig.savefig(path_noext+self.ext, dpi=100)
js = mpld3.fig_to_dict(fig)
fpath = path_noext+'.json'
json.dump(js, open(fpath, 'w'), cls=NpEncoder)
plt.close(fig)
elif self.backend == 'vl':
print('{}: saving json...'.format(type(self)))
fig.save(path_noext+'.json') # json
if self.png:
print('{}: saving png...'.format(type(self)))
# fig.save(path_noext+self.ext) # static
fig.save(path_noext+'.png', scale_factor=2) # static
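# Minimal sketch of a concrete figure class (the decorator argument and field
# values below are illustrative, not part of this module): subclasses pick a
# backend and implement make(), which returns the figure object to be saved.
#
#     @isipediafigure('timeseries')      # hypothetical figure id
#     class TimeseriesFig(SuperFig):
#         backend = 'vl'
#         def make(self, variable, **kwargs):
#             js = self._get_json_file(variable)
#             return alt.Chart(...)      # build and return an Altair chart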
def _maybe_createdir(path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
return path
def isipedia_theme():
font = "IBM Plex Sans"
return {
"config" : {
# "background": '#F1F4F4',
"padding": 0,
"view": {
"continuousWidth": 600, # this has no effect with autosize fit-x
"continuousHeight": 300,
"strokeOpacity": 0, # do not show axis frame
},
"autosize": {"contains": "padding", "type": "fit-x"}, # this cancels continuousWidth
"title": {
"font": font,
"fontsize": 16,
},
"text": {
"font": font,
"fontsize": 14,
},
"header": {
"font": font,
"titleFont": font,
"titleFontSize": 20,
"labelFont": font,
"labelFontSize": 18,
},
"axis": {
"labelFont": font,
"titleFont": font,
"labelFontSize": 14,
"titleFontSize": 16,
},
"mark": {
"font": font,
"fontSize": 14,
},
"legend": {
"labelFont": font,
"titleFont": font,
"labelFontSize": 14,
"titleFontSize": 14,
},
}
}
alt.themes.register('isipedia_theme', isipedia_theme)
alt.themes.enable('isipedia_theme')
# matplotlib fonts
font_dirs = [os.path.join(isipedia_org, 'assets', 'fonts')]
font_files = font_manager.findSystemFonts(fontpaths=font_dirs)
for font_file in font_files:   # register fonts (createFontList was removed in Matplotlib 3.3)
    font_manager.fontManager.addfont(font_file)
mpl.rcParams['font.family'] = 'IBM Plex Sans'
my_dpi = 96
mpl.rcParams['axes.titlesize'] = 18
mpl.rcParams['axes.labelsize'] = 16
# mpl.rcParams['axes.labelweight'] = "bold"
# mpl.rcParams['lines.linewidth'] : 3
# mpl.rcParams['lines.markersize'] : 10
mpl.rcParams['xtick.labelsize'] = 14
# mpl.rcParams['xtick.labelweight'] = "bold"
# mpl.rcParams['ytick.labelweight'] = "bold"
mpl.rcParams['ytick.labelsize'] = 14
mpl.rcParams['figure.titlesize'] = 20
mpl.rcParams['figure.dpi'] = my_dpi
scale = 2
mpl.rcParams['figure.figsize'] = 600/my_dpi*scale, 300/my_dpi*scale
def get_ranking_data(countries, ranking, x, scenario=None, method='number', plot_label_y='', plot_unit_y=''):
"""get ranking data for figures"""
import pandas as pd
if method not in ['number', 'value']:
raise ValueError('method must be "number" or "value"')
ranking_method = getattr(ranking, method)
ranking_data = []
for c in countries:
area = c['properties']['ISIPEDIA']
name = c['properties']['NAME']
# print(area)
if area.lower() not in ranking.areas:
logging.warning('missing area for ranking: '+area)
continue
value = ranking.value(area.lower(), x, scenario)
if value is not None:
value = round(value, 2)
rank = ranking.number(area.lower(), x, scenario)
ranking_data.append((area, name, value, rank, plot_label_y, plot_unit_y))
return pd.DataFrame(ranking_data, columns=["Code", "Country", "Value", "Rank", 'label', 'unit']) |
```python
from cime import *
import matplotlib.pyplot as plt
from sympy.physics.mechanics import dynamicsymbols, init_vprinting
init_vprinting()
```
```python
t = symbols("t")
r2 = 3
t2 = kinvars("theta_2")
r3 = 8
t3 = kinvars("theta_3")
r1 = kinvars("r_1")
t1 = pi
```
```python
v1 = Vector2D(r1,t1)
v2 = Vector2D(r2,t2)
v3 = Vector2D(r3,t3)
s = VectorLoop(v2,v3,v1)
s
```
$$ \begin{bmatrix} {- r_{1} + 3 \operatorname{cos}\left(\theta_{2}\right) + 8 \operatorname{cos}\left(\theta_{3}\right)} \\ {3 \operatorname{sin}\left(\theta_{2}\right) + 8 \operatorname{sin}\left(\theta_{3}\right)} \end{bmatrix} $$
```python
ssol = s.solve([r1,t3],{t2:rad(40).evalf()})
ssol
```
```python
s.draw({r1:ssol[0][0], t3:ssol[0][1], t2:rad(40).evalf()})
s.draw({r1:ssol[1][0], t3:ssol[1][1], t2:rad(40).evalf()})
plt.axis("equal")
```
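The two rows of `ssol` correspond to the two assembly branches of the vector loop: the scalar loop equations are quadratic in the unknowns, so two configurations close the loop for the same input angle, and both are drawn above.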
```python
r1 = Vector2D(x=10,y=10)
r2 = Vector2D(x=10,y=-10)
r3 = Vector2D(x=-20,y=0)
VectorLoop(r1,r2,r3).draw()
```
|
function points = intersectLineCircle(line, circle)
%INTERSECTLINECIRCLE Intersection point(s) of a line and a circle
%
% INTERS = intersectLineCircle(LINE, CIRCLE);
% Returns a 2-by-2-by-N array, containing on each row the coordinates of
% an intersection point for each line-circle pair, i.e. INTERS(:,:,k)
% contains the intersections between LINE(k,:) and CIRCLE(k,:).
%
% If a line-circle pair does not intersect, the corresponding results are
% set to NaN.
%
% Example
% % base point
% center = [10 0];
% % create vertical line
% l1 = [center 0 1];
% % circle
% c1 = [center 5];
% pts = intersectLineCircle(l1, c1)
% pts =
% 10 -5
% 10 5
% % draw the result
% figure; clf; hold on;
% axis([0 20 -10 10]);
% drawLine(l1);
% drawCircle(c1);
% drawPoint(pts, 'rx');
% axis equal;
%
% See also
% lines2d, circles2d, intersectLines, intersectCircles
%
% References
% http://local.wasp.uwa.edu.au/~pbourke/geometry/sphereline/
% http://mathworld.wolfram.com/Circle-LineIntersection.html
%
% ------
% Author: David Legland, [email protected]
% Author: JuanPi Carbajal <[email protected]>
% Created: 2011-01-14, using Matlab 7.9.0.529 (R2009b)
% Copyright 2011 INRA - Cepia Software Platform.
% HISTORY
% 2011-06-06 fix bug in delta test
% 2017-05-05 included some suggestions from code by JuanPi Carbajal <[email protected]>
% 2017-08-08 update doc
% check size of inputs
nLines = size(line, 1);
nCircles = size(circle, 1);
if nLines ~= nCircles
    error ('matGeom:geom2d:invalidArguments', ...
'Requires same number of lines and circles');
end
% center parameters
center = circle(:, 1:2);
radius = circle(:, 3);
% line parameters
dp = line(:, 1:2) - center;
vl = line(:, 3:4);
% coefficient of second order equation
a = sum(line(:, 3:4).^2, 2);
b = 2*sum(dp .* vl, 2);
c = sum(dp.^2, 2) - radius.^2;
% discriminant
delta = b .^ 2 - 4 * a .* c;
points = nan(2, 2, nCircles);
valid = delta >= 0;
if any(valid)
% compute roots
u = bsxfun(@plus, -b(valid), bsxfun(@times, [-1 1], sqrt(delta(valid))));
u = bsxfun(@rdivide, u, a(valid)) / 2;
if nCircles == 1
points = [...
line(1:2) + u(:,1) .* line(3:4); ...
line(1:2) + u(:,2) .* line(3:4)];
else
tmp = [...
line(valid, 1:2) + u(:,1) .* line(valid, 3:4) ...
line(valid, 1:2) + u(:,2) .* line(valid, 3:4)].';
points(:, :, valid) = permute(reshape(tmp, [2, 2, nCircles]), [2 1 3]);
end
end
|
module Pipes.Prelude
import Control.Monad.Trans
import Pipes.Core
%access export
-- Helper functions to construct Sources more easily
-- * `stdinLn` lifts the standard input to a Source
-- * `iterating` creates an infinite series of value f(f(f(f(...))))
-- * `unfolding` creates a possibly infinite series of value from a seed
stdinLn : String -> Source IO String
stdinLn promptLine = recur where
recur = do
lift (putStr promptLine)
lift getLine >>= yield
recur
streamFile : File -> Source IO (Either FileError String)
streamFile f = recur f where
recur f = do
end <- lift (fEOF f)
if end
then lift (closeFile f)
else do
l <- lift (fGetLine f)
yieldOr l (closeFile f)
recur f
readFile : String -> Source IO (Either FileError String)
readFile fileName = do
Right f <- lift (openFile fileName Read)
| Left err => yield (Left err)
streamFile f
iterating : (Monad m) => (a -> a) -> a -> Source m a
iterating f = recur where
recur a = do
yield a
recur (f a)
unfolding : (Monad m) => (seed -> Maybe (a, seed)) -> seed -> Source m a
unfolding f = recur . f where
recur Nothing = pure ()
recur (Just (a, seed)) = do
yield a
recur (f seed)
replicating : (Monad m) => Nat -> a -> Source m a
replicating times x = recur times where
recur Z = pure ()
recur (S n) = do yield x; recur n
-- Helper functions to construct pipes more easily
-- * `mapping` lifts a function as a pipe transformation
-- * `filtering` lifts a predicate into a pipe filter
-- * `takingWhile` lifts a predicate into a pipe breaker
-- * `droppingWhile` lifts a predicate into a pipe delayed starter
mapping : (Monad m) => (a -> b) -> Pipe a m b
mapping f = awaitForever (yield . f)
mappingM : (Monad m) => (a -> m b) -> Pipe a m b
mappingM f = awaitForever $ \x => lift (f x) >>= yield
concatting : (Monad m, Foldable f) => Pipe (f a) m a
concatting = awaitForever each
concatMapping : (Monad m, Foldable f) => (a -> f b) -> Pipe a m b
concatMapping f = mapping f .| concatting
filtering : (Monad m) => (a -> Bool) -> Pipe a m a
filtering p = awaitForever $ \x => if p x then yield x else pure ()
filteringJust : (Monad m) => Pipe (Maybe a) m a
filteringJust = awaitForever $ maybe (pure ()) yield
taking : (Monad m) => Nat -> PipeM a a r m (Maybe r)
taking = recur where
recur Z = pure Nothing
recur (S n) = do
x <- awaitOr
case x of
Left r => pure (Just r)
Right x => do yield x; taking n
dropping : (Monad m) => Nat -> Pipe a m a
dropping Z = idP
dropping (S n) = awaitOne $ \x => dropping n
takingWhile : (Monad m) => (a -> Bool) -> PipeM a a r m (Maybe r)
takingWhile p = recur where
recur = do
mx <- awaitOr
case mx of
Left r => pure (Just r)
Right x => if p x
then do yield x; recur
else pure Nothing
droppingWhile : (Monad m) => (a -> Bool) -> Pipe a m a
droppingWhile p = recur where
recur = awaitOne $ \x =>
if p x
then recur
else do yield x; idP
deduplicating : (Eq a, Monad m) => Pipe a m a
deduplicating = recur (the (a -> Bool) (const True)) where
recur isDifferent =
awaitOne $ \x => do
when (isDifferent x) (yield x)
recur (/= x)
repeating : (Monad m) => Nat -> Pipe a m a
repeating n = awaitForever $ \x => sequence_ (replicate n (yield x))
tracing : (Monad m) => (a -> m b) -> Pipe a m a
tracing trace = mappingM (\x => trace x *> pure x)
groupingBy : (Monad m) => (a -> a -> Bool) -> Pipe a m (List a)
groupingBy sameGroup = recur (the (List a) []) where
recur xs = do
mx <- awaitOr
case mx of
Left r => do
when (length xs > 0) (yield (reverse xs))
pure r
Right y => do
case xs of
[] => recur [y]
(x::_) =>
if sameGroup x y
then recur (y::xs)
else do
yield (reverse xs)
recur [y]
grouping : (Monad m, Eq a) => Pipe a m (List a)
grouping = groupingBy (==)
chunking : (Monad m) => (n: Nat) -> {auto prf: GTE n 0} -> Pipe a m (List a)
chunking chunkSize = recur (the (List a -> List a) id) chunkSize where
recur diffList Z = do
yield (diffList [])
recur id chunkSize
recur diffList (S n) = do
x <- awaitOr
case x of
Left r => do yield (diffList []); pure r
Right x => recur (diffList . (x ::)) n
splittingBy : (Monad m) => (a -> Bool) -> Pipe a m (List a)
splittingBy p = recur (the (List a) []) where
recur xs = do
x <- awaitOr
case x of
Left r => do yield (reverse xs); pure r
Right x => if p x
then do yield (reverse xs); recur []
else recur (x :: xs)
scanning : (Monad m) => (a -> b -> b) -> b -> Pipe a m b
scanning f initial = do
yield initial
recur initial
where
recur acc = awaitOne $ \x => do
let acc' = f x acc
yield acc'
recur acc'
-- Helper functions to construct Sinks more easily
-- * `stdoutLn` lifts the standard output to a Sink
-- * `discard` consumes all outputs and ignore them
discard : (Monad m) => SinkM a m r r
discard = awaitForever $ \_ => pure ()
stdoutLn : SinkM String IO r r
stdoutLn = tracing putStrLn .| discard
summing : (Monad m, Num a) => Sink a m a
summing = fold (+) 0
multiplying : (Monad m, Num a) => Sink a m a
multiplying = fold (*) 1
consuming : (Monad m) => Sink a m (List a)
consuming = recur (the (List a -> List a) id) where
recur diffList = do
mx <- await
case mx of
Just x => recur (diffList . (x ::))
Nothing => pure (diffList [])
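-- Usage sketch (assuming a `runEffect` driver and an `each` source are
-- exported by Pipes.Core):
--
--   main : IO ()
--   main = runEffect $
--     each [1..10] .| mapping (* 2) .| tracing printLn .| discard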
--
|
open import Nat
open import Prelude
open import core
open import lemmas-gcomplete
module lemmas-complete where
lem-comp-pair1 : ∀{d1 d2} → ⟨ d1 , d2 ⟩ dcomplete → d1 dcomplete
lem-comp-pair1 (DCPair h _) = h
lem-comp-pair2 : ∀{d1 d2} → ⟨ d1 , d2 ⟩ dcomplete → d2 dcomplete
lem-comp-pair2 (DCPair _ h) = h
lem-comp-prod1 : ∀{τ1 τ2} → τ1 ⊗ τ2 tcomplete → τ1 tcomplete
lem-comp-prod1 (TCProd h _) = h
lem-comp-prod2 : ∀{τ1 τ2} → τ1 ⊗ τ2 tcomplete → τ2 tcomplete
lem-comp-prod2 (TCProd _ h) = h
-- no term is both complete and indeterminate
lem-ind-comp : ∀{d} → d dcomplete → d indet → ⊥
lem-ind-comp DCVar ()
lem-ind-comp DCConst ()
lem-ind-comp (DCLam comp x₁) ()
lem-ind-comp (DCAp comp comp₁) (IAp x ind x₁) = lem-ind-comp comp ind
lem-ind-comp (DCCast comp x x₁) (ICastArr x₂ ind) = lem-ind-comp comp ind
lem-ind-comp (DCCast comp x x₁) (ICastGroundHole x₂ ind) = lem-ind-comp comp ind
lem-ind-comp (DCCast comp x x₁) (ICastHoleGround x₂ ind x₃) = lem-ind-comp comp ind
lem-ind-comp (DCCast dc x x₁) (ICastProd x₂ ind) = lem-ind-comp dc ind
lem-ind-comp (DCFst d) (IFst ind x x₁) = lem-ind-comp d ind
lem-ind-comp (DCSnd d) (ISnd ind x x₁) = lem-ind-comp d ind
lem-ind-comp (DCPair d d₁) (IPair1 ind x) = lem-ind-comp d ind
lem-ind-comp (DCPair d d₁) (IPair2 x ind) = lem-ind-comp d₁ ind
-- complete types that are consistent are equal
complete-consistency : ∀{τ1 τ2} → τ1 ~ τ2 → τ1 tcomplete → τ2 tcomplete → τ1 == τ2
complete-consistency TCRefl TCBase comp2 = refl
complete-consistency TCRefl (TCArr comp1 comp2) comp3 = refl
complete-consistency TCHole1 comp1 ()
complete-consistency TCHole2 () comp2
complete-consistency (TCArr consis consis₁) (TCArr comp1 comp2) (TCArr comp3 comp4)
with complete-consistency consis comp1 comp3 | complete-consistency consis₁ comp2 comp4
... | refl | refl = refl
complete-consistency TCRefl (TCProd tc' tc'') = λ _ → refl
complete-consistency (TCProd tc tc') (TCProd tc1 tc2) (TCProd tc3 tc4)
with complete-consistency tc tc1 tc3 | complete-consistency tc' tc2 tc4
... | refl | refl = refl
-- a well typed complete term is assigned a complete type
complete-ta : ∀{Γ Δ d τ} → (Γ gcomplete) →
(Δ , Γ ⊢ d :: τ) →
d dcomplete →
τ tcomplete
complete-ta gc TAConst comp = TCBase
complete-ta gc (TAVar x₁) DCVar = gc _ _ x₁
complete-ta gc (TALam a wt) (DCLam comp x₁) = TCArr x₁ (complete-ta (gcomp-extend gc x₁ a ) wt comp)
complete-ta gc (TAAp wt wt₁) (DCAp comp comp₁) with complete-ta gc wt comp
complete-ta gc (TAAp wt wt₁) (DCAp comp comp₁) | TCArr qq qq₁ = qq₁
complete-ta gc (TAEHole x x₁) ()
complete-ta gc (TANEHole x wt x₁) ()
complete-ta gc (TACast wt x) (DCCast comp x₁ x₂) = x₂
complete-ta gc (TAFailedCast wt x x₁ x₂) ()
complete-ta gc (TAFst wt) (DCFst comp) = lem-comp-prod1 (complete-ta gc wt comp)
complete-ta gc (TASnd wt) (DCSnd comp) = lem-comp-prod2 (complete-ta gc wt comp)
complete-ta gc (TAPair ta ta₁) (DCPair comp comp₁) = TCProd (complete-ta gc ta comp) (complete-ta gc ta₁ comp₁)
-- a well typed term synthesizes a complete type
comp-synth : ∀{Γ e τ} →
Γ gcomplete →
e ecomplete →
Γ ⊢ e => τ →
τ tcomplete
comp-synth gc ec SConst = TCBase
comp-synth gc (ECAsc x ec) (SAsc x₁) = x
comp-synth gc ec (SVar x) = gc _ _ x
comp-synth gc (ECAp ec ec₁) (SAp _ wt MAHole x₁) with comp-synth gc ec wt
... | ()
comp-synth gc (ECAp ec ec₁) (SAp _ wt MAArr x₁) with comp-synth gc ec wt
comp-synth gc (ECAp ec ec₁) (SAp _ wt MAArr x₁) | TCArr qq qq₁ = qq₁
comp-synth gc () SEHole
comp-synth gc () (SNEHole _ wt)
comp-synth gc (ECLam2 ec x₁) (SLam x₂ wt) = TCArr x₁ (comp-synth (gcomp-extend gc x₁ x₂) ec wt)
comp-synth gc (ECFst ec) (SFst wt MPHole) = comp-synth gc ec wt
comp-synth gc (ECFst ec) (SFst wt MPProd) = lem-comp-prod1 (comp-synth gc ec wt)
comp-synth gc (ECSnd ec) (SSnd wt MPHole) = comp-synth gc ec wt
comp-synth gc (ECSnd ec) (SSnd wt MPProd) = lem-comp-prod2 (comp-synth gc ec wt)
comp-synth gc (ECPair ec ec₁) (SPair x wt wt₁) = TCProd (comp-synth gc ec wt) (comp-synth gc ec₁ wt₁)
-- complete boxed values are just values
lem-comp-boxed-val : {Δ : hctx} {d : iexp} {τ : typ} {Γ : tctx} →
Δ , Γ ⊢ d :: τ →
d dcomplete →
d boxedval →
d val
lem-comp-boxed-val wt comp (BVVal VConst) = VConst
lem-comp-boxed-val wt comp (BVVal VLam) = VLam
lem-comp-boxed-val wt comp (BVVal (VPair x x₁)) = VPair x x₁
lem-comp-boxed-val (TAPair wt wt₁) (DCPair comp comp₁) (BVPair bv bv₁) = VPair (lem-comp-boxed-val wt comp bv)
(lem-comp-boxed-val wt₁ comp₁ bv₁)
lem-comp-boxed-val (TACast wt x₃) (DCCast comp x₁ x₂) (BVArrCast x bv) = abort (x (complete-consistency x₃ x₁ x₂))
lem-comp-boxed-val (TACast wt x₁) (DCCast comp x₂ x₃) (BVProdCast x bv) = abort (x (complete-consistency x₁ x₂ x₃))
lem-comp-boxed-val (TACast wt x₁) (DCCast comp x₂ ()) (BVHoleCast x bv)
|
%[ P, inls ] = ht_lo_ransac_p3p( u, X, rthr, maxiter )
%u: 3 x n image points
%X: 3 x n 3D points
%rthr: inlier threshold
%maxiter: default=1000
function [ P, inls ] = ht_lo_ransac_p3p( u, X, rthr, max_iter )
if nargin < 4
max_iter = 1000;
end
%initialization
u = bsxfun(@rdivide, u, sqrt(sum(u.^2, 1)));
Npts = size(u, 2);
rthr = cos(rthr);
max_inlsnum = 3;
no_iter = 0;
P = [];
inls = false(1, Npts);
%ransac
while no_iter < max_iter
no_iter = no_iter + 1;
idx = randperm(Npts, 3);
P_cand = P3PSolver([u(:, idx); X(:, idx)]);
[inls_cand, inls_cand_num] = calculate_inls_angular(P_cand, u, X, rthr);
if length(P_cand) == 0
% no_iter = no_iter - 1;
continue;
elseif length(P_cand) > 1
[inls_cand_num, inls_cand_idx] = max(inls_cand_num);
inls_cand = inls_cand{inls_cand_idx};
P_cand = P_cand{inls_cand_idx};
else
inls_cand_num = inls_cand_num(1);
inls_cand = inls_cand{1};
P_cand = P_cand{1};
end
    %nonlinear local optimization
if inls_cand_num > 3
lo_cnt = 0;
while lo_cnt < 10
lo_cnt = lo_cnt + 1;
[lo_P, lo_inls, lo_inls_num] = ht_PnPnonlin(P_cand, u, X, inls_cand, rthr);
if lo_inls_num >= inls_cand_num
inls_cand_num = lo_inls_num;
inls_cand = lo_inls;
P_cand = lo_P;
else
break;
end
end
end
if inls_cand_num >= max_inlsnum
max_inlsnum = inls_cand_num;
P = P_cand;
inls = inls_cand;
max_iter = min([max_iter, nsamples(max_inlsnum, Npts, 3, 0.95)]);
end
end
end
function [SampleCnt, q] = nsamples(ni, ptNum, pf, conf)
q = prod (((ni-pf+1) : ni) ./ ((ptNum-pf+1) : ptNum));
if q < eps
SampleCnt = Inf;
else
% SampleCnt = log(1 - conf) / log(1 - q);
if q > conf
SampleCnt = 1;
else
SampleCnt = log(1 - conf) / log(1 - q);
end
end
end
function [inls, inls_num] = calculate_inls_angular(Pcand, u, X, rthr)
inls = cell(1, length(Pcand));
inls_num = zeros(1, length(Pcand));
for ii = 1:1:length(Pcand)
X_reproj = Pcand{ii} * [X; ones(1, size(X, 2))];
X_reproj = bsxfun(@rdivide, X_reproj, sqrt(sum(X_reproj.^2, 1)));
res = sum(u .* X_reproj, 1);
inls{ii} = res > rthr;
inls_num(ii) = sum(inls{ii});
end
end
function [Poptim, inls, inls_num] = ht_PnPnonlin(Pcand, u, X, idx, rthr)
Poptim = PnP_mex_wrapper( u(:, idx), X(:, idx), Pcand );
X_reproj = Poptim * [X; ones(1, size(X, 2))];
X_reproj = bsxfun(@rdivide, X_reproj, sqrt(sum(X_reproj.^2, 1)));
res = sum(u .* X_reproj, 1);
inls = res > rthr;
inls_num = sum(inls);
end
|
```python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import norm
from scipy.stats import t as tdist
from scipy.stats import pearsonr
from ar1 import sample_ar1
```
# Correlation of time series with memory
In this example, you will see how to generate an empirical null-distribution for the correlation coefficient between two auto-correlated time series and how to test the correlation coefficient against this null distribution.
In the end you will also see a nice formula to correct for the effect of the auto-correlation without doing a simulation experiment. It's rare that we are so lucky, but sometimes it does happen.
First, we generate our "observations": two random time series that are uncorrelated but do exhibit an autocorrelation that would not be uncommon for a climate variable:
```python
n = 250
phi = 0.75
np.random.seed(12358)
y1, y2 = sample_ar1(n, phi, size=2)
```
```python
fig, axes = plt.subplots(figsize=(13, 8), nrows=2, sharex=True, sharey=False)
axes[0].plot(y1, 'C0-', lw=1)
axes[0].set_ylabel('$y_0$')
axes[1].plot(y2, 'C1-', lw=1)
axes[1].set_ylabel('$y_1$')
axes[-1].set_xlabel('Sample')
```
After taking a look at the time series, we calculate the correlation coefficient between the two.
```python
r, p_wn = pearsonr(y1, -1 * y2)
print('r=%.4f (p=%.4f, N=%u)'% (r, p_wn, n))
```
Conveniently, the scipy function `pearsonr` also returns the p-value for the correlation coefficient and it seems that the correlation is highly significant!
## Red noise null distribution
Unfortunately, the test assumes white-noise time series under the null hypothesis, which is a terrible assumption in this case.
To use a more realistic null-hypothesis we check the correlation coefficient against a null-distribution for auto-correlated time series.
For that we generate a large number of pairs of samples from an AR(1) process with the same number of observations and auto-correlation as our data and calculate the correlation between the two.
We then compare the correlation coefficient against this empirical null-distribution to check at which percentile of the distribution the correlation of our real data is, following the theory that underlies the classical t-test.
```python
nsamples = 20000
sample_r = np.zeros(nsamples)
for i in range(nsamples):
s1 = sample_ar1(n, phi)
s2 = sample_ar1(n, phi)
sample_r[i] = pearsonr(s1, s2)[0]
```
```python
plt.hist(sample_r, bins=50, histtype='step')
plt.xlabel('$r$')
plt.axvline(r)
```
```python
# Empirical p-value from sampled correlation coefficients
p_empirical = np.mean(np.abs(sample_r) >= np.abs(r))
print('Empirical p-value from simulation: %.4f' % p_empirical)
```
In the case of simple AR(1) processes, there is a formula that we can use to account for the reduced degrees of freedom due to the autocorrelation:
\begin{align}
n_\mathrm{eff} = n \frac{1 - \phi_1 \phi_2}{1 + \phi_1 \phi_2}
\end{align}
where $\phi_1$ and $\phi_2$ are the lag-one autocorrelations of the two correlated time series.
You can see below that the autocorrelation of the time series dramatically decreases the effective number of observations!
```python
# Calculate reduced degrees of freedom:
neff = n * (1 - phi * phi) / (1 + phi * phi)
print('Number of samples: %u' % n)
print('Effective sample size: %u' % neff)
```
We can then use this value for the calculation of the t-statistic and for the degrees of freedom of the t-distribution that we check the value against.
This value agrees well with the empirical value obtained above.
```python
# Use reduced number of freedoms to test against theoretical t-distribution
t = r * np.sqrt(neff) / np.sqrt(1 - r**2)
p_theory = 2 * (1 - tdist.cdf(t, neff))
print('Theoretical p-value using reduced DOF: %.4f' % p_theory)
```
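In practice the lag-one autocorrelations are rarely known a priori. A minimal sketch of the plug-in approach, assuming the lag-one sample autocorrelation is an adequate estimate of $\phi$:

```python
# Estimate phi for each series as the lag-one sample autocorrelation
phi1_hat = np.corrcoef(y1[:-1], y1[1:])[0, 1]
phi2_hat = np.corrcoef(y2[:-1], y2[1:])[0, 1]
neff_hat = n * (1 - phi1_hat * phi2_hat) / (1 + phi1_hat * phi2_hat)
print('Estimated effective sample size: %.1f' % neff_hat)
```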
|
module Tissue
import Base.Threads: @spawn, @threads, threadid
using MLStyle
export Graph, start, stop, wait_until_done
export @graph, @calculator, @bindstreams
"""
process(calculator, ...; graph::Graph)
Process the input streams into an output.
The first argument must be the associated calculator. Any other argument defines an input stream to the calculator. An input stream will be named with the exact same name as the corresponding argument. At all times, data coming from multiple input streams into a `process()` method is guaranteed to be derived from the same datum generated by the *source* calculator.
Optionally, if you need a reference to the `graph` object in the `process()` method, you can add it as a keyword argument named `graph` and Tissue.jl will supply a reference to the active graph object.
The value returned from the method is the value sent to the calculator's output stream. Hence, any calculator that binds one of its input streams with [`@bindstreams`](@ref) to this calculator's output stream will receive the return value of this method as an argument. `nothing` is treated specially: it means that the calculator will not process the given input. Any downstream calculator that depends on other calculators too will simply drop the value taken from their output stream.
It is important to remember that each calculator runs in its own task. Therefore, best practices about concurrency must be applied. Notably, the stream arguments of `process()` could possibly be accessed by multiple threads simultaneously. Therefore, it is highly recommended to make a [`deepcopy`](https://docs.julialang.org/en/v1/base/base/#Base.deepcopy) of an argument before mutating it. This will also make your calculator more reusable across graphs.
Multiple `process()` methods can be defined per calculator, allowing the calculator to be used in different graph topologies.
# Arguments
- `calculator`: the associated calculator
- `...`: the input streams
# Examples
```julia
using Base: deepcopy
function process(calc::MyCalculator, in_stream; graph)
if some_condition
stop(graph)
end
in_stream_copy = deepcopy(in_stream)
mutate(in_stream_copy)
return in_stream_copy
end
```
"""
function process(calculator)
throw("Called unimplemented process(). This shouldn't happen.")
end
"""
close(calculator)
Perform cleanup for the calculator. Optional.
You can define a method for your calculator type to perform any necessary cleanup. Called by [`wait_until_done(graph)`](@ref) on each calculator to perform cleanup.
# Examples
```julia
struct GoofyCalculator
resource
function GoofyCalculator()
resource = acquire_resource()
new(resource)
end
end
function process(c::GoofyCalculator, some_stream)
use_resource(c.resource)
end
function close(c::GoofyCalculator)
release_resource(c.resource)
end
```
"""
function close(calculator)
# nothing
end
include("graph.jl")
include("core.jl")
include("macros.jl")
end # module Tissue
|
/*
* TS Elements
* Copyright 2015-2018 M. Newhouse
* Released under the MIT license.
*/
#pragma once
#include "cup/player_definition.hpp"
#include <boost/container/small_vector.hpp>
namespace ts
{
namespace client
{
struct PlayerSettings
{
boost::container::small_vector<cup::PlayerDefinition, 8> selected_players;
};
}
}
|
"""Compute driving functions for various systems.
.. include:: math-definitions.rst
"""
import numpy as np
from numpy.core.umath_tests import inner1d # element-wise inner product
from scipy.special import jn, hankel2
from .. import util
from .. import defs
def wfs_2d_line(omega, x0, n0, xs, c=None):
"""Line source by 2-dimensional WFS.
::
        D(x0,k) = -j/2 k (x0-xs) n0 / |x0-xs| * H1(k |x0-xs|)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return -1j/2 * k * inner1d(ds, n0) / r * hankel2(1, k * r)
def _wfs_point(omega, x0, n0, xs, c=None):
"""Point source by two- or three-dimensional WFS.
::
(x0-xs) n0
D(x0,k) = j k ------------- e^(-j k |x0-xs|)
|x0-xs|^(3/2)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return 1j * k * inner1d(ds, n0) / r ** (3 / 2) * np.exp(-1j * k * r)
wfs_2d_point = _wfs_point
def wfs_25d_point(omega, x0, n0, xs, xref=[0, 0, 0], c=None, omalias=None):
"""Point source by 2.5-dimensional WFS.
::
____________ (x0-xs) n0
D(x0,k) = \|j k |xref-x0| ------------- e^(-j k |x0-xs|)
|x0-xs|^(3/2)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
xref = util.asarray_1d(xref)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return wfs_25d_preeq(omega, omalias, c) * \
np.sqrt(np.linalg.norm(xref - x0)) * inner1d(ds, n0) / \
r ** (3 / 2) * np.exp(-1j * k * r)
wfs_3d_point = _wfs_point
def _wfs_plane(omega, x0, n0, n=[0, 1, 0], c=None):
"""Plane wave by two- or three-dimensional WFS.
Eq.(17) from :cite:`Spors2008`::
        D(x0,k) = 2j k n n0 e^(-j k n x0)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
n = util.normalize_vector(n)
k = util.wavenumber(omega, c)
return 2j * k * np.inner(n, n0) * np.exp(-1j * k * np.inner(n, x0))
wfs_2d_plane = _wfs_plane
def wfs_25d_plane(omega, x0, n0, n=[0, 1, 0], xref=[0, 0, 0], c=None,
omalias=None):
"""Plane wave by 2.5-dimensional WFS.
::
                        ___________________
        D_2.5D(x0,w) = \|2 pi j k |xref-x0| n n0 e^(-j k n x0)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
n = util.normalize_vector(n)
xref = util.asarray_1d(xref)
k = util.wavenumber(omega, c)
return wfs_25d_preeq(omega, omalias, c) * \
np.sqrt(2*np.pi * np.linalg.norm(xref - x0)) * \
np.inner(n, n0) * np.exp(-1j * k * np.inner(n, x0))
wfs_3d_plane = _wfs_plane
def _wfs_focused(omega, x0, n0, xs, c=None):
"""Focused source by two- or three-dimensional WFS.
::
(x0-xs) n0
D(x0,k) = j k ------------- e^(j k |x0-xs|)
|x0-xs|^(3/2)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return 1j * k * inner1d(ds, n0) / r ** (3 / 2) * np.exp(1j * k * r)
wfs_2d_focused = _wfs_focused
def wfs_25d_focused(omega, x0, n0, xs, xref=[0, 0, 0], c=None, omalias=None):
"""Focused source by 2.5-dimensional WFS.
::
____________ (x0-xs) n0
D(x0,w) = \|j k |xref-x0| ------------- e^(j k |x0-xs|)
|x0-xs|^(3/2)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
xref = util.asarray_1d(xref)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return wfs_25d_preeq(omega, omalias, c) * \
np.sqrt(np.linalg.norm(xref - x0)) * inner1d(ds, n0) / \
r ** (3 / 2) * np.exp(1j * k * r)
wfs_3d_focused = _wfs_focused
def wfs_25d_preeq(omega, omalias, c):
"""Preqeualization for 2.5D WFS."""
if omalias is None:
return np.sqrt(1j * util.wavenumber(omega, c))
else:
if omega <= omalias:
return np.sqrt(1j * util.wavenumber(omega, c))
else:
return np.sqrt(1j * util.wavenumber(omalias, c))
def delay_3d_plane(omega, x0, n0, n=[0, 1, 0], c=None):
"""Plane wave by simple delay of secondary sources."""
x0 = util.asarray_of_rows(x0)
n = util.normalize_vector(n)
k = util.wavenumber(omega, c)
return np.exp(-1j * k * np.inner(n, x0))
def source_selection_plane(n0, n):
"""Secondary source selection for a plane wave.
Eq.(13) from :cite:`Spors2008`
"""
n0 = util.asarray_of_rows(n0)
n = util.normalize_vector(n)
return np.inner(n, n0) >= defs.selection_tolerance
def source_selection_point(n0, x0, xs):
"""Secondary source selection for a point source.
Eq.(15) from :cite:`Spors2008`
"""
n0 = util.asarray_of_rows(n0)
x0 = util.asarray_of_rows(x0)
xs = util.asarray_1d(xs)
ds = x0 - xs
return inner1d(ds, n0) >= defs.selection_tolerance
def source_selection_line(n0, x0, xs):
"""Secondary source selection for a line source.
compare Eq.(15) from :cite:`Spors2008`
"""
return source_selection_point(n0, x0, xs)
def source_selection_focused(ns, x0, xs):
"""Secondary source selection for a focused source.
Eq.(2.78) from :cite:`Wierstorf2014`
"""
x0 = util.asarray_of_rows(x0)
xs = util.asarray_1d(xs)
ns = util.normalize_vector(ns)
ds = xs - x0
return inner1d(ns, ds) >= defs.selection_tolerance
def source_selection_all(N):
"""Select all secondary sources."""
return np.ones(N, dtype=bool)
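# Minimal usage sketch (assumed geometry, not part of this module): a linear
# secondary source distribution along the x-axis with normals in +y, driven
# by a 2.5D WFS point source located behind the array.
#
#     N = 64
#     x0 = np.c_[np.linspace(-2, 2, N), np.zeros(N), np.zeros(N)]
#     n0 = np.tile([0.0, 1.0, 0.0], (N, 1))
#     d = wfs_25d_point(2 * np.pi * 1000, x0, n0, xs=[0, -1, 0])
#     a = source_selection_point(n0, x0, [0, -1, 0])
#     d = d * a   # mute deselected secondary sources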
def nfchoa_2d_plane(omega, x0, r0, n=[0, 1, 0], max_order=None, c=None):
r"""Plane wave by two-dimensional NFC-HOA.
.. math::
D(\phi_0, \omega) =
-\frac{2\i}{\pi r_0}
\sum_{m=-M}^M
\frac{\i^{-m}}{\Hankel{2}{m}{\wc r_0}}
\e{\i m (\phi_0 - \phi_\text{pw})}
See http://sfstoolbox.org/#equation-D.nfchoa.pw.2D.
"""
x0 = util.asarray_of_rows(x0)
k = util.wavenumber(omega, c)
n = util.normalize_vector(n)
phi, _, r = util.cart2sph(*n)
phi0 = util.cart2sph(*x0.T)[0]
M = _max_order_circular_harmonics(len(x0), max_order)
d = 0
for m in range(-M, M + 1):
d += 1j**-m / hankel2(m, k * r0) * np.exp(1j * m * (phi0 - phi))
return -2j / (np.pi*r0) * d
def nfchoa_25d_point(omega, x0, r0, xs, max_order=None, c=None):
r"""Point source by 2.5-dimensional NFC-HOA.
.. math::
D(\phi_0, \omega) =
\frac{1}{2 \pi r_0}
\sum_{m=-M}^M
\frac{\hankel{2}{|m|}{\wc r}}{\hankel{2}{|m|}{\wc r_0}}
\e{\i m (\phi_0 - \phi)}
See http://sfstoolbox.org/#equation-D.nfchoa.ps.2.5D.
"""
x0 = util.asarray_of_rows(x0)
k = util.wavenumber(omega, c)
xs = util.asarray_1d(xs)
phi, _, r = util.cart2sph(*xs)
phi0 = util.cart2sph(*x0.T)[0]
M = _max_order_circular_harmonics(len(x0), max_order)
hr = util.spherical_hn2(range(0, M + 1), k * r)
hr0 = util.spherical_hn2(range(0, M + 1), k * r0)
d = 0
for m in range(-M, M + 1):
d += hr[abs(m)] / hr0[abs(m)] * np.exp(1j * m * (phi0 - phi))
return d / (2 * np.pi * r0)
def nfchoa_25d_plane(omega, x0, r0, n=[0, 1, 0], max_order=None, c=None):
r"""Plane wave by 2.5-dimensional NFC-HOA.
.. math::
D(\phi_0, \omega) =
\frac{2\i}{r_0}
\sum_{m=-M}^M
\frac{\i^{-|m|}}{\wc \hankel{2}{|m|}{\wc r_0}}
\e{\i m (\phi_0 - \phi_\text{pw})}
See http://sfstoolbox.org/#equation-D.nfchoa.pw.2.5D.
"""
x0 = util.asarray_of_rows(x0)
k = util.wavenumber(omega, c)
n = util.normalize_vector(n)
phi, _, r = util.cart2sph(*n)
phi0 = util.cart2sph(*x0.T)[0]
M = _max_order_circular_harmonics(len(x0), max_order)
d = 0
hn2 = util.spherical_hn2(range(0, M + 1), k * r0)
for m in range(-M, M + 1):
d += 1j**-abs(m) / (k * hn2[abs(m)]) * np.exp(1j * m * (phi0 - phi))
return -2 / r0 * d
def sdm_2d_line(omega, x0, n0, xs, c=None):
"""Line source by two-dimensional SDM.
The secondary sources have to be located on the x-axis (y0=0).
Derived from :cite:`Spors2009`, Eq.(9), Eq.(4)::
D(x0,k) =
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return - 1j/2 * k * xs[1] / r * hankel2(1, k * r)
def sdm_2d_plane(omega, x0, n0, n=[0, 1, 0], c=None):
"""Plane wave by two-dimensional SDM.
The secondary sources have to be located on the x-axis (y0=0).
Derived from :cite:`Ahrens2012`, Eq.(3.73), Eq.(C.5), Eq.(C.11)::
D(x0,k) = kpw,y * e^(-j*kpw,x*x)
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
n = util.normalize_vector(n)
k = util.wavenumber(omega, c)
return k * n[1] * np.exp(-1j * k * n[0] * x0[:, 0])
def sdm_25d_plane(omega, x0, n0, n=[0, 1, 0], xref=[0, 0, 0], c=None):
"""Plane wave by 2.5-dimensional SDM.
The secondary sources have to be located on the x-axis (y0=0).
Eq.(3.79) from :cite:`Ahrens2012`::
D_2.5D(x0,w) =
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
n = util.normalize_vector(n)
xref = util.asarray_1d(xref)
k = util.wavenumber(omega, c)
return 4j * np.exp(-1j*k*n[1]*xref[1]) / hankel2(0, k*n[1]*xref[1]) * \
np.exp(-1j*k*n[0]*x0[:, 0])
def sdm_25d_point(omega, x0, n0, xs, xref=[0, 0, 0], c=None):
"""Point source by 2.5-dimensional SDM.
The secondary sources have to be located on the x-axis (y0=0).
Driving function from :cite:`Spors2010`, Eq.(24)::
D(x0,k) =
"""
x0 = util.asarray_of_rows(x0)
n0 = util.asarray_of_rows(n0)
xs = util.asarray_1d(xs)
xref = util.asarray_1d(xref)
k = util.wavenumber(omega, c)
ds = x0 - xs
r = np.linalg.norm(ds, axis=1)
return 1/2 * 1j * k * np.sqrt(xref[1] / (xref[1] - xs[1])) * \
xs[1] / r * hankel2(1, k * r)
def esa_edge_2d_plane(omega, x0, n=[0, 1, 0], alpha=3/2*np.pi, Nc=None, c=None):
"""Plane wave by two-dimensional ESA for an edge-shaped secondary source
distribution consisting of monopole line sources.
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
n : (3,) array_like, optional
Normal vector of synthesized plane wave.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
(N,) numpy.ndarray
Complex weights of secondary sources.
"""
x0 = np.asarray(x0)
n = util.normalize_vector(n)
k = util.wavenumber(omega, c)
phi_s = np.arctan2(n[1], n[0]) + np.pi
L = x0.shape[0]
r = np.linalg.norm(x0, axis=1)
phi = np.arctan2(x0[:, 1], x0[:, 0])
phi = np.where(phi < 0, phi+2*np.pi, phi)
if Nc is None:
Nc = np.ceil(2 * k * np.max(r) * alpha/np.pi)
Nc = int(Nc)  # ensure an integer series length for array sizes and range()
epsilon = np.ones(Nc)  # weights for series expansion
epsilon[0] = 2
d = np.zeros(L, dtype=complex)
for m in range(Nc):
nu = m*np.pi/alpha
d = d + 1/epsilon[m] * np.exp(1j*nu*np.pi/2) * np.sin(nu*phi_s) \
* np.cos(nu*phi) * nu/r * jn(nu, k*r)
d[phi > 0] = -d[phi > 0]
return 4*np.pi/alpha * d
def esa_edge_dipole_2d_plane(omega, x0, n=[0, 1, 0], alpha=3/2*np.pi, Nc=None, c=None):
"""Plane wave by two-dimensional ESA for an edge-shaped secondary source
distribution consisting of dipole line sources.
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
n : (3,) array_like, optional
Normal vector of synthesized plane wave.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
(N,) numpy.ndarray
Complex weights of secondary sources.
"""
x0 = np.asarray(x0)
n = util.normalize_vector(n)
k = util.wavenumber(omega, c)
phi_s = np.arctan2(n[1], n[0]) + np.pi
L = x0.shape[0]
r = np.linalg.norm(x0, axis=1)
phi = np.arctan2(x0[:, 1], x0[:, 0])
phi = np.where(phi < 0, phi+2*np.pi, phi)
if Nc is None:
Nc = np.ceil(2 * k * np.max(r) * alpha/np.pi)
Nc = int(Nc)  # ensure an integer series length for array sizes and range()
epsilon = np.ones(Nc)  # weights for series expansion
epsilon[0] = 2
d = np.zeros(L, dtype=complex)
for m in range(Nc):
nu = m*np.pi/alpha
d = d + 1/epsilon[m] * np.exp(1j*nu*np.pi/2) * np.cos(nu*phi_s) \
* np.cos(nu*phi) * jn(nu, k*r)
return 4*np.pi/alpha * d
def esa_edge_2d_line(omega, x0, xs, alpha=3/2*np.pi, Nc=None, c=None):
"""Line source by two-dimensional ESA for an edge-shaped secondary source
distribution consisting of monopole line sources.
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized line source.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
(N,) numpy.ndarray
Complex weights of secondary sources.
"""
x0 = np.asarray(x0)
k = util.wavenumber(omega, c)
phi_s = np.arctan2(xs[1], xs[0])
if phi_s < 0:
phi_s = phi_s + 2*np.pi
r_s = np.linalg.norm(xs)
L = x0.shape[0]
r = np.linalg.norm(x0, axis=1)
phi = np.arctan2(x0[:, 1], x0[:, 0])
phi = np.where(phi < 0, phi+2*np.pi, phi)
if Nc is None:
Nc = np.ceil(2 * k * np.max(r) * alpha/np.pi)
Nc = int(Nc)  # ensure an integer series length for array sizes and range()
epsilon = np.ones(Nc)  # weights for series expansion
epsilon[0] = 2
d = np.zeros(L, dtype=complex)
idx = (r <= r_s)
for m in range(Nc):
nu = m*np.pi/alpha
f = 1/epsilon[m] * np.sin(nu*phi_s) * np.cos(nu*phi) * nu/r
d[idx] = d[idx] + f[idx] * jn(nu, k*r[idx]) * hankel2(nu, k*r_s)
d[~idx] = d[~idx] + f[~idx] * jn(nu, k*r_s) * hankel2(nu, k*r[~idx])
d[phi > 0] = -d[phi > 0]
return -1j*np.pi/alpha * d
def esa_edge_25d_point(omega, x0, xs, xref=[2, -2, 0], alpha=3/2*np.pi, Nc=None, c=None):
"""Point source by 2.5-dimensional ESA for an edge-shaped secondary source
distribution consisting of monopole line sources.
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized point source.
xref: (3,) array_like or float
Reference position or reference distance
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
(N,) numpy.ndarray
Complex weights of secondary sources.
"""
x0 = np.asarray(x0)
xs = np.asarray(xs)
# test for a scalar reference distance before any array conversion,
# otherwise np.isscalar() is always False for the converted value
if np.isscalar(xref):
a = np.linalg.norm(xref)/np.linalg.norm(xref-xs)
else:
xref = np.asarray(xref)
a = np.linalg.norm(xref-x0, axis=1)/np.linalg.norm(xref-xs)
return 1j*np.sqrt(a) * esa_edge_2d_line(omega, x0, xs, alpha=alpha, Nc=Nc, c=c)
def esa_edge_dipole_2d_line(omega, x0, xs, alpha=3/2*np.pi, Nc=None, c=None):
"""Line source by two-dimensional ESA for an edge-shaped secondary source
distribution consisting of dipole line sources.
One leg of the secondary sources has to be located on the x-axis (y0=0),
the edge at the origin.
Derived from :cite:`Spors2016`
Parameters
----------
omega : float
Angular frequency.
x0 : (N, 3) array_like
Sequence of secondary source positions.
xs : (3,) array_like
Position of synthesized line source.
alpha : float, optional
Outer angle of edge.
Nc : int, optional
Number of elements for series expansion of driving function. Estimated
if not given.
c : float, optional
Speed of sound
Returns
-------
(N,) numpy.ndarray
Complex weights of secondary sources.
"""
x0 = np.asarray(x0)
k = util.wavenumber(omega, c)
phi_s = np.arctan2(xs[1], xs[0])
if phi_s < 0:
phi_s = phi_s + 2*np.pi
r_s = np.linalg.norm(xs)
L = x0.shape[0]
r = np.linalg.norm(x0, axis=1)
phi = np.arctan2(x0[:, 1], x0[:, 0])
phi = np.where(phi < 0, phi+2*np.pi, phi)
if Nc is None:
Nc = np.ceil(2 * k * np.max(r) * alpha/np.pi)
Nc = int(Nc)  # ensure an integer series length for array sizes and range()
epsilon = np.ones(Nc)  # weights for series expansion
epsilon[0] = 2
d = np.zeros(L, dtype=complex)
idx = (r <= r_s)
for m in range(Nc):
nu = m*np.pi/alpha
f = 1/epsilon[m] * np.cos(nu*phi_s) * np.cos(nu*phi)
d[idx] = d[idx] + f[idx] * jn(nu, k*r[idx]) * hankel2(nu, k*r_s)
d[~idx] = d[~idx] + f[~idx] * jn(nu, k*r_s) * hankel2(nu, k*r[~idx])
return -1j*np.pi/alpha * d
def _max_order_circular_harmonics(N, max_order):
"""Compute order of 2D HOA."""
return N // 2 if max_order is None else max_order
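# Minimal usage sketch: driving signals for a 2.5D WFS plane wave on a
# circular array. It assumes this module's `util` and `defs` helpers are
# available and that `util.wavenumber` falls back to a default speed of
# sound when c is None; the geometry below is made up for illustration.
if __name__ == '__main__':
    import numpy as np

    R, N = 1.5, 64  # array radius in metres and number of secondary sources
    alpha0 = np.linspace(0, 2 * np.pi, N, endpoint=False)
    x0 = np.column_stack([R * np.cos(alpha0), R * np.sin(alpha0), np.zeros(N)])
    n0 = -x0 / R  # unit normals pointing towards the array centre

    omega = 2 * np.pi * 1000  # 1 kHz
    d = wfs_25d_plane(omega, x0, n0, n=[0, 1, 0], xref=[0, 0, 0])
    active = source_selection_plane(n0, [0, 1, 0])  # active secondary sources
    print(d[active][:5])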
using Survival
using Test
using CSV
using DataFrames
using CategoricalArrays
using Distributions
using LinearAlgebra
using StatsBase
using StatsModels
@testset "Event times" begin
@test isevent(EventTime{Int}(44, true))
@test !isevent(EventTime(3.2, false))
@test isevent(EventTime(2.5f0))
@test !iscensored(EventTime(3))
@test iscensored(EventTime(2.1, false))
@test eltype(EventTime(1)) == Int
@test sprint(show, EventTime(1, true)) == "1"
@test sprint(show, EventTime(1, false)) == "1+"
@test convert(Int, EventTime(1)) == 1
@test convert(Int, EventTime(1, false)) == 1
@test convert(Float64, EventTime(1)) == 1.0
@test convert(EventTime, 1) == EventTime(1)
@test isless(EventTime(1), EventTime(1, false))
@test !isless(EventTime(2), EventTime(1, false))
let x = [EventTime(2, false), EventTime(1), EventTime(2)]
@test sort(x) == [EventTime(1), EventTime(2), EventTime(2, false)]
end
end
@testset "Kaplan-Meier" begin
t = [
310, 361, 654, 728, 61, 81, 520, 473, 107, 122, 965, 731, 153, 433, 145, 95, 765,
735, 5, 687, 345, 444, 60, 208, 821, 305, 226, 426, 705, 363, 167, 641, 740, 245,
588, 166, 559, 450, 529, 351, 201, 524, 199, 550, 551, 543, 293, 511, 511, 371, 201,
62, 356, 340, 315, 182, 364, 376, 384, 268, 266, 194, 348, 382, 296, 186, 145, 269,
350, 272, 292, 332, 285, 243, 276, 79, 240, 202, 235, 224, 239, 173, 252, 92, 192,
211, 175, 203, 105, 177,
]
s = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1,
0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0,
]
# Results generated from R 3.3.2 using survival 2.41
# survfit(Surv(t, s) ~ 1, conf.type = "log-log")
# $surv
r_surv = [
0.9888889, 0.9777778, 0.9666667, 0.9555556, 0.9444444, 0.9333333, 0.9333333, 0.9220884,
0.9220884, 0.9107045, 0.8993207, 0.8765531, 0.8651693, 0.8537855, 0.8424017, 0.8424017,
0.8424017, 0.8424017, 0.8305369, 0.8186721, 0.8186721, 0.8066328, 0.7945935, 0.7705149,
0.7705149, 0.7705149, 0.7580872, 0.7580872, 0.7580872, 0.7452383, 0.7452383, 0.7321639,
0.7321639, 0.7321639, 0.7186054, 0.7186054, 0.7186054, 0.7045151, 0.7045151, 0.7045151,
0.7045151, 0.6895254, 0.6895254, 0.6742026, 0.6742026, 0.6585235, 0.6428443, 0.6428443,
0.6428443, 0.6263611, 0.609878, 0.5933948, 0.5769116, 0.5604284, 0.5604284, 0.5434457,
0.526463, 0.526463, 0.5089143, 0.5089143, 0.5089143, 0.5089143, 0.4893406, 0.469767,
0.4501934, 0.4306198, 0.4110461, 0.4110461, 0.3894121, 0.3677781, 0.3677781, 0.3677781,
0.3432596, 0.3432596, 0.3432596, 0.3432596, 0.3120542, 0.2808487, 0.2496433, 0.2184379,
0.1872325, 0.1560271, 0.1248217, 0.1248217, 0.08321444, 0.08321444, 0.08321444,
]
# $n.risk
r_risk = [
90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, 79, 77, 76, 75, 74, 73, 72, 71, 70, 69, 68,
67, 66, 64, 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45,
44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23,
22, 21, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1
]
# $std.err
r_stderr = [
0.01117336, 0.01589104, 0.01957401, 0.02273314, 0.02556550, 0.02817181, 0.02817181,
0.03066888, 0.03066888, 0.03308929, 0.03539956, 0.03977328, 0.04186640, 0.04391166,
0.04591747, 0.04591747, 0.04591747, 0.04591747, 0.04805852, 0.05016633, 0.05016633,
0.05230824, 0.05442696, 0.05861552, 0.05861552, 0.05861552, 0.06082918, 0.06082918,
0.06082918, 0.06318557, 0.06318557, 0.06561783, 0.06561783, 0.06561783, 0.06822833,
0.06822833, 0.06822833, 0.07104408, 0.07104408, 0.07104408, 0.07104408, 0.07422800,
0.07422800, 0.07755545, 0.07755545, 0.08104663, 0.08455340, 0.08455340, 0.08455340,
0.08845361, 0.09238657, 0.09636405, 0.10039761, 0.10449888, 0.10449888, 0.10893570,
0.11346828, 0.11346828, 0.11842498, 0.11842498, 0.11842498, 0.11842498, 0.12475150,
0.13126159, 0.13798985, 0.14497408, 0.15225631, 0.15225631, 0.16157339, 0.17138826,
0.17138826, 0.17138826, 0.18475887, 0.18475887, 0.18475887, 0.18475887, 0.20791044,
0.23310483, 0.26120251, 0.29340057, 0.33150176, 0.37845310, 0.43957565, 0.43957565,
0.59991117, 0.59991117, 0.59991117,
]
# $lower
r_lower = [
0.92374348, 0.91406011, 0.90021666, 0.88590944, 0.87167275, 0.85762000, 0.85762000,
0.84350521, 0.84350521, 0.82935113, 0.81542216, 0.78813835, 0.77474465, 0.76149619,
0.74838092, 0.74838092, 0.74838092, 0.74838092, 0.73463920, 0.72104592, 0.72104592,
0.70732360, 0.69373775, 0.66693587, 0.66693587, 0.66693587, 0.65314492, 0.65314492,
0.65314492, 0.63887126, 0.63887126, 0.62441211, 0.62441211, 0.62441211, 0.60940174,
0.60940174, 0.60940174, 0.59378857, 0.59378857, 0.59378857, 0.59378857, 0.57705901,
0.57705901, 0.56006894, 0.56006894, 0.54279401, 0.52574922, 0.52574922, 0.52574922,
0.50779307, 0.49008837, 0.47261930, 0.45537265, 0.43833741, 0.43833741, 0.42084761,
0.40357982, 0.40357982, 0.38579388, 0.38579388, 0.38579388, 0.38579388, 0.36559209,
0.34575866, 0.32627779, 0.30713724, 0.28832788, 0.28832788, 0.26728290, 0.24672627,
0.24672627, 0.24672627, 0.22307271, 0.22307271, 0.22307271, 0.22307271, 0.19157693,
0.16205660, 0.13440887, 0.10859921, 0.08465965, 0.06269905, 0.04293072, 0.04293072,
0.01850525, 0.01850525, 0.01850525,
]
# $upper
r_upper = [
0.9984273, 0.9943955, 0.9891262, 0.9830833, 0.9764926, 0.9694844, 0.9694844,
0.9620777, 0.9620777, 0.9543175, 0.9463099, 0.9296781, 0.9211003, 0.9123712,
0.9035043, 0.9035043, 0.9035043, 0.9035043, 0.8942180, 0.8848013, 0.8848013,
0.8751547, 0.8653903, 0.8455370, 0.8455370, 0.8455370, 0.8352056, 0.8352056,
0.8352056, 0.8244966, 0.8244966, 0.8135325, 0.8135325, 0.8135325, 0.8021438,
0.8021438, 0.8021438, 0.7902942, 0.7902942, 0.7902942, 0.7902942, 0.7777438,
0.7777438, 0.7648356, 0.7648356, 0.7515503, 0.7381102, 0.7381102, 0.7381102,
0.7240036, 0.7097282, 0.6952904, 0.6806956, 0.6659482, 0.6659482, 0.6507130,
0.6353162, 0.6353162, 0.6193746, 0.6193746, 0.6193746, 0.6193746, 0.6019235,
0.5842205, 0.5662707, 0.5480775, 0.5296427, 0.5296427, 0.5096032, 0.4892180,
0.4892180, 0.4892180, 0.4666889, 0.4666889, 0.4666889, 0.4666889, 0.4401063,
0.4122125, 0.3830497, 0.3526092, 0.3208327, 0.2876052, 0.2527363, 0.2527363,
0.2123639, 0.2123639, 0.2123639,
]
km = fit(KaplanMeier, t, s)
jl_surv = km.survival
@test length(jl_surv) == length(r_surv)
@test jl_surv ≈ r_surv atol=1e-6
@test km.times == sort!(unique(t))
@test km.natrisk == r_risk
@test km.nevents == [sum(s[t .== tᵢ]) for tᵢ in sort!(unique(t))]
@test km.ncensor == [sum(iszero, s[t .== tᵢ]) for tᵢ in sort!(unique(t))]
@test km.stderr ≈ r_stderr atol=1e-6
conf = confint(km)
jl_lower = first.(conf)
jl_upper = last.(conf)
@test jl_lower ≈ r_lower atol=1e-6
@test jl_upper ≈ r_upper atol=1e-6
@test_throws DimensionMismatch fit(KaplanMeier, [1, 2], [true])
@test_throws ArgumentError fit(KaplanMeier, Float64[], Bool[])
km_et = fit(KaplanMeier, EventTime.(t, Bool.(s)))
@test all(f->getfield(km, f) ≈ getfield(km_et, f), fieldnames(KaplanMeier))
@test_throws ArgumentError fit(KaplanMeier, EventTime{Int}[])
end
@testset "Nelson-Aalen" begin
t = [
310, 361, 654, 728, 61, 81, 520, 473, 107, 122, 965, 731, 153, 433, 145, 95, 765,
735, 5, 687, 345, 444, 60, 208, 821, 305, 226, 426, 705, 363, 167, 641, 740, 245,
588, 166, 559, 450, 529, 351, 201, 524, 199, 550, 551, 543, 293, 511, 511, 371, 201,
62, 356, 340, 315, 182, 364, 376, 384, 268, 266, 194, 348, 382, 296, 186, 145, 269,
350, 272, 292, 332, 285, 243, 276, 79, 240, 202, 235, 224, 239, 173, 252, 92, 192,
211, 175, 203, 105, 177,
]
s = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1,
0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0,
]
na = fit(NelsonAalen, t, s)
km = fit(KaplanMeier, t, s)
@test na.times == km.times
@test na.nevents == km.nevents
@test na.ncensor == km.ncensor
@test na.natrisk == km.natrisk
@test exp.(-na.chaz[1:50]) ≈ km.survival[1:50] rtol=1e-2
@test na.stderr[1:50] ≈ km.stderr[1:50] rtol=2e-2
na_conf = confint(na)
na_lower, na_upper = getindex.(na_conf, 1), getindex.(na_conf, 2)
@test cdf.(Normal.(na.chaz, na.stderr), na_lower) ≈ fill(0.025, length(na.chaz)) rtol=1e-8
@test cdf.(Normal.(na.chaz, na.stderr), na_upper) ≈ fill(0.975, length(na.chaz)) rtol=1e-8
na_conf = confint(na, 0.01)
na_lower, na_upper = getindex.(na_conf, 1), getindex.(na_conf, 2)
@test cdf.(Normal.(na.chaz, na.stderr), na_lower) ≈ fill(0.005, length(na.chaz)) rtol=1e-8
@test cdf.(Normal.(na.chaz, na.stderr), na_upper) ≈ fill(0.995, length(na.chaz)) rtol=1e-8
end
@testset "Cox" begin
rossi = CSV.read(joinpath(@__DIR__, "data", "rossi.csv"), DataFrame)
rossi.event = EventTime.(rossi.week, rossi.arrest .== 1)
outcome = coxph(@formula(event ~ fin + age + race + wexp + mar + paro + prio), rossi; tol=1e-8)
outcome_coefmat = coeftable(outcome)
regressor_matrix = Matrix(rossi[!, [:fin, :age, :race, :wexp, :mar, :paro, :prio]])
event_vector = rossi.event
outcome_without_formula = coxph(regressor_matrix, event_vector)
@test sprint(show, outcome_without_formula) == chomp("""
CoxModel{Float64}
Coefficients:
──────────────────────────────────────────────
Estimate Std.Error z value Pr(>|z|)
──────────────────────────────────────────────
x1 -0.379416 0.191379 -1.98253 0.0474
x2 -0.0574299 0.0219988 -2.61059 0.0090
x3 0.31392 0.307995 1.01924 0.3081
x4 -0.14981 0.212226 -0.705898 0.4803
x5 -0.433724 0.38187 -1.13579 0.2560
x6 -0.0848615 0.195756 -0.433505 0.6646
x7 0.091521 0.0286469 3.1948 0.0014
──────────────────────────────────────────────
""")
coef_matrix = ModelMatrix(ModelFrame(@formula(event ~ 0 + fin + age + race + wexp + mar + paro + prio), rossi)).m
outcome_from_matrix = coxph(coef_matrix, rossi.event; tol=1e-8, l2_cost=0)
outcome_from_matrix32 = coxph(Float32.(coef_matrix), rossi.event; tol=1e-5)
outcome_from_matrix_int = coxph(Int64.(coef_matrix), rossi.event; tol=1e-6, l2_cost=0.0)
expected_coefs = [
-0.379422 0.191379 -1.98256 0.0474;
-0.0574377 0.0219995 -2.61087 0.0090;
0.3139 0.307993 1.01918 0.3081;
-0.149796 0.212224 -0.705837 0.4803;
-0.433704 0.381868 -1.13574 0.2561;
-0.0848711 0.195757 -0.433554 0.6646;
0.0914971 0.0286485 3.19378 0.0014
]
@test coef(outcome_from_matrix) ≈ coef(outcome) atol=1e-5
@test coef(outcome_from_matrix) ≈ coef(outcome_from_matrix32) atol=1e-4
@test coef(outcome_from_matrix) ≈ coef(outcome_from_matrix_int) atol=1e-5
@test nobs(outcome) == size(rossi, 1)
@test dof(outcome) == 7
@test loglikelihood(outcome) > nullloglikelihood(outcome)
@test all(x->x > 0, eigen(outcome.model.fischer_info).values)
@test outcome.model.fischer_info * vcov(outcome) ≈ I atol=1e-10
@test norm(outcome.model.score) < 1e-5
@test hcat(outcome_coefmat.cols[1:3]...) ≈ expected_coefs[:,1:3] atol=1e-5
outcome_fin = coxph(@formula(event ~ fin), rossi; tol=1e-8)
@test coeftable(outcome_fin).rownms == ["fin"]
outcome_finrace = coxph(@formula(event ~ fin * race), rossi; tol=1e-8)
@test coeftable(outcome_finrace).rownms == ["fin", "race","fin & race"]
transform!(rossi, :fin => categorical, renamecols = false)
outcome_fincat = coxph(@formula(event ~ fin), rossi; tol=1e-8)
@test coeftable(outcome_fincat).rownms == ["fin: 1"]
@test coef(outcome_fin) ≈ coef(outcome_fincat) atol=1e-8
outcome_fincatrace = coxph(@formula(event ~ fin * race), rossi; tol=1e-8)
@test coeftable(outcome_fincatrace).rownms == ["fin: 1", "race","fin: 1 & race"]
@test coef(outcome_fincatrace) ≈ coef(outcome_finrace) atol=1e-8
transform!(rossi, :race => categorical, renamecols = false)
outcome_fincatracecat = coxph(@formula(event ~ fin * race), rossi; tol=1e-8)
@test coeftable(outcome_fincatracecat).rownms == ["fin: 1", "race: 1","fin: 1 & race: 1"]
@test coef(outcome_fincatracecat) ≈ coef(outcome_finrace) atol=1e-8
end
@testset "Newton-Raphson" begin
function fgh!(x, grad, hes, compute_ders)
if compute_ders
grad[1] = 2exp(x[1])*(exp(x[1]) - 1)
hes[1,1] = 2exp(x[1])*(2exp(x[1]) - 1)
end
(exp(x[1]) - 1)^2
end
x, y, grad, hes = Survival.newton_raphson(fgh!, [2.2], tol=1e-5)
@test x ≈ [0] atol=1e-5
@test y ≈ 0 atol=1e-5
@test grad ≈ [0] atol=1e-5
@test hes ≈ [2] atol=1e-5
@test_throws ConvergenceException Survival.newton_raphson(fgh!, [2.2], max_iter=2)
function wrong_fgh!(x, grad, hes, compute_ders)
if compute_ders
grad[1] = -2exp(x[1])*(exp(x[1]) - 1) # wrong sign
hes[1,1] = 2exp(x[1])*(2exp(x[1]) - 1)
end
(exp(x[1]) - 1)^2
end
@test_throws ErrorException Survival.newton_raphson(wrong_fgh!, [2.2])
end
The product of a scalar and a polynomial is equal to the product of the polynomial and the scalar.
State Before: l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
⊢ submatrix (diagonal d) e e i j = diagonal (d ∘ e) i j State After: l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j Tactic: rw [submatrix_apply] State Before: l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j State After: case pos
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : i = j
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j
case neg
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : ¬i = j
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j Tactic: by_cases h : i = j State Before: case pos
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : i = j
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j State After: case pos
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : i = j
⊢ d (e j) = (d ∘ e) j Tactic: rw [h, diagonal_apply_eq, diagonal_apply_eq] State Before: case pos
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : i = j
⊢ d (e j) = (d ∘ e) j State After: no goals Tactic: simp only [Function.comp_apply] State Before: case neg
l : Type u_2
m : Type u_1
n : Type ?u.1089606
o : Type ?u.1089609
m' : o → Type ?u.1089614
n' : o → Type ?u.1089619
R : Type ?u.1089622
S : Type ?u.1089625
α : Type v
β : Type w
γ : Type ?u.1089632
inst✝² : Zero α
inst✝¹ : DecidableEq m
inst✝ : DecidableEq l
d : m → α
e : l → m
he : Function.Injective e
i j : l
h : ¬i = j
⊢ diagonal d (e i) (e j) = diagonal (d ∘ e) i j State After: no goals Tactic: rw [diagonal_apply_ne _ h, diagonal_apply_ne _ (he.ne h)]
Load LFindLoad.
From lfind Require Import LFind.
From QuickChick Require Import QuickChick.
From adtind Require Import goal33.
Derive Show for natural.
Derive Arbitrary for natural.
Instance Dec_Eq_natural : Dec_Eq natural.
Proof. dec_eq. Qed.
Lemma conj18eqsynthconj3 : forall (lv0 : natural) (lv1 : natural), (@eq natural (plus lv0 lv1) (plus lv1 (plus lv0 Zero))).
Admitted.
QuickChick conj18eqsynthconj3.
theory Datatype_thru_Codatatype
imports Main
begin
section {* Inductive Datatypes with Nesting through Codatatypes as in Isabelle/HOL *}
text {*
The abstract interface between a datatype and a codatatype through which it is
nested is a set function that returns the elements. This is modeled below by a
@{text subtrees} predicate.
*}
subsection {* Basic Setup *}
hide_const (open) Nil Cons hd tl
text \<open>
This will be called @{text "iota!"} in Nunchaku:
\<close>
definition The_bang :: "('a \<Rightarrow> bool) \<Rightarrow> 'a" where
"The_bang P = (if \<exists>x. P x then The P else Nitpick.unknown)"
definition pred_of_fun :: "('a \<Rightarrow> bool) \<Rightarrow> ('a \<Rightarrow> 'b) \<Rightarrow> 'a \<Rightarrow> 'b \<Rightarrow> bool" where
"pred_of_fun p f x y \<longleftrightarrow> p x \<and> f x = y"
typedecl elem
typedecl tree
typedecl forest
nitpick_params [user_axioms, dont_box, show_all, atoms elem = a b c d e f g h i j k l,
atoms forest = as bs cs ds es fs gs hs "is" js ks ls,
atoms tree = aa bb cc dd ee ff gg hh ii jj kk ll]
subsection {* Codatatype Destructors *}
axiomatization
null :: "forest \<Rightarrow> bool" and
hd :: "forest \<Rightarrow> tree" and
tl :: "forest \<Rightarrow> forest"
coinductive bisim :: "forest \<Rightarrow> forest \<Rightarrow> bool" where
"null xs \<Longrightarrow> null ys \<Longrightarrow> bisim xs ys"
| "\<not> null xs \<Longrightarrow> \<not> null ys \<Longrightarrow> hd xs = hd ys \<Longrightarrow> bisim (tl xs) (tl ys) \<Longrightarrow> bisim xs ys"
text \<open>
@{prop "hd xs = hd ys"} is appropriate, as opposed to lifting @{const bisim}
with the tree relator.
\<close>
axiomatization where
bisim: "\<And>xs ys. bisim xs ys \<Longrightarrow> xs = ys"
definition
subtrees :: "forest \<Rightarrow> tree \<Rightarrow> bool"
where
"subtrees = rtranclp (pred_of_fun (Not \<circ> null) tl) OO pred_of_fun (Not \<circ> null) hd"
subsection {* Datatype Destructors *}
axiomatization
lab :: "tree \<Rightarrow> elem" and
sub :: "tree \<Rightarrow> forest"
where
unique: "\<And>xs ys. lab xs = lab ys \<Longrightarrow> sub xs = sub ys \<Longrightarrow> xs = ys" and
acyclic: "\<And>xs. \<not> tranclp (pred_of_fun (\<lambda>_. True) sub OO subtrees) xs xs"
nitpick_params [card elem = 3, card forest = 1-5, card tree = 1-5, iter = 1-5, mono]
subsection {* Codatatype Constructors *}
definition
Nil :: forest
where
"Nil = The_bang (\<lambda>ys. null ys)"
definition
Cons :: "tree \<Rightarrow> forest \<Rightarrow> forest"
where
"Cons x xs = The_bang (\<lambda>ys. \<not> null ys \<and> hd ys = x \<and> tl ys = xs)"
lemma "Nil \<noteq> Cons x xs"
nitpick[satisfy, expect = genuine]
nitpick[expect = none]
sorry
lemma "xs = Nil \<or> xs = Cons (hd xs) (tl xs)"
nitpick[satisfy, expect = genuine]
nitpick [expect = none]
sorry
lemma "xs \<noteq> Cons x xs"
nitpick [expect = genuine]
oops
lemma "xs \<noteq> Cons x (Cons y xs)"
nitpick [expect = genuine]
oops
lemma "x \<noteq> y \<Longrightarrow> xs \<noteq> Cons x (Cons y xs)"
nitpick [expect = genuine]
oops
lemma "xs = ys \<longleftrightarrow> (null xs \<and> null ys) \<or> (\<not> null xs \<and> \<not> null ys \<and> hd xs = hd ys \<and> tl xs = tl ys)"
nitpick [expect = none]
sorry
lemma "xs = Cons x ys \<Longrightarrow> ys = Cons x xs \<Longrightarrow> xs = ys"
nitpick [expect = none]
nitpick [satisfy, expect = genuine]
sorry
lemma "xs \<noteq> Nil \<Longrightarrow> xs \<noteq> sub (hd xs)"
nitpick [expect = none]
sorry
subsection {* Datatype Constructors *}
definition
Node :: "elem \<Rightarrow> forest \<Rightarrow> tree"
where
"Node x xs = The_bang (\<lambda>yy. lab yy = x \<and> sub yy = xs)"
lemma "xx \<noteq> Node x (Cons xx xs)"
nitpick [expect = none]
sorry
lemma "xx \<noteq> Node x (Cons (Node y (Cons xx Nil)) Nil)"
nitpick [expect = none]
nitpick [satisfy, expect = genuine]
sorry
lemma "xx = yy \<longleftrightarrow> lab xx = lab yy \<and> sub xx = sub yy"
nitpick [expect = none]
sorry
lemma "sub xx \<noteq> Nil \<Longrightarrow> tl (sub xx) \<noteq> sub xx"
nitpick [expect = genuine]
oops
lemma "xs = Cons (Node x xs) Nil"
nitpick [satisfy, expect = none]
oops
end
Formal statement is: lemma residue_cong: assumes eq: "eventually (\<lambda>z. f z = g z) (at z)" and "z = z'" shows "residue f z = residue g z'" Informal statement is: If two functions $f$ and $g$ are equal in a neighborhood of a point $z$, then their residues at $z$ are equal.
import os
import sys
import torch
import json
import toml
from datetime import datetime
from tqdm import tqdm
from glob import glob
import soundfile as sf
import numpy as np
from tensorboardX import SummaryWriter
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
from hearinglossmodel import MSBGHearingModel, torchloudnorm
from MSBG.ear import Ear
from MSBG.audiogram import Audiogram
from pystoi import stoi
class Trainer:
def __init__(self, config, model_denoising, model_amp, optimizer, loss_func, train_dataloader, validation_dataloader, device):
self.model_denoising = model_denoising
self.model_amp = model_amp
self.optimizer = optimizer
self.loss_func = loss_func
self.train_dataloader = train_dataloader
self.validation_dataloader = validation_dataloader
self.device = device
self.optimize_denoising_network = config['optimizer']['optimize_denoising_network']
# training config
self.trainer_config = config['trainer']
self.epochs = self.trainer_config['epochs']
self.save_checkpoint_interval = self.trainer_config['save_checkpoint_interval']
self.clip_grad_norm_value = self.trainer_config['clip_grad_norm_value']
self.exp_path = self.trainer_config['exp_path'] + '_' + str(datetime.now()).split('.')[0]
self.resume = self.trainer_config['resume']
self.log_path = os.path.join(self.exp_path, 'logs')
self.checkpoint_path = os.path.join(self.exp_path, 'checkpoints')
self.sample_path = os.path.join(self.exp_path, 'val_samples')
os.makedirs(self.log_path, exist_ok=True)
os.makedirs(self.checkpoint_path, exist_ok=True)
os.makedirs(self.sample_path, exist_ok=True)
# save the config
with open(os.path.join(self.exp_path, 'config.toml'), 'w') as f:
toml.dump(config, f)
f.close()
# training visualisation
self.writer = SummaryWriter(self.log_path)
self.global_step = 0
self.start_epoch = 1
self.best_score = 0
# resume training from the latest checkpoint if requested
if self.resume:
self._resume_checkpoint()
# hearing loss model
with open(config['listener']['listeners_path'], 'r') as f:
listeners_file = json.load(f)
audiogram_cfs = listeners_file[config['listener']['listener_idx']]['audiogram_cfs']
audiogram_lvl_l = listeners_file[config['listener']['listener_idx']]['audiogram_levels_l']
audiogram_lvl_r = listeners_file[config['listener']['listener_idx']]['audiogram_levels_r']
f.close()
if config['listener']['listener_ear'] == 'l':
self.ear_idx = 0
audiogram = audiogram_lvl_l
elif config['listener']['listener_ear'] == 'r':
self.ear_idx = 1
audiogram = audiogram_lvl_r
else:
raise ValueError("No THIRD EAR MY FRIEND")
self.sr = config['listener']['listener_sr']
self.hearinglossmodel = MSBGHearingModel(audiogram=audiogram, audiometric=audiogram_cfs, sr=self.sr, spl_cali=True)
self.normalhearingmodel = MSBGHearingModel(audiogram=np.zeros_like(audiogram), audiometric=audiogram_cfs, sr=self.sr, spl_cali=True)
# msbg model
src_pos = config['listener']['src_pos']
MSBGaudiogram = Audiogram(cfs=np.array(audiogram_cfs), levels=np.array(audiogram))
self.msbg_ear = Ear(src_pos, MSBGaudiogram)
normalMSBGaudiogram = Audiogram(cfs=np.array(audiogram_cfs), levels=np.zeros_like(audiogram))
self.normal_msbg_ear = Ear(src_pos, normalMSBGaudiogram)
# downsample for convtasnet training
self.downsample_factor = config['listener']['downsample_factor']
""" torchaudio resample"""
import torchaudio
self.downsample = torchaudio.transforms.Resample(orig_freq=self.sr, new_freq=self.sr // self.downsample_factor,
resampling_method='sinc_interpolation')
self.upsample = torchaudio.transforms.Resample(orig_freq=self.sr // self.downsample_factor, new_freq=self.sr,
resampling_method='sinc_interpolation')
# loudness norm
self.loudnorm = config['listener']['loudnorm']
self.ln = torchloudnorm()
def _set_train_mode(self, model):
model.train()
def _set_eval_mode(self, model):
model.eval()
def _save_checkpoint(self, model, model_name, epoch, score):
state_dict = {
'epoch': epoch,
'optimizer': self.optimizer.state_dict(),
'model': model.state_dict()
}
torch.save(state_dict, os.path.join(self.checkpoint_path, model_name + f'_{str(epoch).zfill(4)}.tar'))
if score >= self.best_score:
torch.save(state_dict, os.path.join(self.checkpoint_path, model_name + '_best.tar'))
self.best_score = score.copy()
def _resume_checkpoint(self):
# restore the amplification model (and, if present, the denoising model)
# from the latest numbered checkpoints
map_location = self.device
latest_amp = sorted(glob(os.path.join(self.checkpoint_path, 'model_amp_[0-9]*.tar')))[-1]
checkpoint = torch.load(latest_amp, map_location=map_location)
self.start_epoch = checkpoint['epoch'] + 1
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.model_amp.load_state_dict(checkpoint['model'])
denoising_checkpoints = sorted(glob(os.path.join(self.checkpoint_path, 'model_denoising_[0-9]*.tar')))
if denoising_checkpoints:
checkpoint = torch.load(denoising_checkpoints[-1], map_location=map_location)
self.model_denoising.load_state_dict(checkpoint['model'])
def _train_epoch(self):
for noisy, clean in tqdm(self.train_dataloader, desc='training'):
noisy = noisy.to(self.device)
clean = clean.to(self.device)
clean = clean[:, self.ear_idx, :]
downsampled_noisy = self.downsample(noisy)
enhanced = self.model_amp(self.model_denoising(downsampled_noisy)).squeeze(1)
upsampled_enhanced = self.upsample(enhanced)
downsampled_clean = self.downsample(clean)
upsampled_clean = self.upsample(downsampled_clean)
if self.loudnorm:
# normalize the loudness
upsampled_enhanced = self.ln(upsampled_enhanced)
upsampled_clean = self.ln(upsampled_clean)
# hard clipping enhanced signal
upsampled_enhanced = torch.clamp(upsampled_enhanced, -1, 1)
# normal hearing simulation
sim_clean = self.normalhearingmodel(upsampled_clean)
# hearing loss simulation
sim_enhanced = self.hearinglossmodel(upsampled_enhanced)
loss = self.loss_func(sim_enhanced, sim_clean)
self.optimizer.zero_grad()
loss.backward()
if self.optimize_denoising_network:
torch.nn.utils.clip_grad_norm_(list(self.model_amp.parameters()) +
list(self.model_denoising.parameters()), self.clip_grad_norm_value)
else:
torch.nn.utils.clip_grad_norm_(self.model_amp.parameters(), self.clip_grad_norm_value)
self.optimizer.step()
self.writer.add_scalars('loss', {'loss': loss}, self.global_step)
self.global_step += 1
@torch.no_grad()
def _validation_epoch(self, epoch):
total_loss = 0
stoi_scores = []
for step, (noisy, clean) in tqdm(enumerate(self.validation_dataloader), desc='validating'):
noisy = noisy.to(self.device)
clean = clean.to(self.device)
clean = clean[:, self.ear_idx, :]
downsampled_noisy = self.downsample(noisy)
enhanced = self.model_amp(self.model_denoising(downsampled_noisy)).squeeze(1)
upsampled_enhanced = self.upsample(enhanced)
downsampled_clean = self.downsample(clean)
upsampled_clean = self.upsample(downsampled_clean)
raw_clean = upsampled_clean.detach().squeeze(0).cpu().numpy()
raw_enhanced = upsampled_enhanced.detach().squeeze(0).cpu().numpy()
if self.loudnorm:
# normalize the loudness
upsampled_enhanced = self.ln(upsampled_enhanced)
upsampled_clean = self.ln(upsampled_clean)
# hard clipping enhanced signal
upsampled_enhanced = torch.clamp(upsampled_enhanced, -1, 1)
# normal hearing simulation
sim_clean = self.normalhearingmodel(upsampled_clean)
# hearing loss simulation
sim_enhanced = self.hearinglossmodel(upsampled_enhanced)
loss = self.loss_func(sim_enhanced, sim_clean)
sim_clean = sim_clean.detach().squeeze(0).cpu().numpy()
sim_enhanced = sim_enhanced.detach().squeeze(0).cpu().numpy()
stoi_scores.append(stoi(sim_clean, sim_enhanced, self.sr))
total_loss += loss
if step < 3:
sf.write(os.path.join(self.sample_path, 'enhanced_epoch{}_sample{}.wav'.format(epoch, step)),
sim_enhanced, self.sr)
sf.write(os.path.join(self.sample_path, 'raw_epoch{}_sample{}.wav'.format(epoch, step)),
raw_enhanced, self.sr)
msbg_enhanced = self.msbg_ear.process(np.clip(raw_enhanced, -1, 1))[0]
sf.write(os.path.join(self.sample_path, 'msbg_enhanced_epoch{}_sample{}.wav'.format(epoch, step)),
msbg_enhanced, self.sr)
if epoch == self.save_checkpoint_interval:
sf.write(os.path.join(self.sample_path, 'clean_sample{}.wav'.format(step)),
sim_clean, self.sr)
msbg_clean = self.normal_msbg_ear.process(raw_clean)[0]
sf.write(os.path.join(self.sample_path, 'msbg_clean_sample{}.wav'.format(step)),
msbg_clean, self.sr)
ave_stoi = np.array(stoi_scores).mean()
self.writer.add_scalars('val_loss', {'val_loss': total_loss / len(self.validation_dataloader),
'stoi_ave': ave_stoi}, epoch)
return ave_stoi
def train(self):
for epoch in range(self.start_epoch, self.epochs + 1):
self._set_train_mode(self.model_amp)
if self.optimize_denoising_network:
self._set_train_mode(self.model_denoising)
else:
self._set_eval_mode(self.model_denoising)
self._train_epoch()
if epoch % self.save_checkpoint_interval == 0:
if self.optimize_denoising_network:
self._set_eval_mode(self.model_denoising)
self._set_eval_mode(self.model_amp)
score = self._validation_epoch(epoch)
self._save_checkpoint(self.model_amp, 'model_amp', epoch, score)
if self.optimize_denoising_network:
self._save_checkpoint(self.model_denoising, 'model_denoising', epoch, score)
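# Minimal usage sketch, assuming the models, optimizer, loss function and
# dataloaders have been constructed elsewhere (their constructors are not
# part of this file):
#
#   config = toml.load('config.toml')
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   trainer = Trainer(config, model_denoising, model_amp, optimizer,
#                     loss_func, train_dataloader, validation_dataloader,
#                     device)
#   trainer.train()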
module Derive.Enum
import Data.Nat -- LTE example
import public Derive.Common
%language ElabReflection
-- Tooling for basic interfaces of Enumeration types, e.g. data Foo = Biz | Baz
-- This should also support indexed enum types, because our operations are just
-- about the constructors.
-- Use of this module automatically imports Language.Reflection and requires
-- %language ElabReflection to be enabled below your imports.
-- Examples at module bottom
-- The extra constructor checks to ensure Enumeration type can add a fair chunk to
-- the compile time, best to place your types you're deriving for into a module
-- that doesn't change often.
checkList : List a -> List b -> Elab ()
checkList xs ys =
let lenxs = length xs
lenys = length ys
in case compare lenxs lenys of
LT => fail $ "Provided list is too short to exactly cover all constructors. "
++ "Given " ++ show lenxs ++ ", needs " ++ show lenys ++ " elements."
GT => fail $ "Provided list is too long to exactly cover all constructors. "
++ "Given " ++ show lenxs ++ ", needs " ++ show lenys ++ " elements."
EQ => pure ()
enumName : TTImp -> Maybe Name
enumName (IVar _ n) = Just n
enumName (IApp _ (IVar _ n) r) = Just n
enumName _ = Nothing
-- An enum type's constructors are all non-explicit or just an ivar, and its
-- type is either the first argument of an application or an ivar.
-- e.g. A : {g : Nat} -> Foo g
-- B : Foo Z
-- C : Foo Z
-- e.g. data Foo : Nat -> Type where
-- data Foo : Type where
isEnumType : Name -> Elab Bool
isEnumType n = do
cons <- constructors n
pure $ all isEnumCon (map snd cons)
where
isEnumCon : TTImp -> Bool
isEnumCon (IPi _ _ ExplicitArg _ _ retty) = False
isEnumCon (IPi _ _ _ _ _ retty) = isEnumCon retty
isEnumCon _ = True -- we got to the end without explicit args
-- combine our checks into one spot:
-- 1. is a given TTImp representing a type
-- 2. do its constructors have no explicit arguments
checkEnumType : TTImp -> Elab Name
checkEnumType ttimp = do
Just n <- pure $ enumName ttimp
| _ => fail "Failed to resolve type name"
guard !(isEnumType n) $ show n ++ " is not an enumeration type."
pure n
export
%macro
showEnum : Elab (x -> String)
showEnum = do
Just (IPi _ _ _ _ ty `(String)) <- goal
| _ => fail "Required type is not: x -> String"
n <- checkEnumType ty
cns <- conNames n
check `(\lam => ~(ICase eFC `(lam) ty (clause <$> cns)))
where
clause : Name -> Clause
clause n = PatClause eFC (IVar eFC n) (IPrimVal eFC (Str (nameStr n)))
export
%macro
eqEnum : Elab (x -> x -> Bool)
eqEnum = do
Just (IPi _ _ _ _ ty1 (IPi _ _ _ _ ty2 `(Prelude.Basics.Bool))) <- goal
| _ => fail "Required type is not: x -> x -> Bool"
n1 <- checkEnumType ty1
n2 <- checkEnumType ty2
guard (nameStr n1 == nameStr n2) "Required type is not: x -> x -> Bool"
conns <- conNames n1
check `(\x,y => ~(casex conns `(x) `(y)))
where
-- pattern for a 'case': `(Biz => case ~y of Biz => True; _ => False)
casey : (y : TTImp) -> (con : Name) -> Clause
casey y con = PatClause eFC (IVar eFC con) $ ICase eFC y `(_)
[ PatClause eFC (IVar eFC con) `(True)
, PatClause eFC `( _ ) `(False)]
-- `(case ~x of Biz => rpatterns y Biz; Baz => rpatterns y Baz)
casex : (cons : List Name) -> (x : TTImp) -> (y : TTImp) -> TTImp
casex cs x y = ICase eFC x `(_) (map (casey y) cs)
{- casex: case x of
casey: Biz => case y of
Biz => True
_ => False -}
||| This orders enum constructors by their position in the datatype
||| data Foo = Biz | Baz Biz < Baz
export
%macro
compareEnum : Elab (x -> x -> Ordering)
compareEnum = do
Just (IPi _ _ _ _ ty1 (IPi _ _ _ _ ty2 `(Prelude.EqOrd.Ordering))) <- goal
| _ => fail "Required type is not: x -> x -> Ordering"
n1 <- checkEnumType ty1
n2 <- checkEnumType ty2
guard (nameStr n1 == nameStr n2) "Required type is not: x -> x -> Ordering"
cns <- conNames n1
clauses <- traverse clause (zip [0 .. intLength cns] cns)
let toInt = \x => ICase eFC x `(_) clauses
check `(\a,b => compare {ty=Int} ~(toInt `(a)) ~(toInt `(b)))
where
clause : (Int, Name) -> Elab Clause
clause (i,n) = pure $ PatClause eFC (IVar eFC n) !(quote i)
||| Maps an Int value to each constructor.
||| Provide a more custom list to skip some elements. e.g.
||| enumTo ([0..12] ++ [14..30]) would skip assigning 13
export
%macro
enumTo : List Int -> Elab (x -> Int)
enumTo xs = do
Just (IPi _ _ _ _ ty `(Int)) <- goal
| _ => fail "Required type is not: x -> Int"
n <- checkEnumType ty
cons <- conNames n
checkList xs cons
clauses <- traverse clause (zip xs cons)
check `(\lam => ~(ICase eFC `(lam) `(_) clauses))
where
clause : (a, Name) -> Elab Clause
clause (i,n) = pure $ PatClause eFC (IVar eFC n) !(quote i)
||| Maps x's constructors to given List of Ints, resulting function yields
||| Nothing if given an Int that is not mapped.
export
%macro
enumFrom : List Int -> Elab (Int -> Maybe x)
enumFrom xs = do
Just (IPi _ _ _ _ `(Int) `(Prelude.Types.Maybe ~(ty))) <- goal
| _ => fail "Required type is not: Int -> Maybe x"
n <- checkEnumType ty
logTerm "enumfrom" 1 "ty" ty
logTerm "enumfrom" 1 "nam" (IVar eFC n)
cons <- conNames n
checkList xs cons
clauses <- traverse clause (zip xs cons)
let catchall_clause = [PatClause eFC `(_) `(Nothing)]
check `(\lam => ~(ICase eFC `(lam) `(_) (clauses ++ catchall_clause)))
where
clause : (a, Name) -> Elab Clause
clause (i,n) = pure $ PatClause eFC !(quote i) `(Just ~(IVar eFC n))
||| Maps x's constructors to given List of Ints, resulting function's behavior
||| is undefined if given an Int that is not mapped.
export
%macro
unsafeEnumFrom : List Int -> Elab (Int -> x)
unsafeEnumFrom xs = do
Just (IPi _ _ _ _ `(Int) ty) <- goal
| _ => fail "Required type is not: Int -> x"
n <- checkEnumType ty
cons <- conNames n
checkList xs cons
clauses <- traverse clause (zip xs cons)
check `(\lam => assert_total $ ~(ICase eFC `(lam) `(_) clauses))
where
clause : (a, Name) -> Elab Clause
clause (i,n) = pure $ PatClause eFC !(quote i) (IVar eFC n)
-------------------------------------------------
-- Examples
-------------------------------------------------
-- some things are ' to avoid namespace clashes, private isn't as private
-- as it seems
-- simple enumeration type, the main target of this module
data Foo = Biz | Baz
-- indexed enumeration type
data IFoo : Nat -> Type where
NilFoo : IFoo Z
Bop : IFoo (S Z)
-- more complex indexed enumeration type, don't be shocked if testing this
-- doesn't work in your repl, you simply need to tell it what g or b are. This
-- would be the case if you wrote the functions by hand as well and if g or b
-- were 0-use. This is a hassle because we don't even use g or b, possibly just
-- a limitation of the type search or the fact that these instances work on
-- arguments of just Type.
data IFoo2 : Nat -> Type where
Brap : {g : Nat} -> IFoo2 g
Zoppo : b `LTE` 1 => IFoo2 b
Boppo : IFoo2 (S Z)
private
interface ToCode' a where
toCode' : a -> Int
private
interface FromCode' a where
fromCode' : Int -> Maybe a
unsafeFromCode' : Int -> a
private
Show Foo where
show = showEnum
private
Show (IFoo n) where
show = showEnum
private
Show (IFoo2 n) where
show = showEnum
private
Eq Foo where
(==) = eqEnum
private
Ord Foo where
compare = compareEnum
private
ToCode' Foo where
toCode' = enumTo [0,1]
private
FromCode' Foo where
unsafeFromCode' = unsafeEnumFrom [0,1]
fromCode' = enumFrom [0,1]
eqTest1 : Baz == Baz = True
eqTest1 = Refl
eqTest2 : Baz == Biz = False
eqTest2 = Refl
enumToTest1 : toCode' Baz == 1 = True
enumToTest1 = Refl
enumFromTest1 : unsafeFromCode' 0 == Biz = True
enumFromTest1 = Refl
enumFromTest2 : unsafeFromCode' (-12345) == Baz = True
enumFromTest2 = ?crash
enumFromSafeTest1 : fromCode' 0 == Just Biz = True
enumFromSafeTest1 = Refl
enumFromSafeTest2 : fromCode' 3 == Nothing {ty=Foo} = True
enumFromSafeTest2 = Refl
showTest1 : show Baz == "Baz" = True
showTest1 = Refl
showTest2 : show Bop == "Bop" = True
showTest2 = Refl
showTest3 : show (Brap {g=2}) == "Brap" = True
showTest3 = Refl
compareTest1 : Biz > Baz = False
compareTest1 = Refl
compareTest2 : Biz < Baz = True
compareTest2 = Refl
compareTest3 : compare Biz Baz = LT
compareTest3 = Refl
In Canada, where the original recording of "West End Girls" had already been a minor hit in 1985, the re-recorded version was issued as a single in early 1986. The re-recorded song entered the chart in March 1986, peaking at number one for one week on 17 May 1986. In the United States, "West End Girls" debuted on the Billboard Hot 100 at number 71, reaching the number one position on 10 May 1986, and remained on the chart for 20 weeks. The song also peaked at number one on Billboard's Hot Dance Music/Club Play chart for two weeks.
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e150m3_7limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
\section{Trust Flow}
Everything is in place to define the indirect trust from one player to another.
\subimport{common/definitions/}{indirecttrust.tex}
\noindent Note that $Tr_{A \rightarrow B} \geq DTr_{A \rightarrow B}$. The next theorem shows that $Tr_{A \rightarrow B}$ is
finite.
\subimport{thesis/theorems/}{convergencetheorem.tex}
\subimport{common/proofsketches/}{convergenceproofsketch.tex}
Full proofs of all theorems and lemmas can be found in the Appendix.
In the setting of \texttt{TransitiveGame(}$\mathcal{G}$\texttt{,}$A$\texttt{,}$B$\texttt{)}, we make use of the notation
$Loss_A = Loss_{A, j}$, where $j$ is a turn in which the game has converged. It is important to note that $Loss_A$ is not
the same for repeated executions of this kind of game, since the order in which players are chosen may differ between
executions and the conservative players are free to choose which incoming direct trusts they will steal and how much from
each.
Let $G$ be a weighted directed graph. We will investigate the maximum flow on this graph. For an introduction to the
maximum flow problem see \cite{clrs} p. 708. Considering each edge's capacity as its weight, a flow assignment
$X = [x_{vw}]_{\mathcal{V} \times \mathcal{V}}$ with a source $A$ and a sink $B$ is valid when:
\begin{equation}
\label{flow1}
\forall (v, w) \in \mathcal{E}, x_{vw} \leq c_{vw} \mbox{ and}
\end{equation}
\begin{equation}
\label{flow2}
\forall v \in \mathcal{V} \setminus \{A,B\}, \sum\limits_{w \in N^{+}(v)}x_{wv} = \sum\limits_{w \in N^{-}(v)}x_{vw}
\enspace.
\end{equation}
We do not suppose any skew symmetry in $X$. The flow value is $\sum\limits_{v \in N^{+}\left(A\right)}x_{Av}$, which is
proven to be equal to $\sum\limits_{v \in N^{-}\left(B\right)}x_{vB}$. There exists an algorithm that returns the maximum
possible flow from $A$ to $B$, namely $MaxFlow\left(A, B\right)$. This algorithm evidently needs full knowledge of the
graph. The fastest version of this algorithm runs in $O\left(|\mathcal{V}||\mathcal{E}|\right)$ time \cite{maxflownm}. We
refer to the flow value of $MaxFlow\left(A, B\right)$ as $maxFlow\left(A, B\right)$.
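As a toy illustration of these definitions, consider $\mathcal{V} = \left\{A, C, B\right\}$ with capacities
$c_{AC} = 5$, $c_{CB} = 3$ and $c_{AB} = 2$. The assignment $x_{AC} = x_{CB} = 3$, $x_{AB} = 2$ satisfies both
(\ref{flow1}) and (\ref{flow2}) and has flow value $5$; since the edges entering $B$ have total capacity
$3 + 2 = 5$, no valid assignment can exceed it, hence $maxFlow\left(A, B\right) = 5$.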
We will now introduce two lemmas that will be used to prove one of the central results of this work, the Trust Flow
theorem.
\subimport{thesis/lemmas/}{flowgamelemma.tex}
\subimport{common/proofsketches/}{flowgameproofsketch.tex}
\subimport{thesis/lemmas/}{gameflowlemma.tex}
\subimport{common/proofsketches/}{gameflowproofsketch.tex}
\subimport{common/theorems/}{trustflowtheorem.tex}
\subimport{thesis/proofs/}{trustflowproof.tex}
\noindent We note that the maxFlow is the same in the following two cases: when a player chooses the evil strategy and
when the same player chooses a variation of the evil strategy where she does not nullify her outgoing direct trust.
Further justification of trust transitivity through the use of $MaxFlow$ can be found in the sociological experiment
conducted in \cite{kmrs}.
Here we see another important theorem that gives the basis for risk-invariant transactions between different, possibly
unknown, parties.
\subimport{common/theorems/}{riskinvtheorem.tex}
\subimport{common/proofs/}{riskinvproof.tex}
\noindent It is intuitively obvious that it is possible for $A$ to reduce her outgoing direct trust in a manner that
achieves (\ref{primetrust}), since $maxFlow\left(A, B\right)$ is continuous with respect to $A$'s outgoing direct trusts. We
leave this calculation as part of further research.
|
Amaia launches its first foray near VERMOSA, Ayala Land's 700-hectare mixed-use township development in Cavite, with Amaia Series.
Amaia Series is the first affordable townhouse residential development in the vicinity of this modern suburban community. It is thoughtfully designed for families who would like to make it big someday. Its townhouses are afforded the gift of space in a variety of ways to inspire creativity among residents.
It is built with energizing greens and open spaces with enriching features and amenities.
Amaia Series is privileged to stand next to a fully master-planned development. Within the development are future retail areas, business districts, institutional establishments, recreation opportunities and training facilities.
Amaia Series is located along Patinding Araw Road, Imus, Cavite. It is easily accessible from Makati, Manila, Muntinlupa and even Tagaytay, all less than an hour away through a variety of road connections: Daang Hari Road, SLEX via the Muntinlupa-Cavite Expressway (MCX), CAVITEX, and the upcoming CALAX Road.
(* $Id: CLogic.v,v 1.9 2003/03/12 08:57:44 lcf Exp $ *)
Require Export Compare_dec.
Require Export Basics.
Require Export ZArith.
Require Export ZArithRing.
Require Export Div2.
Require Export Wf_nat.
(* Tex_Prose
\chapter{Extending the {\tt Coq} Logic}
Because notions of apartness and order have computational meaning, we
will have to define logical connectives in \verb!Set!. In order to
keep a syntactic distinction between types of terms, we define \verb!CProp!
as an alias for \verb!Set!, to be used as type of (computationally meaningful)
propositions.
Falsehood and negation will typically not be needed in \verb!CProp!, as they are used to refer
to negative statements, which carry no computational meaning. Therefore, we will
simply define a negation operator from \verb!Set! to \verb!Prop! .
Conjunction, disjunction and existential quantification will have to come in
multiple varieties. For conjunction, we will need four operators of type
$s_1\rightarrow s_2\rightarrow s_3$, where $s_3$ is \verb!Prop! if both $s_1$ and $s_2$
are \verb!Prop! and \verb!CProp! otherwise.
Disjunction is slightly different, as it will always return a value in \verb!CProp! even
if both arguments are propositions. This is because in general
it may be computationally important to know which of the two branches of the disjunction actually holds.
Existential quantification will similarly always return a value in \verb!CProp!.
\begin{notation}
\begin{itemize}
\item \verb!CProp!-valued conjunction will be denoted as {\tt *};
\item \verb!CProp!-valued disjunction will be denoted as \verb!+!;
\item in both preceding cases, objects of type \verb!Prop! will be enclosed in curly braces;
\item Existential quantification will be written as \verb!{x:A & B}! or \verb!{x:A | B}!,
according to whether \verb!B! is respectively of type \verb!CProp! or \verb!Prop!.
\end{itemize}
\end{notation}
In a few specific situations we {\em do} need truth, false and negation in \verb!CProp!, so we will
also introduce them; this should be a temporary option\ldots
Finally, for other formulae that might occur in our \verb!CProp!-valued
propositions, such as \verb!(le m n)!, we have to introduce a \verb!CProp!-valued
version.
*)
Definition CProp := Set.
Section Basics.
(* Tex_Prose
\section{Basics}
Here we treat conversion from \verb!Prop! to \verb!CProp! and vice versa,
and some basic connectives in \verb!CProp!.
*)
(* Begin_Tex_Verb *)
Definition Not := [P:CProp](P->False).
Definition Iff [A,B:CProp] : CProp := (A->B)*(B->A).
Inductive CFalse : CProp :=.
Inductive CTrue : CProp := CI : CTrue.
Inductive sig2P [A:Set;P:A->Prop;Q:A->CProp] : CProp
:= exist2P : (x:A)(P x) -> (Q x) -> (sig2P A P Q).
Inductive sigS2P [A:Set;P:A->CProp;Q:A->Prop] : CProp
:= existS2P : (x:A)(P x) -> (Q x) -> (sigS2P A P Q).
Definition proj1_sig2 [A:Set;P,Q:A->Prop]:=
[e:(sig2 A P Q)]Cases e of (exist2 a b c) => a end.
Definition proj2a_sig2 [A:Set;P,Q:A->Prop]:=
[e:(sig2 A P Q)]
<[e:(sig2 A P Q)](P (proj1_sig2 A P Q e))>Cases e of (exist2 a b c) => b end.
Definition proj2b_sig2 [A:Set;P,Q:A->Prop]:=
[e:(sig2 A P Q)]
<[e:(sig2 A P Q)](Q (proj1_sig2 A P Q e))>Cases e of (exist2 a b c) => c end.
Definition proj1_sig2P [A:Set;P,Q:A->?]:=
[e:(sig2P A P Q)]Cases e of (exist2P a b c) => a end.
Definition proj2a_sig2P [A:Set;P,Q:A->?]:=
[e:(sig2P A P Q)]
<[e:(sig2P A P Q)](P (proj1_sig2P A P Q e))>Cases e of (exist2P a b c) => b end.
Definition proj2b_sig2P [A:Set;P,Q:A->?]:=
[e:(sig2P A P Q)]
<[e:(sig2P A P Q)](Q (proj1_sig2P A P Q e))>Cases e of (exist2P a b c) => c end.
Definition proj1_sigS2P [A:Set;P,Q:A->?]:=
[e:(sigS2P A P Q)]Cases e of (existS2P a b c) => a end.
Definition proj2a_sigS2P [A:Set;P,Q:A->?]:=
[e:(sigS2P A P Q)]
<[e:(sigS2P A P Q)](P (proj1_sigS2P A P Q e))>Cases e of (existS2P a b c) => b end.
Definition proj2b_sigS2P [A:Set;P,Q:A->?]:=
[e:(sigS2P A P Q)]
<[e:(sigS2P A P Q)](Q (proj1_sigS2P A P Q e))>Cases e of (existS2P a b c) => c end.
Inductive toCProp [A:Prop] : CProp := ts : A->(toCProp A).
(* End_Tex_Verb *)
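(* The three projection functions defined for each strong existential above
   follow a common pattern: proj1_* extracts the witness, while proj2a_* and
   proj2b_* extract the two component proofs, using dependent case analysis
   to give them their precise types. *)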
(* Begin_Tex_Verb *)
Lemma toCProp_e : (A:Prop)(toCProp A)->(P:Prop)(A->P)->P.
(* End_Tex_Verb *)
Intros A H P H0.
Elim H.
Intros H1.
Apply H0.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Definition CNot := [A:Prop]A->CFalse.
Lemma Ccontrapos':(A,phi:Prop)(A->phi)->(~phi)->(CNot A).
(* End_Tex_Verb *)
Proof.
Intros A phi H H0.
Intro H1.
Elim H0.
Auto.
Qed.
(* Begin_Tex_Verb *)
Inductive andl [A:CProp][B:Prop] : CProp := conjl : A->B->(andl A B).
Inductive andr [A:Prop][B:CProp] : CProp := conjr : A->B->(andr A B).
Inductive andps [A,B:Prop] : CProp := conjps : A->B->(andps A B).
Inductive sumorr [A:Prop;B:CProp] : CProp :=
inleft' : A -> (sumorr A B)
| inright' : B -> (sumorr A B).
(* End_Tex_Verb *)
End Basics.
(* Syntax and pretty printing:
- disjunction will be written as P+Q;
- conjunction will be written as P*Q;
- arguments of type Prop should be enclosed in {curly braces}.
*)
Arguments Scope sumbool [type_scope type_scope].
Arguments Scope sumor [type_scope type_scope].
Arguments Scope sumorr [type_scope type_scope].
Arguments Scope sum [type_scope type_scope].
Notation "{ A } + { B }" := ((sumbool A B) :: CProp) (at level 1).
Notation "A + { B }" := ((sumor A B) :: CProp) (at level 4).
Notation "{ A } + B" := (sumorr A B) (at level 1).
Notation "A + B" := ((sum A B) :: CProp) (at level 4).
Arguments Scope andps [type_scope type_scope].
Arguments Scope andl [type_scope type_scope].
Arguments Scope andr [type_scope type_scope].
Arguments Scope prod [type_scope type_scope].
Notation "{ A } * { B }" := (andps A B) (at level 1).
Notation "A * { B }" := (andl A B) (at level 3).
Notation "{ A } * B" := (andr A B) (at level 1).
Notation "A * B" := ((prod A B) :: CProp) (at level 3).
Arguments Scope sig [type_scope type_scope].
Arguments Scope sig2 [type_scope type_scope type_scope].
Notation "{ x : A | P }" := ((sig A [x:A]P) :: CProp) (at level 1).
Notation "{ x : A | P | Q }" := ((sig2 A [x:A]P [x:A]Q) :: CProp) (at level 1).
Arguments Scope sigS [type_scope type_scope].
Arguments Scope sigS2 [type_scope type_scope type_scope].
Notation "{ x : A & P }" := ((sigS A [x:A]P) :: CProp) (at level 1).
Notation "{ x : A & P & Q }" := ((sigS2 A [x:A]P [x:A]Q) :: CProp) (at level 1).
Arguments Scope sig2P [type_scope type_scope type_scope].
Arguments Scope sigS2P [type_scope type_scope type_scope].
Notation "{ x : A | P & Q }" := (sig2P A [x:A]P [x:A]Q) (at level 1).
Notation "{ x : A & P | Q }" := (sigS2P A [x:A]P [x:A]Q) (at level 1).
Syntax constr level 5:
sig_print [ (sig $c1 [$c2:$c1]$c3) ] -> [ "{" $c2 ":" $c1 " | " $c3 "}" ].
Syntax constr level 5:
sigS_print [ (sigS $c1 [$c2:$c1]$c3) ] -> [ "{" $c2 ":" $c1 " & " $c3 "}" ].
Syntax constr level 5:
sig2_print [ (sig2 $c1 [$c2:$c1]$c3 [$c2:$c1]$c4) ] -> [ "{" $c2 ":" $c1 " | (" $c3 ") | (" $c4 ")}" ].
Syntax constr level 5:
sigS2_print [ (sigS2 $c1 [$c2:$c1]$c3 [$c2:$c1]$c4) ] -> [ "{" $c2 ":" $c1 " & (" $c3 ") & (" $c4 ")}" ].
Syntax constr level 5:
cprop_hide [ (($c1) :: CProp) ] -> [ $c1:L ].
(*
Section teste.
Variable A:Set.
Variables P,Q:A->Prop.
Variables X,Y:A->CProp.
Check {x:A | (P x)}.
Check {x:A & (X x)}.
Check {x:A & (X x) & (Y x)}.
Check {x:A | (P x) | (Q x)}.
Check {x:A | (P x) & (X x)}.
Check {x:A & (X x) | (P x)}.
End teste.
*)
Hints Resolve CI : core.
Section Logical_Remarks.
(* Tex_Prose
We prove a few logical results which are helpful to have as lemmas when {\tt A}, {\tt B} and
{\tt C} are non-trivial.
*)
(* Begin_Tex_Verb *)
Lemma CNot_Not_or : (A,B,C:CProp)(A->(Not C))->(B->(Not C))->~(Not A+B)->(Not C).
(* End_Tex_Verb *)
Intros.
Intro.
Apply H1.
Intro.
Elim H3.
Intro; Apply H; Auto.
Intro; Apply H0; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma Cnot_not_or : (A,B:Prop;C:CProp)(A->(Not C))->(B->(Not C))->~(Not {A}+{B})->(Not C).
(* End_Tex_Verb *)
Intros.
Intro.
Apply H1.
Intro.
Elim H3.
Intro; Apply H; Auto.
Intro; Apply H0; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma Cnot_Not_or : (A:Prop;B,C:CProp)(A->(Not C))->(B->(Not C))->~(Not {A}+B)->(Not C).
(* End_Tex_Verb *)
Intros.
Intro.
Apply H1.
Intro.
Elim H3.
Intro; Apply H; Auto.
Intro; Apply H0; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma CNot_not_or : (A:CProp;B:Prop;C:CProp)(A->(Not C))->(B->(Not C))->~(Not A+{B})->(Not C).
(* End_Tex_Verb *)
Intros.
Intro.
Apply H1.
Intro.
Elim H3.
Intro; Apply H; Auto.
Intro; Apply H0; Auto.
Qed.
End Logical_Remarks.
Section CRelation_Definition.
(* Tex_Prose
\section{CProp-valued Relations}
Similar to \verb!Relations.v! in Coq's standard library.
*)
(* Begin_Tex_Verb *)
Variable A: Set.
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Definition Crelation := A -> A -> CProp.
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Variable R: Crelation.
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Definition Creflexive : CProp := (x: A) (R x x).
Definition Ctransitive : CProp := (x,y,z: A)(R x y)->(R y z)->(R x z).
Definition Csymmetric : CProp := (x,y: A) (R x y) -> (R y x).
Definition Cequiv : CProp := Creflexive *
(Ctransitive * Csymmetric).
(* End_Tex_Verb *)
End CRelation_Definition.
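(* For example, the relation Clt defined below is a Crelation on nat which
   is Ctransitive, but neither Creflexive nor Csymmetric. *)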
(* Begin_Tex_Verb *)
Inductive eqs [A : Set; x: A] :A->CProp := Crefl_equal :(eqs A x x).
(* End_Tex_Verb *)
(*
Section Set_equality.
* Tex_Prose
\begin{convention} Let \verb!A! be a set and \verb!x,y:A!.
\end{convention}
*
Variable A: Set.
Variable x,y: A.
* Begin_Tex_Verb *
Lemma toCProp_eq:(x=y)->(eqs ? x y).
* End_Tex_Verb *
Proof.
Intros.
Apply (eq_rec A x [a:A](eqs ? x a)).
Apply Crefl_equal.
Assumption.
Qed.
* Begin_Tex_Verb *
Lemma Set_eq_to:(eqs ? x y)->(x=y).
* End_Tex_Verb *
Proof.
Intros.
Apply (eqs_ind A x [a:A;H:(eqs A x a)](x=a)).
Apply refl_equal.
Assumption.
Qed.
End Set_equality.
* Begin_Tex_Verb *
Theorem Set_sym_equal:(A:Set;x,y:A)(eqs ? x y)->(eqs ? y x).
* End_Tex_Verb *
Proof.
Intros.
Apply toCProp_eq.
Apply sym_equal.
Apply Set_eq_to.
Assumption.
Qed.
* Begin_Tex_Verb *
Theorem Set_trans_equal:(A:Set; x,y,z:A)(eqs ? x y)->(eqs ? y z)->(eqs ? x z).
* End_Tex_Verb *
Proof.
Intros.
Apply toCProp_eq.
Apply trans_equal with y.
Apply Set_eq_to.
Assumption.
Apply Set_eq_to.
Assumption.
Qed.
*)
Section le_odd.
(* Tex_Prose
\section{The relation {\tt le}, {\tt lt}, {\tt odd} and {\tt even}}
*)
(* Begin_Tex_Verb *)
Inductive Cle [n:nat] : nat -> CProp
:= Cle_n : (Cle n n)
| Cle_S : (m:nat)(Cle n m)->(Cle n (S m)).
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Theorem Cnat_double_ind : (R:nat->nat->CProp)
((n:nat)(R O n)) -> ((n:nat)(R (S n) O))
-> ((n,m:nat)(R n m)->(R (S n) (S m)))
-> (n,m:nat)(R n m).
(* End_Tex_Verb *)
Proof.
Induction n; Auto.
Induction m; Auto.
Qed.
(* Begin_Tex_Verb *)
Theorem my_Cle_ind : (n:nat; P:(nat->CProp))
(P n)
->((m:nat)(Cle n m)->(P m)->(P (S m)))
->(n0:nat)(Cle n n0)->(P n0).
(* End_Tex_Verb *)
Intros n P.
Generalize (Cle_rec n [n0:nat][H:(Cle n n0)](P n0)); Intro.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Theorem Cle_n_S : (n,m:nat)(Cle n m)->(Cle (S n) (S m)).
(* End_Tex_Verb *)
Intros n m H.
Pattern m.
Apply (my_Cle_ind n).
Apply Cle_n.
Intros.
Apply Cle_S.
Assumption.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma toCle : (m,n:nat)(le m n)->(Cle m n).
(* End_Tex_Verb *)
Intros m.
Induction m.
Induction n.
Intro H.
Apply Cle_n.
Intros n0 H H0.
Apply Cle_S.
Apply H.
Apply le_O_n.
Induction n.
Intro.
ElimType False.
Inversion H.
Intros n0 H H0.
Generalize (le_S_n ?? H0); Intro H1.
Generalize (Hrecm ? H1); Intro H2.
Apply Cle_n_S.
Assumption.
Qed.
Hints Resolve toCle.
(* Begin_Tex_Verb *)
Lemma Cle_to : (m,n:nat)(Cle m n)->(le m n).
(* End_Tex_Verb *)
Intros m n H.
Elim H.
Apply le_n.
Intros m0 s H0.
Apply le_S.
Assumption.
Qed.
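(* Together, toCle and Cle_to show that Cle is logically equivalent to the
   standard library relation le; they let us transfer facts about le to its
   CProp-valued counterpart and back. *)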
(* Begin_Tex_Verb *)
Definition Clt [m,n:nat] := (Cle (S m) n) : CProp.
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Lemma toCProp_lt : (m,n:nat)(lt m n)->(Clt m n).
(* End_Tex_Verb *)
Unfold lt.
Unfold Clt.
Intros m n H.
Apply toCle.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma Clt_to : (m,n:nat)(Clt m n)->(lt m n).
(* End_Tex_Verb *)
Unfold lt.
Unfold Clt.
Intros m n H.
Apply Cle_to.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma Cle_le_S_eq : (p,q:nat)(le p q)->{le (S p) q}+{p=q}.
(* End_Tex_Verb *)
Intros p q H.
Elim (gt_eq_gt_dec p q); Intro H0.
Elim H0; Auto.
ElimType False.
Apply lt_not_le with q p; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma Cnat_total_order : (m,n: nat) ~ m=n -> {lt m n} + {lt n m}.
(* End_Tex_Verb *)
Intros m n H.
Elim (gt_eq_gt_dec m n).
Intro H0.
Elim H0; Intros.
Left; Auto.
ElimType False.
Auto.
Auto.
Qed.
(* Begin_Tex_Verb *)
Mutual Inductive
Codd : nat->CProp :=
Codd_S : (n:nat)(Ceven n)->(Codd (S n))
with
Ceven : nat->CProp :=
Ceven_O : (Ceven (0)) |
Ceven_S : (n:nat)(Codd n)->(Ceven (S n)).
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Lemma Codd_even_to :
(n:nat)((Codd n)->(odd n)) /\ ((Ceven n) -> (even n)).
(* End_Tex_Verb *)
Induction n.
Split.
Intro H.
Inversion H.
Intro.
Apply even_O.
Intros n0 H.
Elim H; Intros H0 H1.
Split.
Intro H2.
Inversion H2.
Apply odd_S.
Apply H1.
Assumption.
Intro H2.
Inversion H2.
Apply even_S.
Apply H0.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma Codd_to : (n:nat)(Codd n)->(odd n).
(* End_Tex_Verb *)
Intros n H.
Elim (Codd_even_to n); Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma Ceven_to : (n:nat)(Ceven n) -> (even n).
(* End_Tex_Verb *)
Intros n H.
Elim (Codd_even_to n); Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma to_Codd_even :
(n:nat)((odd n)->(Codd n)) * ((even n)->(Ceven n)).
(* End_Tex_Verb *)
Induction n.
Split.
Intro H.
ElimType False.
Inversion H.
Intro H.
Apply Ceven_O.
Intros n0 H.
Elim H; Intros H0 H1.
Split.
Intro H2.
Apply Codd_S.
Apply H1.
Inversion H2.
Assumption.
Intro H2.
Apply Ceven_S.
Apply H0.
Inversion H2.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma to_Codd : (n:nat)(odd n)->(Codd n).
(* End_Tex_Verb *)
Intros.
Elim (to_Codd_even n); Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma to_Ceven : (n:nat)(even n)->(Ceven n).
(* End_Tex_Verb *)
Intros.
Elim (to_Codd_even n); Auto.
Qed.
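(* As with Cle and le above, the four preceding lemmas show that Codd and
   Ceven are equivalent to the standard predicates odd and even. *)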
End le_odd.
Section Misc.
(* Tex_Prose
\section{Miscellaneous}
*)
(* Begin_Tex_Verb *)
Lemma CZ_exh : (z:Z){n:nat | z=n} + {n:nat | z=`-n`}.
(* End_Tex_Verb *)
Intro z.
Elim z.
Left.
Exists O.
Auto.
Intro p.
Left.
Exists (convert p).
Rewrite convert_is_POS.
Reflexivity.
Intro p.
Right.
Exists (convert p).
Rewrite min_convert_is_NEG.
Reflexivity.
Qed.
(* Begin_Tex_Verb *)
Lemma Cnats_Z_ind :
(P:Z->CProp)((n:nat)(P n)) -> ((n:nat)(P `-n`)) -> (z:Z)(P z).
(* End_Tex_Verb *)
Intros P H H0 z.
Elim (CZ_exh z); Intros H1.
Elim H1; Intros n H2.
Rewrite H2.
Apply H.
Elim H1; Intros n H2.
Rewrite H2.
Apply H0.
Qed.
(* Begin_Tex_Verb *)
Lemma Cdiff_Z_ind : (P:Z->CProp)((m,n:nat)(P `m-n`)) -> ((z:Z)(P z)).
(* End_Tex_Verb *)
Intros P H z.
Apply Cnats_Z_ind.
Intro n.
Replace (inject_nat `n`) with `n-O`.
Apply H.
Simpl.
Auto with zarith.
Intro n.
Replace `-n` with `O-n`.
Apply H.
Simpl.
Reflexivity.
Qed.
(* Begin_Tex_Verb *)
Lemma Cpred_succ_Z_ind : (P:Z->CProp)(P `0`) ->
((n:Z)(P n)->(P `n+1`)) ->
((n:Z)(P n)->(P `n-1`)) ->
(z:Z)(P z).
(* End_Tex_Verb *)
Intros P H H0 H1 z.
Apply Cnats_Z_ind.
Intro n.
Elim n.
Exact H.
Intros n0 H2.
Replace ((S n0)::Z) with `n0+1`.
Apply H0.
Assumption.
Rewrite inj_S.
Reflexivity.
Intro n.
Elim n.
Exact H.
Intros n0 H2.
Replace `-(S n0)` with `-n0-1`.
Apply H1.
Assumption.
Rewrite inj_S.
Unfold Zs.
Rewrite Zopp_Zplus.
Reflexivity.
Qed.
(*
* Begin_Tex_Verb *
Lemma sum_rec_or : (A,B:Set)(S:Set)(l,r:S)(s:A+B)
(sum_rec A B [_:A+B]S [x:A]l [x:B]r s) = l \/
(sum_rec A B [_:A+B]S [x:A]l [x:B]r s) = r.
* End_Tex_Verb *
Intros. Elim s.
Intros. Left. Reflexivity.
Intros. Right. Reflexivity.
Qed.
*)
(* Begin_Tex_Verb *)
Lemma not_r_sum_rec : (A,B:Set)(S:Set)(l,r:S)(Not B)->(H:A+B)
(sum_rec A B [_:A+B]S [x:A]l [x:B]r H) = l.
(* End_Tex_Verb *)
Intros A B S l r H H0. Elim H0.
Intro a. Reflexivity.
Intro b. Elim H. Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma not_l_sum_rec : (A,B:Set)(S:Set)(l,r:S)(Not A)->(H:A+B)
(sum_rec A B [_:A+B]S [x:A]l [x:B]r H) = r.
(* End_Tex_Verb *)
Intros A B S l r H H0. Elim H0.
Intro a. Elim H. Assumption.
Intros. Reflexivity.
Qed.
End Misc.
(* Tex_Prose
\section{Results about the natural numbers}
We now define a class of predicates on a finite subset of the natural numbers that will be important throughout all our work. Essentially, these are simply setoid predicates, but for clarity we will never write them in that form; instead, we single out the preservation of the setoid equality.
*)
(* Begin_Tex_Verb *)
Definition nat_less_n_pred [n:nat][P:(i:nat)(lt i n)->CProp] :=
(i,j:nat)(i=j)->(H:(lt i n))(H':(lt j n))(P i H)->(P j H').
Definition nat_less_n_pred' [n:nat][P:(i:nat)(le i n)->CProp] :=
(i,j:nat)(i=j)->(H:(le i n))(H':(le j n))(P i H)->(P j H').
(* End_Tex_Verb *)
Section Odd_and_Even.
(* Tex_Prose
\subsection*{Odd and Even}
For our work we will often need to distinguish between even and odd numbers; this case distinction is decidable (\verb!even_odd_dec! in the standard library).
*)
(* Tex_Prose
Next, we prove the usual results about sums of even and odd numbers:
*)
(* Begin_Tex_Verb *)
Lemma even_plus_n_n : (n:nat)(even (plus n n)).
(* End_Tex_Verb *)
Intro n; Induction n.
Auto with arith.
Replace (plus (S n) (S n)) with (S (S (plus n n))).
Apply even_S; Apply odd_S; Apply Hrecn.
Rewrite plus_n_Sm; Simpl; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma even_or_odd_plus : (k:nat)
{j:nat & {k=(plus j j)}+{k=(S (plus j j))}}.
(* End_Tex_Verb *)
Intro k.
Elim (even_odd_dec k); Intro H.
Elim (even_2n k H); Intros j Hj; Exists j; Auto.
Elim (odd_S2n k H); Intros j Hj; Exists j; Auto.
Qed.
(* Tex_Prose
Finally, we prove that an arbitrary natural number can be written in some canonical way.
*)
(* Begin_Tex_Verb *)
Lemma even_or_odd_plus_gt : (i,j:nat)(le i j)->
{k:nat & {j=(plus i (plus k k))}
+ {j=(plus i (S (plus k k)))}}.
(* End_Tex_Verb *)
Intros i j H.
Elim (even_or_odd_plus (minus j i)).
Intros k Hk.
Elim Hk; Intro H0.
Exists k; Left; Rewrite <- H0; Auto with arith.
Exists k; Right; Rewrite <- H0; Auto with arith.
Qed.
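(* A worked instance: for i=2 and j=7 the difference is odd, 7-2 = (S (2+2)),
   so the lemma yields k=2 through the right branch; for i=2 and j=8 the
   difference is even, 8-2 = (3+3), so it yields k=3 through the left one. *)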
End Odd_and_Even.
Hints Resolve even_plus_n_n : arith.
Hints Resolve toCle : core.
Section Natural_Numbers.
(* Tex_Prose
\subsection*{Algebraic Properties}
We now present a series of trivial facts proved with \verb!Omega! that are stated as lemmas to make proofs shorter and to aid in auxiliary definitions. Naming these results allows us to use them in definitions while keeping those definitions concise.
*)
(* Begin_Tex_Verb *)
Lemma Clt_le_weak : (i,j:nat)(Clt i j)->(Cle i j).
(* End_Tex_Verb *)
Intros.
Apply toCle; Apply lt_le_weak; Apply Clt_to; Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma lt_5 : (i,n:nat)(lt i n)->(lt (pred i) n).
(* End_Tex_Verb *)
Intros; Apply le_lt_trans with (pred n).
Apply le_pred; Auto with arith.
Apply lt_pred_n_n; Apply le_lt_trans with i; Auto with arith.
Qed.
(* Begin_Tex_Verb *)
Lemma lt_8 : (m,n:nat)(lt m (pred n))->(lt m n).
(* End_Tex_Verb *)
Intros; Apply lt_le_trans with (pred n); Auto with arith.
Qed.
(* Begin_Tex_Verb *)
Lemma pred_lt : (m,n:nat)(lt m (pred n))->(lt (S m) n).
(* End_Tex_Verb *)
Intros; Apply le_lt_trans with (pred n); Auto with arith.
Apply lt_pred_n_n; Apply le_lt_trans with m.
Auto with arith.
Apply lt_le_trans with (pred n); Auto with arith.
Qed.
(* Begin_Tex_Verb *)
Lemma lt_10 : (i,m,n:nat)(lt O i)->(lt i (pred (plus m n)))->
(lt (pred i) (plus (pred m) (pred n))).
(* End_Tex_Verb *)
Intros; Omega.
Qed.
(* Begin_Tex_Verb *)
Lemma lt_pred' : (m,n:nat)(lt O m)->(lt m n)->
(lt (pred m) (pred n)).
(* End_Tex_Verb *)
Intros m n H H0; Red.
NewDestruct n.
Inversion H0.
Rewrite <- (S_pred m O); Auto.
Simpl.
Auto with arith.
Qed.
(* Begin_Tex_Verb *)
Lemma le_1 : (m,n:nat)(Cle m n)->(le (pred m) n).
(* End_Tex_Verb *)
Intros.
Cut (le m n); [Intro | Apply Cle_to; Assumption].
Apply le_trans with (pred n); Auto with arith.
Apply le_pred; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma le_2 : (i,j:nat)(lt i j)->(le i (pred j)).
(* End_Tex_Verb *)
Intros; Omega.
Qed.
(* Begin_Tex_Verb *)
Lemma plus_eq_one_imp_eq_zero :
(m,n:nat)(le (plus m n) (1))->{m=O}+{n=O}.
(* End_Tex_Verb *)
Intros m n H.
Elim (le_lt_dec m O); Intro.
Left; Auto with arith.
Right; Omega.
Qed.
(* Tex_Prose
We now prove some properties of functions on the natural numbers.
*)
(* Begin_Tex_Verb *)
Variable h:nat->nat.
(* End_Tex_Verb *)
(* Tex_Prose
First we characterize monotonicity by a local condition: if $h(n)<h(n+1)$ for every natural number $n$ then $h$ is monotone. An analogous result holds for weak monotonicity.
*)
(* Begin_Tex_Verb *)
Lemma nat_local_mon_imp_mon : ((i:nat)(lt (h i) (h (S i))))->
(i,j:nat)(lt i j)->(lt (h i) (h j)).
(* End_Tex_Verb *)
Intros H i j H0.
Induction j.
ElimType False; Omega.
Cut (le i j); [Intro H1 | Auto with arith].
Elim (le_lt_eq_dec ?? H1); Intro H2.
Cut (lt (h i) (h j)); [Intro | Apply Hrecj; Assumption].
Cut (lt (h j) (h (S j))); [Intro | Apply H].
Apply lt_trans with (h j); Auto.
Rewrite H2; Apply H.
Qed.
(* Begin_Tex_Verb *)
Lemma nat_local_mon_imp_mon_le : ((i:nat)(le (h i) (h (S i))))->
(i,j:nat)(le i j)->(le (h i) (h j)).
(* End_Tex_Verb *)
Intros H i j H0.
Induction j.
Cut i=O; [Intro H1 | Auto with arith].
Rewrite H1; Apply le_n.
Elim (le_lt_eq_dec ?? H0); Intro H1.
Cut (le (h i) (h j)); [Intro | Apply Hrecj; Auto with arith].
Cut (le (h j) (h (S j))); [Intro | Apply H].
Apply le_trans with (h j); Auto.
Rewrite H1; Apply le_n.
Qed.
(* Tex_Prose
A strictly increasing function is injective:
*)
(* Begin_Tex_Verb *)
Lemma nat_mon_imp_inj : ((i,j:nat)(lt i j)->(lt (h i) (h j)))->
(i,j:nat)((h i)=(h j))->i=j.
(* End_Tex_Verb *)
Intros H i j H0.
Cut ~~i=j; [Omega | Intro H1].
Cut (lt i j)\/(lt j i); [Intro H2 | Omega].
Inversion_clear H2.
Cut (lt (h i) (h j)); [Rewrite H0; Apply lt_n_n | Apply H; Assumption].
Cut (lt (h j) (h i)); [Rewrite H0; Apply lt_n_n | Apply H; Assumption].
Qed.
(* Tex_Prose
Also, and not completely trivially, a function that preserves $<$ preserves $\leq$ as well.
*)
(* Begin_Tex_Verb *)
Lemma nat_mon_imp_mon' : ((i,j:nat)(lt i j)->(lt (h i) (h j)))->
(i,j:nat)(le i j)->(le (h i) (h j)).
(* End_Tex_Verb *)
Intros H i j H0.
Elim (le_lt_eq_dec ?? H0); Intro H1.
Apply lt_le_weak; Apply H; Assumption.
Rewrite H1; Apply le_n.
Qed.
(* Tex_Prose
The last lemma in this section states that a monotone function on the natural numbers with $h(0)=0$ covers the natural numbers, that is, for every $n$ that is exceeded by some value of $h$ there is an $i$ such that \[h(i)\leq n<n+1\leq h(i+1)\]
*)
(* Begin_Tex_Verb *)
Lemma mon_fun_covers :
((i,j:nat)(lt i j)->(lt (h i) (h j)))->(h O)=O->
(n:nat){k:nat | (le (S n) (h k))}->
{i:nat | (le (h i) n) | (le (S n) (h (S i)))}.
(* End_Tex_Verb *)
Intros H H0 n H1.
Elim H1; Intros k Hk.
Induction k.
Exists O.
Rewrite H0; Auto with arith.
Cut (lt (h O) (h (1))); [Intro; Apply le_trans with (h O); Auto with arith | Apply H; Apply lt_n_Sn].
Cut (lt (h k) (h (S k))); [Intro H2 | Apply H; Apply lt_n_Sn].
Elim (le_lt_dec (S n) (h k)); Intro H3.
Elim (Hreck H3); Intros i Hi.
Exists i; Assumption.
Exists k; Auto with arith.
Qed.
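(* A worked instance: take h with h(i)=2i and n=5; h is strictly monotone
   with (h O)=O, and the lemma produces i=2, since (h (2))=4 is at most 5
   and 6 is at most (h (3))=6. *)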
End Natural_Numbers.
Section Predicates_to_CProp.
(* Tex_Prose
\subsection*{Logical Properties}
This section contains lemmas that aid in logical reasoning with natural numbers. First, we present some principles of induction, both for \verb!CProp!- and \verb!Prop!-valued predicates. We begin by presenting the results for \verb!CProp!-valued predicates:
*)
(* Begin_Tex_Verb *)
Lemma even_induction : (P:nat->CProp)(P O)->
((n:nat)(even n)->(P n)->(P (S (S n))))->
(n:nat)(even n)->(P n).
(* End_Tex_Verb *)
Intros P H H0 n.
Pattern n; Apply lt_wf_rec.
Clear n.
Intros n H1 H2.
Induction n.
Auto.
Induction n.
ElimType False; Inversion H2; Inversion H4.
Apply H0.
Inversion H2; Inversion H4; Auto.
Apply H1.
Auto with arith.
Inversion H2; Inversion H4; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma odd_induction : (P:nat->CProp)(P (1))->
((n:nat)(odd n)->(P n)->(P (S (S n))))->
(n:nat)(odd n)->(P n).
(* End_Tex_Verb *)
Intros P H H0 n; Case n.
Intro H1; ElimType False; Inversion H1.
Clear n; Intros n H1.
Pattern n; Apply even_induction; Auto.
Intros n0 H2 H3; Auto with arith.
Inversion H1; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma four_induction : (P:nat->CProp)
(P (0))->(P (1))->(P (2))->(P (3))->
((n:nat)(P n)->(P (S (S (S (S n))))))->(n:nat)(P n).
(* End_Tex_Verb *)
Intros.
Apply lt_wf_rec.
Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto with arith.
Qed.
(* Begin_Tex_Verb *)
Lemma nat_complete_double_induction : (P:nat->nat->CProp)
((m,n:nat)
((m',n':nat)(lt m' m)->(lt n' n)->(P m' n'))->(P m n))->
(m,n:nat)(P m n).
(* End_Tex_Verb *)
Intros P H m.
Pattern m; Apply lt_wf_rec; Auto with arith.
Qed.
(* Tex_Prose
For subsetoid predicates in the natural numbers we can eliminate disjunction (and existential quantification) as follows.
*)
(* Begin_Tex_Verb *)
Lemma finite_or_elim : (n:nat)(P,Q:(i:nat)(le i n)->CProp)
(nat_less_n_pred' n P)->(nat_less_n_pred' n Q)->
((i:nat)(H:(le i n))((P i H)+(Q i H)))->
({m:nat & {Hm:(Cle m n) &
(P m (Cle_to ?? Hm))}}+(i:nat)(H:(le i n))(Q i H)).
(* End_Tex_Verb *)
Intro n; Induction n.
Intros P Q HP HQ H.
Elim (H ? (Cle_to ?? (toCle ?? (le_n O)))); Intro H0.
Left; Exists O; Exists (toCle ?? (le_n O)); Assumption.
Right; Intros i H1.
Apply HQ with H:=(Cle_to ?? (toCle ?? (le_n O))); Auto with arith.
Intros P Q H H0 H1.
Elim (H1 ? (Cle_to ?? (toCle ?? (le_n (S n))))); Intro H2.
Left; Exists (S n); Exists (toCle ?? (le_n (S n))); Assumption.
LetTac P':=[i:nat][H:(le i n)](P i (le_S ?? H)).
LetTac Q':=[i:nat][H:(le i n)](Q i (le_S ?? H)).
Cut {m:nat & {Hm:(Cle m n) & (P' m (Cle_to m n Hm))}}+(i:nat)(H:(le i n))(Q' i H).
Intro H3; Elim H3; Intro H4.
Left.
Elim H4; Intros m Hm; Elim Hm; Clear H4 Hm; Intros Hm Hm'.
Exists m.
Unfold P' in Hm'.
Exists (Cle_S ?? Hm).
EApply H with i:=m; [Omega | Apply Hm'].
Right.
Intros i H5.
Unfold Q' in H4.
Elim (le_lt_eq_dec ?? H5); Intro H6.
Cut (le i n); [Intro | Auto with arith].
EApply H0 with i:=i; [Auto with arith | Apply (H4 i H7)].
EApply H0 with i:=(S n); [Auto with arith | Apply H2].
Apply Hrecn.
Intro i; Intros j H3 H4 H5 H6.
Unfold P'.
Exact (H ?? H3 ?? H6).
Intro i; Intros j H3 H4 H5 H6.
Unfold Q'.
Exact (H0 ?? H3 ?? H6).
Intros i H3.
Unfold P' Q'; Apply H1.
Qed.
(* Begin_Tex_Verb *)
Lemma str_finite_or_elim : (n:nat)(P,Q:(i:nat)(le i n)->CProp)
(nat_less_n_pred' ? P)->(nat_less_n_pred' ? Q)->
((i:nat)(H:(le i n))((P i H)+(Q i H)))->
{j:nat & {Hj:(Cle j n) &
(P j (Cle_to ?? Hj))*(j':nat)
(Hj':(le j' n))(lt j' j)->(Q j' Hj')}}+
(i:nat)(H:(le i n))(Q i H).
(* End_Tex_Verb *)
Intro n; Induction n.
Intros P Q H H0 H1.
Elim (H1 O (le_n O)); Intro HPQ.
Left.
Exists O; Exists (toCle ?? (le_n O)).
Split.
Apply H with H:=(le_n O); Auto.
Intros; ElimType False; Inversion H2.
Right; Intros.
Apply H0 with H:=(le_n O); Auto with arith.
Intros P Q H H0 H1.
LetTac P':=[i:nat][H:(le i n)](P i (le_S ?? H)).
LetTac Q':=[i:nat][H:(le i n)](Q i (le_S ?? H)).
Elim (Hrecn P' Q').
Intro H2.
Left.
Elim H2; Intros m Hm; Elim Hm; Clear H2 Hm; Intros Hm Hm'.
Exists m.
Unfold P' in Hm'.
Exists (Cle_S ?? Hm).
Elim Hm'; Clear Hm'; Intros Hm' Hj.
Split.
EApply H with i:=m; [Auto with arith | Apply Hm'].
Unfold Q' in Hj; Intros j' Hj' H2.
Cut (le m n); [Intro H3 | Exact (Cle_to ?? Hm)].
Cut (le j' n); [Intro H4 | Apply le_trans with m; Auto with arith].
Apply H0 with H:=(le_S ?? H4); [Auto | Apply Hj; Assumption].
Elim (H1 (S n) (Cle_to ?? (toCle ?? (le_n (S n))))); Intro H1'.
Intro H2.
Left; Exists (S n); Exists (toCle ?? (le_n (S n))); Split.
Assumption.
Intros j' Hj' H3; Unfold Q' in H1'.
Cut (le j' n); [Intro H4 | Auto with arith].
Unfold Q' in H2.
Apply H0 with H:=(le_S ?? H4); Auto.
Intro H2.
Right; Intros i H3.
Unfold Q' in H1'.
Elim (le_lt_eq_dec ?? H3); Intro H4.
Cut (le i n); [Intro H5 | Auto with arith].
Unfold Q' in H2.
Apply H0 with H:=(le_S ?? H5); Auto.
Apply H0 with H:=(Cle_to ?? (toCle ?? (le_n (S n)))); Auto.
Intro i; Intros j H2 H3 H4 H5.
Unfold P'.
Exact (H ?? H2 ?? H5).
Intro i; Intros j H2 H3 H4 H5.
Unfold Q'.
Exact (H0 ?? H2 ?? H5).
Intros i H2.
Unfold P' Q'.
Apply H1.
Qed.
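(* str_finite_or_elim strengthens finite_or_elim: in the left case it not
   only produces a witness j for P, but also guarantees that Q holds at
   every index below j, i.e. j is the least such witness. *)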
End Predicates_to_CProp.
Section Predicates_to_Prop.
(* Tex_Prose
Finally, analogous results for \verb!Prop!-valued predicates are presented for completeness' sake.
*)
(* Begin_Tex_Verb *)
Lemma even_ind : (P:nat->Prop)(P O)->
((n:nat)(even n)->(P n)->(P (S (S n))))->
(n:nat)(even n)->(P n).
(* End_Tex_Verb *)
Intros P H H0 n.
Pattern n; Apply lt_wf_ind.
Clear n.
Intros n H1 H2.
Induction n.
Auto.
Induction n.
ElimType False; Inversion H2; Inversion H4.
Apply H0.
Inversion H2; Inversion H4; Auto.
Apply H1.
Auto with arith.
Inversion H2; Inversion H4; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma odd_ind : (P:nat->Prop)(P (1))->
((n:nat)(P n)->(P (S (S n))))->
(n:nat)(odd n)->(P n).
(* End_Tex_Verb *)
Intros P H H0 n; Case n.
Intro H1; ElimType False; Inversion H1.
Clear n; Intros n H1.
Pattern n; Apply even_ind; Auto.
Inversion H1; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma nat_complete_double_ind : (P:nat->nat->Prop)
((m,n:nat)
((m',n':nat)(lt m' m)->(lt n' n)->(P m' n'))->(P m n))->
(m,n:nat)(P m n).
(* End_Tex_Verb *)
Intros P H m.
Pattern m; Apply lt_wf_ind; Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma four_ind : (P:nat->Prop)
(P (0))->(P (1))->(P (2))->(P (3))->
((n:nat)(P n)->(P (S (S (S (S n))))))->(n:nat)(P n).
(* End_Tex_Verb *)
Intros.
Apply lt_wf_ind.
Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto.
Clear m; Intro m.
Case m; Auto with arith.
Qed.
End Predicates_to_Prop.
(* Tex_Prose
\section{Integers}
Similar results for integers.
*)
(* FIXME *)
Grammar tactic simple_tactic : tactic :=
elimcompare
[ "ElimCompare" constrarg($c) constrarg($d) ] -> [ (ElimCompare $c $d) ].
(* Begin_Tex_Verb *)
Definition Zlts:=[x,y:Z](eqs fast_integer.relation (Zcompare x y) INFERIEUR).
(* End_Tex_Verb *)
(* Begin_Tex_Verb *)
Lemma toCProp_Zlt:(x,y:Z)`x < y`->(Zlts x y).
(* End_Tex_Verb *)
Proof.
Intros x y H.
Unfold Zlts.
Unfold Zlt in H.
Rewrite H.
Apply Crefl_equal.
Qed.
(* Begin_Tex_Verb *)
Lemma CZlt_to:(x,y:Z)(Zlts x y)->`x<y`.
(* End_Tex_Verb *)
Proof.
Intros x y H.
Unfold Zlt.
Inversion H.
Auto.
Qed.
(* Begin_Tex_Verb *)
Lemma Zsgn_1:(x:Z){`(Zsgn x)=0`}+{`(Zsgn x)=1`}+{`(Zsgn x)=(-1)`}.
(* End_Tex_Verb *)
Proof.
Intro x.
Case x.
Left.
Left.
Unfold Zsgn.
Reflexivity.
Intro p.
Simpl.
Left.
Right.
Reflexivity.
Intro p.
Right.
Simpl.
Reflexivity.
Qed.
(* Begin_Tex_Verb *)
Lemma Zsgn_2:(x:Z)`(Zsgn x)=0`->`x=0`.
(* End_Tex_Verb *)
Proof.
Intro x.
Case x.
Intro H.
Reflexivity.
Intros p H.
Inversion H.
Intros p H.
Inversion H.
Qed.
(* Begin_Tex_Verb *)
Lemma Zsgn_3:(x:Z)`x<>0`->`(Zsgn x)<>0`.
(* End_Tex_Verb *)
Proof.
Intro x.
Case x.
Intro H.
Elim H.
Reflexivity.
Intros p H.
Simpl.
Discriminate.
Intros p H.
Simpl.
Discriminate.
Qed.
(* Tex_Prose
The following have unusual names, in line with the series of ZLn lemmata in {\tt fast\_integers.v}.
*)
(* Begin_Tex_Verb *)
Lemma ZL9: (p:positive)(inject_nat (convert p))=(POS p).
(* End_Tex_Verb *)
Proof.
Intro p.
Elim (ZL4 p).
Intros x H0.
Rewrite H0.
Unfold inject_nat.
Apply f_equal with A:=positive B:=Z f:=POS.
Cut ((anti_convert (convert p))=(anti_convert (S x))).
Intro H1.
Rewrite bij2 in H1.
Cut ((sub_un (add_un p))=(sub_un (anti_convert (S x)))).
Intro H2.
Rewrite sub_add_one in H2.
Simpl in H2.
Rewrite sub_add_one in H2.
Auto.
Apply f_equal with A:=positive B:=positive f:=sub_un.
Assumption.
Apply f_equal with f:=anti_convert.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Theorem Zsgn_4:(a:Z)`a=(Zsgn a)*(absolu a)`.
(* End_Tex_Verb *)
Proof.
Intro a.
Case a.
Simpl.
Reflexivity.
Intro p.
Unfold Zsgn.
Unfold absolu.
Rewrite Zmult_1_n.
Symmetry.
Apply ZL9.
Intro p.
Unfold Zsgn.
Unfold absolu.
Rewrite ZL9.
Constructor.
Qed.
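(* For example, with a = `-3` we have `(Zsgn a) = -1` and (absolu a)=(3),
   so the theorem states `-3 = (-1)*3`. *)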
(* Begin_Tex_Verb *)
Theorem Zsgn_5: (a,b,x,y:Z)`x<>0`->`y<>0`->
`(Zsgn a)*x=(Zsgn b)*y`->`(Zsgn a)*y=(Zsgn b)*x`.
(* End_Tex_Verb *)
Proof.
Intros a b x y H H0.
Case a.
Case b.
Simpl.
Trivial.
Intro p.
Unfold Zsgn.
Intro H1.
Rewrite Zmult_1_n in H1.
Simpl in H1.
Elim H0.
Auto.
Intro p.
Unfold Zsgn.
Intro H1.
Elim H0.
Apply Zopp_intro.
Simpl.
Transitivity `(-1)*y`; Auto.
Intro p.
Unfold 1 Zsgn.
Unfold 2 Zsgn.
Intro H1.
Transitivity y.
Rewrite Zmult_1_n.
Reflexivity.
Transitivity `(Zsgn b)*((Zsgn b)*y)`.
Case (Zsgn_1 b).
Intro H2.
Case H2.
Intro H3.
Elim H.
Rewrite H3 in H1.
Change `1*x=0` in H1.
Rewrite Zmult_1_n in H1.
Assumption.
Intro H3.
Rewrite H3.
Rewrite Zmult_1_n.
Rewrite Zmult_1_n.
Reflexivity.
Intro H2.
Rewrite H2.
Ring.
Rewrite Zmult_1_n in H1.
Rewrite H1.
Reflexivity.
Intro p.
Unfold 1 Zsgn.
Unfold 2 Zsgn.
Intro H1.
Transitivity `(Zsgn b)*((-1)*((Zsgn b)*y))`.
Case (Zsgn_1 b).
Intro H2.
Case H2.
Intro H3.
Elim H.
Apply Zopp_intro.
Transitivity `(-1)*x`.
Ring.
Unfold Zopp.
Rewrite H3 in H1.
Transitivity `0*y`; Auto.
Intro H3.
Rewrite H3.
Ring.
Intro H2.
Rewrite H2.
Ring.
Rewrite <- H1.
Ring.
Qed.
(* Begin_Tex_Verb *)
Lemma nat_nat_pos:(m,n:nat)`(m+1)*(n+1)>0`.
(* End_Tex_Verb *)
Proof.
Intros m n.
Apply Zlt_gt.
Cut (`(inject_nat m)+1>0`).
Intro H.
Cut(`0<(inject_nat n)+1`).
Intro H0.
Cut (`(((inject_nat m)+1)*0) < ((inject_nat m)+1)*((inject_nat n)+1)`).
Rewrite Zero_mult_right.
Auto.
Apply Zlt_reg_mult_l; Auto.
Change (`0<(Zs (inject_nat n))`).
Apply Zle_lt_n_Sm.
Change (`(inject_nat O) <= (inject_nat n)`).
Apply inj_le.
Apply le_O_n.
Apply Zlt_gt.
Change (`0<(Zs (inject_nat m))`).
Apply Zle_lt_n_Sm.
Change (`(inject_nat O) <= (inject_nat m)`).
Apply inj_le.
Apply le_O_n.
Qed.
(* Begin_Tex_Verb *)
Theorem S_predn:(m:nat)(~(m=O))->(S (pred m))=m.
(* End_Tex_Verb *)
Proof.
Intros m H.
Symmetry.
Apply S_pred with O.
Omega.
Qed.
(* Begin_Tex_Verb *)
Lemma absolu_1:(x:Z)((absolu x)=O)->(`x=0`).
(* End_Tex_Verb *)
Proof.
Intros x H.
Case (dec_eq x `0`).
Auto.
Intro H0.
Apply False_ind.
ElimCompare x `0`.
Intro H2.
Apply H0.
Elim (Zcompare_EGAL x O).
Intros H3 H4.
Auto.
Intro H2.
Cut ((EX h:nat| (absolu x)=(S h))).
Intro H3.
Case H3.
Rewrite H.
Exact O_S.
Change (`x<0`) in H2.
LetTac H3:=(Zlt_gt ?? H2).
Elim (SUPERIEUR_POS ?? H3).
Intros x0 H5.
Cut (EX q:positive |x=(NEG q)).
Intro H6.
Case H6.
Intros x1 H7.
Rewrite H7.
Unfold absolu.
Generalize x1.
Exact ZL4.
Cut (x=(Zopp (POS x0))).
Simpl.
Intro H6.
Exists x0.
Assumption.
Rewrite <- (Zopp_Zopp x).
Exact (f_equal Z Z Zopp `-x` (POS x0) H5).
Intro H2.
Cut ((EX h:nat| (absolu x)=(S h))).
Intro H3.
Case H3.
Rewrite H.
Exact O_S.
Elim (SUPERIEUR_POS ?? H2).
Simpl.
Rewrite Zero_right.
Intros x0 H4.
Rewrite H4.
Unfold absolu.
Generalize x0.
Exact ZL4.
Qed.
(* Begin_Tex_Verb *)
Lemma absolu_2: (x:Z)(`x<>0`)->(~((absolu x)=O)).
(* End_Tex_Verb *)
Proof.
Intros x H.
Intro H0.
Apply H.
Apply absolu_1.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma Zgt_mult_conv_absorb_l: (a,x,y:Z)`a<0`->`a*x>a*y`->`x<y`.
(* End_Tex_Verb *)
Proof.
Intros a x y H H0.
Case (dec_eq x y).
Intro H1.
Apply False_ind.
Rewrite H1 in H0.
Cut (`a*y=a*y`).
Change (`a*y<>a*y`).
Apply Zgt_not_eq.
Assumption.
Trivial.
Intro H1.
Case (not_Zeq x y H1).
Trivial.
Intro H2.
Apply False_ind.
Cut (`a*y>a*x`).
Apply Zgt_not_sym with m:=`a*y` n:=`a*x`.
Assumption.
Apply Zlt_conv_mult_l.
Assumption.
Assumption.
Qed.
(* Begin_Tex_Verb *)
Lemma Zgt_mult_reg_absorb_l:(a,x,y:Z)`a>0`->`a*x>a*y`->`x>y`.
(* End_Tex_Verb *)
Proof.
Intros a x y H H0.
Cut (`(-a)<(Zopp 0)`).
Rewrite <- (Zopp_Zopp a) in H.
Rewrite <- (Zopp_Zopp `0`) in H.
Simpl.
Intro H1.
Rewrite <- (Zopp_Zopp x).
Rewrite <- (Zopp_Zopp y).
Apply Zlt_opp.
Apply Zgt_mult_conv_absorb_l with a:=`-a` x:=`-x`.
Assumption.
Rewrite Zopp_Zmult.
Rewrite Zopp_Zmult.
Apply Zlt_opp.
Rewrite <- Zopp_Zmult_r.
Rewrite <- Zopp_Zmult_r.
Apply Zgt_lt.
Apply Zlt_opp.
Apply Zgt_lt.
Assumption.
Omega.
Qed.
(* Begin_Tex_Verb *)
Lemma Zmult_Sm_Sn:(m,n:Z)`(m+1)*(n+1)=m*n+(m+n)+1`.
(* End_Tex_Verb *)
Proof.
Intros.
Ring.
Qed.
/-
Copyright (c) 2019 Sébastien Gouëzel. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Sébastien Gouëzel
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.data.equiv.basic
import Mathlib.PostPort
universes u_5 u_6 l u_1 u_2 u_3 u_4
namespace Mathlib
/-!
# Local equivalences
This file defines equivalences between subsets of given types.
An element `e` of `local_equiv α β` is made of two maps `e.to_fun` and `e.inv_fun` respectively
from α to β and from β to α (just like equivs), which are inverse to each other on the subsets
`e.source` and `e.target` of α and β respectively.
They are designed in particular to define charts on manifolds.
The main functionality is `e.trans f`, which composes the two local equivalences by restricting
the source and target to the maximal set where the composition makes sense.
As for equivs, we register a coercion to functions and use it in our simp normal form: we write
`e x` and `e.symm y` instead of `e.to_fun x` and `e.inv_fun y`.
## Main definitions
`equiv.to_local_equiv`: associating a local equiv to an equiv, with source = target = univ
`local_equiv.symm` : the inverse of a local equiv
`local_equiv.trans` : the composition of two local equivs
`local_equiv.refl` : the identity local equiv
`local_equiv.of_set` : the identity on a set `s`
`eq_on_source` : equivalence relation describing the "right" notion of equality for local
equivs (see below in implementation notes)
## Implementation notes
There are at least three possible implementations of local equivalences:
* equivs on subtypes
* pairs of functions taking values in `option α` and `option β`, equal to none where the local
equivalence is not defined
* pairs of functions defined everywhere, keeping the source and target as additional data
Each of these implementations has pros and cons.
* When dealing with subtypes, one still needs to define additional API for composition and
restriction of domains. Checking that one always belongs to the right subtype makes things very
tedious, and leads quickly to DTT hell (as the subtype `u ∩ v` is not the "same" as `v ∩ u`, for
instance).
* With option-valued functions, the composition is very neat (it is just the usual composition, and
the domain is restricted automatically). These are implemented in `pequiv.lean`. For manifolds,
where one wants to discuss thoroughly the smoothness of the maps, this creates however a lot of
overhead as one would need to extend all classes of smoothness to option-valued maps.
* The local_equiv version as explained above is easier to use for manifolds. The drawback is that
there is extra useless data (the values of `to_fun` and `inv_fun` outside of `source` and `target`).
In particular, the equality notion between local equivs is not "the right one", i.e., coinciding
source and target and equality there. Moreover, there are no local equivs in this sense between
an empty type and a nonempty type. Since empty types are not that useful, and since one almost never
needs to talk about equal local equivs, this is not an issue in practice.
Still, we introduce an equivalence relation `eq_on_source` that captures this right notion of
equality, and show that many properties are invariant under this equivalence relation.
-/
-- register in the simpset `mfld_simps` several lemmas that are often useful when dealing
-- with manifolds
namespace tactic.interactive
/-- A very basic tactic to show that sets showing up in manifolds coincide or are included in
one another. -/
end tactic.interactive
/-- Local equivalence between subsets `source` and `target` of α and β respectively. The (global)
maps `to_fun : α → β` and `inv_fun : β → α` map `source` to `target` and conversely, and are inverse
to each other there. The values of `to_fun` outside of `source` and of `inv_fun` outside of `target`
are irrelevant. -/
structure local_equiv (α : Type u_5) (β : Type u_6)
where
to_fun : α → β
inv_fun : β → α
source : set α
target : set β
map_source' : ∀ {x : α}, x ∈ source → to_fun x ∈ target
map_target' : ∀ {x : β}, x ∈ target → inv_fun x ∈ source
left_inv' : ∀ {x : α}, x ∈ source → inv_fun (to_fun x) = x
right_inv' : ∀ {x : β}, x ∈ target → to_fun (inv_fun x) = x
/-- Associating a local_equiv to an equiv-/
def equiv.to_local_equiv {α : Type u_1} {β : Type u_2} (e : α ≃ β) : local_equiv α β :=
local_equiv.mk (equiv.to_fun e) (equiv.inv_fun e) set.univ set.univ sorry sorry sorry sorry
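-- For example, `equiv.to_local_equiv (equiv.refl α)` is the identity local equiv,
-- with `source = target = set.univ` (see `refl_to_local_equiv` below).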
namespace local_equiv
/-- The inverse of a local equiv -/
protected def symm {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv β α :=
mk (inv_fun e) (to_fun e) (target e) (source e) (map_target' e) (map_source' e) (right_inv' e) (left_inv' e)
protected instance has_coe_to_fun {α : Type u_1} {β : Type u_2} : has_coe_to_fun (local_equiv α β) :=
has_coe_to_fun.mk (fun (x : local_equiv α β) => α → β) to_fun
@[simp] theorem coe_mk {α : Type u_1} {β : Type u_2} (f : α → β) (g : β → α) (s : set α) (t : set β) (ml : ∀ {x : α}, x ∈ s → f x ∈ t) (mr : ∀ {x : β}, x ∈ t → g x ∈ s) (il : ∀ {x : α}, x ∈ s → g (f x) = x) (ir : ∀ {x : β}, x ∈ t → f (g x) = x) : ⇑(mk f g s t ml mr il ir) = f :=
rfl
@[simp] theorem coe_symm_mk {α : Type u_1} {β : Type u_2} (f : α → β) (g : β → α) (s : set α) (t : set β) (ml : ∀ {x : α}, x ∈ s → f x ∈ t) (mr : ∀ {x : β}, x ∈ t → g x ∈ s) (il : ∀ {x : α}, x ∈ s → g (f x) = x) (ir : ∀ {x : β}, x ∈ t → f (g x) = x) : ⇑(local_equiv.symm (mk f g s t ml mr il ir)) = g :=
rfl
@[simp] theorem to_fun_as_coe {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : to_fun e = ⇑e :=
rfl
@[simp] theorem inv_fun_as_coe {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : inv_fun e = ⇑(local_equiv.symm e) :=
rfl
@[simp] theorem map_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {x : α} (h : x ∈ source e) : coe_fn e x ∈ target e :=
map_source' e h
protected theorem maps_to {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : set.maps_to (⇑e) (source e) (target e) :=
fun (_x : α) => map_source e
@[simp] theorem map_target {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {x : β} (h : x ∈ target e) : coe_fn (local_equiv.symm e) x ∈ source e :=
map_target' e h
theorem symm_maps_to {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : set.maps_to (⇑(local_equiv.symm e)) (target e) (source e) :=
local_equiv.maps_to (local_equiv.symm e)
@[simp] theorem left_inv {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {x : α} (h : x ∈ source e) : coe_fn (local_equiv.symm e) (coe_fn e x) = x :=
left_inv' e h
protected theorem left_inv_on {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : set.left_inv_on (⇑(local_equiv.symm e)) (⇑e) (source e) :=
fun (_x : α) => left_inv e
@[simp] theorem right_inv {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {x : β} (h : x ∈ target e) : coe_fn e (coe_fn (local_equiv.symm e) x) = x :=
right_inv' e h
protected theorem right_inv_on {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : set.right_inv_on (⇑(local_equiv.symm e)) (⇑e) (target e) :=
fun (_x : β) => right_inv e
/-- Associating to a local_equiv an equiv between the source and the target -/
protected def to_equiv {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : ↥(source e) ≃ ↥(target e) :=
equiv.mk (fun (x : ↥(source e)) => { val := coe_fn e ↑x, property := sorry })
(fun (y : ↥(target e)) => { val := coe_fn (local_equiv.symm e) ↑y, property := sorry }) sorry sorry
@[simp] theorem symm_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : source (local_equiv.symm e) = target e :=
rfl
@[simp] theorem symm_target {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : target (local_equiv.symm e) = source e :=
rfl
@[simp] theorem symm_symm {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv.symm (local_equiv.symm e) = e := sorry
/-- A local equiv induces a bijection between its source and target -/
theorem bij_on_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : set.bij_on (⇑e) (source e) (target e) :=
set.inv_on.bij_on { left := local_equiv.left_inv_on e, right := local_equiv.right_inv_on e } (local_equiv.maps_to e)
(symm_maps_to e)
theorem image_eq_target_inter_inv_preimage {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {s : set α} (h : s ⊆ source e) : ⇑e '' s = target e ∩ ⇑(local_equiv.symm e) ⁻¹' s := sorry
theorem image_inter_source_eq {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : ⇑e '' (s ∩ source e) = target e ∩ ⇑(local_equiv.symm e) ⁻¹' (s ∩ source e) :=
image_eq_target_inter_inv_preimage e (set.inter_subset_right s (source e))
theorem image_inter_source_eq' {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : ⇑e '' (s ∩ source e) = target e ∩ ⇑(local_equiv.symm e) ⁻¹' s := sorry
theorem symm_image_eq_source_inter_preimage {α : Type u_1} {β : Type u_2} (e : local_equiv α β) {s : set β} (h : s ⊆ target e) : ⇑(local_equiv.symm e) '' s = source e ∩ ⇑e ⁻¹' s :=
image_eq_target_inter_inv_preimage (local_equiv.symm e) h
theorem symm_image_inter_target_eq {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set β) : ⇑(local_equiv.symm e) '' (s ∩ target e) = source e ∩ ⇑e ⁻¹' (s ∩ target e) :=
image_inter_source_eq (local_equiv.symm e) s
theorem symm_image_inter_target_eq' {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set β) : ⇑(local_equiv.symm e) '' (s ∩ target e) = source e ∩ ⇑e ⁻¹' s :=
image_inter_source_eq' (local_equiv.symm e) s
theorem source_inter_preimage_inv_preimage {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : source e ∩ ⇑e ⁻¹' (⇑(local_equiv.symm e) ⁻¹' s) = source e ∩ s := sorry
theorem target_inter_inv_preimage_preimage {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set β) : target e ∩ ⇑(local_equiv.symm e) ⁻¹' (⇑e ⁻¹' s) = target e ∩ s :=
source_inter_preimage_inv_preimage (local_equiv.symm e) s
theorem image_source_eq_target {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : ⇑e '' source e = target e :=
set.bij_on.image_eq (bij_on_source e)
theorem source_subset_preimage_target {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : source e ⊆ ⇑e ⁻¹' target e :=
fun (x : α) (hx : x ∈ source e) => map_source e hx
theorem inv_image_target_eq_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : ⇑(local_equiv.symm e) '' target e = source e :=
set.bij_on.image_eq (bij_on_source (local_equiv.symm e))
theorem target_subset_preimage_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : target e ⊆ ⇑(local_equiv.symm e) ⁻¹' source e :=
fun (x : β) (hx : x ∈ target e) => map_target e hx
/-- Two local equivs that have the same `source`, same `to_fun` and same `inv_fun`, coincide. -/
protected theorem ext {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : ∀ (x : α), coe_fn e x = coe_fn e' x) (hsymm : ∀ (x : β), coe_fn (local_equiv.symm e) x = coe_fn (local_equiv.symm e') x) (hs : source e = source e') : e = e' := sorry
/-- Restricting a local equivalence to e.source ∩ s -/
protected def restr {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : local_equiv α β :=
mk (⇑e) (⇑(local_equiv.symm e)) (source e ∩ s) (target e ∩ ⇑(local_equiv.symm e) ⁻¹' s) sorry sorry sorry sorry
@[simp] theorem restr_coe {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : ⇑(local_equiv.restr e s) = ⇑e :=
rfl
@[simp] theorem restr_coe_symm {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : ⇑(local_equiv.symm (local_equiv.restr e s)) = ⇑(local_equiv.symm e) :=
rfl
@[simp] theorem restr_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : source (local_equiv.restr e s) = source e ∩ s :=
rfl
@[simp] theorem restr_target {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set α) : target (local_equiv.restr e s) = target e ∩ ⇑(local_equiv.symm e) ⁻¹' s :=
rfl
theorem restr_eq_of_source_subset {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {s : set α} (h : source e ⊆ s) : local_equiv.restr e s = e := sorry
@[simp] theorem restr_univ {α : Type u_1} {β : Type u_2} {e : local_equiv α β} : local_equiv.restr e set.univ = e :=
restr_eq_of_source_subset (set.subset_univ (source e))
/-- The identity local equiv -/
protected def refl (α : Type u_1) : local_equiv α α :=
equiv.to_local_equiv (equiv.refl α)
@[simp] theorem refl_source {α : Type u_1} : source (local_equiv.refl α) = set.univ :=
rfl
@[simp] theorem refl_target {α : Type u_1} : target (local_equiv.refl α) = set.univ :=
rfl
@[simp] theorem refl_coe {α : Type u_1} : ⇑(local_equiv.refl α) = id :=
rfl
@[simp] theorem refl_symm {α : Type u_1} : local_equiv.symm (local_equiv.refl α) = local_equiv.refl α :=
rfl
@[simp] theorem refl_restr_source {α : Type u_1} (s : set α) : source (local_equiv.restr (local_equiv.refl α) s) = s := sorry
@[simp] theorem refl_restr_target {α : Type u_1} (s : set α) : target (local_equiv.restr (local_equiv.refl α) s) = s := sorry
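-- A small usage sketch of the preceding lemma, stated with the names used in
-- this ported file:
example {α : Type u_1} (s : set α) :
    source (local_equiv.restr (local_equiv.refl α) s) = s :=
  refl_restr_source s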
/-- The identity local equiv on a set `s` -/
def of_set {α : Type u_1} (s : set α) : local_equiv α α :=
mk id id s s sorry sorry sorry sorry
@[simp] theorem of_set_source {α : Type u_1} (s : set α) : source (of_set s) = s :=
rfl
@[simp] theorem of_set_target {α : Type u_1} (s : set α) : target (of_set s) = s :=
rfl
@[simp] theorem of_set_coe {α : Type u_1} (s : set α) : ⇑(of_set s) = id :=
rfl
@[simp] theorem of_set_symm {α : Type u_1} (s : set α) : local_equiv.symm (of_set s) = of_set s :=
rfl
/-- Composing two local equivs if the target of the first coincides with the source of the
second. -/
protected def trans' {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) (h : target e = source e') : local_equiv α γ :=
mk (⇑e' ∘ ⇑e) (⇑(local_equiv.symm e) ∘ ⇑(local_equiv.symm e')) (source e) (target e') sorry sorry sorry sorry
/-- Composing two local equivs, by restricting to the maximal domain where their composition
is well defined. -/
protected def trans {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : local_equiv α γ :=
local_equiv.trans' (local_equiv.symm (local_equiv.restr (local_equiv.symm e) (source e')))
(local_equiv.restr e' (target e)) sorry
@[simp] theorem coe_trans {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : ⇑(local_equiv.trans e e') = ⇑e' ∘ ⇑e :=
rfl
@[simp] theorem coe_trans_symm {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : ⇑(local_equiv.symm (local_equiv.trans e e')) = ⇑(local_equiv.symm e) ∘ ⇑(local_equiv.symm e') :=
rfl
theorem trans_symm_eq_symm_trans_symm {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : local_equiv.symm (local_equiv.trans e e') = local_equiv.trans (local_equiv.symm e') (local_equiv.symm e) := sorry
@[simp] theorem trans_source {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : source (local_equiv.trans e e') = source e ∩ ⇑e ⁻¹' source e' :=
rfl
theorem trans_source' {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : source (local_equiv.trans e e') = source e ∩ ⇑e ⁻¹' (target e ∩ source e') := sorry
theorem trans_source'' {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : source (local_equiv.trans e e') = ⇑(local_equiv.symm e) '' (target e ∩ source e') := sorry
theorem image_trans_source {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : ⇑e '' source (local_equiv.trans e e') = target e ∩ source e' :=
image_source_eq_target (local_equiv.symm (local_equiv.restr (local_equiv.symm e) (source e')))
@[simp] theorem trans_target {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : target (local_equiv.trans e e') = target e' ∩ ⇑(local_equiv.symm e') ⁻¹' target e :=
rfl
theorem trans_target' {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : target (local_equiv.trans e e') = target e' ∩ ⇑(local_equiv.symm e') ⁻¹' (source e' ∩ target e) :=
trans_source' (local_equiv.symm e') (local_equiv.symm e)
theorem trans_target'' {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : target (local_equiv.trans e e') = ⇑e' '' (source e' ∩ target e) :=
trans_source'' (local_equiv.symm e') (local_equiv.symm e)
theorem inv_image_trans_target {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) : ⇑(local_equiv.symm e') '' target (local_equiv.trans e e') = source e' ∩ target e :=
image_trans_source (local_equiv.symm e') (local_equiv.symm e)
theorem trans_assoc {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv β γ) (e'' : local_equiv γ δ) : local_equiv.trans (local_equiv.trans e e') e'' = local_equiv.trans e (local_equiv.trans e' e'') := sorry
@[simp] theorem trans_refl {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv.trans e (local_equiv.refl β) = e := sorry
@[simp] theorem refl_trans {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv.trans (local_equiv.refl α) e = e := sorry
theorem trans_refl_restr {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set β) : local_equiv.trans e (local_equiv.restr (local_equiv.refl β) s) = local_equiv.restr e (⇑e ⁻¹' s) := sorry
theorem trans_refl_restr' {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (s : set β) : local_equiv.trans e (local_equiv.restr (local_equiv.refl β) s) = local_equiv.restr e (source e ∩ ⇑e ⁻¹' s) := sorry
theorem restr_trans {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : local_equiv α β) (e' : local_equiv β γ) (s : set α) : local_equiv.trans (local_equiv.restr e s) e' = local_equiv.restr (local_equiv.trans e e') s := sorry
/-- `eq_on_source e e'` means that `e` and `e'` have the same source, and coincide there. Then `e`
and `e'` should really be considered the same local equiv. -/
def eq_on_source {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (e' : local_equiv α β) :=
source e = source e' ∧ set.eq_on (⇑e) (⇑e') (source e)
/-- `eq_on_source` is an equivalence relation -/
protected instance eq_on_source_setoid {α : Type u_1} {β : Type u_2} : setoid (local_equiv α β) :=
setoid.mk eq_on_source sorry
theorem eq_on_source_refl {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : e ≈ e :=
setoid.refl e
/-- Two equivalent local equivs have the same source -/
theorem eq_on_source.source_eq {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : e ≈ e') : source e = source e' :=
and.left h
/-- Two equivalent local equivs coincide on the source -/
theorem eq_on_source.eq_on {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : e ≈ e') : set.eq_on (⇑e) (⇑e') (source e) :=
and.right h
/-- Two equivalent local equivs have the same target -/
theorem eq_on_source.target_eq {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : e ≈ e') : target e = target e' := sorry
/-- If two local equivs are equivalent, so are their inverses. -/
theorem eq_on_source.symm' {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : e ≈ e') : local_equiv.symm e ≈ local_equiv.symm e' := sorry
/-- Two equivalent local equivs have coinciding inverses on the target -/
theorem eq_on_source.symm_eq_on {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (h : e ≈ e') : set.eq_on (⇑(local_equiv.symm e)) (⇑(local_equiv.symm e')) (target e) :=
eq_on_source.eq_on (eq_on_source.symm' h)
/-- Composition of local equivs respects equivalence -/
theorem eq_on_source.trans' {α : Type u_1} {β : Type u_2} {γ : Type u_3} {e : local_equiv α β} {e' : local_equiv α β} {f : local_equiv β γ} {f' : local_equiv β γ} (he : e ≈ e') (hf : f ≈ f') : local_equiv.trans e f ≈ local_equiv.trans e' f' := sorry
/-- Restriction of local equivs respects equivalence -/
theorem eq_on_source.restr {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (he : e ≈ e') (s : set α) : local_equiv.restr e s ≈ local_equiv.restr e' s := sorry
/-- Preimages are respected by equivalence -/
theorem eq_on_source.source_inter_preimage_eq {α : Type u_1} {β : Type u_2} {e : local_equiv α β} {e' : local_equiv α β} (he : e ≈ e') (s : set β) : source e ∩ ⇑e ⁻¹' s = source e' ∩ ⇑e' ⁻¹' s := sorry
/-- Composition of a local equiv and its inverse is equivalent to the restriction of the identity
to the source -/
theorem trans_self_symm {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv.trans e (local_equiv.symm e) ≈ of_set (source e) := sorry
/-- Composition of the inverse of a local equiv and this local equiv is equivalent to the
restriction of the identity to the target -/
theorem trans_symm_self {α : Type u_1} {β : Type u_2} (e : local_equiv α β) : local_equiv.trans (local_equiv.symm e) e ≈ of_set (target e) :=
trans_self_symm (local_equiv.symm e)
/-- Two equivalent local equivs are equal when the source and target are univ -/
theorem eq_of_eq_on_source_univ {α : Type u_1} {β : Type u_2} (e : local_equiv α β) (e' : local_equiv α β) (h : e ≈ e') (s : source e = set.univ) (t : target e = set.univ) : e = e' := sorry
/-- The product of two local equivs, as a local equiv on the product. -/
def prod {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : local_equiv (α × γ) (β × δ) :=
mk (fun (p : α × γ) => (coe_fn e (prod.fst p), coe_fn e' (prod.snd p)))
(fun (p : β × δ) => (coe_fn (local_equiv.symm e) (prod.fst p), coe_fn (local_equiv.symm e') (prod.snd p)))
(set.prod (source e) (source e')) (set.prod (target e) (target e')) sorry sorry sorry sorry
@[simp] theorem prod_source {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : source (prod e e') = set.prod (source e) (source e') :=
rfl
@[simp] theorem prod_target {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : target (prod e e') = set.prod (target e) (target e') :=
rfl
@[simp] theorem prod_coe {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : ⇑(prod e e') = fun (p : α × γ) => (coe_fn e (prod.fst p), coe_fn e' (prod.snd p)) :=
rfl
theorem prod_coe_symm {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : ⇑(local_equiv.symm (prod e e')) =
fun (p : β × δ) => (coe_fn (local_equiv.symm e) (prod.fst p), coe_fn (local_equiv.symm e') (prod.snd p)) :=
rfl
@[simp] theorem prod_symm {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} (e : local_equiv α β) (e' : local_equiv γ δ) : local_equiv.symm (prod e e') = prod (local_equiv.symm e) (local_equiv.symm e') := sorry
@[simp] theorem prod_trans {α : Type u_1} {β : Type u_2} {γ : Type u_3} {δ : Type u_4} {η : Type u_5} {ε : Type u_6} (e : local_equiv α β) (f : local_equiv β γ) (e' : local_equiv δ η) (f' : local_equiv η ε) : local_equiv.trans (prod e e') (prod f f') = prod (local_equiv.trans e f) (local_equiv.trans e' f') := sorry
end local_equiv
namespace set
-- All arguments are explicit to avoid missing information in the pretty printer output
/-- A bijection between two sets `s : set α` and `t : set β` provides a local equivalence
between `α` and `β`. -/
@[simp] theorem bij_on.to_local_equiv_inv_fun {α : Type u_1} {β : Type u_2} [Nonempty α] (f : α → β) (s : set α) (t : set β) (hf : bij_on f s t) (b : β) : local_equiv.inv_fun (bij_on.to_local_equiv f s t hf) b = function.inv_fun_on f s b :=
Eq.refl (local_equiv.inv_fun (bij_on.to_local_equiv f s t hf) b)
/-- A map injective on a subset of its domain provides a local equivalence. -/
@[simp] def inj_on.to_local_equiv {α : Type u_1} {β : Type u_2} [Nonempty α] (f : α → β) (s : set α) (hf : inj_on f s) : local_equiv α β :=
bij_on.to_local_equiv f s (f '' s) (inj_on.bij_on_image hf)
end set
namespace equiv
/- equivs give rise to local_equiv. We set up simp lemmas to reduce most properties of the local
equiv to that of the equiv. -/
@[simp] theorem to_local_equiv_coe {α : Type u_1} {β : Type u_2} (e : α ≃ β) : ⇑(to_local_equiv e) = ⇑e :=
rfl
@[simp] theorem to_local_equiv_symm_coe {α : Type u_1} {β : Type u_2} (e : α ≃ β) : ⇑(local_equiv.symm (to_local_equiv e)) = ⇑(equiv.symm e) :=
rfl
@[simp] theorem to_local_equiv_source {α : Type u_1} {β : Type u_2} (e : α ≃ β) : local_equiv.source (to_local_equiv e) = set.univ :=
rfl
@[simp] theorem to_local_equiv_target {α : Type u_1} {β : Type u_2} (e : α ≃ β) : local_equiv.target (to_local_equiv e) = set.univ :=
rfl
@[simp] theorem refl_to_local_equiv {α : Type u_1} : to_local_equiv (equiv.refl α) = local_equiv.refl α :=
rfl
@[simp] theorem symm_to_local_equiv {α : Type u_1} {β : Type u_2} (e : α ≃ β) : to_local_equiv (equiv.symm e) = local_equiv.symm (to_local_equiv e) :=
rfl
@[simp] theorem trans_to_local_equiv {α : Type u_1} {β : Type u_2} {γ : Type u_3} (e : α ≃ β) (e' : β ≃ γ) : to_local_equiv (equiv.trans e e') = local_equiv.trans (to_local_equiv e) (to_local_equiv e') := sorry
|
State Before: α : Type u_1
β : Type ?u.516
inst✝¹ : Denumerable α
inst✝ : Denumerable β
n : ℕ
⊢ encode (ofNat α n) = n State After: case intro.intro
α : Type u_1
β : Type ?u.516
inst✝¹ : Denumerable α
inst✝ : Denumerable β
n : ℕ
a : ?m.551
h : a ∈ decode n
e : encode a = n
⊢ encode (ofNat α n) = n Tactic: obtain ⟨a, h, e⟩ := decode_inv n State Before: case intro.intro
α : Type u_1
β : Type ?u.516
inst✝¹ : Denumerable α
inst✝ : Denumerable β
n : ℕ
a : ?m.551
h : a ∈ decode n
e : encode a = n
⊢ encode (ofNat α n) = n State After: no goals Tactic: rwa [ofNat_of_decode h] |
/******************************************************************
*
* Round for C
*
* Copyright (C) Satoshi Konno 2015
*
* This is licensed under BSD-style license, see file COPYING.
*
******************************************************************/
#include <boost/test/unit_test.hpp>
#include <round/util/mutex.h>
BOOST_AUTO_TEST_SUITE(mutex)
BOOST_AUTO_TEST_CASE(MutexTest)
{
RoundMutex* mutex;
mutex = round_mutex_new();
BOOST_CHECK(mutex);
BOOST_CHECK(round_mutex_lock(mutex));
BOOST_CHECK(round_mutex_unlock(mutex));
BOOST_CHECK(round_mutex_delete(mutex));
}
BOOST_AUTO_TEST_SUITE_END()
|
The 2018 Berlin anti-semitic attack was an attack on a street in the German capital of Berlin. Two young men wearing Jewish skullcaps were insulted by Arabic-speaking passers-by. One of the two, an Israeli citizen, was beaten with a belt by a Palestinian attacker. Video footage led to public outrage.
There have been other similar incidents involving Muslim assailants in particular. A spokesman for the Jewish community said the attack showed that "Jewish people are not safe here".
The number of overall anti-Semitic offenses in Berlin has been rising in recent years. In 2017, police had registered 288 anti-Semitic acts; in 2016, 197 anti-Semitic cases had been recorded. The Research and Information Center Antisemitism Berlin (RIAS) counts a total of 947 anti-Semitic incidents for 2017, but there may be unrecorded cases.
On 17 April 2018, Adam Armoush, a 21-year-old Israeli Arab living in Berlin, and his German friend of Moroccan origin went through a street in Prenzlauer Berg taking selfies while wearing kippahs. According to Armoush, he was trying to win an argument with Jewish friends of his in Israel by proving that he could walk through Berlin wearing a Jewish skullcap. At one point, an argument with three Arabic-speaking young men started, which was followed by a violent attack. Armoush managed to document the violence with his cell phone and later allowed the video to be shared on Facebook, from where it went viral. The video shows the main attacker, later identified as a 19-year-old Palestinian from a refugee camp in Syria who had come to Germany as a war refugee, beating Armoush repeatedly with his belt. During the onslaught, the attacker shouted the word "Yahudi", the Arabic word for Jew, before one of his companions intervened and dragged him away. According to Armoush, the main attacker had also tried to hurt him with a glass bottle. As a consequence of the beating, Armoush suffered minor injuries. Before turning himself in to police, the main attacker contacted a representative of the Palestinian student community of Berlin and appeared in a short video that was distributed on social media. In the video, he denied that he was hostile towards Jews or that his actions had been motivated by anti-Semitism.
The main attacker turned himself in accompanied by his lawyer the day after the incident. An arrest warrant against him was executed.
Several politicians, among them chancellor Angela Merkel and foreign minister Heiko Maas, condemned the attack. On Israeli television, Merkel warned of persistent hatred of Jews in Germany. She spoke of a "different form of anti-Semitism" coming from people of Arab descent. After this and other incidents, the President of the Central Council of Jews in Germany, Josef Schuster, warned Jewish men "not to show themselves openly with a kippah in the metropolitan milieu in Germany".
Under the slogan "Berlin Wears Kippah" the Jewish community in Berlin called all citizens to wear the kippah on 25 April in solidarity with the victims.
^ "Anti-Semitic crimes increase dramatically in Germany". Jewish Telegraphic Agency. 9 August 2018. Retrieved 9 August 2018.
^ Eddy, Melissa; Curry, Andrew (18 April 2018). "Fury in Germany as Rap Duo With Anti-Jewish Lyrics Gets Award". New York Times. Retrieved 19 April 2018.
^ "'Jewish Man' Attacked in Berlin Admits He's an Israeli-Arab Who Didn't Believe Germany Was anti-Semitic". Haaretz. DPA, AP. 18 April 2018. Retrieved 19 April 2018. |
[STATEMENT]
lemma span_minimal: "S \<subseteq> T \<Longrightarrow> subspace T \<Longrightarrow> span S \<subseteq> T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>S \<subseteq> T; subspace T\<rbrakk> \<Longrightarrow> span S \<subseteq> T
[PROOF STEP]
by (auto simp: span_explicit intro!: subspace_sum subspace_scale) |
SUQQU's new series is as widely anticipated as it is every season. I couldn't contain my excitement for the 2019 spring makeup series "Crepuscular Rays", launching on Jan. 18. Inspired by the mysterious natural light phenomenon of the same name, the new collection introduces the brand's first liquid blush and liquid eyeliner, in addition to a variety of spring-inspired colours. This time, we bring you first-hand, full-colour swatches of all the new products (excluding the nail polishes).
First of all, the brand's ultra-popular quad eyeshadow palette, "SUQQU DESIGNING COLOR EYES". SUQQU has always put effort into everything from naming its products down to its packaging to align with Asian beauty and its artistic direction. For starters, they've launched two new eyeshadow quads: ‘12 Genkouakane’, built around shades of cherry red and mint green, and the limited edition ‘124 Toukiyou’, whose bright yellow and icy blue give a bright and clean impression.
The eye shadows have a very fine pearl finish with strong pigmentation. The colour matching of the two eye shadow quads is suitable for creating a romantic spring look.
For “SUQQU GLOW TOUCH EYES” liquid eyeshadows, 3 new colours have been added to the lineup: ‘06 Kiramomo’, limited edition ‘104 Kibudou’ and ‘105 Aitou’.
All 3 colours are in vibrant metallics, pigmented, with just the right amount of liquid consistency that doesn’t drip.
"SUQQU COLOR INK LIQUID EYELINER" is a completely new product. It claims to deliver smooth application, is smudge proof, and easy to remove. There are 6 shades in total: ‘01 black’, ‘02 brown’, ‘03 red’, ‘04 navy’, ‘101 green (limited)’, and ‘102 gold (limited)’.
All 6 shades can be easily incorporated daily, and the tip of the brush gives you smooth precision. Colours ‘02 Brown’, ‘04 Navy Blue’ and ‘102 Shine Gold’ have a metallic finish.
Another new addition, “SUQQU SHIMMER LIQUID BLUSH”, comes in a total of 5 shades: ‘01 Usumomokurumi’, ‘02 Tsubaimomo’, ‘03 Mitsukoushi’, ‘101 Jakuzakura (limited edition)’. The formula combines small traces of pearl shimmer with four kinds of moisturizing beauty oils extracted from jojoba, olive, macadamia, and sunflower, creating an unearthly natural glow on the skin.
The five colours are perfect for spring, and shade ‘101 Jakuzakura (limited edition)’ contains larger traces of pearl shimmer than the others. All shades can be easily coordinated for daily use.
Lastly, the “SUQQU STAIN LIP LACQUER” is a new limited edition product line developed with an emphasis on vibrancy, while staying lightweight and comfortable on the lips. The shades are "101 Kiniji", "102 Urushiame", "103 Himeichigo", "104 Touhi", and "105 Hakuake".
The formula is moisturizing and, as it claims, bold in pigment. Shades "101 Kiniji" and "105 Hakuake" have an obvious pearl finish in comparison to the others. |
/-
Copyright (c) 2019 Abhimanyu Pallavi Sudhir. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Abhimanyu Pallavi Sudhir, Yury Kudryashov
-/
import order.filter.ultrafilter
import order.filter.germ
/-!
# Ultraproducts
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
If `φ` is an ultrafilter, then the space of germs of functions `f : α → β` at `φ` is called
the *ultraproduct*. In this file we prove properties of ultraproducts that rely on `φ` being an
ultrafilter. Definitions and properties that work for any filter should go to `order.filter.germ`.
## Tags
ultrafilter, ultraproduct
-/
universes u v
variables {α : Type u} {β : Type v} {φ : ultrafilter α}
open_locale classical
namespace filter
local notation `∀*` binders `, ` r:(scoped p, filter.eventually p φ) := r
namespace germ
open ultrafilter
local notation `β*` := germ (φ : filter α) β
instance [division_semiring β] : division_semiring β* :=
{ mul_inv_cancel := λ f, induction_on f $ λ f hf, coe_eq.2 $ (φ.em (λ y, f y = 0)).elim
(λ H, (hf $ coe_eq.2 H).elim) (λ H, H.mono $ λ x, mul_inv_cancel),
inv_zero := coe_eq.2 $ by simp only [(∘), inv_zero],
..germ.semiring, ..germ.div_inv_monoid, ..germ.nontrivial }
instance [division_ring β] : division_ring β* := { ..germ.ring, ..germ.division_semiring }
instance [semifield β] : semifield β* := { ..germ.comm_semiring, ..germ.division_semiring }
instance [field β] : field β* := { ..germ.comm_ring, ..germ.division_ring }
lemma coe_lt [preorder β] {f g : α → β} : (f : β*) < g ↔ ∀* x, f x < g x :=
by simp only [lt_iff_le_not_le, eventually_and, coe_le, eventually_not, eventually_le]
lemma coe_pos [preorder β] [has_zero β] {f : α → β} : 0 < (f : β*) ↔ ∀* x, 0 < f x := coe_lt
lemma const_lt [preorder β] {x y : β} : x < y → (↑x : β*) < ↑y := coe_lt.mpr ∘ lift_rel_const
@[simp, norm_cast]
lemma const_lt_iff [preorder β] {x y : β} : (↑x : β*) < ↑y ↔ x < y :=
coe_lt.trans lift_rel_const_iff
lemma lt_def [preorder β] : ((<) : β* → β* → Prop) = lift_rel (<) :=
by { ext ⟨f⟩ ⟨g⟩, exact coe_lt }
instance [has_sup β] : has_sup β* := ⟨map₂ (⊔)⟩
instance [has_inf β] : has_inf β* := ⟨map₂ (⊓)⟩
@[simp, norm_cast] lemma const_sup [has_sup β] (a b : β) : ↑(a ⊔ b) = (↑a ⊔ ↑b : β*) := rfl
@[simp, norm_cast] lemma const_inf [has_inf β] (a b : β) : ↑(a ⊓ b) = (↑a ⊓ ↑b : β*) := rfl
instance [semilattice_sup β] : semilattice_sup β* :=
{ sup := (⊔),
le_sup_left := λ f g, induction_on₂ f g $ λ f g,
eventually_of_forall $ λ x, le_sup_left,
le_sup_right := λ f g, induction_on₂ f g $ λ f g,
eventually_of_forall $ λ x, le_sup_right,
sup_le := λ f₁ f₂ g, induction_on₃ f₁ f₂ g $ λ f₁ f₂ g h₁ h₂,
h₂.mp $ h₁.mono $ λ x, sup_le,
.. germ.partial_order }
instance [semilattice_inf β] : semilattice_inf β* :=
{ inf := (⊓),
inf_le_left := λ f g, induction_on₂ f g $ λ f g,
eventually_of_forall $ λ x, inf_le_left,
inf_le_right := λ f g, induction_on₂ f g $ λ f g,
eventually_of_forall $ λ x, inf_le_right,
le_inf := λ f₁ f₂ g, induction_on₃ f₁ f₂ g $ λ f₁ f₂ g h₁ h₂,
h₂.mp $ h₁.mono $ λ x, le_inf,
.. germ.partial_order }
instance [lattice β] : lattice β* :=
{ .. germ.semilattice_sup, .. germ.semilattice_inf }
instance [distrib_lattice β] : distrib_lattice β* :=
{ le_sup_inf := λ f g h, induction_on₃ f g h $ λ f g h, eventually_of_forall $ λ _, le_sup_inf,
.. germ.semilattice_sup, .. germ.semilattice_inf }
instance [has_le β] [is_total β (≤)] : is_total β* (≤) :=
⟨λ f g, induction_on₂ f g $ λ f g, eventually_or.1 $ eventually_of_forall $ λ x, total_of _ _ _⟩
/-- If `φ` is an ultrafilter then the ultraproduct is a linear order. -/
noncomputable instance [linear_order β] : linear_order β* := lattice.to_linear_order _
@[to_additive]
instance [ordered_comm_monoid β] : ordered_comm_monoid β* :=
{ mul_le_mul_left := λ f g, induction_on₂ f g $ λ f g H h, induction_on h $ λ h,
H.mono $ λ x H, mul_le_mul_left' H _,
.. germ.partial_order, .. germ.comm_monoid }
@[to_additive]
instance [ordered_cancel_comm_monoid β] : ordered_cancel_comm_monoid β* :=
{ le_of_mul_le_mul_left := λ f g h, induction_on₃ f g h $ λ f g h H,
H.mono $ λ x, le_of_mul_le_mul_left',
.. germ.partial_order, .. germ.ordered_comm_monoid }
@[to_additive]
instance [ordered_comm_group β] : ordered_comm_group β* :=
{ .. germ.ordered_cancel_comm_monoid, .. germ.comm_group }
@[to_additive]
noncomputable instance [linear_ordered_comm_group β] : linear_ordered_comm_group β* :=
{ .. germ.ordered_comm_group, .. germ.linear_order }
instance [ordered_semiring β] : ordered_semiring β* :=
{ zero_le_one := const_le zero_le_one,
mul_le_mul_of_nonneg_left := λ x y z, induction_on₃ x y z $ λ f g h hfg hh, hh.mp $
hfg.mono $ λ a, mul_le_mul_of_nonneg_left,
mul_le_mul_of_nonneg_right := λ x y z, induction_on₃ x y z $ λ f g h hfg hh, hh.mp $
hfg.mono $ λ a, mul_le_mul_of_nonneg_right,
..germ.semiring, ..germ.ordered_add_comm_monoid }
instance [ordered_comm_semiring β] : ordered_comm_semiring β* :=
{ ..germ.ordered_semiring, ..germ.comm_semiring }
instance [ordered_ring β] : ordered_ring β* :=
{ zero_le_one := const_le zero_le_one,
mul_nonneg := λ x y, induction_on₂ x y $ λ f g hf hg, hg.mp $ hf.mono $ λ a, mul_nonneg,
..germ.ring, ..germ.ordered_add_comm_group }
instance [ordered_comm_ring β] : ordered_comm_ring β* :=
{ ..germ.ordered_ring, ..germ.ordered_comm_semiring }
instance [strict_ordered_semiring β] : strict_ordered_semiring β* :=
{ mul_lt_mul_of_pos_left := λ x y z, induction_on₃ x y z $ λ f g h hfg hh, coe_lt.2 $
(coe_lt.1 hh).mp $ (coe_lt.1 hfg).mono $ λ a, mul_lt_mul_of_pos_left,
mul_lt_mul_of_pos_right := λ x y z, induction_on₃ x y z $ λ f g h hfg hh, coe_lt.2 $
(coe_lt.1 hh).mp $ (coe_lt.1 hfg).mono $ λ a, mul_lt_mul_of_pos_right,
..germ.ordered_semiring, ..germ.ordered_cancel_add_comm_monoid, ..germ.nontrivial }
instance [strict_ordered_comm_semiring β] : strict_ordered_comm_semiring β* :=
{ .. germ.strict_ordered_semiring, ..germ.ordered_comm_semiring }
instance [strict_ordered_ring β] : strict_ordered_ring β* :=
{ zero_le_one := const_le zero_le_one,
mul_pos := λ x y, induction_on₂ x y $ λ f g hf hg, coe_pos.2 $
(coe_pos.1 hg).mp $ (coe_pos.1 hf).mono $ λ x, mul_pos,
..germ.ring, ..germ.strict_ordered_semiring }
instance [strict_ordered_comm_ring β] : strict_ordered_comm_ring β* :=
{ .. germ.strict_ordered_ring, ..germ.ordered_comm_ring }
noncomputable instance [linear_ordered_ring β] : linear_ordered_ring β* :=
{ ..germ.strict_ordered_ring, ..germ.linear_order }
noncomputable instance [linear_ordered_field β] : linear_ordered_field β* :=
{ .. germ.linear_ordered_ring, .. germ.field }
noncomputable instance [linear_ordered_comm_ring β] : linear_ordered_comm_ring β* :=
{ .. germ.linear_ordered_ring, .. germ.comm_monoid }
lemma min_def [K : linear_order β] (x y : β*) : min x y = map₂ min x y :=
induction_on₂ x y $ λ a b,
begin
cases le_total (a : β*) b,
{ rw [min_eq_left h, map₂_coe, coe_eq], exact h.mono (λ i hi, (min_eq_left hi).symm) },
{ rw [min_eq_right h, map₂_coe, coe_eq], exact h.mono (λ i hi, (min_eq_right hi).symm) }
end
lemma abs_def [linear_ordered_add_comm_group β] (x : β*) : |x| = map abs x :=
induction_on x $ λ a, by exact rfl
@[simp] lemma const_max [linear_order β] (x y : β) : (↑(max x y : β) : β*) = max ↑x ↑y :=
by rw [max_def, map₂_const]
@[simp] lemma const_min [linear_order β] (x y : β) : (↑(min x y : β) : β*) = min ↑x ↑y :=
by rw [min_def, map₂_const]
@[simp] lemma const_abs [linear_ordered_add_comm_group β] (x : β) :
(↑(|x|) : β*) = |↑x| :=
by rw [abs_def, map_const]
end germ
end filter
|
Adventist HealthCare seeks to hire a Med Surg Registered Nurse who will embrace our mission to extend God's care through the ministry of physical, mental and spiritual healing. If you want to make a difference in someone's life every day, consider a position with a team of professionals who are doing just that, making a difference.
• One year of current nursing experience within a hospital environment; prior Med Surg or Oncology experience preferred. Chemo certification required and provided by the organization at no cost. |
Describe Users/JaneNinja here.
20091102 12:54:12 nbsp Hi, Jane. I'm sorry to hear about your Lost Pets lost cat. Since it happened outside Davis, I added the info to the Woodland wiki:woodland Lost Pets and Winters wiki:winters Lost Pets pages. Users/robinlaughlin
|
// Created on October 26, 2013 by Lu, Wangshan.
/// This is the implementation file of the diffusion Shared Memory Writer.
/// For Writer and Reader maintainer:
/// Writer and Reader communicate with a shared memory file. File length is specified in writer's constructor.
/// Since the shared memory region contains an 8 byte header, the shared memory file length should be greater than 8.
/// The header contains a 4 byte signed integer at the beginning that indicates the writer's committed offset, starting immediately after the header.
/// The reader can read the header as a 4 byte signed integer to know if there is available data.
/// For a RawData object, the writer will write a 4 byte signed integer giving the size of the RawData's payload, followed by the payload itself.
/// The writer will wrap to the shared memory region's payload's beginning when it encounters the end of the region.
/// That means if the user specifies a length of 1000000 bytes in the writer's constructor, the payload region for writing data is 999992 bytes.
/// And if a RawData contains a payload of 28 bytes, it will take 4 + 28 = 32 bytes in the shared memory payload region.
#include <atomic>
#include <memory>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/mapped_region.hpp>
#include <diffusion/factory.hpp>
namespace diffusion {
extern constexpr Size kShmHeaderLength = 8; // Also used by shm_reader.cpp
class ShmWriter : public Writer {
public:
ShmWriter(std::string const &shm_name, Size shm_size);
virtual ~ShmWriter();
virtual void write(std::vector<char> const & data);
virtual void write(char const *data, std::size_t size);
private:
std::string shm_name_;
boost::interprocess::shared_memory_object shm_object_;
std::unique_ptr<boost::interprocess::mapped_region> shm_region_;
// const since first assignment.
char * shm_header_position_;
char * shm_body_position_;
Size shm_body_size_;
// variable.
std::uint32_t writer_shm_body_offset_;
void cyclic_write(char const *data, std::size_t size);
void commit();
};
Writer * create_shared_memory_writer(std::string const &shm_name, Size shm_size) {
return new ShmWriter(shm_name, shm_size);
}
ShmWriter::ShmWriter(std::string const & shm_name, Size shm_size)
: shm_name_(shm_name),
shm_object_(boost::interprocess::create_only, shm_name.c_str(), boost::interprocess::read_write),
writer_shm_body_offset_(0) {
shm_object_.truncate(shm_size);
shm_region_ = std::unique_ptr<boost::interprocess::mapped_region>(new boost::interprocess::mapped_region(shm_object_, boost::interprocess::read_write));
shm_header_position_ = reinterpret_cast<char *>(shm_region_->get_address());
shm_body_position_ = shm_header_position_ + kShmHeaderLength;
shm_body_size_ = shm_region_->get_size() - kShmHeaderLength;
}
ShmWriter::~ShmWriter() {
boost::interprocess::shared_memory_object::remove(shm_name_.c_str());
}
void ShmWriter::write(std::vector<char> const &data) {
this->write(data.data(), data.size());
}
void ShmWriter::write(char const *data, std::size_t size) {
auto size_prefix = static_cast<Size>(size);
this->cyclic_write(reinterpret_cast<char const *>(&size_prefix), sizeof(size_prefix));
this->cyclic_write(data, size);
this->commit();
}
void ShmWriter::cyclic_write(char const *data, std::size_t size) {
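// Copy the buffer in chunks, wrapping the write offset back to the start of
// the payload region whenever it reaches the end.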
auto bytes_left = static_cast<Size>(size);
while (bytes_left > 0) {
auto start_position_unwritten_bytes = data + size - bytes_left;
auto space_left = shm_body_size_ - static_cast<Size>(writer_shm_body_offset_);
auto bytes_can_be_written_without_wrap = (bytes_left > space_left) ? space_left : bytes_left;
std::memcpy(shm_body_position_ + static_cast<Offset>(writer_shm_body_offset_), start_position_unwritten_bytes, bytes_can_be_written_without_wrap);
writer_shm_body_offset_ += bytes_can_be_written_without_wrap;
bytes_left -= bytes_can_be_written_without_wrap;
if (static_cast<Size>(writer_shm_body_offset_) == shm_body_size_) {
writer_shm_body_offset_ = 0;
}
}
}
void ShmWriter::commit() {
auto shm_offset_position = reinterpret_cast<std::atomic<std::uint32_t> *>(shm_header_position_);
std::atomic_store_explicit(shm_offset_position, static_cast<std::uint32_t>(writer_shm_body_offset_), std::memory_order_release);
}
} // namespace diffusion
|
import data.set.finite
import tactic.induction
import tactic.linarith
import tactic.rcases
abbreviation atom := ℕ
@[derive [decidable_eq]]
structure literal :=
mk :: (atom : atom) (negated : bool)
@[instance] def literal.has_repr : has_repr literal :=
{
repr :=
λ x, match x with
| {atom := atom, negated := n} :=
"{literal . atom := " ++ repr atom ++ ", negated := " ++ repr n ++ "}"
end
}
def l_not : literal → literal
| {atom := a, negated := n} := {atom := a, negated := !n}
@[simp] lemma literal_neq : ∀ l, l ≠ l_not l := begin
intro l,
cases' l,
rw l_not,
simp,
intro h,
cases' negated;
finish,
end
@[simp] lemma l_inv : ∀ l, l_not (l_not l) = l := begin
intro l,
cases l,
rw l_not,
rw l_not,
simp,
end
@[simp] lemma not_preserves_atom : ∀ l, (l_not l).atom = l.atom := begin
intro l,
cases l,
rw l_not,
end
@[simp] lemma atom_must_be_equal : ∀ (a b : literal),
a = b → a.atom = b.atom :=
begin
intros a b,
cases' a,
cases' b,
simp,
intros h _,
assumption,
end
@[simp] lemma eq_atom_iff : ∀ (a b : literal),
a.atom = b.atom ↔ a = b ∨ a = l_not b :=
begin
intros a b,
apply iff.intro;
intro h;
cases' a;
cases' b;
simp at h,
{
rw h,
rw l_not,
simp,
cases' negated;
cases' negated_1;
simp,
},
{
rw l_not at h,
simp at h,
simp,
cases' h;
simp [h],
},
end
@[simp] lemma bool_neq_neg : ∀ (b : bool), b ≠ !b := begin
intro b,
cases' b;
simp,
end
@[simp] lemma l_not_neq : ∀ l, l ≠ l_not l := begin
intro l,
cases l,
rw l_not,
simp,
end
abbreviation clause := list literal
abbreviation formula := list clause
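-- An assignment is a consistent set of literals: it never contains a literal
-- together with its negation.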
def assignment := {s : list literal // ∀ l, l ∈ s → l_not l ∉ s}
@[instance] def assignment.has_mem :
has_mem literal assignment :=
{
mem := λ l as, l ∈ as.val
}
def clause_sat (a : assignment) (c : clause) := ∃ l, l ∈ a ∧ l ∈ c
def formula_sat (a : assignment) (f : formula) :=
∀ c, c ∈ f → clause_sat a c
def sat (f : formula) := ∃ a, formula_sat a f
def formula_size (f : formula) : ℕ := f.join.length
def non_empty_formula := {f : formula // f.join ≠ []}
def choice_func :=
{g : non_empty_formula → literal //
∀ (f : non_empty_formula),
g f ∈ f.val.join ∨ l_not (g f) ∈ f.val.join}
def non_empty_list {α : Type} := {l : list α // l ≠ []}
def extract_first {α : Type} : non_empty_list → α
| ⟨[], h⟩ := (h rfl).elim
| ⟨a::_, h⟩ := a
def naive_choice_func : choice_func := subtype.mk
(λ f, extract_first (subtype.mk f.val.join f.property))
(begin
intro f,
apply or.inl,
cases' joined : f.val.join,
{
apply classical.by_contradiction,
intros,
exact f.property joined,
},
{
let non_empty_first : non_empty_list := ⟨list.join f.val, f.property⟩,
have eq : non_empty_first = ⟨hd::x, by simp⟩ := begin
simp,
rw ←joined,
simp,
end,
simp only [non_empty_first] at eq,
rw eq,
rw extract_first,
rw joined,
simp,
},
end)
def example_sat_formula : formula := [[{atom := 3, negated := tt}]]
def example_unsat_formula : formula :=
[[{atom := 3, negated := tt}], [{atom := 3, negated := ff}]]
def example_complex_formula : formula := [
[{atom := 3, negated := tt}, {atom := 2, negated := ff}],
[{atom := 3, negated := tt}, {atom := 2, negated := ff}],
[{atom := 1, negated := tt}, {atom := 2, negated := ff}],
[{atom := 1, negated := tt}, {atom := 3, negated := ff}]
]
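-- A literal is pure in a formula if its negation occurs in no clause of it.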
def is_pure_literal (l : literal) (f: formula) : bool :=
∀ c ∈ f, l_not l ∉ c
def is_pure_literal' (l : literal) (f: formula) : bool :=
(l_not l) ∉ list.join f
@[simp]
lemma is_pure_literal_iff (l f) : is_pure_literal l f ↔ is_pure_literal' l f :=
by simp [is_pure_literal, is_pure_literal']
|
% op_concatSubspecs.m
% Jamie Near, McGill University 2014.
%
% USAGE:
% out=op_concatSubspecs(in1,in2);
%
% DESCRIPTION:
% Concatenate two scans along the subspecs dimension. Two scans with 2
% subspectra each will now look like a single scan with 4 subspectra.
%
% INPUTS:
% in1 = first input in matlab structure format.
% in2 = second input in matlab structure format.
%
% OUTPUTS:
% out = Output following concatenation along the subspecs dimension.
function out=op_concatSubspecs(in1,in2);
if in1.dims.subSpecs ~= in2.dims.subSpecs || in1.dims.t ~= in2.dims.t || in1.dims.coils ~= in2.dims.coils || in1.dims.averages ~=in2.dims.averages
error('subSpecs dimensions must be the same for both inputs');
end
%if subspecs dimension is zero, make a new dimension for them
if in1.dims.subSpecs==0
newSubspecsDim=max([in1.dims.t in1.dims.coils in1.dims.averages in1.dims.subSpecs in1.dims.extras])+1;
else
newSubspecsDim=in1.dims.subSpecs;
end
fids=cat(newSubspecsDim,in1.fids,in2.fids);
specs=cat(newSubspecsDim,in1.specs,in2.specs);
sz=size(fids);
%FILLING IN DATA STRUCTURE
out=in1;
out.fids=fids;
out.specs=specs;
out.sz=sz;
out.dims=in1.dims;
out.dims.subSpecs=newSubspecsDim;
out.rawSubspecs=in1.rawSubspecs+in2.rawSubspecs;
out.subspecs=in1.subspecs+in2.subspecs;
%FILLING IN THE FLAGS
out.flags=in1.flags;
out.flags.writtentostruct=1;
out.flags.subtracted=0;
|
State Before: F : Type ?u.81630
α : Type u_1
β : Type u_2
γ : Type ?u.81639
δ : Type ?u.81642
inst✝² : LE α
inst✝¹ : LE β
inst✝ : LE γ
e e' : α ≃o β
h : symm e = symm e'
⊢ e = e' State After: no goals Tactic: rw [← e.symm_symm, h, e'.symm_symm] |
lemma degree_mod_less: "y \<noteq> 0 \<Longrightarrow> x mod y = 0 \<or> degree (x mod y) < degree y" |
# Autoregressive flows and RealNVP
This reading contains an overview of normalizing flows, and introduces two popular normalizing flow models: masked autoregressive flow (MAF) and RealNVP.
You'll also learn about the considerations in different architectures and the tradeoff between computational complexity and learning power.
## Introduction
Before any theory, we'll discuss an example of how normalizing flows work. Suppose you have a standard normal distribution (mean 0, variance 1). It has a single mode at 0, so, even after scaling and shifting, it can't fit data with two modes well. However, you've seen how bijectors applied to distributions can create other distributions. A natural question is then: can we create a bimodal distribution (one with two modes) from a bijector applied to a standard normal distribution? It turns out that this is possible with the `Softsign` bijector. This is a differentiable approximation to the sign function (1 if $x$ is nonnegative, -1 if $x$ is negative). Passing a standard normal distribution through this bijector transforms the probability distribution as follows:
As you can see, the bijector created a bimodal distribution from a standard normal one! This is just one from a huge class of possible bijectors available in TensorFlow Probability. Furthermore, since you can chain them together, it's possible to create very complicated bijectors that change standard distributions (e.g. a normal) to very complicated ones. This is how a normalizing flow works: it creates a complicated distribution by applying a bijector to a simple, well-understood and computationally implemented distribution (such as a Gaussian). In this reading, you'll learn how this works and see some implementations from previous research.
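As a minimal sketch of this construction in TensorFlow Probability (illustrative rather than taken from the text above), we can wrap a standard normal in a `TransformedDistribution` with the `Softsign` bijector and evaluate a few log-densities:

```python
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

# Base distribution: a standard normal with a single mode at 0.
base = tfd.Normal(loc=0., scale=1.)

# Pushing it through the Softsign bijector yields a bimodal distribution
# supported on (-1, 1).
bimodal = tfd.TransformedDistribution(distribution=base, bijector=tfb.Softsign())

samples = bimodal.sample(5)            # draw from the transformed distribution
log_probs = bimodal.log_prob(samples)  # computed via the change of variables
```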
## Normalizing flows
### The one-dimensional case
The main idea of normalizing flows is to create a random variable $X$ (with complicated distribution $P$) by applying a bijector $f$ to a random variable $Z$ (with a simple distribution). For example, suppose $Z \sim N(0, 1)$ has a standard normal distribution. The goal is to find a bijector $f$ so that $ X = f(Z) \sim P $ for some target distribution $P$. Under this transformation, we can calculate the log-density using the change of variables equation:
$$ \log p(x) = \log p(z) - \log \left| \frac{\partial f}{\partial z}(z) \right| $$
where $z = f^{-1}(x)$.
Finding an $f$ that changes the distribution as required is not trivial: if the target distribution $P$ is very complex, a simple $f$ (such as a scale or shift) won't do the trick. However, we know that composing bijectors with one another creates more bijectors. Hence, one approach is to combine multiple simple bijectors $f_k$ to create a more complicated $f$:
$$ f = f_K \circ f_{K-1} \circ \ldots \circ f_1. $$
This series, where a base distribution is transformed by a series of bijectors after each other, is called a *normalizing flow*:
$$ z_0 = z$$
$$ z_k = f_k(z_{k-1}) \quad k=1, \ldots, K. $$
$$ x = z_K $$
Furthermore, the log-probability can be calculated by summing the contributions from each of the bijectors:
$$ \log p(x) = \log p(z) - \sum_{k=1}^K \log \left| \frac{\partial f_k}{\partial z_{k-1}}(z_{k-1}) \right| $$
This, however, still doesn't answer the question of how to construct the $f_k$. Usually, this is done by giving each $f_k$ some simple functional form, such as a scale and shift followed by a simple nonlinearity such as a sigmoid or ReLU. Each $f_k$ will have some parameters (such as the scale and shift values), and these can be learned via standard methods such as maximum likelihood estimation given some training data.
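As a concrete sketch of this idea (the choice of nonlinearity and the number of layers are illustrative assumptions, not prescriptions from the text), the snippet below chains a few trainable scale-and-shift bijectors, each followed by the smooth, invertible `Sinh` nonlinearity so that the support stays unbounded. Maximum likelihood training then just minimises the negative log-density of the data under the transformed distribution:

```python
import tensorflow as tf
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

# Three simple bijectors f_k: a trainable scale and shift followed by an
# invertible nonlinearity. tfb.Chain applies the last bijector in the list first.
layers = []
for k in range(3):
    layers += [tfb.Sinh(),
               tfb.Shift(tf.Variable(0., name=f'shift_{k}')),
               tfb.Scale(log_scale=tf.Variable(0., name=f'log_scale_{k}'))]

flow = tfd.TransformedDistribution(tfd.Normal(0., 1.), bijector=tfb.Chain(layers))

# One maximum-likelihood step on a (made-up) batch of training data:
x_batch = tf.constant([0.3, -1.2, 0.8])
with tf.GradientTape() as tape:
    loss = -tf.reduce_mean(flow.log_prob(x_batch))
grads = tape.gradient(loss, flow.trainable_variables)
```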
### The higher-dimensional case
The results above generalise straightforwardly to higher dimensions. Suppose that $\mathbf{z} \sim N(0, \mathbf{I})$ is distributed according to a multivariate unit Gaussian. The normalizing flow is then
$$ \mathbf{z}_0 = \mathbf{z}$$
$$ \mathbf{z}_k = \mathbf{f}_k(\mathbf{z}_{k-1}) \quad k=1, \ldots, K. $$
The log-probability involves the determinant of the transformation, as you'll remember from an earlier reading:
$$ \log p(\mathbf{x}) = \log p(\mathbf{z}) - \sum_{k=1}^K \log \left(\left| \det \left( \frac{\partial \mathbf{f}_k}{\partial \mathbf{z}_{k-1}}(\mathbf{z}_{k-1}) \right) \right|\right) $$
where we use the shorthand notation $\frac{\partial \mathbf{a}}{\partial \mathbf{b}}$ for the matrix with components $\frac{\partial \mathbf{a}_i}{\partial \mathbf{b}_j}$, where $i$ and $j$ index the components of $\mathbf{a}$ and $\mathbf{b}$ respectively.
Let's see an example of this from an [early research paper](https://arxiv.org/abs/1505.05770). In the figure below, the left column is the density of the target distribution $P$, and the right columns are the normalizing flow approximations with $K$=2, 8 and 32 bijectors (each with a simple form and some trainable parameters).
As you can see, the approximation improves as the number of bijectors in the flow increases.
The reason this is useful is that it allows us to "learn" a complex distribution from data and then manipulate it. For example, to draw a new sample from the learned distribution, simply draw $\mathbf{z}$ from a standard unit Gaussian and transform it to the correct space using the normalizing flow (the series of bijectors).
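In code, this sampling recipe is just a base draw pushed forward through the bijector; continuing the sketch above:

```python
# Sampling from the learned flow: draw z from the base Gaussian, then x = f(z).
z = flow.distribution.sample(1000)   # z ~ N(0, 1)
x = flow.bijector.forward(z)         # equivalent to flow.sample(1000)
```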
**Note**: Throughout this reading, we use the index $k$ to refer to the bijector in the normalizing flow and indices $i$ and $j$ to refer to dimensions of the probability space (from 1 to $D$). From here on, for clarity, we consider a normalizing flow formed of only one bijector (with $K=1$), so that we may drop the index $k$. The equation becomes $\mathbf{x} = \mathbf{f}(\mathbf{z})$. The reason for doing this is that we now use indices to refer to components of the vectors (e.g. $\mathbf{x} = [x_1, \ldots, x_D]^T$, where $D$ is the dimensionality). For normalizing flows with $K>1$, the results apply for each $k$.
### Computational concerns
The above theory provides, in principle, a framework to learn and manipulate complex distributions by building them up from a simple one. There is one key difficulty, however, when going to a practical implementation. This comes from the need to calculate the determinant $\left| \det \left( \frac{\partial \mathbf{f}}{\partial \mathbf{z}} \right) \right|$ to determine the density of the transformed variable $\mathbf{x}$. The computational cost (number of operations) to calculate a determinant for a general matrix with $D$ dimensions scales as $\mathcal{O}(D^3)$. This makes general normalizing flow density calculations intractable, and some simplifications, as outlined below, are required.
### Autoregressive flows
For some matrices, calculating a determinant is easy. For example, for a lower or upper triangular matrix, the determinant is the product of the diagonal elements, of which there are $D$, meaning the determinant calculation scales linearly. Hence, to attain a linear scaling of the determinant in the number of dimensions, it is enough to enforce that $\frac{\partial f_i}{\partial z_j} = 0$ whenever $j > i$. In other words, the component $f_i$ depends only on $z_1, \ldots z_i$.
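A quick numerical check of this claim, sketched in NumPy: for a triangular Jacobian, the log-determinant reduces to a sum over the diagonal, an $\mathcal{O}(D)$ computation.

```python
import numpy as np

rng = np.random.default_rng(1)
J = np.tril(rng.normal(size=(4, 4)))                 # a lower-triangular "Jacobian"

log_det_general = np.linalg.slogdet(J)[1]            # O(D^3) for a general matrix
log_det_diagonal = np.log(np.abs(np.diag(J))).sum()  # O(D) for a triangular one

assert np.isclose(log_det_general, log_det_diagonal)
```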
Autoregressive models can be reinterpreted as normalising flows that fulfil this requirement. These are models that factorise the joint density $p(\mathbf{x})$ as a product of conditionals $\prod_i p(x_i \mid \mathbf{x}_{1:i-1})$. For example, the conditionals could be parameterised as Gaussians:
$$
\begin{align}
p(x_i \mid\mathbf{x}_{1:i-1}) &= \mathcal{N}(x_i\mid\mu_i, \exp(\sigma_i)^2),\\
\text{where}\qquad \mu_i &= f_{\mu_i}(\mathbf{x}_{1:i-1})\\
\text{and}\qquad \sigma_i &= f_{\sigma_i}(\mathbf{x}_{1:i-1}).
\end{align}
$$
In the above equations, the mean and standard deviations of each conditional distribution are computed using (parameterised) functions of all previous variables. The above can alternatively be written as:
$$ x_i = \mu_i(\mathbf{x}_{1:i-1}) + \exp(\sigma_i(\mathbf{x}_{1:i-1})) z_i \quad \quad i=1, \ldots, D$$
where $z_i \sim N(0, 1)$ is sampled from a unit Gaussian. This last equation shows how the autoregressive model can be viewed as a transformation $f$ from the random variables $\mathbf{z}\in\mathbb{R}^D$ to the data $\mathbf{x}\in\mathbb{R}^D$.
This is an example of an *autoregressive* process where $x_i$ depends only on the components of $\mathbf{z}$ that are lower than or equal to $i$ but not any of the higher ones. The dependence on lower dimensions of $\mathbf{z}$ happens indirectly through the $x_i$ dependence in the $f_{\mu_i}$ and $f_{\sigma_i}$.
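The sequential nature of this transformation is easiest to see in a sketch. The conditioners `f_mu` and `f_sigma` below are placeholders for the parameterised functions above; their specific form is assumed purely for illustration.

```python
import numpy as np

def sample_autoregressive(f_mu, f_sigma, D, rng=None):
    """Sample x_i = mu_i(x_{1:i-1}) + exp(sigma_i(x_{1:i-1})) * z_i in turn."""
    rng = rng or np.random.default_rng(0)
    x = np.zeros(D)
    for i in range(D):              # necessarily sequential: x_i needs x_{1:i-1}
        z_i = rng.standard_normal()
        mu_i = f_mu(x[:i])          # depends only on earlier components
        sigma_i = f_sigma(x[:i])    # log-scale, as in the equations above
        x[i] = mu_i + np.exp(sigma_i) * z_i
    return x

# Toy, purely hypothetical choices for the conditioners:
x = sample_autoregressive(f_mu=lambda prev: prev.sum(),
                          f_sigma=lambda prev: 0.0, D=5)
```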
## Implementations
### Masked Autoregressive Flow (MAF)
An implementation of the above autoregressive flow appears in the following paper:
- George Papamakarios, Theo Pavlakou, Iain Murray (2017). [Masked Autoregressive Flow for Density Estimation](http://papers.nips.cc/paper/6828-masked-autoregressive-flow-for-density-estimation.pdf). In *Advances in Neural Information Processing Systems*, 2017.
Here, the authors use the above equations, with a masked autoencoder for distribution estimation ([MADE](http://proceedings.mlr.press/v37/germain15.pdf)) implementing the functions $f_{\mu_i}$ and $f_{\sigma_i}$. For clarity, let's see how $\mathbf{x}$ is sampled. This is done as follows:
1. $x_1 = f_{\mu_1} + \exp(f_{\sigma_1})z_1$ for $z_1 \sim N(0, 1)$
2. $x_2 = f_{\mu_2}(x_1) + \exp(f_{\sigma_2}(x_1))z_2$ for $z_2 \sim N(0, 1)$
3. $x_3 = f_{\mu_3}(x_1, x_2) + \exp(f_{\sigma_3}(x_1, x_2))z_3$ for $z_3 \sim N(0, 1)$
and so on. For the $f_{\mu_i}$ and $f_{\sigma_i}$, they use the same MADE network across the $i$, but mask the weights so that $x_i$ depends on $x_j$ for all $j<i$ but not any others. By re-using the same network, weights can be shared and the total number of parameters is significantly lower.
A note on computational complexity: determining $\mathbf{x}$ from $\mathbf{z}$ is relatively slow, since this must be done sequentially: first $x_1$, then $x_2$, and so on up to $x_D$. However, determining $\mathbf{z}$ from $\mathbf{x}$ is fast: each of the above equations can be solved for $z_i$ at the same time:
$$ z_i = \frac{x_i - f_{\mu_i}}{\exp(f_{\sigma_i})} \quad \quad i=1, \ldots, D$$
Hence, the *forward* pass through the bijector (sampling $\mathbf{x}$) is relatively slow, but the *inverse* pass (determining $\mathbf{z}$), which is used in the likelihood calculations that train the model, is fast.
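TensorFlow Probability ships both building blocks, so a minimal MAF looks roughly like the sketch below (hidden layer sizes are illustrative assumptions, not values from the paper). The fast direction is the inverse, exactly as described above.

```python
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

# A MADE network outputting two parameters (shift and log-scale) per dimension,
# with masked weights enforcing the autoregressive structure.
made = tfb.AutoregressiveNetwork(params=2, hidden_units=[32, 32])

maf = tfd.TransformedDistribution(
    distribution=tfd.Sample(tfd.Normal(0., 1.), sample_shape=[2]),  # 2-D base
    bijector=tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=made))

# maf.log_prob(x) uses the parallel inverse pass and is fast; maf.sample()
# must fill in the dimensions one after another and is comparatively slow.
```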
### Inverse Autoregressive Flow (IAF)
The inverse autoregressive flow reverses the dependencies to make the forward pass parallelisable but the inverse pass sequential. Details can be found in the following paper:
- Diederik Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling (2016). [Improved Variational Inference with Inverse Autoregressive Flow](http://papers.nips.cc/paper/6581-improved-variational-inference-with-inverse-autoregressive-flow.pdf). In *Advances in Neural Information Processing Systems*, 2016.
It uses the same equations:
$$ x_i = \mu_i + \exp(\sigma_i) z_i \quad \quad i=1, \ldots, D$$
but has the scale and shift functions depend on the $z_i$ instead of the $x_i$:
$$ \mu_i = f_{\mu_i}(z_1, \ldots, z_{i-1}) \quad \quad \sigma_i = f_{\sigma_i}(z_1, \ldots, z_{i-1}).$$
Note that now the forward equation (determining $\mathbf{x}$ from $\mathbf{z}$) can be parallelised, but the reverse transformations require determining $z_1$, followed by $z_2$, etc. and must hence be solved in sequence.
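Because IAF is exactly MAF with the two passes swapped, TFP expresses it by inverting a MAF bijector; this sketch reuses the imports and illustrative sizes from the MAF example above.

```python
# An IAF bijector: invert a MAF, swapping which direction is parallel.
iaf = tfb.Invert(tfb.MaskedAutoregressiveFlow(
    shift_and_log_scale_fn=tfb.AutoregressiveNetwork(params=2,
                                                     hidden_units=[32, 32])))
# Now sampling is the parallel direction, while log_prob is sequential.
```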
### Real-NVP and NICE
A further simplification of these approaches can be found in these papers:
- Laurent Dinh, Jascha Sohl-Dickstein, Samy Bengio (2016). [Density estimation using Real NVP](https://arxiv.org/abs/1605.08803).
- Laurent Dinh, David Krueger, Yoshua Bengio (2014). [NICE: Non-linear Independent Components Estimation](https://arxiv.org/abs/1410.8516).
The first uses a reduced version of the above equations, for some chosen $d$:
$$
\begin{align}
x_i &= z_i \qquad &i = 1, \ldots, d \\
x_i &= \mu_i + \exp(\sigma_i) z_i \qquad &i = d+1, \ldots D
\end{align}
$$
where
$$
\begin{align}
\mu_i &= f_{\mu_i}(z_1, \ldots, z_{d})\\
\sigma_i &= f_{\sigma_i}(z_1, \ldots, z_{d})
\end{align}
$$
Hence, nothing happens for the first $d$ dimensions, but these first $d$ components of $\mathbf{z}$ determine how the remaining $D-d$ components are transformed. Note that, in this case, both the forward and backward pass of the flow can be done fully in parallel. The second paper is even simpler, and omits the scale term altogether.
There is, of course, a catch: such a simple form means the flow typically needs a lot of bijections (a high $K$ value) to be able to describe complicated distributions. Furthermore, the dimensions that are transformed ($D-d$ in total) and not transformed ($d$ in total) must be permuted in the different bijections: otherwise the first $d$ dimensions of $\mathbf{z}$ are never changed throughout the whole normalizing flow, which greatly limits the expressive power. You'll be creating such a normalizing flow in this week's programming assignment.
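As a sketch of this construction in TFP (sizes and the permutation are illustrative assumptions), the snippet below stacks coupling layers and interleaves permutations so that every dimension is eventually transformed, just as discussed above.

```python
import tensorflow_probability as tfp

tfd, tfb = tfp.distributions, tfp.bijectors

D, d = 4, 2
layers = []
for _ in range(3):
    layers.append(tfb.RealNVP(
        num_masked=d,
        shift_and_log_scale_fn=tfb.real_nvp_default_template(
            hidden_layers=[32, 32])))
    layers.append(tfb.Permute(permutation=[2, 3, 0, 1]))  # rotate the dimensions

realnvp = tfd.TransformedDistribution(
    distribution=tfd.Sample(tfd.Normal(0., 1.), sample_shape=[D]),
    bijector=tfb.Chain(layers))
```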
### Further reading and resources
Besides the papers cited above, there are two great blog posts that explain that material as well:
- [Normalizing Flows](http://akosiorek.github.io/ml/2018/04/03/norm_flows.html) by Adam Kosiorek
- [Normalizing Flows Tutorial](https://blog.evjang.com/2018/01/nf1.html) by Eric Jang.
Both of these offer slightly more detail than we have space for here. They also have some great visuals. Happy reading and have fun implementing these ideas in the next few lessons!
|
section \<open>Terms and Literals\<close>
theory TermsAndLiterals imports Main "HOL-Library.Countable_Set" begin
type_synonym var_sym = string
type_synonym fun_sym = string
type_synonym pred_sym = string
datatype fterm =
Fun fun_sym (get_sub_terms: "fterm list")
| Var var_sym
datatype hterm = HFun fun_sym "hterm list" \<comment> \<open>Herbrand terms defined as in Berghofer's FOL-Fitting\<close>
type_synonym 't atom = "pred_sym * 't list"
datatype 't literal =
sign: Pos (get_pred: pred_sym) (get_terms: "'t list")
| Neg (get_pred: pred_sym) (get_terms: "'t list")
fun get_atom :: "'t literal \<Rightarrow> 't atom" where
"get_atom (Pos p ts) = (p, ts)"
| "get_atom (Neg p ts) = (p, ts)"
subsection \<open>Ground\<close>
fun ground\<^sub>t :: "fterm \<Rightarrow> bool" where
"ground\<^sub>t (Var x) \<longleftrightarrow> False"
| "ground\<^sub>t (Fun f ts) \<longleftrightarrow> (\<forall>t \<in> set ts. ground\<^sub>t t)"
abbreviation ground\<^sub>t\<^sub>s :: "fterm list \<Rightarrow> bool" where
"ground\<^sub>t\<^sub>s ts \<equiv> (\<forall>t \<in> set ts. ground\<^sub>t t)"
abbreviation ground\<^sub>l :: "fterm literal \<Rightarrow> bool" where
"ground\<^sub>l l \<equiv> ground\<^sub>t\<^sub>s (get_terms l)"
abbreviation ground\<^sub>l\<^sub>s :: "fterm literal set \<Rightarrow> bool" where
"ground\<^sub>l\<^sub>s C \<equiv> (\<forall>l \<in> C. ground\<^sub>l l)"
definition ground_fatoms :: "fterm atom set" where
"ground_fatoms \<equiv> {a. ground\<^sub>t\<^sub>s (snd a)}"
lemma ground\<^sub>l_ground_fatom:
assumes "ground\<^sub>l l"
shows "get_atom l \<in> ground_fatoms"
using assms unfolding ground_fatoms_def by (induction l) auto
subsection \<open>Auxiliary\<close>
lemma inv_into_f_f:
assumes "bij_betw f A B"
assumes "a\<in>A"
shows "(inv_into A f) (f a) = a"
using assms bij_betw_inv_into_left by metis
lemma f_inv_into_f:
assumes "bij_betw f A B"
assumes "b\<in>B"
shows "f ((inv_into A f) b) = b"
using assms bij_betw_inv_into_right by metis
subsection \<open>Conversions\<close>
subsubsection \<open>Conversions - Terms and Herbrand Terms\<close>
fun fterm_of_hterm :: "hterm \<Rightarrow> fterm" where
"fterm_of_hterm (HFun p ts) = Fun p (map fterm_of_hterm ts)"
definition fterms_of_hterms :: "hterm list \<Rightarrow> fterm list" where
"fterms_of_hterms ts \<equiv> map fterm_of_hterm ts"
fun hterm_of_fterm :: "fterm \<Rightarrow> hterm" where
"hterm_of_fterm (Fun p ts) = HFun p (map hterm_of_fterm ts)"
definition hterms_of_fterms :: "fterm list \<Rightarrow> hterm list" where
"hterms_of_fterms ts \<equiv> map hterm_of_fterm ts"
lemma hterm_of_fterm_fterm_of_hterm[simp]: "hterm_of_fterm (fterm_of_hterm t) = t"
by (induction t) (simp add: map_idI)
lemma hterms_of_fterms_fterms_of_hterms[simp]: "hterms_of_fterms (fterms_of_hterms ts) = ts"
unfolding hterms_of_fterms_def fterms_of_hterms_def by (simp add: map_idI)
lemma fterm_of_hterm_hterm_of_fterm[simp]:
assumes "ground\<^sub>t t"
shows "fterm_of_hterm (hterm_of_fterm t) = t"
using assms by (induction t) (auto simp add: map_idI)
lemma fterms_of_hterms_hterms_of_fterms[simp]:
assumes "ground\<^sub>t\<^sub>s ts"
shows "fterms_of_hterms (hterms_of_fterms ts) = ts"
using assms unfolding fterms_of_hterms_def hterms_of_fterms_def by (simp add: map_idI)
lemma ground_fterm_of_hterm: "ground\<^sub>t (fterm_of_hterm t)"
by (induction t) (auto simp add: map_idI)
lemma ground_fterms_of_hterms: "ground\<^sub>t\<^sub>s (fterms_of_hterms ts)"
unfolding fterms_of_hterms_def using ground_fterm_of_hterm by auto
subsubsection \<open>Conversions - Literals and Herbrand Literals\<close>
fun flit_of_hlit :: "hterm literal \<Rightarrow> fterm literal" where
"flit_of_hlit (Pos p ts) = Pos p (fterms_of_hterms ts)"
| "flit_of_hlit (Neg p ts) = Neg p (fterms_of_hterms ts)"
fun hlit_of_flit :: "fterm literal \<Rightarrow> hterm literal" where
"hlit_of_flit (Pos p ts) = Pos p (hterms_of_fterms ts)"
| "hlit_of_flit (Neg p ts) = Neg p (hterms_of_fterms ts)"
lemma ground_flit_of_hlit: "ground\<^sub>l (flit_of_hlit l)"
by (induction l) (simp add: ground_fterms_of_hterms)+
theorem flit_of_hlit_hlit_of_flit [simp]:
assumes "ground\<^sub>l l"
shows "flit_of_hlit (hlit_of_flit l) = l"
using assms by (cases l) auto
lemma sign_flit_of_hlit: "sign (flit_of_hlit l) = sign l" by (cases l) auto
lemma hlit_of_flit_bij: "bij_betw hlit_of_flit {l. ground\<^sub>l l} UNIV"
unfolding bij_betw_def
proof
show "inj_on hlit_of_flit {l. ground\<^sub>l l}" using inj_on_inverseI flit_of_hlit_hlit_of_flit
by (metis (mono_tags, lifting) mem_Collect_eq)
next
have "\<forall>l. \<exists>l'. ground\<^sub>l l' \<and> l = hlit_of_flit l'"
using ground_flit_of_hlit hlit_of_flit_flit_of_hlit by metis
then show "hlit_of_flit ` {l. ground\<^sub>l l} = UNIV" by auto
qed
lemma flit_of_hlit_bij: "bij_betw flit_of_hlit UNIV {l. ground\<^sub>l l}"
unfolding bij_betw_def inj_on_def
proof
show "\<forall>x\<in>UNIV. \<forall>y\<in>UNIV. flit_of_hlit x = flit_of_hlit y \<longrightarrow> x = y"
using ground_flit_of_hlit hlit_of_flit_flit_of_hlit by metis
next
have "\<forall>l. ground\<^sub>l l \<longrightarrow> (l = flit_of_hlit (hlit_of_flit l))" using hlit_of_flit_flit_of_hlit by auto
then have "{l. ground\<^sub>l l} \<subseteq> flit_of_hlit ` UNIV " by blast
moreover
have "\<forall>l. ground\<^sub>l (flit_of_hlit l)" using ground_flit_of_hlit by auto
ultimately show "flit_of_hlit ` UNIV = {l. ground\<^sub>l l}" using hlit_of_flit_flit_of_hlit ground_flit_of_hlit by auto
qed
subsubsection \<open>Conversions - Atoms and Herbrand Atoms\<close>
fun fatom_of_hatom :: "hterm atom \<Rightarrow> fterm atom" where
"fatom_of_hatom (p, ts) = (p, fterms_of_hterms ts)"
fun hatom_of_fatom :: "fterm atom \<Rightarrow> hterm atom" where
"hatom_of_fatom (p, ts) = (p, hterms_of_fterms ts)"
lemma ground_fatom_of_hatom: "ground\<^sub>t\<^sub>s (snd (fatom_of_hatom a))"
by (induction a) (simp add: ground_fterms_of_hterms)+
theorem hatom_of_fatom_fatom_of_hatom [simp]: "hatom_of_fatom (fatom_of_hatom l) = l"
by (cases l) auto
theorem fatom_of_hatom_hatom_of_fatom [simp]:
assumes "ground\<^sub>t\<^sub>s (snd l)"
shows "fatom_of_hatom (hatom_of_fatom l) = l"
using assms by (cases l) auto
lemma hatom_of_fatom_bij: "bij_betw hatom_of_fatom ground_fatoms UNIV"
unfolding bij_betw_def
proof
show "inj_on hatom_of_fatom ground_fatoms" using inj_on_inverseI fatom_of_hatom_hatom_of_fatom unfolding ground_fatoms_def
by (metis (mono_tags, lifting) mem_Collect_eq)
next
have "\<forall>a. \<exists>a'. ground\<^sub>t\<^sub>s (snd a') \<and> a = hatom_of_fatom a'"
using ground_fatom_of_hatom hatom_of_fatom_fatom_of_hatom by metis
then show "hatom_of_fatom ` ground_fatoms = UNIV" unfolding ground_fatoms_def by blast
qed
lemma fatom_of_hatom_bij: "bij_betw fatom_of_hatom UNIV ground_fatoms"
unfolding bij_betw_def inj_on_def
proof
show "\<forall>x\<in>UNIV. \<forall>y\<in>UNIV. fatom_of_hatom x = fatom_of_hatom y \<longrightarrow> x = y"
using ground_fatom_of_hatom hatom_of_fatom_fatom_of_hatom by metis
next
have "\<forall>a. ground\<^sub>t\<^sub>s (snd a) \<longrightarrow> (a = fatom_of_hatom (hatom_of_fatom a))" using hatom_of_fatom_fatom_of_hatom by auto
then have "ground_fatoms \<subseteq> fatom_of_hatom ` UNIV " unfolding ground_fatoms_def by blast
moreover
have "\<forall>l. ground\<^sub>t\<^sub>s (snd (fatom_of_hatom l))" using ground_fatom_of_hatom by auto
ultimately show "fatom_of_hatom ` UNIV = ground_fatoms"
using hatom_of_fatom_fatom_of_hatom ground_fatom_of_hatom unfolding ground_fatoms_def by auto
qed
subsection \<open>Enumerations\<close>
subsubsection \<open>Enumerating Strings\<close>
definition nat_of_string:: "string \<Rightarrow> nat" where
"nat_of_string \<equiv> (SOME f. bij f)"
definition string_of_nat:: "nat \<Rightarrow> string" where
"string_of_nat \<equiv> inv nat_of_string"
lemma nat_of_string_bij: "bij nat_of_string"
proof -
have "countable (UNIV::string set)" by auto
moreover
have "infinite (UNIV::string set)" using infinite_UNIV_listI by auto
ultimately
obtain x where "bij (x:: string \<Rightarrow> nat)" using countableE_infinite[of UNIV] by blast
then show "?thesis" unfolding nat_of_string_def using someI by metis
qed
lemma string_of_nat_bij: "bij string_of_nat" unfolding string_of_nat_def using nat_of_string_bij bij_betw_inv_into by auto
lemma nat_of_string_string_of_nat[simp]: "nat_of_string (string_of_nat n) = n"
unfolding string_of_nat_def
using nat_of_string_bij f_inv_into_f[of nat_of_string] by simp
lemma string_of_nat_nat_of_string[simp]: "string_of_nat (nat_of_string n) = n"
unfolding string_of_nat_def
using nat_of_string_bij inv_into_f_f[of nat_of_string] by simp
subsubsection \<open>Enumerating Herbrand Atoms\<close>
definition nat_of_hatom:: "hterm atom \<Rightarrow> nat" where
"nat_of_hatom \<equiv> (SOME f. bij f)"
definition hatom_of_nat:: "nat \<Rightarrow> hterm atom" where
"hatom_of_nat \<equiv> inv nat_of_hatom"
instantiation hterm :: countable begin
instance by countable_datatype
end
lemma infinite_hatoms: "infinite (UNIV :: ('t atom) set)"
proof -
let ?diago = "\<lambda>n. (string_of_nat n,[])"
let ?undiago = "\<lambda>a. nat_of_string (fst a)"
have "\<forall>n. ?undiago (?diago n) = n" using nat_of_string_string_of_nat by auto
moreover
have "\<forall>n. ?diago n \<in> UNIV" by auto
ultimately show "infinite (UNIV :: ('t atom) set)" using infinity[of ?undiago ?diago UNIV] by simp
qed
lemma nat_of_hatom_bij: "bij nat_of_hatom"
proof -
let ?S = "UNIV :: (('t::countable) atom) set"
have "countable ?S" by auto
moreover
have "infinite ?S" using infinite_hatoms by auto
ultimately
obtain x where "bij (x :: hterm atom \<Rightarrow> nat)" using countableE_infinite[of ?S] by blast
then have "bij nat_of_hatom" unfolding nat_of_hatom_def using someI by metis
then show "?thesis" unfolding bij_betw_def inj_on_def unfolding nat_of_hatom_def by simp
qed
lemma hatom_of_nat_bij: "bij hatom_of_nat" unfolding hatom_of_nat_def using nat_of_hatom_bij bij_betw_inv_into by auto
lemma nat_of_hatom_hatom_of_nat[simp]: "nat_of_hatom (hatom_of_nat n) = n"
unfolding hatom_of_nat_def
using nat_of_hatom_bij f_inv_into_f[of nat_of_hatom] by simp
lemma hatom_of_nat_nat_of_hatom[simp]: "hatom_of_nat (nat_of_hatom l) = l"
unfolding hatom_of_nat_def
using nat_of_hatom_bij inv_into_f_f[of nat_of_hatom _ UNIV] by simp
subsubsection \<open>Enumerating Ground Atoms\<close>
definition fatom_of_nat :: "nat \<Rightarrow> fterm atom" where
"fatom_of_nat = (\<lambda>n. fatom_of_hatom (hatom_of_nat n))"
definition nat_of_fatom :: "fterm atom \<Rightarrow> nat" where
"nat_of_fatom = (\<lambda>t. nat_of_hatom (hatom_of_fatom t))"
theorem diag_undiag_fatom[simp]:
assumes "ground\<^sub>t\<^sub>s ts"
shows "fatom_of_nat (nat_of_fatom (p,ts)) = (p,ts)"
using assms unfolding fatom_of_nat_def nat_of_fatom_def by auto
theorem undiag_diag_fatom[simp]: "nat_of_fatom (fatom_of_nat n) = n" unfolding fatom_of_nat_def nat_of_fatom_def by auto
lemma fatom_of_nat_bij: "bij_betw fatom_of_nat UNIV ground_fatoms"
using hatom_of_nat_bij bij_betw_trans fatom_of_hatom_bij hatom_of_nat_bij unfolding fatom_of_nat_def comp_def by blast
lemma ground_fatom_of_nat: "ground\<^sub>t\<^sub>s (snd (fatom_of_nat x))" unfolding fatom_of_nat_def using ground_fatom_of_hatom by auto
lemma nat_of_fatom_bij: "bij_betw nat_of_fatom ground_fatoms UNIV"
using nat_of_hatom_bij bij_betw_trans hatom_of_fatom_bij hatom_of_nat_bij unfolding nat_of_fatom_def comp_def by blast
end
|
Require Import Coq.Arith.Wf_nat.
Require Import Coq.Logic.FunctionalExtensionality.
Require Import Coq.Program.Equality.
Require Export Metalib.Metatheory.
Require Export Metalib.LibLNgen.
Require Export syntax_ott.
(** NOTE: Auxiliary theorems are hidden in generated documentation.
In general, there is a [_rec] version of every lemma involving
[open] and [close]. *)
(* *********************************************************************** *)
(** * Induction principles for nonterminals *)
Scheme dexp_ind' := Induction for dexp Sort Prop.
Definition dexp_mutind :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 H10 =>
dexp_ind' H1 H2 H3 H4 H5 H6 H7 H8 H9 H10.
Scheme dexp_rec' := Induction for dexp Sort Set.
Definition dexp_mutrec :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 H10 =>
dexp_rec' H1 H2 H3 H4 H5 H6 H7 H8 H9 H10.
(* *********************************************************************** *)
(** * Close *)
Fixpoint close_dexp_wrt_dexp_rec (n1 : nat) (x1 : var) (ee1 : dexp) {struct ee1} : dexp :=
match ee1 with
| de_var_f x2 => if (x1 == x2) then (de_var_b n1) else (de_var_f x2)
| de_var_b n2 => if (lt_ge_dec n2 n1) then (de_var_b n2) else (de_var_b (S n2))
| de_top => de_top
| de_lit i1 => de_lit i1
| de_abs ee2 => de_abs (close_dexp_wrt_dexp_rec (S n1) x1 ee2)
| de_app ee2 ee3 => de_app (close_dexp_wrt_dexp_rec n1 x1 ee2) (close_dexp_wrt_dexp_rec n1 x1 ee3)
| de_merge ee2 ee3 => de_merge (close_dexp_wrt_dexp_rec n1 x1 ee2) (close_dexp_wrt_dexp_rec n1 x1 ee3)
| de_ann ee2 A => de_ann (close_dexp_wrt_dexp_rec n1 x1 ee2) A
| de_fixpoint ee2 => de_fixpoint (close_dexp_wrt_dexp_rec (S n1) x1 ee2)
end.
Definition close_dexp_wrt_dexp x1 ee1 := close_dexp_wrt_dexp_rec 0 x1 ee1.
(* *********************************************************************** *)
(** * Size *)
Fixpoint size_dexp (ee1 : dexp) {struct ee1} : nat :=
match ee1 with
| de_var_f x1 => 1
| de_var_b n1 => 1
| de_top => 1
| de_lit i1 => 1
| de_abs ee2 => 1 + (size_dexp ee2)
| de_app ee2 ee3 => 1 + (size_dexp ee2) + (size_dexp ee3)
| de_merge ee2 ee3 => 1 + (size_dexp ee2) + (size_dexp ee3)
| de_ann ee2 A => 1 + (size_dexp ee2)
| de_fixpoint ee2 => 1 + (size_dexp ee2)
end.
(* *********************************************************************** *)
(** * Degree *)
(** These define only an upper bound, not a strict upper bound. *)
Inductive degree_dexp_wrt_dexp : nat -> dexp -> Prop :=
| degree_wrt_dexp_de_var_f : forall n1 x1,
degree_dexp_wrt_dexp n1 (de_var_f x1)
| degree_wrt_dexp_de_var_b : forall n1 n2,
lt n2 n1 ->
degree_dexp_wrt_dexp n1 (de_var_b n2)
| degree_wrt_dexp_de_top : forall n1,
degree_dexp_wrt_dexp n1 (de_top)
| degree_wrt_dexp_de_lit : forall n1 i1,
degree_dexp_wrt_dexp n1 (de_lit i1)
| degree_wrt_dexp_de_abs : forall n1 ee1,
degree_dexp_wrt_dexp (S n1) ee1 ->
degree_dexp_wrt_dexp n1 (de_abs ee1)
| degree_wrt_dexp_de_app : forall n1 ee1 ee2,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (de_app ee1 ee2)
| degree_wrt_dexp_de_merge : forall n1 ee1 ee2,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (de_merge ee1 ee2)
| degree_wrt_dexp_de_ann : forall n1 ee1 A,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp n1 (de_ann ee1 A)
| degree_wrt_dexp_de_fixpoint : forall n1 ee1,
degree_dexp_wrt_dexp (S n1) ee1 ->
degree_dexp_wrt_dexp n1 (de_fixpoint ee1).
Scheme degree_dexp_wrt_dexp_ind' := Induction for degree_dexp_wrt_dexp Sort Prop.
Definition degree_dexp_wrt_dexp_mutind :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 H10 =>
degree_dexp_wrt_dexp_ind' H1 H2 H3 H4 H5 H6 H7 H8 H9 H10.
Hint Constructors degree_dexp_wrt_dexp : core lngen.
(* *********************************************************************** *)
(** * Local closure (version in [Set], induction principles) *)
Inductive lc_set_dexp : dexp -> Set :=
| lc_set_de_var_f : forall x1,
lc_set_dexp (de_var_f x1)
| lc_set_de_top :
lc_set_dexp (de_top)
| lc_set_de_lit : forall i1,
lc_set_dexp (de_lit i1)
| lc_set_de_abs : forall ee1,
(forall x1 : var, lc_set_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1))) ->
lc_set_dexp (de_abs ee1)
| lc_set_de_app : forall ee1 ee2,
lc_set_dexp ee1 ->
lc_set_dexp ee2 ->
lc_set_dexp (de_app ee1 ee2)
| lc_set_de_merge : forall ee1 ee2,
lc_set_dexp ee1 ->
lc_set_dexp ee2 ->
lc_set_dexp (de_merge ee1 ee2)
| lc_set_de_ann : forall ee1 A,
lc_set_dexp ee1 ->
lc_set_dexp (de_ann ee1 A)
| lc_set_de_fixpoint : forall ee1,
(forall x1 : var, lc_set_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1))) ->
lc_set_dexp (de_fixpoint ee1).
Scheme lc_dexp_ind' := Induction for lc_dexp Sort Prop.
Definition lc_dexp_mutind :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 =>
lc_dexp_ind' H1 H2 H3 H4 H5 H6 H7 H8 H9.
Scheme lc_set_dexp_ind' := Induction for lc_set_dexp Sort Prop.
Definition lc_set_dexp_mutind :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 =>
lc_set_dexp_ind' H1 H2 H3 H4 H5 H6 H7 H8 H9.
Scheme lc_set_dexp_rec' := Induction for lc_set_dexp Sort Set.
Definition lc_set_dexp_mutrec :=
fun H1 H2 H3 H4 H5 H6 H7 H8 H9 =>
lc_set_dexp_rec' H1 H2 H3 H4 H5 H6 H7 H8 H9.
Hint Constructors lc_dexp : core lngen.
Hint Constructors lc_set_dexp : core lngen.
(* *********************************************************************** *)
(** * Body *)
Definition body_dexp_wrt_dexp ee1 := forall x1, lc_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1)).
Hint Unfold body_dexp_wrt_dexp : core.
(* *********************************************************************** *)
(** * Tactic support *)
(** Additional hint declarations. *)
Hint Resolve @plus_le_compat : lngen.
(** Redefine some tactics. *)
Ltac default_case_split ::=
first
[ progress destruct_notin
| progress destruct_sum
| progress safe_f_equal
].
(* *********************************************************************** *)
(** * Theorems about [size] *)
Ltac default_auto ::= auto with arith lngen; tauto.
Ltac default_autorewrite ::= fail.
(* begin hide *)
Lemma size_dexp_min_mutual :
(forall ee1, 1 <= size_dexp ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma size_dexp_min :
forall ee1, 1 <= size_dexp ee1.
Proof.
pose proof size_dexp_min_mutual as H; intuition eauto.
Qed.
Hint Resolve size_dexp_min : lngen.
(* begin hide *)
Lemma size_dexp_close_dexp_wrt_dexp_rec_mutual :
(forall ee1 x1 n1,
size_dexp (close_dexp_wrt_dexp_rec n1 x1 ee1) = size_dexp ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma size_dexp_close_dexp_wrt_dexp_rec :
forall ee1 x1 n1,
size_dexp (close_dexp_wrt_dexp_rec n1 x1 ee1) = size_dexp ee1.
Proof.
pose proof size_dexp_close_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve size_dexp_close_dexp_wrt_dexp_rec : lngen.
Hint Rewrite size_dexp_close_dexp_wrt_dexp_rec using solve [auto] : lngen.
(* end hide *)
Lemma size_dexp_close_dexp_wrt_dexp :
forall ee1 x1,
size_dexp (close_dexp_wrt_dexp x1 ee1) = size_dexp ee1.
Proof.
unfold close_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve size_dexp_close_dexp_wrt_dexp : lngen.
Hint Rewrite size_dexp_close_dexp_wrt_dexp using solve [auto] : lngen.
(* begin hide *)
Lemma size_dexp_open_dexp_wrt_dexp_rec_mutual :
(forall ee1 ee2 n1,
size_dexp ee1 <= size_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma size_dexp_open_dexp_wrt_dexp_rec :
forall ee1 ee2 n1,
size_dexp ee1 <= size_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1).
Proof.
pose proof size_dexp_open_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve size_dexp_open_dexp_wrt_dexp_rec : lngen.
(* end hide *)
Lemma size_dexp_open_dexp_wrt_dexp :
forall ee1 ee2,
size_dexp ee1 <= size_dexp (open_dexp_wrt_dexp ee1 ee2).
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve size_dexp_open_dexp_wrt_dexp : lngen.
(* begin hide *)
Lemma size_dexp_open_dexp_wrt_dexp_rec_var_mutual :
(forall ee1 x1 n1,
size_dexp (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1) = size_dexp ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma size_dexp_open_dexp_wrt_dexp_rec_var :
forall ee1 x1 n1,
size_dexp (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1) = size_dexp ee1.
Proof.
pose proof size_dexp_open_dexp_wrt_dexp_rec_var_mutual as H; intuition eauto.
Qed.
Hint Resolve size_dexp_open_dexp_wrt_dexp_rec_var : lngen.
Hint Rewrite size_dexp_open_dexp_wrt_dexp_rec_var using solve [auto] : lngen.
(* end hide *)
Lemma size_dexp_open_dexp_wrt_dexp_var :
forall ee1 x1,
size_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1)) = size_dexp ee1.
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve size_dexp_open_dexp_wrt_dexp_var : lngen.
Hint Rewrite size_dexp_open_dexp_wrt_dexp_var using solve [auto] : lngen.
(* *********************************************************************** *)
(** * Theorems about [degree] *)
Ltac default_auto ::= auto with lngen; tauto.
Ltac default_autorewrite ::= fail.
(* begin hide *)
Lemma degree_dexp_wrt_dexp_S_mutual :
(forall n1 ee1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp (S n1) ee1).
Proof.
apply_mutual_ind degree_dexp_wrt_dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma degree_dexp_wrt_dexp_S :
forall n1 ee1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp (S n1) ee1.
Proof.
pose proof degree_dexp_wrt_dexp_S_mutual as H; intuition eauto.
Qed.
Hint Resolve degree_dexp_wrt_dexp_S : lngen.
Lemma degree_dexp_wrt_dexp_O :
forall n1 ee1,
degree_dexp_wrt_dexp O ee1 ->
degree_dexp_wrt_dexp n1 ee1.
Proof.
induction n1; default_simp.
Qed.
Hint Resolve degree_dexp_wrt_dexp_O : lngen.
(* begin hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_mutual :
(forall ee1 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp (S n1) (close_dexp_wrt_dexp_rec n1 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec :
forall ee1 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp (S n1) (close_dexp_wrt_dexp_rec n1 x1 ee1).
Proof.
pose proof degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec : lngen.
(* end hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp :
forall ee1 x1,
degree_dexp_wrt_dexp 0 ee1 ->
degree_dexp_wrt_dexp 1 (close_dexp_wrt_dexp x1 ee1).
Proof.
unfold close_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve degree_dexp_wrt_dexp_close_dexp_wrt_dexp : lngen.
(* begin hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_inv_mutual :
(forall ee1 x1 n1,
degree_dexp_wrt_dexp (S n1) (close_dexp_wrt_dexp_rec n1 x1 ee1) ->
degree_dexp_wrt_dexp n1 ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; eauto with lngen.
Qed.
(* end hide *)
(* begin hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_inv :
forall ee1 x1 n1,
degree_dexp_wrt_dexp (S n1) (close_dexp_wrt_dexp_rec n1 x1 ee1) ->
degree_dexp_wrt_dexp n1 ee1.
Proof.
pose proof degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_inv_mutual as H; intuition eauto.
Qed.
Hint Immediate degree_dexp_wrt_dexp_close_dexp_wrt_dexp_rec_inv : lngen.
(* end hide *)
Lemma degree_dexp_wrt_dexp_close_dexp_wrt_dexp_inv :
forall ee1 x1,
degree_dexp_wrt_dexp 1 (close_dexp_wrt_dexp x1 ee1) ->
degree_dexp_wrt_dexp 0 ee1.
Proof.
unfold close_dexp_wrt_dexp; eauto with lngen.
Qed.
Hint Immediate degree_dexp_wrt_dexp_close_dexp_wrt_dexp_inv : lngen.
(* begin hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_mutual :
(forall ee1 ee2 n1,
degree_dexp_wrt_dexp (S n1) ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (open_dexp_wrt_dexp_rec n1 ee2 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec :
forall ee1 ee2 n1,
degree_dexp_wrt_dexp (S n1) ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (open_dexp_wrt_dexp_rec n1 ee2 ee1).
Proof.
pose proof degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec : lngen.
(* end hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp :
forall ee1 ee2,
degree_dexp_wrt_dexp 1 ee1 ->
degree_dexp_wrt_dexp 0 ee2 ->
degree_dexp_wrt_dexp 0 (open_dexp_wrt_dexp ee1 ee2).
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve degree_dexp_wrt_dexp_open_dexp_wrt_dexp : lngen.
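(* Hedged usage sketch, not part of the generated output: the common special
   case of opening with a free variable, discharged by the hints above. *)
Example degree_dexp_wrt_dexp_open_var_example :
forall ee1 x1,
degree_dexp_wrt_dexp 1 ee1 ->
degree_dexp_wrt_dexp 0 (open_dexp_wrt_dexp ee1 (de_var_f x1)).
Proof.
intros; auto with lngen.
Qed.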
(* begin hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_inv_mutual :
(forall ee1 ee2 n1,
degree_dexp_wrt_dexp n1 (open_dexp_wrt_dexp_rec n1 ee2 ee1) ->
degree_dexp_wrt_dexp (S n1) ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; eauto with lngen.
Qed.
(* end hide *)
(* begin hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_inv :
forall ee1 ee2 n1,
degree_dexp_wrt_dexp n1 (open_dexp_wrt_dexp_rec n1 ee2 ee1) ->
degree_dexp_wrt_dexp (S n1) ee1.
Proof.
pose proof degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_inv_mutual as H; intuition eauto.
Qed.
Hint Immediate degree_dexp_wrt_dexp_open_dexp_wrt_dexp_rec_inv : lngen.
(* end hide *)
Lemma degree_dexp_wrt_dexp_open_dexp_wrt_dexp_inv :
forall ee1 ee2,
degree_dexp_wrt_dexp 0 (open_dexp_wrt_dexp ee1 ee2) ->
degree_dexp_wrt_dexp 1 ee1.
Proof.
unfold open_dexp_wrt_dexp; eauto with lngen.
Qed.
Hint Immediate degree_dexp_wrt_dexp_open_dexp_wrt_dexp_inv : lngen.
(* *********************************************************************** *)
(** * Theorems about [open] and [close] *)
Ltac default_auto ::= auto with lngen brute_force; tauto.
Ltac default_autorewrite ::= fail.
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_inj_mutual :
(forall ee1 ee2 x1 n1,
close_dexp_wrt_dexp_rec n1 x1 ee1 = close_dexp_wrt_dexp_rec n1 x1 ee2 ->
ee1 = ee2).
Proof.
apply_mutual_ind dexp_mutind;
intros; match goal with
| |- _ = ?term => destruct term
end;
default_simp; eauto with lngen.
Qed.
(* end hide *)
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_inj :
forall ee1 ee2 x1 n1,
close_dexp_wrt_dexp_rec n1 x1 ee1 = close_dexp_wrt_dexp_rec n1 x1 ee2 ->
ee1 = ee2.
Proof.
pose proof close_dexp_wrt_dexp_rec_inj_mutual as H; intuition eauto.
Qed.
Hint Immediate close_dexp_wrt_dexp_rec_inj : lngen.
(* end hide *)
Lemma close_dexp_wrt_dexp_inj :
forall ee1 ee2 x1,
close_dexp_wrt_dexp x1 ee1 = close_dexp_wrt_dexp x1 ee2 ->
ee1 = ee2.
Proof.
unfold close_dexp_wrt_dexp; eauto with lngen.
Qed.
Hint Immediate close_dexp_wrt_dexp_inj : lngen.
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec_mutual :
(forall ee1 x1 n1,
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp_rec n1 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1) = ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec :
forall ee1 x1 n1,
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp_rec n1 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1) = ee1.
Proof.
pose proof close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec : lngen.
Hint Rewrite close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec using solve [auto] : lngen.
(* end hide *)
Lemma close_dexp_wrt_dexp_open_dexp_wrt_dexp :
forall ee1 x1,
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp x1 (open_dexp_wrt_dexp ee1 (de_var_f x1)) = ee1.
Proof.
unfold close_dexp_wrt_dexp; unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve close_dexp_wrt_dexp_open_dexp_wrt_dexp : lngen.
Hint Rewrite close_dexp_wrt_dexp_open_dexp_wrt_dexp using solve [auto] : lngen.
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_close_dexp_wrt_dexp_rec_mutual :
(forall ee1 x1 n1,
open_dexp_wrt_dexp_rec n1 (de_var_f x1) (close_dexp_wrt_dexp_rec n1 x1 ee1) = ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_close_dexp_wrt_dexp_rec :
forall ee1 x1 n1,
open_dexp_wrt_dexp_rec n1 (de_var_f x1) (close_dexp_wrt_dexp_rec n1 x1 ee1) = ee1.
Proof.
pose proof open_dexp_wrt_dexp_rec_close_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve open_dexp_wrt_dexp_rec_close_dexp_wrt_dexp_rec : lngen.
Hint Rewrite open_dexp_wrt_dexp_rec_close_dexp_wrt_dexp_rec using solve [auto] : lngen.
(* end hide *)
Lemma open_dexp_wrt_dexp_close_dexp_wrt_dexp :
forall ee1 x1,
open_dexp_wrt_dexp (close_dexp_wrt_dexp x1 ee1) (de_var_f x1) = ee1.
Proof.
unfold close_dexp_wrt_dexp; unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve open_dexp_wrt_dexp_close_dexp_wrt_dexp : lngen.
Hint Rewrite open_dexp_wrt_dexp_close_dexp_wrt_dexp using solve [auto] : lngen.
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_inj_mutual :
(forall ee2 ee1 x1 n1,
x1 `notin` fv_dexp ee2 ->
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee2 = open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1 ->
ee2 = ee1).
Proof.
apply_mutual_ind dexp_mutind;
intros; match goal with
| |- _ = ?term => destruct term
end;
default_simp; eauto with lngen.
Qed.
(* end hide *)
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_inj :
forall ee2 ee1 x1 n1,
x1 `notin` fv_dexp ee2 ->
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee2 = open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1 ->
ee2 = ee1.
Proof.
pose proof open_dexp_wrt_dexp_rec_inj_mutual as H; intuition eauto.
Qed.
Hint Immediate open_dexp_wrt_dexp_rec_inj : lngen.
(* end hide *)
Lemma open_dexp_wrt_dexp_inj :
forall ee2 ee1 x1,
x1 `notin` fv_dexp ee2 ->
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp ee2 (de_var_f x1) = open_dexp_wrt_dexp ee1 (de_var_f x1) ->
ee2 = ee1.
Proof.
unfold open_dexp_wrt_dexp; eauto with lngen.
Qed.
Hint Immediate open_dexp_wrt_dexp_inj : lngen.
(* *********************************************************************** *)
(** * Theorems about [lc] *)
Ltac default_auto ::= auto with lngen brute_force; tauto.
Ltac default_autorewrite ::= autorewrite with lngen.
(* begin hide *)
Lemma degree_dexp_wrt_dexp_of_lc_dexp_mutual :
(forall ee1,
lc_dexp ee1 ->
degree_dexp_wrt_dexp 0 ee1).
Proof.
apply_mutual_ind lc_dexp_mutind;
intros;
let x1 := fresh "x1" in pick_fresh x1;
repeat (match goal with
| H1 : _, H2 : _ |- _ => specialize H1 with H2
end);
default_simp; eauto with lngen.
Qed.
(* end hide *)
Lemma degree_dexp_wrt_dexp_of_lc_dexp :
forall ee1,
lc_dexp ee1 ->
degree_dexp_wrt_dexp 0 ee1.
Proof.
pose proof degree_dexp_wrt_dexp_of_lc_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve degree_dexp_wrt_dexp_of_lc_dexp : lngen.
(* begin hide *)
Lemma lc_dexp_of_degree_size_mutual :
forall i1,
(forall ee1,
size_dexp ee1 = i1 ->
degree_dexp_wrt_dexp 0 ee1 ->
lc_dexp ee1).
Proof.
intros i1; pattern i1; apply lt_wf_rec;
clear i1; intros i1 H1;
apply_mutual_ind dexp_mutind;
default_simp;
(* non-trivial cases *)
constructor; default_simp; eapply_first_lt_hyp;
(* instantiate the size *)
match goal with
| |- _ = _ => reflexivity
| _ => idtac
end;
instantiate;
(* everything should be easy now *)
default_simp.
Qed.
(* end hide *)
Lemma lc_dexp_of_degree :
forall ee1,
degree_dexp_wrt_dexp 0 ee1 ->
lc_dexp ee1.
Proof.
intros ee1; intros;
pose proof (lc_dexp_of_degree_size_mutual (size_dexp ee1));
intuition eauto.
Qed.
Hint Resolve lc_dexp_of_degree : lngen.
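(* Hedged corollary, not part of the generated output: the two lemmas above
   make local closure and degree 0 logically equivalent. *)
Lemma lc_dexp_iff_degree_dexp_wrt_dexp :
forall ee1, lc_dexp ee1 <-> degree_dexp_wrt_dexp 0 ee1.
Proof.
intros ee1; split; auto with lngen.
Qed.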
Ltac dexp_lc_exists_tac :=
repeat (match goal with
| H : _ |- _ =>
let J1 := fresh in pose proof H as J1; apply degree_dexp_wrt_dexp_of_lc_dexp in J1; clear H
end).
Lemma lc_de_abs_exists :
forall x1 ee1,
lc_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1)) ->
lc_dexp (de_abs ee1).
Proof.
intros; dexp_lc_exists_tac; eauto with lngen.
Qed.
Lemma lc_de_fixpoint_exists :
forall x1 ee1,
lc_dexp (open_dexp_wrt_dexp ee1 (de_var_f x1)) ->
lc_dexp (de_fixpoint ee1).
Proof.
intros; dexp_lc_exists_tac; eauto with lngen.
Qed.
Hint Extern 1 (lc_dexp (de_abs _)) =>
let x1 := fresh in
pick_fresh x1;
apply (lc_de_abs_exists x1) : core.
Hint Extern 1 (lc_dexp (de_fixpoint _)) =>
let x1 := fresh in
pick_fresh x1;
apply (lc_de_fixpoint_exists x1) : core.
Lemma lc_body_dexp_wrt_dexp :
forall ee1 ee2,
body_dexp_wrt_dexp ee1 ->
lc_dexp ee2 ->
lc_dexp (open_dexp_wrt_dexp ee1 ee2).
Proof.
unfold body_dexp_wrt_dexp;
default_simp;
let x1 := fresh "x" in
pick_fresh x1;
specialize_all x1;
dexp_lc_exists_tac;
eauto with lngen.
Qed.
Hint Resolve lc_body_dexp_wrt_dexp : lngen.
Lemma lc_body_de_abs_1 :
forall ee1,
lc_dexp (de_abs ee1) ->
body_dexp_wrt_dexp ee1.
Proof.
default_simp.
Qed.
Hint Resolve lc_body_de_abs_1 : lngen.
Lemma lc_body_de_fixpoint_1 :
forall ee1,
lc_dexp (de_fixpoint ee1) ->
body_dexp_wrt_dexp ee1.
Proof.
default_simp.
Qed.
Hint Resolve lc_body_de_fixpoint_1 : lngen.
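(* Hedged round trip, not part of the generated output: the body of a locally
   closed abstraction can be re-opened with any locally closed argument,
   combining lc_body_de_abs_1 with lc_body_dexp_wrt_dexp. *)
Example lc_open_body_de_abs :
forall ee1 ee2,
lc_dexp (de_abs ee1) ->
lc_dexp ee2 ->
lc_dexp (open_dexp_wrt_dexp ee1 ee2).
Proof.
intros; apply lc_body_dexp_wrt_dexp; auto with lngen.
Qed.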
(* begin hide *)
Lemma lc_dexp_unique_mutual :
(forall ee1 (proof2 proof3 : lc_dexp ee1), proof2 = proof3).
Proof.
apply_mutual_ind lc_dexp_mutind;
intros;
let proof1 := fresh "proof1" in
rename_last_into proof1; dependent destruction proof1;
f_equal; default_simp; auto using @functional_extensionality_dep with lngen.
Qed.
(* end hide *)
Lemma lc_dexp_unique :
forall ee1 (proof2 proof3 : lc_dexp ee1), proof2 = proof3.
Proof.
pose proof lc_dexp_unique_mutual as H; intuition eauto.
Qed.
Hint Resolve lc_dexp_unique : lngen.
(* begin hide *)
Lemma lc_dexp_of_lc_set_dexp_mutual :
(forall ee1, lc_set_dexp ee1 -> lc_dexp ee1).
Proof.
apply_mutual_ind lc_set_dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma lc_dexp_of_lc_set_dexp :
forall ee1, lc_set_dexp ee1 -> lc_dexp ee1.
Proof.
pose proof lc_dexp_of_lc_set_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve lc_dexp_of_lc_set_dexp : lngen.
(* begin hide *)
Lemma lc_set_dexp_of_lc_dexp_size_mutual :
forall i1,
(forall ee1,
size_dexp ee1 = i1 ->
lc_dexp ee1 ->
lc_set_dexp ee1).
Proof.
intros i1; pattern i1; apply lt_wf_rec;
clear i1; intros i1 H1;
apply_mutual_ind dexp_mutrec;
default_simp;
try solve [assert False by default_simp; tauto];
(* non-trivial cases *)
constructor; default_simp;
try first [apply lc_set_dexp_of_lc_dexp];
default_simp; eapply_first_lt_hyp;
(* instantiate the size *)
match goal with
| |- _ = _ => reflexivity
| _ => idtac
end;
instantiate;
(* everything should be easy now *)
default_simp.
Qed.
(* end hide *)
Lemma lc_set_dexp_of_lc_dexp :
forall ee1,
lc_dexp ee1 ->
lc_set_dexp ee1.
Proof.
intros ee1; intros;
pose proof (lc_set_dexp_of_lc_dexp_size_mutual (size_dexp ee1));
intuition eauto.
Qed.
Hint Resolve lc_set_dexp_of_lc_dexp : lngen.
(* *********************************************************************** *)
(** * More theorems about [open] and [close] *)
Ltac default_auto ::= auto with lngen; tauto.
Ltac default_autorewrite ::= fail.
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp_mutual :
(forall ee1 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp_rec n1 x1 ee1 = ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma close_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp :
forall ee1 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp_rec n1 x1 ee1 = ee1.
Proof.
pose proof close_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve close_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp : lngen.
Hint Rewrite close_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp using solve [auto] : lngen.
(* end hide *)
Lemma close_dexp_wrt_dexp_lc_dexp :
forall ee1 x1,
lc_dexp ee1 ->
x1 `notin` fv_dexp ee1 ->
close_dexp_wrt_dexp x1 ee1 = ee1.
Proof.
unfold close_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve close_dexp_wrt_dexp_lc_dexp : lngen.
Hint Rewrite close_dexp_wrt_dexp_lc_dexp using solve [auto] : lngen.
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp_mutual :
(forall ee2 ee1 n1,
degree_dexp_wrt_dexp n1 ee2 ->
open_dexp_wrt_dexp_rec n1 ee1 ee2 = ee2).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma open_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp :
forall ee2 ee1 n1,
degree_dexp_wrt_dexp n1 ee2 ->
open_dexp_wrt_dexp_rec n1 ee1 ee2 = ee2.
Proof.
pose proof open_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve open_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp : lngen.
Hint Rewrite open_dexp_wrt_dexp_rec_degree_dexp_wrt_dexp using solve [auto] : lngen.
(* end hide *)
Lemma open_dexp_wrt_dexp_lc_dexp :
forall ee2 ee1,
lc_dexp ee2 ->
open_dexp_wrt_dexp ee2 ee1 = ee2.
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve open_dexp_wrt_dexp_lc_dexp : lngen.
Hint Rewrite open_dexp_wrt_dexp_lc_dexp using solve [auto] : lngen.
(* *********************************************************************** *)
(** * Theorems about [fv] *)
Ltac default_auto ::= auto with set lngen; tauto.
Ltac default_autorewrite ::= autorewrite with lngen.
(* begin hide *)
Lemma fv_dexp_close_dexp_wrt_dexp_rec_mutual :
(forall ee1 x1 n1,
fv_dexp (close_dexp_wrt_dexp_rec n1 x1 ee1) [=] remove x1 (fv_dexp ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
(* begin hide *)
Lemma fv_dexp_close_dexp_wrt_dexp_rec :
forall ee1 x1 n1,
fv_dexp (close_dexp_wrt_dexp_rec n1 x1 ee1) [=] remove x1 (fv_dexp ee1).
Proof.
pose proof fv_dexp_close_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_close_dexp_wrt_dexp_rec : lngen.
Hint Rewrite fv_dexp_close_dexp_wrt_dexp_rec using solve [auto] : lngen.
(* end hide *)
Lemma fv_dexp_close_dexp_wrt_dexp :
forall ee1 x1,
fv_dexp (close_dexp_wrt_dexp x1 ee1) [=] remove x1 (fv_dexp ee1).
Proof.
unfold close_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve fv_dexp_close_dexp_wrt_dexp : lngen.
Hint Rewrite fv_dexp_close_dexp_wrt_dexp using solve [auto] : lngen.
(* begin hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_rec_lower_mutual :
(forall ee1 ee2 n1,
fv_dexp ee1 [<=] fv_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
(* begin hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_rec_lower :
forall ee1 ee2 n1,
fv_dexp ee1 [<=] fv_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1).
Proof.
pose proof fv_dexp_open_dexp_wrt_dexp_rec_lower_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_open_dexp_wrt_dexp_rec_lower : lngen.
(* end hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_lower :
forall ee1 ee2,
fv_dexp ee1 [<=] fv_dexp (open_dexp_wrt_dexp ee1 ee2).
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve fv_dexp_open_dexp_wrt_dexp_lower : lngen.
(* begin hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_rec_upper_mutual :
(forall ee1 ee2 n1,
fv_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1) [<=] fv_dexp ee2 `union` fv_dexp ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
(* begin hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_rec_upper :
forall ee1 ee2 n1,
fv_dexp (open_dexp_wrt_dexp_rec n1 ee2 ee1) [<=] fv_dexp ee2 `union` fv_dexp ee1.
Proof.
pose proof fv_dexp_open_dexp_wrt_dexp_rec_upper_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_open_dexp_wrt_dexp_rec_upper : lngen.
(* end hide *)
Lemma fv_dexp_open_dexp_wrt_dexp_upper :
forall ee1 ee2,
fv_dexp (open_dexp_wrt_dexp ee1 ee2) [<=] fv_dexp ee2 `union` fv_dexp ee1.
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve fv_dexp_open_dexp_wrt_dexp_upper : lngen.
(* begin hide *)
Lemma fv_dexp_subst_dexp_fresh_mutual :
(forall ee1 ee2 x1,
x1 `notin` fv_dexp ee1 ->
fv_dexp (subst_dexp ee2 x1 ee1) [=] fv_dexp ee1).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
Lemma fv_dexp_subst_dexp_fresh :
forall ee1 ee2 x1,
x1 `notin` fv_dexp ee1 ->
fv_dexp (subst_dexp ee2 x1 ee1) [=] fv_dexp ee1.
Proof.
pose proof fv_dexp_subst_dexp_fresh_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_subst_dexp_fresh : lngen.
Hint Rewrite fv_dexp_subst_dexp_fresh using solve [auto] : lngen.
(* begin hide *)
Lemma fv_dexp_subst_dexp_lower_mutual :
(forall ee1 ee2 x1,
remove x1 (fv_dexp ee1) [<=] fv_dexp (subst_dexp ee2 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
Lemma fv_dexp_subst_dexp_lower :
forall ee1 ee2 x1,
remove x1 (fv_dexp ee1) [<=] fv_dexp (subst_dexp ee2 x1 ee1).
Proof.
pose proof fv_dexp_subst_dexp_lower_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_subst_dexp_lower : lngen.
(* begin hide *)
Lemma fv_dexp_subst_dexp_notin_mutual :
(forall ee1 ee2 x1 x2,
x2 `notin` fv_dexp ee1 ->
x2 `notin` fv_dexp ee2 ->
x2 `notin` fv_dexp (subst_dexp ee2 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
Lemma fv_dexp_subst_dexp_notin :
forall ee1 ee2 x1 x2,
x2 `notin` fv_dexp ee1 ->
x2 `notin` fv_dexp ee2 ->
x2 `notin` fv_dexp (subst_dexp ee2 x1 ee1).
Proof.
pose proof fv_dexp_subst_dexp_notin_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_subst_dexp_notin : lngen.
(* begin hide *)
Lemma fv_dexp_subst_dexp_upper_mutual :
(forall ee1 ee2 x1,
fv_dexp (subst_dexp ee2 x1 ee1) [<=] fv_dexp ee2 `union` remove x1 (fv_dexp ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp; fsetdec.
Qed.
(* end hide *)
Lemma fv_dexp_subst_dexp_upper :
forall ee1 ee2 x1,
fv_dexp (subst_dexp ee2 x1 ee1) [<=] fv_dexp ee2 `union` remove x1 (fv_dexp ee1).
Proof.
pose proof fv_dexp_subst_dexp_upper_mutual as H; intuition eauto.
Qed.
Hint Resolve fv_dexp_subst_dexp_upper : lngen.
(* *********************************************************************** *)
(** * Theorems about [subst] *)
Ltac default_auto ::= auto with lngen brute_force; tauto.
Ltac default_autorewrite ::= autorewrite with lngen.
(* begin hide *)
Lemma subst_dexp_close_dexp_wrt_dexp_rec_mutual :
(forall ee2 ee1 x1 x2 n1,
degree_dexp_wrt_dexp n1 ee1 ->
x1 <> x2 ->
x2 `notin` fv_dexp ee1 ->
subst_dexp ee1 x1 (close_dexp_wrt_dexp_rec n1 x2 ee2) = close_dexp_wrt_dexp_rec n1 x2 (subst_dexp ee1 x1 ee2)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_close_dexp_wrt_dexp_rec :
forall ee2 ee1 x1 x2 n1,
degree_dexp_wrt_dexp n1 ee1 ->
x1 <> x2 ->
x2 `notin` fv_dexp ee1 ->
subst_dexp ee1 x1 (close_dexp_wrt_dexp_rec n1 x2 ee2) = close_dexp_wrt_dexp_rec n1 x2 (subst_dexp ee1 x1 ee2).
Proof.
pose proof subst_dexp_close_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_close_dexp_wrt_dexp_rec : lngen.
Lemma subst_dexp_close_dexp_wrt_dexp :
forall ee2 ee1 x1 x2,
lc_dexp ee1 -> x1 <> x2 ->
x2 `notin` fv_dexp ee1 ->
subst_dexp ee1 x1 (close_dexp_wrt_dexp x2 ee2) = close_dexp_wrt_dexp x2 (subst_dexp ee1 x1 ee2).
Proof.
unfold close_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_close_dexp_wrt_dexp : lngen.
(* begin hide *)
Lemma subst_dexp_degree_dexp_wrt_dexp_mutual :
(forall ee1 ee2 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (subst_dexp ee2 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_degree_dexp_wrt_dexp :
forall ee1 ee2 x1 n1,
degree_dexp_wrt_dexp n1 ee1 ->
degree_dexp_wrt_dexp n1 ee2 ->
degree_dexp_wrt_dexp n1 (subst_dexp ee2 x1 ee1).
Proof.
pose proof subst_dexp_degree_dexp_wrt_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_degree_dexp_wrt_dexp : lngen.
(* begin hide *)
Lemma subst_dexp_fresh_eq_mutual :
(forall ee2 ee1 x1,
x1 `notin` fv_dexp ee2 ->
subst_dexp ee1 x1 ee2 = ee2).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_fresh_eq :
forall ee2 ee1 x1,
x1 `notin` fv_dexp ee2 ->
subst_dexp ee1 x1 ee2 = ee2.
Proof.
pose proof subst_dexp_fresh_eq_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_fresh_eq : lngen.
Hint Rewrite subst_dexp_fresh_eq using solve [auto] : lngen.
(* begin hide *)
Lemma subst_dexp_fresh_same_mutual :
(forall ee2 ee1 x1,
x1 `notin` fv_dexp ee1 ->
x1 `notin` fv_dexp (subst_dexp ee1 x1 ee2)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_fresh_same :
forall ee2 ee1 x1,
x1 `notin` fv_dexp ee1 ->
x1 `notin` fv_dexp (subst_dexp ee1 x1 ee2).
Proof.
pose proof subst_dexp_fresh_same_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_fresh_same : lngen.
(* begin hide *)
Lemma subst_dexp_fresh_mutual :
(forall ee2 ee1 x1 x2,
x1 `notin` fv_dexp ee2 ->
x1 `notin` fv_dexp ee1 ->
x1 `notin` fv_dexp (subst_dexp ee1 x2 ee2)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_fresh :
forall ee2 ee1 x1 x2,
x1 `notin` fv_dexp ee2 ->
x1 `notin` fv_dexp ee1 ->
x1 `notin` fv_dexp (subst_dexp ee1 x2 ee2).
Proof.
pose proof subst_dexp_fresh_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_fresh : lngen.
Lemma subst_dexp_lc_dexp :
forall ee1 ee2 x1,
lc_dexp ee1 ->
lc_dexp ee2 ->
lc_dexp (subst_dexp ee2 x1 ee1).
Proof.
default_simp.
Qed.
Hint Resolve subst_dexp_lc_dexp : lngen.
(* begin hide *)
Lemma subst_dexp_open_dexp_wrt_dexp_rec_mutual :
(forall ee3 ee1 ee2 x1 n1,
lc_dexp ee1 ->
subst_dexp ee1 x1 (open_dexp_wrt_dexp_rec n1 ee2 ee3) = open_dexp_wrt_dexp_rec n1 (subst_dexp ee1 x1 ee2) (subst_dexp ee1 x1 ee3)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma subst_dexp_open_dexp_wrt_dexp_rec :
forall ee3 ee1 ee2 x1 n1,
lc_dexp ee1 ->
subst_dexp ee1 x1 (open_dexp_wrt_dexp_rec n1 ee2 ee3) = open_dexp_wrt_dexp_rec n1 (subst_dexp ee1 x1 ee2) (subst_dexp ee1 x1 ee3).
Proof.
pose proof subst_dexp_open_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_open_dexp_wrt_dexp_rec : lngen.
(* end hide *)
Lemma subst_dexp_open_dexp_wrt_dexp :
forall ee3 ee1 ee2 x1,
lc_dexp ee1 ->
subst_dexp ee1 x1 (open_dexp_wrt_dexp ee3 ee2) = open_dexp_wrt_dexp (subst_dexp ee1 x1 ee3) (subst_dexp ee1 x1 ee2).
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_open_dexp_wrt_dexp : lngen.
Lemma subst_dexp_open_dexp_wrt_dexp_var :
forall ee2 ee1 x1 x2,
x1 <> x2 ->
lc_dexp ee1 ->
open_dexp_wrt_dexp (subst_dexp ee1 x1 ee2) (de_var_f x2) = subst_dexp ee1 x1 (open_dexp_wrt_dexp ee2 (de_var_f x2)).
Proof.
intros; rewrite subst_dexp_open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_open_dexp_wrt_dexp_var : lngen.
(* begin hide *)
Lemma subst_dexp_spec_rec_mutual :
(forall ee1 ee2 x1 n1,
subst_dexp ee2 x1 ee1 = open_dexp_wrt_dexp_rec n1 ee2 (close_dexp_wrt_dexp_rec n1 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma subst_dexp_spec_rec :
forall ee1 ee2 x1 n1,
subst_dexp ee2 x1 ee1 = open_dexp_wrt_dexp_rec n1 ee2 (close_dexp_wrt_dexp_rec n1 x1 ee1).
Proof.
pose proof subst_dexp_spec_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_spec_rec : lngen.
(* end hide *)
Lemma subst_dexp_spec :
forall ee1 ee2 x1,
subst_dexp ee2 x1 ee1 = open_dexp_wrt_dexp (close_dexp_wrt_dexp x1 ee1) ee2.
Proof.
unfold close_dexp_wrt_dexp; unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_spec : lngen.
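(* Hedged corollary, not part of the generated output: subst_dexp_spec plus
   subst_dexp_fresh_eq show that closing with respect to a fresh name and
   then opening with any term is the identity. *)
Lemma open_dexp_wrt_dexp_close_dexp_wrt_dexp_fresh :
forall ee1 ee2 x1,
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp (close_dexp_wrt_dexp x1 ee1) ee2 = ee1.
Proof.
intros; rewrite <- subst_dexp_spec; auto with lngen.
Qed.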
(* begin hide *)
Lemma subst_dexp_subst_dexp_mutual :
(forall ee1 ee2 ee3 x2 x1,
x2 `notin` fv_dexp ee2 ->
x2 <> x1 ->
subst_dexp ee2 x1 (subst_dexp ee3 x2 ee1) = subst_dexp (subst_dexp ee2 x1 ee3) x2 (subst_dexp ee2 x1 ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_subst_dexp :
forall ee1 ee2 ee3 x2 x1,
x2 `notin` fv_dexp ee2 ->
x2 <> x1 ->
subst_dexp ee2 x1 (subst_dexp ee3 x2 ee1) = subst_dexp (subst_dexp ee2 x1 ee3) x2 (subst_dexp ee2 x1 ee1).
Proof.
pose proof subst_dexp_subst_dexp_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_subst_dexp : lngen.
(* begin hide *)
Lemma subst_dexp_close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec_mutual :
(forall ee2 ee1 x1 x2 n1,
x2 `notin` fv_dexp ee2 ->
x2 `notin` fv_dexp ee1 ->
x2 <> x1 ->
degree_dexp_wrt_dexp n1 ee1 ->
subst_dexp ee1 x1 ee2 = close_dexp_wrt_dexp_rec n1 x2 (subst_dexp ee1 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x2) ee2))).
Proof.
apply_mutual_ind dexp_mutrec;
default_simp.
Qed.
(* end hide *)
(* begin hide *)
Lemma subst_dexp_close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec :
forall ee2 ee1 x1 x2 n1,
x2 `notin` fv_dexp ee2 ->
x2 `notin` fv_dexp ee1 ->
x2 <> x1 ->
degree_dexp_wrt_dexp n1 ee1 ->
subst_dexp ee1 x1 ee2 = close_dexp_wrt_dexp_rec n1 x2 (subst_dexp ee1 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x2) ee2)).
Proof.
pose proof subst_dexp_close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_close_dexp_wrt_dexp_rec_open_dexp_wrt_dexp_rec : lngen.
(* end hide *)
Lemma subst_dexp_close_dexp_wrt_dexp_open_dexp_wrt_dexp :
forall ee2 ee1 x1 x2,
x2 `notin` fv_dexp ee2 ->
x2 `notin` fv_dexp ee1 ->
x2 <> x1 ->
lc_dexp ee1 ->
subst_dexp ee1 x1 ee2 = close_dexp_wrt_dexp x2 (subst_dexp ee1 x1 (open_dexp_wrt_dexp ee2 (de_var_f x2))).
Proof.
unfold close_dexp_wrt_dexp; unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_close_dexp_wrt_dexp_open_dexp_wrt_dexp : lngen.
Lemma subst_dexp_de_abs :
forall x2 ee2 ee1 x1,
lc_dexp ee1 ->
x2 `notin` fv_dexp ee1 `union` fv_dexp ee2 `union` singleton x1 ->
subst_dexp ee1 x1 (de_abs ee2) = de_abs (close_dexp_wrt_dexp x2 (subst_dexp ee1 x1 (open_dexp_wrt_dexp ee2 (de_var_f x2)))).
Proof.
default_simp.
Qed.
Hint Resolve subst_dexp_de_abs : lngen.
Lemma subst_dexp_de_fixpoint :
forall x2 ee2 ee1 x1,
lc_dexp ee1 ->
x2 `notin` fv_dexp ee1 `union` fv_dexp ee2 `union` singleton x1 ->
subst_dexp ee1 x1 (de_fixpoint ee2) = de_fixpoint (close_dexp_wrt_dexp x2 (subst_dexp ee1 x1 (open_dexp_wrt_dexp ee2 (de_var_f x2)))).
Proof.
default_simp.
Qed.
Hint Resolve subst_dexp_de_fixpoint : lngen.
(* begin hide *)
Lemma subst_dexp_intro_rec_mutual :
(forall ee1 x1 ee2 n1,
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp_rec n1 ee2 ee1 = subst_dexp ee2 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1)).
Proof.
apply_mutual_ind dexp_mutind;
default_simp.
Qed.
(* end hide *)
Lemma subst_dexp_intro_rec :
forall ee1 x1 ee2 n1,
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp_rec n1 ee2 ee1 = subst_dexp ee2 x1 (open_dexp_wrt_dexp_rec n1 (de_var_f x1) ee1).
Proof.
pose proof subst_dexp_intro_rec_mutual as H; intuition eauto.
Qed.
Hint Resolve subst_dexp_intro_rec : lngen.
Hint Rewrite subst_dexp_intro_rec using solve [auto] : lngen.
Lemma subst_dexp_intro :
forall x1 ee1 ee2,
x1 `notin` fv_dexp ee1 ->
open_dexp_wrt_dexp ee1 ee2 = subst_dexp ee2 x1 (open_dexp_wrt_dexp ee1 (de_var_f x1)).
Proof.
unfold open_dexp_wrt_dexp; default_simp.
Qed.
Hint Resolve subst_dexp_intro : lngen.
(* *********************************************************************** *)
(** * "Restore" tactics *)
Ltac default_auto ::= auto; tauto.
Ltac default_autorewrite ::= fail.
|
module Dep

interface Monad m => FooBar m where
  Foo : {0 a : Type} -> a -> m a -> Type
  Bar : {0 A : Type} -> m A -> Type
  foo : {0 A : Type} -> (x : A) -> (ma : m A) -> Foo x ma -> Bar ma
|
module Extra.Proof

export
unsafeRefl : {0 a, b : t} -> a === b
unsafeRefl = believe_me (the (a === a) Refl)

-- TODO: rewrite does not seem to work on linear values; am I missing something?
||| A poor man's linear version of rewrite/replace
public export
replace1 : forall x, y, p . (0 rule : x = y) -> (1 _ : p x) -> p y
replace1 prf = assert_linear (replace {p = p} prf)

||| Crash the program
export total
%foreign "scheme:lambda (x) (blodwen-error-quit x)"
crash : String -> a
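
-- Hedged usage sketch, not in the original module (castLinear is a
-- hypothetical name): replace1 transports a linear value along an erased
-- equality, here between two propositionally equal types.
castLinear : {0 a, b : Type} -> (0 prf : a = b) -> (1 x : a) -> b
castLinear prf x = replace1 {p = \t => t} prf x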
|
module StructuralEquivalents

import Data.DPair
import Data.Nat

namespace Equivalence

  data Singleton : a -> Type where
    Val : (x : a) -> Singleton x

  NonStruct : {a : Type} -> a -> Type
  NonStruct x = Subset a (\y => y = x)

  -- Without respect to zero quantity of the bound
  Rougher : {a : Type} -> a -> Type
  Rougher x = (y : a ** y = x)

namespace BoundedNats

  data Fin : Nat -> Type where
    FZ : Fin $ S n
    FS : Fin n -> Fin $ S n

  NonStruct : Nat -> Type
  NonStruct n = Subset Nat (`LT` n)

  -- Without respect to zero quantity of the bound
  Rougher : Nat -> Type
  Rougher n = (x : Nat ** x `LT` n)
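
  -- Hedged sketch, not in the original module (toNonStruct is a hypothetical
  -- name): the structural Fin embeds into its Subset-based equivalent, with
  -- Data.Nat's LTEZero/LTESucc constructors providing the bound proof.
  toNonStruct : Fin n -> BoundedNats.NonStruct n
  toNonStruct FZ = Element Z (LTESucc LTEZero)
  toNonStruct (FS k) = case toNonStruct k of
    Element x prf => Element (S x) (LTESucc prf)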
|